Merge branch 'master' into @min-@max-coercion-checks
Commit c091e47f54
154 changed files with 6674 additions and 3390 deletions
35 .github/workflows/ci-pr-riscv64-linux.yaml (vendored)
@@ -1,35 +0,0 @@
-name: ci-pr-riscv64-linux
-on:
-  pull_request:
-    types:
-      - labeled
-      - opened
-      - reopened
-      - synchronize
-      - unlabeled
-concurrency:
-  # Cancels pending runs when a PR gets updated.
-  group: riscv64-linux-${{ github.head_ref || github.run_id }}-${{ github.actor }}
-  cancel-in-progress: true
-permissions:
-  # Sets permission policy for `GITHUB_TOKEN`
-  contents: read
-jobs:
-  riscv64-linux-debug:
-    if: contains(github.event.pull_request.labels.*.name, 'ci-riscv64-linux')
-    timeout-minutes: 420
-    runs-on: [self-hosted, Linux, riscv64]
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Build and Test
-        run: sh ci/riscv64-linux-debug.sh
-  riscv64-linux-release:
-    if: contains(github.event.pull_request.labels.*.name, 'ci-riscv64-linux')
-    timeout-minutes: 420
-    runs-on: [self-hosted, Linux, riscv64]
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Build and Test
-        run: sh ci/riscv64-linux-release.sh
27 .github/workflows/ci.yaml (vendored)
@@ -50,33 +50,6 @@ jobs:
         uses: actions/checkout@v4
       - name: Build and Test
         run: sh ci/aarch64-linux-release.sh
-  riscv64-linux-debug:
-    if: github.event_name == 'push'
-    timeout-minutes: 420
-    runs-on: [self-hosted, Linux, riscv64]
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Build and Test
-        run: sh ci/riscv64-linux-debug.sh
-  riscv64-linux-release:
-    if: github.event_name == 'push'
-    timeout-minutes: 420
-    runs-on: [self-hosted, Linux, riscv64]
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Build and Test
-        run: sh ci/riscv64-linux-release.sh
   x86_64-macos-release:
     runs-on: "macos-13"
     env:
       ARCH: "x86_64"
     steps:
       - name: Checkout
         uses: actions/checkout@v4
       - name: Build and Test
         run: ci/x86_64-macos-release.sh
   aarch64-macos-debug:
     runs-on: [self-hosted, macOS, aarch64]
     env:
CMakeLists.txt

@@ -583,6 +583,7 @@ set(ZIG_STAGE2_SOURCES
     src/link/Elf/relocatable.zig
     src/link/Elf/relocation.zig
     src/link/Elf/synthetic_sections.zig
+    src/link/Elf2.zig
     src/link/Goff.zig
     src/link/LdScript.zig
     src/link/Lld.zig
@@ -612,6 +613,7 @@ set(ZIG_STAGE2_SOURCES
     src/link/MachO/synthetic.zig
     src/link/MachO/Thunk.zig
     src/link/MachO/uuid.zig
+    src/link/MappedFile.zig
     src/link/Queue.zig
     src/link/StringTable.zig
     src/link/Wasm.zig
build.zig

@@ -202,6 +202,7 @@ pub fn build(b: *std.Build) !void {
     });
     exe.pie = pie;
     exe.entitlements = entitlements;
+    exe.use_new_linker = b.option(bool, "new-linker", "Use the new linker");

     const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");
     exe.use_llvm = use_llvm;
@@ -631,6 +632,12 @@ pub fn build(b: *std.Build) !void {
     const test_incremental_step = b.step("test-incremental", "Run the incremental compilation test cases");
     try tests.addIncrementalTests(b, test_incremental_step);
     test_step.dependOn(test_incremental_step);
+
+    if (tests.addLibcTests(b, .{
+        .optimize_modes = optimization_modes,
+        .test_filters = test_filters,
+        .test_target_filters = test_target_filters,
+    })) |test_libc_step| test_step.dependOn(test_libc_step);
 }

 fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
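The first hunk above threads a new tri-state `-Dnew-linker` option into `exe.use_new_linker`. For orientation, a downstream project could wire up the same flag once it is on a compiler containing this change; this is a minimal illustrative sketch, not part of the diff (the project name and layout are hypothetical, only the option name and field come from the hunk):

    const std = @import("std");

    pub fn build(b: *std.Build) void {
        const exe = b.addExecutable(.{
            .name = "demo",
            .root_module = b.createModule(.{
                .root_source_file = b.path("src/main.zig"),
                .target = b.standardTargetOptions(.{}),
                .optimize = b.standardOptimizeOption(.{}),
            }),
        });
        // null leaves the choice to the compiler; `zig build -Dnew-linker=true`
        // opts in and `-Dnew-linker=false` opts out, matching the ?bool field.
        exe.use_new_linker = b.option(bool, "new-linker", "Use the new linker");
        b.installArtifact(exe);
    }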
71 ci/x86_64-freebsd-debug.sh (new executable file)
@@ -0,0 +1,71 @@
+#!/bin/sh
+
+# Requires cmake ninja-build
+
+set -x
+set -e
+
+ARCH="x86_64"
+TARGET="$ARCH-freebsd-none"
+MCPU="baseline"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.312+164c598cd"
+PREFIX="$HOME/deps/$CACHE_BASENAME"
+ZIG="$PREFIX/bin/zig"
+
+# Make the `zig version` number consistent.
+# This will affect the cmake command below.
+git fetch --unshallow || true
+git fetch --tags
+
+# Override the cache directories because they won't actually help other CI runs
+# which will be testing alternate versions of zig, and ultimately would just
+# fill up space on the hard drive for no reason.
+export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
+export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"
+
+mkdir build-debug
+cd build-debug
+
+export CC="$ZIG cc -target $TARGET -mcpu=$MCPU"
+export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU"
+
+cmake .. \
+  -DCMAKE_INSTALL_PREFIX="stage3-debug" \
+  -DCMAKE_PREFIX_PATH="$PREFIX" \
+  -DCMAKE_BUILD_TYPE=Debug \
+  -DZIG_TARGET_TRIPLE="$TARGET" \
+  -DZIG_TARGET_MCPU="$MCPU" \
+  -DZIG_STATIC=ON \
+  -DZIG_NO_LIB=ON \
+  -GNinja \
+  -DCMAKE_C_LINKER_DEPFILE_SUPPORTED=FALSE \
+  -DCMAKE_CXX_LINKER_DEPFILE_SUPPORTED=FALSE
+# https://github.com/ziglang/zig/issues/22213
+
+# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
+# so that installation and testing do not get affected by them.
+unset CC
+unset CXX
+
+ninja install
+
+stage3-debug/bin/zig build test docs \
+  --maxrss 32212254720 \
+  -Dstatic-llvm \
+  -Dskip-linux \
+  -Dskip-netbsd \
+  -Dskip-windows \
+  -Dskip-macos \
+  -Dtarget=native-native-none \
+  --search-prefix "$PREFIX" \
+  --zig-lib-dir "$PWD/../lib"
+
+stage3-debug/bin/zig build \
+  --prefix stage4-debug \
+  -Denable-llvm \
+  -Dno-lib \
+  -Dtarget=$TARGET \
+  -Duse-zig-libcxx \
+  -Dversion-string="$(stage3-debug/bin/zig version)"
+
+stage4-debug/bin/zig test ../test/behavior.zig
77 ci/x86_64-freebsd-release.sh (new executable file)
@@ -0,0 +1,77 @@
+#!/bin/sh
+
+# Requires cmake ninja-build
+
+set -x
+set -e
+
+ARCH="x86_64"
+TARGET="$ARCH-freebsd-none"
+MCPU="baseline"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.312+164c598cd"
+PREFIX="$HOME/deps/$CACHE_BASENAME"
+ZIG="$PREFIX/bin/zig"
+
+# Make the `zig version` number consistent.
+# This will affect the cmake command below.
+git fetch --unshallow || true
+git fetch --tags
+
+# Override the cache directories because they won't actually help other CI runs
+# which will be testing alternate versions of zig, and ultimately would just
+# fill up space on the hard drive for no reason.
+export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
+export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"
+
+mkdir build-release
+cd build-release
+
+export CC="$ZIG cc -target $TARGET -mcpu=$MCPU"
+export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU"
+
+cmake .. \
+  -DCMAKE_INSTALL_PREFIX="stage3-release" \
+  -DCMAKE_PREFIX_PATH="$PREFIX" \
+  -DCMAKE_BUILD_TYPE=Release \
+  -DZIG_TARGET_TRIPLE="$TARGET" \
+  -DZIG_TARGET_MCPU="$MCPU" \
+  -DZIG_STATIC=ON \
+  -DZIG_NO_LIB=ON \
+  -GNinja \
+  -DCMAKE_C_LINKER_DEPFILE_SUPPORTED=FALSE \
+  -DCMAKE_CXX_LINKER_DEPFILE_SUPPORTED=FALSE
+# https://github.com/ziglang/zig/issues/22213
+
+# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
+# so that installation and testing do not get affected by them.
+unset CC
+unset CXX
+
+ninja install
+
+stage3-release/bin/zig build test docs \
+  --maxrss 32212254720 \
+  -Dstatic-llvm \
+  -Dskip-linux \
+  -Dskip-netbsd \
+  -Dskip-windows \
+  -Dskip-macos \
+  -Dtarget=native-native-none \
+  --search-prefix "$PREFIX" \
+  --zig-lib-dir "$PWD/../lib"
+
+# Ensure that stage3 and stage4 are byte-for-byte identical.
+stage3-release/bin/zig build \
+  --prefix stage4-release \
+  -Denable-llvm \
+  -Dno-lib \
+  -Doptimize=ReleaseFast \
+  -Dstrip \
+  -Dtarget=$TARGET \
+  -Duse-zig-libcxx \
+  -Dversion-string="$(stage3-release/bin/zig version)"
+
+# diff returns an error code if the files differ.
+echo "If the following command fails, it means nondeterminism has been"
+echo "introduced, making stage3 and stage4 no longer byte-for-byte identical."
+diff stage3-release/bin/zig stage4-release/bin/zig
0 ci/x86_64-linux-debug-llvm.sh (Normal file → Executable file)
@@ -65,6 +65,7 @@ stage3-release/bin/zig build test docs \
   -fqemu \
   -fwasmtime \
   -Dstatic-llvm \
+  -Dskip-freebsd \
   -Dtarget=native-native-musl \
   --search-prefix "$PREFIX" \
   --zig-lib-dir "$PWD/../lib" \
@@ -1,75 +0,0 @@
-#!/bin/sh
-
-set -x
-set -e
-
-ZIGDIR="$PWD"
-TARGET="$ARCH-macos-none"
-MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
-PREFIX="$HOME/$CACHE_BASENAME"
-JOBS="-j3"
-ZIG="$PREFIX/bin/zig"
-
-if [ ! -d "$PREFIX" ]; then
-  cd $HOME
-  curl -L -O "https://ziglang.org/deps/$CACHE_BASENAME.tar.xz"
-  tar xf "$CACHE_BASENAME.tar.xz"
-fi
-
-cd $ZIGDIR
-
-# Make the `zig version` number consistent.
-# This will affect the cmake command below.
-git fetch --unshallow || true
-git fetch --tags
-
-# Override the cache directories because they won't actually help other CI runs
-# which will be testing alternate versions of zig, and ultimately would just
-# fill up space on the hard drive for no reason.
-export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
-export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"
-
-# Test building from source without LLVM.
-cc -o bootstrap bootstrap.c
-./bootstrap
-./zig2 build -Dno-lib
-./zig-out/bin/zig test test/behavior.zig
-
-mkdir build
-cd build
-
-cmake .. \
-  -DCMAKE_PREFIX_PATH="$PREFIX" \
-  -DCMAKE_BUILD_TYPE=Release \
-  -DCMAKE_C_COMPILER="$ZIG;cc;-target;$TARGET;-mcpu=$MCPU" \
-  -DCMAKE_CXX_COMPILER="$ZIG;c++;-target;$TARGET;-mcpu=$MCPU" \
-  -DZIG_TARGET_TRIPLE="$TARGET" \
-  -DZIG_TARGET_MCPU="$MCPU" \
-  -DZIG_STATIC=ON \
-  -DZIG_NO_LIB=ON
-
-make $JOBS install
-
-stage3/bin/zig build test docs \
-  --zig-lib-dir "$PWD/../lib" \
-  -Denable-macos-sdk \
-  -Dstatic-llvm \
-  -Dskip-non-native \
-  --search-prefix "$PREFIX"
-
-# Ensure that stage3 and stage4 are byte-for-byte identical.
-stage3/bin/zig build \
-  --prefix stage4 \
-  -Denable-llvm \
-  -Dno-lib \
-  -Doptimize=ReleaseFast \
-  -Dstrip \
-  -Dtarget=$TARGET \
-  -Duse-zig-libcxx \
-  -Dversion-string="$(stage3/bin/zig version)"
-
-# diff returns an error code if the files differ.
-echo "If the following command fails, it means nondeterminism has been"
-echo "introduced, making stage3 and stage4 no longer byte-for-byte identical."
-diff stage3/bin/zig stage4/bin/zig
@@ -8,7 +8,7 @@ const S = packed struct {
 test "overaligned pointer to packed struct" {
     var foo: S align(4) = .{ .a = 1, .b = 2 };
     const ptr: *align(4) S = &foo;
-    const ptr_to_b: *u32 = &ptr.b;
+    const ptr_to_b = &ptr.b;
     try expect(ptr_to_b.* == 2);
 }
lib/compiler_rt/ceil.zig

@@ -3,6 +3,7 @@
 //!
 //! https://git.musl-libc.org/cgit/musl/tree/src/math/ceilf.c
 //! https://git.musl-libc.org/cgit/musl/tree/src/math/ceil.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceill.c

 const std = @import("std");
 const builtin = @import("builtin");
@@ -27,8 +28,23 @@ comptime {
 }

 pub fn __ceilh(x: f16) callconv(.c) f16 {
-    // TODO: more efficient implementation
-    return @floatCast(ceilf(x));
+    var u: u16 = @bitCast(x);
+    const e = @as(i16, @intCast((u >> 10) & 31)) - 15;
+    var m: u16 = undefined;
+
+    if (e >= 10) return x;
+
+    if (e >= 0) {
+        m = @as(u16, 0x03FF) >> @intCast(e);
+        if (u & m == 0) return x;
+        if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
+        if (u >> 15 == 0) u += m;
+        u &= ~m;
+        return @bitCast(u);
+    } else {
+        if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
+        return if (u >> 15 != 0) -0.0 else if (u << 1 != 0) 1.0 else x;
+    }
 }

 pub fn ceilf(x: f32) callconv(.c) f32 {
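The new `__ceilh` works directly on the f16 bit pattern: bits 10..14 hold the biased exponent, so `(u >> 10) & 31` minus the bias 15 yields the unbiased exponent `e`, and `0x03FF >> e` masks exactly the fraction bits below the integer boundary; adding the mask before clearing it rounds non-negative values up. A small sketch of that decomposition (illustrative, not part of the diff):

    const std = @import("std");

    test "f16 field extraction used by __ceilh" {
        const x: f16 = 3.25;
        const u: u16 = @bitCast(x);
        const biased = (u >> 10) & 31;
        const e = @as(i16, @intCast(biased)) - 15; // unbiased exponent, 1 for 3.25
        try std.testing.expectEqual(@as(i16, 1), e);
        // With e = 1, 0x03FF >> 1 selects the fraction bits below 2^0;
        // they are nonzero for 3.25, so ceil must round the value up.
        const m: u16 = @as(u16, 0x03FF) >> @intCast(e);
        try std.testing.expect(u & m != 0);
    }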
@@ -36,31 +52,18 @@ pub fn ceilf(x: f32) callconv(.c) f32 {
     const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
     var m: u32 = undefined;

-    // TODO: Shouldn't need this explicit check.
-    if (x == 0.0) {
-        return x;
-    }
+    if (e >= 23) return x;

-    if (e >= 23) {
-        return x;
-    } else if (e >= 0) {
+    if (e >= 0) {
         m = @as(u32, 0x007FFFFF) >> @intCast(e);
-        if (u & m == 0) {
-            return x;
-        }
+        if (u & m == 0) return x;
         if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
-        if (u >> 31 == 0) {
-            u += m;
-        }
+        if (u >> 31 == 0) u += m;
         u &= ~m;
         return @bitCast(u);
     } else {
         if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
-        if (u >> 31 != 0) {
-            return -0.0;
-        } else {
-            return 1.0;
-        }
+        return if (u >> 31 != 0) -0.0 else if (u << 1 != 0) 1.0 else x;
     }
 }
@@ -96,8 +99,32 @@ pub fn ceil(x: f64) callconv(.c) f64 {
 }

 pub fn __ceilx(x: f80) callconv(.c) f80 {
-    // TODO: more efficient implementation
-    return @floatCast(ceilq(x));
+    const f80_toint = 1.0 / math.floatEps(f80);
+
+    const u: u80 = @bitCast(x);
+    const e = (u >> 64) & 0x7FFF;
+    var y: f80 = undefined;
+
+    if (e >= 0x3FFF + 64 or x == 0) return x;
+
+    if (u >> 79 != 0) {
+        y = x - f80_toint + f80_toint - x;
+    } else {
+        y = x + f80_toint - f80_toint - x;
+    }
+
+    if (e <= 0x3FFF - 1) {
+        if (common.want_float_exceptions) mem.doNotOptimizeAway(y);
+        if (u >> 79 != 0) {
+            return -0.0;
+        } else {
+            return 1.0;
+        }
+    } else if (y < 0) {
+        return x + y + 1;
+    } else {
+        return x + y;
+    }
 }

 pub fn ceilq(x: f128) callconv(.c) f128 {
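`__ceilx` replaces the `@floatCast(ceilq(x))` fallback with musl's rounding-by-addition trick: `f80_toint` is `1.0 / floatEps(f80)`, i.e. 2^63, and adding then subtracting it forces the fraction bits out of the f80's 64-bit significand, leaving `y` as the signed rounding adjustment the function then inspects. A compressed illustration of the identity (assumes the default round-to-nearest mode; not part of the diff):

    const std = @import("std");

    test "round-by-addition with 1/eps" {
        const toint = 1.0 / std.math.floatEps(f80); // 2^63 for f80
        const x: f80 = 2.75;
        // x + 2^63 has no room for fraction bits, so it rounds to an integer;
        // subtracting 2^63 back yields x rounded to nearest (here 3.0), and
        // y = rounded - x is the adjustment __ceilx examines.
        const y = x + toint - toint - x;
        try std.testing.expectEqual(@as(f80, 0.25), y);
    }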
@@ -140,6 +167,12 @@ pub fn ceill(x: c_longdouble) callconv(.c) c_longdouble {
     }
 }

+test "ceil16" {
+    try expect(__ceilh(1.3) == 2.0);
+    try expect(__ceilh(-1.3) == -1.0);
+    try expect(__ceilh(0.2) == 1.0);
+}
+
 test "ceil32" {
     try expect(ceilf(1.3) == 2.0);
     try expect(ceilf(-1.3) == -1.0);
@@ -152,12 +185,26 @@ test "ceil64" {
     try expect(ceil(0.2) == 1.0);
 }

+test "ceil80" {
+    try expect(__ceilx(1.3) == 2.0);
+    try expect(__ceilx(-1.3) == -1.0);
+    try expect(__ceilx(0.2) == 1.0);
+}
+
 test "ceil128" {
     try expect(ceilq(1.3) == 2.0);
     try expect(ceilq(-1.3) == -1.0);
     try expect(ceilq(0.2) == 1.0);
 }

+test "ceil16.special" {
+    try expect(__ceilh(0.0) == 0.0);
+    try expect(__ceilh(-0.0) == -0.0);
+    try expect(math.isPositiveInf(__ceilh(math.inf(f16))));
+    try expect(math.isNegativeInf(__ceilh(-math.inf(f16))));
+    try expect(math.isNan(__ceilh(math.nan(f16))));
+}
+
 test "ceil32.special" {
     try expect(ceilf(0.0) == 0.0);
     try expect(ceilf(-0.0) == -0.0);
@@ -174,6 +221,14 @@ test "ceil64.special" {
     try expect(math.isNan(ceil(math.nan(f64))));
 }

+test "ceil80.special" {
+    try expect(__ceilx(0.0) == 0.0);
+    try expect(__ceilx(-0.0) == -0.0);
+    try expect(math.isPositiveInf(__ceilx(math.inf(f80))));
+    try expect(math.isNegativeInf(__ceilx(-math.inf(f80))));
+    try expect(math.isNan(__ceilx(math.nan(f80))));
+}
+
 test "ceil128.special" {
     try expect(ceilq(0.0) == 0.0);
     try expect(ceilq(-0.0) == -0.0);
lib/compiler_rt/floor.zig

@@ -3,6 +3,7 @@
 //!
 //! https://git.musl-libc.org/cgit/musl/tree/src/math/floorf.c
 //! https://git.musl-libc.org/cgit/musl/tree/src/math/floor.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/floorl.c

 const std = @import("std");
 const builtin = @import("builtin");
@@ -31,32 +32,17 @@ pub fn __floorh(x: f16) callconv(.c) f16 {
     const e = @as(i16, @intCast((u >> 10) & 31)) - 15;
     var m: u16 = undefined;

-    // TODO: Shouldn't need this explicit check.
-    if (x == 0.0) {
-        return x;
-    }
-
-    if (e >= 10) {
-        return x;
-    }
+    if (e >= 10) return x;

     if (e >= 0) {
-        m = @as(u16, 1023) >> @intCast(e);
-        if (u & m == 0) {
-            return x;
-        }
+        m = @as(u16, 0x03FF) >> @intCast(e);
+        if (u & m == 0) return x;
         if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
-        if (u >> 15 != 0) {
-            u += m;
-        }
+        if (u >> 15 != 0) u += m;
         return @bitCast(u & ~m);
     } else {
         if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
-        if (u >> 15 == 0) {
-            return 0.0;
-        } else {
-            return -1.0;
-        }
+        return if (u >> 15 == 0) 0.0 else if (u << 1 != 0) -1.0 else x;
     }
 }
@@ -65,32 +51,17 @@ pub fn floorf(x: f32) callconv(.c) f32 {
     const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
     var m: u32 = undefined;

-    // TODO: Shouldn't need this explicit check.
-    if (x == 0.0) {
-        return x;
-    }
-
-    if (e >= 23) {
-        return x;
-    }
+    if (e >= 23) return x;

     if (e >= 0) {
         m = @as(u32, 0x007FFFFF) >> @intCast(e);
-        if (u & m == 0) {
-            return x;
-        }
+        if (u & m == 0) return x;
         if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
-        if (u >> 31 != 0) {
-            u += m;
-        }
+        if (u >> 31 != 0) u += m;
         return @bitCast(u & ~m);
     } else {
         if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
-        if (u >> 31 == 0) {
-            return 0.0;
-        } else {
-            return -1.0;
-        }
+        return if (u >> 31 == 0) 0.0 else if (u << 1 != 0) -1.0 else x;
     }
 }
@@ -126,8 +97,34 @@ pub fn floor(x: f64) callconv(.c) f64 {
 }

 pub fn __floorx(x: f80) callconv(.c) f80 {
-    // TODO: more efficient implementation
-    return @floatCast(floorq(x));
+    const f80_toint = 1.0 / math.floatEps(f80);
+
+    const u: u80 = @bitCast(x);
+    const e = (u >> 64) & 0x7FFF;
+    var y: f80 = undefined;
+
+    if (e >= 0x3FFF + 64 or x == 0) {
+        return x;
+    }
+
+    if (u >> 79 != 0) {
+        y = x - f80_toint + f80_toint - x;
+    } else {
+        y = x + f80_toint - f80_toint - x;
+    }
+
+    if (e <= 0x3FFF - 1) {
+        if (common.want_float_exceptions) mem.doNotOptimizeAway(y);
+        if (u >> 79 != 0) {
+            return -1.0;
+        } else {
+            return 0.0;
+        }
+    } else if (y > 0) {
+        return x + y - 1;
+    } else {
+        return x + y;
+    }
 }

 pub fn floorq(x: f128) callconv(.c) f128 {
@@ -188,6 +185,12 @@ test "floor64" {
     try expect(floor(0.2) == 0.0);
 }

+test "floor80" {
+    try expect(__floorx(1.3) == 1.0);
+    try expect(__floorx(-1.3) == -2.0);
+    try expect(__floorx(0.2) == 0.0);
+}
+
 test "floor128" {
     try expect(floorq(1.3) == 1.0);
     try expect(floorq(-1.3) == -2.0);
@@ -218,6 +221,14 @@ test "floor64.special" {
     try expect(math.isNan(floor(math.nan(f64))));
 }

+test "floor80.special" {
+    try expect(__floorx(0.0) == 0.0);
+    try expect(__floorx(-0.0) == -0.0);
+    try expect(math.isPositiveInf(__floorx(math.inf(f80))));
+    try expect(math.isNegativeInf(__floorx(-math.inf(f80))));
+    try expect(math.isNan(__floorx(math.nan(f80))));
+}
+
 test "floor128.special" {
     try expect(floorq(0.0) == 0.0);
     try expect(floorq(-0.0) == -0.0);
@@ -203,7 +203,7 @@ fn add_adjusted(a: f64, b: f64) f64 {
         if (uhii & 1 == 0) {
             // hibits += copysign(1.0, sum.hi, sum.lo)
             const uloi: u64 = @bitCast(sum.lo);
-            uhii += 1 - ((uhii ^ uloi) >> 62);
+            uhii = uhii + 1 - ((uhii ^ uloi) >> 62);
             sum.hi = @bitCast(uhii);
         }
     }
@@ -217,7 +217,7 @@ fn add_and_denorm(a: f64, b: f64, scale: i32) f64 {
         const bits_lost = -@as(i32, @intCast((uhii >> 52) & 0x7FF)) - scale + 1;
         if ((bits_lost != 1) == (uhii & 1 != 0)) {
             const uloi: u64 = @bitCast(sum.lo);
-            uhii += 1 - (((uhii ^ uloi) >> 62) & 2);
+            uhii = uhii + 1 - (((uhii ^ uloi) >> 62) & 2);
             sum.hi = @bitCast(uhii);
         }
     }
@@ -259,7 +259,7 @@ fn add_adjusted128(a: f128, b: f128) f128 {
         if (uhii & 1 == 0) {
             // hibits += copysign(1.0, sum.hi, sum.lo)
             const uloi: u128 = @bitCast(sum.lo);
-            uhii += 1 - ((uhii ^ uloi) >> 126);
+            uhii = uhii + 1 - ((uhii ^ uloi) >> 126);
             sum.hi = @bitCast(uhii);
         }
     }
@@ -284,7 +284,7 @@ fn add_and_denorm128(a: f128, b: f128, scale: i32) f128 {
         const bits_lost = -@as(i32, @intCast((uhii >> 112) & 0x7FFF)) - scale + 1;
         if ((bits_lost != 1) == (uhii & 1 != 0)) {
             const uloi: u128 = @bitCast(sum.lo);
-            uhii += 1 - (((uhii ^ uloi) >> 126) & 2);
+            uhii = uhii + 1 - (((uhii ^ uloi) >> 126) & 2);
             sum.hi = @bitCast(uhii);
         }
     }
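These four hunks look like no-ops, but they change evaluation order: in `uhii += 1 - shift`, the right-hand side `1 - shift` is computed on its own first, and in an unsigned `u64`/`u128` it underflows whenever `shift` exceeds 1 (illegal behavior in safe builds). Writing `uhii = uhii + 1 - shift` evaluates left to right, so every intermediate result stays in range. A minimal reproduction of the hazard (illustrative):

    const std = @import("std");

    test "left-to-right keeps unsigned intermediates in range" {
        var uhii: u64 = 100;
        const shift: u64 = 2; // e.g. (((uhii ^ uloi) >> 62) & 2)
        // uhii += 1 - shift; // would compute 1 - 2 in u64: underflow
        uhii = uhii + 1 - shift; // (100 + 1) - 2, every step in range
        try std.testing.expectEqual(@as(u64, 99), uhii);
    }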
3 lib/libc/include/generic-glibc/arpa/inet.h (vendored)
@@ -101,10 +101,13 @@ extern char *inet_nsap_ntoa (int __len, const unsigned char *__cp,
 			     char *__buf) __THROW;
 #endif

+// zig patch: inet was fortified in glibc 2.42
+#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 42) || __GLIBC__ > 2
 #if __USE_FORTIFY_LEVEL > 0 && defined __fortify_function
 /* Include functions with security checks. */
 # include <bits/inet-fortified.h>
 #endif
+#endif

 __END_DECLS
lib/std/Build/Step/Compile.zig

@@ -192,6 +192,7 @@ want_lto: ?bool = null,

 use_llvm: ?bool,
 use_lld: ?bool,
+use_new_linker: ?bool,

 /// Corresponds to the `-fallow-so-scripts` / `-fno-allow-so-scripts` CLI
 /// flags, overriding the global user setting provided to the `zig build`
@@ -441,6 +442,7 @@ pub fn create(owner: *std.Build, options: Options) *Compile {

         .use_llvm = options.use_llvm,
         .use_lld = options.use_lld,
+        .use_new_linker = null,

         .zig_process = null,
     };
@@ -1096,6 +1098,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {

     try addFlag(&zig_args, "llvm", compile.use_llvm);
     try addFlag(&zig_args, "lld", compile.use_lld);
+    try addFlag(&zig_args, "new-linker", compile.use_new_linker);

     if (compile.root_module.resolved_target.?.query.ofmt) |ofmt| {
         try zig_args.append(try std.fmt.allocPrint(arena, "-ofmt={s}", .{@tagName(ofmt)}));
lib/std/Build/Step/Run.zig

@@ -80,8 +80,8 @@ max_stdio_size: usize,
 /// the step fails.
 stdio_limit: std.Io.Limit,

-captured_stdout: ?*Output,
-captured_stderr: ?*Output,
+captured_stdout: ?*CapturedStdIo,
+captured_stderr: ?*CapturedStdIo,

 dep_output_file: ?*Output,
@@ -142,6 +142,7 @@ pub const Arg = union(enum) {
     artifact: PrefixedArtifact,
     lazy_path: PrefixedLazyPath,
     decorated_directory: DecoratedLazyPath,
+    file_content: PrefixedLazyPath,
     bytes: []u8,
     output_file: *Output,
     output_directory: *Output,
@@ -169,6 +170,25 @@ pub const Output = struct {
     basename: []const u8,
 };

+pub const CapturedStdIo = struct {
+    output: Output,
+    trim_whitespace: TrimWhitespace,
+
+    pub const Options = struct {
+        /// `null` means `stdout`/`stderr`.
+        basename: ?[]const u8 = null,
+        /// Does not affect `expectStdOutEqual`/`expectStdErrEqual`.
+        trim_whitespace: TrimWhitespace = .none,
+    };
+
+    pub const TrimWhitespace = enum {
+        none,
+        all,
+        leading,
+        trailing,
+    };
+};
+
 pub fn create(owner: *std.Build, name: []const u8) *Run {
     const run = owner.allocator.create(Run) catch @panic("OOM");
     run.* = .{
@@ -319,6 +339,60 @@ pub fn addPrefixedFileArg(run: *Run, prefix: []const u8, lp: std.Build.LazyPath)
     lp.addStepDependencies(&run.step);
 }

+/// Appends the content of an input file to the command line arguments.
+///
+/// The child process will see a single argument, even if the file contains whitespace.
+/// This means that the entire file content up to EOF is rendered as one contiguous
+/// string, including escape sequences. Notably, any (trailing) newlines will show up
+/// like this: "hello,\nfile world!\n"
+///
+/// Modifications to the source file will be detected as a cache miss in subsequent
+/// builds, causing the child process to be re-executed.
+///
+/// This function may not be used to supply the first argument of a `Run` step.
+///
+/// Related:
+/// * `addPrefixedFileContentArg` - same thing but prepends a string to the argument
+pub fn addFileContentArg(run: *Run, lp: std.Build.LazyPath) void {
+    run.addPrefixedFileContentArg("", lp);
+}
+
+/// Appends the content of an input file to the command line arguments prepended with a string.
+///
+/// For example, a prefix of "-F" will result in the child process seeing something
+/// like this: "-Fmy file content"
+///
+/// The child process will see a single argument, even if the prefix and/or the file
+/// contain whitespace.
+/// This means that the entire file content up to EOF is rendered as one contiguous
+/// string, including escape sequences. Notably, any (trailing) newlines will show up
+/// like this: "hello,\nfile world!\n"
+///
+/// Modifications to the source file will be detected as a cache miss in subsequent
+/// builds, causing the child process to be re-executed.
+///
+/// This function may not be used to supply the first argument of a `Run` step.
+///
+/// Related:
+/// * `addFileContentArg` - same thing but without the prefix
+pub fn addPrefixedFileContentArg(run: *Run, prefix: []const u8, lp: std.Build.LazyPath) void {
+    const b = run.step.owner;
+
+    // Some parts of this step's configure phase API rely on the first argument being somewhat
+    // transparent/readable, but the content of the file specified by `lp` remains completely
+    // opaque until its path can be resolved during the make phase.
+    if (run.argv.items.len == 0) {
+        @panic("'addFileContentArg'/'addPrefixedFileContentArg' cannot be first argument");
+    }
+
+    const prefixed_file_source: PrefixedLazyPath = .{
+        .prefix = b.dupe(prefix),
+        .lazy_path = lp.dupe(b),
+    };
+    run.argv.append(b.allocator, .{ .file_content = prefixed_file_source }) catch @panic("OOM");
+    lp.addStepDependencies(&run.step);
+}
+
 /// Provides a directory path as a command line argument to the command being run.
 ///
 /// Returns a `std.Build.LazyPath` which can be used as inputs to other APIs
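A usage sketch for the new API: the file's entire content becomes one argv entry, resolved at make time. The step and file names here are hypothetical, not from the diff; `captureStdOut` takes an options struct per the changes later in this file:

    // In some build.zig; `flag_generator` and `my_tool` are hypothetical artifacts.
    const gen = b.addRunArtifact(flag_generator);
    const flags_file = gen.captureStdOut(.{});

    const tool = b.addRunArtifact(my_tool);
    tool.addArg("build"); // a first argument is required before file-content args
    // The child sees a single argument like "--flags=<file content>".
    tool.addPrefixedFileContentArg("--flags=", flags_file);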
@@ -469,6 +543,7 @@ pub fn addPathDir(run: *Run, search_path: []const u8) void {
             break :use_wine std.mem.endsWith(u8, p.lazy_path.basename(b, &run.step), ".exe");
         },
         .decorated_directory => false,
+        .file_content => unreachable, // not allowed as first arg
        .bytes => |bytes| std.mem.endsWith(u8, bytes, ".exe"),
         .output_file, .output_directory => false,
     };
@@ -553,34 +628,42 @@ pub fn addCheck(run: *Run, new_check: StdIo.Check) void {
     }
 }

-pub fn captureStdErr(run: *Run) std.Build.LazyPath {
+pub fn captureStdErr(run: *Run, options: CapturedStdIo.Options) std.Build.LazyPath {
     assert(run.stdio != .inherit);
+    const b = run.step.owner;

-    if (run.captured_stderr) |output| return .{ .generated = .{ .file = &output.generated_file } };
+    if (run.captured_stderr) |captured| return .{ .generated = .{ .file = &captured.output.generated_file } };

-    const output = run.step.owner.allocator.create(Output) catch @panic("OOM");
-    output.* = .{
-        .prefix = "",
-        .basename = "stderr",
-        .generated_file = .{ .step = &run.step },
+    const captured = b.allocator.create(CapturedStdIo) catch @panic("OOM");
+    captured.* = .{
+        .output = .{
+            .prefix = "",
+            .basename = if (options.basename) |basename| b.dupe(basename) else "stderr",
+            .generated_file = .{ .step = &run.step },
+        },
+        .trim_whitespace = options.trim_whitespace,
     };
-    run.captured_stderr = output;
-    return .{ .generated = .{ .file = &output.generated_file } };
+    run.captured_stderr = captured;
+    return .{ .generated = .{ .file = &captured.output.generated_file } };
 }

-pub fn captureStdOut(run: *Run) std.Build.LazyPath {
+pub fn captureStdOut(run: *Run, options: CapturedStdIo.Options) std.Build.LazyPath {
     assert(run.stdio != .inherit);
+    const b = run.step.owner;

-    if (run.captured_stdout) |output| return .{ .generated = .{ .file = &output.generated_file } };
+    if (run.captured_stdout) |captured| return .{ .generated = .{ .file = &captured.output.generated_file } };

-    const output = run.step.owner.allocator.create(Output) catch @panic("OOM");
-    output.* = .{
-        .prefix = "",
-        .basename = "stdout",
-        .generated_file = .{ .step = &run.step },
+    const captured = b.allocator.create(CapturedStdIo) catch @panic("OOM");
+    captured.* = .{
+        .output = .{
+            .prefix = "",
+            .basename = if (options.basename) |basename| b.dupe(basename) else "stdout",
+            .generated_file = .{ .step = &run.step },
+        },
+        .trim_whitespace = options.trim_whitespace,
     };
-    run.captured_stdout = output;
-    return .{ .generated = .{ .file = &output.generated_file } };
+    run.captured_stdout = captured;
+    return .{ .generated = .{ .file = &captured.output.generated_file } };
 }

 /// Adds an additional input files that, when modified, indicates that this Run
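With `CapturedStdIo.Options`, callers can now name the captured file and strip whitespace at capture time; per the option's doc comment, trimming does not affect `expectStdOutEqual`-style checks. A sketch with a hypothetical step (not from the diff):

    // `run` is some std.Build.Step.Run that prints a version string on stdout.
    const version_file = run.captureStdOut(.{
        .basename = "version.txt",
        .trim_whitespace = .trailing, // drop the trailing newline
    });
    // version_file is a LazyPath usable as an input to other steps.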
@@ -732,6 +815,35 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
                 try argv_list.append(resolved_arg);
                 man.hash.addBytes(resolved_arg);
             },
+            .file_content => |file_plp| {
+                const file_path = file_plp.lazy_path.getPath3(b, step);
+
+                var result: std.Io.Writer.Allocating = .init(arena);
+                errdefer result.deinit();
+                result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory;
+
+                const file = file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{}) catch |err| {
+                    return step.fail(
+                        "unable to open input file '{f}': {t}",
+                        .{ file_path, err },
+                    );
+                };
+                defer file.close();
+
+                var buf: [1024]u8 = undefined;
+                var file_reader = file.reader(&buf);
+                _ = file_reader.interface.streamRemaining(&result.writer) catch |err| switch (err) {
+                    error.ReadFailed => return step.fail(
+                        "failed to read from '{f}': {t}",
+                        .{ file_path, file_reader.err.? },
+                    ),
+                    error.WriteFailed => return error.OutOfMemory,
+                };
+
+                try argv_list.append(result.written());
+                man.hash.addBytes(file_plp.prefix);
+                _ = try man.addFilePath(file_path, null);
+            },
             .artifact => |pa| {
                 const artifact = pa.artifact;
@@ -775,12 +887,14 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
         .none => {},
     }

-    if (run.captured_stdout) |output| {
-        man.hash.addBytes(output.basename);
+    if (run.captured_stdout) |captured| {
+        man.hash.addBytes(captured.output.basename);
+        man.hash.add(captured.trim_whitespace);
     }

-    if (run.captured_stderr) |output| {
-        man.hash.addBytes(output.basename);
+    if (run.captured_stderr) |captured| {
+        man.hash.addBytes(captured.output.basename);
+        man.hash.add(captured.trim_whitespace);
     }

     hashStdIo(&man.hash, run.stdio);
@@ -951,7 +1065,7 @@ pub fn rerunInFuzzMode(
     const step = &run.step;
     const b = step.owner;
     const arena = b.allocator;
-    var argv_list: std.ArrayListUnmanaged([]const u8) = .empty;
+    var argv_list: std.ArrayList([]const u8) = .empty;
     for (run.argv.items) |arg| {
         switch (arg) {
             .bytes => |bytes| {
@@ -965,6 +1079,25 @@ pub fn rerunInFuzzMode(
                 const file_path = dd.lazy_path.getPath3(b, step);
                 try argv_list.append(arena, b.fmt("{s}{s}{s}", .{ dd.prefix, run.convertPathArg(file_path), dd.suffix }));
             },
+            .file_content => |file_plp| {
+                const file_path = file_plp.lazy_path.getPath3(b, step);
+
+                var result: std.Io.Writer.Allocating = .init(arena);
+                errdefer result.deinit();
+                result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory;
+
+                const file = try file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{});
+                defer file.close();
+
+                var buf: [1024]u8 = undefined;
+                var file_reader = file.reader(&buf);
+                _ = file_reader.interface.streamRemaining(&result.writer) catch |err| switch (err) {
+                    error.ReadFailed => return file_reader.err.?,
+                    error.WriteFailed => return error.OutOfMemory,
+                };
+
+                try argv_list.append(arena, result.written());
+            },
             .artifact => |pa| {
                 const artifact = pa.artifact;
                 const file_path: []const u8 = p: {
@@ -991,8 +1124,8 @@ pub fn rerunInFuzzMode(
 fn populateGeneratedPaths(
     arena: std.mem.Allocator,
     output_placeholders: []const IndexedOutput,
-    captured_stdout: ?*Output,
-    captured_stderr: ?*Output,
+    captured_stdout: ?*CapturedStdIo,
+    captured_stderr: ?*CapturedStdIo,
     cache_root: Build.Cache.Directory,
     digest: *const Build.Cache.HexDigest,
 ) !void {
@@ -1002,15 +1135,15 @@ fn populateGeneratedPaths(
         });
     }

-    if (captured_stdout) |output| {
-        output.generated_file.path = try cache_root.join(arena, &.{
-            "o", digest, output.basename,
+    if (captured_stdout) |captured| {
+        captured.output.generated_file.path = try cache_root.join(arena, &.{
+            "o", digest, captured.output.basename,
         });
     }

-    if (captured_stderr) |output| {
-        output.generated_file.path = try cache_root.join(arena, &.{
-            "o", digest, output.basename,
+    if (captured_stderr) |captured| {
+        captured.output.generated_file.path = try cache_root.join(arena, &.{
+            "o", digest, captured.output.basename,
         });
     }
 }
@@ -1251,7 +1384,7 @@ fn runCommand(

     // Capture stdout and stderr to GeneratedFile objects.
     const Stream = struct {
-        captured: ?*Output,
+        captured: ?*CapturedStdIo,
         bytes: ?[]const u8,
     };
     for ([_]Stream{
@@ -1264,10 +1397,10 @@ fn runCommand(
             .bytes = result.stdio.stderr,
         },
     }) |stream| {
-        if (stream.captured) |output| {
-            const output_components = .{ output_dir_path, output.basename };
+        if (stream.captured) |captured| {
+            const output_components = .{ output_dir_path, captured.output.basename };
             const output_path = try b.cache_root.join(arena, &output_components);
-            output.generated_file.path = output_path;
+            captured.output.generated_file.path = output_path;

             const sub_path = b.pathJoin(&output_components);
             const sub_path_dirname = fs.path.dirname(sub_path).?;
@@ -1276,7 +1409,13 @@ fn runCommand(
                     b.cache_root, sub_path_dirname, @errorName(err),
                 });
             };
-            b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = stream.bytes.? }) catch |err| {
+            const data = switch (captured.trim_whitespace) {
+                .none => stream.bytes.?,
+                .all => mem.trim(u8, stream.bytes.?, &std.ascii.whitespace),
+                .leading => mem.trimStart(u8, stream.bytes.?, &std.ascii.whitespace),
+                .trailing => mem.trimEnd(u8, stream.bytes.?, &std.ascii.whitespace),
+            };
+            b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = data }) catch |err| {
                 return step.fail("unable to write file '{f}{s}': {s}", .{
                     b.cache_root, sub_path, @errorName(err),
                 });
lib/std/Io/Reader.zig

@@ -432,10 +432,11 @@ pub fn defaultReadVec(r: *Reader, data: [][]u8) Error!usize {
         .vtable = &.{ .drain = Writer.fixedDrain },
     };
     const limit: Limit = .limited(writer.buffer.len - writer.end);
-    r.end += r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
+    const n = r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
         error.WriteFailed => unreachable,
         else => |e| return e,
     };
+    r.end += n;
     return 0;
 }
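The motivation for splitting the `+=` is not stated in the excerpt; plausibly it avoids a read-modify-write that spans a virtual call: `r.end += r.vtable.stream(...)` reads and writes `r.end` around a call that may itself adjust reader state, while binding the result to `n` first makes the sequencing explicit. A generic illustration of the pattern (an assumption about intent, not from the commit):

    const std = @import("std");

    var total: usize = 0;

    fn produce() usize {
        total += 10; // callee updates the same state the caller accumulates into
        return 1;
    }

    test "sequence the call before the accumulate" {
        total = 0;
        // `total += produce();` leaves it ambiguous to a reader whether the
        // load of `total` happens before or after produce()'s side effect.
        const n = produce();
        total += n; // unambiguous: 10 + 1
        try std.testing.expectEqual(@as(usize, 11), total);
    }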
lib/std/Io/Writer.zig

@@ -1370,19 +1370,12 @@ pub fn printValue(
         },
         .array => {
             if (!is_any) @compileError("cannot format array without a specifier (i.e. {s} or {any})");
-            if (max_depth == 0) return w.writeAll("{ ... }");
-            try w.writeAll("{ ");
-            for (value, 0..) |elem, i| {
-                try w.printValue(fmt, options, elem, max_depth - 1);
-                if (i < value.len - 1) {
-                    try w.writeAll(", ");
-                }
-            }
-            try w.writeAll(" }");
+            return printArray(w, fmt, options, &value, max_depth);
         },
-        .vector => {
+        .vector => |vector| {
             if (!is_any and fmt.len != 0) invalidFmtError(fmt, value);
-            return printVector(w, fmt, options, value, max_depth);
+            const array: [vector.len]vector.child = value;
+            return printArray(w, fmt, options, &array, max_depth);
         },
         .@"fn" => @compileError("unable to format function body type, use '*const " ++ @typeName(T) ++ "' for a function pointer type"),
         .type => {
@@ -1436,12 +1429,25 @@ pub fn printVector(
     value: anytype,
     max_depth: usize,
 ) Error!void {
-    const len = @typeInfo(@TypeOf(value)).vector.len;
+    const vector = @typeInfo(@TypeOf(value)).vector;
+    const array: [vector.len]vector.child = value;
+    return printArray(w, fmt, options, &array, max_depth);
+}
+
+pub fn printArray(
+    w: *Writer,
+    comptime fmt: []const u8,
+    options: std.fmt.Options,
+    ptr_to_array: anytype,
+    max_depth: usize,
+) Error!void {
     if (max_depth == 0) return w.writeAll("{ ... }");
     try w.writeAll("{ ");
-    inline for (0..len) |i| {
-        try w.printValue(fmt, options, value[i], max_depth - 1);
-        if (i < len - 1) try w.writeAll(", ");
+    for (ptr_to_array, 0..) |elem, i| {
+        try w.printValue(fmt, options, elem, max_depth - 1);
+        if (i < ptr_to_array.len - 1) {
+            try w.writeAll(", ");
+        }
     }
     try w.writeAll(" }");
 }
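After this change a `@Vector` is formatted by coercing it to an array and reusing `printArray`, instead of a separate inline loop; Zig permits the coercion because the vector's length and child type are comptime-known. The coercion step in isolation (illustrative):

    const std = @import("std");

    test "vector coerces to array for formatting" {
        const v: @Vector(4, i32) = .{ 1, 2, 3, 4 };
        const info = @typeInfo(@TypeOf(v)).vector;
        const array: [info.len]info.child = v; // same coercion printValue now uses
        try std.testing.expectEqual(@as(i32, 3), array[2]);
    }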
@@ -2661,7 +2667,7 @@ pub const Allocating = struct {
     pub fn ensureTotalCapacity(a: *Allocating, new_capacity: usize) Allocator.Error!void {
         // Protects growing unnecessarily since better_capacity will be larger.
         if (a.writer.buffer.len >= new_capacity) return;
-        const better_capacity = ArrayList(u8).growCapacity(a.writer.buffer.len, new_capacity);
+        const better_capacity = ArrayList(u8).growCapacity(new_capacity);
         return ensureTotalCapacityPrecise(a, better_capacity);
     }
lib/std/Progress.zig

@@ -1248,7 +1248,9 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) struct { []u8, usize } {
                 i += progress_pulsing.len;
             } else {
                 const percent = completed_items * 100 / estimated_total;
-                i += (std.fmt.bufPrint(buf[i..], @"progress_normal {d}", .{percent}) catch &.{}).len;
+                if (std.fmt.bufPrint(buf[i..], @"progress_normal {d}", .{percent})) |b| {
+                    i += b.len;
+                } else |_| {}
             }
         },
         .success => {
@@ -1265,7 +1267,9 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) struct { []u8, usize } {
                 i += progress_pulsing_error.len;
             } else {
                 const percent = completed_items * 100 / estimated_total;
-                i += (std.fmt.bufPrint(buf[i..], @"progress_error {d}", .{percent}) catch &.{}).len;
+                if (std.fmt.bufPrint(buf[i..], @"progress_error {d}", .{percent})) |b| {
+                    i += b.len;
+                } else |_| {}
             }
         },
     }
@@ -1364,12 +1368,18 @@ fn computeNode(
     if (!is_empty_root) {
         if (name.len != 0 or estimated_total > 0) {
             if (estimated_total > 0) {
-                i += (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total }) catch &.{}).len;
+                if (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total })) |b| {
+                    i += b.len;
+                } else |_| {}
             } else if (completed_items != 0) {
-                i += (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items}) catch &.{}).len;
+                if (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items})) |b| {
+                    i += b.len;
+                } else |_| {}
             }
             if (name.len != 0) {
-                i += (std.fmt.bufPrint(buf[i..], "{s}", .{name}) catch &.{}).len;
+                if (std.fmt.bufPrint(buf[i..], "{s}", .{name})) |b| {
+                    i += b.len;
+                } else |_| {}
            }
         }
     }
lib/std/Target.zig

@@ -1187,7 +1187,7 @@ pub const Cpu = struct {
         pub const Index = std.math.Log2Int(std.meta.Int(.unsigned, usize_count * @bitSizeOf(usize)));
         pub const ShiftInt = std.math.Log2Int(usize);

-        pub const empty = Set{ .ints = [1]usize{0} ** usize_count };
+        pub const empty: Set = .{ .ints = @splat(0) };

         pub fn isEmpty(set: Set) bool {
             return for (set.ints) |x| {
lib/std/Thread.zig

@@ -1661,6 +1661,11 @@ test "Thread.getCurrentId" {
 test "thread local storage" {
     if (builtin.single_threaded) return error.SkipZigTest;

+    if (builtin.cpu.arch == .thumbeb) {
+        // https://github.com/ziglang/zig/issues/24061
+        return error.SkipZigTest;
+    }
+
     const thread1 = try Thread.spawn(.{}, testTls, .{});
     const thread2 = try Thread.spawn(.{}, testTls, .{});
     try testTls();
lib/std/array_list.zig

@@ -172,7 +172,7 @@ pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type
             // a new buffer and doing our own copy. With a realloc() call,
             // the allocator implementation would pointlessly copy our
             // extra capacity.
-            const new_capacity = Aligned(T, alignment).growCapacity(self.capacity, new_len);
+            const new_capacity = Aligned(T, alignment).growCapacity(new_len);
             const old_memory = self.allocatedSlice();
             if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
                 self.items.ptr = new_memory.ptr;
@@ -408,7 +408,7 @@ pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type
             // Protects growing unnecessarily since better_capacity will be larger.
             if (self.capacity >= new_capacity) return;

-            const better_capacity = Aligned(T, alignment).growCapacity(self.capacity, new_capacity);
+            const better_capacity = Aligned(T, alignment).growCapacity(new_capacity);
             return self.ensureTotalCapacityPrecise(better_capacity);
         }

@@ -1160,7 +1160,7 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
         /// Invalidates element pointers if additional memory is needed.
        pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
             if (self.capacity >= new_capacity) return;
-            return self.ensureTotalCapacityPrecise(gpa, growCapacity(self.capacity, new_capacity));
+            return self.ensureTotalCapacityPrecise(gpa, growCapacity(new_capacity));
         }

         /// If the current capacity is less than `new_capacity`, this function will
@@ -1359,17 +1359,12 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
             return self.getLast();
         }

-        const init_capacity = @as(comptime_int, @max(1, std.atomic.cache_line / @sizeOf(T)));
+        const init_capacity: comptime_int = @max(1, std.atomic.cache_line / @sizeOf(T));

         /// Called when memory growth is necessary. Returns a capacity larger than
         /// minimum that grows super-linearly.
-        pub fn growCapacity(current: usize, minimum: usize) usize {
-            var new = current;
-            while (true) {
-                new +|= new / 2 + init_capacity;
-                if (new >= minimum)
-                    return new;
-            }
+        pub fn growCapacity(minimum: usize) usize {
+            return minimum +| (minimum / 2 + init_capacity);
         }
     };
 }
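The old `growCapacity(current, minimum)` looped, repeatedly growing `current` by half until it covered `minimum`; the new form computes one shot from the requested minimum alone: `minimum + minimum/2 + init_capacity`, saturating on overflow. Amortized O(1) appends are preserved because every growth step still overshoots the request by about 50%. A standalone sketch of the append/realloc cadence under the new rule (mirrors the formula, not the std implementation):

    const std = @import("std");

    test "one-shot growth still amortizes appends" {
        const init_capacity: usize = 8; // stand-in for cache_line / @sizeOf(T)
        var capacity: usize = 0;
        var reallocs: usize = 0;
        var len: usize = 0;
        while (len < 1_000_000) : (len += 1) {
            if (len == capacity) {
                // next capacity = minimum + minimum/2 + init_capacity (saturating)
                capacity = (len + 1) +| ((len + 1) / 2 + init_capacity);
                reallocs += 1;
            }
        }
        // ~1.5x growth per step keeps reallocations logarithmic in n.
        try std.testing.expect(reallocs < 40);
    }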
lib/std/c.zig

@@ -5825,6 +5825,22 @@ pub const MSG = switch (native_os) {
         pub const WAITFORONE = 0x1000;
         pub const CMSG_CLOFORK = 0x2000;
     },
+    .dragonfly => struct {
+        pub const OOB = 0x0001;
+        pub const PEEK = 0x0002;
+        pub const DONTROUTE = 0x0004;
+        pub const EOR = 0x0008;
+        pub const TRUNC = 0x0010;
+        pub const CTRUNC = 0x0020;
+        pub const WAITALL = 0x0040;
+        pub const DONTWAIT = 0x0080;
+        pub const NOSIGNAL = 0x0400;
+        pub const SYNC = 0x0800;
+        pub const CMSG_CLOEXEC = 0x1000;
+        pub const CMSG_CLOFORK = 0x2000;
+        pub const FBLOCKING = 0x10000;
+        pub const FNONBLOCKING = 0x20000;
+    },
     else => void,
 };
 pub const SOCK = switch (native_os) {
@@ -11038,7 +11054,10 @@ pub extern "c" fn sem_trywait(sem: *sem_t) c_int;
 pub extern "c" fn sem_timedwait(sem: *sem_t, abs_timeout: *const timespec) c_int;
 pub extern "c" fn sem_getvalue(sem: *sem_t, sval: *c_int) c_int;

-pub extern "c" fn shm_open(name: [*:0]const u8, flag: c_int, mode: mode_t) c_int;
+pub const shm_open = switch (native_os) {
+    .driverkit, .macos, .ios, .tvos, .watchos, .visionos => darwin.shm_open,
+    else => private.shm_open,
+};
 pub extern "c" fn shm_unlink(name: [*:0]const u8) c_int;

 pub extern "c" fn kqueue() c_int;
@@ -11616,6 +11635,7 @@ const private = struct {
     extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *Stat) c_int;
     extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
     extern "c" fn sysconf(sc: c_int) c_long;
+    extern "c" fn shm_open(name: [*:0]const u8, flag: c_int, mode: mode_t) c_int;

     extern "c" fn pthread_setname_np(thread: pthread_t, name: [*:0]const u8) c_int;
     extern "c" fn getcontext(ucp: *ucontext_t) c_int;
lib/std/c/darwin.zig

@@ -490,6 +490,7 @@ pub const mach_header = std.macho.mach_header;
 pub extern "c" fn @"close$NOCANCEL"(fd: fd_t) c_int;
 pub extern "c" fn mach_host_self() mach_port_t;
 pub extern "c" fn clock_get_time(clock_serv: clock_serv_t, cur_time: *mach_timespec_t) kern_return_t;
+pub extern "c" fn shm_open(name: [*:0]const u8, flag: c_int, ...) c_int;

 pub const exception_data_type_t = integer_t;
 pub const exception_data_t = ?*mach_exception_data_type_t;
lib/std/crypto/ascon.zig

@@ -966,6 +966,8 @@ test "Ascon-CXOF128 with customization" {
 }

 test "Ascon-AEAD128 round trip with various data sizes" {
+    if (builtin.cpu.has(.riscv, .v) and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
+
     const key = [_]u8{ 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10 };
     const nonce = [_]u8{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };
lib/std/crypto/chacha20.zig

@@ -215,8 +215,8 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
             }
         }

-        fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: BlockVec) void {
-            for (0..dm) |d| {
+        fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: *const BlockVec) void {
+            inline for (0..dm) |d| {
                 for (0..4) |i| {
                     mem.writeInt(u32, out[64 * d + 16 * i + 0 ..][0..4], x[i][0 + 4 * d], .little);
                     mem.writeInt(u32, out[64 * d + 16 * i + 4 ..][0..4], x[i][1 + 4 * d], .little);
@@ -242,7 +242,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
                 while (degree >= d and i + 64 * d <= in.len) : (i += 64 * d) {
                     chacha20Core(x[0..], ctx);
                     contextFeedback(&x, ctx);
-                    hashToBytes(d, buf[0 .. 64 * d], x);
+                    hashToBytes(d, buf[0 .. 64 * d], &x);

                     var xout = out[i..];
                     const xin = in[i..];
@@ -266,7 +266,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
             if (i < in.len) {
                 chacha20Core(x[0..], ctx);
                 contextFeedback(&x, ctx);
-                hashToBytes(1, buf[0..64], x);
+                hashToBytes(1, buf[0..64], &x);

                 var xout = out[i..];
                 const xin = in[i..];
@@ -284,7 +284,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
                 while (degree >= d and i + 64 * d <= out.len) : (i += 64 * d) {
                     chacha20Core(x[0..], ctx);
                     contextFeedback(&x, ctx);
-                    hashToBytes(d, out[i..][0 .. 64 * d], x);
+                    hashToBytes(d, out[i..][0 .. 64 * d], &x);
                     inline for (0..d) |d_| {
                         if (count64) {
                             const next = @addWithOverflow(ctx[3][4 * d_], d);
@@ -301,7 +301,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
                 contextFeedback(&x, ctx);

                 var buf: [64]u8 = undefined;
-                hashToBytes(1, buf[0..], x);
+                hashToBytes(1, buf[0..], &x);
                 @memcpy(out[i..], buf[0 .. out.len - i]);
             }
         }
@@ -394,7 +394,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
             }
         }

-        fn hashToBytes(out: *[64]u8, x: BlockVec) void {
+        fn hashToBytes(out: *[64]u8, x: *const BlockVec) void {
             for (0..4) |i| {
                 mem.writeInt(u32, out[16 * i + 0 ..][0..4], x[i * 4 + 0], .little);
                 mem.writeInt(u32, out[16 * i + 4 ..][0..4], x[i * 4 + 1], .little);
@@ -417,7 +417,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
             while (i + 64 <= in.len) : (i += 64) {
                 chacha20Core(x[0..], ctx);
                 contextFeedback(&x, ctx);
-                hashToBytes(buf[0..], x);
+                hashToBytes(buf[0..], &x);

                 var xout = out[i..];
                 const xin = in[i..];
@@ -438,7 +438,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
             if (i < in.len) {
                 chacha20Core(x[0..], ctx);
                 contextFeedback(&x, ctx);
-                hashToBytes(buf[0..], x);
+                hashToBytes(buf[0..], &x);

                 var xout = out[i..];
                 const xin = in[i..];
@@ -455,7 +455,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
             while (i + 64 <= out.len) : (i += 64) {
                 chacha20Core(x[0..], ctx);
                 contextFeedback(&x, ctx);
-                hashToBytes(out[i..][0..64], x);
+                hashToBytes(out[i..][0..64], &x);
                 if (count64) {
                     const next = @addWithOverflow(ctx[12], 1);
                     ctx[12] = next[0];
@@ -469,7 +469,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
                 contextFeedback(&x, ctx);

                 var buf: [64]u8 = undefined;
-                hashToBytes(buf[0..], x);
+                hashToBytes(buf[0..], &x);
                 @memcpy(out[i..], buf[0 .. out.len - i]);
             }
         }
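Every `hashToBytes` call site now passes `&x` and the parameter becomes `*const BlockVec`: the block state is several SIMD registers wide, so by-value passing allows a full copy at each call, while a const pointer guarantees no copy without permitting mutation. (Zig may already pass large by-value parameters by reference in some builds; the pointer makes the intent explicit.) The shape of the change in isolation, with a stand-in type (illustrative):

    const Block = [4]@Vector(4, u32); // stand-in for the ChaCha block state

    fn sumByValue(x: Block) u32 { // may copy all 64 bytes per call
        return x[0][0] + x[3][3];
    }

    fn sumByConstPtr(x: *const Block) u32 { // never copies; read-only access
        return x[0][0] + x[3][3];
    }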
@@ -56,7 +56,7 @@ pub fn Deque(comptime T: type) type {
         /// Invalidates element pointers if additional memory is needed.
         pub fn ensureTotalCapacity(deque: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
             if (deque.buffer.len >= new_capacity) return;
-            return deque.ensureTotalCapacityPrecise(gpa, growCapacity(deque.buffer.len, new_capacity));
+            return deque.ensureTotalCapacityPrecise(gpa, std.ArrayList(T).growCapacity(new_capacity));
         }

         /// If the current capacity is less than `new_capacity`, this function will
@@ -243,18 +243,6 @@ pub fn Deque(comptime T: type) type {
                 return index - head_len;
             }
         }
-
-        const init_capacity: comptime_int = @max(1, std.atomic.cache_line / @sizeOf(T));
-
-        /// Called when memory growth is necessary. Returns a capacity larger than
-        /// minimum that grows super-linearly.
-        fn growCapacity(current: usize, minimum: usize) usize {
-            var new = current;
-            while (true) {
-                new +|= new / 2 + init_capacity;
-                if (new >= minimum) return new;
-            }
-        }
     };
 }
589 lib/std/elf.zig
@@ -323,6 +323,8 @@ pub const PT_LOPROC = 0x70000000;
 /// End of processor-specific
 pub const PT_HIPROC = 0x7fffffff;

+pub const PN_XNUM = 0xffff;
+
 /// Section header table entry unused
 pub const SHT_NULL = 0;
 /// Program data
@@ -385,63 +387,149 @@ pub const SHT_HIUSER = 0xffffffff;
 // Note type for .note.gnu.build_id
 pub const NT_GNU_BUILD_ID = 3;

-/// Local symbol
-pub const STB_LOCAL = 0;
-/// Global symbol
-pub const STB_GLOBAL = 1;
-/// Weak symbol
-pub const STB_WEAK = 2;
-/// Number of defined types
-pub const STB_NUM = 3;
-/// Start of OS-specific
-pub const STB_LOOS = 10;
-/// Unique symbol
-pub const STB_GNU_UNIQUE = 10;
-/// End of OS-specific
-pub const STB_HIOS = 12;
-/// Start of processor-specific
-pub const STB_LOPROC = 13;
-/// End of processor-specific
-pub const STB_HIPROC = 15;
+/// Deprecated, use `@intFromEnum(std.elf.STB.LOCAL)`
+pub const STB_LOCAL = @intFromEnum(STB.LOCAL);
+/// Deprecated, use `@intFromEnum(std.elf.STB.GLOBAL)`
+pub const STB_GLOBAL = @intFromEnum(STB.GLOBAL);
+/// Deprecated, use `@intFromEnum(std.elf.STB.WEAK)`
+pub const STB_WEAK = @intFromEnum(STB.WEAK);
+/// Deprecated, use `std.elf.STB.NUM`
+pub const STB_NUM = STB.NUM;
+/// Deprecated, use `@intFromEnum(std.elf.STB.LOOS)`
+pub const STB_LOOS = @intFromEnum(STB.LOOS);
+/// Deprecated, use `@intFromEnum(std.elf.STB.GNU_UNIQUE)`
+pub const STB_GNU_UNIQUE = @intFromEnum(STB.GNU_UNIQUE);
+/// Deprecated, use `@intFromEnum(std.elf.STB.HIOS)`
+pub const STB_HIOS = @intFromEnum(STB.HIOS);
+/// Deprecated, use `@intFromEnum(std.elf.STB.LOPROC)`
+pub const STB_LOPROC = @intFromEnum(STB.LOPROC);
+/// Deprecated, use `@intFromEnum(std.elf.STB.HIPROC)`
+pub const STB_HIPROC = @intFromEnum(STB.HIPROC);

-pub const STB_MIPS_SPLIT_COMMON = 13;
+/// Deprecated, use `@intFromEnum(std.elf.STB.MIPS_SPLIT_COMMON)`
+pub const STB_MIPS_SPLIT_COMMON = @intFromEnum(STB.MIPS_SPLIT_COMMON);

-/// Symbol type is unspecified
-pub const STT_NOTYPE = 0;
-/// Symbol is a data object
-pub const STT_OBJECT = 1;
-/// Symbol is a code object
-pub const STT_FUNC = 2;
-/// Symbol associated with a section
-pub const STT_SECTION = 3;
-/// Symbol's name is file name
-pub const STT_FILE = 4;
-/// Symbol is a common data object
-pub const STT_COMMON = 5;
-/// Symbol is thread-local data object
-pub const STT_TLS = 6;
-/// Number of defined types
-pub const STT_NUM = 7;
-/// Start of OS-specific
-pub const STT_LOOS = 10;
-/// Symbol is indirect code object
-pub const STT_GNU_IFUNC = 10;
-/// End of OS-specific
-pub const STT_HIOS = 12;
-/// Start of processor-specific
-pub const STT_LOPROC = 13;
-/// End of processor-specific
-pub const STT_HIPROC = 15;
+/// Deprecated, use `@intFromEnum(std.elf.STT.NOTYPE)`
+pub const STT_NOTYPE = @intFromEnum(STT.NOTYPE);
+/// Deprecated, use `@intFromEnum(std.elf.STT.OBJECT)`
+pub const STT_OBJECT = @intFromEnum(STT.OBJECT);
+/// Deprecated, use `@intFromEnum(std.elf.STT.FUNC)`
+pub const STT_FUNC = @intFromEnum(STT.FUNC);
+/// Deprecated, use `@intFromEnum(std.elf.STT.SECTION)`
+pub const STT_SECTION = @intFromEnum(STT.SECTION);
+/// Deprecated, use `@intFromEnum(std.elf.STT.FILE)`
+pub const STT_FILE = @intFromEnum(STT.FILE);
+/// Deprecated, use `@intFromEnum(std.elf.STT.COMMON)`
+pub const STT_COMMON = @intFromEnum(STT.COMMON);
+/// Deprecated, use `@intFromEnum(std.elf.STT.TLS)`
+pub const STT_TLS = @intFromEnum(STT.TLS);
+/// Deprecated, use `std.elf.STT.NUM`
+pub const STT_NUM = STT.NUM;
+/// Deprecated, use `@intFromEnum(std.elf.STT.LOOS)`
+pub const STT_LOOS = @intFromEnum(STT.LOOS);
+/// Deprecated, use `@intFromEnum(std.elf.STT.GNU_IFUNC)`
+pub const STT_GNU_IFUNC = @intFromEnum(STT.GNU_IFUNC);
+/// Deprecated, use `@intFromEnum(std.elf.STT.HIOS)`
+pub const STT_HIOS = @intFromEnum(STT.HIOS);
+/// Deprecated, use `@intFromEnum(std.elf.STT.LOPROC)`
+pub const STT_LOPROC = @intFromEnum(STT.LOPROC);
+/// Deprecated, use `@intFromEnum(std.elf.STT.HIPROC)`
+pub const STT_HIPROC = @intFromEnum(STT.HIPROC);

-pub const STT_SPARC_REGISTER = 13;
+/// Deprecated, use `@intFromEnum(std.elf.STT.SPARC_REGISTER)`
+pub const STT_SPARC_REGISTER = @intFromEnum(STT.SPARC_REGISTER);

-pub const STT_PARISC_MILLICODE = 13;
+/// Deprecated, use `@intFromEnum(std.elf.STT.PARISC_MILLICODE)`
+pub const STT_PARISC_MILLICODE = @intFromEnum(STT.PARISC_MILLICODE);

-pub const STT_HP_OPAQUE = (STT_LOOS + 0x1);
-pub const STT_HP_STUB = (STT_LOOS + 0x2);
+/// Deprecated, use `@intFromEnum(std.elf.STT.HP_OPAQUE)`
+pub const STT_HP_OPAQUE = @intFromEnum(STT.HP_OPAQUE);
+/// Deprecated, use `@intFromEnum(std.elf.STT.HP_STUB)`
+pub const STT_HP_STUB = @intFromEnum(STT.HP_STUB);

-pub const STT_ARM_TFUNC = STT_LOPROC;
-pub const STT_ARM_16BIT = STT_HIPROC;
+/// Deprecated, use `@intFromEnum(std.elf.STT.ARM_TFUNC)`
+pub const STT_ARM_TFUNC = @intFromEnum(STT.ARM_TFUNC);
|
||||
/// Deprecated, use `@intFromEnum(std.elf.STT.ARM_16BIT)`
|
||||
pub const STT_ARM_16BIT = @intFromEnum(STT.ARM_16BIT);
|
||||
|
||||
pub const STB = enum(u4) {
|
||||
/// Local symbol
|
||||
LOCAL = 0,
|
||||
/// Global symbol
|
||||
GLOBAL = 1,
|
||||
/// Weak symbol
|
||||
WEAK = 2,
|
||||
_,
|
||||
|
||||
/// Number of defined types
|
||||
pub const NUM = @typeInfo(STB).@"enum".fields.len;
|
||||
|
||||
/// Start of OS-specific
|
||||
pub const LOOS: STB = @enumFromInt(10);
|
||||
/// End of OS-specific
|
||||
pub const HIOS: STB = @enumFromInt(12);
|
||||
|
||||
/// Unique symbol
|
||||
pub const GNU_UNIQUE: STB = @enumFromInt(@intFromEnum(LOOS) + 0);
|
||||
|
||||
/// Start of processor-specific
|
||||
pub const LOPROC: STB = @enumFromInt(13);
|
||||
/// End of processor-specific
|
||||
pub const HIPROC: STB = @enumFromInt(15);
|
||||
|
||||
pub const MIPS_SPLIT_COMMON: STB = @enumFromInt(@intFromEnum(LOPROC) + 0);
|
||||
};
|
||||
|
||||
pub const STT = enum(u4) {
|
||||
/// Symbol type is unspecified
|
||||
NOTYPE = 0,
|
||||
/// Symbol is a data object
|
||||
OBJECT = 1,
|
||||
/// Symbol is a code object
|
||||
FUNC = 2,
|
||||
/// Symbol associated with a section
|
||||
SECTION = 3,
|
||||
/// Symbol's name is file name
|
||||
FILE = 4,
|
||||
/// Symbol is a common data object
|
||||
COMMON = 5,
|
||||
/// Symbol is thread-local data object
|
||||
TLS = 6,
|
||||
_,
|
||||
|
||||
/// Number of defined types
|
||||
pub const NUM = @typeInfo(STT).@"enum".fields.len;
|
||||
|
||||
/// Start of OS-specific
|
||||
pub const LOOS: STT = @enumFromInt(10);
|
||||
/// End of OS-specific
|
||||
pub const HIOS: STT = @enumFromInt(12);
|
||||
|
||||
/// Symbol is indirect code object
|
||||
pub const GNU_IFUNC: STT = @enumFromInt(@intFromEnum(LOOS) + 0);
|
||||
|
||||
pub const HP_OPAQUE: STT = @enumFromInt(@intFromEnum(LOOS) + 1);
|
||||
pub const HP_STUB: STT = @enumFromInt(@intFromEnum(LOOS) + 2);
|
||||
|
||||
/// Start of processor-specific
|
||||
pub const LOPROC: STT = @enumFromInt(13);
|
||||
/// End of processor-specific
|
||||
pub const HIPROC: STT = @enumFromInt(15);
|
||||
|
||||
pub const SPARC_REGISTER: STT = @enumFromInt(@intFromEnum(LOPROC) + 0);
|
||||
|
||||
pub const PARISC_MILLICODE: STT = @enumFromInt(@intFromEnum(LOPROC) + 0);
|
||||
|
||||
pub const ARM_TFUNC: STT = @enumFromInt(@intFromEnum(LOPROC) + 0);
|
||||
pub const ARM_16BIT: STT = @enumFromInt(@intFromEnum(HIPROC) + 2);
|
||||
};
|
||||
|
||||
pub const STV = enum(u3) {
|
||||
DEFAULT = 0,
|
||||
INTERNAL = 1,
|
||||
HIDDEN = 2,
|
||||
PROTECTED = 3,
|
||||
};
|
||||
|
||||
pub const MAGIC = "\x7fELF";
|
||||
|
||||
|
|
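For readers migrating off the deleted integer constants, here is a minimal sketch (assuming only the new `std.elf.STB`/`STT` enums from this diff, with `const std = @import("std");` in scope) of decoding a raw `st_info` byte by hand:

const std = @import("std");

test "decode st_info with STB/STT instead of the old constants" {
    const st_info: u8 = 0x12; // hypothetical byte: GLOBAL binding, FUNC type
    const bind: std.elf.STB = @enumFromInt(st_info >> 4);
    const ty: std.elf.STT = @enumFromInt(st_info & 0xf);
    try std.testing.expectEqual(std.elf.STB.GLOBAL, bind);
    try std.testing.expectEqual(std.elf.STT.FUNC, ty);
}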
@@ -534,15 +622,15 @@ pub const Header = struct {
        const buf = try r.peek(@sizeOf(Elf64_Ehdr));

        if (!mem.eql(u8, buf[0..4], MAGIC)) return error.InvalidElfMagic;
        if (buf[EI_VERSION] != 1) return error.InvalidElfVersion;
        if (buf[EI.VERSION] != 1) return error.InvalidElfVersion;

        const endian: std.builtin.Endian = switch (buf[EI_DATA]) {
        const endian: std.builtin.Endian = switch (buf[EI.DATA]) {
            ELFDATA2LSB => .little,
            ELFDATA2MSB => .big,
            else => return error.InvalidElfEndian,
        };

        return switch (buf[EI_CLASS]) {
        return switch (buf[EI.CLASS]) {
            ELFCLASS32 => .init(try r.takeStruct(Elf32_Ehdr, endian), endian),
            ELFCLASS64 => .init(try r.takeStruct(Elf64_Ehdr, endian), endian),
            else => return error.InvalidElfClass,
@@ -559,8 +647,8 @@ pub const Header = struct {
            else => @compileError("bad type"),
        },
        .endian = endian,
        .os_abi = @enumFromInt(hdr.e_ident[EI_OSABI]),
        .abi_version = hdr.e_ident[EI_ABIVERSION],
        .os_abi = @enumFromInt(hdr.e_ident[EI.OSABI]),
        .abi_version = hdr.e_ident[EI.ABIVERSION],
        .type = hdr.e_type,
        .machine = hdr.e_machine,
        .entry = hdr.e_entry,
@@ -683,38 +771,200 @@ fn takeShdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Shdr {
    };
}

pub const ELFCLASSNONE = 0;
pub const ELFCLASS32 = 1;
pub const ELFCLASS64 = 2;
pub const ELFCLASSNUM = 3;
pub const EI = struct {
    pub const CLASS = 4;
    pub const DATA = 5;
    pub const VERSION = 6;
    pub const OSABI = 7;
    pub const ABIVERSION = 8;
    pub const PAD = 9;
    pub const NIDENT = 16;
};

pub const ELFDATANONE = 0;
pub const ELFDATA2LSB = 1;
pub const ELFDATA2MSB = 2;
pub const ELFDATANUM = 3;

pub const EI_CLASS = 4;
pub const EI_DATA = 5;
pub const EI_VERSION = 6;
pub const EI_OSABI = 7;
pub const EI_ABIVERSION = 8;
pub const EI_PAD = 9;

pub const EI_NIDENT = 16;
/// Deprecated, use `std.elf.EI.CLASS`
pub const EI_CLASS = EI.CLASS;
/// Deprecated, use `std.elf.EI.DATA`
pub const EI_DATA = EI.DATA;
/// Deprecated, use `std.elf.EI.VERSION`
pub const EI_VERSION = EI.VERSION;
/// Deprecated, use `std.elf.EI.OSABI`
pub const EI_OSABI = EI.OSABI;
/// Deprecated, use `std.elf.EI.ABIVERSION`
pub const EI_ABIVERSION = EI.ABIVERSION;
/// Deprecated, use `std.elf.EI.PAD`
pub const EI_PAD = EI.PAD;
/// Deprecated, use `std.elf.EI.NIDENT`
pub const EI_NIDENT = EI.NIDENT;

pub const Half = u16;
pub const Word = u32;
pub const Sword = i32;
pub const Elf32_Xword = u64;
pub const Elf32_Sxword = i64;
pub const Elf64_Xword = u64;
pub const Xword = u64;
pub const Sxword = i64;
pub const Section = u16;
pub const Elf32 = struct {
    pub const Addr = u32;
    pub const Off = u32;
    pub const Ehdr = extern struct {
        ident: [EI.NIDENT]u8,
        type: ET,
        machine: EM,
        version: Word,
        entry: Elf32.Addr,
        phoff: Elf32.Off,
        shoff: Elf32.Off,
        flags: Word,
        ehsize: Half,
        phentsize: Half,
        phnum: Half,
        shentsize: Half,
        shnum: Half,
        shstrndx: Half,
    };
    pub const Phdr = extern struct {
        type: Word,
        offset: Elf32.Off,
        vaddr: Elf32.Addr,
        paddr: Elf32.Addr,
        filesz: Word,
        memsz: Word,
        flags: PF,
        @"align": Word,
    };
    pub const Shdr = extern struct {
        name: Word,
        type: Word,
        flags: packed struct { shf: SHF },
        addr: Elf32.Addr,
        offset: Elf32.Off,
        size: Word,
        link: Word,
        info: Word,
        addralign: Word,
        entsize: Word,
    };
    pub const Chdr = extern struct {
        type: COMPRESS,
        size: Word,
        addralign: Word,
    };
    pub const Sym = extern struct {
        name: Word,
        value: Elf32.Addr,
        size: Word,
        info: Info,
        other: Other,
        shndx: Section,

        pub const Info = packed struct(u8) {
            type: STT,
            bind: STB,
        };

        pub const Other = packed struct(u8) {
            visibility: STV,
            unused: u5 = 0,
        };
    };
    comptime {
        assert(@sizeOf(Elf32.Ehdr) == 52);
        assert(@sizeOf(Elf32.Phdr) == 32);
        assert(@sizeOf(Elf32.Shdr) == 40);
        assert(@sizeOf(Elf32.Sym) == 16);
    }
};
pub const Elf64 = struct {
    pub const Addr = u64;
    pub const Off = u64;
    pub const Ehdr = extern struct {
        ident: [EI.NIDENT]u8,
        type: ET,
        machine: EM,
        version: Word,
        entry: Elf64.Addr,
        phoff: Elf64.Off,
        shoff: Elf64.Off,
        flags: Word,
        ehsize: Half,
        phentsize: Half,
        phnum: Half,
        shentsize: Half,
        shnum: Half,
        shstrndx: Half,
    };
    pub const Phdr = extern struct {
        type: Word,
        flags: PF,
        offset: Elf64.Off,
        vaddr: Elf64.Addr,
        paddr: Elf64.Addr,
        filesz: Xword,
        memsz: Xword,
        @"align": Xword,
    };
    pub const Shdr = extern struct {
        name: Word,
        type: Word,
        flags: packed struct { shf: SHF, unused: Word = 0 },
        addr: Elf64.Addr,
        offset: Elf64.Off,
        size: Xword,
        link: Word,
        info: Word,
        addralign: Xword,
        entsize: Xword,
    };
    pub const Chdr = extern struct {
        type: COMPRESS,
        reserved: Word = 0,
        size: Xword,
        addralign: Xword,
    };
    pub const Sym = extern struct {
        name: Word,
        info: Info,
        other: Other,
        shndx: Section,
        value: Elf64.Addr,
        size: Xword,

        pub const Info = Elf32.Sym.Info;
        pub const Other = Elf32.Sym.Other;
    };
    comptime {
        assert(@sizeOf(Elf64.Ehdr) == 64);
        assert(@sizeOf(Elf64.Phdr) == 56);
        assert(@sizeOf(Elf64.Shdr) == 64);
        assert(@sizeOf(Elf64.Sym) == 24);
    }
};
pub const ElfN = switch (@sizeOf(usize)) {
    4 => Elf32,
    8 => Elf64,
    else => @compileError("expected pointer size of 32 or 64"),
};

/// Deprecated, use `std.elf.Xword`
pub const Elf32_Xword = Xword;
/// Deprecated, use `std.elf.Sxword`
pub const Elf32_Sxword = Sxword;
/// Deprecated, use `std.elf.Xword`
pub const Elf64_Xword = Xword;
/// Deprecated, use `std.elf.Sxword`
pub const Elf64_Sxword = i64;
/// Deprecated, use `std.elf.Elf32.Addr`
pub const Elf32_Addr = u32;
/// Deprecated, use `std.elf.Elf64.Addr`
pub const Elf64_Addr = u64;
/// Deprecated, use `std.elf.Elf32.Off`
pub const Elf32_Off = u32;
/// Deprecated, use `std.elf.Elf64.Off`
pub const Elf64_Off = u64;
/// Deprecated, use `std.elf.Section`
pub const Elf32_Section = u16;
/// Deprecated, use `std.elf.Section`
pub const Elf64_Section = u16;
/// Deprecated, use `std.elf.Elf32.Ehdr`
pub const Elf32_Ehdr = extern struct {
    e_ident: [EI_NIDENT]u8,
    e_type: ET,
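The packed `Sym.Info` introduced above replaces manual shift-and-mask code. A small sketch (assuming the `Elf64.Sym.Info` layout from this diff, where the low nibble is the type and the high nibble the binding):

const std = @import("std");

test "packed Sym.Info round-trips an st_info byte" {
    const info: std.elf.Elf64.Sym.Info = .{ .type = .FUNC, .bind = .GLOBAL };
    // packed struct(u8): first field occupies the least significant bits.
    try std.testing.expectEqual(@as(u8, 0x12), @as(u8, @bitCast(info)));
}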
@@ -731,8 +981,9 @@ pub const Elf32_Ehdr = extern struct {
    e_shnum: Half,
    e_shstrndx: Half,
};
/// Deprecated, use `std.elf.Elf64.Ehdr`
pub const Elf64_Ehdr = extern struct {
    e_ident: [EI_NIDENT]u8,
    e_ident: [EI.NIDENT]u8,
    e_type: ET,
    e_machine: EM,
    e_version: Word,
@@ -747,6 +998,7 @@ pub const Elf64_Ehdr = extern struct {
    e_shnum: Half,
    e_shstrndx: Half,
};
/// Deprecated, use `std.elf.Elf32.Phdr`
pub const Elf32_Phdr = extern struct {
    p_type: Word,
    p_offset: Elf32_Off,
@@ -757,6 +1009,7 @@ pub const Elf32_Phdr = extern struct {
    p_flags: Word,
    p_align: Word,
};
/// Deprecated, use `std.elf.Elf64.Phdr`
pub const Elf64_Phdr = extern struct {
    p_type: Word,
    p_flags: Word,
@@ -767,6 +1020,7 @@ pub const Elf64_Phdr = extern struct {
    p_memsz: Elf64_Xword,
    p_align: Elf64_Xword,
};
/// Deprecated, use `std.elf.Elf32.Shdr`
pub const Elf32_Shdr = extern struct {
    sh_name: Word,
    sh_type: Word,
@@ -779,6 +1033,7 @@ pub const Elf32_Shdr = extern struct {
    sh_addralign: Word,
    sh_entsize: Word,
};
/// Deprecated, use `std.elf.Elf64.Shdr`
pub const Elf64_Shdr = extern struct {
    sh_name: Word,
    sh_type: Word,
@@ -791,17 +1046,20 @@ pub const Elf64_Shdr = extern struct {
    sh_addralign: Elf64_Xword,
    sh_entsize: Elf64_Xword,
};
/// Deprecated, use `std.elf.Elf32.Chdr`
pub const Elf32_Chdr = extern struct {
    ch_type: COMPRESS,
    ch_size: Word,
    ch_addralign: Word,
};
/// Deprecated, use `std.elf.Elf64.Chdr`
pub const Elf64_Chdr = extern struct {
    ch_type: COMPRESS,
    ch_reserved: Word = 0,
    ch_size: Elf64_Xword,
    ch_addralign: Elf64_Xword,
};
/// Deprecated, use `std.elf.Elf32.Sym`
pub const Elf32_Sym = extern struct {
    st_name: Word,
    st_value: Elf32_Addr,
@@ -817,6 +1075,7 @@ pub const Elf32_Sym = extern struct {
        return @truncate(self.st_info >> 4);
    }
};
/// Deprecated, use `std.elf.Elf64.Sym`
pub const Elf64_Sym = extern struct {
    st_name: Word,
    st_info: u8,
@@ -1020,27 +1279,18 @@ pub const Elf_MIPS_ABIFlags_v0 = extern struct {
    flags2: Word,
};

comptime {
    assert(@sizeOf(Elf32_Ehdr) == 52);
    assert(@sizeOf(Elf64_Ehdr) == 64);

    assert(@sizeOf(Elf32_Phdr) == 32);
    assert(@sizeOf(Elf64_Phdr) == 56);

    assert(@sizeOf(Elf32_Shdr) == 40);
    assert(@sizeOf(Elf64_Shdr) == 64);
}

pub const Auxv = switch (@sizeOf(usize)) {
    4 => Elf32_auxv_t,
    8 => Elf64_auxv_t,
    else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.ElfN.Ehdr`
pub const Ehdr = switch (@sizeOf(usize)) {
    4 => Elf32_Ehdr,
    8 => Elf64_Ehdr,
    else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.ElfN.Phdr`
pub const Phdr = switch (@sizeOf(usize)) {
    4 => Elf32_Phdr,
    8 => Elf64_Phdr,
@@ -1071,20 +1321,53 @@ pub const Shdr = switch (@sizeOf(usize)) {
    8 => Elf64_Shdr,
    else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.ElfN.Chdr`
pub const Chdr = switch (@sizeOf(usize)) {
    4 => Elf32_Chdr,
    8 => Elf64_Chdr,
    else => @compileError("expected pointer size of 32 or 64"),
};
/// Deprecated, use `std.elf.ElfN.Sym`
pub const Sym = switch (@sizeOf(usize)) {
    4 => Elf32_Sym,
    8 => Elf64_Sym,
    else => @compileError("expected pointer size of 32 or 64"),
};
pub const Addr = switch (@sizeOf(usize)) {
    4 => Elf32_Addr,
    8 => Elf64_Addr,
    else => @compileError("expected pointer size of 32 or 64"),
/// Deprecated, use `std.elf.ElfN.Addr`
pub const Addr = ElfN.Addr;

/// Deprecated, use `@intFromEnum(std.elf.CLASS.NONE)`
pub const ELFCLASSNONE = @intFromEnum(CLASS.NONE);
/// Deprecated, use `@intFromEnum(std.elf.CLASS.@"32")`
pub const ELFCLASS32 = @intFromEnum(CLASS.@"32");
/// Deprecated, use `@intFromEnum(std.elf.CLASS.@"64")`
pub const ELFCLASS64 = @intFromEnum(CLASS.@"64");
/// Deprecated, use `std.elf.CLASS.NUM`
pub const ELFCLASSNUM = CLASS.NUM;
pub const CLASS = enum(u8) {
    NONE = 0,
    @"32" = 1,
    @"64" = 2,
    _,

    pub const NUM = @typeInfo(CLASS).@"enum".fields.len;
};

/// Deprecated, use `@intFromEnum(std.elf.DATA.NONE)`
pub const ELFDATANONE = @intFromEnum(DATA.NONE);
/// Deprecated, use `@intFromEnum(std.elf.DATA.@"2LSB")`
pub const ELFDATA2LSB = @intFromEnum(DATA.@"2LSB");
/// Deprecated, use `@intFromEnum(std.elf.DATA.@"2MSB")`
pub const ELFDATA2MSB = @intFromEnum(DATA.@"2MSB");
/// Deprecated, use `std.elf.DATA.NUM`
pub const ELFDATANUM = DATA.NUM;
pub const DATA = enum(u8) {
    NONE = 0,
    @"2LSB" = 1,
    @"2MSB" = 2,
    _,

    pub const NUM = @typeInfo(DATA).@"enum".fields.len;
};

pub const OSABI = enum(u8) {
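A minimal sketch of the new `EI`/`CLASS`/`DATA` names replacing `EI_CLASS`/`ELFCLASS64`/etc. when classifying an identification header (hypothetical e_ident bytes):

const std = @import("std");

test "classify e_ident bytes with CLASS and DATA" {
    const ident = [_]u8{ 0x7f, 'E', 'L', 'F', 2, 1, 1 } ++ [_]u8{0} ** 9;
    try std.testing.expectEqual(std.elf.CLASS.@"64", @as(std.elf.CLASS, @enumFromInt(ident[std.elf.EI.CLASS])));
    try std.testing.expectEqual(std.elf.DATA.@"2LSB", @as(std.elf.DATA, @enumFromInt(ident[std.elf.EI.DATA])));
}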
@@ -1718,6 +2001,108 @@ pub const SHF_MIPS_STRING = 0x80000000;
/// Make code section unreadable when in execute-only mode
pub const SHF_ARM_PURECODE = 0x2000000;

pub const SHF = packed struct(Word) {
    /// Section data should be writable during execution.
    WRITE: bool = false,
    /// Section occupies memory during program execution.
    ALLOC: bool = false,
    /// Section contains executable machine instructions.
    EXECINSTR: bool = false,
    unused3: u1 = 0,
    /// The data in this section may be merged.
    MERGE: bool = false,
    /// The data in this section is null-terminated strings.
    STRINGS: bool = false,
    /// A field in this section holds a section header table index.
    INFO_LINK: bool = false,
    /// Adds special ordering requirements for link editors.
    LINK_ORDER: bool = false,
    /// This section requires special OS-specific processing to avoid incorrect behavior.
    OS_NONCONFORMING: bool = false,
    /// This section is a member of a section group.
    GROUP: bool = false,
    /// This section holds Thread-Local Storage.
    TLS: bool = false,
    /// Identifies a section containing compressed data.
    COMPRESSED: bool = false,
    unused12: u8 = 0,
    OS: packed union {
        MASK: u8,
        GNU: packed struct(u8) {
            unused0: u1 = 0,
            /// Not to be GCed by the linker
            RETAIN: bool = false,
            unused2: u6 = 0,
        },
        MIPS: packed struct(u8) {
            unused0: u4 = 0,
            /// Section contains text/data which may be replicated in other sections.
            /// Linker must retain only one copy.
            NODUPES: bool = false,
            /// Linker must generate implicit hidden weak names.
            NAMES: bool = false,
            /// Section data local to process.
            LOCAL: bool = false,
            /// Do not strip this section.
            NOSTRIP: bool = false,
        },
        ARM: packed struct(u8) {
            unused0: u5 = 0,
            /// Make code section unreadable when in execute-only mode
            PURECODE: bool = false,
            unused6: u2 = 0,
        },
    } = .{ .MASK = 0 },
    PROC: packed union {
        MASK: u4,
        XCORE: packed struct(u4) {
            /// All sections with the "d" flag are grouped together by the linker to form
            /// the data section and the dp register is set to the start of the section by
            /// the boot code.
            DP_SECTION: bool = false,
            /// All sections with the "c" flag are grouped together by the linker to form
            /// the constant pool and the cp register is set to the start of the constant
            /// pool by the boot code.
            CP_SECTION: bool = false,
            unused2: u1 = 0,
            /// This section is excluded from the final executable or shared library.
            EXCLUDE: bool = false,
        },
        X86_64: packed struct(u4) {
            /// If an object file section does not have this flag set, then it may not hold
            /// more than 2GB and can be freely referred to in objects using smaller code
            /// models. Otherwise, only objects using larger code models can refer to them.
            /// For example, a medium code model object can refer to data in a section that
            /// sets this flag besides being able to refer to data in a section that does
            /// not set it; likewise, a small code model object can refer only to code in a
            /// section that does not set this flag.
            LARGE: bool = false,
            unused1: u2 = 0,
            /// This section is excluded from the final executable or shared library.
            EXCLUDE: bool = false,
        },
        HEX: packed struct(u4) {
            /// All sections with the GPREL flag are grouped into a global data area
            /// for faster accesses
            GPREL: bool = false,
            unused1: u2 = 0,
            /// This section is excluded from the final executable or shared library.
            EXCLUDE: bool = false,
        },
        MIPS: packed struct(u4) {
            /// All sections with the GPREL flag are grouped into a global data area
            /// for faster accesses
            GPREL: bool = false,
            /// This section should be merged.
            MERGE: bool = false,
            /// Address size to be inferred from section entry size.
            ADDR: bool = false,
            /// Section data is string data by default.
            STRING: bool = false,
        },
    } = .{ .MASK = 0 },
};

/// Execute
pub const PF_X = 1;
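Because `SHF` is a `packed struct(Word)`, a flags value built by field name still bit-compares with the legacy constants; a sketch (the new `PF` flags below behave the same way):

const std = @import("std");

test "SHF packed struct matches the legacy bit constants" {
    const flags: std.elf.SHF = .{ .ALLOC = true, .EXECINSTR = true };
    // SHF_ALLOC == 0x2, SHF_EXECINSTR == 0x4; ALLOC and EXECINSTR are bits 1 and 2.
    try std.testing.expectEqual(@as(u32, std.elf.SHF_ALLOC | std.elf.SHF_EXECINSTR), @as(u32, @bitCast(flags)));
}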
@@ -1733,6 +2118,19 @@ pub const PF_MASKOS = 0x0ff00000;
/// Bits for processor-specific semantics.
pub const PF_MASKPROC = 0xf0000000;

pub const PF = packed struct(Word) {
    X: bool = false,
    W: bool = false,
    R: bool = false,
    unused3: u17 = 0,
    OS: packed union {
        MASK: u8,
    } = .{ .MASK = 0 },
    PROC: packed union {
        MASK: u4,
    } = .{ .MASK = 0 },
};

/// Undefined section
pub const SHN_UNDEF = 0;
/// Start of reserved indices
@@ -2303,13 +2701,6 @@ pub const R_PPC64 = enum(u32) {
    _,
};

pub const STV = enum(u3) {
    DEFAULT = 0,
    INTERNAL = 1,
    HIDDEN = 2,
    PROTECTED = 3,
};

pub const ar_hdr = extern struct {
    /// Member file name, sometimes / terminated.
    ar_name: [16]u8,
@@ -1916,7 +1916,7 @@ pub const Writer = struct {
        const copy_file_range = switch (native_os) {
            .freebsd => std.os.freebsd.copy_file_range,
            .linux => if (std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 })) std.os.linux.wrapped.copy_file_range else {},
            .linux => std.os.linux.wrapped.copy_file_range,
            else => {},
        };
        if (@TypeOf(copy_file_range) != void) cfr: {
@@ -431,33 +431,6 @@ test "skipValue" {
    try std.testing.expectError(error.SyntaxError, testSkipValue("[102, 111, 111}"));
}

fn testEnsureStackCapacity(do_ensure: bool) !void {
    var fail_alloc = std.testing.FailingAllocator.init(std.testing.allocator, .{ .fail_index = 1 });
    const failing_allocator = fail_alloc.allocator();

    const nestings = 2049; // intentionally not a power of 2.
    var input_string: std.ArrayListUnmanaged(u8) = .empty;
    try input_string.appendNTimes(std.testing.allocator, '[', nestings);
    try input_string.appendNTimes(std.testing.allocator, ']', nestings);
    defer input_string.deinit(std.testing.allocator);

    var scanner = Scanner.initCompleteInput(failing_allocator, input_string.items);
    defer scanner.deinit();

    if (do_ensure) {
        try scanner.ensureTotalStackCapacity(nestings);
    }

    try scanner.skipValue();
    try std.testing.expectEqual(Token.end_of_document, try scanner.next());
}
test "ensureTotalStackCapacity" {
    // Once to demonstrate failure.
    try std.testing.expectError(error.OutOfMemory, testEnsureStackCapacity(false));
    // Then to demonstrate it works.
    try testEnsureStackCapacity(true);
}

fn testDiagnosticsFromSource(expected_error: ?anyerror, line: u64, col: u64, byte_offset: u64, source: anytype) !void {
    var diagnostics = Diagnostics{};
    source.enableDiagnostics(&diagnostics);
@@ -389,7 +389,7 @@ pub fn innerParse(
            switch (try source.peekNextTokenType()) {
                .array_begin => {
                    // Typical array.
                    return internalParseArray(T, arrayInfo.child, arrayInfo.len, allocator, source, options);
                    return internalParseArray(T, arrayInfo.child, allocator, source, options);
                },
                .string => {
                    if (arrayInfo.child != u8) return error.UnexpectedToken;
@@ -440,10 +440,11 @@ pub fn innerParse(
                }
            },

            .vector => |vecInfo| {
            .vector => |vector_info| {
                switch (try source.peekNextTokenType()) {
                    .array_begin => {
                        return internalParseArray(T, vecInfo.child, vecInfo.len, allocator, source, options);
                        const A = [vector_info.len]vector_info.child;
                        return try internalParseArray(A, vector_info.child, allocator, source, options);
                    },
                    else => return error.UnexpectedToken,
                }
@@ -519,7 +520,6 @@ pub fn innerParse(
fn internalParseArray(
    comptime T: type,
    comptime Child: type,
    comptime len: comptime_int,
    allocator: Allocator,
    source: anytype,
    options: ParseOptions,
@@ -527,9 +527,8 @@ fn internalParseArray(
    assert(.array_begin == try source.next());

    var r: T = undefined;
    var i: usize = 0;
    while (i < len) : (i += 1) {
        r[i] = try innerParse(Child, allocator, source, options);
    for (&r) |*elem| {
        elem.* = try innerParse(Child, allocator, source, options);
    }

    if (.array_end != try source.next()) return error.UnexpectedToken;
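With the vector branch above reusing the fixed-size array path, JSON arrays can parse directly into `@Vector` types. A minimal sketch (assuming `std.json.parseFromSliceLeaky` with an arena for the leak-tolerant allocator):

const std = @import("std");

test "vectors parse through the fixed-size array path" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    const v = try std.json.parseFromSliceLeaky(@Vector(3, u8), arena.allocator(), "[1,2,3]", .{});
    try std.testing.expectEqual(@Vector(3, u8){ 1, 2, 3 }, v);
}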
@@ -569,12 +568,12 @@ pub fn innerParseFromValue(
            if (@round(f) != f) return error.InvalidNumber;
            if (f > @as(@TypeOf(f), @floatFromInt(std.math.maxInt(T)))) return error.Overflow;
            if (f < @as(@TypeOf(f), @floatFromInt(std.math.minInt(T)))) return error.Overflow;
            return @as(T, @intFromFloat(f));
            return @intFromFloat(f);
        },
        .integer => |i| {
            if (i > std.math.maxInt(T)) return error.Overflow;
            if (i < std.math.minInt(T)) return error.Overflow;
            return @as(T, @intCast(i));
            return @intCast(i);
        },
        .number_string, .string => |s| {
            return sliceToInt(T, s);
@@ -914,7 +914,7 @@ test "parse at comptime" {
        uptime: u64,
    };
    const config = comptime x: {
        var buf: [256]u8 = undefined;
        var buf: [300]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buf);
        const res = parseFromSliceLeaky(Config, fba.allocator(), doc, .{});
        // Assert no error can occur since we are
@@ -3633,10 +3633,34 @@ test indexOfMinMax {
}

/// Exchanges contents of two memory locations.
pub fn swap(comptime T: type, a: *T, b: *T) void {
    const tmp = a.*;
    a.* = b.*;
    b.* = tmp;
pub fn swap(comptime T: type, noalias a: *T, noalias b: *T) void {
    if (@inComptime()) {
        // In comptime, accessing bytes of values with no defined layout is a compile error.
        const tmp = a.*;
        a.* = b.*;
        b.* = tmp;
    } else {
        // Swapping in streaming nature from start to end instead of swapping
        // everything in one step allows easier optimizations and less stack usage.
        const a_bytes: []align(@alignOf(T)) u8 = @ptrCast(a);
        const b_bytes: []align(@alignOf(T)) u8 = @ptrCast(b);
        for (a_bytes, b_bytes) |*ab, *bb| {
            const tmp = ab.*;
            ab.* = bb.*;
            bb.* = tmp;
        }
    }
}

test "swap works at comptime with types with no defined layout" {
    comptime {
        const T = struct { val: u64 };
        var a: T = .{ .val = 0 };
        var b: T = .{ .val = 1 };
        swap(T, &a, &b);
        try testing.expectEqual(T{ .val = 1 }, a);
        try testing.expectEqual(T{ .val = 0 }, b);
    }
}

inline fn reverseVector(comptime N: usize, comptime T: type, a: []T) [N]T {
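The runtime byte-streaming path is behaviorally identical to the old three-assignment swap; a quick usage sketch:

const std = @import("std");

test "swap exchanges two runtime values" {
    var a: u32 = 1;
    var b: u32 = 2;
    std.mem.swap(u32, &a, &b);
    try std.testing.expectEqual(@as(u32, 2), a);
    try std.testing.expectEqual(@as(u32, 1), b);
}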
@@ -742,13 +742,7 @@ pub fn eql(a: anytype, b: @TypeOf(a)) bool {
            if (!eql(e, b[i])) return false;
        return true;
    },
    .vector => |info| {
        var i: usize = 0;
        while (i < info.len) : (i += 1) {
            if (!eql(a[i], b[i])) return false;
        }
        return true;
    },
    .vector => return @reduce(.And, a == b),
    .pointer => |info| {
        return switch (info.size) {
            .one, .many, .c => a == b,
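The element-wise loop collapses to one SIMD comparison: `a == b` on vectors yields a `@Vector(len, bool)`, and `@reduce(.And, ...)` folds it into a single `bool`. A sketch:

const std = @import("std");

test "vector equality via @reduce" {
    const a: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    try std.testing.expect(@reduce(.And, a == b));
    try std.testing.expect(!@reduce(.And, a == b + @as(@Vector(4, u8), @splat(1))));
}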
@@ -457,24 +457,19 @@ pub fn MultiArrayList(comptime T: type) type {
        /// Invalidates element pointers if additional memory is needed.
        pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
            if (self.capacity >= new_capacity) return;
            return self.setCapacity(gpa, growCapacity(self.capacity, new_capacity));
            return self.setCapacity(gpa, growCapacity(new_capacity));
        }

        const init_capacity = init: {
            var max = 1;
            for (fields) |field| max = @as(comptime_int, @max(max, @sizeOf(field.type)));
            break :init @as(comptime_int, @max(1, std.atomic.cache_line / max));
        const init_capacity: comptime_int = init: {
            var max: comptime_int = 1;
            for (fields) |field| max = @max(max, @sizeOf(field.type));
            break :init @max(1, std.atomic.cache_line / max);
        };

        /// Called when memory growth is necessary. Returns a capacity larger than
        /// minimum that grows super-linearly.
        fn growCapacity(current: usize, minimum: usize) usize {
            var new = current;
            while (true) {
                new +|= new / 2 + init_capacity;
                if (new >= minimum)
                    return new;
            }
        /// Given a lower bound of required memory capacity, returns a larger value
        /// with super-linear growth.
        pub fn growCapacity(minimum: usize) usize {
            return minimum +| (minimum / 2 + init_capacity);
        }

        /// Modify the array so that it can hold at least `additional_count` **more** items.
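The new one-shot `growCapacity` (also used by the Deque change earlier) replaces the old convergence loop: it overshoots the requested minimum by half plus a cache-line-derived constant, using a saturating add. A worked sketch with a hypothetical `init_capacity` of 8:

const std = @import("std");

fn growCapacitySketch(minimum: usize) usize {
    const init_capacity = 8; // hypothetical: cache_line 64 / largest field size 8
    return minimum +| (minimum / 2 + init_capacity);
}

test "super-linear growth from the requested minimum" {
    try std.testing.expectEqual(@as(usize, 158), growCapacitySketch(100)); // 100 + 50 + 8
}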
@@ -9891,7 +9891,9 @@ pub const wrapped = struct {
    };

    pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) CopyFileRangeError!usize {
        const rc = system.copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
        const use_c = std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 });
        const sys = if (use_c) std.c else std.os.linux;
        const rc = sys.copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(rc),
            .BADF => return error.BadFileFlags,
@@ -141,12 +141,12 @@ pub fn restore_rt() callconv(.naked) noreturn {
            \\ svc #0
            :
            : [number] "i" (@intFromEnum(SYS.rt_sigreturn)),
            : .{ .memory = true }),
        ),
        else => asm volatile (
            \\ svc #0
            :
            : [number] "{x8}" (@intFromEnum(SYS.rt_sigreturn)),
            : .{ .memory = true }),
        ),
    }
}
@@ -150,12 +150,12 @@ pub fn restore_rt() callconv(.naked) noreturn {
            \\ svc #0
            :
            : [number] "I" (@intFromEnum(SYS.rt_sigreturn)),
            : .{ .memory = true }),
        ),
        else => asm volatile (
            \\ svc #0
            :
            : [number] "{r7}" (@intFromEnum(SYS.rt_sigreturn)),
            : .{ .memory = true }),
        ),
    }
}
@@ -135,7 +135,7 @@ pub fn restore_rt() callconv(.naked) noreturn {
        \\ trap0(#0)
        :
        : [number] "{r6}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .memory = true });
    );
}

pub const F = struct {
@@ -143,7 +143,7 @@ pub fn restore_rt() callconv(.naked) noreturn {
        \\ syscall 0
        :
        : [number] "r" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .r12 = true, .r13 = true, .r14 = true, .r15 = true, .r16 = true, .r17 = true, .r18 = true, .r19 = true, .r20 = true, .memory = true });
    );
}

pub const msghdr = extern struct {
@@ -148,7 +148,7 @@ pub fn restore_rt() callconv(.naked) noreturn {
    asm volatile ("trap #0"
        :
        : [number] "{d0}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .memory = true });
    );
}

pub const F = struct {
@@ -254,7 +254,7 @@ pub fn restore_rt() callconv(.naked) noreturn {
        \\ syscall
        :
        : [number] "{$2}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .r1 = true, .r3 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .r13 = true, .r14 = true, .r15 = true, .r24 = true, .r25 = true, .hi = true, .lo = true, .memory = true });
    );
}

pub const F = struct {
@@ -233,7 +233,7 @@ pub fn restore_rt() callconv(.naked) noreturn {
        \\ syscall
        :
        : [number] "{$2}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .r1 = true, .r3 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .r13 = true, .r14 = true, .r15 = true, .r24 = true, .r25 = true, .hi = true, .lo = true, .memory = true });
    );
}

pub const F = struct {
@@ -15,84 +15,125 @@ const sockaddr = linux.sockaddr;
const timespec = linux.timespec;

pub fn syscall0(number: SYS) usize {
    // r0 is both an input register and a clobber. musl and glibc achieve this with
    // a "+" constraint, which isn't supported in Zig, so instead we separately list
    // r0 as both an input and an output. (Listing it as an input and a clobber would
    // cause the C backend to emit invalid code; see #25209.)
    var r0_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
        : [number] "{r0}" (@intFromEnum(number)),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall1(number: SYS, arg1: usize) usize {
    // r0 is both an input and a clobber.
    var r0_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall2(number: SYS, arg1: usize, arg2: usize) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall3(number: SYS, arg1: usize, arg2: usize, arg3: usize) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    var r5_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
          [r5_out] "={r5}" (r5_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
          [arg3] "{r5}" (arg3),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall4(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    var r5_out: usize = undefined;
    var r6_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
          [r5_out] "={r5}" (r5_out),
          [r6_out] "={r6}" (r6_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
          [arg3] "{r5}" (arg3),
          [arg4] "{r6}" (arg4),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall5(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    var r5_out: usize = undefined;
    var r6_out: usize = undefined;
    var r7_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
          [r5_out] "={r5}" (r5_out),
          [r6_out] "={r6}" (r6_out),
          [r7_out] "={r7}" (r7_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
          [arg3] "{r5}" (arg3),
          [arg4] "{r6}" (arg4),
          [arg5] "{r7}" (arg5),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall6(
@@ -104,12 +145,25 @@ pub fn syscall6(
    arg5: usize,
    arg6: usize,
) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    var r5_out: usize = undefined;
    var r6_out: usize = undefined;
    var r7_out: usize = undefined;
    var r8_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
          [r5_out] "={r5}" (r5_out),
          [r6_out] "={r6}" (r6_out),
          [r7_out] "={r7}" (r7_out),
          [r8_out] "={r8}" (r8_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
@@ -117,7 +171,7 @@ pub fn syscall6(
          [arg4] "{r6}" (arg4),
          [arg5] "{r7}" (arg5),
          [arg6] "{r8}" (arg6),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn clone() callconv(.naked) usize {
@@ -193,11 +247,19 @@ pub fn clone() callconv(.naked) usize {
pub const restore = restore_rt;

pub fn restore_rt() callconv(.naked) noreturn {
    asm volatile (
        \\ sc
        :
        : [number] "{r0}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
    switch (@import("builtin").zig_backend) {
        .stage2_c => asm volatile (
            \\ li 0, %[number]
            \\ sc
            :
            : [number] "i" (@intFromEnum(SYS.rt_sigreturn)),
        ),
        else => _ = asm volatile (
            \\ sc
            :
            : [number] "{r0}" (@intFromEnum(SYS.rt_sigreturn)),
        ),
    }
}

pub const F = struct {
@@ -15,84 +15,125 @@ const sockaddr = linux.sockaddr;
const timespec = linux.timespec;

pub fn syscall0(number: SYS) usize {
    // r0 is both an input register and a clobber. musl and glibc achieve this with
    // a "+" constraint, which isn't supported in Zig, so instead we separately list
    // r0 as both an input and an output. (Listing it as an input and a clobber would
    // cause the C backend to emit invalid code; see #25209.)
    var r0_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
        : [number] "{r0}" (@intFromEnum(number)),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall1(number: SYS, arg1: usize) usize {
    // r0 is both an input and a clobber.
    var r0_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall2(number: SYS, arg1: usize, arg2: usize) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall3(number: SYS, arg1: usize, arg2: usize, arg3: usize) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    var r5_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
          [r5_out] "={r5}" (r5_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
          [arg3] "{r5}" (arg3),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall4(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    var r5_out: usize = undefined;
    var r6_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
          [r5_out] "={r5}" (r5_out),
          [r6_out] "={r6}" (r6_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
          [arg3] "{r5}" (arg3),
          [arg4] "{r6}" (arg4),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall5(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    var r5_out: usize = undefined;
    var r6_out: usize = undefined;
    var r7_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
          [r5_out] "={r5}" (r5_out),
          [r6_out] "={r6}" (r6_out),
          [r7_out] "={r7}" (r7_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
          [arg3] "{r5}" (arg3),
          [arg4] "{r6}" (arg4),
          [arg5] "{r7}" (arg5),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall6(
@@ -104,12 +145,25 @@ pub fn syscall6(
    arg5: usize,
    arg6: usize,
) usize {
    // These registers are both inputs and clobbers.
    var r0_out: usize = undefined;
    var r4_out: usize = undefined;
    var r5_out: usize = undefined;
    var r6_out: usize = undefined;
    var r7_out: usize = undefined;
    var r8_out: usize = undefined;
    return asm volatile (
        \\ sc
        \\ bns+ 1f
        \\ neg 3, 3
        \\ 1:
        : [ret] "={r3}" (-> usize),
          [r0_out] "={r0}" (r0_out),
          [r4_out] "={r4}" (r4_out),
          [r5_out] "={r5}" (r5_out),
          [r6_out] "={r6}" (r6_out),
          [r7_out] "={r7}" (r7_out),
          [r8_out] "={r8}" (r8_out),
        : [number] "{r0}" (@intFromEnum(number)),
          [arg1] "{r3}" (arg1),
          [arg2] "{r4}" (arg2),
@@ -117,7 +171,7 @@ pub fn syscall6(
          [arg4] "{r6}" (arg4),
          [arg5] "{r7}" (arg5),
          [arg6] "{r8}" (arg6),
        : .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
        : .{ .memory = true, .cr0 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn clone() callconv(.naked) usize {
@@ -178,11 +232,19 @@ pub fn clone() callconv(.naked) usize {
pub const restore = restore_rt;

pub fn restore_rt() callconv(.naked) noreturn {
    asm volatile (
        \\ sc
        :
        : [number] "{r0}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
    switch (@import("builtin").zig_backend) {
        .stage2_c => asm volatile (
            \\ li 0, %[number]
            \\ sc
            :
            : [number] "i" (@intFromEnum(SYS.rt_sigreturn)),
        ),
        else => _ = asm volatile (
            \\ sc
            :
            : [number] "{r0}" (@intFromEnum(SYS.rt_sigreturn)),
        ),
    }
}

pub const F = struct {
@@ -142,7 +142,7 @@ pub fn restore_rt() callconv(.naked) noreturn {
        \\ ecall
        :
        : [number] "{x17}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .memory = true });
    );
}

pub const F = struct {
@@ -142,7 +142,7 @@ pub fn restore_rt() callconv(.naked) noreturn {
        \\ ecall
        :
        : [number] "{x17}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .memory = true });
    );
}

pub const F = struct {
@@ -154,7 +154,7 @@ pub fn restore_rt() callconv(.naked) noreturn {
        \\svc 0
        :
        : [number] "{r1}" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .memory = true });
    );
}

pub const F = struct {
@@ -151,5 +151,5 @@ pub fn restore_rt() callconv(.naked) noreturn {
        \\ svc #0
        :
        : [number] "I" (@intFromEnum(SYS.rt_sigreturn)),
        : .{ .memory = true });
    );
}
@@ -187,12 +187,12 @@ pub fn restore_rt() callconv(.naked) noreturn {
            \\ int $0x80
            :
            : [number] "i" (@intFromEnum(SYS.rt_sigreturn)),
            : .{ .memory = true }),
        ),
        else => asm volatile (
            \\ int $0x80
            :
            : [number] "{eax}" (@intFromEnum(SYS.rt_sigreturn)),
            : .{ .memory = true }),
        ),
    }
}
@@ -137,12 +137,12 @@ pub fn restore_rt() callconv(.naked) noreturn {
            \\ syscall
            :
            : [number] "i" (@intFromEnum(SYS.rt_sigreturn)),
            : .{ .rcx = true, .r11 = true, .memory = true }),
        ),
        else => asm volatile (
            \\ syscall
            :
            : [number] "{rax}" (@intFromEnum(SYS.rt_sigreturn)),
            : .{ .rcx = true, .r11 = true, .memory = true }),
        ),
    }
}
@@ -177,7 +177,7 @@ inline fn getDynamicSymbol() [*]const elf.Dyn {
        \\ jg 2f
        \\ 1: .quad _DYNAMIC - .
        \\ 2:
        : [ret] "=r" (-> [*]const elf.Dyn),
        : [ret] "=a" (-> [*]const elf.Dyn),
    ),
    // The compiler does not necessarily have any obligation to load the `l7` register (pointing
    // to the GOT), so do it ourselves just in case.
@@ -6515,14 +6515,16 @@ pub const CopyFileRangeError = error{
///
/// Maximum offsets on Linux and FreeBSD are `maxInt(i64)`.
pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize {
    if (builtin.os.tag == .freebsd or
        (comptime builtin.os.tag == .linux and std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 })))
    {
    if (builtin.os.tag == .freebsd or builtin.os.tag == .linux) {
        const use_c = native_os != .linux or
            std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 });
        const sys = if (use_c) std.c else linux;

        var off_in_copy: i64 = @bitCast(off_in);
        var off_out_copy: i64 = @bitCast(off_out);

        while (true) {
            const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
            const rc = sys.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
            if (native_os == .freebsd) {
                switch (errno(rc)) {
                    .SUCCESS => return @intCast(rc),
@@ -644,7 +644,6 @@ test "sigrtmin/max" {
    try std.testing.expect(posix.sigrtmin() >= 32);
    try std.testing.expect(posix.sigrtmin() >= posix.system.sigrtmin());
    try std.testing.expect(posix.sigrtmin() < posix.system.sigrtmax());
    try std.testing.expect(posix.sigrtmax() < posix.NSIG);
}

test "sigset empty/full" {
@@ -52,6 +52,8 @@ term: ?(SpawnError!Term),
argv: []const []const u8,

/// Leave as null to use the current env map using the supplied allocator.
/// Required if unable to access the current env map (e.g. building a library on
/// some platforms).
env_map: ?*const EnvMap,

stdin_behavior: StdIo,
@@ -414,6 +416,8 @@ pub fn run(args: struct {
    argv: []const []const u8,
    cwd: ?[]const u8 = null,
    cwd_dir: ?fs.Dir = null,
    /// Required if unable to access the current env map (e.g. building a
    /// library on some platforms).
    env_map: ?*const EnvMap = null,
    max_output_bytes: usize = 50 * 1024,
    expand_arg0: Arg0Expand = .no_expand,
@@ -614,7 +618,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void {
            })).ptr;
        } else {
            // TODO come up with a solution for this.
            @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process");
            @panic("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process");
        }
    };
@ -1,531 +0,0 @@
|
|||
const std = @import("std.zig");
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
// Imagine that `fn at(self: *Self, index: usize) &T` is a customer asking for a box
|
||||
// from a warehouse, based on a flat array, boxes ordered from 0 to N - 1.
|
||||
// But the warehouse actually stores boxes in shelves of increasing powers of 2 sizes.
|
||||
// So when the customer requests a box index, we have to translate it to shelf index
|
||||
// and box index within that shelf. Illustration:
|
||||
//
|
||||
// customer indexes:
|
||||
// shelf 0: 0
|
||||
// shelf 1: 1 2
|
||||
// shelf 2: 3 4 5 6
|
||||
// shelf 3: 7 8 9 10 11 12 13 14
|
||||
// shelf 4: 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
|
||||
// shelf 5: 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62
|
||||
// ...
|
||||
//
|
||||
// warehouse indexes:
|
||||
// shelf 0: 0
|
||||
// shelf 1: 0 1
|
||||
// shelf 2: 0 1 2 3
|
||||
// shelf 3: 0 1 2 3 4 5 6 7
|
||||
// shelf 4: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
|
||||
// shelf 5: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
|
||||
// ...
|
||||
//
|
||||
// With this arrangement, here are the equations to get the shelf index and
|
||||
// box index based on customer box index:
|
||||
//
|
||||
// shelf_index = floor(log2(customer_index + 1))
|
||||
// shelf_count = ceil(log2(box_count + 1))
|
||||
// box_index = customer_index + 1 - 2 ** shelf
|
||||
// shelf_size = 2 ** shelf_index
|
||||
//
|
||||
// Now we complicate it a little bit further by adding a preallocated shelf, which must be
|
||||
// a power of 2:
|
||||
// prealloc=4
|
||||
//
|
||||
// customer indexes:
|
||||
// prealloc: 0 1 2 3
|
||||
// shelf 0: 4 5 6 7 8 9 10 11
|
||||
// shelf 1: 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
|
||||
// shelf 2: 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59
|
||||
// ...
|
||||
//
|
||||
// warehouse indexes:
|
||||
// prealloc: 0 1 2 3
|
||||
// shelf 0: 0 1 2 3 4 5 6 7
|
||||
// shelf 1: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
|
||||
// shelf 2: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
|
||||
// ...
|
||||
//
|
||||
// Now the equations are:
|
||||
//
|
||||
// shelf_index = floor(log2(customer_index + prealloc)) - log2(prealloc) - 1
|
||||
// shelf_count = ceil(log2(box_count + prealloc)) - log2(prealloc) - 1
|
||||
// box_index = customer_index + prealloc - 2 ** (log2(prealloc) + 1 + shelf)
|
||||
// shelf_size = prealloc * 2 ** (shelf_index + 1)
|
||||
|
||||
/// This is a stack data structure where pointers to indexes have the same lifetime as the data structure
|
||||
/// itself, unlike ArrayList where append() invalidates all existing element pointers.
|
||||
/// The tradeoff is that elements are not guaranteed to be contiguous. For that, use ArrayList.
|
||||
/// Note however that most elements are contiguous, making this data structure cache-friendly.
|
||||
///
|
||||
/// Because it never has to copy elements from an old location to a new location, it does not require
|
||||
/// its elements to be copyable, and it avoids wasting memory when backed by an ArenaAllocator.
|
||||
/// Note that the append() and pop() convenience methods perform a copy, but you can instead use
|
||||
/// addOne(), at(), setCapacity(), and shrinkCapacity() to avoid copying items.
|
||||
///
|
||||
/// This data structure has O(1) append and O(1) pop.
|
||||
///
|
||||
/// It supports preallocated elements, making it especially well suited when the expected maximum
|
||||
/// size is small. `prealloc_item_count` must be 0, or a power of 2.
|
||||
pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type {
|
||||
return struct {
|
||||
const Self = @This();
|
||||
const ShelfIndex = std.math.Log2Int(usize);
|
||||
|
||||
const prealloc_exp: ShelfIndex = blk: {
|
||||
// we don't use the prealloc_exp constant when prealloc_item_count is 0
|
||||
// but lazy-init may still be triggered by other code so supply a value
|
||||
if (prealloc_item_count == 0) {
|
||||
break :blk 0;
|
||||
} else {
|
||||
assert(std.math.isPowerOfTwo(prealloc_item_count));
|
||||
const value = std.math.log2_int(usize, prealloc_item_count);
|
||||
break :blk value;
|
||||
}
|
||||
};
|
||||
|
||||
prealloc_segment: [prealloc_item_count]T = undefined,
|
||||
dynamic_segments: [][*]T = &[_][*]T{},
|
||||
len: usize = 0,
|
||||
|
||||
pub const prealloc_count = prealloc_item_count;
|
||||
|
||||
fn AtType(comptime SelfType: type) type {
|
||||
if (@typeInfo(SelfType).pointer.is_const) {
|
||||
return *const T;
|
||||
} else {
|
||||
return *T;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Self, allocator: Allocator) void {
|
||||
self.freeShelves(allocator, @as(ShelfIndex, @intCast(self.dynamic_segments.len)), 0);
|
||||
allocator.free(self.dynamic_segments);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
pub fn at(self: anytype, i: usize) AtType(@TypeOf(self)) {
|
||||
assert(i < self.len);
|
||||
return self.uncheckedAt(i);
|
||||
}
|
||||
|
||||
pub fn count(self: Self) usize {
|
||||
return self.len;
|
||||
}
|
||||
|
||||
pub fn append(self: *Self, allocator: Allocator, item: T) Allocator.Error!void {
|
||||
const new_item_ptr = try self.addOne(allocator);
|
||||
new_item_ptr.* = item;
|
||||
}
|
||||
|
||||
pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) Allocator.Error!void {
|
||||
for (items) |item| {
|
||||
try self.append(allocator, item);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pop(self: *Self) ?T {
|
||||
if (self.len == 0) return null;
|
||||
|
||||
const index = self.len - 1;
|
||||
const result = uncheckedAt(self, index).*;
|
||||
self.len = index;
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn addOne(self: *Self, allocator: Allocator) Allocator.Error!*T {
|
||||
const new_length = self.len + 1;
|
||||
try self.growCapacity(allocator, new_length);
|
||||
const result = uncheckedAt(self, self.len);
|
||||
self.len = new_length;
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Reduce length to `new_len`.
|
||||
/// Invalidates pointers for the elements at index new_len and beyond.
|
||||
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
|
||||
assert(new_len <= self.len);
|
||||
self.len = new_len;
|
||||
}
|
||||
|
||||
/// Invalidates all element pointers.
|
||||
pub fn clearRetainingCapacity(self: *Self) void {
|
||||
self.len = 0;
|
||||
}
|
||||
|
||||
/// Invalidates all element pointers.
|
||||
pub fn clearAndFree(self: *Self, allocator: Allocator) void {
|
||||
self.setCapacity(allocator, 0) catch unreachable;
|
||||
self.len = 0;
|
||||
}
|
||||
|
||||
/// Grows or shrinks capacity to match usage.
|
||||
/// TODO update this and related methods to match the conventions set by ArrayList
|
||||
pub fn setCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
|
||||
if (prealloc_item_count != 0) {
|
||||
if (new_capacity <= @as(usize, 1) << (prealloc_exp + @as(ShelfIndex, @intCast(self.dynamic_segments.len)))) {
|
||||
return self.shrinkCapacity(allocator, new_capacity);
|
||||
}
|
||||
}
|
||||
return self.growCapacity(allocator, new_capacity);
|
||||
}
|
||||
|
||||
/// Only grows capacity, or retains current capacity.
|
||||
pub fn growCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
|
||||
const new_cap_shelf_count = shelfCount(new_capacity);
|
||||
const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
|
||||
if (new_cap_shelf_count <= old_shelf_count) return;
|
||||
|
||||
const new_dynamic_segments = try allocator.alloc([*]T, new_cap_shelf_count);
|
||||
errdefer allocator.free(new_dynamic_segments);
|
||||
|
||||
var i: ShelfIndex = 0;
|
||||
while (i < old_shelf_count) : (i += 1) {
|
||||
new_dynamic_segments[i] = self.dynamic_segments[i];
|
||||
}
|
||||
errdefer while (i > old_shelf_count) : (i -= 1) {
|
||||
allocator.free(new_dynamic_segments[i][0..shelfSize(i)]);
|
||||
};
|
||||
while (i < new_cap_shelf_count) : (i += 1) {
|
||||
new_dynamic_segments[i] = (try allocator.alloc(T, shelfSize(i))).ptr;
|
||||
}
|
||||
|
||||
allocator.free(self.dynamic_segments);
|
||||
self.dynamic_segments = new_dynamic_segments;
|
||||
}
|
||||
|
||||
/// Only shrinks capacity or retains current capacity.
|
||||
/// It may fail to reduce the capacity in which case the capacity will remain unchanged.
|
||||
pub fn shrinkCapacity(self: *Self, allocator: Allocator, new_capacity: usize) void {
|
||||
if (new_capacity <= prealloc_item_count) {
|
||||
const len = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
|
||||
self.freeShelves(allocator, len, 0);
|
||||
allocator.free(self.dynamic_segments);
|
||||
self.dynamic_segments = &[_][*]T{};
|
||||
return;
|
||||
}
|
||||
|
||||
const new_cap_shelf_count = shelfCount(new_capacity);
|
||||
const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
|
||||
assert(new_cap_shelf_count <= old_shelf_count);
|
||||
if (new_cap_shelf_count == old_shelf_count) return;
|
||||
|
||||
// freeShelves() must be called before resizing the dynamic
|
||||
// segments, but we don't know if resizing the dynamic segments
|
||||
// will work until we try it. So we must allocate a fresh memory
|
||||
// buffer in order to reduce capacity.
|
||||
const new_dynamic_segments = allocator.alloc([*]T, new_cap_shelf_count) catch return;
|
||||
self.freeShelves(allocator, old_shelf_count, new_cap_shelf_count);
|
||||
if (allocator.resize(self.dynamic_segments, new_cap_shelf_count)) {
|
||||
// We didn't need the new memory allocation after all.
|
||||
self.dynamic_segments = self.dynamic_segments[0..new_cap_shelf_count];
|
||||
allocator.free(new_dynamic_segments);
|
||||
} else {
|
||||
// Good thing we allocated that new memory slice.
|
||||
@memcpy(new_dynamic_segments, self.dynamic_segments[0..new_cap_shelf_count]);
|
||||
allocator.free(self.dynamic_segments);
|
||||
self.dynamic_segments = new_dynamic_segments;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn shrink(self: *Self, new_len: usize) void {
|
||||
assert(new_len <= self.len);
|
||||
// TODO take advantage of the new realloc semantics
|
||||
self.len = new_len;
|
||||
}
|
||||
|
||||
pub fn writeToSlice(self: *Self, dest: []T, start: usize) void {
|
||||
const end = start + dest.len;
|
||||
assert(end <= self.len);
|
||||
|
||||
var i = start;
|
||||
if (end <= prealloc_item_count) {
|
||||
const src = self.prealloc_segment[i..end];
|
||||
@memcpy(dest[i - start ..][0..src.len], src);
|
||||
return;
|
||||
} else if (i < prealloc_item_count) {
|
||||
const src = self.prealloc_segment[i..];
|
||||
@memcpy(dest[i - start ..][0..src.len], src);
|
||||
i = prealloc_item_count;
|
||||
}
|
||||
|
||||
while (i < end) {
|
||||
const shelf_index = shelfIndex(i);
|
||||
const copy_start = boxIndex(i, shelf_index);
|
||||
const copy_end = @min(shelfSize(shelf_index), copy_start + end - i);
|
||||
const src = self.dynamic_segments[shelf_index][copy_start..copy_end];
|
||||
@memcpy(dest[i - start ..][0..src.len], src);
|
||||
i += (copy_end - copy_start);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uncheckedAt(self: anytype, index: usize) AtType(@TypeOf(self)) {
|
||||
if (index < prealloc_item_count) {
|
||||
return &self.prealloc_segment[index];
|
||||
}
|
||||
const shelf_index = shelfIndex(index);
|
||||
const box_index = boxIndex(index, shelf_index);
|
||||
return &self.dynamic_segments[shelf_index][box_index];
|
||||
}
|
||||
|
||||
fn shelfCount(box_count: usize) ShelfIndex {
|
||||
if (prealloc_item_count == 0) {
|
||||
return log2_int_ceil(usize, box_count + 1);
|
||||
}
|
||||
return log2_int_ceil(usize, box_count + prealloc_item_count) - prealloc_exp - 1;
|
||||
}
|
||||
|
||||
fn shelfSize(shelf_index: ShelfIndex) usize {
|
||||
if (prealloc_item_count == 0) {
|
||||
return @as(usize, 1) << shelf_index;
|
||||
}
|
||||
return @as(usize, 1) << (shelf_index + (prealloc_exp + 1));
|
||||
}
|
||||
|
||||
fn shelfIndex(list_index: usize) ShelfIndex {
|
||||
if (prealloc_item_count == 0) {
|
||||
return std.math.log2_int(usize, list_index + 1);
|
||||
}
|
||||
return std.math.log2_int(usize, list_index + prealloc_item_count) - prealloc_exp - 1;
|
||||
}
|
||||
|
||||
fn boxIndex(list_index: usize, shelf_index: ShelfIndex) usize {
|
||||
if (prealloc_item_count == 0) {
|
||||
return (list_index + 1) - (@as(usize, 1) << shelf_index);
|
||||
}
|
||||
return list_index + prealloc_item_count - (@as(usize, 1) << ((prealloc_exp + 1) + shelf_index));
|
||||
}
|
||||
|
||||
fn freeShelves(self: *Self, allocator: Allocator, from_count: ShelfIndex, to_count: ShelfIndex) void {
|
||||
var i = from_count;
|
||||
while (i != to_count) {
|
||||
i -= 1;
|
||||
allocator.free(self.dynamic_segments[i][0..shelfSize(i)]);
|
||||
}
|
||||
}
|
||||
|
||||
pub const Iterator = BaseIterator(*Self, *T);
|
||||
pub const ConstIterator = BaseIterator(*const Self, *const T);
|
||||
fn BaseIterator(comptime SelfType: type, comptime ElementPtr: type) type {
|
||||
return struct {
|
||||
list: SelfType,
|
||||
index: usize,
|
||||
box_index: usize,
|
||||
shelf_index: ShelfIndex,
|
||||
shelf_size: usize,
|
||||
|
||||
pub fn next(it: *@This()) ?ElementPtr {
|
||||
if (it.index >= it.list.len) return null;
|
||||
if (it.index < prealloc_item_count) {
|
||||
const ptr = &it.list.prealloc_segment[it.index];
|
||||
it.index += 1;
|
||||
if (it.index == prealloc_item_count) {
|
||||
it.box_index = 0;
|
||||
it.shelf_index = 0;
|
||||
it.shelf_size = prealloc_item_count * 2;
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
const ptr = &it.list.dynamic_segments[it.shelf_index][it.box_index];
|
||||
it.index += 1;
|
||||
it.box_index += 1;
|
||||
if (it.box_index == it.shelf_size) {
|
||||
it.shelf_index += 1;
|
||||
it.box_index = 0;
|
||||
it.shelf_size *= 2;
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
pub fn prev(it: *@This()) ?ElementPtr {
|
||||
if (it.index == 0) return null;
|
||||
|
||||
it.index -= 1;
|
||||
if (it.index < prealloc_item_count) return &it.list.prealloc_segment[it.index];
|
||||
|
||||
if (it.box_index == 0) {
|
||||
it.shelf_index -= 1;
|
||||
it.shelf_size /= 2;
|
||||
it.box_index = it.shelf_size - 1;
|
||||
} else {
|
||||
it.box_index -= 1;
|
||||
}
|
||||
|
||||
return &it.list.dynamic_segments[it.shelf_index][it.box_index];
|
||||
}
|
||||
|
||||
pub fn peek(it: *@This()) ?ElementPtr {
|
||||
if (it.index >= it.list.len)
|
||||
return null;
|
||||
if (it.index < prealloc_item_count)
|
||||
return &it.list.prealloc_segment[it.index];
|
||||
|
||||
return &it.list.dynamic_segments[it.shelf_index][it.box_index];
|
||||
}
|
||||
|
||||
pub fn set(it: *@This(), index: usize) void {
|
||||
it.index = index;
|
||||
if (index < prealloc_item_count) return;
|
||||
it.shelf_index = shelfIndex(index);
|
||||
it.box_index = boxIndex(index, it.shelf_index);
|
||||
it.shelf_size = shelfSize(it.shelf_index);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn iterator(self: *Self, start_index: usize) Iterator {
|
||||
var it = Iterator{
|
||||
.list = self,
|
||||
.index = undefined,
|
||||
.shelf_index = undefined,
|
||||
.box_index = undefined,
|
||||
.shelf_size = undefined,
|
||||
};
|
||||
it.set(start_index);
|
||||
return it;
|
||||
}
|
||||
|
||||
pub fn constIterator(self: *const Self, start_index: usize) ConstIterator {
|
||||
var it = ConstIterator{
|
||||
.list = self,
|
||||
.index = undefined,
|
||||
.shelf_index = undefined,
|
||||
.box_index = undefined,
|
||||
.shelf_size = undefined,
|
||||
};
|
||||
it.set(start_index);
|
||||
return it;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
test "basic usage" {
|
||||
try testSegmentedList(0);
|
||||
try testSegmentedList(1);
|
||||
try testSegmentedList(2);
|
||||
try testSegmentedList(4);
|
||||
try testSegmentedList(8);
|
||||
try testSegmentedList(16);
|
||||
}
|
||||
|
||||
fn testSegmentedList(comptime prealloc: usize) !void {
|
||||
var list = SegmentedList(i32, prealloc){};
|
||||
defer list.deinit(testing.allocator);
|
||||
|
||||
{
|
||||
var i: usize = 0;
|
||||
while (i < 100) : (i += 1) {
|
||||
try list.append(testing.allocator, @as(i32, @intCast(i + 1)));
|
||||
try testing.expect(list.len == i + 1);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
var i: usize = 0;
|
||||
while (i < 100) : (i += 1) {
|
||||
try testing.expect(list.at(i).* == @as(i32, @intCast(i + 1)));
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
var it = list.iterator(0);
|
||||
var x: i32 = 0;
|
||||
while (it.next()) |item| {
|
||||
x += 1;
|
||||
try testing.expect(item.* == x);
|
||||
}
|
||||
try testing.expect(x == 100);
|
||||
while (it.prev()) |item| : (x -= 1) {
|
||||
try testing.expect(item.* == x);
|
||||
}
|
||||
try testing.expect(x == 0);
|
||||
}
|
||||
|
||||
{
|
||||
var it = list.constIterator(0);
|
||||
var x: i32 = 0;
|
||||
while (it.next()) |item| {
|
||||
x += 1;
|
||||
try testing.expect(item.* == x);
|
||||
}
|
||||
try testing.expect(x == 100);
|
||||
while (it.prev()) |item| : (x -= 1) {
|
||||
try testing.expect(item.* == x);
|
||||
}
|
||||
try testing.expect(x == 0);
|
||||
}
|
||||
|
||||
try testing.expect(list.pop().? == 100);
|
||||
try testing.expect(list.len == 99);
|
||||
|
||||
try list.appendSlice(testing.allocator, &[_]i32{ 1, 2, 3 });
|
||||
try testing.expect(list.len == 102);
|
||||
try testing.expect(list.pop().? == 3);
|
||||
try testing.expect(list.pop().? == 2);
|
||||
try testing.expect(list.pop().? == 1);
|
||||
try testing.expect(list.len == 99);
|
||||
|
||||
try list.appendSlice(testing.allocator, &[_]i32{});
|
||||
try testing.expect(list.len == 99);
|
||||
|
||||
{
|
||||
var i: i32 = 99;
|
||||
while (list.pop()) |item| : (i -= 1) {
|
||||
try testing.expect(item == i);
|
||||
list.shrinkCapacity(testing.allocator, list.len);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
var control: [100]i32 = undefined;
|
||||
var dest: [100]i32 = undefined;
|
||||
|
||||
var i: i32 = 0;
|
||||
while (i < 100) : (i += 1) {
|
||||
try list.append(testing.allocator, i + 1);
|
||||
control[@as(usize, @intCast(i))] = i + 1;
|
||||
}
|
||||
|
||||
@memset(dest[0..], 0);
|
||||
list.writeToSlice(dest[0..], 0);
|
||||
try testing.expect(mem.eql(i32, control[0..], dest[0..]));
|
||||
|
||||
@memset(dest[0..], 0);
|
||||
list.writeToSlice(dest[50..], 50);
|
||||
try testing.expect(mem.eql(i32, control[50..], dest[50..]));
|
||||
}
|
||||
|
||||
try list.setCapacity(testing.allocator, 0);
|
||||
}
|
||||
|
||||
test "clearRetainingCapacity" {
|
||||
var list = SegmentedList(i32, 1){};
|
||||
defer list.deinit(testing.allocator);
|
||||
|
||||
try list.appendSlice(testing.allocator, &[_]i32{ 4, 5 });
|
||||
list.clearRetainingCapacity();
|
||||
try list.append(testing.allocator, 6);
|
||||
try testing.expect(list.at(0).* == 6);
|
||||
try testing.expect(list.len == 1);
|
||||
list.clearRetainingCapacity();
|
||||
try testing.expect(list.len == 0);
|
||||
}
|
||||
|
||||
/// TODO look into why this std.math function was changed in
|
||||
/// fc9430f56798a53f9393a697f4ccd6bf9981b970.
|
||||
fn log2_int_ceil(comptime T: type, x: T) std.math.Log2Int(T) {
|
||||
assert(x != 0);
|
||||
const log2_val = std.math.log2_int(T, x);
|
||||
if (@as(T, 1) << log2_val == x)
|
||||
return log2_val;
|
||||
return log2_val + 1;
|
||||
}
|
||||
|
|
@ -26,7 +26,6 @@ pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue;
|
|||
pub const PriorityDequeue = @import("priority_dequeue.zig").PriorityDequeue;
|
||||
pub const Progress = @import("Progress.zig");
|
||||
pub const Random = @import("Random.zig");
|
||||
pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
|
||||
pub const SemanticVersion = @import("SemanticVersion.zig");
|
||||
pub const SinglyLinkedList = @import("SinglyLinkedList.zig");
|
||||
pub const StaticBitSet = bit_set.StaticBitSet;
|
||||
|
|
|
|||
|
|
@ -135,15 +135,9 @@ fn expectEqualInner(comptime T: type, expected: T, actual: T) !void {
|
|||
.array => |array| try expectEqualSlices(array.child, &expected, &actual),
|
||||
|
||||
.vector => |info| {
|
||||
var i: usize = 0;
|
||||
while (i < info.len) : (i += 1) {
|
||||
if (!std.meta.eql(expected[i], actual[i])) {
|
||||
print("index {d} incorrect. expected {any}, found {any}\n", .{
|
||||
i, expected[i], actual[i],
|
||||
});
|
||||
return error.TestExpectedEqual;
|
||||
}
|
||||
}
|
||||
const expect_array: [info.len]info.child = expected;
|
||||
const actual_array: [info.len]info.child = actual;
|
||||
try expectEqualSlices(info.child, &expect_array, &actual_array);
|
||||
},
|
||||
|
||||
.@"struct" => |structType| {
|
||||
|
|
@ -828,8 +822,7 @@ fn expectEqualDeepInner(comptime T: type, expected: T, actual: T) error{TestExpe
|
|||
print("Vector len not the same, expected {d}, found {d}\n", .{ info.len, @typeInfo(@TypeOf(actual)).vector.len });
|
||||
return error.TestExpectedEqual;
|
||||
}
|
||||
var i: usize = 0;
|
||||
while (i < info.len) : (i += 1) {
|
||||
inline for (0..info.len) |i| {
|
||||
expectEqualDeep(expected[i], actual[i]) catch |e| {
|
||||
print("index {d} incorrect. expected {any}, found {any}\n", .{
|
||||
i, expected[i], actual[i],
|
||||
|
|
|
|||
|
|
@ -383,6 +383,8 @@ const ResultInfo = struct {
|
|||
assignment,
|
||||
/// No specific operator in particular.
|
||||
none,
|
||||
/// The expression is operand to address-of which is the operand to a return expression.
|
||||
return_addrof,
|
||||
};
|
||||
};
|
||||
|
||||
|
|
@ -955,7 +957,14 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
|
|||
_ = try gz.addUnTok(.validate_ref_ty, res_ty_inst, tree.firstToken(node));
|
||||
break :rl .{ .ref_coerced_ty = res_ty_inst };
|
||||
} else .ref;
|
||||
const result = try expr(gz, scope, .{ .rl = operand_rl }, tree.nodeData(node).node);
|
||||
const operand_node = tree.nodeData(node).node;
|
||||
const result = try expr(gz, scope, .{
|
||||
.rl = operand_rl,
|
||||
.ctx = switch (ri.ctx) {
|
||||
.@"return" => .return_addrof,
|
||||
else => .none,
|
||||
},
|
||||
}, operand_node);
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.optional_type => {
|
||||
|
|
@ -2728,12 +2737,12 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
|
|||
.elem_ptr,
|
||||
.elem_val,
|
||||
.elem_ptr_node,
|
||||
.elem_val_node,
|
||||
.elem_ptr_load,
|
||||
.elem_val_imm,
|
||||
.field_ptr,
|
||||
.field_val,
|
||||
.field_ptr_load,
|
||||
.field_ptr_named,
|
||||
.field_val_named,
|
||||
.field_ptr_named_load,
|
||||
.func,
|
||||
.func_inferred,
|
||||
.func_fancy,
|
||||
|
|
@ -6160,7 +6169,7 @@ fn fieldAccess(
|
|||
switch (ri.rl) {
|
||||
.ref, .ref_coerced_ty => return addFieldAccess(.field_ptr, gz, scope, .{ .rl = .ref }, node),
|
||||
else => {
|
||||
const access = try addFieldAccess(.field_val, gz, scope, .{ .rl = .none }, node);
|
||||
const access = try addFieldAccess(.field_ptr_load, gz, scope, .{ .rl = .ref }, node);
|
||||
return rvalue(gz, ri, access, node);
|
||||
},
|
||||
}
|
||||
|
|
@ -6210,14 +6219,14 @@ fn arrayAccess(
|
|||
},
|
||||
else => {
|
||||
const lhs_node, const rhs_node = tree.nodeData(node).node_and_node;
|
||||
const lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node);
|
||||
const lhs = try expr(gz, scope, .{ .rl = .ref }, lhs_node);
|
||||
|
||||
const cursor = maybeAdvanceSourceCursorToMainToken(gz, node);
|
||||
|
||||
const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, rhs_node);
|
||||
try emitDbgStmt(gz, cursor);
|
||||
|
||||
return rvalue(gz, ri, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node);
|
||||
return rvalue(gz, ri, try gz.addPlNode(.elem_ptr_load, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -8420,13 +8429,19 @@ fn localVarRef(
|
|||
local_ptr.used = .fromToken(ident_token);
|
||||
}
|
||||
|
||||
// Can't close over a runtime variable
|
||||
if (num_namespaces_out != 0 and !local_ptr.maybe_comptime and !gz.is_typeof) {
|
||||
const ident_name = try astgen.identifierTokenString(ident_token);
|
||||
return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{
|
||||
try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}),
|
||||
try astgen.errNoteNode(capturing_namespace.node, "crosses namespace boundary here", .{}),
|
||||
});
|
||||
if (!local_ptr.maybe_comptime and !gz.is_typeof) {
|
||||
if (num_namespaces_out != 0) {
|
||||
const ident_name = try astgen.identifierTokenString(ident_token);
|
||||
return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{
|
||||
try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}),
|
||||
try astgen.errNoteNode(capturing_namespace.node, "crosses namespace boundary here", .{}),
|
||||
});
|
||||
} else if (ri.ctx == .return_addrof) {
|
||||
const ident_name = try astgen.identifierTokenString(ident_token);
|
||||
return astgen.failNodeNotes(ident, "returning address of expired local variable '{s}'", .{ident_name}, &.{
|
||||
try astgen.errNoteTok(local_ptr.token_src, "declared runtime-known here", .{}),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
switch (ri.rl) {
|
||||
|
|
@ -9286,17 +9301,21 @@ fn builtinCall(
|
|||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.field => {
|
||||
if (ri.rl == .ref or ri.rl == .ref_coerced_ty) {
|
||||
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
|
||||
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
|
||||
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .field_name),
|
||||
});
|
||||
switch (ri.rl) {
|
||||
.ref, .ref_coerced_ty => {
|
||||
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
|
||||
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
|
||||
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .field_name),
|
||||
});
|
||||
},
|
||||
else => {
|
||||
const result = try gz.addPlNode(.field_ptr_named_load, node, Zir.Inst.FieldNamed{
|
||||
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
|
||||
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .field_name),
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
}
|
||||
const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{
|
||||
.lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
|
||||
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .field_name),
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.FieldType => {
|
||||
const ty_inst = try typeExpr(gz, scope, params[0]);
|
||||
|
|
|
|||
|
|
@ -420,6 +420,7 @@ pub const Inst = struct {
|
|||
/// is the local's value.
|
||||
dbg_var_val,
|
||||
/// Uses a name to identify a Decl and takes a pointer to it.
|
||||
///
|
||||
/// Uses the `str_tok` union field.
|
||||
decl_ref,
|
||||
/// Uses a name to identify a Decl and uses it as a value.
|
||||
|
|
@ -440,12 +441,17 @@ pub const Inst = struct {
|
|||
/// Payload is `Bin`.
|
||||
/// No OOB safety check is emitted.
|
||||
elem_ptr,
|
||||
/// Given an array, slice, or pointer, returns the element at the provided index.
|
||||
/// Given a pointer to an array, slice, or pointer, loads the element
|
||||
/// at the provided index.
|
||||
///
|
||||
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
|
||||
elem_val_node,
|
||||
/// Same as `elem_val_node` but used only for for loop.
|
||||
/// Uses the `pl_node` union field. AST node is the condition of a for loop.
|
||||
/// Payload is `Bin`.
|
||||
elem_ptr_load,
|
||||
/// Given an array, slice, or pointer, returns the element at the
|
||||
/// provided index.
|
||||
///
|
||||
/// Uses the `pl_node` union field. AST node is the condition of a for
|
||||
/// loop. Payload is `Bin`.
|
||||
///
|
||||
/// No OOB safety check is emitted.
|
||||
elem_val,
|
||||
/// Same as `elem_val` but takes the index as an immediate value.
|
||||
|
|
@ -472,19 +478,26 @@ pub const Inst = struct {
|
|||
/// to the named field. The field name is stored in string_bytes. Used by a.b syntax.
|
||||
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
|
||||
field_ptr,
|
||||
/// Given a struct or object that contains virtual fields, returns the named field.
|
||||
/// Given a pointer to a struct or object that contains virtual fields, loads from the
|
||||
/// named field.
|
||||
///
|
||||
/// The field name is stored in string_bytes. Used by a.b syntax.
|
||||
///
|
||||
/// This instruction also accepts a pointer.
|
||||
///
|
||||
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
|
||||
field_val,
|
||||
field_ptr_load,
|
||||
/// Given a pointer to a struct or object that contains virtual fields, returns a pointer
|
||||
/// to the named field. The field name is a comptime instruction. Used by @field.
|
||||
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
|
||||
field_ptr_named,
|
||||
/// Given a struct or object that contains virtual fields, returns the named field.
|
||||
/// Given a pointer to a struct or object that contains virtual fields,
|
||||
/// loads from the named field.
|
||||
///
|
||||
/// The field name is a comptime instruction. Used by @field.
|
||||
///
|
||||
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
|
||||
field_val_named,
|
||||
field_ptr_named_load,
|
||||
/// Returns a function type, or a function instance, depending on whether
|
||||
/// the body_len is 0. Calling convention is auto.
|
||||
/// Uses the `pl_node` union field. `payload_index` points to a `Func`.
|
||||
|
|
@ -1138,16 +1151,16 @@ pub const Inst = struct {
|
|||
.elem_ptr,
|
||||
.elem_val,
|
||||
.elem_ptr_node,
|
||||
.elem_val_node,
|
||||
.elem_ptr_load,
|
||||
.elem_val_imm,
|
||||
.ensure_result_used,
|
||||
.ensure_result_non_error,
|
||||
.ensure_err_union_payload_void,
|
||||
.@"export",
|
||||
.field_ptr,
|
||||
.field_val,
|
||||
.field_ptr_load,
|
||||
.field_ptr_named,
|
||||
.field_val_named,
|
||||
.field_ptr_named_load,
|
||||
.func,
|
||||
.func_inferred,
|
||||
.func_fancy,
|
||||
|
|
@ -1432,12 +1445,12 @@ pub const Inst = struct {
|
|||
.elem_ptr,
|
||||
.elem_val,
|
||||
.elem_ptr_node,
|
||||
.elem_val_node,
|
||||
.elem_ptr_load,
|
||||
.elem_val_imm,
|
||||
.field_ptr,
|
||||
.field_val,
|
||||
.field_ptr_load,
|
||||
.field_ptr_named,
|
||||
.field_val_named,
|
||||
.field_ptr_named_load,
|
||||
.func,
|
||||
.func_inferred,
|
||||
.func_fancy,
|
||||
|
|
@ -1679,7 +1692,7 @@ pub const Inst = struct {
|
|||
.elem_ptr = .pl_node,
|
||||
.elem_ptr_node = .pl_node,
|
||||
.elem_val = .pl_node,
|
||||
.elem_val_node = .pl_node,
|
||||
.elem_ptr_load = .pl_node,
|
||||
.elem_val_imm = .elem_val_imm,
|
||||
.ensure_result_used = .un_node,
|
||||
.ensure_result_non_error = .un_node,
|
||||
|
|
@ -1688,9 +1701,9 @@ pub const Inst = struct {
|
|||
.error_value = .str_tok,
|
||||
.@"export" = .pl_node,
|
||||
.field_ptr = .pl_node,
|
||||
.field_val = .pl_node,
|
||||
.field_ptr_load = .pl_node,
|
||||
.field_ptr_named = .pl_node,
|
||||
.field_val_named = .pl_node,
|
||||
.field_ptr_named_load = .pl_node,
|
||||
.func = .pl_node,
|
||||
.func_inferred = .pl_node,
|
||||
.func_fancy = .pl_node,
|
||||
|
|
@ -4215,7 +4228,7 @@ fn findTrackableInner(
|
|||
.div,
|
||||
.elem_ptr_node,
|
||||
.elem_ptr,
|
||||
.elem_val_node,
|
||||
.elem_ptr_load,
|
||||
.elem_val,
|
||||
.elem_val_imm,
|
||||
.ensure_result_used,
|
||||
|
|
@ -4225,9 +4238,9 @@ fn findTrackableInner(
|
|||
.error_value,
|
||||
.@"export",
|
||||
.field_ptr,
|
||||
.field_val,
|
||||
.field_ptr_load,
|
||||
.field_ptr_named,
|
||||
.field_val_named,
|
||||
.field_ptr_named_load,
|
||||
.import,
|
||||
.int,
|
||||
.int_big,
|
||||
|
|
|
|||
|
|
@ -516,15 +516,15 @@ pub fn abiAndDynamicLinkerFromFile(
|
|||
const hdr32: *elf.Elf32_Ehdr = @ptrCast(&hdr_buf);
|
||||
const hdr64: *elf.Elf64_Ehdr = @ptrCast(&hdr_buf);
|
||||
if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
|
||||
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
|
||||
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI.DATA]) {
|
||||
elf.ELFDATA2LSB => .little,
|
||||
elf.ELFDATA2MSB => .big,
|
||||
else => return error.InvalidElfEndian,
|
||||
};
|
||||
const need_bswap = elf_endian != native_endian;
|
||||
if (hdr32.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
|
||||
if (hdr32.e_ident[elf.EI.VERSION] != 1) return error.InvalidElfVersion;
|
||||
|
||||
const is_64 = switch (hdr32.e_ident[elf.EI_CLASS]) {
|
||||
const is_64 = switch (hdr32.e_ident[elf.EI.CLASS]) {
|
||||
elf.ELFCLASS32 => false,
|
||||
elf.ELFCLASS64 => true,
|
||||
else => return error.InvalidElfClass,
|
||||
|
|
@ -920,15 +920,15 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
|
|||
const hdr32: *elf.Elf32_Ehdr = @ptrCast(&hdr_buf);
|
||||
const hdr64: *elf.Elf64_Ehdr = @ptrCast(&hdr_buf);
|
||||
if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
|
||||
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
|
||||
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI.DATA]) {
|
||||
elf.ELFDATA2LSB => .little,
|
||||
elf.ELFDATA2MSB => .big,
|
||||
else => return error.InvalidElfEndian,
|
||||
};
|
||||
const need_bswap = elf_endian != native_endian;
|
||||
if (hdr32.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
|
||||
if (hdr32.e_ident[elf.EI.VERSION] != 1) return error.InvalidElfVersion;
|
||||
|
||||
const is_64 = switch (hdr32.e_ident[elf.EI_CLASS]) {
|
||||
const is_64 = switch (hdr32.e_ident[elf.EI.CLASS]) {
|
||||
elf.ELFCLASS32 => false,
|
||||
elf.ELFCLASS64 => true,
|
||||
else => return error.InvalidElfClass,
|
||||
|
|
|
|||
|
|
@ -157,13 +157,11 @@ pub fn valueArbitraryDepth(self: *Serializer, val: anytype, options: ValueOption
|
|||
}
|
||||
},
|
||||
.array => {
|
||||
var container = try self.beginTuple(
|
||||
.{ .whitespace_style = .{ .fields = val.len } },
|
||||
);
|
||||
for (val) |item_val| {
|
||||
try container.fieldArbitraryDepth(item_val, options);
|
||||
}
|
||||
try container.end();
|
||||
try valueArbitraryDepthArray(self, @TypeOf(val), &val, options);
|
||||
},
|
||||
.vector => |vector| {
|
||||
const array: [vector.len]vector.child = val;
|
||||
try valueArbitraryDepthArray(self, @TypeOf(array), &array, options);
|
||||
},
|
||||
.@"struct" => |@"struct"| if (@"struct".is_tuple) {
|
||||
var container = try self.beginTuple(
|
||||
|
|
@ -231,20 +229,21 @@ pub fn valueArbitraryDepth(self: *Serializer, val: anytype, options: ValueOption
|
|||
} else {
|
||||
try self.writer.writeAll("null");
|
||||
},
|
||||
.vector => |vector| {
|
||||
var container = try self.beginTuple(
|
||||
.{ .whitespace_style = .{ .fields = vector.len } },
|
||||
);
|
||||
for (0..vector.len) |i| {
|
||||
try container.fieldArbitraryDepth(val[i], options);
|
||||
}
|
||||
try container.end();
|
||||
},
|
||||
|
||||
else => comptime unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
fn valueArbitraryDepthArray(s: *Serializer, comptime A: type, array: *const A, options: ValueOptions) Error!void {
|
||||
var container = try s.beginTuple(
|
||||
.{ .whitespace_style = .{ .fields = array.len } },
|
||||
);
|
||||
for (array) |elem| {
|
||||
try container.fieldArbitraryDepth(elem, options);
|
||||
}
|
||||
try container.end();
|
||||
}
|
||||
|
||||
/// Serialize an integer.
|
||||
pub fn int(self: *Serializer, val: anytype) Error!void {
|
||||
try self.writer.printInt(val, 10, .lower, .{});
|
||||
|
|
|
|||
|
|
@ -430,8 +430,12 @@ pub fn free(gpa: Allocator, value: anytype) void {
|
|||
.many, .c => comptime unreachable,
|
||||
}
|
||||
},
|
||||
.array => for (value) |item| {
|
||||
free(gpa, item);
|
||||
.array => {
|
||||
freeArray(gpa, @TypeOf(value), &value);
|
||||
},
|
||||
.vector => |vector| {
|
||||
const array: [vector.len]vector.child = value;
|
||||
freeArray(gpa, @TypeOf(array), &array);
|
||||
},
|
||||
.@"struct" => |@"struct"| inline for (@"struct".fields) |field| {
|
||||
free(gpa, @field(value, field.name));
|
||||
|
|
@ -446,12 +450,15 @@ pub fn free(gpa: Allocator, value: anytype) void {
|
|||
.optional => if (value) |some| {
|
||||
free(gpa, some);
|
||||
},
|
||||
.vector => |vector| for (0..vector.len) |i| free(gpa, value[i]),
|
||||
.void => {},
|
||||
else => comptime unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
fn freeArray(gpa: Allocator, comptime A: type, array: *const A) void {
|
||||
for (array) |elem| free(gpa, elem);
|
||||
}
|
||||
|
||||
fn requiresAllocator(T: type) bool {
|
||||
_ = valid_types;
|
||||
return switch (@typeInfo(T)) {
|
||||
|
|
@ -521,12 +528,15 @@ const Parser = struct {
|
|||
else => comptime unreachable,
|
||||
},
|
||||
.array => return self.parseArray(T, node),
|
||||
.vector => |vector| {
|
||||
const A = [vector.len]vector.child;
|
||||
return try self.parseArray(A, node);
|
||||
},
|
||||
.@"struct" => |@"struct"| if (@"struct".is_tuple)
|
||||
return self.parseTuple(T, node)
|
||||
else
|
||||
return self.parseStruct(T, node),
|
||||
.@"union" => return self.parseUnion(T, node),
|
||||
.vector => return self.parseVector(T, node),
|
||||
|
||||
else => comptime unreachable,
|
||||
}
|
||||
|
|
@ -786,6 +796,7 @@ const Parser = struct {
|
|||
|
||||
elem.* = try self.parseExpr(array_info.child, nodes.at(@intCast(i)));
|
||||
}
|
||||
if (array_info.sentinel()) |s| result[result.len] = s;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
@ -998,37 +1009,6 @@ const Parser = struct {
|
|||
}
|
||||
}
|
||||
|
||||
fn parseVector(
|
||||
self: *@This(),
|
||||
T: type,
|
||||
node: Zoir.Node.Index,
|
||||
) !T {
|
||||
const vector_info = @typeInfo(T).vector;
|
||||
|
||||
const nodes: Zoir.Node.Index.Range = switch (node.get(self.zoir)) {
|
||||
.array_literal => |nodes| nodes,
|
||||
.empty_literal => .{ .start = node, .len = 0 },
|
||||
else => return error.WrongType,
|
||||
};
|
||||
|
||||
var result: T = undefined;
|
||||
|
||||
if (nodes.len != vector_info.len) {
|
||||
return self.failNodeFmt(
|
||||
node,
|
||||
"expected {} vector elements; found {}",
|
||||
.{ vector_info.len, nodes.len },
|
||||
);
|
||||
}
|
||||
|
||||
for (0..vector_info.len) |i| {
|
||||
errdefer for (0..i) |j| free(self.gpa, result[j]);
|
||||
result[i] = try self.parseExpr(vector_info.child, nodes.at(@intCast(i)));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
fn failTokenFmt(
|
||||
self: @This(),
|
||||
token: Ast.TokenIndex,
|
||||
|
|
@ -3209,7 +3189,7 @@ test "std.zon vector" {
|
|||
fromSlice(@Vector(2, f32), gpa, ".{0.5}", &diag, .{}),
|
||||
);
|
||||
try std.testing.expectFmt(
|
||||
"1:2: error: expected 2 vector elements; found 1\n",
|
||||
"1:2: error: expected 2 array elements; found 1\n",
|
||||
"{f}",
|
||||
.{diag},
|
||||
);
|
||||
|
|
@ -3224,7 +3204,7 @@ test "std.zon vector" {
|
|||
fromSlice(@Vector(2, f32), gpa, ".{0.5, 1.5, 2.5}", &diag, .{}),
|
||||
);
|
||||
try std.testing.expectFmt(
|
||||
"1:2: error: expected 2 vector elements; found 3\n",
|
||||
"1:13: error: index 2 outside of array of length 2\n",
|
||||
"{f}",
|
||||
.{diag},
|
||||
);
|
||||
|
|
|
|||
30
src/Air.zig
30
src/Air.zig
|
|
@ -166,19 +166,25 @@ pub const Inst = struct {
|
|||
mod,
|
||||
/// Same as `mod` with optimized float mode.
|
||||
mod_optimized,
|
||||
/// Add an offset to a pointer, returning a new pointer.
|
||||
/// The offset is in element type units, not bytes.
|
||||
/// Wrapping is illegal behavior.
|
||||
/// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
|
||||
/// The pointer may be a slice.
|
||||
/// Uses the `ty_pl` field. Payload is `Bin`.
|
||||
/// Add an offset, in element type units, to a pointer, returning a new
|
||||
/// pointer. Element type may not be zero bits.
|
||||
///
|
||||
/// Wrapping is illegal behavior. If the newly computed address is
|
||||
/// outside the provenance of the operand, the result is undefined.
|
||||
///
|
||||
/// Uses the `ty_pl` field. Payload is `Bin`. The lhs is the pointer,
|
||||
/// rhs is the offset. Result type is the same as lhs. The operand may
|
||||
/// be a slice.
|
||||
ptr_add,
|
||||
/// Subtract an offset from a pointer, returning a new pointer.
|
||||
/// The offset is in element type units, not bytes.
|
||||
/// Wrapping is illegal behavior.
|
||||
/// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
|
||||
/// The pointer may be a slice.
|
||||
/// Uses the `ty_pl` field. Payload is `Bin`.
|
||||
/// Subtract an offset, in element type units, from a pointer,
|
||||
/// returning a new pointer. Element type may not be zero bits.
|
||||
///
|
||||
/// Wrapping is illegal behavior. If the newly computed address is
|
||||
/// outside the provenance of the operand, the result is undefined.
|
||||
///
|
||||
/// Uses the `ty_pl` field. Payload is `Bin`. The lhs is the pointer,
|
||||
/// rhs is the offset. Result type is the same as lhs. The operand may
|
||||
/// be a slice.
|
||||
ptr_sub,
|
||||
/// Given two operands which can be floats, integers, or vectors, returns the
|
||||
/// greater of the operands. For vectors it operates element-wise.
|
||||
|
|
|
|||
|
|
@ -2682,12 +2682,10 @@ const Block = struct {
|
|||
},
|
||||
.@"packed" => switch (agg_ty.zigTypeTag(zcu)) {
|
||||
else => unreachable,
|
||||
.@"struct" => switch (agg_ty.packedStructFieldPtrInfo(agg_ptr_ty, @intCast(field_index), pt)) {
|
||||
.bit_ptr => |packed_offset| {
|
||||
field_ptr_info.packed_offset = packed_offset;
|
||||
break :field_ptr_align agg_ptr_align;
|
||||
},
|
||||
.byte_ptr => |ptr_info| ptr_info.alignment,
|
||||
.@"struct" => {
|
||||
const packed_offset = agg_ty.packedStructFieldPtrInfo(agg_ptr_ty, @intCast(field_index), pt);
|
||||
field_ptr_info.packed_offset = packed_offset;
|
||||
break :field_ptr_align agg_ptr_align;
|
||||
},
|
||||
.@"union" => {
|
||||
field_ptr_info.packed_offset = .{
|
||||
|
|
|
|||
|
|
@ -207,501 +207,6 @@ pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool
|
|||
return (l.tomb_bits[usize_index] & mask) != 0;
|
||||
}
|
||||
|
||||
const OperandCategory = enum {
|
||||
/// The operand lives on, but this instruction cannot possibly mutate memory.
|
||||
none,
|
||||
/// The operand lives on and this instruction can mutate memory.
|
||||
write,
|
||||
/// The operand dies at this instruction.
|
||||
tomb,
|
||||
/// The operand lives on, and this instruction is noreturn.
|
||||
noret,
|
||||
/// This instruction is too complicated for analysis, no information is available.
|
||||
complex,
|
||||
};
|
||||
|
||||
/// Given an instruction that we are examining, and an operand that we are looking for,
|
||||
/// returns a classification.
|
||||
pub fn categorizeOperand(
|
||||
l: Liveness,
|
||||
air: Air,
|
||||
zcu: *Zcu,
|
||||
inst: Air.Inst.Index,
|
||||
operand: Air.Inst.Index,
|
||||
ip: *const InternPool,
|
||||
) OperandCategory {
|
||||
const air_tags = air.instructions.items(.tag);
|
||||
const air_datas = air.instructions.items(.data);
|
||||
const operand_ref = operand.toRef();
|
||||
switch (air_tags[@intFromEnum(inst)]) {
|
||||
.add,
|
||||
.add_safe,
|
||||
.add_wrap,
|
||||
.add_sat,
|
||||
.add_optimized,
|
||||
.sub,
|
||||
.sub_safe,
|
||||
.sub_wrap,
|
||||
.sub_sat,
|
||||
.sub_optimized,
|
||||
.mul,
|
||||
.mul_safe,
|
||||
.mul_wrap,
|
||||
.mul_sat,
|
||||
.mul_optimized,
|
||||
.div_float,
|
||||
.div_trunc,
|
||||
.div_floor,
|
||||
.div_exact,
|
||||
.rem,
|
||||
.mod,
|
||||
.bit_and,
|
||||
.bit_or,
|
||||
.xor,
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_eq,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
.cmp_neq,
|
||||
.bool_and,
|
||||
.bool_or,
|
||||
.array_elem_val,
|
||||
.slice_elem_val,
|
||||
.ptr_elem_val,
|
||||
.shl,
|
||||
.shl_exact,
|
||||
.shl_sat,
|
||||
.shr,
|
||||
.shr_exact,
|
||||
.min,
|
||||
.max,
|
||||
.div_float_optimized,
|
||||
.div_trunc_optimized,
|
||||
.div_floor_optimized,
|
||||
.div_exact_optimized,
|
||||
.rem_optimized,
|
||||
.mod_optimized,
|
||||
.neg_optimized,
|
||||
.cmp_lt_optimized,
|
||||
.cmp_lte_optimized,
|
||||
.cmp_eq_optimized,
|
||||
.cmp_gte_optimized,
|
||||
.cmp_gt_optimized,
|
||||
.cmp_neq_optimized,
|
||||
=> {
|
||||
const o = air_datas[@intFromEnum(inst)].bin_op;
|
||||
if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
if (o.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
|
||||
return .none;
|
||||
},
|
||||
|
||||
.store,
|
||||
.store_safe,
|
||||
.atomic_store_unordered,
|
||||
.atomic_store_monotonic,
|
||||
.atomic_store_release,
|
||||
.atomic_store_seq_cst,
|
||||
.set_union_tag,
|
||||
.memset,
|
||||
.memset_safe,
|
||||
.memcpy,
|
||||
.memmove,
|
||||
=> {
|
||||
const o = air_datas[@intFromEnum(inst)].bin_op;
|
||||
if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
|
||||
if (o.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
|
||||
return .write;
|
||||
},
|
||||
|
||||
.vector_store_elem => {
|
||||
const o = air_datas[@intFromEnum(inst)].vector_store_elem;
|
||||
const extra = air.extraData(Air.Bin, o.payload).data;
|
||||
if (o.vector_ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
|
||||
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
|
||||
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none);
|
||||
return .write;
|
||||
},
|
||||
|
||||
.arg,
|
||||
.alloc,
|
||||
.inferred_alloc,
|
||||
.inferred_alloc_comptime,
|
||||
.ret_ptr,
|
||||
.trap,
|
||||
.breakpoint,
|
||||
.repeat,
|
||||
.switch_dispatch,
|
||||
.dbg_stmt,
|
||||
.dbg_empty_stmt,
|
||||
.unreach,
|
||||
.ret_addr,
|
||||
.frame_addr,
|
||||
.wasm_memory_size,
|
||||
.err_return_trace,
|
||||
.save_err_return_trace_index,
|
||||
.runtime_nav_ptr,
|
||||
.c_va_start,
|
||||
.work_item_id,
|
||||
.work_group_size,
|
||||
.work_group_id,
|
||||
=> return .none,
|
||||
|
||||
.not,
|
||||
.bitcast,
|
||||
.load,
|
||||
.fpext,
|
||||
.fptrunc,
|
||||
.intcast,
|
||||
.intcast_safe,
|
||||
.trunc,
|
||||
.optional_payload,
|
||||
.optional_payload_ptr,
|
||||
.wrap_optional,
|
||||
.unwrap_errunion_payload,
|
||||
.unwrap_errunion_err,
|
||||
.unwrap_errunion_payload_ptr,
|
||||
.unwrap_errunion_err_ptr,
|
||||
.wrap_errunion_payload,
|
||||
.wrap_errunion_err,
|
||||
.slice_ptr,
|
||||
.slice_len,
|
||||
.ptr_slice_len_ptr,
|
||||
.ptr_slice_ptr_ptr,
|
||||
.struct_field_ptr_index_0,
|
||||
.struct_field_ptr_index_1,
|
||||
.struct_field_ptr_index_2,
|
||||
.struct_field_ptr_index_3,
|
||||
.array_to_slice,
|
||||
.int_from_float,
|
||||
.int_from_float_optimized,
|
||||
.int_from_float_safe,
|
||||
.int_from_float_optimized_safe,
|
||||
.float_from_int,
|
||||
.get_union_tag,
|
||||
.clz,
|
||||
.ctz,
|
||||
.popcount,
|
||||
.byte_swap,
|
||||
.bit_reverse,
|
||||
.splat,
|
||||
.error_set_has_value,
|
||||
.addrspace_cast,
|
||||
.c_va_arg,
|
||||
.c_va_copy,
|
||||
.abs,
|
||||
=> {
|
||||
const o = air_datas[@intFromEnum(inst)].ty_op;
|
||||
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
|
||||
.optional_payload_ptr_set,
|
||||
.errunion_payload_ptr_set,
|
||||
=> {
|
||||
const o = air_datas[@intFromEnum(inst)].ty_op;
|
||||
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
|
||||
return .write;
|
||||
},
|
||||
|
||||
.is_null,
|
||||
.is_non_null,
|
||||
.is_null_ptr,
|
||||
.is_non_null_ptr,
|
||||
.is_err,
|
||||
.is_non_err,
|
||||
.is_err_ptr,
|
||||
.is_non_err_ptr,
|
||||
.is_named_enum_value,
|
||||
.tag_name,
|
||||
.error_name,
|
||||
.sqrt,
|
||||
.sin,
|
||||
.cos,
|
||||
.tan,
|
||||
.exp,
|
||||
.exp2,
|
||||
.log,
|
||||
.log2,
|
||||
.log10,
|
||||
.floor,
|
||||
.ceil,
|
||||
.round,
|
||||
.trunc_float,
|
||||
.neg,
|
||||
.cmp_lt_errors_len,
|
||||
.c_va_end,
|
||||
=> {
|
||||
const o = air_datas[@intFromEnum(inst)].un_op;
|
||||
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
|
||||
.ret,
|
||||
.ret_safe,
|
||||
.ret_load,
|
||||
=> {
|
||||
const o = air_datas[@intFromEnum(inst)].un_op;
|
||||
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .noret);
|
||||
return .noret;
|
||||
},
|
||||
|
||||
.set_err_return_trace => {
|
||||
const o = air_datas[@intFromEnum(inst)].un_op;
|
||||
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
|
||||
return .write;
|
||||
},
|
||||
|
||||
.add_with_overflow,
|
||||
.sub_with_overflow,
|
||||
.mul_with_overflow,
|
||||
.shl_with_overflow,
|
||||
.ptr_add,
|
||||
.ptr_sub,
|
||||
.ptr_elem_ptr,
|
||||
.slice_elem_ptr,
|
||||
.slice,
|
||||
=> {
|
||||
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
||||
const extra = air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
|
||||
return .none;
|
||||
},
|
||||
|
||||
.dbg_var_ptr,
|
||||
.dbg_var_val,
|
||||
.dbg_arg_inline,
|
||||
=> {
|
||||
const o = air_datas[@intFromEnum(inst)].pl_op.operand;
|
||||
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
|
||||
.prefetch => {
|
||||
const prefetch = air_datas[@intFromEnum(inst)].prefetch;
|
||||
if (prefetch.ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
|
||||
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
|
||||
const inst_data = air_datas[@intFromEnum(inst)].pl_op;
|
||||
const callee = inst_data.operand;
|
||||
const extra = air.extraData(Air.Call, inst_data.payload);
|
||||
const args = @as([]const Air.Inst.Ref, @ptrCast(air.extra.items[extra.end..][0..extra.data.args_len]));
|
||||
if (args.len + 1 <= bpi - 1) {
|
||||
if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
|
||||
for (args, 0..) |arg, i| {
|
||||
if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i + 1)), .write);
|
||||
}
|
||||
return .write;
|
||||
}
|
||||
var bt = l.iterateBigTomb(inst);
|
||||
if (bt.feed()) {
|
||||
if (callee == operand_ref) return .tomb;
|
||||
} else {
|
||||
if (callee == operand_ref) return .write;
|
||||
}
|
||||
for (args) |arg| {
|
||||
if (bt.feed()) {
|
||||
if (arg == operand_ref) return .tomb;
|
||||
} else {
|
||||
if (arg == operand_ref) return .write;
|
||||
}
|
||||
}
|
||||
return .write;
|
||||
},
|
||||
.select => {
|
||||
const pl_op = air_datas[@intFromEnum(inst)].pl_op;
|
||||
const extra = air.extraData(Air.Bin, pl_op.payload).data;
|
||||
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
|
||||
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none);
|
||||
return .none;
|
||||
},
|
||||
.shuffle_one => {
|
||||
const unwrapped = air.unwrapShuffleOne(zcu, inst);
|
||||
if (unwrapped.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
.shuffle_two => {
|
||||
const unwrapped = air.unwrapShuffleTwo(zcu, inst);
|
||||
if (unwrapped.operand_a == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
if (unwrapped.operand_b == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
|
||||
return .none;
|
||||
},
|
||||
.reduce, .reduce_optimized => {
|
||||
const reduce = air_datas[@intFromEnum(inst)].reduce;
|
||||
if (reduce.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
.cmp_vector, .cmp_vector_optimized => {
|
||||
const extra = air.extraData(Air.VectorCmp, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
|
||||
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
|
||||
return .none;
|
||||
},
|
||||
.aggregate_init => {
|
||||
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
|
||||
const aggregate_ty = ty_pl.ty.toType();
|
||||
const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip)));
|
||||
const elements = @as([]const Air.Inst.Ref, @ptrCast(air.extra.items[ty_pl.payload..][0..len]));
|
||||
|
||||
if (elements.len <= bpi - 1) {
|
||||
for (elements, 0..) |elem, i| {
|
||||
if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i)), .none);
|
||||
}
|
||||
return .none;
|
||||
}
|
||||
|
||||
var bt = l.iterateBigTomb(inst);
|
||||
for (elements) |elem| {
|
||||
if (bt.feed()) {
|
||||
if (elem == operand_ref) return .tomb;
|
||||
} else {
|
||||
if (elem == operand_ref) return .write;
|
||||
}
|
||||
}
|
||||
return .write;
|
||||
},
|
||||
.union_init => {
|
||||
const extra = air.extraData(Air.UnionInit, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
|
||||
if (extra.init == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
.struct_field_ptr, .struct_field_val => {
|
||||
const extra = air.extraData(Air.StructField, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
|
||||
if (extra.struct_operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
.field_parent_ptr => {
|
||||
const extra = air.extraData(Air.FieldParentPtr, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
|
||||
if (extra.field_ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
.cmpxchg_strong, .cmpxchg_weak => {
|
||||
const extra = air.extraData(Air.Cmpxchg, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
|
||||
if (extra.ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
|
||||
if (extra.expected_value == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
|
||||
if (extra.new_value == operand_ref) return matchOperandSmallIndex(l, inst, 2, .write);
|
||||
return .write;
|
||||
},
|
||||
.mul_add => {
|
||||
const pl_op = air_datas[@intFromEnum(inst)].pl_op;
|
||||
const extra = air.extraData(Air.Bin, pl_op.payload).data;
|
||||
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
|
||||
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none);
|
||||
return .none;
|
||||
},
|
||||
.atomic_load => {
|
||||
const ptr = air_datas[@intFromEnum(inst)].atomic_load.ptr;
|
||||
if (ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
|
||||
return .none;
|
||||
},
|
||||
.atomic_rmw => {
|
||||
const pl_op = air_datas[@intFromEnum(inst)].pl_op;
|
||||
const extra = air.extraData(Air.AtomicRmw, pl_op.payload).data;
|
||||
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
|
||||
if (extra.operand == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
|
||||
return .write;
|
||||
},
|
||||
|
||||
.br => {
|
||||
const br = air_datas[@intFromEnum(inst)].br;
|
||||
if (br.operand == operand_ref) return matchOperandSmallIndex(l, operand, 0, .noret);
            return .noret;
        },
        .assembly => {
            return .complex;
        },
        .block, .dbg_inline_block => |tag| {
            const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
            const body: []const Air.Inst.Index = @ptrCast(switch (tag) {
                inline .block, .dbg_inline_block => |comptime_tag| body: {
                    const extra = air.extraData(switch (comptime_tag) {
                        .block => Air.Block,
                        .dbg_inline_block => Air.DbgInlineBlock,
                        else => unreachable,
                    }, ty_pl.payload);
                    break :body air.extra.items[extra.end..][0..extra.data.body_len];
                },
                else => unreachable,
            });

            if (body.len == 1 and air_tags[@intFromEnum(body[0])] == .cond_br) {
                // Peephole optimization for "panic-like" conditionals, which have
                // one empty branch and another which calls a `noreturn` function.
                // This allows us to infer that safety checks do not modify memory,
                // as far as control flow successors are concerned.

                const inst_data = air_datas[@intFromEnum(body[0])].pl_op;
                const cond_extra = air.extraData(Air.CondBr, inst_data.payload);
                if (inst_data.operand == operand_ref and operandDies(l, body[0], 0))
                    return .tomb;

                if (cond_extra.data.then_body_len > 2 or cond_extra.data.else_body_len > 2)
                    return .complex;

                const then_body: []const Air.Inst.Index = @ptrCast(air.extra.items[cond_extra.end..][0..cond_extra.data.then_body_len]);
                const else_body: []const Air.Inst.Index = @ptrCast(air.extra.items[cond_extra.end + cond_extra.data.then_body_len ..][0..cond_extra.data.else_body_len]);
                if (then_body.len > 1 and air_tags[@intFromEnum(then_body[1])] != .unreach)
                    return .complex;
                if (else_body.len > 1 and air_tags[@intFromEnum(else_body[1])] != .unreach)
                    return .complex;

                var operand_live: bool = true;
                for (&[_]Air.Inst.Index{ then_body[0], else_body[0] }) |cond_inst| {
                    if (l.categorizeOperand(air, zcu, cond_inst, operand, ip) == .tomb)
                        operand_live = false;

                    switch (air_tags[@intFromEnum(cond_inst)]) {
                        .br => { // Breaks immediately back to block
                            const br = air_datas[@intFromEnum(cond_inst)].br;
                            if (br.block_inst != inst)
                                return .complex;
                        },
                        .call => {}, // Calls a noreturn function
                        else => return .complex,
                    }
                }
                return if (operand_live) .none else .tomb;
            }

            return .complex;
        },

        .@"try",
        .try_cold,
        .try_ptr,
        .try_ptr_cold,
        .loop,
        .cond_br,
        .switch_br,
        .loop_switch_br,
        => return .complex,

        .wasm_memory_grow => {
            const pl_op = air_datas[@intFromEnum(inst)].pl_op;
            if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
            return .none;
        },
    }
}

fn matchOperandSmallIndex(
    l: Liveness,
    inst: Air.Inst.Index,
    operand: OperandInt,
    default: OperandCategory,
) OperandCategory {
    if (operandDies(l, inst, operand)) {
        return .tomb;
    } else {
        return default;
    }
}

/// Higher level API.
pub const CondBrSlices = struct {
    then_deaths: []const Air.Inst.Index,
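The "panic-like" shape this peephole recognizes is the lowering of an ordinary safety check: one branch of the `cond_br` is empty and breaks back to the block, the other calls a `noreturn` function. A minimal illustration in plain Zig (not part of this commit):

fn checkedGet(items: []const u32, i: usize) u32 {
    // Lowers to a block containing a single cond_br: the "true" branch calls
    // a noreturn panic handler; the "false" branch is empty.
    if (i >= items.len) @panic("index out of bounds");
    return items[i];
}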
@@ -177,7 +177,6 @@ debug_compiler_runtime_libs: bool,
debug_compile_errors: bool,
/// Do not check this field directly. Instead, use the `debugIncremental` wrapper function.
debug_incremental: bool,
incremental: bool,
alloc_failure_occurred: bool = false,
last_update_was_cache_hit: bool = false,

@@ -256,7 +255,9 @@ mutex: if (builtin.single_threaded) struct {
test_filters: []const []const u8,

link_task_wait_group: WaitGroup = .{},
link_prog_node: std.Progress.Node = std.Progress.Node.none,
link_prog_node: std.Progress.Node = .none,
link_uav_prog_node: std.Progress.Node = .none,
link_lazy_prog_node: std.Progress.Node = .none,

llvm_opt_bisect_limit: c_int,

@@ -1746,7 +1747,6 @@ pub const CreateOptions = struct {
    debug_compiler_runtime_libs: bool = false,
    debug_compile_errors: bool = false,
    debug_incremental: bool = false,
    incremental: bool = false,
    /// Normally when you create a `Compilation`, Zig will automatically build
    /// and link in required dependencies, such as compiler-rt and libc. When
    /// building such dependencies themselves, this flag must be set to avoid
@@ -1982,6 +1982,7 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
        };
        if (have_zcu and (!need_llvm or use_llvm)) {
            if (output_mode == .Obj) break :s .zcu;
            if (options.config.use_new_linker) break :s .zcu;
            switch (target_util.zigBackend(target, use_llvm)) {
                else => {},
                .stage2_aarch64, .stage2_x86_64 => if (target.ofmt == .coff) {
@@ -2188,8 +2189,8 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
        .inherited = .{},
        .global = options.config,
        .parent = options.root_mod,
    }) catch |err| return switch (err) {
        error.OutOfMemory => |e| return e,
    }) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        // None of these are possible because the configuration matches the root module
        // which already passed these checks.
        error.ValgrindUnsupportedOnTarget => unreachable,
@@ -2266,7 +2267,6 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
        .debug_compiler_runtime_libs = options.debug_compiler_runtime_libs,
        .debug_compile_errors = options.debug_compile_errors,
        .debug_incremental = options.debug_incremental,
        .incremental = options.incremental,
        .root_name = root_name,
        .sysroot = sysroot,
        .windows_libs = .empty,
@@ -2409,6 +2409,8 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
        // Synchronize with other matching comments: ZigOnlyHashStuff
        hash.add(use_llvm);
        hash.add(options.config.use_lib_llvm);
        hash.add(options.config.use_lld);
        hash.add(options.config.use_new_linker);
        hash.add(options.config.dll_export_fns);
        hash.add(options.config.is_test);
        hash.addListOfBytes(options.test_filters);
@@ -3075,14 +3077,29 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE

    // The linker progress node is set up here instead of in `performAllTheWork`, because
    // we also want it around during `flush`.
    const have_link_node = comp.bin_file != null;
    if (have_link_node) {
    if (comp.bin_file) |lf| {
        comp.link_prog_node = main_progress_node.start("Linking", 0);
        if (lf.cast(.elf2)) |elf| {
            comp.link_prog_node.increaseEstimatedTotalItems(3);
            comp.link_uav_prog_node = comp.link_prog_node.start("Constants", 0);
            comp.link_lazy_prog_node = comp.link_prog_node.start("Synthetics", 0);
            elf.mf.update_prog_node = comp.link_prog_node.start("Relocations", elf.mf.updates.items.len);
        }
    }
    defer if (have_link_node) {
    defer {
        comp.link_prog_node.end();
        comp.link_prog_node = .none;
    };
        comp.link_uav_prog_node.end();
        comp.link_uav_prog_node = .none;
        comp.link_lazy_prog_node.end();
        comp.link_lazy_prog_node = .none;
        if (comp.bin_file) |lf| {
            if (lf.cast(.elf2)) |elf| {
                elf.mf.update_prog_node.end();
                elf.mf.update_prog_node = .none;
            }
        }
    }

    try comp.performAllTheWork(main_progress_node);
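For context, the progress nodes above follow the usual std.Progress nesting pattern of paired start/end calls; a minimal standalone sketch (node names here are illustrative, not compiler behavior):

const std = @import("std");

pub fn main() void {
    const root = std.Progress.start(.{ .root_name = "link" });
    defer root.end();
    // Nested child nodes mirror comp.link_prog_node and its children above.
    const relocs = root.start("Relocations", 10);
    defer relocs.end();
    for (0..10) |_| relocs.completeOne();
}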
@@ -3100,6 +3117,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
            try pt.populateTestFunctions();
        }

        link.updateErrorData(pt);

        try pt.processExports();
    }

@@ -3474,6 +3493,8 @@ fn addNonIncrementalStuffToCacheManifest(

    man.hash.add(comp.config.use_llvm);
    man.hash.add(comp.config.use_lib_llvm);
    man.hash.add(comp.config.use_lld);
    man.hash.add(comp.config.use_new_linker);
    man.hash.add(comp.config.is_test);
    man.hash.add(comp.config.import_memory);
    man.hash.add(comp.config.export_memory);

@@ -4073,7 +4094,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
    defer sorted_failed_analysis.deinit(gpa);
    var added_any_analysis_error = false;
    for (sorted_failed_analysis.items(.key), sorted_failed_analysis.items(.value)) |anal_unit, error_msg| {
        if (comp.incremental) {
        if (comp.config.incremental) {
            const refs = try zcu.resolveReferences();
            if (!refs.contains(anal_unit)) continue;
        }
@@ -4240,7 +4261,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {

    // TODO: eventually, this should be behind `std.debug.runtime_safety`. But right now, this is a
    // very common way for incremental compilation bugs to manifest, so let's always check it.
    if (comp.zcu) |zcu| if (comp.incremental and bundle.root_list.items.len == 0) {
    if (comp.zcu) |zcu| if (comp.config.incremental and bundle.root_list.items.len == 0) {
        for (zcu.transitive_failed_analysis.keys()) |failed_unit| {
            const refs = try zcu.resolveReferences();
            var ref = refs.get(failed_unit) orelse continue;
@@ -4949,7 +4970,7 @@ fn performAllTheWork(
        tr.stats.n_reachable_files = @intCast(zcu.alive_files.count());
    }

    if (comp.incremental) {
    if (comp.config.incremental) {
        const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
        defer update_zir_refs_node.end();
        try pt.updateZirRefs();
@@ -34,7 +34,6 @@ any_error_tracing: bool,
any_sanitize_thread: bool,
any_sanitize_c: std.zig.SanitizeC,
any_fuzz: bool,
pie: bool,
/// If this is true then linker code is responsible for making an LLVM IR
/// Module, outputting it to an object file, and then linking that together
/// with link options and other objects. Otherwise (depending on `use_lld`)
@@ -48,7 +47,10 @@ use_lib_llvm: bool,
/// and updates the final binary.
use_lld: bool,
c_frontend: CFrontend,
use_new_linker: bool,
pie: bool,
lto: std.zig.LtoMode,
incremental: bool,
/// WASI-only. Type of WASI execution model ("command" or "reactor").
/// Always set to `command` for non-WASI targets.
wasi_exec_model: std.builtin.WasiExecModel,
@@ -98,12 +100,14 @@ pub const Options = struct {
    link_libc: ?bool = null,
    link_libcpp: ?bool = null,
    link_libunwind: ?bool = null,
    pie: ?bool = null,
    use_llvm: ?bool = null,
    use_lib_llvm: ?bool = null,
    use_lld: ?bool = null,
    use_clang: ?bool = null,
    use_new_linker: ?bool = null,
    pie: ?bool = null,
    lto: ?std.zig.LtoMode = null,
    incremental: bool = false,
    /// WASI-only. Type of WASI execution model ("command" or "reactor").
    wasi_exec_model: ?std.builtin.WasiExecModel = null,
    import_memory: ?bool = null,
@@ -147,6 +151,8 @@ pub const ResolveError = error{
    LldUnavailable,
    ClangUnavailable,
    DllExportFnsRequiresWindows,
    NewLinkerIncompatibleWithLld,
    NewLinkerIncompatibleObjectFormat,
};

pub fn resolve(options: Options) ResolveError!Config {
@@ -318,33 +324,6 @@ pub fn resolve(options: Options) ResolveError!Config {
        break :b !import_memory;
    };

    const pie: bool = b: {
        switch (options.output_mode) {
            .Exe => if (target.os.tag == .fuchsia or
                (target.abi.isAndroid() and link_mode == .dynamic))
            {
                if (options.pie == false) return error.TargetRequiresPie;
                break :b true;
            },
            .Lib => if (link_mode == .dynamic) {
                if (options.pie == true) return error.DynamicLibraryPrecludesPie;
                break :b false;
            },
            .Obj => {},
        }
        if (options.any_sanitize_thread) {
            if (options.pie == false) return error.SanitizeThreadRequiresPie;
            break :b true;
        }
        if (options.pie) |pie| break :b pie;
        break :b if (options.output_mode == .Exe) switch (target.os.tag) {
            .fuchsia,
            .openbsd,
            => true,
            else => target.os.tag.isDarwin(),
        } else false;
    };

    const is_dyn_lib = switch (options.output_mode) {
        .Obj, .Exe => false,
        .Lib => link_mode == .dynamic,
@@ -399,6 +378,7 @@ pub fn resolve(options: Options) ResolveError!Config {
        // we are confident in the robustness of the backend.
        break :b !target_util.selfHostedBackendIsAsRobustAsLlvm(target);
    };
    const backend = target_util.zigBackend(target, use_llvm);

    if (options.emit_bin and options.have_zcu) {
        if (!use_lib_llvm and use_llvm) {
@@ -407,7 +387,7 @@ pub fn resolve(options: Options) ResolveError!Config {
            return error.EmittingBinaryRequiresLlvmLibrary;
        }

        if (target_util.zigBackend(target, use_llvm) == .other) {
        if (backend == .other) {
            // There is no compiler backend available for this target.
            return error.ZigLacksTargetSupport;
        }
@@ -445,6 +425,49 @@ pub fn resolve(options: Options) ResolveError!Config {
        break :b use_llvm;
    };

    const use_new_linker = b: {
        if (use_lld) {
            if (options.use_new_linker == true) return error.NewLinkerIncompatibleWithLld;
            break :b false;
        }

        if (!target_util.hasNewLinkerSupport(target.ofmt, backend)) {
            if (options.use_new_linker == true) return error.NewLinkerIncompatibleObjectFormat;
            break :b false;
        }

        if (options.use_new_linker) |x| break :b x;

        break :b options.incremental;
    };
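Condensed, the resolution rule above amounts to the following standalone sketch (a hypothetical helper, not compiler API; parameter names are illustrative):

fn resolveUseNewLinker(
    use_lld: bool,
    format_supported: bool, // stands in for target_util.hasNewLinkerSupport(...)
    requested: ?bool, // stands in for options.use_new_linker
    incremental: bool,
) error{Incompatible}!bool {
    if (use_lld or !format_supported) {
        // An explicit request that cannot be honored is an error;
        // otherwise the new linker is silently disabled.
        if (requested == true) return error.Incompatible;
        return false;
    }
    // Explicit request wins; the default tracks incremental compilation.
    return requested orelse incremental;
}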

    const pie = b: {
        switch (options.output_mode) {
            .Exe => if (target.os.tag == .fuchsia or
                (target.abi.isAndroid() and link_mode == .dynamic))
            {
                if (options.pie == false) return error.TargetRequiresPie;
                break :b true;
            },
            .Lib => if (link_mode == .dynamic) {
                if (options.pie == true) return error.DynamicLibraryPrecludesPie;
                break :b false;
            },
            .Obj => {},
        }
        if (options.any_sanitize_thread) {
            if (options.pie == false) return error.SanitizeThreadRequiresPie;
            break :b true;
        }
        if (options.pie) |pie| break :b pie;
        break :b if (options.output_mode == .Exe) switch (target.os.tag) {
            .fuchsia,
            .openbsd,
            => true,
            else => target.os.tag.isDarwin(),
        } else false;
    };

    const lto: std.zig.LtoMode = b: {
        if (!use_lld) {
            // zig ld LTO support is tracked by
@@ -479,7 +502,6 @@ pub fn resolve(options: Options) ResolveError!Config {
        };
    };

    const backend = target_util.zigBackend(target, use_llvm);
    const backend_supports_error_tracing = target_util.backendSupportsFeature(backend, .error_return_trace);

    const root_error_tracing = b: {
@@ -529,8 +551,10 @@ pub fn resolve(options: Options) ResolveError!Config {
        .any_fuzz = options.any_fuzz,
        .san_cov_trace_pc_guard = options.san_cov_trace_pc_guard,
        .root_error_tracing = root_error_tracing,
        .use_new_linker = use_new_linker,
        .pie = pie,
        .lto = lto,
        .incremental = options.incremental,
        .import_memory = import_memory,
        .export_memory = export_memory,
        .shared_memory = shared_memory,
@@ -6424,14 +6424,25 @@ pub const Alignment = enum(u6) {
        return n + 1;
    }

    const LlvmBuilderAlignment = std.zig.llvm.Builder.Alignment;

    pub fn toLlvm(this: @This()) LlvmBuilderAlignment {
        return @enumFromInt(@intFromEnum(this));
    pub fn toStdMem(a: Alignment) std.mem.Alignment {
        assert(a != .none);
        return @enumFromInt(@intFromEnum(a));
    }

    pub fn fromLlvm(other: LlvmBuilderAlignment) @This() {
        return @enumFromInt(@intFromEnum(other));
    pub fn fromStdMem(a: std.mem.Alignment) Alignment {
        const r: Alignment = @enumFromInt(@intFromEnum(a));
        assert(r != .none);
        return r;
    }

    const LlvmBuilderAlignment = std.zig.llvm.Builder.Alignment;

    pub fn toLlvm(a: Alignment) LlvmBuilderAlignment {
        return @enumFromInt(@intFromEnum(a));
    }

    pub fn fromLlvm(a: LlvmBuilderAlignment) Alignment {
        return @enumFromInt(@intFromEnum(a));
    }
};
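These conversions are bare integer casts because both enums store alignment as log2 of byte units; a quick check against the std type (assumed semantics, plain Zig):

const std = @import("std");

test "std.mem.Alignment is stored as log2 of byte units" {
    const a: std.mem.Alignment = .@"16";
    try std.testing.expectEqual(4, @intFromEnum(a)); // 2^4 == 16
    try std.testing.expectEqual(16, a.toByteUnits());
}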
156
src/Sema.zig
@@ -1193,7 +1193,7 @@ fn analyzeBodyInner(
            .elem_ptr => try sema.zirElemPtr(block, inst),
            .elem_ptr_node => try sema.zirElemPtrNode(block, inst),
            .elem_val => try sema.zirElemVal(block, inst),
            .elem_val_node => try sema.zirElemValNode(block, inst),
            .elem_ptr_load => try sema.zirElemPtrLoad(block, inst),
            .elem_val_imm => try sema.zirElemValImm(block, inst),
            .elem_type => try sema.zirElemType(block, inst),
            .indexable_ptr_elem_type => try sema.zirIndexablePtrElemType(block, inst),
@@ -1211,8 +1211,8 @@ fn analyzeBodyInner(
            .error_value => try sema.zirErrorValue(block, inst),
            .field_ptr => try sema.zirFieldPtr(block, inst),
            .field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
            .field_val => try sema.zirFieldVal(block, inst),
            .field_val_named => try sema.zirFieldValNamed(block, inst),
            .field_ptr_load => try sema.zirFieldPtrLoad(block, inst),
            .field_ptr_named_load => try sema.zirFieldPtrNamedLoad(block, inst),
            .func => try sema.zirFunc(block, inst, false),
            .func_inferred => try sema.zirFunc(block, inst, true),
            .func_fancy => try sema.zirFuncFancy(block, inst),
@@ -3032,7 +3032,7 @@ fn zirStructDecl(
    });
    errdefer pt.destroyNamespace(new_namespace_index);

    if (pt.zcu.comp.incremental) {
    if (pt.zcu.comp.config.incremental) {
        try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = tracked_inst });
    }

@@ -3430,7 +3430,7 @@ fn zirUnionDecl(
    });
    errdefer pt.destroyNamespace(new_namespace_index);

    if (pt.zcu.comp.incremental) {
    if (pt.zcu.comp.config.incremental) {
        try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = tracked_inst });
    }

@@ -3756,9 +3756,9 @@ fn zirAllocExtended(
    const pt = sema.pt;
    const gpa = sema.gpa;
    const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
    const var_src = block.nodeOffset(extra.data.src_node);
    const ty_src = block.src(.{ .node_offset_var_decl_ty = extra.data.src_node });
    const align_src = block.src(.{ .node_offset_var_decl_align = extra.data.src_node });
    const init_src = block.src(.{ .node_offset_var_decl_init = extra.data.src_node });
    const small: Zir.Inst.AllocExtended.Small = @bitCast(extended.small);

    var extra_index: usize = extra.end;
@@ -3777,7 +3777,7 @@ fn zirAllocExtended(

    if (block.isComptime() or small.is_comptime) {
        if (small.has_type) {
            return sema.analyzeComptimeAlloc(block, init_src, var_ty, alignment);
            return sema.analyzeComptimeAlloc(block, var_src, var_ty, alignment);
        } else {
            try sema.air_instructions.append(gpa, .{
                .tag = .inferred_alloc_comptime,
@@ -3792,7 +3792,7 @@ fn zirAllocExtended(
    }

    if (small.has_type and try var_ty.comptimeOnlySema(pt)) {
        return sema.analyzeComptimeAlloc(block, init_src, var_ty, alignment);
        return sema.analyzeComptimeAlloc(block, var_src, var_ty, alignment);
    }

    if (small.has_type) {
@@ -3802,8 +3802,8 @@ fn zirAllocExtended(
    const target = pt.zcu.getTarget();
    try var_ty.resolveLayout(pt);
    if (sema.func_is_naked and try var_ty.hasRuntimeBitsSema(pt)) {
        const var_src = block.src(.{ .node_offset_store_ptr = extra.data.src_node });
        return sema.fail(block, var_src, "local variable in naked function", .{});
        const store_src = block.src(.{ .node_offset_store_ptr = extra.data.src_node });
        return sema.fail(block, store_src, "local variable in naked function", .{});
    }
    const ptr_type = try sema.pt.ptrTypeSema(.{
        .child = var_ty.toIntern(),
@@ -3842,9 +3842,9 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
    const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
    const var_src = block.nodeOffset(inst_data.src_node);
    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
    return sema.analyzeComptimeAlloc(block, var_src, var_ty, .none);
}

fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4254,11 +4254,11 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
    const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
    const var_src = block.nodeOffset(inst_data.src_node);

    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    if (block.isComptime() or try var_ty.comptimeOnlySema(pt)) {
        return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
        return sema.analyzeComptimeAlloc(block, var_src, var_ty, .none);
    }
    if (sema.func_is_naked and try var_ty.hasRuntimeBitsSema(pt)) {
        const mut_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node });
@@ -4284,14 +4284,14 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
    const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
    const var_src = block.nodeOffset(inst_data.src_node);
    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    if (block.isComptime()) {
        return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
        return sema.analyzeComptimeAlloc(block, var_src, var_ty, .none);
    }
    if (sema.func_is_naked and try var_ty.hasRuntimeBitsSema(pt)) {
        const var_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node });
        return sema.fail(block, var_src, "local variable in naked function", .{});
        const store_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node });
        return sema.fail(block, store_src, "local variable in naked function", .{});
    }
    try sema.validateVarType(block, ty_src, var_ty, false);
    const target = pt.zcu.getTarget();
@@ -6217,7 +6217,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
    if (ptr_info.byte_offset != 0) {
        return sema.fail(block, ptr_src, "TODO: export pointer in middle of value", .{});
    }
    if (options.linkage == .internal) return;
    if (zcu.llvm_object != null and options.linkage == .internal) return;
    const export_ty = Value.fromInterned(uav.val).typeOf(zcu);
    if (!try sema.validateExternType(export_ty, .other)) {
        return sema.failWithOwnedErrorMsg(block, msg: {
@@ -6256,7 +6256,7 @@ pub fn analyzeExport(
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;

    if (options.linkage == .internal)
    if (zcu.llvm_object != null and options.linkage == .internal)
        return;

    try sema.ensureNavResolved(block, src, orig_nav_index, .fully);
@@ -7709,7 +7709,7 @@ fn analyzeCall(
        // TODO: comptime call memoization is currently not supported under incremental compilation
        // since dependencies are not marked on callers. If we want to keep this around (we should
        // check that it's worthwhile first!), each memoized call needs an `AnalUnit`.
        if (zcu.comp.incremental) break :m false;
        if (zcu.comp.config.incremental) break :m false;
        if (!block.isComptime()) break :m false;
        for (args) |a| {
            const val = (try sema.resolveValue(a)).?;
@@ -9711,7 +9711,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
    return block.addBitCast(dest_ty, operand);
}

fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
fn zirFieldPtrLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

@@ -9727,8 +9727,8 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
        sema.code.nullTerminatedString(extra.field_name_start),
        .no_embedded_nulls,
    );
    const object = try sema.resolveInst(extra.lhs);
    return sema.fieldVal(block, src, object, field_name, field_name_src);
    const object_ptr = try sema.resolveInst(extra.lhs);
    return fieldPtrLoad(sema, block, src, object_ptr, field_name, field_name_src);
}

fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9779,7 +9779,7 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
    }
}

fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
fn zirFieldPtrNamedLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

@@ -9787,9 +9787,9 @@ fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
    const src = block.nodeOffset(inst_data.src_node);
    const field_name_src = block.builtinCallArgSrc(inst_data.src_node, 1);
    const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
    const object = try sema.resolveInst(extra.lhs);
    const object_ptr = try sema.resolveInst(extra.lhs);
    const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .simple = .field_name });
    return sema.fieldVal(block, src, object, field_name, field_name_src);
    return fieldPtrLoad(sema, block, src, object_ptr, field_name, field_name_src);
}

fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -10102,7 +10102,7 @@ fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
    return sema.elemVal(block, src, array, elem_index, src, false);
}

fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
fn zirElemPtrLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

@@ -10110,10 +10110,18 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
    const src = block.nodeOffset(inst_data.src_node);
    const elem_index_src = block.src(.{ .node_offset_array_access_index = inst_data.src_node });
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const array = try sema.resolveInst(extra.lhs);
    const array_ptr = try sema.resolveInst(extra.lhs);
    const uncoerced_elem_index = try sema.resolveInst(extra.rhs);
    if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| {
        const array_ptr_ty = sema.typeOf(array_ptr);
        if (try sema.pointerDeref(block, src, array_ptr_val, array_ptr_ty)) |array_val| {
            const array: Air.Inst.Ref = .fromValue(array_val);
            return elemVal(sema, block, src, array, uncoerced_elem_index, elem_index_src, true);
        }
    }
    const elem_index = try sema.coerce(block, .usize, uncoerced_elem_index, elem_index_src);
    return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
    const elem_ptr = try elemPtr(sema, block, src, array_ptr, elem_index, elem_index_src, false, true);
    return analyzeLoad(sema, block, src, elem_ptr, elem_index_src);
}

fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -13612,7 +13620,6 @@ fn maybeErrorUnwrap(
            .str,
            .as_node,
            .panic,
            .field_val,
            => {},
            else => return false,
        }
@@ -13631,7 +13638,6 @@ fn maybeErrorUnwrap(
            },
            .str => try sema.zirStr(inst),
            .as_node => try sema.zirAsNode(block, inst),
            .field_val => try sema.zirFieldVal(block, inst),
            .@"unreachable" => {
                try safetyPanicUnwrapError(sema, block, operand_src, operand);
                return true;
@@ -15996,7 +16002,6 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value {
fn analyzeArithmetic(
    sema: *Sema,
    block: *Block,
    /// TODO performance investigation: make this comptime?
    zir_tag: Zir.Inst.Tag,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
@@ -16195,6 +16200,11 @@ fn analyzePtrArithmetic(
    const ptr_info = ptr_ty.ptrInfo(zcu);
    assert(ptr_info.flags.size == .many or ptr_info.flags.size == .c);

    if ((try sema.typeHasOnePossibleValue(.fromInterned(ptr_info.child))) != null) {
        // Offset will be multiplied by zero, so result is the same as the base pointer.
        return ptr;
    }

    const new_ptr_ty = t: {
        // Calculate the new pointer alignment.
        // This code is duplicated in `Type.elemPtrType`.
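A type with exactly one possible value is zero-bit, so the element stride is zero and the pointer arithmetic above cannot move the pointer; a plain-Zig illustration (not compiler internals):

const std = @import("std");

test "one-possible-value element types carry no runtime data" {
    const OnlyOne = struct { a: u0, b: void };
    // Element stride is zero, so `base + i * @sizeOf(OnlyOne)` is always `base`.
    try std.testing.expectEqual(0, @sizeOf(OnlyOne));
}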
@@ -26666,6 +26676,33 @@ fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
    }
}

fn fieldPtrLoad(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const object_ptr_ty = sema.typeOf(object_ptr);
    const pointee_ty = object_ptr_ty.childType(zcu);
    if (try typeHasOnePossibleValue(sema, pointee_ty)) |opv| {
        const object: Air.Inst.Ref = .fromValue(opv);
        return fieldVal(sema, block, src, object, field_name, field_name_src);
    }

    if (try sema.resolveDefinedValue(block, src, object_ptr)) |object_ptr_val| {
        if (try sema.pointerDeref(block, src, object_ptr_val, object_ptr_ty)) |object_val| {
            const object: Air.Inst.Ref = .fromValue(object_val);
            return fieldVal(sema, block, src, object, field_name, field_name_src);
        }
    }
    const field_ptr = try sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false);
    return analyzeLoad(sema, block, src, field_ptr, field_name_src);
}

fn fieldVal(
    sema: *Sema,
    block: *Block,
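In source terms, the fused field-pointer-load presumably corresponds to loading a field through a pointer, with comptime-known pointers folded down to a direct fieldVal; an illustrative (hypothetical) example:

const S = struct { x: u32 };

fn getX(p: *const S) u32 {
    return p.x; // a field pointer followed by a load, handled as one operation above
}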
@@ -26885,7 +26922,7 @@ fn fieldPtr(
    const ptr_info = object_ty.ptrInfo(zcu);
    const new_ptr_ty = try pt.ptrTypeSema(.{
        .child = Type.fromInterned(ptr_info.child).childType(zcu).toIntern(),
        .sentinel = if (object_ty.sentinel(zcu)) |s| s.toIntern() else .none,
        .sentinel = if (inner_ty.sentinel(zcu)) |s| s.toIntern() else .none,
        .flags = .{
            .size = .many,
            .alignment = ptr_info.flags.alignment,
@@ -27413,15 +27450,9 @@ fn structFieldPtrByIndex(

    if (struct_type.layout == .@"packed") {
        assert(!field_is_comptime);
        switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt)) {
            .bit_ptr => |packed_offset| {
                ptr_ty_data.flags.alignment = parent_align;
                ptr_ty_data.packed_offset = packed_offset;
            },
            .byte_ptr => |ptr_info| {
                ptr_ty_data.flags.alignment = ptr_info.alignment;
            },
        }
        const packed_offset = struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt);
        ptr_ty_data.flags.alignment = parent_align;
        ptr_ty_data.packed_offset = packed_offset;
    } else if (struct_type.layout == .@"extern") {
        assert(!field_is_comptime);
        // For extern structs, field alignment might be bigger than type's
@@ -27965,6 +27996,7 @@ fn elemVal(
    }
}

/// Called when the index or indexable is runtime known.
fn validateRuntimeElemAccess(
    sema: *Sema,
    block: *Block,
@@ -28229,6 +28261,10 @@ fn elemPtrArray(
        try sema.validateRuntimeValue(block, array_ptr_src, array_ptr);
    }

    if (offset == null and array_ty.zigTypeTag(zcu) == .vector) {
        return sema.fail(block, elem_index_src, "vector index not comptime known", .{});
    }

    // Runtime check is only needed if unable to comptime check.
    if (oob_safety and block.wantSafety() and offset == null) {
        const len_inst = try pt.intRef(.usize, array_len);
@@ -30627,6 +30663,19 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
        } };
        return false;
    }

    if (inst_info.packed_offset.host_size != dest_info.packed_offset.host_size or
        inst_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset)
    {
        in_memory_result.* = .{ .ptr_bit_range = .{
            .actual_host = inst_info.packed_offset.host_size,
            .wanted_host = dest_info.packed_offset.host_size,
            .actual_offset = inst_info.packed_offset.bit_offset,
            .wanted_offset = dest_info.packed_offset.bit_offset,
        } };
        return false;
    }

    return true;
}

@@ -31152,7 +31201,7 @@ fn addReferenceEntry(
        .func => |f| assert(ip.unwrapCoercedFunc(f) == f), // for `.{ .func = f }`, `f` must be uncoerced
        else => {},
    }
    if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return;
    if (!zcu.comp.config.incremental and zcu.comp.reference_trace == 0) return;
    const gop = try sema.references.getOrPut(sema.gpa, referenced_unit);
    if (gop.found_existing) return;
    try zcu.addUnitReference(sema.owner, referenced_unit, src, inline_frame: {
@@ -31169,7 +31218,7 @@ pub fn addTypeReferenceEntry(
    referenced_type: InternPool.Index,
) !void {
    const zcu = sema.pt.zcu;
    if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return;
    if (!zcu.comp.config.incremental and zcu.comp.reference_trace == 0) return;
    const gop = try sema.type_references.getOrPut(sema.gpa, referenced_type);
    if (gop.found_existing) return;
    try zcu.addTypeReference(sema.owner, referenced_type, src);
@@ -31418,19 +31467,6 @@ fn analyzeLoad(
        }
    }

    if (ptr_ty.ptrInfo(zcu).flags.vector_index == .runtime) {
        const ptr_inst = ptr.toIndex().?;
        const air_tags = sema.air_instructions.items(.tag);
        if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) {
            const ty_pl = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].ty_pl;
            const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
            return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs);
        }
        return sema.fail(block, ptr_src, "unable to determine vector element index of type '{f}'", .{
            ptr_ty.fmt(pt),
        });
    }

    return block.addTyOp(.load, elem_ty, ptr);
}

@@ -34947,7 +34983,7 @@ fn resolveInferredErrorSet(
    const resolved_ty = func.resolvedErrorSetUnordered(ip);
    if (resolved_ty != .none) return resolved_ty;

    if (zcu.analysis_in_progress.contains(AnalUnit.wrap(.{ .func = func_index }))) {
    if (zcu.analysis_in_progress.contains(.wrap(.{ .func = func_index }))) {
        return sema.fail(block, src, "unable to resolve inferred error set", .{});
    }

@@ -36832,7 +36868,7 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool

pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
    const pt = sema.pt;
    if (!pt.zcu.comp.incremental) return;
    if (!pt.zcu.comp.config.incremental) return;

    const gop = try sema.dependencies.getOrPut(sema.gpa, dependee);
    if (gop.found_existing) return;
41
src/Type.zig
@@ -3514,22 +3514,17 @@ pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
    return .{ cur_ty, cur_len };
}

pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, pt: Zcu.PerThread) union(enum) {
    /// The result is a bit-pointer with the same value and a new packed offset.
    bit_ptr: InternPool.Key.PtrType.PackedOffset,
    /// The result is a standard pointer.
    byte_ptr: struct {
        /// The byte offset of the field pointer from the parent pointer value.
        offset: u64,
        /// The alignment of the field pointer type.
        alignment: InternPool.Alignment,
    },
} {
/// Returns a bit-pointer with the same value and a new packed offset.
pub fn packedStructFieldPtrInfo(
    struct_ty: Type,
    parent_ptr_ty: Type,
    field_idx: u32,
    pt: Zcu.PerThread,
) InternPool.Key.PtrType.PackedOffset {
    comptime assert(Type.packed_struct_layout_version == 2);

    const zcu = pt.zcu;
    const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
    const field_ty = struct_ty.fieldType(field_idx, zcu);

    var bit_offset: u16 = 0;
    var running_bits: u16 = 0;
@@ -3552,28 +3547,10 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
        bit_offset,
    };

    // If the field happens to be byte-aligned, simplify the pointer type.
    // We can only do this if the pointee's bit size matches its ABI byte size,
    // so that loads and stores do not interfere with surrounding packed bits.
    //
    // TODO: we do not attempt this with big-endian targets yet because of nested
    // structs and floats. I need to double-check the desired behavior for big endian
    // targets before adding the necessary complications to this code. This will not
    // cause miscompilations; it only means the field pointer uses bit masking when it
    // might not be strictly necessary.
    if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
        const byte_offset = res_bit_offset / 8;
        const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?));
        return .{ .byte_ptr = .{
            .offset = byte_offset,
            .alignment = new_align,
        } };
    }

    return .{ .bit_ptr = .{
    return .{
        .host_size = res_host_size,
        .bit_offset = res_bit_offset,
    } };
    };
}

pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {
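A worked example of the packed offsets this function computes, using plain Zig builtins on a user-level packed struct:

const std = @import("std");

test "packed struct field bit offsets" {
    const P = packed struct { a: u3, b: u5, c: u8 };
    try std.testing.expectEqual(3, @bitOffsetOf(P, "b")); // after the 3 bits of `a`
    try std.testing.expectEqual(8, @bitOffsetOf(P, "c"));
    try std.testing.expectEqual(2, @sizeOf(P)); // 16-bit host integer
}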
@@ -23,7 +23,7 @@ pub fn format(val: Value, writer: *std.Io.Writer) !void {

/// This is a debug function. In order to print values in a meaningful way
/// we also need access to the type.
pub fn dump(start_val: Value, w: std.Io.Writer) std.Io.Writer.Error!void {
pub fn dump(start_val: Value, w: *std.Io.Writer) std.Io.Writer.Error!void {
    try w.print("(interned: {})", .{start_val.toIntern()});
}

@@ -2149,15 +2149,18 @@ pub fn makeBool(x: bool) Value {
    return if (x) .true else .false;
}

/// `parent_ptr` must be a single-pointer to some optional.
/// `parent_ptr` must be a single-pointer or C pointer to some optional.
///
/// Returns a pointer to the payload of the optional.
///
/// May perform type resolution.
pub fn ptrOptPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
    const zcu = pt.zcu;
    const parent_ptr_ty = parent_ptr.typeOf(zcu);
    const opt_ty = parent_ptr_ty.childType(zcu);
    const ptr_size = parent_ptr_ty.ptrSize(zcu);

    assert(parent_ptr_ty.ptrSize(zcu) == .one);
    assert(ptr_size == .one or ptr_size == .c);
    assert(opt_ty.zigTypeTag(zcu) == .optional);

    const result_ty = try pt.ptrTypeSema(info: {
@@ -2212,9 +2215,12 @@ pub fn ptrEuPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
    } }));
}

/// `parent_ptr` must be a single-pointer to a struct, union, or slice.
/// `parent_ptr` must be a single-pointer or C pointer to a struct, union, or slice.
///
/// Returns a pointer to the aggregate field at the specified index.
///
/// For slices, uses `slice_ptr_index` and `slice_len_index`.
///
/// May perform type resolution.
pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
    const zcu = pt.zcu;
@@ -2222,7 +2228,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
    const aggregate_ty = parent_ptr_ty.childType(zcu);

    const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
    assert(parent_ptr_info.flags.size == .one);
    assert(parent_ptr_info.flags.size == .one or parent_ptr_info.flags.size == .c);

    // Exiting this `switch` indicates that the `field` pointer representation should be used.
    // `field_align` may be `.none` to represent the natural alignment of `field_ty`, but is not necessarily.
@@ -2249,32 +2255,18 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
            });
            return parent_ptr.getOffsetPtr(byte_off, result_ty, pt);
        },
        .@"packed" => switch (aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, pt)) {
            .bit_ptr => |packed_offset| {
                const result_ty = try pt.ptrType(info: {
                    var new = parent_ptr_info;
                    new.packed_offset = packed_offset;
                    new.child = field_ty.toIntern();
                    if (new.flags.alignment == .none) {
                        new.flags.alignment = try aggregate_ty.abiAlignmentSema(pt);
                    }
                    break :info new;
                });
                return pt.getCoerced(parent_ptr, result_ty);
            },
            .byte_ptr => |ptr_info| {
                const result_ty = try pt.ptrTypeSema(info: {
                    var new = parent_ptr_info;
                    new.child = field_ty.toIntern();
                    new.packed_offset = .{
                        .host_size = 0,
                        .bit_offset = 0,
                    };
                    new.flags.alignment = ptr_info.alignment;
                    break :info new;
                });
                return parent_ptr.getOffsetPtr(ptr_info.offset, result_ty, pt);
            },
        .@"packed" => {
            const packed_offset = aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, pt);
            const result_ty = try pt.ptrType(info: {
                var new = parent_ptr_info;
                new.packed_offset = packed_offset;
                new.child = field_ty.toIntern();
                if (new.flags.alignment == .none) {
                    new.flags.alignment = try aggregate_ty.abiAlignmentSema(pt);
                }
                break :info new;
            });
            return pt.getCoerced(parent_ptr, result_ty);
        },
    }
},
@@ -3166,7 +3166,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
}

pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
    if (!zcu.comp.incremental) return null;
    if (!zcu.comp.config.incremental) return null;

    if (zcu.outdated.count() == 0) {
        // Any units in `potentially_outdated` must just be stuck in loops with one another: none of those
@@ -700,7 +700,7 @@ fn analyzeMemoizedState(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage)

    const unit: AnalUnit = .wrap(.{ .memoized_state = stage });

    try zcu.analysis_in_progress.put(gpa, unit, {});
    try zcu.analysis_in_progress.putNoClobber(gpa, unit, {});
    defer assert(zcu.analysis_in_progress.swapRemove(unit));

    // Before we begin, collect:

@@ -864,7 +864,7 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu
    const file = zcu.fileByIndex(inst_resolved.file);
    const zir = file.zir.?;

    try zcu.analysis_in_progress.put(gpa, anal_unit, {});
    try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
    defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));

    var analysis_arena: std.heap.ArenaAllocator = .init(gpa);

@@ -958,6 +958,8 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu

    log.debug("ensureNavValUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

    assert(!zcu.analysis_in_progress.contains(anal_unit));

    // Determine whether or not this `Nav`'s value is outdated. This also includes checking if the
    // status is `.unresolved`, which indicates that the value is outdated because it has *never*
    // been analyzed so far.

@@ -1090,10 +1092,19 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
    const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
    const file = zcu.fileByIndex(inst_resolved.file);
    const zir = file.zir.?;
    const zir_decl = zir.getDeclaration(inst_resolved.inst);

    try zcu.analysis_in_progress.put(gpa, anal_unit, {});
    try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
    errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);

    // If there's no type body, we are also resolving the type here.
    if (zir_decl.type_body == null) {
        try zcu.analysis_in_progress.putNoClobber(gpa, .wrap(.{ .nav_ty = nav_id }), {});
    }
    errdefer if (zir_decl.type_body == null) {
        _ = zcu.analysis_in_progress.swapRemove(.wrap(.{ .nav_ty = nav_id }));
    };

    var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
    defer analysis_arena.deinit();

@@ -1133,8 +1144,6 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
    };
    defer block.instructions.deinit(gpa);

    const zir_decl = zir.getDeclaration(inst_resolved.inst);

    const ty_src = block.src(.{ .node_offset_var_decl_ty = .zero });
    const init_src = block.src(.{ .node_offset_var_decl_init = .zero });
    const align_src = block.src(.{ .node_offset_var_decl_align = .zero });
@@ -1305,6 +1314,9 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr

    // Mark the unit as completed before evaluating the export!
    assert(zcu.analysis_in_progress.swapRemove(anal_unit));
    if (zir_decl.type_body == null) {
        assert(zcu.analysis_in_progress.swapRemove(.wrap(.{ .nav_ty = nav_id })));
    }

    if (zir_decl.linkage == .@"export") {
        const export_src = block.src(.{ .token_offset = @enumFromInt(@intFromBool(zir_decl.is_pub)) });
@@ -1347,6 +1359,8 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc

    log.debug("ensureNavTypeUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

    assert(!zcu.analysis_in_progress.contains(anal_unit));

    const type_resolved_by_value: bool = from_val: {
        const analysis = nav.analysis orelse break :from_val false;
        const inst_resolved = analysis.zir_index.resolveFull(ip) orelse break :from_val false;
@@ -1463,8 +1477,8 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr
    const file = zcu.fileByIndex(inst_resolved.file);
    const zir = file.zir.?;

    try zcu.analysis_in_progress.put(gpa, anal_unit, {});
    defer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
    try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
    defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));

    const zir_decl = zir.getDeclaration(inst_resolved.inst);
    const type_body = zir_decl.type_body.?;
@@ -1587,6 +1601,8 @@ pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, func_index: InternPool.Index) Z

    log.debug("ensureFuncBodyUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

    assert(!zcu.analysis_in_progress.contains(anal_unit));

    const func = zcu.funcInfo(func_index);

    assert(func.ty == func.uncoerced_ty); // analyze the body of the original function, not a coerced one
@@ -1799,7 +1815,7 @@ fn createFileRootStruct(
    wip_ty.setName(ip, try file.internFullyQualifiedName(pt), .none);
    ip.namespacePtr(namespace_index).owner_type = wip_ty.index;

    if (zcu.comp.incremental) {
    if (zcu.comp.config.incremental) {
        try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = tracked_inst });
    }

@@ -2781,7 +2797,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
    const file = zcu.fileByIndex(inst_info.file);
    const zir = file.zir.?;

    try zcu.analysis_in_progress.put(gpa, anal_unit, {});
    try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
    errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);

    func.setAnalyzed(ip);
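The put-to-putNoClobber changes above rely on the std hashmap distinction sketched here (std.AutoArrayHashMapUnmanaged assumed; the diff turns a silent overwrite into an assertion that each unit is entered at most once):

const std = @import("std");

test "putNoClobber asserts the key is new" {
    const gpa = std.testing.allocator;
    var map: std.AutoArrayHashMapUnmanaged(u32, void) = .empty;
    defer map.deinit(gpa);
    try map.putNoClobber(gpa, 1, {}); // ok: key 1 is new
    // map.putNoClobber(gpa, 1, {}) would hit an assertion on the duplicate key;
    try map.put(gpa, 1, {}); // put silently overwrites instead.
    try std.testing.expect(map.swapRemove(1));
}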
@@ -858,9 +858,11 @@ pub fn generateLazy(
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    lazy_sym: link.File.LazySymbol,
    code: *std.ArrayListUnmanaged(u8),
    atom_index: u32,
    w: *std.Io.Writer,
    debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
) (CodeGenError || std.Io.Writer.Error)!void {
    _ = atom_index;
    const comp = bin_file.comp;
    const gpa = comp.gpa;
    const mod = comp.root_mod;
@@ -914,7 +916,7 @@ pub fn generateLazy(
        },
        .bin_file = bin_file,
        .debug_output = debug_output,
        .code = code,
        .w = w,
        .prev_di_pc = undefined, // no debug info yet
        .prev_di_line = undefined, // no debug info yet
        .prev_di_column = undefined, // no debug info yet

@@ -3,7 +3,7 @@
bin_file: *link.File,
lower: Lower,
debug_output: link.File.DebugInfoOutput,
code: *std.ArrayListUnmanaged(u8),
w: *std.Io.Writer,

prev_di_line: u32,
prev_di_column: u32,
@@ -13,7 +13,7 @@ prev_di_pc: usize,
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,
relocs: std.ArrayListUnmanaged(Reloc) = .empty,

pub const Error = Lower.Error || error{
pub const Error = Lower.Error || std.Io.Writer.Error || error{
    EmitFail,
};

@@ -25,13 +25,13 @@ pub fn emitMir(emit: *Emit) Error!void {
        try emit.code_offset_mapping.putNoClobber(
            emit.lower.allocator,
            mir_index,
            @intCast(emit.code.items.len),
            @intCast(emit.w.end),
        );
        const lowered = try emit.lower.lowerMir(mir_index, .{ .allow_frame_locs = true });
        var lowered_relocs = lowered.relocs;
        for (lowered.insts, 0..) |lowered_inst, lowered_index| {
            const start_offset: u32 = @intCast(emit.code.items.len);
            std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), lowered_inst.toU32(), .little);
            const start_offset: u32 = @intCast(emit.w.end);
            try emit.w.writeInt(u32, lowered_inst.toU32(), .little);

            while (lowered_relocs.len > 0 and
                lowered_relocs[0].lowered_inst_index == lowered_index) : ({
@@ -175,7 +175,7 @@ fn fixupRelocs(emit: *Emit) Error!void {
            return emit.fail("relocation target not found!", .{});

        const disp = @as(i32, @intCast(target)) - @as(i32, @intCast(reloc.source));
        const code: *[4]u8 = emit.code.items[reloc.source + reloc.offset ..][0..4];
        const code = emit.w.buffered()[reloc.source + reloc.offset ..][0..4];

        switch (reloc.fmt) {
            .J => riscv_util.writeInstJ(code, @bitCast(disp)),
@@ -187,7 +187,7 @@ fn fixupRelocs(emit: *Emit) Error!void {

fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
    const delta_line = @as(i33, line) - @as(i33, emit.prev_di_line);
    const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
    const delta_pc: usize = emit.w.end - emit.prev_di_pc;
    log.debug(" (advance pc={d} and line={d})", .{ delta_pc, delta_line });
    switch (emit.debug_output) {
        .dwarf => |dw| {
@@ -196,7 +196,7 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
            try dw.advancePCAndLine(delta_line, delta_pc);
            emit.prev_di_line = line;
            emit.prev_di_column = column;
            emit.prev_di_pc = emit.code.items.len;
            emit.prev_di_pc = emit.w.end;
        },
        .none => {},
    }
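The emit backends here migrate from an ArrayList code buffer to *std.Io.Writer; a minimal sketch of the relevant Writer surface, using the fixed-buffer variant (assumed API):

const std = @import("std");

test "Io.Writer: end tracks bytes written, buffered() allows backpatching" {
    var buf: [16]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.writeInt(u32, 0xAAAA_AAAA, .little);
    const patch_at = w.end; // offset of the next instruction, like code_offset_mapping above
    try w.writeInt(u32, 0, .little); // placeholder, fixed up later
    std.mem.writeInt(u32, w.buffered()[patch_at..][0..4], 0xBBBB_BBBB, .little);
    try std.testing.expectEqual(@as(usize, 8), w.end);
}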
@ -109,9 +109,11 @@ pub fn emit(
|
|||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
func_index: InternPool.Index,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
atom_index: u32,
|
||||
w: *std.Io.Writer,
|
||||
debug_output: link.File.DebugInfoOutput,
|
||||
) codegen.CodeGenError!void {
|
||||
) (codegen.CodeGenError || std.Io.Writer.Error)!void {
|
||||
_ = atom_index;
|
||||
const zcu = pt.zcu;
|
||||
const comp = zcu.comp;
|
||||
const gpa = comp.gpa;
|
||||
|
|
@ -132,7 +134,7 @@ pub fn emit(
|
|||
},
|
||||
.bin_file = lf,
|
||||
.debug_output = debug_output,
|
||||
.code = code,
|
||||
.w = w,
|
||||
.prev_di_pc = 0,
|
||||
.prev_di_line = func.lbrace_line,
|
||||
.prev_di_column = func.lbrace_column,

@@ -21,7 +21,7 @@ debug_output: link.File.DebugInfoOutput,
 target: *const std.Target,
 err_msg: ?*ErrorMsg = null,
 src_loc: Zcu.LazySrcLoc,
-code: *std.ArrayListUnmanaged(u8),
+w: *std.Io.Writer,

 prev_di_line: u32,
 prev_di_column: u32,

@@ -40,7 +40,7 @@ branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUn
 /// instruction
 code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,

-const InnerError = error{
+const InnerError = std.Io.Writer.Error || error{
     OutOfMemory,
     EmitFail,
 };

@@ -292,7 +292,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
     .bpcc => switch (tag) {
         .bpcc => {
             const branch_predict_int = emit.mir.instructions.items(.data)[inst].branch_predict_int;
-            const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_int.inst).?)) - @as(i64, @intCast(emit.code.items.len));
+            const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_int.inst).?)) - @as(i64, @intCast(emit.w.end));
             log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });

             try emit.writeInstruction(

@@ -310,7 +310,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
     .bpr => switch (tag) {
         .bpr => {
             const branch_predict_reg = emit.mir.instructions.items(.data)[inst].branch_predict_reg;
-            const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_reg.inst).?)) - @as(i64, @intCast(emit.code.items.len));
+            const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_reg.inst).?)) - @as(i64, @intCast(emit.w.end));
             log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });

             try emit.writeInstruction(

@@ -494,13 +494,13 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {

 fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
     const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
-    const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
+    const delta_pc: usize = emit.w.end - emit.prev_di_pc;
     switch (emit.debug_output) {
         .dwarf => |dbg_out| {
             try dbg_out.advancePCAndLine(delta_line, delta_pc);
             emit.prev_di_line = line;
             emit.prev_di_column = column;
-            emit.prev_di_pc = emit.code.items.len;
+            emit.prev_di_pc = emit.w.end;
         },
         else => {},
     }

@@ -675,13 +675,8 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
 }

 fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
-    const comp = emit.bin_file.comp;
-    const gpa = comp.gpa;
-
     // SPARCv9 instructions are always arranged in BE regardless of the
     // endianness mode the CPU is running in (Section 3.1 of the ISA specification).
     // This is to ease porting in case someone wants to do a LE SPARCv9 backend.
-    const endian: Endian = .big;
-
-    std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
+    try emit.w.writeInt(u32, instruction.toU32(), .big);
 }
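
Under the new interface the SPARC backend writes each instruction word straight through the writer with an explicit big-endian byte order, instead of staging it in an ArrayList. A hedged sketch of just that write path (the `emitWord` helper is hypothetical):

```zig
const std = @import("std");

// SPARCv9 instruction words are big-endian regardless of host endianness,
// so the byte order is fixed at the call site rather than computed.
fn emitWord(w: *std.Io.Writer, word: u32) std.Io.Writer.Error!void {
    try w.writeInt(u32, word, .big);
}

test emitWord {
    var buf: [8]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try emitWord(&w, 0x01000000); // SPARC `nop` (sethi 0, %g0)
    try std.testing.expectEqualSlices(u8, &.{ 0x01, 0x00, 0x00, 0x00 }, w.buffered());
}
```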

@@ -380,9 +380,11 @@ pub fn emit(
     pt: Zcu.PerThread,
     src_loc: Zcu.LazySrcLoc,
     func_index: InternPool.Index,
-    code: *std.ArrayListUnmanaged(u8),
+    atom_index: u32,
+    w: *std.Io.Writer,
     debug_output: link.File.DebugInfoOutput,
-) codegen.CodeGenError!void {
+) (codegen.CodeGenError || std.Io.Writer.Error)!void {
+    _ = atom_index;
     const zcu = pt.zcu;
     const func = zcu.funcInfo(func_index);
     const nav = func.owner_nav;

@@ -393,7 +395,7 @@ pub fn emit(
         .debug_output = debug_output,
         .target = &mod.resolved_target.result,
         .src_loc = src_loc,
-        .code = code,
+        .w = w,
         .prev_di_pc = 0,
         .prev_di_line = func.lbrace_line,
         .prev_di_column = func.lbrace_column,

File diff suppressed because it is too large
@@ -6,7 +6,7 @@ pt: Zcu.PerThread,
 pic: bool,
 atom_index: u32,
 debug_output: link.File.DebugInfoOutput,
-code: *std.ArrayListUnmanaged(u8),
+w: *std.Io.Writer,

 prev_di_loc: Loc,
 /// Relative to the beginning of `code`.

@@ -18,7 +18,8 @@ table_relocs: std.ArrayListUnmanaged(TableReloc),

 pub const Error = Lower.Error || error{
     EmitFail,
-} || link.File.UpdateDebugInfoError;
+    NotFile,
+} || std.posix.MMapError || std.posix.MRemapError || link.File.UpdateDebugInfoError;

 pub fn emitMir(emit: *Emit) Error!void {
     const comp = emit.bin_file.comp;

@@ -29,12 +30,12 @@ pub fn emitMir(emit: *Emit) Error!void {
     var local_index: usize = 0;
     for (0..emit.lower.mir.instructions.len) |mir_i| {
         const mir_index: Mir.Inst.Index = @intCast(mir_i);
-        emit.code_offset_mapping.items[mir_index] = @intCast(emit.code.items.len);
+        emit.code_offset_mapping.items[mir_index] = @intCast(emit.w.end);
         const lowered = try emit.lower.lowerMir(mir_index);
         var lowered_relocs = lowered.relocs;
         lowered_inst: for (lowered.insts, 0..) |lowered_inst, lowered_index| {
             if (lowered_inst.prefix == .directive) {
-                const start_offset: u32 = @intCast(emit.code.items.len);
+                const start_offset: u32 = @intCast(emit.w.end);
                 switch (emit.debug_output) {
                     .dwarf => |dwarf| switch (lowered_inst.encoding.mnemonic) {
                         .@".cfi_def_cfa" => try dwarf.genDebugFrame(start_offset, .{ .def_cfa = .{

@@ -164,6 +165,8 @@ pub fn emitMir(emit: *Emit) Error!void {
                     .index = if (emit.bin_file.cast(.elf)) |elf_file|
                         elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, emit.pt, lazy_sym) catch |err|
                             return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
+                    else if (emit.bin_file.cast(.elf2)) |elf|
+                        @intFromEnum(try elf.lazySymbol(lazy_sym))
                     else if (emit.bin_file.cast(.macho)) |macho_file|
                         macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, emit.pt, lazy_sym) catch |err|
                             return emit.fail("{s} creating lazy symbol", .{@errorName(err)})

@@ -180,12 +183,15 @@ pub fn emitMir(emit: *Emit) Error!void {
                 .extern_func => |extern_func| .{
                     .index = if (emit.bin_file.cast(.elf)) |elf_file|
                         try elf_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
-                    else if (emit.bin_file.cast(.macho)) |macho_file|
+                    else if (emit.bin_file.cast(.elf2)) |elf| @intFromEnum(try elf.globalSymbol(.{
+                        .name = extern_func.toSlice(&emit.lower.mir).?,
+                        .type = .FUNC,
+                    })) else if (emit.bin_file.cast(.macho)) |macho_file|
                         try macho_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
                     else if (emit.bin_file.cast(.coff)) |coff_file|
                         try coff_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, "compiler_rt")
                     else
-                        return emit.fail("external symbols unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
+                        return emit.fail("external symbol unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
                     .is_extern = true,
                     .type = .symbol,
                 },

@@ -205,7 +211,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                 },
                 else => {},
             }
-            if (emit.bin_file.cast(.elf)) |_| {
+            if (emit.bin_file.cast(.elf) != null or emit.bin_file.cast(.elf2) != null) {
                 if (!emit.pic) switch (lowered_inst.encoding.mnemonic) {
                     .lea => try emit.encodeInst(try .new(.none, .mov, &.{
                         lowered_inst.ops[0],

@@ -315,7 +321,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                 },
                 .branch, .tls => unreachable,
                 .tlv => {
-                    if (emit.bin_file.cast(.elf)) |elf_file| {
+                    if (emit.bin_file.cast(.elf) != null or emit.bin_file.cast(.elf2) != null) {
                         // TODO handle extern TLS vars, i.e., emit GD model
                         if (emit.pic) switch (lowered_inst.encoding.mnemonic) {
                             .lea, .mov => {

@@ -337,7 +343,12 @@ pub fn emitMir(emit: *Emit) Error!void {
                             }, emit.lower.target), &.{.{
                                 .op_index = 0,
                                 .target = .{
-                                    .index = try elf_file.getGlobalSymbol("__tls_get_addr", null),
+                                    .index = if (emit.bin_file.cast(.elf)) |elf_file|
+                                        try elf_file.getGlobalSymbol("__tls_get_addr", null)
+                                    else if (emit.bin_file.cast(.elf2)) |elf| @intFromEnum(try elf.globalSymbol(.{
+                                        .name = "__tls_get_addr",
+                                        .type = .FUNC,
+                                    })) else unreachable,
                                     .is_extern = true,
                                     .type = .branch,
                                 },

@@ -441,7 +452,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     log.debug("mirDbgEnterBlock (line={d}, col={d})", .{
                         emit.prev_di_loc.line, emit.prev_di_loc.column,
                     });
-                    try dwarf.enterBlock(emit.code.items.len);
+                    try dwarf.enterBlock(emit.w.end);
                 },
                 .none => {},
             },

@@ -450,7 +461,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     log.debug("mirDbgLeaveBlock (line={d}, col={d})", .{
                         emit.prev_di_loc.line, emit.prev_di_loc.column,
                     });
-                    try dwarf.leaveBlock(emit.code.items.len);
+                    try dwarf.leaveBlock(emit.w.end);
                 },
                 .none => {},
             },

@@ -459,7 +470,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     log.debug("mirDbgEnterInline (line={d}, col={d})", .{
                         emit.prev_di_loc.line, emit.prev_di_loc.column,
                     });
-                    try dwarf.enterInlineFunc(mir_inst.data.ip_index, emit.code.items.len, emit.prev_di_loc.line, emit.prev_di_loc.column);
+                    try dwarf.enterInlineFunc(mir_inst.data.ip_index, emit.w.end, emit.prev_di_loc.line, emit.prev_di_loc.column);
                 },
                 .none => {},
             },

@@ -468,7 +479,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     log.debug("mirDbgLeaveInline (line={d}, col={d})", .{
                         emit.prev_di_loc.line, emit.prev_di_loc.column,
                     });
-                    try dwarf.leaveInlineFunc(mir_inst.data.ip_index, emit.code.items.len);
+                    try dwarf.leaveInlineFunc(mir_inst.data.ip_index, emit.w.end);
                 },
                 .none => {},
             },

@@ -634,7 +645,7 @@ pub fn emitMir(emit: *Emit) Error!void {
     for (emit.relocs.items) |reloc| {
         const target = emit.code_offset_mapping.items[reloc.target];
         const disp = @as(i64, @intCast(target)) - @as(i64, @intCast(reloc.inst_offset + reloc.inst_length)) + reloc.target_offset;
-        const inst_bytes = emit.code.items[reloc.inst_offset..][0..reloc.inst_length];
+        const inst_bytes = emit.w.buffered()[reloc.inst_offset..][0..reloc.inst_length];
         switch (reloc.source_length) {
             else => unreachable,
             inline 1, 4 => |source_length| std.mem.writeInt(

@@ -646,12 +657,12 @@ pub fn emitMir(emit: *Emit) Error!void {
         }
     }
     if (emit.lower.mir.table.len > 0) {
+        const ptr_size = @divExact(emit.lower.target.ptrBitWidth(), 8);
+        var table_offset = std.mem.alignForward(u32, @intCast(emit.w.end), ptr_size);
         if (emit.bin_file.cast(.elf)) |elf_file| {
             const zo = elf_file.zigObjectPtr().?;
             const atom = zo.symbol(emit.atom_index).atom(elf_file).?;

-            const ptr_size = @divExact(emit.lower.target.ptrBitWidth(), 8);
-            var table_offset = std.mem.alignForward(u32, @intCast(emit.code.items.len), ptr_size);
             for (emit.table_relocs.items) |table_reloc| try atom.addReloc(gpa, .{
                 .r_offset = table_reloc.source_offset,
                 .r_info = @as(u64, emit.atom_index) << 32 | @intFromEnum(std.elf.R_X86_64.@"32"),

@@ -665,7 +676,26 @@ pub fn emitMir(emit: *Emit) Error!void {
             }, zo);
             table_offset += ptr_size;
         }
-        try emit.code.appendNTimes(gpa, 0, table_offset - emit.code.items.len);
+        try emit.w.splatByteAll(0, table_offset - emit.w.end);
+    } else if (emit.bin_file.cast(.elf2)) |elf| {
+        for (emit.table_relocs.items) |table_reloc| try elf.addReloc(
+            @enumFromInt(emit.atom_index),
+            table_reloc.source_offset,
+            @enumFromInt(emit.atom_index),
+            @as(i64, table_offset) + table_reloc.target_offset,
+            .{ .x86_64 = .@"32" },
+        );
+        for (emit.lower.mir.table) |entry| {
+            try elf.addReloc(
+                @enumFromInt(emit.atom_index),
+                table_offset,
+                @enumFromInt(emit.atom_index),
+                emit.code_offset_mapping.items[entry],
+                .{ .x86_64 = .@"64" },
+            );
+            table_offset += ptr_size;
+        }
+        try emit.w.splatByteAll(0, table_offset - emit.w.end);
+    } else unreachable;
     }
 }
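
The fixup loop above rewrites displacement fields inside bytes the writer has already buffered: the displacement is the target offset minus the end of the instruction. A simplified sketch of the 4-byte case only (hypothetical helper; the real loop also handles 1-byte sources and a target addend):

```zig
const std = @import("std");

// Patch a rel32 field at the end of an already-emitted instruction.
// `w.buffered()` exposes the bytes still sitting in the writer's buffer.
fn fixupRel32(w: *std.Io.Writer, inst_offset: usize, inst_length: usize, target: usize) void {
    const disp = @as(i64, @intCast(target)) - @as(i64, @intCast(inst_offset + inst_length));
    const inst_bytes = w.buffered()[inst_offset..][0..inst_length];
    std.mem.writeInt(i32, inst_bytes[inst_length - 4 ..][0..4], @intCast(disp), .little);
}
```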

@@ -696,16 +726,12 @@ const RelocInfo = struct {
 fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocInfo) Error!void {
     const comp = emit.bin_file.comp;
     const gpa = comp.gpa;
-    const start_offset: u32 = @intCast(emit.code.items.len);
-    {
-        var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, emit.code);
-        defer emit.code.* = aw.toArrayList();
-        lowered_inst.encode(&aw.writer, .{}) catch |err| switch (err) {
-            error.WriteFailed => return error.OutOfMemory,
-            else => |e| return e,
-        };
-    }
-    const end_offset: u32 = @intCast(emit.code.items.len);
+    const start_offset: u32 = @intCast(emit.w.end);
+    lowered_inst.encode(emit.w, .{}) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    const end_offset: u32 = @intCast(emit.w.end);
     for (reloc_info) |reloc| switch (reloc.target.type) {
         .inst => {
             const inst_length: u4 = @intCast(end_offset - start_offset);

@@ -769,7 +795,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
                     .symbolnum = @intCast(reloc.target.index),
                 },
             });
-        } else if (emit.bin_file.cast(.coff)) |coff_file| {
+        } else if (emit.bin_file.cast(.elf2)) |elf| try elf.addReloc(
+            @enumFromInt(emit.atom_index),
+            end_offset - 4,
+            @enumFromInt(reloc.target.index),
+            reloc.off,
+            .{ .x86_64 = .@"32" },
+        ) else if (emit.bin_file.cast(.coff)) |coff_file| {
             const atom_index = coff_file.getAtomIndexForSymbol(
                 .{ .sym_index = emit.atom_index, .file = null },
             ).?;

@@ -794,7 +826,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
                 .r_info = @as(u64, reloc.target.index) << 32 | @intFromEnum(r_type),
                 .r_addend = reloc.off - 4,
             }, zo);
-        } else if (emit.bin_file.cast(.macho)) |macho_file| {
+        } else if (emit.bin_file.cast(.elf2)) |elf| try elf.addReloc(
+            @enumFromInt(emit.atom_index),
+            end_offset - 4,
+            @enumFromInt(reloc.target.index),
+            reloc.off - 4,
+            .{ .x86_64 = .PC32 },
+        ) else if (emit.bin_file.cast(.macho)) |macho_file| {
             const zo = macho_file.getZigObject().?;
             const atom = zo.symbols.items[emit.atom_index].getAtom(macho_file).?;
             try atom.addReloc(macho_file, .{

@@ -849,7 +887,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
                 .r_info = @as(u64, reloc.target.index) << 32 | @intFromEnum(r_type),
                 .r_addend = reloc.off,
             }, zo);
-        } else if (emit.bin_file.cast(.macho)) |macho_file| {
+        } else if (emit.bin_file.cast(.elf2)) |elf| try elf.addReloc(
+            @enumFromInt(emit.atom_index),
+            end_offset - 4,
+            @enumFromInt(reloc.target.index),
+            reloc.off,
+            .{ .x86_64 = .TPOFF32 },
+        ) else if (emit.bin_file.cast(.macho)) |macho_file| {
             const zo = macho_file.getZigObject().?;
             const atom = zo.symbols.items[emit.atom_index].getAtom(macho_file).?;
             try atom.addReloc(macho_file, .{

@@ -908,7 +952,7 @@ const Loc = struct {

 fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc) Error!void {
     const delta_line = @as(i33, loc.line) - @as(i33, emit.prev_di_loc.line);
-    const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
+    const delta_pc: usize = emit.w.end - emit.prev_di_pc;
     log.debug("  (advance pc={d} and line={d})", .{ delta_pc, delta_line });
     switch (emit.debug_output) {
         .dwarf => |dwarf| {

@@ -916,7 +960,7 @@ fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc) Error!void {
             if (loc.column != emit.prev_di_loc.column) try dwarf.setColumn(loc.column);
             try dwarf.advancePCAndLine(delta_line, delta_pc);
             emit.prev_di_loc = loc;
-            emit.prev_di_pc = emit.code.items.len;
+            emit.prev_di_pc = emit.w.end;
         },
         .none => {},
     }

@@ -1976,7 +1976,8 @@ pub fn emit(
     pt: Zcu.PerThread,
     src_loc: Zcu.LazySrcLoc,
     func_index: InternPool.Index,
-    code: *std.ArrayListUnmanaged(u8),
+    atom_index: u32,
+    w: *std.Io.Writer,
     debug_output: link.File.DebugInfoOutput,
 ) codegen.CodeGenError!void {
     const zcu = pt.zcu;

@@ -1997,17 +1998,9 @@ pub fn emit(
         .bin_file = lf,
         .pt = pt,
         .pic = mod.pic,
-        .atom_index = sym: {
-            if (lf.cast(.elf)) |ef| break :sym try ef.zigObjectPtr().?.getOrCreateMetadataForNav(zcu, nav);
-            if (lf.cast(.macho)) |mf| break :sym try mf.getZigObject().?.getOrCreateMetadataForNav(mf, nav);
-            if (lf.cast(.coff)) |cf| {
-                const atom = try cf.getOrCreateAtomForNav(nav);
-                break :sym cf.getAtom(atom).getSymbolIndex().?;
-            }
-            unreachable;
-        },
+        .atom_index = atom_index,
         .debug_output = debug_output,
-        .code = code,
+        .w = w,

         .prev_di_loc = .{
             .line = func.lbrace_line,

@@ -2037,7 +2030,8 @@ pub fn emitLazy(
     pt: Zcu.PerThread,
     src_loc: Zcu.LazySrcLoc,
     lazy_sym: link.File.LazySymbol,
-    code: *std.ArrayListUnmanaged(u8),
+    atom_index: u32,
+    w: *std.Io.Writer,
     debug_output: link.File.DebugInfoOutput,
 ) codegen.CodeGenError!void {
     const zcu = pt.zcu;

@@ -2055,20 +2049,9 @@ pub fn emitLazy(
         .bin_file = lf,
         .pt = pt,
         .pic = mod.pic,
-        .atom_index = sym: {
-            if (lf.cast(.elf)) |ef| break :sym ef.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(ef, pt, lazy_sym) catch |err|
-                return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
-            if (lf.cast(.macho)) |mf| break :sym mf.getZigObject().?.getOrCreateMetadataForLazySymbol(mf, pt, lazy_sym) catch |err|
-                return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
-            if (lf.cast(.coff)) |cf| {
-                const atom = cf.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err|
-                    return zcu.codegenFailType(lazy_sym.ty, "{s} creating lazy symbol", .{@errorName(err)});
-                break :sym cf.getAtom(atom).getSymbolIndex().?;
-            }
-            unreachable;
-        },
+        .atom_index = atom_index,
         .debug_output = debug_output,
-        .code = code,
+        .w = w,

         .prev_di_loc = undefined,
         .prev_di_pc = undefined,

@@ -727,6 +727,14 @@ pub const FrameIndex = enum(u32) {
     pub fn isNamed(fi: FrameIndex) bool {
         return @intFromEnum(fi) < named_count;
     }
+
+    pub fn format(fi: FrameIndex, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+        if (fi.isNamed()) {
+            try writer.print("FrameIndex.{t}", .{fi});
+        } else {
+            try writer.print("FrameIndex({d})", .{@intFromEnum(fi)});
+        }
+    }
 };

 pub const FrameAddr = struct { index: FrameIndex, off: i32 = 0 };
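
`FrameIndex` gains a `format` method with the new `(value, *std.Io.Writer)` shape, and the printing hunk below switches `{}` to `{f}`, the specifier that dispatches to such a method. A self-contained sketch of the same convention — the `Slot` enum is made up for illustration:

```zig
const std = @import("std");

const Slot = enum(u32) {
    base,
    spill,
    _,

    // Same shape as FrameIndex.format above: named tags print by name,
    // anything else prints the raw integer.
    pub fn format(s: Slot, writer: *std.Io.Writer) std.Io.Writer.Error!void {
        switch (s) {
            .base, .spill => try writer.print("Slot.{t}", .{s}),
            _ => try writer.print("Slot({d})", .{@intFromEnum(s)}),
        }
    }
};

test Slot {
    var buf: [32]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.print("{f}", .{@as(Slot, .spill)});
    try std.testing.expectEqualStrings("Slot.spill", w.buffered());
}
```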

@@ -259,7 +259,7 @@ pub const Instruction = struct {
             switch (sib.base) {
                 .none => any = false,
                 .reg => |reg| try w.print("{s}", .{@tagName(reg)}),
-                .frame => |frame_index| try w.print("{}", .{frame_index}),
+                .frame => |frame_index| try w.print("{f}", .{frame_index}),
                 .table => try w.print("Table", .{}),
                 .rip_inst => |inst_index| try w.print("RipInst({d})", .{inst_index}),
                 .nav => |nav| try w.print("Nav({d})", .{@intFromEnum(nav)}),

271
src/codegen.zig
@ -6,7 +6,6 @@ const link = @import("link.zig");
|
|||
const log = std.log.scoped(.codegen);
|
||||
const mem = std.mem;
|
||||
const math = std.math;
|
||||
const ArrayList = std.ArrayList;
|
||||
const target_util = @import("target.zig");
|
||||
const trace = @import("tracy.zig").trace;
|
||||
|
||||
|
|
@ -179,10 +178,11 @@ pub fn emitFunction(
|
|||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
func_index: InternPool.Index,
|
||||
atom_index: u32,
|
||||
any_mir: *const AnyMir,
|
||||
code: *ArrayList(u8),
|
||||
w: *std.Io.Writer,
|
||||
debug_output: link.File.DebugInfoOutput,
|
||||
) CodeGenError!void {
|
||||
) (CodeGenError || std.Io.Writer.Error)!void {
|
||||
const zcu = pt.zcu;
|
||||
const func = zcu.funcInfo(func_index);
|
||||
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
|
||||
|
|
@ -195,7 +195,7 @@ pub fn emitFunction(
|
|||
=> |backend| {
|
||||
dev.check(devFeatureForBackend(backend));
|
||||
const mir = &@field(any_mir, AnyMir.tag(backend));
|
||||
return mir.emit(lf, pt, src_loc, func_index, code, debug_output);
|
||||
return mir.emit(lf, pt, src_loc, func_index, atom_index, w, debug_output);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -205,9 +205,10 @@ pub fn generateLazyFunction(
|
|||
pt: Zcu.PerThread,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
lazy_sym: link.File.LazySymbol,
|
||||
code: *ArrayList(u8),
|
||||
atom_index: u32,
|
||||
w: *std.Io.Writer,
|
||||
debug_output: link.File.DebugInfoOutput,
|
||||
) CodeGenError!void {
|
||||
) (CodeGenError || std.Io.Writer.Error)!void {
|
||||
const zcu = pt.zcu;
|
||||
const target = if (Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu)) |inst_index|
|
||||
&zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
|
||||
|
|
@ -217,19 +218,11 @@ pub fn generateLazyFunction(
|
|||
else => unreachable,
|
||||
inline .stage2_riscv64, .stage2_x86_64 => |backend| {
|
||||
dev.check(devFeatureForBackend(backend));
|
||||
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
|
||||
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, atom_index, w, debug_output);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn writeFloat(comptime F: type, f: F, target: *const std.Target, endian: std.builtin.Endian, code: []u8) void {
|
||||
_ = target;
|
||||
const bits = @typeInfo(F).float.bits;
|
||||
const Int = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } });
|
||||
const int: Int = @bitCast(f);
|
||||
mem.writeInt(Int, code[0..@divExact(bits, 8)], int, endian);
|
||||
}
|
||||
|
||||
pub fn generateLazySymbol(
|
||||
bin_file: *link.File,
|
||||
pt: Zcu.PerThread,
|
||||
|
|
@ -237,17 +230,14 @@ pub fn generateLazySymbol(
|
|||
lazy_sym: link.File.LazySymbol,
|
||||
// TODO don't use an "out" parameter like this; put it in the result instead
|
||||
alignment: *Alignment,
|
||||
code: *ArrayList(u8),
|
||||
w: *std.Io.Writer,
|
||||
debug_output: link.File.DebugInfoOutput,
|
||||
reloc_parent: link.File.RelocInfo.Parent,
|
||||
) CodeGenError!void {
|
||||
_ = reloc_parent;
|
||||
|
||||
) (CodeGenError || std.Io.Writer.Error)!void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const comp = bin_file.comp;
|
||||
const gpa = comp.gpa;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const target = &comp.root_mod.resolved_target.result;
|
||||
|
|
@ -260,37 +250,36 @@ pub fn generateLazySymbol(
|
|||
|
||||
if (lazy_sym.kind == .code) {
|
||||
alignment.* = target_util.defaultFunctionAlignment(target);
|
||||
return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, code, debug_output);
|
||||
return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, reloc_parent.atom_index, w, debug_output);
|
||||
}
|
||||
|
||||
if (lazy_sym.ty == .anyerror_type) {
|
||||
alignment.* = .@"4";
|
||||
const err_names = ip.global_error_set.getNamesFromMainThread();
|
||||
var offset_index: u32 = @intCast(code.items.len);
|
||||
var string_index: u32 = @intCast(4 * (1 + err_names.len + @intFromBool(err_names.len > 0)));
|
||||
try code.resize(gpa, offset_index + string_index);
|
||||
mem.writeInt(u32, code.items[offset_index..][0..4], @intCast(err_names.len), endian);
|
||||
const strings_start: u32 = @intCast(4 * (1 + err_names.len + @intFromBool(err_names.len > 0)));
|
||||
var string_index = strings_start;
|
||||
try w.rebase(w.end, string_index);
|
||||
w.writeInt(u32, @intCast(err_names.len), endian) catch unreachable;
|
||||
if (err_names.len == 0) return;
|
||||
offset_index += 4;
|
||||
for (err_names) |err_name_nts| {
|
||||
const err_name = err_name_nts.toSlice(ip);
|
||||
mem.writeInt(u32, code.items[offset_index..][0..4], string_index, endian);
|
||||
offset_index += 4;
|
||||
try code.ensureUnusedCapacity(gpa, err_name.len + 1);
|
||||
code.appendSliceAssumeCapacity(err_name);
|
||||
code.appendAssumeCapacity(0);
|
||||
string_index += @intCast(err_name.len + 1);
|
||||
w.writeInt(u32, string_index, endian) catch unreachable;
|
||||
string_index += @intCast(err_name_nts.toSlice(ip).len + 1);
|
||||
}
|
||||
w.writeInt(u32, string_index, endian) catch unreachable;
|
||||
try w.rebase(w.end, string_index - strings_start);
|
||||
for (err_names) |err_name_nts| {
|
||||
w.writeAll(err_name_nts.toSlice(ip)) catch unreachable;
|
||||
w.writeByte(0) catch unreachable;
|
||||
}
|
||||
mem.writeInt(u32, code.items[offset_index..][0..4], string_index, endian);
|
||||
} else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu) == .@"enum") {
|
||||
alignment.* = .@"1";
|
||||
const enum_ty = Type.fromInterned(lazy_sym.ty);
|
||||
const tag_names = enum_ty.enumFields(zcu);
|
||||
for (0..tag_names.len) |tag_index| {
|
||||
const tag_name = tag_names.get(ip)[tag_index].toSlice(ip);
|
||||
try code.ensureUnusedCapacity(gpa, tag_name.len + 1);
|
||||
code.appendSliceAssumeCapacity(tag_name);
|
||||
code.appendAssumeCapacity(0);
|
||||
try w.rebase(w.end, tag_name.len + 1);
|
||||
w.writeAll(tag_name) catch unreachable;
|
||||
w.writeByte(0) catch unreachable;
|
||||
}
|
||||
} else {
|
||||
return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {f}", .{
|
||||
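
The rewritten `anyerror` branch emits a name table: a `u32` count, one `u32` start offset per error name plus a terminating end offset, then the NUL-terminated names themselves; the `w.rebase(w.end, n)` calls appear to reserve buffer capacity up front so the subsequent `catch unreachable` writes cannot fail. A sketch of the same layout with ordinary fallible writes (layout inferred from the hunk, not a copy of compiler internals):

```zig
const std = @import("std");

// Layout: count | offsets[0..n] | end offset | name bytes, NUL-terminated.
fn writeErrorTable(w: *std.Io.Writer, names: []const []const u8, endian: std.builtin.Endian) !void {
    try w.writeInt(u32, @intCast(names.len), endian);
    if (names.len == 0) return;
    var offset: u32 = @intCast(4 * (1 + names.len + 1)); // header size = first string offset
    for (names) |name| {
        try w.writeInt(u32, offset, endian);
        offset += @intCast(name.len + 1);
    }
    try w.writeInt(u32, offset, endian); // end offset makes every length recoverable
    for (names) |name| {
        try w.writeAll(name);
        try w.writeByte(0);
    }
}
```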

@@ -312,14 +301,13 @@ pub fn generateSymbol(
     pt: Zcu.PerThread,
     src_loc: Zcu.LazySrcLoc,
     val: Value,
-    code: *ArrayList(u8),
+    w: *std.Io.Writer,
     reloc_parent: link.File.RelocInfo.Parent,
-) GenerateSymbolError!void {
+) (GenerateSymbolError || std.Io.Writer.Error)!void {
     const tracy = trace(@src());
     defer tracy.end();

     const zcu = pt.zcu;
-    const gpa = zcu.gpa;
     const ip = &zcu.intern_pool;
     const ty = val.typeOf(zcu);

@@ -330,7 +318,7 @@ pub fn generateSymbol(

     if (val.isUndef(zcu)) {
         const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
-        try code.appendNTimes(gpa, 0xaa, abi_size);
+        try w.splatByteAll(0xaa, abi_size);
         return;
     }

@@ -360,7 +348,7 @@ pub fn generateSymbol(
         .null => unreachable, // non-runtime value
         .@"unreachable" => unreachable, // non-runtime value
         .empty_tuple => return,
-        .false, .true => try code.append(gpa, switch (simple_value) {
+        .false, .true => try w.writeByte(switch (simple_value) {
             .false => 0,
             .true => 1,
             else => unreachable,

@@ -376,11 +364,11 @@ pub fn generateSymbol(
             const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
             var space: Value.BigIntSpace = undefined;
             const int_val = val.toBigInt(&space, zcu);
-            int_val.writeTwosComplement(try code.addManyAsSlice(gpa, abi_size), endian);
+            int_val.writeTwosComplement(try w.writableSlice(abi_size), endian);
         },
         .err => |err| {
             const int = try pt.getErrorValue(err.name);
-            mem.writeInt(u16, try code.addManyAsArray(gpa, 2), @intCast(int), endian);
+            try w.writeInt(u16, @intCast(int), endian);
         },
         .error_union => |error_union| {
             const payload_ty = ty.errorUnionPayload(zcu);

@@ -390,7 +378,7 @@ pub fn generateSymbol(
             };

             if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-                mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
+                try w.writeInt(u16, err_val, endian);
                 return;
             }

@@ -400,63 +388,63 @@ pub fn generateSymbol(

             // error value first when its type is larger than the error union's payload
             if (error_align.order(payload_align) == .gt) {
-                mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
+                try w.writeInt(u16, err_val, endian);
             }

             // emit payload part of the error union
             {
-                const begin = code.items.len;
+                const begin = w.end;
                 try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) {
                     .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }),
                     .payload => |payload| payload,
-                }), code, reloc_parent);
-                const unpadded_end = code.items.len - begin;
+                }), w, reloc_parent);
+                const unpadded_end = w.end - begin;
                 const padded_end = abi_align.forward(unpadded_end);
                 const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;

                 if (padding > 0) {
-                    try code.appendNTimes(gpa, 0, padding);
+                    try w.splatByteAll(0, padding);
                 }
             }

             // Payload size is larger than error set, so emit our error set last
             if (error_align.compare(.lte, payload_align)) {
-                const begin = code.items.len;
-                mem.writeInt(u16, try code.addManyAsArray(gpa, 2), err_val, endian);
-                const unpadded_end = code.items.len - begin;
+                const begin = w.end;
+                try w.writeInt(u16, err_val, endian);
+                const unpadded_end = w.end - begin;
                 const padded_end = abi_align.forward(unpadded_end);
                 const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;

                 if (padding > 0) {
-                    try code.appendNTimes(gpa, 0, padding);
+                    try w.splatByteAll(0, padding);
                 }
             }
         },
         .enum_tag => |enum_tag| {
             const int_tag_ty = ty.intTagType(zcu);
-            try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent);
+            try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), w, reloc_parent);
         },
         .float => |float| storage: switch (float.storage) {
-            .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(gpa, 2)),
-            .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(gpa, 4)),
-            .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(gpa, 8)),
+            .f16 => |f16_val| try w.writeInt(u16, @bitCast(f16_val), endian),
+            .f32 => |f32_val| try w.writeInt(u32, @bitCast(f32_val), endian),
+            .f64 => |f64_val| try w.writeInt(u64, @bitCast(f64_val), endian),
             .f80 => |f80_val| {
-                writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(gpa, 10));
+                try w.writeInt(u80, @bitCast(f80_val), endian);
                 const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
-                try code.appendNTimes(gpa, 0, abi_size - 10);
+                try w.splatByteAll(0, abi_size - 10);
             },
             .f128 => |f128_val| switch (Type.fromInterned(float.ty).floatBits(target)) {
                 else => unreachable,
                 16 => continue :storage .{ .f16 = @floatCast(f128_val) },
                 32 => continue :storage .{ .f32 = @floatCast(f128_val) },
                 64 => continue :storage .{ .f64 = @floatCast(f128_val) },
-                128 => writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
+                128 => try w.writeInt(u128, @bitCast(f128_val), endian),
             },
         },
-        .ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0),
+        .ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), w, reloc_parent, 0),
         .slice => |slice| {
-            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, reloc_parent);
-            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, reloc_parent);
+            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), w, reloc_parent);
+            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), w, reloc_parent);
         },
         .opt => {
             const payload_type = ty.optionalChild(zcu);

@@ -465,9 +453,9 @@ pub fn generateSymbol(

             if (ty.optionalReprIsPayload(zcu)) {
                 if (payload_val) |value| {
-                    try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
+                    try generateSymbol(bin_file, pt, src_loc, value, w, reloc_parent);
                 } else {
-                    try code.appendNTimes(gpa, 0, abi_size);
+                    try w.splatByteAll(0, abi_size);
                 }
             } else {
                 const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1;

@@ -475,15 +463,15 @@ pub fn generateSymbol(
                     const value = payload_val orelse Value.fromInterned(try pt.intern(.{
                         .undef = payload_type.toIntern(),
                     }));
-                    try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent);
+                    try generateSymbol(bin_file, pt, src_loc, value, w, reloc_parent);
                 }
-                try code.append(gpa, @intFromBool(payload_val != null));
-                try code.appendNTimes(gpa, 0, padding);
+                try w.writeByte(@intFromBool(payload_val != null));
+                try w.splatByteAll(0, padding);
             }
         },
         .aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) {
             .array_type => |array_type| switch (aggregate.storage) {
-                .bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
+                .bytes => |bytes| try w.writeAll(bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
                 .elems, .repeated_elem => {
                     var index: u64 = 0;
                     while (index < array_type.lenIncludingSentinel()) : (index += 1) {

@@ -494,14 +482,14 @@ pub fn generateSymbol(
                             elem
                         else
                             array_type.sentinel,
-                        }), code, reloc_parent);
+                        }), w, reloc_parent);
                     }
                 },
             },
             .vector_type => |vector_type| {
                 const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
                 if (vector_type.child == .bool_type) {
-                    const bytes = try code.addManyAsSlice(gpa, abi_size);
+                    const bytes = try w.writableSlice(abi_size);
                     @memset(bytes, 0xaa);
                     var index: usize = 0;
                     const len = math.cast(usize, vector_type.len) orelse return error.Overflow;

@@ -540,7 +528,7 @@ pub fn generateSymbol(
                     }
                 } else {
                     switch (aggregate.storage) {
-                        .bytes => |bytes| try code.appendSlice(gpa, bytes.toSlice(vector_type.len, ip)),
+                        .bytes => |bytes| try w.writeAll(bytes.toSlice(vector_type.len, ip)),
                         .elems, .repeated_elem => {
                             var index: u64 = 0;
                             while (index < vector_type.len) : (index += 1) {

@@ -550,7 +538,7 @@ pub fn generateSymbol(
                                     math.cast(usize, index) orelse return error.Overflow
                                 ],
                                 .repeated_elem => |elem| elem,
-                                }), code, reloc_parent);
+                                }), w, reloc_parent);
                             }
                         },
                     }

@@ -558,11 +546,11 @@ pub fn generateSymbol(
                     const padding = abi_size -
                         (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse
                             return error.Overflow);
-                    if (padding > 0) try code.appendNTimes(gpa, 0, padding);
+                    if (padding > 0) try w.splatByteAll(0, padding);
                 }
             },
             .tuple_type => |tuple| {
-                const struct_begin = code.items.len;
+                const struct_begin = w.end;
                 for (
                     tuple.types.get(ip),
                     tuple.values.get(ip),

@@ -580,8 +568,8 @@ pub fn generateSymbol(
                         .repeated_elem => |elem| elem,
                     };

-                    try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
-                    const unpadded_field_end = code.items.len - struct_begin;
+                    try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), w, reloc_parent);
+                    const unpadded_field_end = w.end - struct_begin;

                     // Pad struct members if required
                     const padded_field_end = ty.structFieldOffset(index + 1, zcu);

@@ -589,7 +577,7 @@ pub fn generateSymbol(
                         return error.Overflow;

                     if (padding > 0) {
-                        try code.appendNTimes(gpa, 0, padding);
+                        try w.splatByteAll(0, padding);
                     }
                 }
             },

@@ -598,8 +586,9 @@ pub fn generateSymbol(
                 switch (struct_type.layout) {
                     .@"packed" => {
                         const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
-                        const current_pos = code.items.len;
-                        try code.appendNTimes(gpa, 0, abi_size);
+                        const start = w.end;
+                        const buffer = try w.writableSlice(abi_size);
+                        @memset(buffer, 0);
                         var bits: u16 = 0;

                         for (struct_type.field_types.get(ip), 0..) |field_ty, index| {

@@ -619,22 +608,20 @@ pub fn generateSymbol(
                                     error.DivisionByZero => unreachable,
                                     error.UnexpectedRemainder => return error.RelocationNotByteAligned,
                                 };
-                                code.items.len = current_pos + field_offset;
-                                // TODO: code.lockPointers();
+                                w.end = start + field_offset;
                                 defer {
-                                    assert(code.items.len == current_pos + field_offset + @divExact(target.ptrBitWidth(), 8));
-                                    // TODO: code.unlockPointers();
-                                    code.items.len = current_pos + abi_size;
+                                    assert(w.end == start + field_offset + @divExact(target.ptrBitWidth(), 8));
+                                    w.end = start + abi_size;
                                 }
-                                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
+                                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), w, reloc_parent);
                             } else {
-                                Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), pt, code.items[current_pos..], bits) catch unreachable;
+                                Value.fromInterned(field_val).writeToPackedMemory(.fromInterned(field_ty), pt, buffer, bits) catch unreachable;
                             }
                             bits += @intCast(Type.fromInterned(field_ty).bitSize(zcu));
                         }
                     },
                     .auto, .@"extern" => {
-                        const struct_begin = code.items.len;
+                        const struct_begin = w.end;
                         const field_types = struct_type.field_types.get(ip);
                         const offsets = struct_type.offsets.get(ip);

@@ -654,11 +641,11 @@ pub fn generateSymbol(

                             const padding = math.cast(
                                 usize,
-                                offsets[field_index] - (code.items.len - struct_begin),
+                                offsets[field_index] - (w.end - struct_begin),
                             ) orelse return error.Overflow;
-                            if (padding > 0) try code.appendNTimes(gpa, 0, padding);
+                            if (padding > 0) try w.splatByteAll(0, padding);

-                            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent);
+                            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), w, reloc_parent);
                         }

                         const size = struct_type.sizeUnordered(ip);

@@ -666,10 +653,9 @@ pub fn generateSymbol(

                         const padding = math.cast(
                             usize,
-                            std.mem.alignForward(u64, size, @max(alignment, 1)) -
-                                (code.items.len - struct_begin),
+                            std.mem.alignForward(u64, size, @max(alignment, 1)) - (w.end - struct_begin),
                         ) orelse return error.Overflow;
-                        if (padding > 0) try code.appendNTimes(gpa, 0, padding);
+                        if (padding > 0) try w.splatByteAll(0, padding);
                     },
                 }
             },

@@ -679,12 +665,12 @@ pub fn generateSymbol(
             const layout = ty.unionGetLayout(zcu);

             if (layout.payload_size == 0) {
-                return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
+                return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);
             }

             // Check if we should store the tag first.
             if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
-                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
+                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);
             }

             const union_obj = zcu.typeToUnion(ty).?;

@@ -692,24 +678,24 @@ pub fn generateSymbol(
                 const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
                 const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
                 if (!field_ty.hasRuntimeBits(zcu)) {
-                    try code.appendNTimes(gpa, 0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
+                    try w.splatByteAll(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
                 } else {
-                    try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent);
+                    try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), w, reloc_parent);

                     const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow;
                     if (padding > 0) {
-                        try code.appendNTimes(gpa, 0, padding);
+                        try w.splatByteAll(0, padding);
                     }
                 }
             } else {
-                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent);
+                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), w, reloc_parent);
             }

             if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
-                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent);
+                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);

                 if (layout.padding > 0) {
-                    try code.appendNTimes(gpa, 0, layout.padding);
+                    try w.splatByteAll(0, layout.padding);
                 }
             }
         },
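
The union case orders its writes by alignment: the tag is emitted before the payload when its alignment is at least the payload's, otherwise after it, and every branch pads with zeros out to the ABI sizes. A condensed sketch of that ordering with plain byte slices (the `Layout` struct here is an assumption, not `Zcu`'s):

```zig
const std = @import("std");

const Layout = struct {
    tag_size: u64,
    tag_align: u32,
    payload_size: u64,
    payload_align: u32,
    padding: u32,
};

fn emitUnion(w: *std.Io.Writer, l: Layout, tag: []const u8, payload: []const u8) !void {
    // Tag first when its alignment dominates.
    if (l.tag_size > 0 and l.tag_align >= l.payload_align) try w.writeAll(tag);
    try w.writeAll(payload);
    try w.splatByteAll(0, @intCast(l.payload_size - payload.len)); // pad payload to ABI size
    // Otherwise the tag trails the payload, followed by trailing padding.
    if (l.tag_size > 0 and l.tag_align < l.payload_align) {
        try w.writeAll(tag);
        try w.splatByteAll(0, l.padding);
    }
}
```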

@@ -722,30 +708,30 @@ fn lowerPtr(
     pt: Zcu.PerThread,
     src_loc: Zcu.LazySrcLoc,
     ptr_val: InternPool.Index,
-    code: *ArrayList(u8),
+    w: *std.Io.Writer,
     reloc_parent: link.File.RelocInfo.Parent,
     prev_offset: u64,
-) GenerateSymbolError!void {
+) (GenerateSymbolError || std.Io.Writer.Error)!void {
     const zcu = pt.zcu;
     const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
     const offset: u64 = prev_offset + ptr.byte_offset;
     return switch (ptr.base_addr) {
-        .nav => |nav| try lowerNavRef(bin_file, pt, nav, code, reloc_parent, offset),
-        .uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, reloc_parent, offset),
-        .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, reloc_parent),
+        .nav => |nav| try lowerNavRef(bin_file, pt, nav, w, reloc_parent, offset),
+        .uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, w, reloc_parent, offset),
+        .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), w, reloc_parent),
         .eu_payload => |eu_ptr| try lowerPtr(
             bin_file,
             pt,
             src_loc,
             eu_ptr,
-            code,
+            w,
             reloc_parent,
             offset + errUnionPayloadOffset(
                 Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
                 zcu,
             ),
         ),
-        .opt_payload => |opt_ptr| try lowerPtr(bin_file, pt, src_loc, opt_ptr, code, reloc_parent, offset),
+        .opt_payload => |opt_ptr| try lowerPtr(bin_file, pt, src_loc, opt_ptr, w, reloc_parent, offset),
         .field => |field| {
             const base_ptr = Value.fromInterned(field.base);
             const base_ty = base_ptr.typeOf(zcu).childType(zcu);

@@ -764,7 +750,7 @@ fn lowerPtr(
                 },
                 else => unreachable,
             };
-            return lowerPtr(bin_file, pt, src_loc, field.base, code, reloc_parent, offset + field_off);
+            return lowerPtr(bin_file, pt, src_loc, field.base, w, reloc_parent, offset + field_off);
         },
         .arr_elem, .comptime_field, .comptime_alloc => unreachable,
     };

@@ -775,12 +761,11 @@ fn lowerUavRef(
     pt: Zcu.PerThread,
     src_loc: Zcu.LazySrcLoc,
     uav: InternPool.Key.Ptr.BaseAddr.Uav,
-    code: *ArrayList(u8),
+    w: *std.Io.Writer,
     reloc_parent: link.File.RelocInfo.Parent,
     offset: u64,
-) GenerateSymbolError!void {
+) (GenerateSymbolError || std.Io.Writer.Error)!void {
     const zcu = pt.zcu;
-    const gpa = zcu.gpa;
     const ip = &zcu.intern_pool;
     const comp = lf.comp;
     const target = &comp.root_mod.resolved_target.result;

@@ -790,10 +775,9 @@ fn lowerUavRef(
     const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";

     log.debug("lowerUavRef: ty = {f}", .{uav_ty.fmt(pt)});
-    try code.ensureUnusedCapacity(gpa, ptr_width_bytes);

     if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
-        code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes);
+        try w.splatByteAll(0xaa, ptr_width_bytes);
         return;
     }

@@ -804,29 +788,32 @@ fn lowerUavRef(
             dev.check(link.File.Tag.wasm.devFeature());
             const wasm = lf.cast(.wasm).?;
             assert(reloc_parent == .none);
-            try wasm.addUavReloc(code.items.len, uav.val, uav.orig_ty, @intCast(offset));
-            code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
+            try wasm.addUavReloc(w.end, uav.val, uav.orig_ty, @intCast(offset));
+            try w.splatByteAll(0, ptr_width_bytes);
             return;
         },
         else => {},
     }

-    const uav_align = ip.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
+    const uav_align = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu);
     switch (try lf.lowerUav(pt, uav_val, uav_align, src_loc)) {
         .sym_index => {},
         .fail => |em| std.debug.panic("TODO rework lowerUav. internal error: {s}", .{em.msg}),
     }

-    const vaddr = try lf.getUavVAddr(uav_val, .{
+    const vaddr = lf.getUavVAddr(uav_val, .{
         .parent = reloc_parent,
-        .offset = code.items.len,
+        .offset = w.end,
         .addend = @intCast(offset),
-    });
+    }) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        else => |e| std.debug.panic("TODO rework lowerUav. internal error: {t}", .{e}),
+    };
     const endian = target.cpu.arch.endian();
     switch (ptr_width_bytes) {
-        2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian),
-        4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian),
-        8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian),
+        2 => try w.writeInt(u16, @intCast(vaddr), endian),
+        4 => try w.writeInt(u32, @intCast(vaddr), endian),
+        8 => try w.writeInt(u64, vaddr, endian),
         else => unreachable,
     }
 }

@@ -835,10 +822,10 @@ fn lowerNavRef(
     lf: *link.File,
     pt: Zcu.PerThread,
     nav_index: InternPool.Nav.Index,
-    code: *ArrayList(u8),
+    w: *std.Io.Writer,
     reloc_parent: link.File.RelocInfo.Parent,
     offset: u64,
-) GenerateSymbolError!void {
+) (GenerateSymbolError || std.Io.Writer.Error)!void {
     const zcu = pt.zcu;
     const gpa = zcu.gpa;
     const ip = &zcu.intern_pool;

@@ -848,10 +835,8 @@ fn lowerNavRef(
     const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
     const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";

-    try code.ensureUnusedCapacity(gpa, ptr_width_bytes);
-
     if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
-        code.appendNTimesAssumeCapacity(0xaa, ptr_width_bytes);
+        try w.splatByteAll(0xaa, ptr_width_bytes);
         return;
     }

@@ -870,13 +855,13 @@ fn lowerNavRef(
             } else {
                 try wasm.func_table_fixups.append(gpa, .{
                     .table_index = @enumFromInt(gop.index),
-                    .offset = @intCast(code.items.len),
+                    .offset = @intCast(w.end),
                 });
             }
         } else {
             if (is_obj) {
                 try wasm.out_relocs.append(gpa, .{
-                    .offset = @intCast(code.items.len),
+                    .offset = @intCast(w.end),
                     .pointee = .{ .symbol_index = try wasm.navSymbolIndex(nav_index) },
                     .tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64,
                     .addend = @intCast(offset),

@@ -885,12 +870,12 @@ fn lowerNavRef(
                 try wasm.nav_fixups.ensureUnusedCapacity(gpa, 1);
                 wasm.nav_fixups.appendAssumeCapacity(.{
                     .navs_exe_index = try wasm.refNavExe(nav_index),
-                    .offset = @intCast(code.items.len),
+                    .offset = @intCast(w.end),
                     .addend = @intCast(offset),
                 });
             }
         }
-        code.appendNTimesAssumeCapacity(0, ptr_width_bytes);
+        try w.splatByteAll(0, ptr_width_bytes);
         return;
         },
         else => {},

@@ -898,14 +883,14 @@ fn lowerNavRef(

     const vaddr = lf.getNavVAddr(pt, nav_index, .{
         .parent = reloc_parent,
-        .offset = code.items.len,
+        .offset = w.end,
         .addend = @intCast(offset),
     }) catch @panic("TODO rework getNavVAddr");
     const endian = target.cpu.arch.endian();
     switch (ptr_width_bytes) {
-        2 => mem.writeInt(u16, code.addManyAsArrayAssumeCapacity(2), @intCast(vaddr), endian),
-        4 => mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @intCast(vaddr), endian),
-        8 => mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), vaddr, endian),
+        2 => try w.writeInt(u16, @intCast(vaddr), endian),
+        4 => try w.writeInt(u32, @intCast(vaddr), endian),
+        8 => try w.writeInt(u64, vaddr, endian),
         else => unreachable,
     }
 }

@@ -962,6 +947,16 @@ pub fn genNavRef(
             },
             .link_once => unreachable,
         }
+    } else if (lf.cast(.elf2)) |elf| {
+        return .{ .sym_index = @intFromEnum(elf.navSymbol(zcu, nav_index) catch |err| switch (err) {
+            error.OutOfMemory => return error.OutOfMemory,
+            else => |e| return .{ .fail = try ErrorMsg.create(
+                zcu.gpa,
+                src_loc,
+                "linker failed to create a nav: {t}",
+                .{e},
+            ) },
+        }) };
     } else if (lf.cast(.macho)) |macho_file| {
         const zo = macho_file.getZigObject().?;
         switch (linkage) {

@@ -42,8 +42,11 @@ fn zonCast(comptime Result: type, zon_value: anytype, symbols: anytype) Result {
     .@"struct" => |zon_struct| switch (@typeInfo(Result)) {
         .pointer => |result_pointer| {
             comptime assert(result_pointer.size == .slice and result_pointer.is_const);
-            var elems: [zon_value.len]result_pointer.child = undefined;
-            inline for (&elems, zon_value) |*elem, zon_elem| elem.* = zonCast(result_pointer.child, zon_elem, symbols);
+            const elems = comptime blk: {
+                var temp_elems: [zon_value.len]result_pointer.child = undefined;
+                for (&temp_elems, zon_value) |*elem, zon_elem| elem.* = zonCast(result_pointer.child, zon_elem, symbols);
+                break :blk temp_elems;
+            };
             return &elems;
         },
         .@"struct" => |result_struct| {

@@ -56,13 +56,13 @@ pub fn emit(
     pt: Zcu.PerThread,
     src_loc: Zcu.LazySrcLoc,
     func_index: InternPool.Index,
-    code: *std.ArrayListUnmanaged(u8),
+    atom_index: u32,
+    w: *std.Io.Writer,
     debug_output: link.File.DebugInfoOutput,
 ) !void {
     _ = debug_output;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
-    const gpa = zcu.gpa;
     const func = zcu.funcInfo(func_index);
     const nav = ip.getNav(func.owner_nav);
     const mod = zcu.navFileScope(func.owner_nav).mod.?;

@@ -81,20 +81,19 @@ pub fn emit(
         @as(u5, @intCast(func_align.minStrict(.@"16").toByteUnits().?)),
         Instruction.size,
     ) - 1);
-    try code.ensureUnusedCapacity(gpa, Instruction.size *
-        (code_len + literals_align_gap + mir.literals.len));
-    emitInstructionsForward(code, mir.prologue);
-    emitInstructionsBackward(code, mir.body);
-    const body_end: u32 = @intCast(code.items.len);
-    emitInstructionsBackward(code, mir.epilogue);
-    code.appendNTimesAssumeCapacity(0, Instruction.size * literals_align_gap);
-    code.appendSliceAssumeCapacity(@ptrCast(mir.literals));
+    try w.rebase(w.end, Instruction.size * (code_len + literals_align_gap + mir.literals.len));
+    emitInstructionsForward(w, mir.prologue) catch unreachable;
+    emitInstructionsBackward(w, mir.body) catch unreachable;
+    const body_end: u32 = @intCast(w.end);
+    emitInstructionsBackward(w, mir.epilogue) catch unreachable;
+    w.splatByteAll(0, Instruction.size * literals_align_gap) catch unreachable;
+    w.writeAll(@ptrCast(mir.literals)) catch unreachable;
     mir_log.debug("", .{});

     for (mir.nav_relocs) |nav_reloc| try emitReloc(
         lf,
         zcu,
-        func.owner_nav,
+        atom_index,
         switch (try @import("../../codegen.zig").genNavRef(
             lf,
             pt,

@@ -112,7 +111,7 @@ pub fn emit(
     for (mir.uav_relocs) |uav_reloc| try emitReloc(
         lf,
         zcu,
-        func.owner_nav,
+        atom_index,
         switch (try lf.lowerUav(
             pt,
             uav_reloc.uav.val,

@@ -129,7 +128,7 @@ pub fn emit(
     for (mir.lazy_relocs) |lazy_reloc| try emitReloc(
         lf,
         zcu,
-        func.owner_nav,
+        atom_index,
         if (lf.cast(.elf)) |ef|
             ef.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(ef, pt, lazy_reloc.symbol) catch |err|
                 return zcu.codegenFail(func.owner_nav, "{s} creating lazy symbol", .{@errorName(err)})

@@ -150,7 +149,7 @@ pub fn emit(
     for (mir.global_relocs) |global_reloc| try emitReloc(
         lf,
         zcu,
-        func.owner_nav,
+        atom_index,
         if (lf.cast(.elf)) |ef|
             try ef.getGlobalSymbol(std.mem.span(global_reloc.name), null)
         else if (lf.cast(.macho)) |mf|

@@ -168,30 +167,30 @@ pub fn emit(
         var instruction = mir.body[literal_reloc.label];
         instruction.load_store.register_literal.group.imm19 += literal_reloc_offset;
         instruction.write(
-            code.items[body_end - Instruction.size * (1 + literal_reloc.label) ..][0..Instruction.size],
+            w.buffered()[body_end - Instruction.size * (1 + literal_reloc.label) ..][0..Instruction.size],
         );
     }
 }

-fn emitInstructionsForward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void {
-    for (instructions) |instruction| emitInstruction(code, instruction);
+fn emitInstructionsForward(w: *std.Io.Writer, instructions: []const Instruction) !void {
+    for (instructions) |instruction| try emitInstruction(w, instruction);
 }
-fn emitInstructionsBackward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void {
+fn emitInstructionsBackward(w: *std.Io.Writer, instructions: []const Instruction) !void {
     var instruction_index = instructions.len;
     while (instruction_index > 0) {
         instruction_index -= 1;
-        emitInstruction(code, instructions[instruction_index]);
+        try emitInstruction(w, instructions[instruction_index]);
     }
 }
-fn emitInstruction(code: *std.ArrayListUnmanaged(u8), instruction: Instruction) void {
+fn emitInstruction(w: *std.Io.Writer, instruction: Instruction) !void {
     mir_log.debug("  {f}", .{instruction});
-    instruction.write(code.addManyAsArrayAssumeCapacity(Instruction.size));
+    instruction.write(try w.writableArray(Instruction.size));
 }

 fn emitReloc(
     lf: *link.File,
     zcu: *Zcu,
-    owner_nav: InternPool.Nav.Index,
+    atom_index: u32,
     sym_index: u32,
     instruction: Instruction,
     offset: u32,

@@ -202,7 +201,7 @@ fn emitReloc(
     else => unreachable,
     .data_processing_immediate => |decoded| if (lf.cast(.elf)) |ef| {
         const zo = ef.zigObjectPtr().?;
-        const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+        const atom = zo.symbol(atom_index).atom(ef).?;
         const r_type: std.elf.R_AARCH64 = switch (decoded.decode()) {
             else => unreachable,
             .pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {

@@ -221,7 +220,7 @@ fn emitReloc(
         }, zo);
     } else if (lf.cast(.macho)) |mf| {
         const zo = mf.getZigObject().?;
-        const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+        const atom = zo.symbols.items[atom_index].getAtom(mf).?;
         switch (decoded.decode()) {
             else => unreachable,
             .pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {

@@ -260,7 +259,7 @@ fn emitReloc(
     },
     .branch_exception_generating_system => |decoded| if (lf.cast(.elf)) |ef| {
         const zo = ef.zigObjectPtr().?;
-        const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+        const atom = zo.symbol(atom_index).atom(ef).?;
         const r_type: std.elf.R_AARCH64 = switch (decoded.decode().unconditional_branch_immediate.group.op) {
             .b => .JUMP26,
             .bl => .CALL26,

@@ -272,7 +271,7 @@ fn emitReloc(
         }, zo);
     } else if (lf.cast(.macho)) |mf| {
         const zo = mf.getZigObject().?;
-        const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+        const atom = zo.symbols.items[atom_index].getAtom(mf).?;
         try atom.addReloc(mf, .{
             .tag = .@"extern",
             .offset = offset,

@@ -289,7 +288,7 @@ fn emitReloc(
     },
     .load_store => |decoded| if (lf.cast(.elf)) |ef| {
         const zo = ef.zigObjectPtr().?;
-        const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+        const atom = zo.symbol(atom_index).atom(ef).?;
         const r_type: std.elf.R_AARCH64 = switch (decoded.decode().register_unsigned_immediate.decode()) {
             .integer => |integer| switch (integer.decode()) {
                 .unallocated, .prfm => unreachable,

@@ -316,7 +315,7 @@ fn emitReloc(
         }, zo);
     } else if (lf.cast(.macho)) |mf| {
         const zo = mf.getZigObject().?;
-        const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+        const atom = zo.symbols.items[atom_index].getAtom(mf).?;
         try atom.addReloc(mf, .{
             .tag = .@"extern",
             .offset = offset,
||||
|
|
|
|||
|
|
@@ -5821,29 +5821,21 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
             if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
         },
         .unwrap_errunion_err_ptr => {
-            if (isel.live_values.fetchRemove(air.inst_index)) |error_ptr_vi| unused: {
-                defer error_ptr_vi.value.deref(isel);
+            if (isel.live_values.fetchRemove(air.inst_index)) |error_vi| {
+                defer error_vi.value.deref(isel);
                 const ty_op = air.data(air.inst_index).ty_op;
-                switch (codegen.errUnionErrorOffset(
-                    isel.air.typeOf(ty_op.operand, ip).childType(zcu).errorUnionPayload(zcu),
-                    zcu,
-                )) {
-                    0 => try error_ptr_vi.value.move(isel, ty_op.operand),
-                    else => |error_offset| {
-                        const error_ptr_ra = try error_ptr_vi.value.defReg(isel) orelse break :unused;
-                        const error_union_ptr_vi = try isel.use(ty_op.operand);
-                        const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
-                        const lo12: u12 = @truncate(error_offset >> 0);
-                        const hi12: u12 = @intCast(error_offset >> 12);
-                        if (hi12 > 0) try isel.emit(.add(
-                            error_ptr_ra.x(),
-                            if (lo12 > 0) error_ptr_ra.x() else error_union_ptr_mat.ra.x(),
-                            .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
-                        ));
-                        if (lo12 > 0) try isel.emit(.add(error_ptr_ra.x(), error_union_ptr_mat.ra.x(), .{ .immediate = lo12 }));
-                        try error_union_ptr_mat.finish(isel);
-                    },
-                }
+                const error_union_ptr_ty = isel.air.typeOf(ty_op.operand, ip);
+                const error_union_ptr_info = error_union_ptr_ty.ptrInfo(zcu);
+                const error_union_ptr_vi = try isel.use(ty_op.operand);
+                const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
+                _ = try error_vi.value.load(isel, ty_op.ty.toType(), error_union_ptr_mat.ra, .{
+                    .offset = codegen.errUnionErrorOffset(
+                        ZigType.fromInterned(error_union_ptr_info.child).errorUnionPayload(zcu),
+                        zcu,
+                    ),
+                    .@"volatile" = error_union_ptr_info.flags.is_volatile,
+                });
+                try error_union_ptr_mat.finish(isel);
             }
             if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
         },

@@ -6147,6 +6139,26 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
             }
             if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
         },
+        .ptr_slice_len_ptr => {
+            if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+                defer dst_vi.value.deref(isel);
+                const ty_op = air.data(air.inst_index).ty_op;
+                const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+                const src_vi = try isel.use(ty_op.operand);
+                const src_mat = try src_vi.matReg(isel);
+                try isel.emit(.add(dst_ra.x(), src_mat.ra.x(), .{ .immediate = 8 }));
+                try src_mat.finish(isel);
+            }
+            if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+        },
+        .ptr_slice_ptr_ptr => {
+            if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| {
+                defer dst_vi.value.deref(isel);
+                const ty_op = air.data(air.inst_index).ty_op;
+                try dst_vi.value.move(isel, ty_op.operand);
+            }
+            if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+        },
         .array_elem_val => {
             if (isel.live_values.fetchRemove(air.inst_index)) |elem_vi| unused: {
                 defer elem_vi.value.deref(isel);

@@ -8011,6 +8023,7 @@ pub fn layout(
     while (save_index < saves.len) {
         if (save_index + 2 <= saves.len and saves[save_index + 1].needs_restore and
             saves[save_index + 0].class == saves[save_index + 1].class and
             saves[save_index + 0].size == saves[save_index + 1].size and
             saves[save_index + 0].offset + saves[save_index + 0].size == saves[save_index + 1].offset)
         {
             try isel.emit(.ldp(

@@ -8317,7 +8330,7 @@ fn elemPtr(
         }),
         2 => {
             const shift: u6 = @intCast(@ctz(elem_size));
-            const temp_ra = temp_ra: switch (op) {
+            const temp_ra, const free_temp_ra = temp_ra: switch (op) {
                 .add => switch (base_ra) {
                     else => {
                         const temp_ra = try isel.allocIntReg();

@@ -8326,7 +8339,7 @@ fn elemPtr(
                             .register = temp_ra.x(),
                             .shift = .{ .lsl = shift },
                         } }));
-                        break :temp_ra temp_ra;
+                        break :temp_ra .{ temp_ra, true };
                    },
                    .zr => {
                        if (shift > 0) try isel.emit(.ubfm(elem_ptr_ra.x(), elem_ptr_ra.x(), .{

@@ -8334,7 +8347,7 @@ fn elemPtr(
                            .immr = -%shift,
                            .imms = ~shift,
                        }));
-                        break :temp_ra elem_ptr_ra;
+                        break :temp_ra .{ elem_ptr_ra, false };
                    },
                },
                .sub => {

@@ -8344,10 +8357,10 @@ fn elemPtr(
                        .register = temp_ra.x(),
                        .shift = .{ .lsl = shift },
                    } }));
-                    break :temp_ra temp_ra;
+                    break :temp_ra .{ temp_ra, true };
                },
            };
-            defer if (temp_ra != elem_ptr_ra) isel.freeReg(temp_ra);
+            defer if (free_temp_ra) isel.freeReg(temp_ra);
            try isel.emit(.add(temp_ra.x(), index_mat.ra.x(), .{ .shifted_register = .{
                .register = index_mat.ra.x(),
                .shift = .{ .lsl = @intCast(63 - @clz(elem_size) - shift) },

@@ -9276,7 +9289,14 @@ pub const Value = struct {
                part_offset -= part_size;
                var wrapped_res_part_it = res_vi.field(ty, part_offset, part_size);
                const wrapped_res_part_vi = try wrapped_res_part_it.only(isel);
-                const wrapped_res_part_ra = try wrapped_res_part_vi.?.defReg(isel) orelse if (need_carry) .zr else continue;
+                const wrapped_res_part_ra = wrapped_res_part_ra: {
+                    const overflow_ra_lock: RegLock = switch (opts.overflow) {
+                        .ra => |ra| isel.lockReg(ra),
+                        else => .empty,
+                    };
+                    defer overflow_ra_lock.unlock(isel);
+                    break :wrapped_res_part_ra try wrapped_res_part_vi.?.defReg(isel) orelse if (need_carry) .zr else continue;
+                };
                const unwrapped_res_part_ra = unwrapped_res_part_ra: {
                    if (!need_wrap) break :unwrapped_res_part_ra wrapped_res_part_ra;
                    if (int_info.bits % 32 == 0) {

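The new `.ptr_slice_len_ptr` lowering above is a single `add dst, src, #8`, which relies on a slice being a (pointer, length) pair with the length stored one pointer-sized word after the pointer. A small illustration of that layout assumption (plain test code, not backend code):

```zig
const std = @import("std");

test "a slice is a two-word (ptr, len) pair" {
    var bytes: [4]u8 = .{ 1, 2, 3, 4 };
    const s: []u8 = &bytes;
    try std.testing.expectEqual(@as(usize, 4), s.len);
    // A slice occupies two usize-sized words, so on a 64-bit target the
    // len field lives at byte offset 8 from the slice's own address.
    try std.testing.expectEqual(2 * @sizeOf(usize), @sizeOf([]u8));
}
```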
@@ -4980,8 +4980,8 @@ pub const FuncGen = struct {
            .breakpoint => try self.airBreakpoint(inst),
            .ret_addr => try self.airRetAddr(inst),
            .frame_addr => try self.airFrameAddress(inst),
-            .@"try" => try self.airTry(body[i..], false),
-            .try_cold => try self.airTry(body[i..], true),
+            .@"try" => try self.airTry(inst, false),
+            .try_cold => try self.airTry(inst, true),
            .try_ptr => try self.airTryPtr(inst, false),
            .try_ptr_cold => try self.airTryPtr(inst, true),
            .intcast => try self.airIntCast(inst, false),

@@ -4989,7 +4989,7 @@ pub const FuncGen = struct {
            .trunc => try self.airTrunc(inst),
            .fptrunc => try self.airFptrunc(inst),
            .fpext => try self.airFpext(inst),
-            .load => try self.airLoad(body[i..]),
+            .load => try self.airLoad(inst),
            .not => try self.airNot(inst),
            .store => try self.airStore(inst, false),
            .store_safe => try self.airStore(inst, true),

@@ -5045,7 +5045,7 @@ pub const FuncGen = struct {
            .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),

            .struct_field_ptr => try self.airStructFieldPtr(inst),
-            .struct_field_val => try self.airStructFieldVal(body[i..]),
+            .struct_field_val => try self.airStructFieldVal(inst),

            .struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
            .struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),

@@ -5054,18 +5054,18 @@ pub const FuncGen = struct {

            .field_parent_ptr => try self.airFieldParentPtr(inst),

-            .array_elem_val => try self.airArrayElemVal(body[i..]),
-            .slice_elem_val => try self.airSliceElemVal(body[i..]),
+            .array_elem_val => try self.airArrayElemVal(inst),
+            .slice_elem_val => try self.airSliceElemVal(inst),
            .slice_elem_ptr => try self.airSliceElemPtr(inst),
-            .ptr_elem_val => try self.airPtrElemVal(body[i..]),
+            .ptr_elem_val => try self.airPtrElemVal(inst),
            .ptr_elem_ptr => try self.airPtrElemPtr(inst),

-            .optional_payload => try self.airOptionalPayload(body[i..]),
+            .optional_payload => try self.airOptionalPayload(inst),
            .optional_payload_ptr => try self.airOptionalPayloadPtr(inst),
            .optional_payload_ptr_set => try self.airOptionalPayloadPtrSet(inst),

-            .unwrap_errunion_payload => try self.airErrUnionPayload(body[i..], false),
-            .unwrap_errunion_payload_ptr => try self.airErrUnionPayload(body[i..], true),
+            .unwrap_errunion_payload => try self.airErrUnionPayload(inst, false),
+            .unwrap_errunion_payload_ptr => try self.airErrUnionPayload(inst, true),
            .unwrap_errunion_err => try self.airErrUnionErr(inst, false),
            .unwrap_errunion_err_ptr => try self.airErrUnionErr(inst, true),
            .errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst),

@@ -6266,19 +6266,14 @@ pub const FuncGen = struct {
        // No need to reset the insert cursor since this instruction is noreturn.
    }

-    fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index, err_cold: bool) !Builder.Value {
-        const pt = self.ng.pt;
-        const zcu = pt.zcu;
-        const inst = body_tail[0];
+    fn airTry(self: *FuncGen, inst: Air.Inst.Index, err_cold: bool) !Builder.Value {
        const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
        const err_union = try self.resolveInst(pl_op.operand);
        const extra = self.air.extraData(Air.Try, pl_op.payload);
        const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
        const err_union_ty = self.typeOf(pl_op.operand);
-        const payload_ty = self.typeOfIndex(inst);
-        const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
        const is_unused = self.liveness.isUnused(inst);
-        return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused, err_cold);
+        return lowerTry(self, err_union, body, err_union_ty, false, false, is_unused, err_cold);
    }

    fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index, err_cold: bool) !Builder.Value {

@@ -6824,11 +6819,10 @@ pub const FuncGen = struct {
        return self.wip.gepStruct(slice_llvm_ty, slice_ptr, index, "");
    }

-    fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
+    fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
        const o = self.ng.object;
        const pt = self.ng.pt;
        const zcu = pt.zcu;
-        const inst = body_tail[0];
        const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
        const slice_ty = self.typeOf(bin_op.lhs);
        const slice = try self.resolveInst(bin_op.lhs);

@@ -6838,9 +6832,6 @@ pub const FuncGen = struct {
        const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
        const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
        if (isByRef(elem_ty, zcu)) {
-            if (self.canElideLoad(body_tail))
-                return ptr;
-
            self.maybeMarkAllowZeroAccess(slice_ty.ptrInfo(zcu));

            const slice_align = (slice_ty.ptrAlignment(zcu).min(elem_ty.abiAlignment(zcu))).toLlvm();

@@ -6867,11 +6858,10 @@ pub const FuncGen = struct {
        return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
    }

-    fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
+    fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
        const o = self.ng.object;
        const pt = self.ng.pt;
        const zcu = pt.zcu;
-        const inst = body_tail[0];

        const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
        const array_ty = self.typeOf(bin_op.lhs);

@@ -6884,9 +6874,7 @@ pub const FuncGen = struct {
            try o.builder.intValue(try o.lowerType(pt, Type.usize), 0), rhs,
        };
        if (isByRef(elem_ty, zcu)) {
-            const elem_ptr =
-                try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
-            if (canElideLoad(self, body_tail)) return elem_ptr;
+            const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
            const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
            return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
        } else {

@@ -6900,11 +6888,10 @@ pub const FuncGen = struct {
        return self.wip.extractElement(array_llvm_val, rhs, "");
    }

-    fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
+    fn airPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
        const o = self.ng.object;
        const pt = self.ng.pt;
        const zcu = pt.zcu;
-        const inst = body_tail[0];
        const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
        const ptr_ty = self.typeOf(bin_op.lhs);
        const elem_ty = ptr_ty.childType(zcu);

@@ -6918,10 +6905,7 @@ pub const FuncGen = struct {
        else
            &.{rhs}, "");
        if (isByRef(elem_ty, zcu)) {
-            if (self.canElideLoad(body_tail)) return ptr;
-
            self.maybeMarkAllowZeroAccess(ptr_ty.ptrInfo(zcu));
-
            const ptr_align = (ptr_ty.ptrAlignment(zcu).min(elem_ty.abiAlignment(zcu))).toLlvm();
            return self.loadByRef(ptr, elem_ty, ptr_align, if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal);
        }

@@ -6974,11 +6958,10 @@ pub const FuncGen = struct {
        return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index);
    }

-    fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
+    fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
        const o = self.ng.object;
        const pt = self.ng.pt;
        const zcu = pt.zcu;
-        const inst = body_tail[0];
        const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
        const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
        const struct_ty = self.typeOf(struct_field.struct_operand);

@@ -7052,9 +7035,6 @@ pub const FuncGen = struct {
                    .flags = .{ .alignment = alignment },
                });
                if (isByRef(field_ty, zcu)) {
-                    if (canElideLoad(self, body_tail))
-                        return field_ptr;
-
                    assert(alignment != .none);
                    const field_alignment = alignment.toLlvm();
                    return self.loadByRef(field_ptr, field_ty, field_alignment, .normal);

@@ -7070,7 +7050,6 @@ pub const FuncGen = struct {
                    try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
                const payload_alignment = layout.payload_align.toLlvm();
                if (isByRef(field_ty, zcu)) {
-                    if (canElideLoad(self, body_tail)) return field_ptr;
                    return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal);
                } else {
                    return self.loadTruncate(.normal, field_ty, field_ptr, payload_alignment);

@@ -7829,11 +7808,10 @@ pub const FuncGen = struct {
        return self.wip.gepStruct(optional_llvm_ty, operand, 0, "");
    }

-    fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
+    fn airOptionalPayload(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
        const o = self.ng.object;
        const pt = self.ng.pt;
        const zcu = pt.zcu;
-        const inst = body_tail[0];
        const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
        const operand = try self.resolveInst(ty_op.operand);
        const optional_ty = self.typeOf(ty_op.operand);

@@ -7846,19 +7824,13 @@ pub const FuncGen = struct {
        }

        const opt_llvm_ty = try o.lowerType(pt, optional_ty);
-        const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
-        return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
+        return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, false);
    }

-    fn airErrUnionPayload(
-        self: *FuncGen,
-        body_tail: []const Air.Inst.Index,
-        operand_is_ptr: bool,
-    ) !Builder.Value {
+    fn airErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !Builder.Value {
        const o = self.ng.object;
        const pt = self.ng.pt;
        const zcu = pt.zcu;
-        const inst = body_tail[0];
        const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
        const operand = try self.resolveInst(ty_op.operand);
        const operand_ty = self.typeOf(ty_op.operand);

@@ -7877,7 +7849,6 @@ pub const FuncGen = struct {
            const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
            const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
            if (isByRef(payload_ty, zcu)) {
-                if (self.canElideLoad(body_tail)) return payload_ptr;
                return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal);
            }
            const payload_llvm_ty = err_union_llvm_ty.structFields(&o.builder)[offset];

@@ -9740,45 +9711,14 @@ pub const FuncGen = struct {
        return .none;
    }

-    /// As an optimization, we want to avoid unnecessary copies of isByRef=true
-    /// types. Here, we scan forward in the current block, looking to see if
-    /// this load dies before any side effects occur. In such case, we can
-    /// safely return the operand without making a copy.
-    ///
-    /// The first instruction of `body_tail` is the one whose copy we want to elide.
-    fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
-        const zcu = fg.ng.pt.zcu;
-        const ip = &zcu.intern_pool;
-        for (body_tail[1..]) |body_inst| {
-            switch (fg.liveness.categorizeOperand(fg.air, zcu, body_inst, body_tail[0], ip)) {
-                .none => continue,
-                .write, .noret, .complex => return false,
-                .tomb => return true,
-            }
-        }
-        // The only way to get here is to hit the end of a loop instruction
-        // (implicit repeat).
-        return false;
-    }
-
-    fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
+    fn airLoad(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
        const pt = fg.ng.pt;
        const zcu = pt.zcu;
-        const inst = body_tail[0];
        const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
        const ptr_ty = fg.typeOf(ty_op.operand);
        const ptr_info = ptr_ty.ptrInfo(zcu);
        const ptr = try fg.resolveInst(ty_op.operand);

-        elide: {
-            if (ptr_info.flags.alignment != .none) break :elide;
-            if (!isByRef(Type.fromInterned(ptr_info.child), zcu)) break :elide;
-            if (!canElideLoad(fg, body_tail)) break :elide;
-            return ptr;
-        }
-
        fg.maybeMarkAllowZeroAccess(ptr_info);

        return fg.load(ptr, ptr_ty);
    }

@@ -97,6 +97,7 @@ pub const Env = enum {
        .lld_linker,
        .coff_linker,
        .elf_linker,
+        .elf2_linker,
        .macho_linker,
        .c_linker,
        .wasm_linker,

@@ -163,6 +164,7 @@ pub const Env = enum {
            .incremental,
            .aarch64_backend,
            .elf_linker,
+            .elf2_linker,
            => true,
            else => Env.sema.supports(feature),
        },

@@ -210,6 +212,7 @@ pub const Env = enum {
            .legalize,
            .x86_64_backend,
            .elf_linker,
+            .elf2_linker,
            => true,
            else => Env.sema.supports(feature),
        },

@@ -282,6 +285,7 @@ pub const Feature = enum {
    lld_linker,
    coff_linker,
    elf_linker,
+    elf2_linker,
    macho_linker,
    c_linker,
    wasm_linker,

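For readers unfamiliar with the dev-feature gating being extended above: each `Env` whitelists the `Feature`s it supports directly and defers everything else to a broader environment, which is why `.elf2_linker` is added in several prongs. A toy sketch of that pattern (hypothetical names, not the real tables):

```zig
const std = @import("std");

const Feature = enum { sema, elf_linker, elf2_linker };

const Env = enum {
    full,
    sema,
    new_elf,

    fn supports(env: Env, feature: Feature) bool {
        return switch (env) {
            .full => true,
            .sema => feature == .sema,
            // A narrow env whitelists its own features and defers the
            // rest to a broader env, like `Env.sema.supports` in dev.zig.
            .new_elf => switch (feature) {
                .elf2_linker => true,
                else => Env.sema.supports(feature),
            },
        };
    }
};

test Env {
    try std.testing.expect(Env.new_elf.supports(.elf2_linker));
    try std.testing.expect(!Env.new_elf.supports(.elf_linker));
}
```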
src/link.zig (91 lines changed)
@@ -219,6 +219,7 @@ pub const Diags = struct {
    }

    pub fn addError(diags: *Diags, comptime format: []const u8, args: anytype) void {
+        @branchHint(.cold);
        return addErrorSourceLocation(diags, .none, format, args);
    }

@@ -529,7 +530,7 @@ pub const File = struct {
            const lld: *Lld = try .createEmpty(arena, comp, emit, options);
            return &lld.base;
        }
-        switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt)) {
+        switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt, comp.config.use_new_linker)) {
            .plan9 => return error.UnsupportedObjectFormat,
            inline else => |tag| {
                dev.check(tag.devFeature());

@@ -552,7 +553,7 @@ pub const File = struct {
            const lld: *Lld = try .createEmpty(arena, comp, emit, options);
            return &lld.base;
        }
-        switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt)) {
+        switch (Tag.fromObjectFormat(comp.root_mod.resolved_target.result.ofmt, comp.config.use_new_linker)) {
            .plan9 => return error.UnsupportedObjectFormat,
            inline else => |tag| {
                dev.check(tag.devFeature());

@@ -579,7 +580,8 @@ pub const File = struct {
        const emit = base.emit;
        if (base.child_pid) |pid| {
            if (builtin.os.tag == .windows) {
-                base.cast(.coff).?.ptraceAttach(pid) catch |err| {
+                const coff_file = base.cast(.coff).?;
+                coff_file.ptraceAttach(pid) catch |err| {
                    log.warn("attaching failed with error: {s}", .{@errorName(err)});
                };
            } else {

@@ -597,8 +599,11 @@ pub const File = struct {
                .linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| {
                    log.warn("ptrace failure: {s}", .{@errorName(err)});
                },
-                .macos => base.cast(.macho).?.ptraceAttach(pid) catch |err| {
-                    log.warn("attaching failed with error: {s}", .{@errorName(err)});
+                .macos => {
+                    const macho_file = base.cast(.macho).?;
+                    macho_file.ptraceAttach(pid) catch |err| {
+                        log.warn("attaching failed with error: {s}", .{@errorName(err)});
+                    };
                },
                .windows => unreachable,
                else => return error.HotSwapUnavailableOnHostOperatingSystem,

@@ -613,6 +618,20 @@ pub const File = struct {
                    .mode = determineMode(output_mode, link_mode),
                });
            },
+            .elf2 => {
+                const elf = base.cast(.elf2).?;
+                if (base.file == null) {
+                    elf.mf.file = try base.emit.root_dir.handle.createFile(base.emit.sub_path, .{
+                        .truncate = false,
+                        .read = true,
+                        .mode = determineMode(comp.config.output_mode, comp.config.link_mode),
+                    });
+                    base.file = elf.mf.file;
+                    try elf.mf.ensureTotalCapacity(
+                        @intCast(elf.mf.nodes.items[0].location().resolve(&elf.mf)[1]),
+                    );
+                }
+            },
            .c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }),
            .plan9 => unreachable,
        }

@@ -669,14 +688,30 @@ pub const File = struct {

            if (base.child_pid) |pid| {
                switch (builtin.os.tag) {
-                    .macos => base.cast(.macho).?.ptraceDetach(pid) catch |err| {
-                        log.warn("detaching failed with error: {s}", .{@errorName(err)});
+                    .macos => {
+                        const macho_file = base.cast(.macho).?;
+                        macho_file.ptraceDetach(pid) catch |err| {
+                            log.warn("detaching failed with error: {s}", .{@errorName(err)});
+                        };
+                    },
+                    .windows => {
+                        const coff_file = base.cast(.coff).?;
+                        coff_file.ptraceDetach(pid);
                    },
-                    .windows => base.cast(.coff).?.ptraceDetach(pid),
                    else => return error.HotSwapUnavailableOnHostOperatingSystem,
                }
            }
        },
+        .elf2 => {
+            const elf = base.cast(.elf2).?;
+            if (base.file) |f| {
+                elf.mf.unmap();
+                assert(elf.mf.file.handle == f.handle);
+                elf.mf.file = undefined;
+                f.close();
+                base.file = null;
+            }
+        },
        .c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }),
        .plan9 => unreachable,
    }

@@ -793,6 +828,7 @@ pub const File = struct {
            .spirv => {},
            .goff, .xcoff => {},
            .plan9 => unreachable,
+            .elf2 => {},
            inline else => |tag| {
                dev.check(tag.devFeature());
                return @as(*tag.Type(), @fieldParentPtr("base", base)).updateLineNumber(pt, ti_id);

@@ -825,6 +861,26 @@ pub const File = struct {
        }
    }

+    pub fn idle(base: *File, tid: Zcu.PerThread.Id) !bool {
+        switch (base.tag) {
+            else => return false,
+            inline .elf2 => |tag| {
+                dev.check(tag.devFeature());
+                return @as(*tag.Type(), @fieldParentPtr("base", base)).idle(tid);
+            },
+        }
+    }
+
+    pub fn updateErrorData(base: *File, pt: Zcu.PerThread) !void {
+        switch (base.tag) {
+            else => {},
+            inline .elf2 => |tag| {
+                dev.check(tag.devFeature());
+                return @as(*tag.Type(), @fieldParentPtr("base", base)).updateErrorData(pt);
+            },
+        }
+    }
+
    pub const FlushError = error{
        /// Indicates an error will be present in `Compilation.link_diags`.
        LinkFailure,

@@ -1099,7 +1155,7 @@ pub const File = struct {
        if (base.zcu_object_basename != null) return;

        switch (base.tag) {
-            inline .wasm => |tag| {
+            inline .elf2, .wasm => |tag| {
                dev.check(tag.devFeature());
                return @as(*tag.Type(), @fieldParentPtr("base", base)).prelink(base.comp.link_prog_node);
            },

@@ -1110,6 +1166,7 @@ pub const File = struct {
    pub const Tag = enum {
        coff,
        elf,
+        elf2,
        macho,
        c,
        wasm,

@@ -1123,6 +1180,7 @@ pub const File = struct {
        return switch (tag) {
            .coff => Coff,
            .elf => Elf,
+            .elf2 => Elf2,
            .macho => MachO,
            .c => C,
            .wasm => Wasm,

@@ -1134,10 +1192,10 @@ pub const File = struct {
        };
    }

-    fn fromObjectFormat(ofmt: std.Target.ObjectFormat) Tag {
+    fn fromObjectFormat(ofmt: std.Target.ObjectFormat, use_new_linker: bool) Tag {
        return switch (ofmt) {
            .coff => .coff,
-            .elf => .elf,
+            .elf => if (use_new_linker) .elf2 else .elf,
            .macho => .macho,
            .wasm => .wasm,
            .plan9 => .plan9,

@@ -1223,6 +1281,7 @@ pub const File = struct {
    pub const C = @import("link/C.zig");
    pub const Coff = @import("link/Coff.zig");
    pub const Elf = @import("link/Elf.zig");
+    pub const Elf2 = @import("link/Elf2.zig");
    pub const MachO = @import("link/MachO.zig");
    pub const SpirV = @import("link/SpirV.zig");
    pub const Wasm = @import("link/Wasm.zig");

@@ -1548,6 +1607,9 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
        }
    }
}
+pub fn doIdleTask(comp: *Compilation, tid: usize) error{ OutOfMemory, LinkFailure }!bool {
+    return if (comp.bin_file) |lf| lf.idle(@enumFromInt(tid)) else false;
+}
/// After the main pipeline is done, but before flush, the compilation may need to link one final
/// `Nav` into the binary: the `builtin.test_functions` value. Since the link thread isn't running
/// by then, we expose this function which can be called directly.

@@ -1573,6 +1635,13 @@ pub fn linkTestFunctionsNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
        };
    }
}
+pub fn updateErrorData(pt: Zcu.PerThread) void {
+    const comp = pt.zcu.comp;
+    if (comp.bin_file) |lf| lf.updateErrorData(pt) catch |err| switch (err) {
+        error.OutOfMemory => comp.link_diags.setAllocFailure(),
+        error.LinkFailure => {},
+    };
+}

/// Provided by the CLI, processed into `LinkInput` instances at the start of
/// the compilation pipeline.

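The new `File.idle` and `File.updateErrorData` above use the `inline .elf2 => |tag|` dispatch that link.zig already relies on elsewhere: the comptime-known tag lets `@fieldParentPtr` recover the concrete linker type from the embedded `base` field. A self-contained sketch of that pattern (toy types, not the real `link.File`):

```zig
const std = @import("std");

const Tag = enum { elf2, wasm };
const Base = struct { tag: Tag };

const Elf2 = struct {
    base: Base,

    fn idle(elf2: *Elf2) bool {
        _ = elf2;
        return true;
    }
};

fn idle(base: *Base) bool {
    switch (base.tag) {
        // The inline prong makes `tag` comptime-known, so @fieldParentPtr
        // can recover the concrete type that embeds `base`.
        inline .elf2 => |tag| {
            comptime std.debug.assert(tag == .elf2);
            const elf2: *Elf2 = @fieldParentPtr("base", base);
            return elf2.idle();
        },
        else => return false,
    }
}

test idle {
    var elf2: Elf2 = .{ .base = .{ .tag = .elf2 } };
    try std.testing.expect(idle(&elf2.base));
}
```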
@@ -953,7 +953,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
}

fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
-    if (!coff.base.comp.incremental) return;
+    if (!coff.base.comp.config.incremental) return;
    // TODO: reverse-lookup might come in handy here
    for (coff.relocs.values()) |*relocs| {
        for (relocs.items) |*reloc| {

@@ -964,7 +964,7 @@ fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
}

fn markRelocsDirtyByAddress(coff: *Coff, addr: u32) void {
-    if (!coff.base.comp.incremental) return;
+    if (!coff.base.comp.config.incremental) return;
    const got_moved = blk: {
        const sect_id = coff.got_section_index orelse break :blk false;
        break :blk coff.sections.items(.header)[sect_id].virtual_address >= addr;

@@ -1111,20 +1111,24 @@ pub fn updateFunc(

    coff.navs.getPtr(func.owner_nav).?.section = coff.text_section_index.?;

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

-    try codegen.emitFunction(
+    codegen.emitFunction(
        &coff.base,
        pt,
        zcu.navSrcLoc(nav_index),
        func_index,
        coff.getAtom(atom_index).getSymbolIndex().?,
        mir,
-        &code_buffer,
+        &aw.writer,
        .none,
-    );
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };

-    try coff.updateNavCode(pt, nav_index, code_buffer.items, .FUNCTION);
+    try coff.updateNavCode(pt, nav_index, aw.written(), .FUNCTION);

    // Exports will be updated by `Zcu.processExports` after the update.
}

@@ -1145,18 +1149,18 @@ fn lowerConst(
) !LowerConstResult {
    const gpa = coff.base.comp.gpa;

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

    const atom_index = try coff.createAtom();
    const sym = coff.getAtom(atom_index).getSymbolPtr(coff);
    try coff.setSymbolName(sym, name);
    sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(sect_id + 1));

-    try codegen.generateSymbol(&coff.base, pt, src_loc, val, &code_buffer, .{
+    try codegen.generateSymbol(&coff.base, pt, src_loc, val, &aw.writer, .{
        .atom_index = coff.getAtom(atom_index).getSymbolIndex().?,
    });
-    const code = code_buffer.items;
+    const code = aw.written();

    const atom = coff.getAtomPtr(atom_index);
    atom.size = @intCast(code.len);

@@ -1170,7 +1174,7 @@ fn lowerConst(
    log.debug("allocated atom for {s} at 0x{x}", .{ name, atom.getSymbol(coff).value });
    log.debug(" (required alignment 0x{x})", .{required_alignment});

-    try coff.writeAtom(atom_index, code, coff.base.comp.incremental);
+    try coff.writeAtom(atom_index, code, coff.base.comp.config.incremental);

    return .{ .ok = atom_index };
}

@@ -1214,19 +1218,22 @@ pub fn updateNav(

    coff.navs.getPtr(nav_index).?.section = coff.getNavOutputSection(nav_index);

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

-    try codegen.generateSymbol(
+    codegen.generateSymbol(
        &coff.base,
        pt,
        zcu.navSrcLoc(nav_index),
        nav_init,
-        &code_buffer,
+        &aw.writer,
        .{ .atom_index = atom.getSymbolIndex().? },
-    );
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };

-    try coff.updateNavCode(pt, nav_index, code_buffer.items, .NULL);
+    try coff.updateNavCode(pt, nav_index, aw.written(), .NULL);
    }

    // Exports will be updated by `Zcu.processExports` after the update.

@@ -1244,8 +1251,8 @@ fn updateLazySymbolAtom(
    const gpa = comp.gpa;

    var required_alignment: InternPool.Alignment = .none;
-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

    const name = try allocPrint(gpa, "__lazy_{s}_{f}", .{
        @tagName(sym.kind),

@@ -1262,11 +1269,11 @@ fn updateLazySymbolAtom(
        src,
        sym,
        &required_alignment,
-        &code_buffer,
+        &aw.writer,
        .none,
        .{ .atom_index = local_sym_index },
    );
-    const code = code_buffer.items;
+    const code = aw.written();

    const atom = coff.getAtomPtr(atom_index);
    const symbol = atom.getSymbolPtr(coff);

@@ -1285,7 +1292,7 @@ fn updateLazySymbolAtom(
    symbol.value = vaddr;

    try coff.addGotEntry(.{ .sym_index = local_sym_index });
-    try coff.writeAtom(atom_index, code, coff.base.comp.incremental);
+    try coff.writeAtom(atom_index, code, coff.base.comp.config.incremental);
}

pub fn getOrCreateAtomForLazySymbol(

@@ -1437,7 +1444,7 @@ fn updateNavCode(
        };
    }

-    coff.writeAtom(atom_index, code, coff.base.comp.incremental) catch |err| switch (err) {
+    coff.writeAtom(atom_index, code, coff.base.comp.config.incremental) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |e| return coff.base.cgFail(nav_index, "failed to write atom: {s}", .{@errorName(e)}),
    };

@@ -1539,14 +1546,12 @@ pub fn updateExports(
        sym.section_number = @as(coff_util.SectionNumber, @enumFromInt(metadata.section + 1));
        sym.type = atom.getSymbol(coff).type;

-        switch (exp.opts.linkage) {
-            .strong => {
-                sym.storage_class = .EXTERNAL;
-            },
-            .internal => @panic("TODO Internal"),
+        sym.storage_class = switch (exp.opts.linkage) {
+            .internal => .EXTERNAL,
+            .strong => .EXTERNAL,
            .weak => @panic("TODO WeakExternal"),
            else => unreachable,
-        }
+        };

        try coff.resolveGlobalSymbol(sym_loc);
    }

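A recurring edit across the linker backends in this commit: codegen now writes through `std.Io.Writer.Allocating`, and since an allocating writer can only fail to write when allocation fails, `error.WriteFailed` is remapped to `error.OutOfMemory` at each call site. A minimal sketch of that idiom (the `generate` callee here is hypothetical):

```zig
const std = @import("std");

/// Hypothetical stand-in for codegen.generateSymbol / codegen.emitFunction.
fn generate(w: *std.Io.Writer) (std.Io.Writer.Error || error{CodegenFail})!void {
    try w.writeAll(&.{ 0xc0, 0x03, 0x5f, 0xd6 });
}

fn lower(gpa: std.mem.Allocator) ![]u8 {
    var aw: std.Io.Writer.Allocating = .init(gpa);
    defer aw.deinit();
    // For an Allocating writer, a write failure can only mean the backing
    // allocation failed, so the error is translated rather than leaked.
    generate(&aw.writer) catch |err| switch (err) {
        error.WriteFailed => return error.OutOfMemory,
        else => |e| return e,
    };
    return gpa.dupe(u8, aw.written());
}

test lower {
    const code = try lower(std.testing.allocator);
    defer std.testing.allocator.free(code);
    try std.testing.expectEqual(@as(usize, 4), code.len);
}
```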
@@ -2126,19 +2126,22 @@ pub const WipNav = struct {
        const size = if (ty.hasRuntimeBits(wip_nav.pt.zcu)) ty.abiSize(wip_nav.pt.zcu) else 0;
        try diw.writeUleb128(size);
        if (size == 0) return;
-        var bytes = wip_nav.debug_info.toArrayList();
-        defer wip_nav.debug_info = .fromArrayList(wip_nav.dwarf.gpa, &bytes);
-        const old_len = bytes.items.len;
+        const old_end = wip_nav.debug_info.writer.end;
        try codegen.generateSymbol(
            wip_nav.dwarf.bin_file,
            wip_nav.pt,
            src_loc,
            val,
-            &bytes,
+            &wip_nav.debug_info.writer,
            .{ .debug_output = .{ .dwarf = wip_nav } },
        );
-        if (old_len + size != bytes.items.len) {
-            std.debug.print("{f} [{}]: {} != {}\n", .{ ty.fmt(wip_nav.pt), ty.toIntern(), size, bytes.items.len - old_len });
+        if (old_end + size != wip_nav.debug_info.writer.end) {
+            std.debug.print("{f} [{}]: {} != {}\n", .{
+                ty.fmt(wip_nav.pt),
+                ty.toIntern(),
+                size,
+                wip_nav.debug_info.writer.end - old_end,
+            });
            unreachable;
        }
    }

@@ -6429,7 +6432,7 @@ fn sleb128Bytes(value: anytype) u32 {
/// overrides `-fno-incremental` for testing incremental debug info until `-fincremental` is functional
const force_incremental = false;
inline fn incremental(dwarf: Dwarf) bool {
-    return force_incremental or dwarf.bin_file.comp.incremental;
+    return force_incremental or dwarf.bin_file.comp.config.incremental;
}

const Allocator = std.mem.Allocator;

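After the Writer migration above, "how many bytes did the callee emit" is measured as a delta of the writer's `end` index instead of an ArrayList length. A tiny illustration:

```zig
const std = @import("std");

test "measure emitted bytes via the writer's end index" {
    var buf: [16]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    const old_end = w.end;
    try w.writeAll("abc");
    // Equivalent to the old `bytes.items.len - old_len` check in WipNav.
    try std.testing.expectEqual(@as(usize, 3), w.end - old_end);
}
```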
@@ -47,7 +47,7 @@ fn newSymbolAssumeCapacity(self: *LinkerDefined, name_off: u32, elf_file: *Elf)
    const esym = self.symtab.addOneAssumeCapacity();
    esym.* = .{
        .st_name = name_off,
-        .st_info = elf.STB_WEAK << 4,
+        .st_info = @as(u8, elf.STB_WEAK) << 4,
        .st_other = @intFromEnum(elf.STV.HIDDEN),
        .st_shndx = elf.SHN_ABS,
        .st_value = 0,

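The `@as(u8, ...)` in this hunk widens the binding constant before the shift. If `STB_WEAK` is typed as a narrow integer such as `u4`, `STB_WEAK << 4` would not even compile, because the shift amount for a `u4` operand must fit in a `u2`; promoting to `u8` first sidesteps that. A small sketch of the general rule (the `u4` typing here is an assumption for illustration, and may not match the actual `std.elf` declaration):

```zig
const std = @import("std");

test "widen a narrow constant before shifting into the high nibble" {
    const binding: u4 = 2; // e.g. STB_WEAK
    // `binding << 4` would be rejected: a u4 operand only admits shift
    // amounts 0..3. Widening to u8 first keeps all bits of the packed
    // st_info byte (binding in bits 4..7, symbol type in bits 0..3).
    const st_info = @as(u8, binding) << 4;
    try std.testing.expectEqual(@as(u8, 0x20), st_info);
}
```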
@@ -105,7 +105,7 @@ pub fn parseHeader(
        if (amt != buf.len) return error.UnexpectedEndOfFile;
    }
    if (!mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF")) return error.BadMagic;
-    if (ehdr.e_ident[elf.EI_VERSION] != 1) return error.BadElfVersion;
+    if (ehdr.e_ident[elf.EI.VERSION] != 1) return error.BadElfVersion;
    if (ehdr.e_type != elf.ET.DYN) return error.NotSharedObject;

    if (target.toElfMachine() != ehdr.e_machine)

@@ -277,8 +277,8 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
        pt,
        .{ .kind = .code, .ty = .anyerror_type },
        metadata.text_symbol_index,
-    ) catch |err| return switch (err) {
-        error.CodegenFail => error.LinkFailure,
+    ) catch |err| switch (err) {
+        error.CodegenFail => return error.LinkFailure,
        else => |e| return e,
    };
    if (metadata.rodata_state != .unused) self.updateLazySymbol(

@@ -286,8 +286,8 @@ pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
        pt,
        .{ .kind = .const_data, .ty = .anyerror_type },
        metadata.rodata_symbol_index,
-    ) catch |err| return switch (err) {
-        error.CodegenFail => error.LinkFailure,
+    ) catch |err| switch (err) {
+        error.CodegenFail => return error.LinkFailure,
        else => |e| return e,
    };
}

@@ -1533,22 +1533,26 @@ pub fn updateFunc(
    const sym_index = try self.getOrCreateMetadataForNav(zcu, func.owner_nav);
    self.atom(self.symbol(sym_index).ref.index).?.freeRelocs(self);

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

    var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
    defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();

-    try codegen.emitFunction(
+    codegen.emitFunction(
        &elf_file.base,
        pt,
        zcu.navSrcLoc(func.owner_nav),
        func_index,
        sym_index,
        mir,
-        &code_buffer,
+        &aw.writer,
        if (debug_wip_nav) |*dn| .{ .dwarf = dn } else .none,
-    );
-    const code = code_buffer.items;
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    const code = aw.written();

    const shndx = try self.getNavShdrIndex(elf_file, zcu, func.owner_nav, sym_index, code);
    log.debug("setting shdr({x},{s}) for {f}", .{

@@ -1663,21 +1667,24 @@ pub fn updateNav(
    const sym_index = try self.getOrCreateMetadataForNav(zcu, nav_index);
    self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(zcu.gpa);
+    var aw: std.Io.Writer.Allocating = .init(zcu.gpa);
+    defer aw.deinit();

    var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
    defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();

-    try codegen.generateSymbol(
+    codegen.generateSymbol(
        &elf_file.base,
        pt,
        zcu.navSrcLoc(nav_index),
        Value.fromInterned(nav_init),
-        &code_buffer,
+        &aw.writer,
        .{ .atom_index = sym_index },
-    );
-    const code = code_buffer.items;
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    const code = aw.written();

    const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
    log.debug("setting shdr({x},{s}) for {f}", .{

@@ -1722,8 +1729,8 @@ fn updateLazySymbol(
    const gpa = zcu.gpa;

    var required_alignment: InternPool.Alignment = .none;
-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

    const name_str_index = blk: {
        const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{f}", .{

@@ -1734,18 +1741,20 @@ fn updateLazySymbol(
        break :blk try self.strtab.insert(gpa, name);
    };

-    const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
-    try codegen.generateLazySymbol(
+    codegen.generateLazySymbol(
        &elf_file.base,
        pt,
-        src,
+        Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse .unneeded,
        sym,
        &required_alignment,
-        &code_buffer,
+        &aw.writer,
        .none,
        .{ .atom_index = symbol_index },
-    );
-    const code = code_buffer.items;
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    const code = aw.written();

    const output_section_index = switch (sym.kind) {
        .code => if (self.text_index) |sym_index|

@@ -1807,21 +1816,24 @@ fn lowerConst(
) !codegen.SymbolResult {
    const gpa = pt.zcu.gpa;

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

    const name_off = try self.addString(gpa, name);
    const sym_index = try self.newSymbolWithAtom(gpa, name_off);

-    try codegen.generateSymbol(
+    codegen.generateSymbol(
        &elf_file.base,
        pt,
        src_loc,
        val,
-        &code_buffer,
+        &aw.writer,
        .{ .atom_index = sym_index },
-    );
-    const code = code_buffer.items;
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    const code = aw.written();

    const local_sym = self.symbol(sym_index);
    const local_esym = &self.symtab.items(.elf_sym)[local_sym.esym_index];

src/link/Elf2.zig (new file, 2036 lines): diff suppressed because it is too large.
@@ -784,22 +784,26 @@ pub fn updateFunc(
    const sym_index = try self.getOrCreateMetadataForNav(macho_file, func.owner_nav);
    self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

    var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
    defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();

-    try codegen.emitFunction(
+    codegen.emitFunction(
        &macho_file.base,
        pt,
        zcu.navSrcLoc(func.owner_nav),
        func_index,
        sym_index,
        mir,
-        &code_buffer,
+        &aw.writer,
        if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
-    );
-    const code = code_buffer.items;
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    const code = aw.written();

    const sect_index = try self.getNavOutputSection(macho_file, zcu, func.owner_nav, code);
    const old_rva, const old_alignment = blk: {

@@ -895,21 +899,24 @@ pub fn updateNav(
    const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index);
    self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(zcu.gpa);
+    var aw: std.Io.Writer.Allocating = .init(zcu.gpa);
+    defer aw.deinit();

    var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
    defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();

-    try codegen.generateSymbol(
+    codegen.generateSymbol(
        &macho_file.base,
        pt,
        zcu.navSrcLoc(nav_index),
        Value.fromInterned(nav_init),
-        &code_buffer,
+        &aw.writer,
        .{ .atom_index = sym_index },
-    );
-    const code = code_buffer.items;
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    const code = aw.written();

    const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code);
    if (isThreadlocal(macho_file, nav_index))

@@ -1198,21 +1205,24 @@ fn lowerConst(
) !codegen.SymbolResult {
    const gpa = macho_file.base.comp.gpa;

-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

    const name_str = try self.addString(gpa, name);
    const sym_index = try self.newSymbolWithAtom(gpa, name_str, macho_file);

-    try codegen.generateSymbol(
+    codegen.generateSymbol(
        &macho_file.base,
        pt,
        src_loc,
        val,
-        &code_buffer,
+        &aw.writer,
        .{ .atom_index = sym_index },
-    );
-    const code = code_buffer.items;
+    ) catch |err| switch (err) {
+        error.WriteFailed => return error.OutOfMemory,
+        else => |e| return e,
+    };
+    const code = aw.written();

    const sym = &self.symbols.items[sym_index];
    sym.out_n_sect = output_section_index;

@@ -1349,8 +1359,8 @@ fn updateLazySymbol(
    const gpa = zcu.gpa;

    var required_alignment: Atom.Alignment = .none;
-    var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
-    defer code_buffer.deinit(gpa);
+    var aw: std.Io.Writer.Allocating = .init(gpa);
+    defer aw.deinit();

    const name_str = blk: {
        const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{f}", .{

@@ -1368,11 +1378,11 @@ fn updateLazySymbol(
        src,
        lazy_sym,
        &required_alignment,
-        &code_buffer,
+        &aw.writer,
        .none,
        .{ .atom_index = symbol_index },
    );
-    const code = code_buffer.items;
+    const code = aw.written();

    const output_section_index = switch (lazy_sym.kind) {
        .code => macho_file.zig_text_sect_index.?,

src/link/MappedFile.zig (new file, 927 lines)
@@ -0,0 +1,927 @@
file: std.fs.File,
flags: packed struct {
    block_size: std.mem.Alignment,
    copy_file_range_unsupported: bool,
    fallocate_punch_hole_unsupported: bool,
    fallocate_insert_range_unsupported: bool,
},
section: if (is_windows) windows.HANDLE else void,
contents: []align(std.heap.page_size_min) u8,
nodes: std.ArrayList(Node),
free_ni: Node.Index,
large: std.ArrayList(u64),
updates: std.ArrayList(Node.Index),
update_prog_node: std.Progress.Node,
writers: std.SinglyLinkedList,

pub const Error = std.posix.MMapError ||
    std.posix.MRemapError ||
    std.fs.File.SetEndPosError ||
    std.fs.File.CopyRangeError ||
    error{NotFile};

pub fn init(file: std.fs.File, gpa: std.mem.Allocator) !MappedFile {
    var mf: MappedFile = .{
        .file = file,
        .flags = undefined,
        .section = if (is_windows) windows.INVALID_HANDLE_VALUE else {},
        .contents = &.{},
        .nodes = .empty,
        .free_ni = .none,
        .large = .empty,
        .updates = .empty,
        .update_prog_node = .none,
        .writers = .{},
    };
    errdefer mf.deinit(gpa);
    const size: u64, const blksize = if (is_windows)
        .{ try windows.GetFileSizeEx(file.handle), 1 }
    else stat: {
        const stat = try std.posix.fstat(mf.file.handle);
        if (!std.posix.S.ISREG(stat.mode)) return error.PathAlreadyExists;
        break :stat .{ @bitCast(stat.size), stat.blksize };
    };
    mf.flags = .{
        .block_size = .fromByteUnits(
            std.math.ceilPowerOfTwoAssert(usize, @max(std.heap.pageSize(), blksize)),
        ),
        .copy_file_range_unsupported = false,
        .fallocate_insert_range_unsupported = false,
        .fallocate_punch_hole_unsupported = false,
    };
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    assert(try mf.addNode(gpa, .{
        .add_node = .{
            .size = size,
            .fixed = true,
        },
    }) == Node.Index.root);
    try mf.ensureTotalCapacity(@intCast(size));
    return mf;
}

pub fn deinit(mf: *MappedFile, gpa: std.mem.Allocator) void {
    mf.unmap();
    mf.nodes.deinit(gpa);
    mf.large.deinit(gpa);
    mf.updates.deinit(gpa);
    mf.update_prog_node.end();
    assert(mf.writers.first == null);
    mf.* = undefined;
}

pub const Node = extern struct {
    parent: Node.Index,
    prev: Node.Index,
    next: Node.Index,
    first: Node.Index,
    last: Node.Index,
    flags: Flags,
    location_payload: Location.Payload,

    pub const Flags = packed struct(u32) {
        location_tag: Location.Tag,
        alignment: std.mem.Alignment,
        /// Whether this node can be moved.
        fixed: bool,
        /// Whether this node has been moved.
        moved: bool,
        /// Whether this node has been resized.
        resized: bool,
        /// Whether this node might contain non-zero bytes.
        has_content: bool,
        unused: @Type(.{ .int = .{
            .signedness = .unsigned,
            .bits = 32 - @bitSizeOf(std.mem.Alignment) - 5,
        } }) = 0,
    };

    pub const Location = union(enum(u1)) {
        small: extern struct {
            /// Relative to `parent`.
            offset: u32,
            size: u32,
        },
        large: extern struct {
            index: usize,
            unused: @Type(.{ .int = .{
                .signedness = .unsigned,
                .bits = 64 - @bitSizeOf(usize),
            } }) = 0,
        },

        pub const Tag = @typeInfo(Location).@"union".tag_type.?;
        pub const Payload = @Type(.{ .@"union" = .{
            .layout = .@"extern",
            .tag_type = null,
            .fields = @typeInfo(Location).@"union".fields,
            .decls = &.{},
        } });

        pub fn resolve(loc: Location, mf: *const MappedFile) [2]u64 {
            return switch (loc) {
                .small => |small| .{ small.offset, small.size },
                .large => |large| mf.large.items[large.index..][0..2].*,
            };
        }
    };
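    // Editor's aside (not part of MappedFile.zig): the Location encoding above
    // is a small/large split. Offset/size pairs that both fit in u32 are kept
    // inline in the 8-byte payload; anything larger spills into the shared
    // `large` table as two consecutive u64s, and the node stores only an index:
    //
    //     .{ .small = .{ .offset = 64, .size = 4096 } } // inline
    //     .{ .large = .{ .index = n } }                 // mf.large.items[n..][0..2]
    //
    // Either way, `resolve` hands back the pair as [2]u64, so a node stays the
    // same size no matter how big the mapped range grows.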
|
||||
pub const Index = enum(u32) {
|
||||
none,
|
||||
_,
|
||||
|
||||
pub const root: Node.Index = .none;
|
||||
|
||||
fn get(ni: Node.Index, mf: *const MappedFile) *Node {
|
||||
return &mf.nodes.items[@intFromEnum(ni)];
|
||||
}
|
||||
|
||||
pub fn childrenMoved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
|
||||
var child_ni = ni.get(mf).last;
|
||||
while (child_ni != .none) {
|
||||
try child_ni.moved(gpa, mf);
|
||||
child_ni = child_ni.get(mf).prev;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hasMoved(ni: Node.Index, mf: *const MappedFile) bool {
|
||||
var parent_ni = ni;
|
||||
while (parent_ni != Node.Index.root) {
|
||||
const parent = parent_ni.get(mf);
|
||||
if (parent.flags.moved) return true;
|
||||
parent_ni = parent.parent;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
pub fn moved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
|
||||
try mf.updates.ensureUnusedCapacity(gpa, 1);
|
||||
ni.movedAssumeCapacity(mf);
|
||||
}
|
||||
pub fn cleanMoved(ni: Node.Index, mf: *const MappedFile) bool {
|
||||
const node_moved = &ni.get(mf).flags.moved;
|
||||
defer node_moved.* = false;
|
||||
return node_moved.*;
|
||||
}
|
||||
fn movedAssumeCapacity(ni: Node.Index, mf: *MappedFile) void {
|
||||
var parent_ni = ni;
|
||||
while (parent_ni != Node.Index.root) {
|
||||
const parent_node = parent_ni.get(mf);
|
||||
if (parent_node.flags.moved) return;
|
||||
parent_ni = parent_node.parent;
|
||||
}
|
||||
const node = ni.get(mf);
|
||||
node.flags.moved = true;
|
||||
if (node.flags.resized) return;
|
||||
mf.updates.appendAssumeCapacity(ni);
|
||||
mf.update_prog_node.increaseEstimatedTotalItems(1);
|
||||
}
|
||||
|
||||
pub fn hasResized(ni: Node.Index, mf: *const MappedFile) bool {
|
||||
return ni.get(mf).flags.resized;
|
||||
}
|
||||
pub fn resized(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
|
||||
try mf.updates.ensureUnusedCapacity(gpa, 1);
|
||||
ni.resizedAssumeCapacity(mf);
|
||||
}
|
||||
pub fn cleanResized(ni: Node.Index, mf: *const MappedFile) bool {
|
||||
const node_resized = &ni.get(mf).flags.resized;
|
||||
defer node_resized.* = false;
|
||||
return node_resized.*;
|
||||
}
|
||||
fn resizedAssumeCapacity(ni: Node.Index, mf: *MappedFile) void {
|
||||
const node = ni.get(mf);
|
||||
if (node.flags.resized) return;
|
||||
node.flags.resized = true;
|
||||
if (node.flags.moved) return;
|
||||
mf.updates.appendAssumeCapacity(ni);
|
||||
mf.update_prog_node.increaseEstimatedTotalItems(1);
|
||||
}
|
||||
|
||||
pub fn alignment(ni: Node.Index, mf: *const MappedFile) std.mem.Alignment {
|
||||
return ni.get(mf).flags.alignment;
|
||||
}
|
||||
|
||||
fn setLocationAssumeCapacity(ni: Node.Index, mf: *MappedFile, offset: u64, size: u64) void {
|
||||
const node = ni.get(mf);
|
||||
if (size == 0) node.flags.has_content = false;
|
||||
switch (node.location()) {
|
||||
.small => |small| {
|
||||
if (small.offset != offset) ni.movedAssumeCapacity(mf);
|
||||
if (small.size != size) ni.resizedAssumeCapacity(mf);
|
||||
if (std.math.cast(u32, offset)) |small_offset| {
|
||||
if (std.math.cast(u32, size)) |small_size| {
|
||||
node.location_payload.small = .{
|
||||
.offset = small_offset,
|
||||
.size = small_size,
|
||||
};
|
||||
return;
|
||||
}
|
||||
}
|
||||
defer mf.large.appendSliceAssumeCapacity(&.{ offset, size });
|
||||
node.flags.location_tag = .large;
|
||||
node.location_payload = .{ .large = .{ .index = mf.large.items.len } };
|
||||
},
|
||||
.large => |large| {
|
||||
const large_items = mf.large.items[large.index..][0..2];
|
||||
if (large_items[0] != offset) ni.movedAssumeCapacity(mf);
|
||||
if (large_items[1] != size) ni.resizedAssumeCapacity(mf);
|
||||
large_items.* = .{ offset, size };
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn location(ni: Node.Index, mf: *const MappedFile) Location {
|
||||
return ni.get(mf).location();
|
||||
}
|
||||
|
||||
        pub fn fileLocation(
            ni: Node.Index,
            mf: *const MappedFile,
            set_has_content: bool,
        ) struct { offset: u64, size: u64 } {
            var offset, const size = ni.location(mf).resolve(mf);
            var parent_ni = ni;
            while (true) {
                const parent = parent_ni.get(mf);
                if (set_has_content) parent.flags.has_content = true;
                if (parent_ni == .none) break;
                parent_ni = parent.parent;
                offset += parent_ni.location(mf).resolve(mf)[0];
            }
            return .{ .offset = offset, .size = size };
        }

        pub fn slice(ni: Node.Index, mf: *const MappedFile) []u8 {
            const file_loc = ni.fileLocation(mf, true);
            return mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)];
        }

        pub fn sliceConst(ni: Node.Index, mf: *const MappedFile) []const u8 {
            const file_loc = ni.fileLocation(mf, false);
            return mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)];
        }

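        /// Resizes this node, then refreshes the buffers of all active writers,
        /// since growing a node can remap the file or move nodes within it.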
        pub fn resize(ni: Node.Index, mf: *MappedFile, gpa: std.mem.Allocator, size: u64) !void {
            try mf.resizeNode(gpa, ni, size);
            var writers_it = mf.writers.first;
            while (writers_it) |writer_node| : (writers_it = writer_node.next) {
                const w: *Node.Writer = @fieldParentPtr("writer_node", writer_node);
                w.interface.buffer = w.ni.slice(mf);
            }
        }

        pub fn writer(ni: Node.Index, mf: *MappedFile, gpa: std.mem.Allocator, w: *Writer) void {
            w.* = .{
                .gpa = gpa,
                .mf = mf,
                .writer_node = .{},
                .ni = ni,
                .interface = .{
                    .buffer = ni.slice(mf),
                    .vtable = &Writer.vtable,
                },
                .err = null,
            };
            mf.writers.prepend(&w.writer_node);
        }
    };

    pub fn location(node: *const Node) Location {
        return switch (node.flags.location_tag) {
            inline else => |tag| @unionInit(
                Location,
                @tagName(tag),
                @field(node.location_payload, @tagName(tag)),
            ),
        };
    }

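    /// A `std.Io.Writer` whose buffer is a node's mapped bytes, growing the
    /// node on demand instead of flushing. Active writers are linked into
    /// `mf.writers` so their buffers can be refreshed when the mapping moves.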
    pub const Writer = struct {
        gpa: std.mem.Allocator,
        mf: *MappedFile,
        writer_node: std.SinglyLinkedList.Node,
        ni: Node.Index,
        interface: std.Io.Writer,
        err: ?Error,

        pub fn deinit(w: *Writer) void {
            assert(w.mf.writers.popFirst() == &w.writer_node);
            w.* = undefined;
        }

        const vtable: std.Io.Writer.VTable = .{
            .drain = drain,
            .sendFile = sendFile,
            .flush = std.Io.Writer.noopFlush,
            .rebase = growingRebase,
        };

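        /// `std.Io.Writer.VTable.drain` implementation: grows the node to fit
        /// all of `data` plus the splatted final element, then memcpys into the
        /// mapped bytes. The loop writes the final element once, so the splat
        /// handling below writes it only `splat - 1` more times (or backs it
        /// out entirely when `splat` is zero).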
        fn drain(
            interface: *std.Io.Writer,
            data: []const []const u8,
            splat: usize,
        ) std.Io.Writer.Error!usize {
            const pattern = data[data.len - 1];
            const splat_len = pattern.len * splat;
            const start_len = interface.end;
            assert(data.len != 0);
            for (data) |bytes| {
                try growingRebase(interface, interface.end, bytes.len + splat_len + 1);
                @memcpy(interface.buffer[interface.end..][0..bytes.len], bytes);
                interface.end += bytes.len;
            }
            if (splat == 0) {
                interface.end -= pattern.len;
            } else switch (pattern.len) {
                0 => {},
                1 => {
                    @memset(interface.buffer[interface.end..][0 .. splat - 1], pattern[0]);
                    interface.end += splat - 1;
                },
                else => for (0..splat - 1) |_| {
                    @memcpy(interface.buffer[interface.end..][0..pattern.len], pattern);
                    interface.end += pattern.len;
                },
            }
            return interface.end - start_len;
        }

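        /// `std.Io.Writer.VTable.sendFile` implementation. For a positional
        /// reader, any bytes already buffered by the reader are copied first,
        /// then the bulk of the transfer is delegated to `mf.copyFileRange`,
        /// which can avoid routing the data through user space. Other reader
        /// modes fall back to a plain `read` into the mapped bytes.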
        fn sendFile(
            interface: *std.Io.Writer,
            file_reader: *std.fs.File.Reader,
            limit: std.Io.Limit,
        ) std.Io.Writer.FileError!usize {
            if (limit == .nothing) return 0;
            const pos = file_reader.logicalPos();
            const additional = if (file_reader.getSize()) |size| size - pos else |_| std.atomic.cache_line;
            if (additional == 0) return error.EndOfStream;
            try growingRebase(interface, interface.end, limit.minInt64(additional));
            switch (file_reader.mode) {
                .positional => {
                    const fr_buf = file_reader.interface.buffered();
                    const buf_copy_size = interface.write(fr_buf) catch unreachable;
                    file_reader.interface.toss(buf_copy_size);
                    if (buf_copy_size < fr_buf.len) return buf_copy_size;
                    assert(file_reader.logicalPos() == file_reader.pos);

                    const w: *Writer = @fieldParentPtr("interface", interface);
                    const copy_size: usize = @intCast(w.mf.copyFileRange(
                        file_reader.file,
                        file_reader.pos,
                        w.ni.fileLocation(w.mf, true).offset + interface.end,
                        limit.minInt(interface.unusedCapacityLen()),
                    ) catch |err| {
                        w.err = err;
                        return error.WriteFailed;
                    });
                    interface.end += copy_size;
                    return copy_size;
                },
                .streaming,
                .streaming_reading,
                .positional_reading,
                .failure,
                => {
                    const dest = limit.slice(interface.unusedCapacitySlice());
                    const n = try file_reader.read(dest);
                    interface.end += n;
                    return n;
                },
            }
        }

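        /// `std.Io.Writer.VTable.rebase` implementation: instead of flushing,
        /// grows the backing node to one and a half times the needed capacity
        /// so the writer can keep appending in place.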
        fn growingRebase(
            interface: *std.Io.Writer,
            preserve: usize,
            unused_capacity: usize,
        ) std.Io.Writer.Error!void {
            _ = preserve;
            const total_capacity = interface.end + unused_capacity;
            if (interface.buffer.len >= total_capacity) return;
            const w: *Writer = @fieldParentPtr("interface", interface);
            w.ni.resize(w.mf, w.gpa, total_capacity +| total_capacity / 2) catch |err| {
                w.err = err;
                return error.WriteFailed;
            };
        }
    };

    comptime {
        if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Node) == 32);
    }
};

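/// Allocates a node, reusing an entry from the free list when one is available.
/// The aligned offset decides the location encoding: `.small` when it fits in a
/// `u32`, otherwise an index into `mf.large`. The `moved`/`resized` flags are
/// held set during initial placement so the placement itself enqueues no
/// updates; afterwards they are raised for real only if requested via `opts`.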
fn addNode(mf: *MappedFile, gpa: std.mem.Allocator, opts: struct {
    parent: Node.Index = .none,
    prev: Node.Index = .none,
    next: Node.Index = .none,
    offset: u64 = 0,
    add_node: AddNodeOptions,
}) !Node.Index {
    if (opts.add_node.moved or opts.add_node.resized) try mf.updates.ensureUnusedCapacity(gpa, 1);
    const offset = opts.add_node.alignment.forward(@intCast(opts.offset));
    const location_tag: Node.Location.Tag, const location_payload: Node.Location.Payload = location: {
        if (std.math.cast(u32, offset)) |small_offset| break :location .{ .small, .{
            .small = .{ .offset = small_offset, .size = 0 },
        } };
        try mf.large.ensureUnusedCapacity(gpa, 2);
        defer mf.large.appendSliceAssumeCapacity(&.{ offset, 0 });
        break :location .{ .large, .{ .large = .{ .index = mf.large.items.len } } };
    };
    const free_ni: Node.Index, const free_node = free: switch (mf.free_ni) {
        .none => .{ @enumFromInt(mf.nodes.items.len), mf.nodes.addOneAssumeCapacity() },
        else => |free_ni| {
            const free_node = free_ni.get(mf);
            mf.free_ni = free_node.next;
            break :free .{ free_ni, free_node };
        },
    };
    free_node.* = .{
        .parent = opts.parent,
        .prev = opts.prev,
        .next = opts.next,
        .first = .none,
        .last = .none,
        .flags = .{
            .location_tag = location_tag,
            .alignment = opts.add_node.alignment,
            .fixed = opts.add_node.fixed,
            .moved = true,
            .resized = true,
            .has_content = false,
        },
        .location_payload = location_payload,
    };
    {
        defer {
            free_node.flags.moved = false;
            free_node.flags.resized = false;
        }
        if (offset > opts.parent.location(mf).resolve(mf)[1]) try opts.parent.resize(mf, gpa, offset);
        try free_ni.resize(mf, gpa, opts.add_node.size);
    }
    if (opts.add_node.moved) free_ni.movedAssumeCapacity(mf);
    if (opts.add_node.resized) free_ni.resizedAssumeCapacity(mf);
    return free_ni;
}

pub const AddNodeOptions = struct {
    size: u64 = 0,
    alignment: std.mem.Alignment = .@"1",
    fixed: bool = false,
    moved: bool = false,
    resized: bool = false,
};

pub fn addOnlyChildNode(
    mf: *MappedFile,
    gpa: std.mem.Allocator,
    parent_ni: Node.Index,
    opts: AddNodeOptions,
) !Node.Index {
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    const parent = parent_ni.get(mf);
    assert(parent.first == .none and parent.last == .none);
    const ni = try mf.addNode(gpa, .{
        .parent = parent_ni,
        .add_node = opts,
    });
    parent.first = ni;
    parent.last = ni;
    return ni;
}

pub fn addLastChildNode(
    mf: *MappedFile,
    gpa: std.mem.Allocator,
    parent_ni: Node.Index,
    opts: AddNodeOptions,
) !Node.Index {
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    const parent = parent_ni.get(mf);
    const ni = try mf.addNode(gpa, .{
        .parent = parent_ni,
        .prev = parent.last,
        .offset = offset: switch (parent.last) {
            .none => 0,
            else => |last_ni| {
                const last_offset, const last_size = last_ni.location(mf).resolve(mf);
                break :offset last_offset + last_size;
            },
        },
        .add_node = opts,
    });
    switch (parent.last) {
        .none => parent.first = ni,
        else => |last_ni| last_ni.get(mf).next = ni,
    }
    parent.last = ni;
    return ni;
}

pub fn addNodeAfter(
    mf: *MappedFile,
    gpa: std.mem.Allocator,
    prev_ni: Node.Index,
    opts: AddNodeOptions,
) !Node.Index {
    assert(prev_ni != .none);
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    const prev = prev_ni.get(mf);
    const prev_offset, const prev_size = prev.location().resolve(mf);
    const ni = try mf.addNode(gpa, .{
        .parent = prev.parent,
        .prev = prev_ni,
        .next = prev.next,
        .offset = prev_offset + prev_size,
        .add_node = opts,
    });
    switch (prev.next) {
        .none => prev.parent.get(mf).last = ni,
        else => |next_ni| next_ni.get(mf).prev = ni,
    }
    prev.next = ni;
    return ni;
}

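/// Grows or shrinks a node to `requested_size`, rounded up to its alignment.
/// Strategies, tried in order: resize the whole file when the node is the root;
/// expand in place into trailing free space; on Linux, insert an extent with
/// `fallocate(FALLOC_FL_INSERT_RANGE)` when the alignment permits; grow the
/// parent when this is the last child; otherwise relocate this floating node
/// (or the floating node after it, when this one is fixed) to the end of the
/// parent, growing the parent first as needed.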
fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void {
    const node = ni.get(mf);
    var old_offset, const old_size = node.location().resolve(mf);
    const new_size = node.flags.alignment.forward(@intCast(requested_size));
    // Resize the entire file
    if (ni == Node.Index.root) {
        try mf.ensureCapacityForSetLocation(gpa);
        try mf.file.setEndPos(new_size);
        try mf.ensureTotalCapacity(@intCast(new_size));
        ni.setLocationAssumeCapacity(mf, old_offset, new_size);
        return;
    }
    while (true) {
        const parent = node.parent.get(mf);
        _, const old_parent_size = parent.location().resolve(mf);
        const trailing_end = switch (node.next) {
            .none => parent.location().resolve(mf)[1],
            else => |next_ni| next_ni.location(mf).resolve(mf)[0],
        };
        assert(old_offset + old_size <= trailing_end);
        // Expand the node into available trailing free space
        if (old_offset + new_size <= trailing_end) {
            try mf.ensureCapacityForSetLocation(gpa);
            ni.setLocationAssumeCapacity(mf, old_offset, new_size);
            return;
        }
        // Ask the filesystem driver to insert an extent into the file without copying any data
        if (is_linux and !mf.flags.fallocate_insert_range_unsupported and
            node.flags.alignment.order(mf.flags.block_size).compare(.gte))
        insert_range: {
            const last_offset, const last_size = parent.last.location(mf).resolve(mf);
            const last_end = last_offset + last_size;
            assert(last_end <= old_parent_size);
            const range_size =
                node.flags.alignment.forward(@intCast(requested_size +| requested_size / 2)) - old_size;
            const new_parent_size = last_end + range_size;
            if (new_parent_size > old_parent_size) {
                try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / 2);
                continue;
            }
            const range_file_offset = ni.fileLocation(mf, false).offset + old_size;
            while (true) switch (linux.E.init(linux.fallocate(
                mf.file.handle,
                linux.FALLOC.FL_INSERT_RANGE,
                @intCast(range_file_offset),
                @intCast(range_size),
            ))) {
                .SUCCESS => {
                    var enclosing_ni = ni;
                    while (true) {
                        try mf.ensureCapacityForSetLocation(gpa);
                        const enclosing = enclosing_ni.get(mf);
                        const enclosing_offset, const old_enclosing_size =
                            enclosing.location().resolve(mf);
                        const new_enclosing_size = old_enclosing_size + range_size;
                        enclosing_ni.setLocationAssumeCapacity(mf, enclosing_offset, new_enclosing_size);
                        if (enclosing_ni == Node.Index.root) {
                            assert(enclosing_offset == 0);
                            try mf.ensureTotalCapacity(@intCast(new_enclosing_size));
                            break;
                        }
                        var after_ni = enclosing.next;
                        while (after_ni != .none) {
                            try mf.ensureCapacityForSetLocation(gpa);
                            const after = after_ni.get(mf);
                            const after_offset, const after_size = after.location().resolve(mf);
                            after_ni.setLocationAssumeCapacity(
                                mf,
                                range_size + after_offset,
                                after_size,
                            );
                            after_ni = after.next;
                        }
                        enclosing_ni = enclosing.parent;
                    }
                    return;
                },
                .INTR => continue,
                .BADF, .FBIG, .INVAL => unreachable,
                .IO => return error.InputOutput,
                .NODEV => return error.NotFile,
                .NOSPC => return error.NoSpaceLeft,
                .NOSYS, .OPNOTSUPP => {
                    mf.flags.fallocate_insert_range_unsupported = true;
                    break :insert_range;
                },
                .PERM => return error.PermissionDenied,
                .SPIPE => return error.Unseekable,
                .TXTBSY => return error.FileBusy,
                else => |e| return std.posix.unexpectedErrno(e),
            };
        }
        switch (node.next) {
            .none => {
                // As this is the last node, we simply need more space in the parent
                const new_parent_size = old_offset + new_size;
                try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / 2);
            },
            else => |*next_ni_ptr| switch (node.flags.fixed) {
                false => {
                    // Make space at the end of the parent for this floating node
                    const last = parent.last.get(mf);
                    const last_offset, const last_size = last.location().resolve(mf);
                    const new_offset = node.flags.alignment.forward(@intCast(last_offset + last_size));
                    const new_parent_size = new_offset + new_size;
                    if (new_parent_size > old_parent_size) {
                        try mf.resizeNode(
                            gpa,
                            node.parent,
                            new_parent_size +| new_parent_size / 2,
                        );
                        continue;
                    }
                    const next_ni = next_ni_ptr.*;
                    next_ni.get(mf).prev = node.prev;
                    switch (node.prev) {
                        .none => parent.first = next_ni,
                        else => |prev_ni| prev_ni.get(mf).next = next_ni,
                    }
                    last.next = ni;
                    node.prev = parent.last;
                    next_ni_ptr.* = .none;
                    parent.last = ni;
                    if (node.flags.has_content) {
                        const parent_file_offset = node.parent.fileLocation(mf, false).offset;
                        try mf.moveRange(
                            parent_file_offset + old_offset,
                            parent_file_offset + new_offset,
                            old_size,
                        );
                    }
                    old_offset = new_offset;
                },
                true => {
                    // Move the next floating node to make space for this fixed node
                    const next_ni = next_ni_ptr.*;
                    const next = next_ni.get(mf);
                    assert(!next.flags.fixed);
                    const next_offset, const next_size = next.location().resolve(mf);
                    const last = parent.last.get(mf);
                    const last_offset, const last_size = last.location().resolve(mf);
                    const new_offset = next.flags.alignment.forward(@intCast(
                        @max(old_offset + new_size, last_offset + last_size),
                    ));
                    const new_parent_size = new_offset + next_size;
                    if (new_parent_size > old_parent_size) {
                        try mf.resizeNode(
                            gpa,
                            node.parent,
                            new_parent_size +| new_parent_size / 2,
                        );
                        continue;
                    }
                    try mf.ensureCapacityForSetLocation(gpa);
                    next.prev = parent.last;
                    parent.last = next_ni;
                    last.next = next_ni;
                    next_ni_ptr.* = next.next;
                    switch (next.next) {
                        .none => {},
                        else => |next_next_ni| next_next_ni.get(mf).prev = ni,
                    }
                    next.next = .none;
                    if (node.flags.has_content) {
                        const parent_file_offset = node.parent.fileLocation(mf, false).offset;
                        try mf.moveRange(
                            parent_file_offset + next_offset,
                            parent_file_offset + new_offset,
                            next_size,
                        );
                    }
                    next_ni.setLocationAssumeCapacity(mf, new_offset, next_size);
                },
            },
        }
    }
}

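/// Moves a byte range within the file by copying it to the new offset and then
/// releasing the old range: on Linux via `fallocate(FALLOC_FL_PUNCH_HOLE)` when
/// the range is large enough to span whole blocks, otherwise by zeroing the old
/// bytes through the mapping.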
fn moveRange(mf: *MappedFile, old_file_offset: u64, new_file_offset: u64, size: u64) !void {
    // make a copy of this node at the new location
    try mf.copyRange(old_file_offset, new_file_offset, size);
    // delete the copy of this node at the old location
    if (is_linux and !mf.flags.fallocate_punch_hole_unsupported and
        size >= mf.flags.block_size.toByteUnits() * 2 - 1) while (true)
        switch (linux.E.init(linux.fallocate(
            mf.file.handle,
            linux.FALLOC.FL_PUNCH_HOLE | linux.FALLOC.FL_KEEP_SIZE,
            @intCast(old_file_offset),
            @intCast(size),
        ))) {
            .SUCCESS => return,
            .INTR => continue,
            .BADF, .FBIG, .INVAL => unreachable,
            .IO => return error.InputOutput,
            .NODEV => return error.NotFile,
            .NOSPC => return error.NoSpaceLeft,
            .NOSYS, .OPNOTSUPP => {
                mf.flags.fallocate_punch_hole_unsupported = true;
                break;
            },
            .PERM => return error.PermissionDenied,
            .SPIPE => return error.Unseekable,
            .TXTBSY => return error.FileBusy,
            else => |e| return std.posix.unexpectedErrno(e),
        };
    @memset(mf.contents[@intCast(old_file_offset)..][0..@intCast(size)], 0);
}

fn copyRange(mf: *MappedFile, old_file_offset: u64, new_file_offset: u64, size: u64) !void {
    const copy_size = try mf.copyFileRange(mf.file, old_file_offset, new_file_offset, size);
    if (copy_size < size) @memcpy(
        mf.contents[@intCast(new_file_offset + copy_size)..][0..@intCast(size - copy_size)],
        mf.contents[@intCast(old_file_offset + copy_size)..][0..@intCast(size - copy_size)],
    );
}

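/// Copies bytes from `old_file` into this file, using `copy_file_range(2)` on
/// Linux for block-sized spans so the kernel can copy (or reflink, where
/// supported) the data directly. Returns how many bytes were copied this way;
/// the caller handles any remainder through the mapping.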
fn copyFileRange(
    mf: *MappedFile,
    old_file: std.fs.File,
    old_file_offset: u64,
    new_file_offset: u64,
    size: u64,
) !u64 {
    var remaining_size = size;
    if (is_linux and !mf.flags.copy_file_range_unsupported) {
        var old_file_offset_mut: i64 = @intCast(old_file_offset);
        var new_file_offset_mut: i64 = @intCast(new_file_offset);
        while (remaining_size >= mf.flags.block_size.toByteUnits() * 2 - 1) {
            const copy_len = linux.copy_file_range(
                old_file.handle,
                &old_file_offset_mut,
                mf.file.handle,
                &new_file_offset_mut,
                @intCast(remaining_size),
                0,
            );
            switch (linux.E.init(copy_len)) {
                .SUCCESS => {
                    if (copy_len == 0) break;
                    remaining_size -= copy_len;
                    if (remaining_size == 0) break;
                },
                .INTR => continue,
                .BADF, .FBIG, .INVAL, .OVERFLOW => unreachable,
                .IO => return error.InputOutput,
                .ISDIR => return error.IsDir,
                .NOMEM => return error.SystemResources,
                .NOSPC => return error.NoSpaceLeft,
                .NOSYS, .OPNOTSUPP, .XDEV => {
                    mf.flags.copy_file_range_unsupported = true;
                    break;
                },
                .PERM => return error.PermissionDenied,
                .TXTBSY => return error.FileBusy,
                else => |e| return std.posix.unexpectedErrno(e),
            }
        }
    }
    return size - remaining_size;
}

fn ensureCapacityForSetLocation(mf: *MappedFile, gpa: std.mem.Allocator) !void {
    try mf.large.ensureUnusedCapacity(gpa, 2);
    try mf.updates.ensureUnusedCapacity(gpa, 1);
}

pub fn ensureTotalCapacity(mf: *MappedFile, new_capacity: usize) !void {
    if (mf.contents.len >= new_capacity) return;
    try mf.ensureTotalCapacityPrecise(new_capacity +| new_capacity / 2);
}

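/// Ensures the memory mapping covers at least `new_capacity` bytes. On Linux an
/// existing mapping is grown (possibly moving) with `mremap`; on Windows the
/// file is mapped through an NT section object; otherwise the old mapping is
/// dropped and the file is mapped again at the larger size.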
pub fn ensureTotalCapacityPrecise(mf: *MappedFile, new_capacity: usize) !void {
    if (mf.contents.len >= new_capacity) return;
    const aligned_capacity = mf.flags.block_size.forward(new_capacity);
    if (!is_linux) mf.unmap() else if (mf.contents.len > 0) {
        mf.contents = try std.posix.mremap(
            mf.contents.ptr,
            mf.contents.len,
            aligned_capacity,
            .{ .MAYMOVE = true },
            null,
        );
        return;
    }
    if (is_windows) {
        if (mf.section == windows.INVALID_HANDLE_VALUE) switch (windows.ntdll.NtCreateSection(
            &mf.section,
            windows.STANDARD_RIGHTS_REQUIRED | windows.SECTION_QUERY |
                windows.SECTION_MAP_WRITE | windows.SECTION_MAP_READ | windows.SECTION_EXTEND_SIZE,
            null,
            @constCast(&@as(i64, @intCast(aligned_capacity))),
            windows.PAGE_READWRITE,
            windows.SEC_COMMIT,
            mf.file.handle,
        )) {
            .SUCCESS => {},
            else => return error.MemoryMappingNotSupported,
        };
        var contents_ptr: ?[*]align(std.heap.page_size_min) u8 = null;
        var contents_len = aligned_capacity;
        switch (windows.ntdll.NtMapViewOfSection(
            mf.section,
            windows.GetCurrentProcess(),
            @ptrCast(&contents_ptr),
            null,
            0,
            null,
            &contents_len,
            .ViewUnmap,
            0,
            windows.PAGE_READWRITE,
        )) {
            .SUCCESS => mf.contents = contents_ptr.?[0..contents_len],
            else => return error.MemoryMappingNotSupported,
        }
    } else mf.contents = try std.posix.mmap(
        null,
        aligned_capacity,
        std.posix.PROT.READ | std.posix.PROT.WRITE,
        .{ .TYPE = if (is_linux) .SHARED_VALIDATE else .SHARED },
        mf.file.handle,
        0,
    );
}

pub fn unmap(mf: *MappedFile) void {
    if (mf.contents.len == 0) return;
    if (is_windows)
        _ = windows.ntdll.NtUnmapViewOfSection(windows.GetCurrentProcess(), mf.contents.ptr)
    else
        std.posix.munmap(mf.contents);
    mf.contents = &.{};
    if (is_windows and mf.section != windows.INVALID_HANDLE_VALUE) {
        windows.CloseHandle(mf.section);
        mf.section = windows.INVALID_HANDLE_VALUE;
    }
}

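/// Debug helper: asserts the structural invariants of the node tree, starting
/// from the root, which must have no parent or siblings.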
fn verify(mf: *MappedFile) void {
    const root = Node.Index.root.get(mf);
    assert(root.parent == .none);
    assert(root.prev == .none);
    assert(root.next == .none);
    mf.verifyNode(Node.Index.root);
}

fn verifyNode(mf: *MappedFile, parent_ni: Node.Index) void {
    const parent = parent_ni.get(mf);
    const parent_offset, const parent_size = parent.location().resolve(mf);
    var prev_ni: Node.Index = .none;
    var prev_end: u64 = 0;
    var ni = parent.first;
    while (true) {
        if (ni == .none) {
            assert(parent.last == prev_ni);
            return;
        }
        const node = ni.get(mf);
        assert(node.parent == parent_ni);
        const offset, const size = node.location().resolve(mf);
        assert(node.flags.alignment.check(@intCast(offset)));
        assert(node.flags.alignment.check(@intCast(size)));
        const end = offset + size;
        assert(end <= parent_offset + parent_size);
        assert(offset >= prev_end);
        assert(node.prev == prev_ni);
        mf.verifyNode(ni);
        prev_ni = ni;
        prev_end = end;
        ni = node.next;
    }
}

const assert = std.debug.assert;
const builtin = @import("builtin");
const is_linux = builtin.os.tag == .linux;
const is_windows = builtin.os.tag == .windows;
const linux = std.os.linux;
const MappedFile = @This();
const std = @import("std");
const windows = std.os.windows;

@@ -22,17 +22,17 @@ prelink_wait_count: u32,
/// Prelink tasks which have been enqueued and are not yet owned by the worker thread.
/// Allocated into `gpa`, guarded by `mutex`.
queued_prelink: std.ArrayListUnmanaged(PrelinkTask),
queued_prelink: std.ArrayList(PrelinkTask),
/// The worker thread moves items from `queued_prelink` into this array in order to process them.
/// Allocated into `gpa`, accessed only by the worker thread.
wip_prelink: std.ArrayListUnmanaged(PrelinkTask),
wip_prelink: std.ArrayList(PrelinkTask),

/// Like `queued_prelink`, but for ZCU tasks.
/// Allocated into `gpa`, guarded by `mutex`.
queued_zcu: std.ArrayListUnmanaged(ZcuTask),
queued_zcu: std.ArrayList(ZcuTask),
/// Like `wip_prelink`, but for ZCU tasks.
/// Allocated into `gpa`, accessed only by the worker thread.
wip_zcu: std.ArrayListUnmanaged(ZcuTask),
wip_zcu: std.ArrayList(ZcuTask),

/// When processing ZCU link tasks, we might have to block due to unpopulated MIR. When this
/// happens, some tasks in `wip_zcu` have been run, and some are still pending. This is the
@@ -213,32 +213,41 @@ pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!
fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
    q.flush_safety.lock(); // every `return` site should unlock this before unlocking `q.mutex`

    if (std.debug.runtime_safety) {
        q.mutex.lock();
        defer q.mutex.unlock();
        assert(q.state == .running);
    }

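    // `have_idle_tasks` tracks whether `link.doIdleTask` may still have work
    // available; while the queues are empty, the worker drains idle tasks
    // instead of blocking or finishing early.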
    var have_idle_tasks = true;
    prelink: while (true) {
        assert(q.wip_prelink.items.len == 0);
        {
            q.mutex.lock();
            defer q.mutex.unlock();
            std.mem.swap(std.ArrayListUnmanaged(PrelinkTask), &q.queued_prelink, &q.wip_prelink);
            if (q.wip_prelink.items.len == 0) {
                if (q.prelink_wait_count == 0) {
                    break :prelink; // prelink is done
                } else {
        swap_queues: while (true) {
            {
                q.mutex.lock();
                defer q.mutex.unlock();
                std.mem.swap(std.ArrayList(PrelinkTask), &q.queued_prelink, &q.wip_prelink);
                if (q.wip_prelink.items.len > 0) break :swap_queues;
                if (q.prelink_wait_count == 0) break :prelink; // prelink is done
                if (!have_idle_tasks) {
                    // We're expecting more prelink tasks so can't move on to ZCU tasks.
                    q.state = .finished;
                    q.flush_safety.unlock();
                    return;
                }
            }
            have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
                error.OutOfMemory => have_idle_tasks: {
                    comp.link_diags.setAllocFailure();
                    break :have_idle_tasks false;
                },
                error.LinkFailure => false,
            };
        }
        for (q.wip_prelink.items) |task| {
            link.doPrelinkTask(comp, task);
        }
        have_idle_tasks = true;
        q.wip_prelink.clearRetainingCapacity();
    }

@@ -256,17 +265,29 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
    // Now we can run ZCU tasks.
    while (true) {
        if (q.wip_zcu.items.len == q.wip_zcu_idx) {
        if (q.wip_zcu.items.len == q.wip_zcu_idx) swap_queues: {
            q.wip_zcu.clearRetainingCapacity();
            q.wip_zcu_idx = 0;
            q.mutex.lock();
            defer q.mutex.unlock();
            std.mem.swap(std.ArrayListUnmanaged(ZcuTask), &q.queued_zcu, &q.wip_zcu);
            if (q.wip_zcu.items.len == 0) {
                // We've exhausted all available tasks.
                q.state = .finished;
                q.flush_safety.unlock();
                return;
            while (true) {
                {
                    q.mutex.lock();
                    defer q.mutex.unlock();
                    std.mem.swap(std.ArrayList(ZcuTask), &q.queued_zcu, &q.wip_zcu);
                    if (q.wip_zcu.items.len > 0) break :swap_queues;
                    if (!have_idle_tasks) {
                        // We've exhausted all available tasks.
                        q.state = .finished;
                        q.flush_safety.unlock();
                        return;
                    }
                }
                have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
                    error.OutOfMemory => have_idle_tasks: {
                        comp.link_diags.setAllocFailure();
                        break :have_idle_tasks false;
                    },
                    error.LinkFailure => false,
                };
            }
        }
        const task = q.wip_zcu.items[q.wip_zcu_idx];
@@ -274,8 +295,18 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
        pending: {
            if (task != .link_func) break :pending;
            const status_ptr = &task.link_func.mir.status;
            // First check without the mutex to optimize for the common case where MIR is ready.
            if (status_ptr.load(.acquire) != .pending) break :pending;
            while (true) {
                // First check without the mutex to optimize for the common case where MIR is ready.
                if (status_ptr.load(.acquire) != .pending) break :pending;
                if (have_idle_tasks) have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
                    error.OutOfMemory => have_idle_tasks: {
                        comp.link_diags.setAllocFailure();
                        break :have_idle_tasks false;
                    },
                    error.LinkFailure => false,
                };
                if (!have_idle_tasks) break;
            }
            q.mutex.lock();
            defer q.mutex.unlock();
            if (status_ptr.load(.acquire) != .pending) break :pending;
@@ -298,6 +329,7 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
            }
        }
        q.wip_zcu_idx += 1;
        have_idle_tasks = true;
    }
}