Mirror of https://codeberg.org/ziglang/zig.git (synced 2025-12-06 05:44:20 +00:00)

Merge branch 'master' into docgen_drop_cwd

Commit 9095997ce2: 1321 changed files with 60313 additions and 61205 deletions

.github/ISSUE_TEMPLATE/config.yml (vendored): 3 lines changed
@@ -5,6 +5,9 @@ contact_links:
- name: Question
about: Please use one of the community spaces for questions or general discussions.
url: https://github.com/ziglang/zig/wiki/Community
- name: C Translation
about: "Issues related to `zig translate-c` and `@cImport` are tracked separately."
url: https://github.com/ziglang/translate-c/
- name: Copilot and Other LLMs
about: Please do not use GitHub Copilot or any other LLM to write an issue.
url: https://github.com/ziglang/zig/wiki/Writing-Issues-with-Copilot-and-Other-LLMs
.github/workflows/ci-pr-riscv64-linux.yaml (vendored): 35 lines changed

@@ -1,35 +0,0 @@
name: ci-pr-riscv64-linux
on:
pull_request:
types:
- labeled
- opened
- reopened
- synchronize
- unlabeled
concurrency:
# Cancels pending runs when a PR gets updated.
group: riscv64-linux-${{ github.head_ref || github.run_id }}-${{ github.actor }}
cancel-in-progress: true
permissions:
# Sets permission policy for `GITHUB_TOKEN`
contents: read
jobs:
riscv64-linux-debug:
if: contains(github.event.pull_request.labels.*.name, 'ci-riscv64-linux')
timeout-minutes: 420
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-debug.sh
riscv64-linux-release:
if: contains(github.event.pull_request.labels.*.name, 'ci-riscv64-linux')
timeout-minutes: 420
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-release.sh
.github/workflows/ci.yaml (vendored): 27 lines changed

@@ -50,33 +50,6 @@ jobs:
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/aarch64-linux-release.sh
riscv64-linux-debug:
if: github.event_name == 'push'
timeout-minutes: 420
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-debug.sh
riscv64-linux-release:
if: github.event_name == 'push'
timeout-minutes: 420
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-release.sh
x86_64-macos-release:
runs-on: "macos-13"
env:
ARCH: "x86_64"
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: ci/x86_64-macos-release.sh
aarch64-macos-debug:
runs-on: [self-hosted, macOS, aarch64]
env:
@@ -197,7 +197,6 @@ set(ZIG_CPP_SOURCES
# These are planned to stay even when we are self-hosted.
src/zig_llvm.cpp
src/zig_llvm-ar.cpp
src/zig_clang.cpp
src/zig_clang_driver.cpp
src/zig_clang_cc1_main.cpp
src/zig_clang_cc1as_main.cpp

@@ -502,7 +501,6 @@ set(ZIG_STAGE2_SOURCES
lib/std/zig/Server.zig
lib/std/zig/WindowsSdk.zig
lib/std/zig/Zir.zig
lib/std/zig/c_builtins.zig
lib/std/zig/string_literal.zig
lib/std/zig/system.zig
lib/std/zig/system/NativePaths.zig

@@ -537,7 +535,6 @@ set(ZIG_STAGE2_SOURCES
src/Value.zig
src/Zcu.zig
src/Zcu/PerThread.zig
src/clang.zig
src/clang_options.zig
src/clang_options_data.zig
src/codegen.zig

@@ -583,6 +580,7 @@ set(ZIG_STAGE2_SOURCES
src/link/Elf/relocatable.zig
src/link/Elf/relocation.zig
src/link/Elf/synthetic_sections.zig
src/link/Elf2.zig
src/link/Goff.zig
src/link/LdScript.zig
src/link/Lld.zig

@@ -612,6 +610,7 @@ set(ZIG_STAGE2_SOURCES
src/link/MachO/synthetic.zig
src/link/MachO/Thunk.zig
src/link/MachO/uuid.zig
src/link/MappedFile.zig
src/link/Queue.zig
src/link/StringTable.zig
src/link/Wasm.zig

@@ -639,7 +638,6 @@ set(ZIG_STAGE2_SOURCES
src/register_manager.zig
src/target.zig
src/tracy.zig
src/translate_c.zig
src/libs/wasi_libc.zig
)
@@ -64,6 +64,8 @@ static const char *get_host_os(void) {
return "linux";
#elif defined(__FreeBSD__)
return "freebsd";
#elif defined(__DragonFly__)
return "dragonfly";
#elif defined(__HAIKU__)
return "haiku";
#else
build.zig: 43 lines changed

@@ -90,8 +90,6 @@ pub fn build(b: *std.Build) !void {
const skip_libc = b.option(bool, "skip-libc", "Main test suite skips tests that link libc") orelse false;
const skip_single_threaded = b.option(bool, "skip-single-threaded", "Main test suite skips tests that are single-threaded") orelse false;
const skip_compile_errors = b.option(bool, "skip-compile-errors", "Main test suite skips compile error tests") orelse false;
const skip_translate_c = b.option(bool, "skip-translate-c", "Main test suite skips translate-c tests") orelse false;
const skip_run_translated_c = b.option(bool, "skip-run-translated-c", "Main test suite skips run-translated-c tests") orelse false;
const skip_freebsd = b.option(bool, "skip-freebsd", "Main test suite skips targets with freebsd OS") orelse false;
const skip_netbsd = b.option(bool, "skip-netbsd", "Main test suite skips targets with netbsd OS") orelse false;
const skip_windows = b.option(bool, "skip-windows", "Main test suite skips targets with windows OS") orelse false;

@@ -202,6 +200,7 @@ pub fn build(b: *std.Build) !void {
});
exe.pie = pie;
exe.entitlements = entitlements;
exe.use_new_linker = b.option(bool, "new-linker", "Use the new linker");

const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");
exe.use_llvm = use_llvm;

@@ -415,7 +414,7 @@ pub fn build(b: *std.Build) !void {
test_step.dependOn(check_fmt);

const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
try tests.addCases(b, test_cases_step, target, .{
try tests.addCases(b, test_cases_step, .{
.test_filters = test_filters,
.test_target_filters = test_target_filters,
.skip_compile_errors = skip_compile_errors,

@@ -427,9 +426,6 @@ pub fn build(b: *std.Build) !void {
.skip_linux = skip_linux,
.skip_llvm = skip_llvm,
.skip_libc = skip_libc,
}, .{
.skip_translate_c = skip_translate_c,
.skip_run_translated_c = skip_run_translated_c,
}, .{
.enable_llvm = enable_llvm,
.llvm_has_m68k = llvm_has_m68k,

@@ -464,26 +460,6 @@ pub fn build(b: *std.Build) !void {
.max_rss = 4000000000,
}));

test_modules_step.dependOn(tests.addModuleTests(b, .{
.test_filters = test_filters,
.test_target_filters = test_target_filters,
.test_extra_targets = test_extra_targets,
.root_src = "test/c_import.zig",
.name = "c-import",
.desc = "Run the @cImport tests",
.optimize_modes = optimization_modes,
.include_paths = &.{"test/c_import"},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
.skip_macos = skip_macos,
.skip_linux = skip_linux,
.skip_llvm = skip_llvm,
.skip_libc = skip_libc,
}));

test_modules_step.dependOn(tests.addModuleTests(b, .{
.test_filters = test_filters,
.test_target_filters = test_target_filters,

@@ -568,7 +544,6 @@ pub fn build(b: *std.Build) !void {
unit_tests.root_module.addOptions("build_options", exe_options);
unit_tests_step.dependOn(&b.addRunArtifact(unit_tests).step);

test_step.dependOn(tests.addCompareOutputTests(b, test_filters, optimization_modes));
test_step.dependOn(tests.addStandaloneTests(
b,
optimization_modes,

@@ -590,7 +565,6 @@ pub fn build(b: *std.Build) !void {
test_step.dependOn(tests.addLinkTests(b, enable_macos_sdk, enable_ios_sdk, enable_symlinks_windows));
test_step.dependOn(tests.addStackTraceTests(b, test_filters, optimization_modes));
test_step.dependOn(tests.addCliTests(b));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filters, optimization_modes));
if (tests.addDebuggerTests(b, .{
.test_filters = test_filters,
.test_target_filters = test_target_filters,

@@ -630,6 +604,12 @@ pub fn build(b: *std.Build) !void {
const test_incremental_step = b.step("test-incremental", "Run the incremental compilation test cases");
try tests.addIncrementalTests(b, test_incremental_step);
test_step.dependOn(test_incremental_step);

if (tests.addLibcTests(b, .{
.optimize_modes = optimization_modes,
.test_filters = test_filters,
.test_target_filters = test_target_filters,
})) |test_libc_step| test_step.dependOn(test_libc_step);
}

fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {

@@ -724,13 +704,7 @@ fn addCompilerMod(b: *std.Build, options: AddCompilerModOptions) *std.Build.Modu
.root_source_file = b.path("lib/compiler/aro/aro.zig"),
});

const aro_translate_c_mod = b.createModule(.{
.root_source_file = b.path("lib/compiler/aro_translate_c.zig"),
});

aro_translate_c_mod.addImport("aro", aro_mod);
compiler_mod.addImport("aro", aro_mod);
compiler_mod.addImport("aro_translate_c", aro_translate_c_mod);

return compiler_mod;
}

@@ -1150,7 +1124,6 @@ fn toNativePathSep(b: *std.Build, s: []const u8) []u8 {
const zig_cpp_sources = [_][]const u8{
// These are planned to stay even when we are self-hosted.
"src/zig_llvm.cpp",
"src/zig_clang.cpp",
"src/zig_llvm-ar.cpp",
"src/zig_clang_driver.cpp",
"src/zig_clang_cc1_main.cpp",
ci/loongarch64-linux-debug.sh (executable file): 69 lines changed

@@ -0,0 +1,69 @@
#!/bin/sh

# Requires cmake ninja-build

set -x
set -e

ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.157+7fdd60df1"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

# Make the `zig version` number consistent.
# This will affect the cmake command below.
git fetch --unshallow || true
git fetch --tags

# Override the cache directories because they won't actually help other CI runs
# which will be testing alternate versions of zig, and ultimately would just
# fill up space on the hard drive for no reason.
export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"

mkdir build-debug
cd build-debug

export CC="$ZIG cc -target $TARGET -mcpu=$MCPU"
export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU"

cmake .. \
-DCMAKE_INSTALL_PREFIX="stage3-debug" \
-DCMAKE_PREFIX_PATH="$PREFIX" \
-DCMAKE_BUILD_TYPE=Debug \
-DZIG_TARGET_TRIPLE="$TARGET" \
-DZIG_TARGET_MCPU="$MCPU" \
-DZIG_STATIC=ON \
-DZIG_NO_LIB=ON \
-GNinja \
-DCMAKE_C_LINKER_DEPFILE_SUPPORTED=FALSE \
-DCMAKE_CXX_LINKER_DEPFILE_SUPPORTED=FALSE
# https://github.com/ziglang/zig/issues/22213

# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
# so that installation and testing do not get affected by them.
unset CC
unset CXX

ninja install

# No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
stage3-debug/bin/zig build test \
--maxrss 60129542144 \
-Dstatic-llvm \
-Dskip-non-native \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"

stage3-debug/bin/zig build \
--prefix stage4-debug \
-Denable-llvm \
-Dno-lib \
-Dtarget=$TARGET \
-Duse-zig-libcxx \
-Dversion-string="$(stage3-debug/bin/zig version)"

stage4-debug/bin/zig test ../test/behavior.zig
ci/loongarch64-linux-release.sh (executable file): 75 lines changed

@@ -0,0 +1,75 @@
#!/bin/sh

# Requires cmake ninja-build

set -x
set -e

ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.157+7fdd60df1"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

# Make the `zig version` number consistent.
# This will affect the cmake command below.
git fetch --unshallow || true
git fetch --tags

# Override the cache directories because they won't actually help other CI runs
# which will be testing alternate versions of zig, and ultimately would just
# fill up space on the hard drive for no reason.
export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"

mkdir build-release
cd build-release

export CC="$ZIG cc -target $TARGET -mcpu=$MCPU"
export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU"

cmake .. \
-DCMAKE_INSTALL_PREFIX="stage3-release" \
-DCMAKE_PREFIX_PATH="$PREFIX" \
-DCMAKE_BUILD_TYPE=Release \
-DZIG_TARGET_TRIPLE="$TARGET" \
-DZIG_TARGET_MCPU="$MCPU" \
-DZIG_STATIC=ON \
-DZIG_NO_LIB=ON \
-GNinja \
-DCMAKE_C_LINKER_DEPFILE_SUPPORTED=FALSE \
-DCMAKE_CXX_LINKER_DEPFILE_SUPPORTED=FALSE
# https://github.com/ziglang/zig/issues/22213

# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
# so that installation and testing do not get affected by them.
unset CC
unset CXX

ninja install

# No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
stage3-release/bin/zig build test \
--maxrss 60129542144 \
-Dstatic-llvm \
-Dskip-non-native \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"

# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \
--prefix stage4-release \
-Denable-llvm \
-Dno-lib \
-Doptimize=ReleaseFast \
-Dstrip \
-Dtarget=$TARGET \
-Duse-zig-libcxx \
-Dversion-string="$(stage3-release/bin/zig version)"

# diff returns an error code if the files differ.
echo "If the following command fails, it means nondeterminism has been"
echo "introduced, making stage3 and stage4 no longer byte-for-byte identical."
diff stage3-release/bin/zig stage4-release/bin/zig
@@ -49,14 +49,12 @@ unset CXX
ninja install

# No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
stage3-debug/bin/zig build test-cases test-modules test-unit test-c-abi test-stack-traces test-asm-link test-llvm-ir \
stage3-debug/bin/zig build test-cases test-modules test-unit test-c-abi test-stack-traces test-llvm-ir \
--maxrss 68719476736 \
-Dstatic-llvm \
-Dskip-non-native \
-Dskip-single-threaded \
-Dskip-compile-errors \
-Dskip-translate-c \
-Dskip-run-translated-c \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"
@@ -49,14 +49,12 @@ unset CXX
ninja install

# No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
stage3-release/bin/zig build test-cases test-modules test-unit test-c-abi test-stack-traces test-asm-link test-llvm-ir \
stage3-release/bin/zig build test-cases test-modules test-unit test-c-abi test-stack-traces test-llvm-ir \
--maxrss 68719476736 \
-Dstatic-llvm \
-Dskip-non-native \
-Dskip-single-threaded \
-Dskip-compile-errors \
-Dskip-translate-c \
-Dskip-run-translated-c \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"
ci/x86_64-freebsd-debug.sh (executable file): 70 lines changed

@@ -0,0 +1,70 @@
#!/bin/sh

# Requires cmake ninja-build

set -x
set -e

ARCH="x86_64"
TARGET="$ARCH-freebsd-none"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.312+164c598cd"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

# Make the `zig version` number consistent.
# This will affect the cmake command below.
git fetch --unshallow || true
git fetch --tags

# Override the cache directories because they won't actually help other CI runs
# which will be testing alternate versions of zig, and ultimately would just
# fill up space on the hard drive for no reason.
export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"

mkdir build-debug
cd build-debug

export CC="$ZIG cc -target $TARGET -mcpu=$MCPU"
export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU"

cmake .. \
-DCMAKE_INSTALL_PREFIX="stage3-debug" \
-DCMAKE_PREFIX_PATH="$PREFIX" \
-DCMAKE_BUILD_TYPE=Debug \
-DZIG_TARGET_TRIPLE="$TARGET" \
-DZIG_TARGET_MCPU="$MCPU" \
-DZIG_STATIC=ON \
-DZIG_NO_LIB=ON \
-GNinja \
-DCMAKE_C_LINKER_DEPFILE_SUPPORTED=FALSE \
-DCMAKE_CXX_LINKER_DEPFILE_SUPPORTED=FALSE
# https://github.com/ziglang/zig/issues/22213

# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
# so that installation and testing do not get affected by them.
unset CC
unset CXX

ninja install

stage3-debug/bin/zig build test docs \
--maxrss 32212254720 \
-Dstatic-llvm \
-Dskip-linux \
-Dskip-netbsd \
-Dskip-windows \
-Dskip-macos \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"

stage3-debug/bin/zig build \
--prefix stage4-debug \
-Denable-llvm \
-Dno-lib \
-Dtarget=$TARGET \
-Duse-zig-libcxx \
-Dversion-string="$(stage3-debug/bin/zig version)"

stage4-debug/bin/zig test ../test/behavior.zig
@@ -1,24 +1,17 @@
#!/bin/sh

# Requires cmake ninja-build

set -x
set -e

ZIGDIR="$PWD"
TARGET="$ARCH-macos-none"
ARCH="x86_64"
TARGET="$ARCH-freebsd-none"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/$CACHE_BASENAME"
JOBS="-j3"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.312+164c598cd"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

if [ ! -d "$PREFIX" ]; then
cd $HOME
curl -L -O "https://ziglang.org/deps/$CACHE_BASENAME.tar.xz"
tar xf "$CACHE_BASENAME.tar.xz"
fi

cd $ZIGDIR

# Make the `zig version` number consistent.
# This will affect the cmake command below.
git fetch --unshallow || true

@@ -30,46 +23,54 @@ git fetch --tags
export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"

# Test building from source without LLVM.
cc -o bootstrap bootstrap.c
./bootstrap
./zig2 build -Dno-lib
./zig-out/bin/zig test test/behavior.zig
mkdir build-release
cd build-release

mkdir build
cd build
export CC="$ZIG cc -target $TARGET -mcpu=$MCPU"
export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU"

cmake .. \
-DCMAKE_INSTALL_PREFIX="stage3-release" \
-DCMAKE_PREFIX_PATH="$PREFIX" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER="$ZIG;cc;-target;$TARGET;-mcpu=$MCPU" \
-DCMAKE_CXX_COMPILER="$ZIG;c++;-target;$TARGET;-mcpu=$MCPU" \
-DZIG_TARGET_TRIPLE="$TARGET" \
-DZIG_TARGET_MCPU="$MCPU" \
-DZIG_STATIC=ON \
-DZIG_NO_LIB=ON
-DZIG_NO_LIB=ON \
-GNinja \
-DCMAKE_C_LINKER_DEPFILE_SUPPORTED=FALSE \
-DCMAKE_CXX_LINKER_DEPFILE_SUPPORTED=FALSE
# https://github.com/ziglang/zig/issues/22213

make $JOBS install
# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
# so that installation and testing do not get affected by them.
unset CC
unset CXX

stage3/bin/zig build test docs \
--zig-lib-dir "$PWD/../lib" \
-Denable-macos-sdk \
ninja install

stage3-release/bin/zig build test docs \
--maxrss 32212254720 \
-Dstatic-llvm \
-Dskip-non-native \
--search-prefix "$PREFIX"
-Dskip-linux \
-Dskip-netbsd \
-Dskip-windows \
-Dskip-macos \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"

# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3/bin/zig build \
--prefix stage4 \
stage3-release/bin/zig build \
--prefix stage4-release \
-Denable-llvm \
-Dno-lib \
-Doptimize=ReleaseFast \
-Dstrip \
-Dtarget=$TARGET \
-Duse-zig-libcxx \
-Dversion-string="$(stage3/bin/zig version)"
-Dversion-string="$(stage3-release/bin/zig version)"

# diff returns an error code if the files differ.
echo "If the following command fails, it means nondeterminism has been"
echo "introduced, making stage3 and stage4 no longer byte-for-byte identical."
diff stage3/bin/zig stage4/bin/zig
diff stage3-release/bin/zig stage4-release/bin/zig
ci/x86_64-linux-debug-llvm.sh (Normal file → Executable file): 0 lines changed
@@ -65,6 +65,7 @@ stage3-release/bin/zig build test docs \
-fqemu \
-fwasmtime \
-Dstatic-llvm \
-Dskip-freebsd \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
@@ -4897,8 +4897,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
{#header_open|@fieldParentPtr#}
<pre>{#syntax#}@fieldParentPtr(comptime field_name: []const u8, field_ptr: *T) anytype{#endsyntax#}</pre>
<p>
Given a pointer to a struct field, returns a pointer to the struct containing that field.
The return type (and struct in question) is the inferred result type.
Given a pointer to a struct or union field, returns a pointer to the struct or union containing that field.
The return type (pointer to the parent struct or union in question) is the inferred result type.
</p>
<p>
If {#syntax#}field_ptr{#endsyntax#} does not point to the {#syntax#}field_name{#endsyntax#} field of an instance of
</p>
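For illustration only (this example is not part of the commit): a minimal sketch of the builtin described in the documentation hunk above, using a hypothetical `Node` struct. As the updated text states, the parent pointer type is supplied by the inferred result type.

```zig
const std = @import("std");

const Node = struct {
    prev: ?*Node = null,
    data: u32 = 0,
};

test "@fieldParentPtr recovers the containing struct" {
    var node: Node = .{ .data = 42 };
    const data_ptr: *u32 = &node.data;

    // The result type (*Node) comes from the declaration, so the builtin
    // only needs the field name and the field pointer.
    const parent: *Node = @fieldParentPtr("data", data_ptr);
    try std.testing.expect(parent == &node);

    // Mutating through the recovered parent pointer is visible on the original.
    parent.data += 1;
    try std.testing.expectEqual(@as(u32, 43), node.data);
}
```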
@@ -7,7 +7,7 @@ pub fn main() void {
const y = @shrExact(x, 2);
std.debug.print("value: {}\n", .{y});

if (builtin.cpu.arch.isRISCV() and builtin.zig_backend == .stage2_llvm) @panic("https://github.com/ziglang/zig/issues/24304");
if ((builtin.cpu.arch.isRISCV() or builtin.cpu.arch.isLoongArch()) and builtin.zig_backend == .stage2_llvm) @panic("https://github.com/ziglang/zig/issues/24304");
}

// exe=fail
@@ -8,7 +8,7 @@ const S = packed struct {
test "overaligned pointer to packed struct" {
var foo: S align(4) = .{ .a = 1, .b = 2 };
const ptr: *align(4) S = &foo;
const ptr_to_b: *u32 = &ptr.b;
const ptr_to_b = &ptr.b;
try expect(ptr_to_b.* == 2);
}
lib/compiler/aro/README.md (vendored): 26 lines changed

@@ -1,26 +0,0 @@
<img src="https://aro.vexu.eu/aro-logo.svg" alt="Aro" width="120px"/>

# Aro

A C compiler with the goal of providing fast compilation and low memory usage with good diagnostics.

Aro is included as an alternative C frontend in the [Zig compiler](https://github.com/ziglang/zig)
for `translate-c` and eventually compiling C files by translating them to Zig first.
Aro is developed in https://github.com/Vexu/arocc and the Zig dependency is
updated from there when needed.

Currently most of standard C is supported up to C23 and as are many of the common
extensions from GNU, MSVC, and Clang

Basic code generation is supported for x86-64 linux and can produce a valid hello world:
```sh-session
$ cat hello.c
extern int printf(const char *restrict fmt, ...);
int main(void) {
printf("Hello, world!\n");
return 0;
}
$ zig build && ./zig-out/bin/arocc hello.c -o hello
$ ./hello
Hello, world!
```
lib/compiler/aro/aro.zig (vendored): 10 lines changed

@@ -5,12 +5,14 @@ pub const Driver = @import("aro/Driver.zig");
pub const Parser = @import("aro/Parser.zig");
pub const Preprocessor = @import("aro/Preprocessor.zig");
pub const Source = @import("aro/Source.zig");
pub const StringInterner = @import("aro/StringInterner.zig");
pub const target_util = @import("aro/target.zig");
pub const Tokenizer = @import("aro/Tokenizer.zig");
pub const Toolchain = @import("aro/Toolchain.zig");
pub const Tree = @import("aro/Tree.zig");
pub const Type = @import("aro/Type.zig");
pub const TypeMapper = @import("aro/StringInterner.zig").TypeMapper;
pub const target_util = @import("aro/target.zig");
pub const TypeStore = @import("aro/TypeStore.zig");
pub const QualType = TypeStore.QualType;
pub const Type = TypeStore.Type;
pub const Value = @import("aro/Value.zig");

const backend = @import("backend.zig");

@@ -18,6 +20,7 @@ pub const Interner = backend.Interner;
pub const Ir = backend.Ir;
pub const Object = backend.Object;
pub const CallingConvention = backend.CallingConvention;
pub const Assembly = backend.Assembly;

pub const version_str = backend.version_str;
pub const version = backend.version;

@@ -34,6 +37,5 @@ test {
_ = @import("aro/Preprocessor.zig");
_ = @import("aro/target.zig");
_ = @import("aro/Tokenizer.zig");
_ = @import("aro/toolchains/Linux.zig");
_ = @import("aro/Value.zig");
}
lib/compiler/aro/aro/Attribute.zig (vendored): 758 lines changed (file diff suppressed because it is too large)
lib/compiler/aro/aro/Attribute/names.zig (vendored): 1716 lines changed (file diff suppressed because it is too large)
lib/compiler/aro/aro/Builtins.zig (vendored): 302 lines changed

@@ -1,21 +1,23 @@
const std = @import("std");

const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const TypeDescription = @import("Builtins/TypeDescription.zig");
const target_util = @import("target.zig");
const StringId = @import("StringInterner.zig").StringId;
const LangOpts = @import("LangOpts.zig");
const Parser = @import("Parser.zig");
const target_util = @import("target.zig");
const TypeStore = @import("TypeStore.zig");
const QualType = TypeStore.QualType;
const Builder = TypeStore.Builder;
const TypeDescription = @import("Builtins/TypeDescription.zig");

const Properties = @import("Builtins/Properties.zig");
pub const Builtin = @import("Builtins/Builtin.zig").with(Properties);

const Expanded = struct {
ty: Type,
qt: QualType,
builtin: Builtin,
};

const NameToTypeMap = std.StringHashMapUnmanaged(Type);
const NameToTypeMap = std.StringHashMapUnmanaged(QualType);

const Builtins = @This();

@@ -25,38 +27,38 @@ pub fn deinit(b: *Builtins, gpa: std.mem.Allocator) void {
b._name_to_type_map.deinit(gpa);
}

fn specForSize(comp: *const Compilation, size_bits: u32) Type.Builder.Specifier {
var ty = Type{ .specifier = .short };
if (ty.sizeof(comp).? * 8 == size_bits) return .short;
fn specForSize(comp: *const Compilation, size_bits: u32) TypeStore.Builder.Specifier {
var qt: QualType = .short;
if (qt.bitSizeof(comp) == size_bits) return .short;

ty.specifier = .int;
if (ty.sizeof(comp).? * 8 == size_bits) return .int;
qt = .int;
if (qt.bitSizeof(comp) == size_bits) return .int;

ty.specifier = .long;
if (ty.sizeof(comp).? * 8 == size_bits) return .long;
qt = .long;
if (qt.bitSizeof(comp) == size_bits) return .long;

ty.specifier = .long_long;
if (ty.sizeof(comp).? * 8 == size_bits) return .long_long;
qt = .long_long;
if (qt.bitSizeof(comp) == size_bits) return .long_long;

unreachable;
}

fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *const Compilation, allocator: std.mem.Allocator) !Type {
var builder: Type.Builder = .{ .error_on_invalid = true };
fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *Compilation) !QualType {
var parser: Parser = undefined;
parser.comp = comp;
var builder: TypeStore.Builder = .{ .parser = &parser, .error_on_invalid = true };

var require_native_int32 = false;
var require_native_int64 = false;
for (desc.prefix) |prefix| {
switch (prefix) {
.L => builder.combine(undefined, .long, 0) catch unreachable,
.LL => {
builder.combine(undefined, .long, 0) catch unreachable;
builder.combine(undefined, .long, 0) catch unreachable;
},
.L => builder.combine(.long, 0) catch unreachable,
.LL => builder.combine(.long_long, 0) catch unreachable,
.LLL => {
switch (builder.specifier) {
.none => builder.specifier = .int128,
.signed => builder.specifier = .sint128,
.unsigned => builder.specifier = .uint128,
switch (builder.type) {
.none => builder.type = .int128,
.signed => builder.type = .sint128,
.unsigned => builder.type = .uint128,
else => unreachable,
}
},

@@ -65,239 +67,226 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *c
.N => {
std.debug.assert(desc.spec == .i);
if (!target_util.isLP64(comp.target)) {
builder.combine(undefined, .long, 0) catch unreachable;
builder.combine(.long, 0) catch unreachable;
}
},
.O => {
builder.combine(undefined, .long, 0) catch unreachable;
builder.combine(.long, 0) catch unreachable;
if (comp.target.os.tag != .opencl) {
builder.combine(undefined, .long, 0) catch unreachable;
builder.combine(.long, 0) catch unreachable;
}
},
.S => builder.combine(undefined, .signed, 0) catch unreachable,
.U => builder.combine(undefined, .unsigned, 0) catch unreachable,
.S => builder.combine(.signed, 0) catch unreachable,
.U => builder.combine(.unsigned, 0) catch unreachable,
.I => {
// Todo: compile-time constant integer
},
}
}
switch (desc.spec) {
.v => builder.combine(undefined, .void, 0) catch unreachable,
.b => builder.combine(undefined, .bool, 0) catch unreachable,
.c => builder.combine(undefined, .char, 0) catch unreachable,
.s => builder.combine(undefined, .short, 0) catch unreachable,
.v => builder.combine(.void, 0) catch unreachable,
.b => builder.combine(.bool, 0) catch unreachable,
.c => builder.combine(.char, 0) catch unreachable,
.s => builder.combine(.short, 0) catch unreachable,
.i => {
if (require_native_int32) {
builder.specifier = specForSize(comp, 32);
builder.type = specForSize(comp, 32);
} else if (require_native_int64) {
builder.specifier = specForSize(comp, 64);
builder.type = specForSize(comp, 64);
} else {
switch (builder.specifier) {
switch (builder.type) {
.int128, .sint128, .uint128 => {},
else => builder.combine(undefined, .int, 0) catch unreachable,
else => builder.combine(.int, 0) catch unreachable,
}
}
},
.h => builder.combine(undefined, .fp16, 0) catch unreachable,
.x => builder.combine(undefined, .float16, 0) catch unreachable,
.h => builder.combine(.fp16, 0) catch unreachable,
.x => builder.combine(.float16, 0) catch unreachable,
.y => {
// Todo: __bf16
return .{ .specifier = .invalid };
return .invalid;
},
.f => builder.combine(undefined, .float, 0) catch unreachable,
.f => builder.combine(.float, 0) catch unreachable,
.d => {
if (builder.specifier == .long_long) {
builder.specifier = .float128;
if (builder.type == .long_long) {
builder.type = .float128;
} else {
builder.combine(undefined, .double, 0) catch unreachable;
builder.combine(.double, 0) catch unreachable;
}
},
.z => {
std.debug.assert(builder.specifier == .none);
builder.specifier = Type.Builder.fromType(comp.types.size);
std.debug.assert(builder.type == .none);
builder.type = Builder.fromType(comp, comp.type_store.size);
},
.w => {
std.debug.assert(builder.specifier == .none);
builder.specifier = Type.Builder.fromType(comp.types.wchar);
std.debug.assert(builder.type == .none);
builder.type = Builder.fromType(comp, comp.type_store.wchar);
},
.F => {
std.debug.assert(builder.specifier == .none);
builder.specifier = Type.Builder.fromType(comp.types.ns_constant_string.ty);
std.debug.assert(builder.type == .none);
builder.type = Builder.fromType(comp, comp.type_store.ns_constant_string);
},
.G => {
// Todo: id
return .{ .specifier = .invalid };
return .invalid;
},
.H => {
// Todo: SEL
return .{ .specifier = .invalid };
return .invalid;
},
.M => {
// Todo: struct objc_super
return .{ .specifier = .invalid };
return .invalid;
},
.a => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.va_list);
builder.type = Builder.fromType(comp, comp.type_store.va_list);
},
.A => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0);
var va_list = comp.types.va_list;
if (va_list.isArray()) va_list.decayArray();
builder.specifier = Type.Builder.fromType(va_list);
var va_list = comp.type_store.va_list;
std.debug.assert(!va_list.is(comp, .array));
builder.type = Builder.fromType(comp, va_list);
},
.V => |element_count| {
std.debug.assert(desc.suffix.len == 0);
const child_desc = it.next().?;
const child_ty = try createType(child_desc, undefined, comp, allocator);
const arr_ty = try allocator.create(Type.Array);
arr_ty.* = .{
const elem_qt = try createType(child_desc, undefined, comp);
const vector_qt = try comp.type_store.put(comp.gpa, .{ .vector = .{
.elem = elem_qt,
.len = element_count,
.elem = child_ty,
};
const vector_ty: Type = .{ .specifier = .vector, .data = .{ .array = arr_ty } };
builder.specifier = Type.Builder.fromType(vector_ty);
} });
builder.type = .{ .other = vector_qt };
},
.q => {
// Todo: scalable vector
return .{ .specifier = .invalid };
return .invalid;
},
.E => {
// Todo: ext_vector (OpenCL vector)
return .{ .specifier = .invalid };
return .invalid;
},
.X => |child| {
builder.combine(undefined, .complex, 0) catch unreachable;
builder.combine(.complex, 0) catch unreachable;
switch (child) {
.float => builder.combine(undefined, .float, 0) catch unreachable,
.double => builder.combine(undefined, .double, 0) catch unreachable,
.float => builder.combine(.float, 0) catch unreachable,
.double => builder.combine(.double, 0) catch unreachable,
.longdouble => {
builder.combine(undefined, .long, 0) catch unreachable;
builder.combine(undefined, .double, 0) catch unreachable;
builder.combine(.long, 0) catch unreachable;
builder.combine(.double, 0) catch unreachable;
},
}
},
.Y => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.ptrdiff);
builder.type = Builder.fromType(comp, comp.type_store.ptrdiff);
},
.P => {
std.debug.assert(builder.specifier == .none);
if (comp.types.file.specifier == .invalid) {
return comp.types.file;
std.debug.assert(builder.type == .none);
if (comp.type_store.file.isInvalid()) {
return comp.type_store.file;
}
builder.specifier = Type.Builder.fromType(comp.types.file);
builder.type = Builder.fromType(comp, comp.type_store.file);
},
.J => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0);
if (comp.types.jmp_buf.specifier == .invalid) {
return comp.types.jmp_buf;
if (comp.type_store.jmp_buf.isInvalid()) {
return comp.type_store.jmp_buf;
}
builder.specifier = Type.Builder.fromType(comp.types.jmp_buf);
builder.type = Builder.fromType(comp, comp.type_store.jmp_buf);
},
.SJ => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0);
if (comp.types.sigjmp_buf.specifier == .invalid) {
return comp.types.sigjmp_buf;
if (comp.type_store.sigjmp_buf.isInvalid()) {
return comp.type_store.sigjmp_buf;
}
builder.specifier = Type.Builder.fromType(comp.types.sigjmp_buf);
builder.type = Builder.fromType(comp, comp.type_store.sigjmp_buf);
},
.K => {
std.debug.assert(builder.specifier == .none);
if (comp.types.ucontext_t.specifier == .invalid) {
return comp.types.ucontext_t;
std.debug.assert(builder.type == .none);
if (comp.type_store.ucontext_t.isInvalid()) {
return comp.type_store.ucontext_t;
}
builder.specifier = Type.Builder.fromType(comp.types.ucontext_t);
builder.type = Builder.fromType(comp, comp.type_store.ucontext_t);
},
.p => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.pid_t);
builder.type = Builder.fromType(comp, comp.type_store.pid_t);
},
.@"!" => return .{ .specifier = .invalid },
.@"!" => return .invalid,
}
for (desc.suffix) |suffix| {
switch (suffix) {
.@"*" => |address_space| {
_ = address_space; // TODO: handle address space
const elem_ty = try allocator.create(Type);
elem_ty.* = builder.finish(undefined) catch unreachable;
const ty = Type{
.specifier = .pointer,
.data = .{ .sub_type = elem_ty },
};
builder.qual = .{};
builder.specifier = Type.Builder.fromType(ty);
const pointer_qt = try comp.type_store.put(comp.gpa, .{ .pointer = .{
.child = builder.finish() catch unreachable,
.decayed = null,
} });

builder.@"const" = null;
builder.@"volatile" = null;
builder.restrict = null;
builder.type = .{ .other = pointer_qt };
},
.C => builder.qual.@"const" = 0,
.D => builder.qual.@"volatile" = 0,
.R => builder.qual.restrict = 0,
.C => builder.@"const" = 0,
.D => builder.@"volatile" = 0,
.R => builder.restrict = 0,
}
}
return builder.finish(undefined) catch unreachable;
return builder.finish() catch unreachable;
}

fn createBuiltin(comp: *const Compilation, builtin: Builtin, type_arena: std.mem.Allocator) !Type {
fn createBuiltin(comp: *Compilation, builtin: Builtin) !QualType {
var it = TypeDescription.TypeIterator.init(builtin.properties.param_str);

const ret_ty_desc = it.next().?;
if (ret_ty_desc.spec == .@"!") {
// Todo: handle target-dependent definition
}
const ret_ty = try createType(ret_ty_desc, &it, comp, type_arena);
const ret_ty = try createType(ret_ty_desc, &it, comp);
var param_count: usize = 0;
var params: [Builtin.max_param_count]Type.Func.Param = undefined;
var params: [Builtin.max_param_count]TypeStore.Type.Func.Param = undefined;
while (it.next()) |desc| : (param_count += 1) {
params[param_count] = .{ .name_tok = 0, .ty = try createType(desc, &it, comp, type_arena), .name = .empty };
params[param_count] = .{ .name_tok = 0, .qt = try createType(desc, &it, comp), .name = .empty, .node = .null };
}

const duped_params = try type_arena.dupe(Type.Func.Param, params[0..param_count]);
const func = try type_arena.create(Type.Func);

func.* = .{
return comp.type_store.put(comp.gpa, .{ .func = .{
.return_type = ret_ty,
.params = duped_params,
};
return .{
.specifier = if (builtin.properties.isVarArgs()) .var_args_func else .func,
.data = .{ .func = func },
};
.kind = if (builtin.properties.isVarArgs()) .variadic else .normal,
.params = params[0..param_count],
} });
}

/// Asserts that the builtin has already been created
pub fn lookup(b: *const Builtins, name: []const u8) Expanded {
const builtin = Builtin.fromName(name).?;
const ty = b._name_to_type_map.get(name).?;
return .{
.builtin = builtin,
.ty = ty,
};
const qt = b._name_to_type_map.get(name).?;
return .{ .builtin = builtin, .qt = qt };
}

pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8, type_arena: std.mem.Allocator) !?Expanded {
const ty = b._name_to_type_map.get(name) orelse {
pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8) !?Expanded {
const qt = b._name_to_type_map.get(name) orelse {
const builtin = Builtin.fromName(name) orelse return null;
if (!comp.hasBuiltinFunction(builtin)) return null;

try b._name_to_type_map.ensureUnusedCapacity(comp.gpa, 1);
const ty = try createBuiltin(comp, builtin, type_arena);
b._name_to_type_map.putAssumeCapacity(name, ty);
const qt = try createBuiltin(comp, builtin);
b._name_to_type_map.putAssumeCapacity(name, qt);

return .{
.builtin = builtin,
.ty = ty,
.qt = qt,
};
};
const builtin = Builtin.fromName(name).?;
return .{
.builtin = builtin,
.ty = ty,
};
return .{ .builtin = builtin, .qt = qt };
}

pub const Iterator = struct {

@@ -323,12 +312,13 @@ pub const Iterator = struct {
};

test Iterator {
const gpa = std.testing.allocator;
var it = Iterator{};

var seen = std.StringHashMap(Builtin).init(std.testing.allocator);
defer seen.deinit();
var seen: std.StringHashMapUnmanaged(Builtin) = .empty;
defer seen.deinit(gpa);

var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
var arena_state = std.heap.ArenaAllocator.init(gpa);
defer arena_state.deinit();
const arena = arena_state.allocator();

@@ -344,25 +334,27 @@ test Iterator {
std.debug.print("previous data: {}\n", .{seen.get(entry.name).?});
return error.TestExpectedUniqueEntries;
}
try seen.put(try arena.dupe(u8, entry.name), entry.builtin);
try seen.put(gpa, try arena.dupe(u8, entry.name), entry.builtin);
}
try std.testing.expectEqual(@as(usize, Builtin.data.len), seen.count());
}

test "All builtins" {
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
_ = try comp.generateBuiltinMacros(.include_system_defines);
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();

const type_arena = arena.allocator();
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
defer comp.deinit();

try comp.type_store.initNamedTypes(&comp);
comp.type_store.va_list = try comp.type_store.va_list.decay(&comp);

var builtin_it = Iterator{};
while (builtin_it.next()) |entry| {
const name = try type_arena.dupe(u8, entry.name);
if (try comp.builtins.getOrCreate(&comp, name, type_arena)) |func_ty| {
const get_again = (try comp.builtins.getOrCreate(&comp, name, std.testing.failing_allocator)).?;
const name = try arena.dupe(u8, entry.name);
if (try comp.builtins.getOrCreate(&comp, name)) |func_ty| {
const get_again = (try comp.builtins.getOrCreate(&comp, name)).?;
const found_by_lookup = comp.builtins.lookup(name);
try std.testing.expectEqual(func_ty.builtin.tag, get_again.builtin.tag);
try std.testing.expectEqual(func_ty.builtin.tag, found_by_lookup.builtin.tag);

@@ -373,19 +365,19 @@ test "All builtins" {
test "Allocation failures" {
const Test = struct {
fn testOne(allocator: std.mem.Allocator) !void {
var comp = Compilation.init(allocator, std.fs.cwd());
var arena_state: std.heap.ArenaAllocator = .init(allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();

var comp = Compilation.init(allocator, arena, undefined, std.fs.cwd());
defer comp.deinit();
_ = try comp.generateBuiltinMacros(.include_system_defines);
var arena = std.heap.ArenaAllocator.init(comp.gpa);
defer arena.deinit();

const type_arena = arena.allocator();

const num_builtins = 40;
var builtin_it = Iterator{};
for (0..num_builtins) |_| {
const entry = builtin_it.next().?;
_ = try comp.builtins.getOrCreate(&comp, entry.name, type_arena);
_ = try comp.builtins.getOrCreate(&comp, entry.name);
}
}
};
lib/compiler/aro/aro/Builtins/Builtin.zig (vendored): 23182 lines changed (file diff suppressed because it is too large)
lib/compiler/aro/aro/Builtins/eval.zig (vendored): 39 lines changed
@@ -5,8 +5,9 @@ const Builtins = @import("../Builtins.zig");
const Builtin = Builtins.Builtin;
const Parser = @import("../Parser.zig");
const Tree = @import("../Tree.zig");
const NodeIndex = Tree.NodeIndex;
const Type = @import("../Type.zig");
const TypeStore = @import("../TypeStore.zig");
const Type = TypeStore.Type;
const QualType = TypeStore.QualType;
const Value = @import("../Value.zig");

fn makeNan(comptime T: type, str: []const u8) T {

@@ -22,22 +23,22 @@ fn makeNan(comptime T: type, str: []const u8) T {
return @bitCast(@as(UnsignedSameSize, bits) | @as(UnsignedSameSize, @bitCast(std.math.nan(T))));
}

pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const Tree.Node.Index) !Value {
const builtin = Builtin.fromTag(tag);
if (!builtin.properties.attributes.const_evaluable) return .{};

switch (tag) {
Builtin.tagFromName("__builtin_inff").?,
Builtin.tagFromName("__builtin_inf").?,
Builtin.tagFromName("__builtin_infl").?,
.__builtin_inff,
.__builtin_inf,
.__builtin_infl,
=> {
const ty: Type = switch (tag) {
Builtin.tagFromName("__builtin_inff").? => .{ .specifier = .float },
Builtin.tagFromName("__builtin_inf").? => .{ .specifier = .double },
Builtin.tagFromName("__builtin_infl").? => .{ .specifier = .long_double },
const qt: QualType = switch (tag) {
.__builtin_inff => .float,
.__builtin_inf => .double,
.__builtin_infl => .long_double,
else => unreachable,
};
const f: Interner.Key.Float = switch (ty.bitSizeof(p.comp).?) {
const f: Interner.Key.Float = switch (qt.bitSizeof(p.comp)) {
32 => .{ .f32 = std.math.inf(f32) },
64 => .{ .f64 = std.math.inf(f64) },
80 => .{ .f80 = std.math.inf(f80) },

@@ -46,14 +47,14 @@ pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
};
return Value.intern(p.comp, .{ .float = f });
},
Builtin.tagFromName("__builtin_isinf").? => blk: {
.__builtin_isinf => blk: {
if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isInf(p.comp));
},
Builtin.tagFromName("__builtin_isinf_sign").? => blk: {
.__builtin_isinf_sign => blk: {
if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
switch (val.isInfSign(p.comp)) {
.unknown => {},
.finite => return Value.zero,

@@ -61,17 +62,17 @@ pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
.negative => return Value.int(@as(i64, -1), p.comp),
}
},
Builtin.tagFromName("__builtin_isnan").? => blk: {
.__builtin_isnan => blk: {
if (args.len == 0) break :blk;
const val = p.value_map.get(args[0]) orelse break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isNan(p.comp));
},
Builtin.tagFromName("__builtin_nan").? => blk: {
.__builtin_nan => blk: {
if (args.len == 0) break :blk;
const val = p.getDecayedStringLiteral(args[0]) orelse break :blk;
const bytes = p.comp.interner.get(val.ref()).bytes;

const f: Interner.Key.Float = switch ((Type{ .specifier = .double }).bitSizeof(p.comp).?) {
const f: Interner.Key.Float = switch (Type.Float.double.bits(p.comp)) {
32 => .{ .f32 = makeNan(f32, bytes) },
64 => .{ .f64 = makeNan(f64, bytes) },
80 => .{ .f80 = makeNan(f80, bytes) },
lib/compiler/aro/aro/CodeGen.zig (vendored): 1055 lines changed (file diff suppressed because it is too large)
lib/compiler/aro/aro/Compilation.zig (vendored): 2083 lines changed (file diff suppressed because it is too large)
lib/compiler/aro/aro/DepFile.zig (vendored, normal file): 99 lines changed

@@ -0,0 +1,99 @@
const std = @import("std");
const Allocator = std.mem.Allocator;

pub const Format = enum { make, nmake };

const DepFile = @This();

target: []const u8,
deps: std.StringArrayHashMapUnmanaged(void) = .empty,
format: Format,

pub fn deinit(d: *DepFile, gpa: Allocator) void {
d.deps.deinit(gpa);
d.* = undefined;
}

pub fn addDependency(d: *DepFile, gpa: Allocator, path: []const u8) !void {
try d.deps.put(gpa, path, {});
}

pub fn addDependencyDupe(d: *DepFile, gpa: Allocator, arena: Allocator, path: []const u8) !void {
const gop = try d.deps.getOrPut(gpa, path);
if (gop.found_existing) return;
gop.key_ptr.* = try arena.dupe(u8, path);
}

pub fn write(d: *const DepFile, w: *std.Io.Writer) std.Io.Writer.Error!void {
const max_columns = 75;
var columns: usize = 0;

try writeTarget(d.target, w);
columns += d.target.len;
try w.writeByte(':');
columns += 1;

for (d.deps.keys()) |path| {
if (std.mem.eql(u8, path, "<stdin>")) continue;

if (columns + path.len + " \\\n".len > max_columns) {
try w.writeAll(" \\\n ");
columns = 1;
}
try w.writeByte(' ');
try d.writePath(path, w);
columns += path.len + 1;
}
try w.writeByte('\n');
try w.flush();
}

fn writeTarget(path: []const u8, w: *std.Io.Writer) !void {
for (path, 0..) |c, i| {
switch (c) {
' ', '\t' => {
try w.writeByte('\\');
var j = i;
while (j != 0) {
j -= 1;
if (path[j] != '\\') break;
try w.writeByte('\\');
}
},
'$' => try w.writeByte('$'),
'#' => try w.writeByte('\\'),
else => {},
}
try w.writeByte(c);
}
}

fn writePath(d: *const DepFile, path: []const u8, w: *std.Io.Writer) !void {
switch (d.format) {
.nmake => {
if (std.mem.indexOfAny(u8, path, " #${}^!")) |_|
try w.print("\"{s}\"", .{path})
else
try w.writeAll(path);
},
.make => {
for (path, 0..) |c, i| {
switch (c) {
' ' => {
try w.writeByte('\\');
var j = i;
while (j != 0) {
j -= 1;
if (path[j] != '\\') break;
try w.writeByte('\\');
}
},
'$' => try w.writeByte('$'),
'#' => try w.writeByte('\\'),
else => {},
}
try w.writeByte(c);
}
},
}
}
lib/compiler/aro/aro/Diagnostics.zig (vendored): 1026 lines changed (file diff suppressed because it is too large)
lib/compiler/aro/aro/Diagnostics/messages.zig (vendored): 1041 lines changed (file diff suppressed because it is too large)
lib/compiler/aro/aro/Driver.zig (vendored): 1103 lines changed (file diff suppressed because it is too large)
12 lib/compiler/aro/aro/Driver/Filesystem.zig vendored

@@ -96,7 +96,7 @@ fn findProgramByNamePosix(name: []const u8, path: ?[]const u8, buf: []u8) ?[]con
}

pub const Filesystem = union(enum) {
    real: void,
    real: std.fs.Dir,
    fake: []const Entry,

    const Entry = struct {
@@ -172,8 +172,8 @@ pub const Filesystem = union(enum) {

    pub fn exists(fs: Filesystem, path: []const u8) bool {
        switch (fs) {
            .real => {
                std.fs.cwd().access(path, .{}) catch return false;
            .real => |cwd| {
                cwd.access(path, .{}) catch return false;
                return true;
            },
            .fake => |paths| return existsFake(paths, path),
@@ -210,8 +210,8 @@ pub const Filesystem = union(enum) {
    /// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned
    pub fn readFile(fs: Filesystem, path: []const u8, buf: []u8) ?[]const u8 {
        return switch (fs) {
            .real => {
                const file = std.fs.cwd().openFile(path, .{}) catch return null;
            .real => |cwd| {
                const file = cwd.openFile(path, .{}) catch return null;
                defer file.close();

                const bytes_read = file.readAll(buf) catch return null;
@@ -223,7 +223,7 @@ pub const Filesystem = union(enum) {

    pub fn openDir(fs: Filesystem, dir_name: []const u8) std.fs.Dir.OpenError!Dir {
        return switch (fs) {
            .real => .{ .dir = try std.fs.cwd().openDir(dir_name, .{ .access_sub_paths = false, .iterate = true }) },
            .real => |cwd| .{ .dir = try cwd.openDir(dir_name, .{ .access_sub_paths = false, .iterate = true }) },
            .fake => |entries| .{ .fake = .{ .entries = entries, .path = dir_name } },
        };
    }
634 lib/compiler/aro/aro/Driver/GCCDetector.zig vendored

@@ -1,634 +0,0 @@
const std = @import("std");
const Toolchain = @import("../Toolchain.zig");
const target_util = @import("../target.zig");
const system_defaults = @import("system_defaults");
const GCCVersion = @import("GCCVersion.zig");
const Multilib = @import("Multilib.zig");

const GCCDetector = @This();

is_valid: bool = false,
install_path: []const u8 = "",
parent_lib_path: []const u8 = "",
version: GCCVersion = .{},
gcc_triple: []const u8 = "",
selected: Multilib = .{},
biarch_sibling: ?Multilib = null,

pub fn deinit(self: *GCCDetector) void {
    if (!self.is_valid) return;
}

pub fn appendToolPath(self: *const GCCDetector, tc: *Toolchain) !void {
    if (!self.is_valid) return;
    return tc.addPathFromComponents(&.{
        self.parent_lib_path,
        "..",
        self.gcc_triple,
        "bin",
    }, .program);
}

fn addDefaultGCCPrefixes(prefixes: *std.ArrayListUnmanaged([]const u8), tc: *const Toolchain) !void {
    const sysroot = tc.getSysroot();
    const target = tc.getTarget();
    if (sysroot.len == 0 and target.os.tag == .linux and tc.filesystem.exists("/opt/rh")) {
        prefixes.appendAssumeCapacity("/opt/rh/gcc-toolset-12/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/gcc-toolset-11/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/gcc-toolset-10/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-12/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-11/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-10/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-9/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-8/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-7/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-6/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-4/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-3/root/usr");
        prefixes.appendAssumeCapacity("/opt/rh/devtoolset-2/root/usr");
    }
    if (sysroot.len == 0) {
        prefixes.appendAssumeCapacity("/usr");
    } else {
        var usr_path = try tc.arena.alloc(u8, 4 + sysroot.len);
        @memcpy(usr_path[0..4], "/usr");
        @memcpy(usr_path[4..], sysroot);
        prefixes.appendAssumeCapacity(usr_path);
    }
}

fn collectLibDirsAndTriples(
    tc: *Toolchain,
    lib_dirs: *std.ArrayListUnmanaged([]const u8),
    triple_aliases: *std.ArrayListUnmanaged([]const u8),
    biarch_libdirs: *std.ArrayListUnmanaged([]const u8),
    biarch_triple_aliases: *std.ArrayListUnmanaged([]const u8),
) !void {
    const AArch64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const AArch64Triples: [4][]const u8 = .{ "aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux", "aarch64-suse-linux" };
    const AArch64beLibDirs: [1][]const u8 = .{"/lib"};
    const AArch64beTriples: [2][]const u8 = .{ "aarch64_be-none-linux-gnu", "aarch64_be-linux-gnu" };

    const ARMLibDirs: [1][]const u8 = .{"/lib"};
    const ARMTriples: [1][]const u8 = .{"arm-linux-gnueabi"};
    const ARMHFTriples: [4][]const u8 = .{ "arm-linux-gnueabihf", "armv7hl-redhat-linux-gnueabi", "armv6hl-suse-linux-gnueabi", "armv7hl-suse-linux-gnueabi" };

    const ARMebLibDirs: [1][]const u8 = .{"/lib"};
    const ARMebTriples: [1][]const u8 = .{"armeb-linux-gnueabi"};
    const ARMebHFTriples: [2][]const u8 = .{ "armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi" };

    const AVRLibDirs: [1][]const u8 = .{"/lib"};
    const AVRTriples: [1][]const u8 = .{"avr"};

    const CSKYLibDirs: [1][]const u8 = .{"/lib"};
    const CSKYTriples: [3][]const u8 = .{ "csky-linux-gnuabiv2", "csky-linux-uclibcabiv2", "csky-elf-noneabiv2" };

    const X86_64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const X86_64Triples: [11][]const u8 = .{
        "x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
        "x86_64-pc-linux-gnu", "x86_64-redhat-linux6E",
        "x86_64-redhat-linux", "x86_64-suse-linux",
        "x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
        "x86_64-slackware-linux", "x86_64-unknown-linux",
        "x86_64-amazon-linux",
    };
    const X32Triples: [2][]const u8 = .{ "x86_64-linux-gnux32", "x86_64-pc-linux-gnux32" };
    const X32LibDirs: [2][]const u8 = .{ "/libx32", "/lib" };
    const X86LibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
    const X86Triples: [9][]const u8 = .{
        "i586-linux-gnu", "i686-linux-gnu", "i686-pc-linux-gnu",
        "i386-redhat-linux6E", "i686-redhat-linux", "i386-redhat-linux",
        "i586-suse-linux", "i686-montavista-linux", "i686-gnu",
    };

    const LoongArch64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const LoongArch64Triples: [2][]const u8 = .{ "loongarch64-linux-gnu", "loongarch64-unknown-linux-gnu" };

    const M68kLibDirs: [1][]const u8 = .{"/lib"};
    const M68kTriples: [3][]const u8 = .{ "m68k-linux-gnu", "m68k-unknown-linux-gnu", "m68k-suse-linux" };

    const MIPSLibDirs: [2][]const u8 = .{ "/libo32", "/lib" };
    const MIPSTriples: [5][]const u8 = .{
        "mips-linux-gnu", "mips-mti-linux",
        "mips-mti-linux-gnu", "mips-img-linux-gnu",
        "mipsisa32r6-linux-gnu",
    };
    const MIPSELLibDirs: [2][]const u8 = .{ "/libo32", "/lib" };
    const MIPSELTriples: [3][]const u8 = .{ "mipsel-linux-gnu", "mips-img-linux-gnu", "mipsisa32r6el-linux-gnu" };

    const MIPS64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const MIPS64Triples: [6][]const u8 = .{
        "mips64-linux-gnu", "mips-mti-linux-gnu",
        "mips-img-linux-gnu", "mips64-linux-gnuabi64",
        "mipsisa64r6-linux-gnu", "mipsisa64r6-linux-gnuabi64",
    };
    const MIPS64ELLibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const MIPS64ELTriples: [6][]const u8 = .{
        "mips64el-linux-gnu", "mips-mti-linux-gnu",
        "mips-img-linux-gnu", "mips64el-linux-gnuabi64",
        "mipsisa64r6el-linux-gnu", "mipsisa64r6el-linux-gnuabi64",
    };

    const MIPSN32LibDirs: [1][]const u8 = .{"/lib32"};
    const MIPSN32Triples: [2][]const u8 = .{ "mips64-linux-gnuabin32", "mipsisa64r6-linux-gnuabin32" };
    const MIPSN32ELLibDirs: [1][]const u8 = .{"/lib32"};
    const MIPSN32ELTriples: [2][]const u8 = .{ "mips64el-linux-gnuabin32", "mipsisa64r6el-linux-gnuabin32" };

    const MSP430LibDirs: [1][]const u8 = .{"/lib"};
    const MSP430Triples: [1][]const u8 = .{"msp430-elf"};

    const PPCLibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
    const PPCTriples: [5][]const u8 = .{
        "powerpc-linux-gnu", "powerpc-unknown-linux-gnu", "powerpc-linux-gnuspe",
        // On 32-bit PowerPC systems running SUSE Linux, gcc is configured as a
        // 64-bit compiler which defaults to "-m32", hence "powerpc64-suse-linux".
        "powerpc64-suse-linux", "powerpc-montavista-linuxspe",
    };
    const PPCLELibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
    const PPCLETriples: [3][]const u8 = .{ "powerpcle-linux-gnu", "powerpcle-unknown-linux-gnu", "powerpcle-linux-musl" };

    const PPC64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const PPC64Triples: [4][]const u8 = .{
        "powerpc64-linux-gnu", "powerpc64-unknown-linux-gnu",
        "powerpc64-suse-linux", "ppc64-redhat-linux",
    };
    const PPC64LELibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const PPC64LETriples: [5][]const u8 = .{
        "powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
        "powerpc64le-none-linux-gnu", "powerpc64le-suse-linux",
        "ppc64le-redhat-linux",
    };

    const RISCV32LibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
    const RISCV32Triples: [3][]const u8 = .{ "riscv32-unknown-linux-gnu", "riscv32-linux-gnu", "riscv32-unknown-elf" };
    const RISCV64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const RISCV64Triples: [3][]const u8 = .{
        "riscv64-unknown-linux-gnu",
        "riscv64-linux-gnu",
        "riscv64-unknown-elf",
    };

    const SPARCv8LibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
    const SPARCv8Triples: [2][]const u8 = .{ "sparc-linux-gnu", "sparcv8-linux-gnu" };
    const SPARCv9LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const SPARCv9Triples: [2][]const u8 = .{ "sparc64-linux-gnu", "sparcv9-linux-gnu" };

    const SystemZLibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
    const SystemZTriples: [5][]const u8 = .{
        "s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
        "s390x-suse-linux", "s390x-redhat-linux",
    };
    const target = tc.getTarget();
    if (target.os.tag == .solaris) {
        // TODO
        return;
    }
    if (target.abi.isAndroid()) {
        const AArch64AndroidTriples: [1][]const u8 = .{"aarch64-linux-android"};
        const ARMAndroidTriples: [1][]const u8 = .{"arm-linux-androideabi"};
        const MIPSELAndroidTriples: [1][]const u8 = .{"mipsel-linux-android"};
        const MIPS64ELAndroidTriples: [1][]const u8 = .{"mips64el-linux-android"};
        const X86AndroidTriples: [1][]const u8 = .{"i686-linux-android"};
        const X86_64AndroidTriples: [1][]const u8 = .{"x86_64-linux-android"};

        switch (target.cpu.arch) {
            .aarch64 => {
                lib_dirs.appendSliceAssumeCapacity(&AArch64LibDirs);
                triple_aliases.appendSliceAssumeCapacity(&AArch64AndroidTriples);
            },
            .arm,
            .thumb,
            => {
                lib_dirs.appendSliceAssumeCapacity(&ARMLibDirs);
                triple_aliases.appendSliceAssumeCapacity(&ARMAndroidTriples);
            },
            .mipsel => {
                lib_dirs.appendSliceAssumeCapacity(&MIPSELLibDirs);
                triple_aliases.appendSliceAssumeCapacity(&MIPSELAndroidTriples);
                biarch_libdirs.appendSliceAssumeCapacity(&MIPS64ELLibDirs);
                biarch_triple_aliases.appendSliceAssumeCapacity(&MIPS64ELAndroidTriples);
            },
            .mips64el => {
                lib_dirs.appendSliceAssumeCapacity(&MIPS64ELLibDirs);
                triple_aliases.appendSliceAssumeCapacity(&MIPS64ELAndroidTriples);
                biarch_libdirs.appendSliceAssumeCapacity(&MIPSELLibDirs);
                biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSELAndroidTriples);
            },
            .x86_64 => {
                lib_dirs.appendSliceAssumeCapacity(&X86_64LibDirs);
                triple_aliases.appendSliceAssumeCapacity(&X86_64AndroidTriples);
                biarch_libdirs.appendSliceAssumeCapacity(&X86LibDirs);
                biarch_triple_aliases.appendSliceAssumeCapacity(&X86AndroidTriples);
            },
            .x86 => {
                lib_dirs.appendSliceAssumeCapacity(&X86LibDirs);
                triple_aliases.appendSliceAssumeCapacity(&X86AndroidTriples);
                biarch_libdirs.appendSliceAssumeCapacity(&X86_64LibDirs);
                biarch_triple_aliases.appendSliceAssumeCapacity(&X86_64AndroidTriples);
            },
            else => {},
        }
        return;
    }
    switch (target.cpu.arch) {
        .aarch64 => {
            lib_dirs.appendSliceAssumeCapacity(&AArch64LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&AArch64Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&AArch64LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&AArch64Triples);
        },
        .aarch64_be => {
            lib_dirs.appendSliceAssumeCapacity(&AArch64beLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&AArch64beTriples);
            biarch_libdirs.appendSliceAssumeCapacity(&AArch64beLibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&AArch64beTriples);
        },
        .arm, .thumb => {
            lib_dirs.appendSliceAssumeCapacity(&ARMLibDirs);
            if (target.abi == .gnueabihf) {
                triple_aliases.appendSliceAssumeCapacity(&ARMHFTriples);
            } else {
                triple_aliases.appendSliceAssumeCapacity(&ARMTriples);
            }
        },
        .armeb, .thumbeb => {
            lib_dirs.appendSliceAssumeCapacity(&ARMebLibDirs);
            if (target.abi == .gnueabihf) {
                triple_aliases.appendSliceAssumeCapacity(&ARMebHFTriples);
            } else {
                triple_aliases.appendSliceAssumeCapacity(&ARMebTriples);
            }
        },
        .avr => {
            lib_dirs.appendSliceAssumeCapacity(&AVRLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&AVRTriples);
        },
        .csky => {
            lib_dirs.appendSliceAssumeCapacity(&CSKYLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&CSKYTriples);
        },
        .x86_64 => {
            if (target.abi == .gnux32 or target.abi == .muslx32) {
                lib_dirs.appendSliceAssumeCapacity(&X32LibDirs);
                triple_aliases.appendSliceAssumeCapacity(&X32Triples);
                biarch_libdirs.appendSliceAssumeCapacity(&X86_64LibDirs);
                biarch_triple_aliases.appendSliceAssumeCapacity(&X86_64Triples);
            } else {
                lib_dirs.appendSliceAssumeCapacity(&X86_64LibDirs);
                triple_aliases.appendSliceAssumeCapacity(&X86_64Triples);
                biarch_libdirs.appendSliceAssumeCapacity(&X32LibDirs);
                biarch_triple_aliases.appendSliceAssumeCapacity(&X32Triples);
            }
            biarch_libdirs.appendSliceAssumeCapacity(&X86LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&X86Triples);
        },
        .x86 => {
            lib_dirs.appendSliceAssumeCapacity(&X86LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&X86Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&X86_64LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&X86_64Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&X32LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&X32Triples);
        },
        .loongarch64 => {
            lib_dirs.appendSliceAssumeCapacity(&LoongArch64LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&LoongArch64Triples);
        },
        .m68k => {
            lib_dirs.appendSliceAssumeCapacity(&M68kLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&M68kTriples);
        },
        .mips => {
            lib_dirs.appendSliceAssumeCapacity(&MIPSLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&MIPSTriples);
            biarch_libdirs.appendSliceAssumeCapacity(&MIPS64LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPS64Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&MIPSN32LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSN32Triples);
        },
        .mipsel => {
            lib_dirs.appendSliceAssumeCapacity(&MIPSELLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&MIPSELTriples);
            triple_aliases.appendSliceAssumeCapacity(&MIPSTriples);
            biarch_libdirs.appendSliceAssumeCapacity(&MIPS64ELLibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPS64ELTriples);
            biarch_libdirs.appendSliceAssumeCapacity(&MIPSN32ELLibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSN32ELTriples);
        },
        .mips64 => {
            lib_dirs.appendSliceAssumeCapacity(&MIPS64LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&MIPS64Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&MIPSLibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSTriples);
            biarch_libdirs.appendSliceAssumeCapacity(&MIPSN32LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSN32Triples);
        },
        .mips64el => {
            lib_dirs.appendSliceAssumeCapacity(&MIPS64ELLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&MIPS64ELTriples);
            biarch_libdirs.appendSliceAssumeCapacity(&MIPSELLibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSELTriples);
            biarch_libdirs.appendSliceAssumeCapacity(&MIPSN32ELLibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSN32ELTriples);
            biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSTriples);
        },
        .msp430 => {
            lib_dirs.appendSliceAssumeCapacity(&MSP430LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&MSP430Triples);
        },
        .powerpc => {
            lib_dirs.appendSliceAssumeCapacity(&PPCLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&PPCTriples);
            biarch_libdirs.appendSliceAssumeCapacity(&PPC64LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&PPC64Triples);
        },
        .powerpcle => {
            lib_dirs.appendSliceAssumeCapacity(&PPCLELibDirs);
            triple_aliases.appendSliceAssumeCapacity(&PPCLETriples);
            biarch_libdirs.appendSliceAssumeCapacity(&PPC64LELibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&PPC64LETriples);
        },
        .powerpc64 => {
            lib_dirs.appendSliceAssumeCapacity(&PPC64LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&PPC64Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&PPCLibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&PPCTriples);
        },
        .powerpc64le => {
            lib_dirs.appendSliceAssumeCapacity(&PPC64LELibDirs);
            triple_aliases.appendSliceAssumeCapacity(&PPC64LETriples);
            biarch_libdirs.appendSliceAssumeCapacity(&PPCLELibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&PPCLETriples);
        },
        .riscv32 => {
            lib_dirs.appendSliceAssumeCapacity(&RISCV32LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&RISCV32Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&RISCV64LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&RISCV64Triples);
        },
        .riscv64 => {
            lib_dirs.appendSliceAssumeCapacity(&RISCV64LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&RISCV64Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&RISCV32LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&RISCV32Triples);
        },
        .sparc => {
            lib_dirs.appendSliceAssumeCapacity(&SPARCv8LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&SPARCv8Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&SPARCv9LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&SPARCv9Triples);
        },
        .sparc64 => {
            lib_dirs.appendSliceAssumeCapacity(&SPARCv9LibDirs);
            triple_aliases.appendSliceAssumeCapacity(&SPARCv9Triples);
            biarch_libdirs.appendSliceAssumeCapacity(&SPARCv8LibDirs);
            biarch_triple_aliases.appendSliceAssumeCapacity(&SPARCv8Triples);
        },
        .s390x => {
            lib_dirs.appendSliceAssumeCapacity(&SystemZLibDirs);
            triple_aliases.appendSliceAssumeCapacity(&SystemZTriples);
        },
        else => {},
    }
}

pub fn discover(self: *GCCDetector, tc: *Toolchain) !void {
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var fib = std.heap.FixedBufferAllocator.init(&path_buf);

    const target = tc.getTarget();
    const biarch_variant_target = if (target.ptrBitWidth() == 32)
        target_util.get64BitArchVariant(target)
    else
        target_util.get32BitArchVariant(target);

    var candidate_lib_dirs_buffer: [16][]const u8 = undefined;
    var candidate_lib_dirs = std.ArrayListUnmanaged([]const u8).initBuffer(&candidate_lib_dirs_buffer);

    var candidate_triple_aliases_buffer: [16][]const u8 = undefined;
    var candidate_triple_aliases = std.ArrayListUnmanaged([]const u8).initBuffer(&candidate_triple_aliases_buffer);

    var candidate_biarch_lib_dirs_buffer: [16][]const u8 = undefined;
    var candidate_biarch_lib_dirs = std.ArrayListUnmanaged([]const u8).initBuffer(&candidate_biarch_lib_dirs_buffer);

    var candidate_biarch_triple_aliases_buffer: [16][]const u8 = undefined;
    var candidate_biarch_triple_aliases = std.ArrayListUnmanaged([]const u8).initBuffer(&candidate_biarch_triple_aliases_buffer);

    try collectLibDirsAndTriples(
        tc,
        &candidate_lib_dirs,
        &candidate_triple_aliases,
        &candidate_biarch_lib_dirs,
        &candidate_biarch_triple_aliases,
    );

    var target_buf: [64]u8 = undefined;
    const triple_str = target_util.toLLVMTriple(target, &target_buf);
    candidate_triple_aliases.appendAssumeCapacity(triple_str);

    // Also include the multiarch variant if it's different.
    var biarch_buf: [64]u8 = undefined;
    if (biarch_variant_target) |biarch_target| {
        const biarch_triple_str = target_util.toLLVMTriple(biarch_target, &biarch_buf);
        if (!std.mem.eql(u8, biarch_triple_str, triple_str)) {
            candidate_triple_aliases.appendAssumeCapacity(biarch_triple_str);
        }
    }

    var prefixes_buf: [16][]const u8 = undefined;
    var prefixes = std.ArrayListUnmanaged([]const u8).initBuffer(&prefixes_buf);
    const gcc_toolchain_dir = gccToolchainDir(tc);
    if (gcc_toolchain_dir.len != 0) {
        const adjusted = if (gcc_toolchain_dir[gcc_toolchain_dir.len - 1] == '/')
            gcc_toolchain_dir[0 .. gcc_toolchain_dir.len - 1]
        else
            gcc_toolchain_dir;
        prefixes.appendAssumeCapacity(adjusted);
    } else {
        const sysroot = tc.getSysroot();
        if (sysroot.len > 0) {
            prefixes.appendAssumeCapacity(sysroot);
            try addDefaultGCCPrefixes(&prefixes, tc);
        }

        if (sysroot.len == 0) {
            try addDefaultGCCPrefixes(&prefixes, tc);
        }
        // TODO: Special-case handling for Gentoo
    }

    const v0 = GCCVersion.parse("0.0.0");
    for (prefixes.items) |prefix| {
        if (!tc.filesystem.exists(prefix)) continue;

        for (candidate_lib_dirs.items) |suffix| {
            defer fib.reset();
            const lib_dir = std.fs.path.join(fib.allocator(), &.{ prefix, suffix }) catch continue;
            if (!tc.filesystem.exists(lib_dir)) continue;

            const gcc_dir_exists = tc.filesystem.joinedExists(&.{ lib_dir, "/gcc" });
            const gcc_cross_dir_exists = tc.filesystem.joinedExists(&.{ lib_dir, "/gcc-cross" });

            try self.scanLibDirForGCCTriple(tc, target, lib_dir, triple_str, false, gcc_dir_exists, gcc_cross_dir_exists);
            for (candidate_triple_aliases.items) |candidate| {
                try self.scanLibDirForGCCTriple(tc, target, lib_dir, candidate, false, gcc_dir_exists, gcc_cross_dir_exists);
            }
        }
        for (candidate_biarch_lib_dirs.items) |suffix| {
            const lib_dir = std.fs.path.join(fib.allocator(), &.{ prefix, suffix }) catch continue;
            if (!tc.filesystem.exists(lib_dir)) continue;

            const gcc_dir_exists = tc.filesystem.joinedExists(&.{ lib_dir, "/gcc" });
            const gcc_cross_dir_exists = tc.filesystem.joinedExists(&.{ lib_dir, "/gcc-cross" });
            for (candidate_biarch_triple_aliases.items) |candidate| {
                try self.scanLibDirForGCCTriple(tc, target, lib_dir, candidate, true, gcc_dir_exists, gcc_cross_dir_exists);
            }
        }
        if (self.version.order(v0) == .gt) break;
    }
}

fn findBiarchMultilibs(
    tc: *const Toolchain,
    result: *Multilib.Detected,
    target: std.Target,
    path: [2][]const u8,
    needs_biarch_suffix: bool,
) !bool {
    const suff64 = if (target.os.tag == .solaris) switch (target.cpu.arch) {
        .x86, .x86_64 => "/amd64",
        .sparc => "/sparcv9",
        else => "/64",
    } else "/64";

    const alt_64 = Multilib.init(suff64, suff64, &.{ "-m32", "+m64", "-mx32" });
    const alt_32 = Multilib.init("/32", "/32", &.{ "+m32", "-m64", "-mx32" });
    const alt_x32 = Multilib.init("/x32", "/x32", &.{ "-m32", "-m64", "+mx32" });

    const multilib_filter = Multilib.Filter{
        .base = path,
        .file = "crtbegin.o",
    };

    const Want = enum {
        want32,
        want64,
        wantx32,
    };
    const is_x32 = target.abi == .gnux32 or target.abi == .muslx32;
    const target_ptr_width = target.ptrBitWidth();
    const want: Want = if (target_ptr_width == 32 and multilib_filter.exists(alt_32, tc.filesystem))
        .want64
    else if (target_ptr_width == 64 and is_x32 and multilib_filter.exists(alt_x32, tc.filesystem))
        .want64
    else if (target_ptr_width == 64 and !is_x32 and multilib_filter.exists(alt_64, tc.filesystem))
        .want32
    else if (target_ptr_width == 32)
        if (needs_biarch_suffix) .want64 else .want32
    else if (is_x32)
        if (needs_biarch_suffix) .want64 else .wantx32
    else if (needs_biarch_suffix) .want32 else .want64;

    const default = switch (want) {
        .want32 => Multilib.init("", "", &.{ "+m32", "-m64", "-mx32" }),
        .want64 => Multilib.init("", "", &.{ "-m32", "+m64", "-mx32" }),
        .wantx32 => Multilib.init("", "", &.{ "-m32", "-m64", "+mx32" }),
    };
    result.multilibs.appendSliceAssumeCapacity(&.{
        default,
        alt_64,
        alt_32,
        alt_x32,
    });
    result.filter(multilib_filter, tc.filesystem);
    var flags: Multilib.Flags = .{};
    flags.appendAssumeCapacity(if (target_ptr_width == 64 and !is_x32) "+m64" else "-m64");
    flags.appendAssumeCapacity(if (target_ptr_width == 32) "+m32" else "-m32");
    flags.appendAssumeCapacity(if (target_ptr_width == 64 and is_x32) "+mx32" else "-mx32");

    return result.select(flags);
}

fn scanGCCForMultilibs(
    self: *GCCDetector,
    tc: *const Toolchain,
    target: std.Target,
    path: [2][]const u8,
    needs_biarch_suffix: bool,
) !bool {
    var detected: Multilib.Detected = .{};
    if (target.cpu.arch == .csky) {
        // TODO
    } else if (target.cpu.arch.isMIPS()) {
        // TODO
    } else if (target.cpu.arch.isRISCV()) {
        // TODO
    } else if (target.cpu.arch == .msp430) {
        // TODO
    } else if (target.cpu.arch == .avr) {
        // No multilibs
    } else if (!try findBiarchMultilibs(tc, &detected, target, path, needs_biarch_suffix)) {
        return false;
    }
    self.selected = detected.selected;
    self.biarch_sibling = detected.biarch_sibling;
    return true;
}

fn scanLibDirForGCCTriple(
    self: *GCCDetector,
    tc: *const Toolchain,
    target: std.Target,
    lib_dir: []const u8,
    candidate_triple: []const u8,
    needs_biarch_suffix: bool,
    gcc_dir_exists: bool,
    gcc_cross_dir_exists: bool,
) !void {
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var fib = std.heap.FixedBufferAllocator.init(&path_buf);
    for (0..2) |i| {
        if (i == 0 and !gcc_dir_exists) continue;
        if (i == 1 and !gcc_cross_dir_exists) continue;
        defer fib.reset();

        const base: []const u8 = if (i == 0) "gcc" else "gcc-cross";
        var lib_suffix_buf: [64]u8 = undefined;
        var suffix_buf_fib = std.heap.FixedBufferAllocator.init(&lib_suffix_buf);
        const lib_suffix = std.fs.path.join(suffix_buf_fib.allocator(), &.{ base, candidate_triple }) catch continue;

        const dir_name = std.fs.path.join(fib.allocator(), &.{ lib_dir, lib_suffix }) catch continue;
        var parent_dir = tc.filesystem.openDir(dir_name) catch continue;
        defer parent_dir.close();

        var it = parent_dir.iterate();
        while (it.next() catch continue) |entry| {
            if (entry.kind != .directory) continue;

            const version_text = entry.name;
            const candidate_version = GCCVersion.parse(version_text);
            if (candidate_version.major != -1) {
                // TODO: cache path so we're not repeatedly scanning
            }
            if (candidate_version.isLessThan(4, 1, 1, "")) continue;
            switch (candidate_version.order(self.version)) {
                .lt, .eq => continue,
                .gt => {},
            }

            if (!try self.scanGCCForMultilibs(tc, target, .{ dir_name, version_text }, needs_biarch_suffix)) continue;

            self.version = candidate_version;
            self.gcc_triple = try tc.arena.dupe(u8, candidate_triple);
            self.install_path = try std.fs.path.join(tc.arena, &.{ lib_dir, lib_suffix, version_text });
            self.parent_lib_path = try std.fs.path.join(tc.arena, &.{ self.install_path, "..", "..", ".." });
            self.is_valid = true;
        }
    }
}

fn gccToolchainDir(tc: *const Toolchain) []const u8 {
    const sysroot = tc.getSysroot();
    if (sysroot.len != 0) return "";
    return system_defaults.gcc_install_prefix;
}
2 lib/compiler/aro/aro/Driver/GCCVersion.zig vendored

@@ -57,7 +57,7 @@ pub fn parse(text: []const u8) GCCVersion {
    var good = bad;

    var it = mem.splitScalar(u8, text, '.');
    const first = it.next().?;
    const first = it.first();
    const second = it.next() orelse "";
    const rest = it.next() orelse "";
55 lib/compiler/aro/aro/Driver/Multilib.zig vendored

@@ -1,47 +1,50 @@
const std = @import("std");
const Filesystem = @import("Filesystem.zig").Filesystem;

pub const Flags = std.BoundedArray([]const u8, 6);

/// Large enough for GCCDetector for Linux; may need to be increased to support other toolchains.
const max_multilibs = 4;

const MultilibArray = std.BoundedArray(Multilib, max_multilibs);

pub const Detected = struct {
    multilibs: MultilibArray = .{},
    multilib_buf: [max_multilibs]Multilib = undefined,
    multilib_count: u8 = 0,
    selected: Multilib = .{},
    biarch_sibling: ?Multilib = null,

    pub fn filter(self: *Detected, multilib_filter: Filter, fs: Filesystem) void {
        var found_count: usize = 0;
        for (self.multilibs.constSlice()) |multilib| {
    pub fn filter(d: *Detected, multilib_filter: Filter, fs: Filesystem) void {
        var found_count: u8 = 0;
        for (d.multilibs()) |multilib| {
            if (multilib_filter.exists(multilib, fs)) {
                self.multilibs.set(found_count, multilib);
                d.multilib_buf[found_count] = multilib;
                found_count += 1;
            }
        }
        self.multilibs.resize(found_count) catch unreachable;
        d.multilib_count = found_count;
    }

    pub fn select(self: *Detected, flags: Flags) !bool {
        var filtered: MultilibArray = .{};
        for (self.multilibs.constSlice()) |multilib| {
            for (multilib.flags.constSlice()) |multilib_flag| {
                const matched = for (flags.constSlice()) |arg_flag| {
    pub fn select(d: *Detected, check_flags: []const []const u8) !bool {
        var selected: ?Multilib = null;

        for (d.multilibs()) |multilib| {
            for (multilib.flags()) |multilib_flag| {
                const matched = for (check_flags) |arg_flag| {
                    if (std.mem.eql(u8, arg_flag[1..], multilib_flag[1..])) break arg_flag;
                } else multilib_flag;
                if (matched[0] != multilib_flag[0]) break;
            } else if (selected != null) {
                return error.TooManyMultilibs;
            } else {
                filtered.appendAssumeCapacity(multilib);
                selected = multilib;
            }
        }
        if (filtered.len == 0) return false;
        if (filtered.len == 1) {
            self.selected = filtered.get(0);
        if (selected) |multilib| {
            d.selected = multilib;
            return true;
        }
        return error.TooManyMultilibs;
        return false;
    }

    pub fn multilibs(d: *const Detected) []const Multilib {
        return d.multilib_buf[0..d.multilib_count];
    }
};

@@ -58,14 +61,20 @@ const Multilib = @This();
gcc_suffix: []const u8 = "",
os_suffix: []const u8 = "",
include_suffix: []const u8 = "",
flags: Flags = .{},
flag_buf: [6][]const u8 = undefined,
flag_count: u8 = 0,
priority: u32 = 0,

pub fn init(gcc_suffix: []const u8, os_suffix: []const u8, flags: []const []const u8) Multilib {
pub fn init(gcc_suffix: []const u8, os_suffix: []const u8, init_flags: []const []const u8) Multilib {
    var self: Multilib = .{
        .gcc_suffix = gcc_suffix,
        .os_suffix = os_suffix,
        .flag_count = @intCast(init_flags.len),
    };
    self.flags.appendSliceAssumeCapacity(flags);
    @memcpy(self.flag_buf[0..init_flags.len], init_flags);
    return self;
}

pub fn flags(m: *const Multilib) []const []const u8 {
    return m.flag_buf[0..m.flag_count];
}
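A rough sketch of the reworked storage above (not taken from this diff; the entry and its flags are made-up values), showing how a multilib entry is now built with Multilib.init and read back through the flags() accessor instead of a BoundedArray field:

// Hypothetical example: build an entry and iterate its flags.
const m64 = Multilib.init("/64", "/64", &.{ "-m32", "+m64", "-mx32" });
for (m64.flags()) |flag| {
    std.debug.print("{s}\n", .{flag});
}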
7 lib/compiler/aro/aro/Hideset.zig vendored

@@ -10,8 +10,9 @@
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const Source = @import("Source.zig");

const Compilation = @import("Compilation.zig");
const Source = @import("Source.zig");
const Tokenizer = @import("Tokenizer.zig");

pub const Hideset = @This();
@@ -51,10 +52,10 @@ pub const Index = enum(u32) {
    _,
};

map: std.AutoHashMapUnmanaged(Identifier, Index) = .empty,
map: std.AutoHashMapUnmanaged(Identifier, Index) = .{},
/// Used for computing union/intersection of two lists; stored here so that allocations can be retained
/// until hideset is deinit'ed
tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .empty,
tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .{},
linked_list: Item.List = .{},
comp: *const Compilation,
89 lib/compiler/aro/aro/InitList.zig vendored

@@ -3,17 +3,16 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const testing = std.testing;

const Diagnostics = @import("Diagnostics.zig");
const Parser = @import("Parser.zig");
const Tree = @import("Tree.zig");
const Token = Tree.Token;
const TokenIndex = Tree.TokenIndex;
const NodeIndex = Tree.NodeIndex;
const Type = @import("Type.zig");
const Diagnostics = @import("Diagnostics.zig");
const NodeList = std.array_list.Managed(NodeIndex);
const Parser = @import("Parser.zig");
const Node = Tree.Node;

const Item = struct {
    list: InitList = .{},
    list: InitList,
    index: u64,

    fn order(_: void, a: Item, b: Item) std.math.Order {
@@ -23,8 +22,8 @@ const Item = struct {

const InitList = @This();

list: std.ArrayListUnmanaged(Item) = .empty,
node: NodeIndex = .none,
list: std.ArrayList(Item) = .empty,
node: Node.OptIndex = .null,
tok: TokenIndex = 0,

/// Deinitialize freeing all memory.
@@ -34,50 +33,6 @@ pub fn deinit(il: *InitList, gpa: Allocator) void {
    il.* = undefined;
}

/// Insert initializer at index, returning previous entry if one exists.
pub fn put(il: *InitList, gpa: Allocator, index: usize, node: NodeIndex, tok: TokenIndex) !?TokenIndex {
    const items = il.list.items;
    var left: usize = 0;
    var right: usize = items.len;

    // Append new value to empty list
    if (left == right) {
        const item = try il.list.addOne(gpa);
        item.* = .{
            .list = .{ .node = node, .tok = tok },
            .index = index,
        };
        return null;
    }

    while (left < right) {
        // Avoid overflowing in the midpoint calculation
        const mid = left + (right - left) / 2;
        // Compare the key with the midpoint element
        switch (std.math.order(index, items[mid].index)) {
            .eq => {
                // Replace previous entry.
                const prev = items[mid].list.tok;
                items[mid].list.deinit(gpa);
                items[mid] = .{
                    .list = .{ .node = node, .tok = tok },
                    .index = index,
                };
                return prev;
            },
            .gt => left = mid + 1,
            .lt => right = mid,
        }
    }

    // Insert a new value into a sorted position.
    try il.list.insert(gpa, left, .{
        .list = .{ .node = node, .tok = tok },
        .index = index,
    });
    return null;
}

/// Find item at index, create new if one does not exist.
pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
    const items = il.list.items;
@@ -85,13 +40,21 @@ pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
    var right: usize = items.len;

    // Append new value to empty list
    if (left == right) {
    if (il.list.items.len == 0) {
        const item = try il.list.addOne(gpa);
        item.* = .{
            .list = .{ .node = .none, .tok = 0 },
            .list = .{},
            .index = index,
        };
        return &item.list;
    } else if (il.list.items[il.list.items.len - 1].index < index) {
        // Append a new value to the end of the list.
        const new = try il.list.addOne(gpa);
        new.* = .{
            .list = .{},
            .index = index,
        };
        return &new.list;
    }

    while (left < right) {
@@ -107,7 +70,7 @@ pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {

    // Insert a new value into a sorted position.
    try il.list.insert(gpa, left, .{
        .list = .{ .node = .none, .tok = 0 },
        .list = .{},
        .index = index,
    });
    return &il.list.items[left].list;
@@ -118,22 +81,6 @@ test "basic usage" {
    var il: InitList = .{};
    defer il.deinit(gpa);

    {
        var i: usize = 0;
        while (i < 5) : (i += 1) {
            const prev = try il.put(gpa, i, .none, 0);
            try testing.expect(prev == null);
        }
    }

    {
        const failing = testing.failing_allocator;
        var i: usize = 0;
        while (i < 5) : (i += 1) {
            _ = try il.find(failing, i);
        }
    }

    {
        var item = try il.find(gpa, 0);
        var i: usize = 1;
26 lib/compiler/aro/aro/LangOpts.zig vendored

@@ -1,11 +1,20 @@
const std = @import("std");
const DiagnosticTag = @import("Diagnostics.zig").Tag;

const char_info = @import("char_info.zig");
const DiagnosticTag = @import("Diagnostics.zig").Tag;

pub const Compiler = enum {
    clang,
    gcc,
    msvc,

    pub fn defaultGccVersion(self: Compiler) u32 {
        return switch (self) {
            .clang => 4 * 10_000 + 2 * 100 + 1,
            .gcc => 7 * 10_000 + 1 * 100 + 0,
            .msvc => 0,
        };
    }
};

/// The floating-point evaluation method for intermediate results within a single expression
@@ -138,20 +147,15 @@ preserve_comments_in_macros: bool = false,
/// Used ONLY for generating __GNUC__ and related macros. Does not control the presence/absence of any features
/// Encoded as major * 10,000 + minor * 100 + patch
/// e.g. 4.2.1 == 40201
gnuc_version: u32 = 0,
gnuc_version: ?u32 = null,

pub fn setStandard(self: *LangOpts, name: []const u8) error{InvalidStandard}!void {
    self.standard = Standard.NameMap.get(name) orelse return error.InvalidStandard;
}

pub fn enableMSExtensions(self: *LangOpts) void {
    self.declspec_attrs = true;
    self.ms_extensions = true;
}

pub fn disableMSExtensions(self: *LangOpts) void {
    self.declspec_attrs = false;
    self.ms_extensions = true;
pub fn setMSExtensions(self: *LangOpts, enabled: bool) void {
    self.declspec_attrs = enabled;
    self.ms_extensions = enabled;
}

pub fn hasChar8_T(self: *const LangOpts) bool {
@@ -164,7 +168,7 @@ pub fn hasDigraphs(self: *const LangOpts) bool {

pub fn setEmulatedCompiler(self: *LangOpts, compiler: Compiler) void {
    self.emulate = compiler;
    if (compiler == .msvc) self.enableMSExtensions();
    self.setMSExtensions(compiler == .msvc);
}

pub fn setFpEvalMethod(self: *LangOpts, fp_eval_method: FPEvalMethod) void {
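For reference, a tiny sketch of the version encoding that defaultGccVersion and gnuc_version use above (the helper name below is hypothetical, not part of aro):

// Mirrors the "major * 10,000 + minor * 100 + patch" encoding documented above,
// so GCC 7.1.0 encodes as 70100 and clang's emulated 4.2.1 as 40201.
fn encodeGnucVersion(major: u32, minor: u32, patch: u32) u32 {
    return major * 10_000 + minor * 100 + patch;
}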
10163 lib/compiler/aro/aro/Parser.zig vendored
File diff suppressed because it is too large
2424 lib/compiler/aro/aro/Parser/Diagnostic.zig vendored Normal file
File diff suppressed because it is too large
133 lib/compiler/aro/aro/Pragma.zig vendored

@@ -1,7 +1,9 @@
const std = @import("std");

const Compilation = @import("Compilation.zig");
const Preprocessor = @import("Preprocessor.zig");
const Diagnostics = @import("Diagnostics.zig");
const Parser = @import("Parser.zig");
const Preprocessor = @import("Preprocessor.zig");
const TokenIndex = @import("Tree.zig").TokenIndex;

pub const Error = Compilation.Error || error{ UnknownPragma, StopPreprocessing };
@@ -58,7 +60,7 @@ pub fn pasteTokens(pp: *Preprocessor, start_idx: TokenIndex) ![]const u8 {
            .string_literal => {
                if (rparen_count != 0) return error.ExpectedStringLiteral;
                const str = pp.expandedSlice(tok);
                try pp.char_buf.appendSlice(str[1 .. str.len - 1]);
                try pp.char_buf.appendSlice(pp.comp.gpa, str[1 .. str.len - 1]);
            },
            else => return error.ExpectedStringLiteral,
        }
@@ -69,7 +71,7 @@ pub fn pasteTokens(pp: *Preprocessor, start_idx: TokenIndex) ![]const u8 {

pub fn shouldPreserveTokens(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
    if (self.preserveTokens) |func| return func(self, pp, start_idx);
    return false;
    return true;
}

pub fn preprocessorCB(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Error!void {
@@ -81,3 +83,128 @@ pub fn parserCB(self: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Er
    defer std.debug.assert(tok_index == p.tok_i);
    if (self.parserHandler) |func| return func(self, p, start_idx);
}

pub const Diagnostic = struct {
    fmt: []const u8,
    kind: Diagnostics.Message.Kind,
    opt: ?Diagnostics.Option = null,
    extension: bool = false,

    pub const pragma_warning_message: Diagnostic = .{
        .fmt = "{s}",
        .kind = .warning,
        .opt = .@"#pragma-messages",
    };

    pub const pragma_error_message: Diagnostic = .{
        .fmt = "{s}",
        .kind = .@"error",
    };

    pub const pragma_message: Diagnostic = .{
        .fmt = "#pragma message: {s}",
        .kind = .note,
    };

    pub const pragma_requires_string_literal: Diagnostic = .{
        .fmt = "pragma {s} requires string literal",
        .kind = .@"error",
    };

    pub const poisoned_identifier: Diagnostic = .{
        .fmt = "attempt to use a poisoned identifier",
        .kind = .@"error",
    };

    pub const pragma_poison_identifier: Diagnostic = .{
        .fmt = "can only poison identifier tokens",
        .kind = .@"error",
    };

    pub const pragma_poison_macro: Diagnostic = .{
        .fmt = "poisoning existing macro",
        .kind = .warning,
    };

    pub const unknown_gcc_pragma: Diagnostic = .{
        .fmt = "pragma GCC expected 'error', 'warning', 'diagnostic', 'poison'",
        .kind = .off,
        .opt = .@"unknown-pragmas",
    };

    pub const unknown_gcc_pragma_directive: Diagnostic = .{
        .fmt = "pragma GCC diagnostic expected 'error', 'warning', 'ignored', 'fatal', 'push', or 'pop'",
        .kind = .warning,
        .opt = .@"unknown-pragmas",
        .extension = true,
    };

    pub const malformed_warning_check: Diagnostic = .{
        .fmt = "{s} expected option name (e.g. \"-Wundef\")",
        .opt = .@"malformed-warning-check",
        .kind = .warning,
        .extension = true,
    };

    pub const pragma_pack_lparen: Diagnostic = .{
        .fmt = "missing '(' after '#pragma pack' - ignoring",
        .kind = .warning,
        .opt = .@"ignored-pragmas",
    };

    pub const pragma_pack_rparen: Diagnostic = .{
        .fmt = "missing ')' after '#pragma pack' - ignoring",
        .kind = .warning,
        .opt = .@"ignored-pragmas",
    };

    pub const pragma_pack_unknown_action: Diagnostic = .{
        .fmt = "unknown action for '#pragma pack' - ignoring",
        .kind = .warning,
        .opt = .@"ignored-pragmas",
    };

    pub const pragma_pack_show: Diagnostic = .{
        .fmt = "value of #pragma pack(show) == {d}",
        .kind = .warning,
    };

    pub const pragma_pack_int_ident: Diagnostic = .{
        .fmt = "expected integer or identifier in '#pragma pack' - ignored",
        .kind = .warning,
        .opt = .@"ignored-pragmas",
    };

    pub const pragma_pack_int: Diagnostic = .{
        .fmt = "expected #pragma pack parameter to be '1', '2', '4', '8', or '16'",
        .opt = .@"ignored-pragmas",
        .kind = .warning,
    };

    pub const pragma_pack_undefined_pop: Diagnostic = .{
        .fmt = "specifying both a name and alignment to 'pop' is undefined",
        .kind = .warning,
    };

    pub const pragma_pack_empty_stack: Diagnostic = .{
        .fmt = "#pragma pack(pop, ...) failed: stack empty",
        .opt = .@"ignored-pragmas",
        .kind = .warning,
    };
};

pub fn err(pp: *Preprocessor, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) Compilation.Error!void {
    var sf = std.heap.stackFallback(1024, pp.comp.gpa);
    var allocating: std.Io.Writer.Allocating = .init(sf.get());
    defer allocating.deinit();

    Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory;

    try pp.diagnostics.addWithLocation(pp.comp, .{
        .kind = diagnostic.kind,
        .opt = diagnostic.opt,
        .text = allocating.written(),
        .location = pp.tokens.items(.loc)[tok_i].expand(pp.comp),
        .extension = diagnostic.extension,
    }, pp.expansionSlice(tok_i), true);
}
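A hypothetical call-site sketch for the new err helper and Diagnostic definitions above (the token index and alignment value are placeholders, not from this diff):

// Report the current #pragma pack value through the new diagnostic plumbing.
try Pragma.err(pp, tok_idx, .pragma_pack_show, .{current_alignment});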
1711 lib/compiler/aro/aro/Preprocessor.zig vendored
File diff suppressed because it is too large
458 lib/compiler/aro/aro/Preprocessor/Diagnostic.zig vendored Normal file

@@ -0,0 +1,458 @@
const std = @import("std");

const Diagnostics = @import("../Diagnostics.zig");
const LangOpts = @import("../LangOpts.zig");
const Compilation = @import("../Compilation.zig");

const Diagnostic = @This();

fmt: []const u8,
kind: Diagnostics.Message.Kind,
opt: ?Diagnostics.Option = null,
extension: bool = false,
show_in_system_headers: bool = false,

pub const elif_without_if: Diagnostic = .{
    .fmt = "#elif without #if",
    .kind = .@"error",
};

pub const elif_after_else: Diagnostic = .{
    .fmt = "#elif after #else",
    .kind = .@"error",
};

pub const elifdef_without_if: Diagnostic = .{
    .fmt = "#elifdef without #if",
    .kind = .@"error",
};

pub const elifdef_after_else: Diagnostic = .{
    .fmt = "#elifdef after #else",
    .kind = .@"error",
};

pub const elifndef_without_if: Diagnostic = .{
    .fmt = "#elifndef without #if",
    .kind = .@"error",
};

pub const elifndef_after_else: Diagnostic = .{
    .fmt = "#elifndef after #else",
    .kind = .@"error",
};

pub const else_without_if: Diagnostic = .{
    .fmt = "#else without #if",
    .kind = .@"error",
};

pub const else_after_else: Diagnostic = .{
    .fmt = "#else after #else",
    .kind = .@"error",
};

pub const endif_without_if: Diagnostic = .{
    .fmt = "#endif without #if",
    .kind = .@"error",
};

pub const unknown_pragma: Diagnostic = .{
    .fmt = "unknown pragma ignored",
    .opt = .@"unknown-pragmas",
    .kind = .off,
};

pub const line_simple_digit: Diagnostic = .{
    .fmt = "#line directive requires a simple digit sequence",
    .kind = .@"error",
};

pub const line_invalid_filename: Diagnostic = .{
    .fmt = "invalid filename for #line directive",
    .kind = .@"error",
};

pub const unterminated_conditional_directive: Diagnostic = .{
    .fmt = "unterminated conditional directive",
    .kind = .@"error",
};

pub const invalid_preprocessing_directive: Diagnostic = .{
    .fmt = "invalid preprocessing directive",
    .kind = .@"error",
};

pub const error_directive: Diagnostic = .{
    .fmt = "{s}",
    .kind = .@"error",
};

pub const warning_directive: Diagnostic = .{
    .fmt = "{s}",
    .opt = .@"#warnings",
    .kind = .warning,
    .show_in_system_headers = true,
};

pub const macro_name_missing: Diagnostic = .{
    .fmt = "macro name missing",
    .kind = .@"error",
};

pub const extra_tokens_directive_end: Diagnostic = .{
    .fmt = "extra tokens at end of macro directive",
    .kind = .@"error",
};

pub const expected_value_in_expr: Diagnostic = .{
    .fmt = "expected value in expression",
    .kind = .@"error",
};

pub const defined_as_macro_name: Diagnostic = .{
    .fmt = "'defined' cannot be used as a macro name",
    .kind = .@"error",
};

pub const macro_name_must_be_identifier: Diagnostic = .{
    .fmt = "macro name must be an identifier",
    .kind = .@"error",
};

pub const whitespace_after_macro_name: Diagnostic = .{
    .fmt = "ISO C99 requires whitespace after the macro name",
    .opt = .@"c99-extensions",
    .kind = .warning,
    .extension = true,
};

pub const hash_hash_at_start: Diagnostic = .{
    .fmt = "'##' cannot appear at the start of a macro expansion",
    .kind = .@"error",
};

pub const hash_hash_at_end: Diagnostic = .{
    .fmt = "'##' cannot appear at the end of a macro expansion",
    .kind = .@"error",
};

pub const pasting_formed_invalid: Diagnostic = .{
    .fmt = "pasting formed '{s}', an invalid preprocessing token",
    .kind = .@"error",
};

pub const missing_paren_param_list: Diagnostic = .{
    .fmt = "missing ')' in macro parameter list",
    .kind = .@"error",
};

pub const unterminated_macro_param_list: Diagnostic = .{
    .fmt = "unterminated macro param list",
    .kind = .@"error",
};

pub const invalid_token_param_list: Diagnostic = .{
    .fmt = "invalid token in macro parameter list",
    .kind = .@"error",
};

pub const expected_comma_param_list: Diagnostic = .{
    .fmt = "expected comma in macro parameter list",
    .kind = .@"error",
};

pub const hash_not_followed_param: Diagnostic = .{
    .fmt = "'#' is not followed by a macro parameter",
    .kind = .@"error",
};

pub const expected_filename: Diagnostic = .{
    .fmt = "expected \"FILENAME\" or <FILENAME>",
    .kind = .@"error",
};

pub const empty_filename: Diagnostic = .{
    .fmt = "empty filename",
    .kind = .@"error",
};

pub const header_str_closing: Diagnostic = .{
    .fmt = "expected closing '>'",
    .kind = .@"error",
};

pub const header_str_match: Diagnostic = .{
    .fmt = "to match this '<'",
    .kind = .note,
};

pub const string_literal_in_pp_expr: Diagnostic = .{
    .fmt = "string literal in preprocessor expression",
    .kind = .@"error",
};

pub const empty_char_literal_warning: Diagnostic = .{
    .fmt = "empty character constant",
    .kind = .warning,
    .opt = .@"invalid-pp-token",
    .extension = true,
};

pub const unterminated_char_literal_warning: Diagnostic = .{
    .fmt = "missing terminating ' character",
    .kind = .warning,
    .opt = .@"invalid-pp-token",
    .extension = true,
};

pub const unterminated_string_literal_warning: Diagnostic = .{
    .fmt = "missing terminating '\"' character",
    .kind = .warning,
    .opt = .@"invalid-pp-token",
    .extension = true,
};

pub const unterminated_comment: Diagnostic = .{
    .fmt = "unterminated comment",
    .kind = .@"error",
};

pub const malformed_embed_param: Diagnostic = .{
    .fmt = "unexpected token in embed parameter",
    .kind = .@"error",
};

pub const malformed_embed_limit: Diagnostic = .{
    .fmt = "the limit parameter expects one non-negative integer as a parameter",
    .kind = .@"error",
};

pub const duplicate_embed_param: Diagnostic = .{
    .fmt = "duplicate embed parameter '{s}'",
    .kind = .warning,
    .opt = .@"duplicate-embed-param",
};

pub const unsupported_embed_param: Diagnostic = .{
    .fmt = "unsupported embed parameter '{s}' embed parameter",
    .kind = .warning,
    .opt = .@"unsupported-embed-param",
};

pub const va_opt_lparen: Diagnostic = .{
    .fmt = "missing '(' following __VA_OPT__",
    .kind = .@"error",
};

pub const va_opt_rparen: Diagnostic = .{
    .fmt = "unterminated __VA_OPT__ argument list",
    .kind = .@"error",
};

pub const keyword_macro: Diagnostic = .{
    .fmt = "keyword is hidden by macro definition",
    .kind = .off,
    .opt = .@"keyword-macro",
    .extension = true,
};

pub const undefined_macro: Diagnostic = .{
    .fmt = "'{s}' is not defined, evaluates to 0",
    .kind = .off,
    .opt = .undef,
};

pub const fn_macro_undefined: Diagnostic = .{
    .fmt = "function-like macro '{s}' is not defined",
    .kind = .@"error",
};

// pub const preprocessing_directive_only: Diagnostic = .{
//     .fmt = "'{s}' must be used within a preprocessing directive",
//     .extra = .tok_id_expected,
//     .kind = .@"error",
// };

pub const missing_lparen_after_builtin: Diagnostic = .{
    .fmt = "Missing '(' after built-in macro '{s}'",
    .kind = .@"error",
};

pub const too_many_includes: Diagnostic = .{
    .fmt = "#include nested too deeply",
    .kind = .@"error",
};

pub const include_next: Diagnostic = .{
    .fmt = "#include_next is a language extension",
    .kind = .off,
    .opt = .@"gnu-include-next",
    .extension = true,
};

pub const include_next_outside_header: Diagnostic = .{
    .fmt = "#include_next in primary source file; will search from start of include path",
    .kind = .warning,
    .opt = .@"include-next-outside-header",
};

pub const comma_deletion_va_args: Diagnostic = .{
    .fmt = "token pasting of ',' and __VA_ARGS__ is a GNU extension",
    .kind = .off,
    .opt = .@"gnu-zero-variadic-macro-arguments",
    .extension = true,
};

pub const expansion_to_defined_obj: Diagnostic = .{
    .fmt = "macro expansion producing 'defined' has undefined behavior",
    .kind = .off,
    .opt = .@"expansion-to-defined",
};

pub const expansion_to_defined_func: Diagnostic = .{
    .fmt = expansion_to_defined_obj.fmt,
    .kind = .off,
    .opt = .@"expansion-to-defined",
    .extension = true,
};

pub const invalid_pp_stringify_escape: Diagnostic = .{
    .fmt = "invalid string literal, ignoring final '\\'",
    .kind = .warning,
};

pub const gnu_va_macro: Diagnostic = .{
    .fmt = "named variadic macros are a GNU extension",
    .opt = .@"variadic-macros",
    .kind = .off,
    .extension = true,
};

pub const pragma_operator_string_literal: Diagnostic = .{
    .fmt = "_Pragma requires exactly one string literal token",
    .kind = .@"error",
};

pub const invalid_preproc_expr_start: Diagnostic = .{
    .fmt = "invalid token at start of a preprocessor expression",
    .kind = .@"error",
};

pub const newline_eof: Diagnostic = .{
    .fmt = "no newline at end of file",
    .opt = .@"newline-eof",
    .kind = .off,
    .extension = true,
};

pub const malformed_warning_check: Diagnostic = .{
    .fmt = "{s} expected option name (e.g. \"-Wundef\")",
    .opt = .@"malformed-warning-check",
    .kind = .warning,
    .extension = true,
};

pub const feature_check_requires_identifier: Diagnostic = .{
    .fmt = "builtin feature check macro requires a parenthesized identifier",
    .kind = .@"error",
};

pub const builtin_macro_redefined: Diagnostic = .{
    .fmt = "redefining builtin macro",
    .opt = .@"builtin-macro-redefined",
    .kind = .warning,
    .extension = true,
};
|
||||
|
||||
pub const macro_redefined: Diagnostic = .{
|
||||
.fmt = "'{s}' macro redefined",
|
||||
.opt = .@"macro-redefined",
|
||||
.kind = .warning,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const previous_definition: Diagnostic = .{
|
||||
.fmt = "previous definition is here",
|
||||
.kind = .note,
|
||||
};
|
||||
|
||||
pub const unterminated_macro_arg_list: Diagnostic = .{
|
||||
.fmt = "unterminated function macro argument list",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const to_match_paren: Diagnostic = .{
|
||||
.fmt = "to match this '('",
|
||||
.kind = .note,
|
||||
};
|
||||
|
||||
pub const closing_paren: Diagnostic = .{
|
||||
.fmt = "expected closing ')'",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const poisoned_identifier: Diagnostic = .{
|
||||
.fmt = "attempt to use a poisoned identifier",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_arguments: Diagnostic = .{
|
||||
.fmt = "expected {d} argument(s) got {d}",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_at_least_arguments: Diagnostic = .{
|
||||
.fmt = "expected at least {d} argument(s) got {d}",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
pub const invalid_preproc_operator: Diagnostic = .{
|
||||
.fmt = "token is not a valid binary operator in a preprocessor subexpression",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_str_literal_in: Diagnostic = .{
|
||||
.fmt = "expected string literal in '{s}'",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const builtin_missing_r_paren: Diagnostic = .{
|
||||
.fmt = "missing ')', after {s}",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const cannot_convert_to_identifier: Diagnostic = .{
|
||||
.fmt = "cannot convert {s} to an identifier",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_identifier: Diagnostic = .{
|
||||
.fmt = "expected identifier argument",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const incomplete_ucn: Diagnostic = .{
|
||||
.fmt = "incomplete universal character name; treating as '\\' followed by identifier",
|
||||
.kind = .warning,
|
||||
.opt = .unicode,
|
||||
};
|
||||
|
||||
pub const invalid_source_epoch: Diagnostic = .{
|
||||
.fmt = "environment variable SOURCE_DATE_EPOCH must expand to a non-negative integer less than or equal to 253402300799",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const date_time: Diagnostic = .{
|
||||
.fmt = "expansion of date or time macro is not reproducible",
|
||||
.kind = .off,
|
||||
.opt = .@"date-time",
|
||||
.show_in_system_headers = true,
|
||||
};
|
||||
|
||||
pub const no_argument_variadic_macro: Diagnostic = .{
|
||||
.fmt = "passing no argument for the '...' parameter of a variadic macro is incompatible with C standards before C23",
|
||||
.opt = .@"variadic-macro-arguments-omitted",
|
||||
.kind = .off,
|
||||
.extension = true,
|
||||
};
|
||||
21
lib/compiler/aro/aro/Source.zig
vendored
21
lib/compiler/aro/aro/Source.zig
vendored
|
|
@ -24,6 +24,21 @@ pub const Location = struct {
|
|||
pub fn eql(a: Location, b: Location) bool {
|
||||
return a.id == b.id and a.byte_offset == b.byte_offset and a.line == b.line;
|
||||
}
|
||||
|
||||
pub fn expand(loc: Location, comp: *const @import("Compilation.zig")) ExpandedLocation {
|
||||
const source = comp.getSource(loc.id);
|
||||
return source.lineCol(loc);
|
||||
}
|
||||
};
|
||||
|
||||
pub const ExpandedLocation = struct {
|
||||
path: []const u8,
|
||||
line: []const u8,
|
||||
line_no: u32,
|
||||
col: u32,
|
||||
width: u32,
|
||||
end_with_splice: bool,
|
||||
kind: Kind,
|
||||
};
|
||||
|
||||
const Source = @This();
|
||||
|
|
@ -51,9 +66,7 @@ pub fn physicalLine(source: Source, loc: Location) u32 {
|
|||
return loc.line + source.numSplicesBefore(loc.byte_offset);
|
||||
}
|
||||
|
||||
const LineCol = struct { line: []const u8, line_no: u32, col: u32, width: u32, end_with_splice: bool };
|
||||
|
||||
pub fn lineCol(source: Source, loc: Location) LineCol {
|
||||
pub fn lineCol(source: Source, loc: Location) ExpandedLocation {
|
||||
var start: usize = 0;
|
||||
// find the start of the line which is either a newline or a splice
|
||||
if (std.mem.lastIndexOfScalar(u8, source.buf[0..loc.byte_offset], '\n')) |some| start = some + 1;
|
||||
|
|
@ -102,11 +115,13 @@ pub fn lineCol(source: Source, loc: Location) LineCol {
|
|||
nl = source.splice_locs[splice_index];
|
||||
}
|
||||
return .{
|
||||
.path = source.path,
|
||||
.line = source.buf[start..nl],
|
||||
.line_no = loc.line + splice_index,
|
||||
.col = col,
|
||||
.width = width,
|
||||
.end_with_splice = end_with_splice,
|
||||
.kind = source.kind,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
|||
94
lib/compiler/aro/aro/StringInterner.zig
vendored
94
lib/compiler/aro/aro/StringInterner.zig
vendored
|
|
@ -2,82 +2,34 @@ const std = @import("std");
|
|||
const mem = std.mem;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
|
||||
const StringToIdMap = std.StringHashMapUnmanaged(StringId);
|
||||
|
||||
pub const StringId = enum(u32) {
|
||||
empty,
|
||||
_,
|
||||
};
|
||||
|
||||
pub const TypeMapper = struct {
|
||||
const LookupSpeed = enum {
|
||||
fast,
|
||||
slow,
|
||||
};
|
||||
|
||||
data: union(LookupSpeed) {
|
||||
fast: []const []const u8,
|
||||
slow: *const StringToIdMap,
|
||||
},
|
||||
|
||||
pub fn lookup(self: TypeMapper, string_id: StringInterner.StringId) []const u8 {
|
||||
if (string_id == .empty) return "";
|
||||
switch (self.data) {
|
||||
.fast => |arr| return arr[@intFromEnum(string_id)],
|
||||
.slow => |map| {
|
||||
var it = map.iterator();
|
||||
while (it.next()) |entry| {
|
||||
if (entry.value_ptr.* == string_id) return entry.key_ptr.*;
|
||||
}
|
||||
unreachable;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(self: TypeMapper, allocator: mem.Allocator) void {
|
||||
switch (self.data) {
|
||||
.slow => {},
|
||||
.fast => |arr| allocator.free(arr),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const StringInterner = @This();
|
||||
|
||||
string_table: StringToIdMap = .{},
|
||||
next_id: StringId = @enumFromInt(@intFromEnum(StringId.empty) + 1),
|
||||
pub const StringId = enum(u32) {
|
||||
empty = std.math.maxInt(u32),
|
||||
_,
|
||||
|
||||
pub fn deinit(self: *StringInterner, allocator: mem.Allocator) void {
|
||||
self.string_table.deinit(allocator);
|
||||
pub fn lookup(id: StringId, comp: *const Compilation) []const u8 {
|
||||
if (id == .empty) return "";
|
||||
return comp.string_interner.table.keys()[@intFromEnum(id)];
|
||||
}
|
||||
|
||||
pub fn lookupExtra(id: StringId, si: StringInterner) []const u8 {
|
||||
if (id == .empty) return "";
|
||||
return si.table.keys()[@intFromEnum(id)];
|
||||
}
|
||||
};
|
||||
|
||||
table: std.StringArrayHashMapUnmanaged(void) = .empty,
|
||||
|
||||
pub fn deinit(si: *StringInterner, allocator: mem.Allocator) void {
|
||||
si.table.deinit(allocator);
|
||||
si.* = undefined;
|
||||
}
|
||||
|
||||
pub fn intern(comp: *Compilation, str: []const u8) !StringId {
|
||||
return comp.string_interner.internExtra(comp.gpa, str);
|
||||
}
|
||||
|
||||
pub fn internExtra(self: *StringInterner, allocator: mem.Allocator, str: []const u8) !StringId {
|
||||
/// Intern externally owned string.
|
||||
pub fn intern(si: *StringInterner, allocator: mem.Allocator, str: []const u8) !StringId {
|
||||
if (str.len == 0) return .empty;
|
||||
|
||||
const gop = try self.string_table.getOrPut(allocator, str);
|
||||
if (gop.found_existing) return gop.value_ptr.*;
|
||||
|
||||
defer self.next_id = @enumFromInt(@intFromEnum(self.next_id) + 1);
|
||||
gop.value_ptr.* = self.next_id;
|
||||
return self.next_id;
|
||||
}
|
||||
|
||||
/// deinit for the returned TypeMapper is a no-op and does not need to be called
|
||||
pub fn getSlowTypeMapper(self: *const StringInterner) TypeMapper {
|
||||
return TypeMapper{ .data = .{ .slow = &self.string_table } };
|
||||
}
|
||||
|
||||
/// Caller must call `deinit` on the returned TypeMapper
|
||||
pub fn getFastTypeMapper(self: *const StringInterner, allocator: mem.Allocator) !TypeMapper {
|
||||
var strings = try allocator.alloc([]const u8, @intFromEnum(self.next_id));
|
||||
var it = self.string_table.iterator();
|
||||
strings[0] = "";
|
||||
while (it.next()) |entry| {
|
||||
strings[@intFromEnum(entry.value_ptr.*)] = entry.key_ptr.*;
|
||||
}
|
||||
return TypeMapper{ .data = .{ .fast = strings } };
|
||||
const gop = try si.table.getOrPut(allocator, str);
|
||||
return @enumFromInt(gop.index);
|
||||
}
|
||||
|
|
|
|||
210
lib/compiler/aro/aro/SymbolStack.zig
vendored
210
lib/compiler/aro/aro/SymbolStack.zig
vendored
|
|
@ -2,22 +2,24 @@ const std = @import("std");
|
|||
const mem = std.mem;
|
||||
const Allocator = mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const Parser = @import("Parser.zig");
|
||||
const StringId = @import("StringInterner.zig").StringId;
|
||||
const Tree = @import("Tree.zig");
|
||||
const Token = Tree.Token;
|
||||
const TokenIndex = Tree.TokenIndex;
|
||||
const NodeIndex = Tree.NodeIndex;
|
||||
const Type = @import("Type.zig");
|
||||
const Parser = @import("Parser.zig");
|
||||
const Node = Tree.Node;
|
||||
const QualType = @import("TypeStore.zig").QualType;
|
||||
const Value = @import("Value.zig");
|
||||
const StringId = @import("StringInterner.zig").StringId;
|
||||
|
||||
const SymbolStack = @This();
|
||||
|
||||
pub const Symbol = struct {
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: NodeIndex = .none,
|
||||
node: Node.OptIndex = .null,
|
||||
out_of_scope: bool = false,
|
||||
kind: Kind,
|
||||
val: Value,
|
||||
};
|
||||
|
|
@ -33,7 +35,7 @@ pub const Kind = enum {
|
|||
constexpr,
|
||||
};
|
||||
|
||||
scopes: std.ArrayListUnmanaged(Scope) = .empty,
|
||||
scopes: std.ArrayList(Scope) = .empty,
|
||||
/// allocations from nested scopes are retained after popping; `active_len` is the number
|
||||
/// of currently-active items in `scopes`.
|
||||
active_len: usize = 0,
|
||||
|
|
@ -64,7 +66,7 @@ pub fn deinit(s: *SymbolStack, gpa: Allocator) void {
|
|||
|
||||
pub fn pushScope(s: *SymbolStack, p: *Parser) !void {
|
||||
if (s.active_len + 1 > s.scopes.items.len) {
|
||||
try s.scopes.append(p.gpa, .{});
|
||||
try s.scopes.append(p.comp.gpa, .{});
|
||||
s.active_len = s.scopes.items.len;
|
||||
} else {
|
||||
s.scopes.items[s.active_len].clearRetainingCapacity();
|
||||
|
|
@ -82,17 +84,17 @@ pub fn findTypedef(s: *SymbolStack, p: *Parser, name: StringId, name_tok: TokenI
|
|||
.typedef => return prev,
|
||||
.@"struct" => {
|
||||
if (no_type_yet) return null;
|
||||
try p.errStr(.must_use_struct, name_tok, p.tokSlice(name_tok));
|
||||
try p.err(name_tok, .must_use_struct, .{p.tokSlice(name_tok)});
|
||||
return prev;
|
||||
},
|
||||
.@"union" => {
|
||||
if (no_type_yet) return null;
|
||||
try p.errStr(.must_use_union, name_tok, p.tokSlice(name_tok));
|
||||
try p.err(name_tok, .must_use_union, .{p.tokSlice(name_tok)});
|
||||
return prev;
|
||||
},
|
||||
.@"enum" => {
|
||||
if (no_type_yet) return null;
|
||||
try p.errStr(.must_use_enum, name_tok, p.tokSlice(name_tok));
|
||||
try p.err(name_tok, .must_use_enum, .{p.tokSlice(name_tok)});
|
||||
return prev;
|
||||
},
|
||||
else => return null,
|
||||
|
|
@ -120,8 +122,8 @@ pub fn findTag(
|
|||
else => unreachable,
|
||||
}
|
||||
if (s.get(name, .tags) == null) return null;
|
||||
try p.errStr(.wrong_tag, name_tok, p.tokSlice(name_tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(name_tok, .wrong_tag, .{p.tokSlice(name_tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
|
|
@ -171,38 +173,34 @@ pub fn defineTypedef(
|
|||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: NodeIndex,
|
||||
node: Node.Index,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.typedef => {
|
||||
if (!prev.ty.is(.invalid)) {
|
||||
if (!ty.eql(prev.ty, p.comp, true)) {
|
||||
try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty));
|
||||
if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok);
|
||||
}
|
||||
if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
|
||||
if (qt.isInvalid()) return;
|
||||
const non_typedef_qt = qt.type(p.comp).typedef.base;
|
||||
const non_typedef_prev_qt = prev.qt.type(p.comp).typedef.base;
|
||||
try p.err(tok, .redefinition_of_typedef, .{ non_typedef_qt, non_typedef_prev_qt });
|
||||
if (prev.tok != 0) try p.err(prev.tok, .previous_definition, .{});
|
||||
}
|
||||
},
|
||||
.enumeration, .decl, .def, .constexpr => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
try s.define(p.gpa, .{
|
||||
try s.define(p.comp.gpa, .{
|
||||
.kind = .typedef,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = .{
|
||||
.name = name,
|
||||
.specifier = ty.specifier,
|
||||
.qual = ty.qual,
|
||||
.data = ty.data,
|
||||
},
|
||||
.node = node,
|
||||
.qt = qt,
|
||||
.node = .pack(node),
|
||||
.val = .{},
|
||||
});
|
||||
}
|
||||
|
|
@ -211,42 +209,48 @@ pub fn defineSymbol(
|
|||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: NodeIndex,
|
||||
node: Node.Index,
|
||||
val: Value,
|
||||
constexpr: bool,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.enumeration => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
.decl => {
|
||||
if (!ty.eql(prev.ty, p.comp, true)) {
|
||||
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
} else {
|
||||
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(some, node);
|
||||
}
|
||||
},
|
||||
.def, .constexpr => {
|
||||
try p.errStr(.redefinition, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
.def, .constexpr => if (!prev.qt.isInvalid()) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
.typedef => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
try s.define(p.gpa, .{
|
||||
try s.define(p.comp.gpa, .{
|
||||
.kind = if (constexpr) .constexpr else .def,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = ty,
|
||||
.node = node,
|
||||
.qt = qt,
|
||||
.node = .pack(node),
|
||||
.val = val,
|
||||
});
|
||||
}
|
||||
|
|
@ -264,69 +268,96 @@ pub fn declareSymbol(
|
|||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: NodeIndex,
|
||||
node: Node.Index,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.enumeration => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
.decl => {
|
||||
if (!ty.eql(prev.ty, p.comp, true)) {
|
||||
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
} else {
|
||||
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(node, some);
|
||||
}
|
||||
},
|
||||
.def, .constexpr => {
|
||||
if (!ty.eql(prev.ty, p.comp, true)) {
|
||||
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
} else {
|
||||
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(node, some);
|
||||
return;
|
||||
}
|
||||
},
|
||||
.typedef => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
try s.define(p.gpa, .{
|
||||
try s.define(p.comp.gpa, .{
|
||||
.kind = .decl,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = ty,
|
||||
.node = node,
|
||||
.qt = qt,
|
||||
.node = .pack(node),
|
||||
.val = .{},
|
||||
});
|
||||
|
||||
// Declare out of scope symbol for functions declared in functions.
|
||||
if (s.active_len > 1 and !p.comp.langopts.standard.atLeast(.c23) and qt.is(p.comp, .func)) {
|
||||
try s.scopes.items[0].vars.put(p.comp.gpa, name, .{
|
||||
.kind = .decl,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.qt = qt,
|
||||
.node = .pack(node),
|
||||
.val = .{},
|
||||
.out_of_scope = true,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
pub fn defineParam(s: *SymbolStack, p: *Parser, name: StringId, ty: Type, tok: TokenIndex) !void {
|
||||
pub fn defineParam(
|
||||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: ?Node.Index,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.enumeration, .decl, .def, .constexpr => {
|
||||
try p.errStr(.redefinition_of_parameter, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
.enumeration, .decl, .def, .constexpr => if (!prev.qt.isInvalid()) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_of_parameter, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
.typedef => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
if (ty.is(.fp16) and !p.comp.hasHalfPrecisionFloatABI()) {
|
||||
try p.errStr(.suggest_pointer_for_invalid_fp16, tok, "parameters");
|
||||
}
|
||||
try s.define(p.gpa, .{
|
||||
try s.define(p.comp.gpa, .{
|
||||
.kind = .def,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = ty,
|
||||
.qt = qt,
|
||||
.node = .packOpt(node),
|
||||
.val = .{},
|
||||
});
|
||||
}
|
||||
|
|
@ -342,20 +373,20 @@ pub fn defineTag(
|
|||
switch (prev.kind) {
|
||||
.@"enum" => {
|
||||
if (kind == .keyword_enum) return prev;
|
||||
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return null;
|
||||
},
|
||||
.@"struct" => {
|
||||
if (kind == .keyword_struct) return prev;
|
||||
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return null;
|
||||
},
|
||||
.@"union" => {
|
||||
if (kind == .keyword_union) return prev;
|
||||
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return null;
|
||||
},
|
||||
else => unreachable,
|
||||
|
|
@ -366,34 +397,39 @@ pub fn defineEnumeration(
|
|||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
val: Value,
|
||||
node: Node.Index,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.enumeration => {
|
||||
try p.errStr(.redefinition, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
.enumeration => if (!prev.qt.isInvalid()) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return;
|
||||
},
|
||||
.decl, .def, .constexpr => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return;
|
||||
},
|
||||
.typedef => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
try s.define(p.gpa, .{
|
||||
try s.define(p.comp.gpa, .{
|
||||
.kind = .enumeration,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = ty,
|
||||
.qt = qt,
|
||||
.val = val,
|
||||
.node = .pack(node),
|
||||
});
|
||||
}
|
||||
|
|
|
|||
286
lib/compiler/aro/aro/Tokenizer.zig
vendored
286
lib/compiler/aro/aro/Tokenizer.zig
vendored
|
|
@ -1,8 +1,45 @@
|
|||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Source = @import("Source.zig");
|
||||
const LangOpts = @import("LangOpts.zig");
|
||||
const Source = @import("Source.zig");
|
||||
|
||||
/// Value for valid escapes indicates how many characters to consume, not counting leading backslash
|
||||
const UCNKind = enum(u8) {
|
||||
/// Just `\`
|
||||
none,
|
||||
/// \u or \U followed by an insufficient number of hex digits
|
||||
incomplete,
|
||||
/// `\uxxxx`
|
||||
hex4 = 5,
|
||||
/// `\Uxxxxxxxx`
|
||||
hex8 = 9,
|
||||
|
||||
/// In the classification phase we do not care if the escape represents a valid universal character name
|
||||
/// e.g. \UFFFFFFFF is acceptable.
|
||||
fn classify(buf: []const u8) UCNKind {
|
||||
assert(buf[0] == '\\');
|
||||
if (buf.len == 1) return .none;
|
||||
switch (buf[1]) {
|
||||
'u' => {
|
||||
if (buf.len < 6) return .incomplete;
|
||||
for (buf[2..6]) |c| {
|
||||
if (!std.ascii.isHex(c)) return .incomplete;
|
||||
}
|
||||
return .hex4;
|
||||
},
|
||||
'U' => {
|
||||
if (buf.len < 10) return .incomplete;
|
||||
for (buf[2..10]) |c| {
|
||||
if (!std.ascii.isHex(c)) return .incomplete;
|
||||
}
|
||||
return .hex8;
|
||||
},
|
||||
else => return .none,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Token = struct {
|
||||
id: Id,
|
||||
|
|
@ -18,7 +55,7 @@ pub const Token = struct {
|
|||
eof,
|
||||
/// identifier containing solely basic character set characters
|
||||
identifier,
|
||||
/// identifier with at least one extended character
|
||||
/// identifier with at least one extended character or UCN escape sequence
|
||||
extended_identifier,
|
||||
|
||||
// string literals with prefixes
|
||||
|
|
@ -147,6 +184,10 @@ pub const Token = struct {
|
|||
macro_counter,
|
||||
/// Special token for implementing _Pragma
|
||||
macro_param_pragma_operator,
|
||||
/// Special token for implementing __identifier (MS extension)
|
||||
macro_param_ms_identifier,
|
||||
/// Special token for implementing __pragma (MS extension)
|
||||
macro_param_ms_pragma,
|
||||
|
||||
/// Special identifier for implementing __func__
|
||||
macro_func,
|
||||
|
|
@ -154,6 +195,12 @@ pub const Token = struct {
|
|||
macro_function,
|
||||
/// Special identifier for implementing __PRETTY_FUNCTION__
|
||||
macro_pretty_func,
|
||||
/// Special identifier for implementing __DATE__
|
||||
macro_date,
|
||||
/// Special identifier for implementing __TIME__
|
||||
macro_time,
|
||||
/// Special identifier for implementing __TIMESTAMP__
|
||||
macro_timestamp,
|
||||
|
||||
keyword_auto,
|
||||
keyword_auto_type,
|
||||
|
|
@ -290,13 +337,21 @@ pub const Token = struct {
|
|||
keyword_thiscall2,
|
||||
keyword_vectorcall,
|
||||
keyword_vectorcall2,
|
||||
keyword_fastcall,
|
||||
keyword_fastcall2,
|
||||
keyword_regcall,
|
||||
keyword_cdecl,
|
||||
keyword_cdecl2,
|
||||
keyword_forceinline,
|
||||
keyword_forceinline2,
|
||||
keyword_unaligned,
|
||||
keyword_unaligned2,
|
||||
|
||||
// builtins that require special parsing
|
||||
builtin_choose_expr,
|
||||
builtin_va_arg,
|
||||
builtin_offsetof,
|
||||
builtin_bitoffsetof,
|
||||
builtin_types_compatible_p,
|
||||
// Type nullability
|
||||
keyword_nonnull,
|
||||
keyword_nullable,
|
||||
keyword_nullable_result,
|
||||
keyword_null_unspecified,
|
||||
|
||||
/// Generated by #embed directive
|
||||
/// Decimal value with no prefix or suffix
|
||||
|
|
@ -323,6 +378,12 @@ pub const Token = struct {
|
|||
/// A comment token if asked to preserve comments.
|
||||
comment,
|
||||
|
||||
/// Incomplete universal character name
|
||||
/// This happens if the source text contains `\u` or `\U` followed by an insufficient number of hex
|
||||
/// digits. This token id represents just the backslash; the subsequent `u` or `U` will be treated as the
|
||||
/// leading character of the following identifier token.
|
||||
incomplete_ucn,
|
||||
|
||||
/// Return true if token is identifier or keyword.
|
||||
pub fn isMacroIdentifier(id: Id) bool {
|
||||
switch (id) {
|
||||
|
|
@ -347,6 +408,9 @@ pub const Token = struct {
|
|||
.macro_func,
|
||||
.macro_function,
|
||||
.macro_pretty_func,
|
||||
.macro_date,
|
||||
.macro_time,
|
||||
.macro_timestamp,
|
||||
.keyword_auto,
|
||||
.keyword_auto_type,
|
||||
.keyword_break,
|
||||
|
|
@ -409,11 +473,6 @@ pub const Token = struct {
|
|||
.keyword_restrict2,
|
||||
.keyword_alignof1,
|
||||
.keyword_alignof2,
|
||||
.builtin_choose_expr,
|
||||
.builtin_va_arg,
|
||||
.builtin_offsetof,
|
||||
.builtin_bitoffsetof,
|
||||
.builtin_types_compatible_p,
|
||||
.keyword_attribute1,
|
||||
.keyword_attribute2,
|
||||
.keyword_extension,
|
||||
|
|
@ -444,6 +503,19 @@ pub const Token = struct {
|
|||
.keyword_thiscall2,
|
||||
.keyword_vectorcall,
|
||||
.keyword_vectorcall2,
|
||||
.keyword_fastcall,
|
||||
.keyword_fastcall2,
|
||||
.keyword_regcall,
|
||||
.keyword_cdecl,
|
||||
.keyword_cdecl2,
|
||||
.keyword_forceinline,
|
||||
.keyword_forceinline2,
|
||||
.keyword_unaligned,
|
||||
.keyword_unaligned2,
|
||||
.keyword_nonnull,
|
||||
.keyword_nullable,
|
||||
.keyword_nullable_result,
|
||||
.keyword_null_unspecified,
|
||||
.keyword_bit_int,
|
||||
.keyword_c23_alignas,
|
||||
.keyword_c23_alignof,
|
||||
|
|
@ -547,11 +619,18 @@ pub const Token = struct {
|
|||
.macro_file,
|
||||
.macro_line,
|
||||
.macro_counter,
|
||||
.macro_time,
|
||||
.macro_date,
|
||||
.macro_timestamp,
|
||||
.macro_param_pragma_operator,
|
||||
.macro_param_ms_identifier,
|
||||
.macro_param_ms_pragma,
|
||||
.placemarker,
|
||||
=> "",
|
||||
.macro_ws => " ",
|
||||
|
||||
.incomplete_ucn => "\\",
|
||||
|
||||
.macro_func => "__func__",
|
||||
.macro_function => "__FUNCTION__",
|
||||
.macro_pretty_func => "__PRETTY_FUNCTION__",
|
||||
|
|
@ -695,11 +774,6 @@ pub const Token = struct {
|
|||
.keyword_alignof2 => "__alignof__",
|
||||
.keyword_typeof1 => "__typeof",
|
||||
.keyword_typeof2 => "__typeof__",
|
||||
.builtin_choose_expr => "__builtin_choose_expr",
|
||||
.builtin_va_arg => "__builtin_va_arg",
|
||||
.builtin_offsetof => "__builtin_offsetof",
|
||||
.builtin_bitoffsetof => "__builtin_bitoffsetof",
|
||||
.builtin_types_compatible_p => "__builtin_types_compatible_p",
|
||||
.keyword_attribute1 => "__attribute",
|
||||
.keyword_attribute2 => "__attribute__",
|
||||
.keyword_extension => "__extension__",
|
||||
|
|
@ -730,6 +804,19 @@ pub const Token = struct {
|
|||
.keyword_thiscall2 => "_thiscall",
|
||||
.keyword_vectorcall => "__vectorcall",
|
||||
.keyword_vectorcall2 => "_vectorcall",
|
||||
.keyword_fastcall => "__fastcall",
|
||||
.keyword_fastcall2 => "_fastcall",
|
||||
.keyword_regcall => "__regcall",
|
||||
.keyword_cdecl => "__cdecl",
|
||||
.keyword_cdecl2 => "_cdecl",
|
||||
.keyword_forceinline => "__forceinline",
|
||||
.keyword_forceinline2 => "_forceinline",
|
||||
.keyword_unaligned => "__unaligned",
|
||||
.keyword_unaligned2 => "_unaligned",
|
||||
.keyword_nonnull => "_Nonnull",
|
||||
.keyword_nullable => "_Nullable",
|
||||
.keyword_nullable_result => "_Nullable_result",
|
||||
.keyword_null_unspecified => "_Null_unspecified",
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -742,11 +829,6 @@ pub const Token = struct {
|
|||
.macro_func,
|
||||
.macro_function,
|
||||
.macro_pretty_func,
|
||||
.builtin_choose_expr,
|
||||
.builtin_va_arg,
|
||||
.builtin_offsetof,
|
||||
.builtin_bitoffsetof,
|
||||
.builtin_types_compatible_p,
|
||||
=> "an identifier",
|
||||
.string_literal,
|
||||
.string_literal_utf_16,
|
||||
|
|
@ -763,7 +845,7 @@ pub const Token = struct {
|
|||
.unterminated_char_literal,
|
||||
.empty_char_literal,
|
||||
=> "a character literal",
|
||||
.pp_num, .embed_byte => "A number",
|
||||
.pp_num, .embed_byte => "a number",
|
||||
else => id.lexeme().?,
|
||||
};
|
||||
}
|
||||
|
|
@ -871,6 +953,12 @@ pub const Token = struct {
|
|||
.keyword_stdcall2,
|
||||
.keyword_thiscall2,
|
||||
.keyword_vectorcall2,
|
||||
.keyword_fastcall2,
|
||||
.keyword_cdecl2,
|
||||
.keyword_forceinline,
|
||||
.keyword_forceinline2,
|
||||
.keyword_unaligned,
|
||||
.keyword_unaligned2,
|
||||
=> if (langopts.ms_extensions) kw else .identifier,
|
||||
else => kw,
|
||||
};
|
||||
|
|
@ -1013,13 +1101,21 @@ pub const Token = struct {
|
|||
.{ "_thiscall", .keyword_thiscall2 },
|
||||
.{ "__vectorcall", .keyword_vectorcall },
|
||||
.{ "_vectorcall", .keyword_vectorcall2 },
|
||||
.{ "__fastcall", .keyword_fastcall },
|
||||
.{ "_fastcall", .keyword_fastcall2 },
|
||||
.{ "_regcall", .keyword_regcall },
|
||||
.{ "__cdecl", .keyword_cdecl },
|
||||
.{ "_cdecl", .keyword_cdecl2 },
|
||||
.{ "__forceinline", .keyword_forceinline },
|
||||
.{ "_forceinline", .keyword_forceinline2 },
|
||||
.{ "__unaligned", .keyword_unaligned },
|
||||
.{ "_unaligned", .keyword_unaligned2 },
|
||||
|
||||
// builtins that require special parsing
|
||||
.{ "__builtin_choose_expr", .builtin_choose_expr },
|
||||
.{ "__builtin_va_arg", .builtin_va_arg },
|
||||
.{ "__builtin_offsetof", .builtin_offsetof },
|
||||
.{ "__builtin_bitoffsetof", .builtin_bitoffsetof },
|
||||
.{ "__builtin_types_compatible_p", .builtin_types_compatible_p },
|
||||
// Type nullability
|
||||
.{ "_Nonnull", .keyword_nonnull },
|
||||
.{ "_Nullable", .keyword_nullable },
|
||||
.{ "_Nullable_result", .keyword_nullable_result },
|
||||
.{ "_Null_unspecified", .keyword_null_unspecified },
|
||||
});
|
||||
};
|
||||
|
||||
|
|
@ -1099,6 +1195,26 @@ pub fn next(self: *Tokenizer) Token {
|
|||
'u' => state = .u,
|
||||
'U' => state = .U,
|
||||
'L' => state = .L,
|
||||
'\\' => {
|
||||
const ucn_kind = UCNKind.classify(self.buf[self.index..]);
|
||||
switch (ucn_kind) {
|
||||
.none => {
|
||||
self.index += 1;
|
||||
id = .invalid;
|
||||
break;
|
||||
},
|
||||
.incomplete => {
|
||||
self.index += 1;
|
||||
id = .incomplete_ucn;
|
||||
break;
|
||||
},
|
||||
.hex4, .hex8 => {
|
||||
self.index += @intFromEnum(ucn_kind);
|
||||
id = .extended_identifier;
|
||||
state = .extended_identifier;
|
||||
},
|
||||
}
|
||||
},
|
||||
'a'...'t', 'v'...'z', 'A'...'K', 'M'...'T', 'V'...'Z', '_' => state = .identifier,
|
||||
'=' => state = .equal,
|
||||
'!' => state = .bang,
|
||||
|
|
@ -1324,6 +1440,20 @@ pub fn next(self: *Tokenizer) Token {
|
|||
break;
|
||||
},
|
||||
0x80...0xFF => state = .extended_identifier,
|
||||
'\\' => {
|
||||
const ucn_kind = UCNKind.classify(self.buf[self.index..]);
|
||||
switch (ucn_kind) {
|
||||
.none, .incomplete => {
|
||||
id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier;
|
||||
break;
|
||||
},
|
||||
.hex4, .hex8 => {
|
||||
state = .extended_identifier;
|
||||
self.index += @intFromEnum(ucn_kind);
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
else => {
|
||||
id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier;
|
||||
break;
|
||||
|
|
@ -1731,7 +1861,10 @@ pub fn next(self: *Tokenizer) Token {
|
|||
}
|
||||
} else if (self.index == self.buf.len) {
|
||||
switch (state) {
|
||||
.start, .line_comment => {},
|
||||
.start => {},
|
||||
.line_comment => if (self.langopts.preserve_comments) {
|
||||
id = .comment;
|
||||
},
|
||||
.u, .u8, .U, .L, .identifier => id = Token.getTokenId(self.langopts, self.buf[start..self.index]),
|
||||
.extended_identifier => id = .extended_identifier,
|
||||
|
||||
|
|
@ -2105,6 +2238,15 @@ test "comments" {
|
|||
.hash,
|
||||
.identifier,
|
||||
});
|
||||
try expectTokensExtra(
|
||||
\\//foo
|
||||
\\void
|
||||
\\//bar
|
||||
, &.{
|
||||
.comment, .nl,
|
||||
.keyword_void, .nl,
|
||||
.comment,
|
||||
}, .{ .preserve_comments = true });
|
||||
}
|
||||
|
||||
test "extended identifiers" {
|
||||
|
|
@ -2147,36 +2289,76 @@ test "C23 keywords" {
|
|||
.keyword_c23_thread_local,
|
||||
.keyword_nullptr,
|
||||
.keyword_typeof_unqual,
|
||||
}, .c23);
|
||||
}, .{ .standard = .c23 });
|
||||
}
|
||||
|
||||
test "Universal character names" {
|
||||
try expectTokens("\\", &.{.invalid});
|
||||
try expectTokens("\\g", &.{ .invalid, .identifier });
|
||||
try expectTokens("\\u", &.{ .incomplete_ucn, .identifier });
|
||||
try expectTokens("\\ua", &.{ .incomplete_ucn, .identifier });
|
||||
try expectTokens("\\U9", &.{ .incomplete_ucn, .identifier });
|
||||
try expectTokens("\\ug", &.{ .incomplete_ucn, .identifier });
|
||||
try expectTokens("\\uag", &.{ .incomplete_ucn, .identifier });
|
||||
|
||||
try expectTokens("\\ ", &.{ .invalid, .eof });
|
||||
try expectTokens("\\g ", &.{ .invalid, .identifier, .eof });
|
||||
try expectTokens("\\u ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("\\ua ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("\\U9 ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("\\ug ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("\\uag ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
|
||||
try expectTokens("a\\", &.{ .identifier, .invalid });
|
||||
try expectTokens("a\\g", &.{ .identifier, .invalid, .identifier });
|
||||
try expectTokens("a\\u", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
try expectTokens("a\\ua", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
try expectTokens("a\\U9", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
try expectTokens("a\\ug", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
try expectTokens("a\\uag", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
|
||||
try expectTokens("a\\ ", &.{ .identifier, .invalid, .eof });
|
||||
try expectTokens("a\\g ", &.{ .identifier, .invalid, .identifier, .eof });
|
||||
try expectTokens("a\\u ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("a\\ua ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("a\\U9 ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("a\\ug ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("a\\uag ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
}
|
||||
|
||||
test "Tokenizer fuzz test" {
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
const Context = struct {
|
||||
fn testOne(_: @This(), input_bytes: []const u8) anyerror!void {
|
||||
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena.deinit();
|
||||
var comp = Compilation.init(std.testing.allocator, arena.allocator(), undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
|
||||
const input_bytes = std.testing.fuzzInput(.{});
|
||||
if (input_bytes.len == 0) return;
|
||||
const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes);
|
||||
|
||||
const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes);
|
||||
|
||||
var tokenizer: Tokenizer = .{
|
||||
.buf = source.buf,
|
||||
.source = source.id,
|
||||
.langopts = comp.langopts,
|
||||
var tokenizer: Tokenizer = .{
|
||||
.buf = source.buf,
|
||||
.source = source.id,
|
||||
.langopts = comp.langopts,
|
||||
};
|
||||
while (true) {
|
||||
const prev_index = tokenizer.index;
|
||||
const tok = tokenizer.next();
|
||||
if (tok.id == .eof) break;
|
||||
try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress
|
||||
}
|
||||
}
|
||||
};
|
||||
while (true) {
|
||||
const prev_index = tokenizer.index;
|
||||
const tok = tokenizer.next();
|
||||
if (tok.id == .eof) break;
|
||||
try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress
|
||||
}
|
||||
return std.testing.fuzz(Context{}, Context.testOne, .{});
|
||||
}
|
||||
|
||||
fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, standard: ?LangOpts.Standard) !void {
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, langopts: ?LangOpts) !void {
|
||||
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena.deinit();
|
||||
var comp = Compilation.init(std.testing.allocator, arena.allocator(), undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
if (standard) |provided| {
|
||||
comp.langopts.standard = provided;
|
||||
if (langopts) |provided| {
|
||||
comp.langopts = provided;
|
||||
}
|
||||
const source = try comp.addSourceFromBuffer("path", contents);
|
||||
var tokenizer = Tokenizer{
|
||||
|
|
|
|||
155
lib/compiler/aro/aro/Toolchain.zig
vendored
155
lib/compiler/aro/aro/Toolchain.zig
vendored
|
|
@ -1,14 +1,15 @@
|
|||
const std = @import("std");
|
||||
const Driver = @import("Driver.zig");
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const mem = std.mem;
|
||||
const system_defaults = @import("system_defaults");
|
||||
const target_util = @import("target.zig");
|
||||
const Linux = @import("toolchains/Linux.zig");
|
||||
const Multilib = @import("Driver/Multilib.zig");
|
||||
const Filesystem = @import("Driver/Filesystem.zig").Filesystem;
|
||||
|
||||
pub const PathList = std.ArrayListUnmanaged([]const u8);
|
||||
const system_defaults = @import("system_defaults");
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Driver = @import("Driver.zig");
|
||||
const Filesystem = @import("Driver/Filesystem.zig").Filesystem;
|
||||
const Multilib = @import("Driver/Multilib.zig");
|
||||
const target_util = @import("target.zig");
|
||||
|
||||
pub const PathList = std.ArrayList([]const u8);
|
||||
|
||||
pub const RuntimeLibKind = enum {
|
||||
compiler_rt,
|
||||
|
|
@ -35,22 +36,13 @@ pub const UnwindLibKind = enum {
|
|||
|
||||
const Inner = union(enum) {
|
||||
uninitialized,
|
||||
linux: Linux,
|
||||
unknown: void,
|
||||
|
||||
fn deinit(self: *Inner, allocator: mem.Allocator) void {
|
||||
switch (self.*) {
|
||||
.linux => |*linux| linux.deinit(allocator),
|
||||
.uninitialized, .unknown => {},
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const Toolchain = @This();
|
||||
|
||||
filesystem: Filesystem = .{ .real = {} },
|
||||
filesystem: Filesystem,
|
||||
driver: *Driver,
|
||||
arena: mem.Allocator,
|
||||
|
||||
/// The list of toolchain specific path prefixes to search for libraries.
|
||||
library_paths: PathList = .{},
|
||||
|
|
@ -72,7 +64,6 @@ pub fn getTarget(tc: *const Toolchain) std.Target {
|
|||
fn getDefaultLinker(tc: *const Toolchain) []const u8 {
|
||||
return switch (tc.inner) {
|
||||
.uninitialized => unreachable,
|
||||
.linux => |linux| linux.getDefaultLinker(tc.getTarget()),
|
||||
.unknown => "ld",
|
||||
};
|
||||
}
|
||||
|
|
@ -81,36 +72,26 @@ fn getDefaultLinker(tc: *const Toolchain) []const u8 {
|
|||
pub fn discover(tc: *Toolchain) !void {
|
||||
if (tc.inner != .uninitialized) return;
|
||||
|
||||
const target = tc.getTarget();
|
||||
tc.inner = switch (target.os.tag) {
|
||||
.linux => if (target.cpu.arch == .hexagon)
|
||||
.{ .unknown = {} } // TODO
|
||||
else if (target.cpu.arch.isMIPS())
|
||||
.{ .unknown = {} } // TODO
|
||||
else if (target.cpu.arch.isPowerPC())
|
||||
.{ .unknown = {} } // TODO
|
||||
else if (target.cpu.arch == .ve)
|
||||
.{ .unknown = {} } // TODO
|
||||
else
|
||||
.{ .linux = .{} },
|
||||
else => .{ .unknown = {} }, // TODO
|
||||
};
|
||||
tc.inner = .unknown;
|
||||
return switch (tc.inner) {
|
||||
.uninitialized => unreachable,
|
||||
.linux => |*linux| linux.discover(tc),
|
||||
.unknown => {},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(tc: *Toolchain) void {
|
||||
const gpa = tc.driver.comp.gpa;
|
||||
tc.inner.deinit(gpa);
|
||||
|
||||
tc.library_paths.deinit(gpa);
|
||||
tc.file_paths.deinit(gpa);
|
||||
tc.program_paths.deinit(gpa);
|
||||
}
|
||||
|
||||
/// Write assembler path to `buf` and return a slice of it
|
||||
pub fn getAssemblerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
|
||||
return tc.getProgramPath("as", buf);
|
||||
}
|
||||
|
||||
/// Write linker path to `buf` and return a slice of it
|
||||
pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
|
||||
// --ld-path= takes precedence over -fuse-ld= and specifies the executable
|
||||
|
|
@ -149,7 +130,12 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
|
|||
// to a relative path is surprising. This is more complex due to priorities
|
||||
// among -B, COMPILER_PATH and PATH. --ld-path= should be used instead.
|
||||
if (mem.indexOfScalar(u8, use_linker, '/') != null) {
|
||||
try tc.driver.comp.addDiagnostic(.{ .tag = .fuse_ld_path }, &.{});
|
||||
try tc.driver.comp.diagnostics.add(.{
|
||||
.text = "'-fuse-ld=' taking a path is deprecated; use '--ld-path=' instead",
|
||||
.kind = .off,
|
||||
.opt = .@"fuse-ld-path",
|
||||
.location = null,
|
||||
});
|
||||
}
|
||||
|
||||
if (std.fs.path.isAbsolute(use_linker)) {
|
||||
|
|
@ -157,8 +143,11 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
|
|||
return use_linker;
|
||||
}
|
||||
} else {
|
||||
var linker_name = try std.array_list.Managed(u8).initCapacity(tc.driver.comp.gpa, 5 + use_linker.len); // "ld64." ++ use_linker
|
||||
defer linker_name.deinit();
|
||||
const gpa = tc.driver.comp.gpa;
|
||||
var linker_name: std.ArrayList(u8) = .empty;
|
||||
defer linker_name.deinit(gpa);
|
||||
try linker_name.ensureUnusedCapacity(tc.driver.comp.gpa, 5 + use_linker.len); // "ld64." ++ use_linker
|
||||
|
||||
if (tc.getTarget().os.tag.isDarwin()) {
|
||||
linker_name.appendSliceAssumeCapacity("ld64.");
|
||||
} else {
|
||||
|
|
@ -185,27 +174,33 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
|
|||
/// TODO: this isn't exactly right since our target names don't necessarily match up
|
||||
/// with GCC's.
|
||||
/// For example the Zig target `arm-freestanding-eabi` would need the `arm-none-eabi` tools
|
||||
fn possibleProgramNames(raw_triple: ?[]const u8, name: []const u8, buf: *[64]u8) std.BoundedArray([]const u8, 2) {
|
||||
var possible_names: std.BoundedArray([]const u8, 2) = .{};
|
||||
fn possibleProgramNames(
|
||||
raw_triple: ?[]const u8,
|
||||
name: []const u8,
|
||||
buf: *[64]u8,
|
||||
possible_name_buf: *[2][]const u8,
|
||||
) []const []const u8 {
|
||||
var i: u32 = 0;
|
||||
if (raw_triple) |triple| {
|
||||
if (std.fmt.bufPrint(buf, "{s}-{s}", .{ triple, name })) |res| {
|
||||
possible_names.appendAssumeCapacity(res);
|
||||
possible_name_buf[i] = res;
|
||||
i += 1;
|
||||
} else |_| {}
|
||||
}
|
||||
possible_names.appendAssumeCapacity(name);
|
||||
possible_name_buf[i] = name;
|
||||
|
||||
return possible_names;
|
||||
return possible_name_buf[0..i];
|
||||
}
|
||||
|
||||
/// Add toolchain `file_paths` to argv as `-L` arguments
|
||||
pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
|
||||
try argv.ensureUnusedCapacity(tc.file_paths.items.len);
|
||||
pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
|
||||
try argv.ensureUnusedCapacity(tc.driver.comp.gpa, tc.file_paths.items.len);
|
||||
|
||||
var bytes_needed: usize = 0;
|
||||
for (tc.file_paths.items) |path| {
|
||||
bytes_needed += path.len + 2; // +2 for `-L`
|
||||
}
|
||||
var bytes = try tc.arena.alloc(u8, bytes_needed);
|
||||
var bytes = try tc.driver.comp.arena.alloc(u8, bytes_needed);
|
||||
var index: usize = 0;
|
||||
for (tc.file_paths.items) |path| {
|
||||
@memcpy(bytes[index..][0..2], "-L");
|
||||
|
|
@ -223,9 +218,10 @@ fn getProgramPath(tc: *const Toolchain, name: []const u8, buf: []u8) []const u8
|
|||
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
|
||||
|
||||
var tool_specific_buf: [64]u8 = undefined;
|
||||
const possible_names = possibleProgramNames(tc.driver.raw_target_triple, name, &tool_specific_buf);
|
||||
var possible_name_buf: [2][]const u8 = undefined;
|
||||
const possible_names = possibleProgramNames(tc.driver.raw_target_triple, name, &tool_specific_buf, &possible_name_buf);
|
||||
|
||||
for (possible_names.constSlice()) |tool_name| {
|
||||
for (possible_names) |tool_name| {
|
||||
for (tc.program_paths.items) |program_path| {
|
||||
defer fib.reset();
|
||||
|
||||
|
|
@ -252,6 +248,7 @@ pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 {
|
|||
var path_buf: [std.fs.max_path_bytes]u8 = undefined;
|
||||
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
|
||||
const allocator = fib.allocator();
|
||||
const arena = tc.driver.comp.arena;
|
||||
|
||||
const sysroot = tc.getSysroot();
|
||||
|
||||
|
|
@ -260,15 +257,15 @@ pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 {
|
|||
const aro_dir = std.fs.path.dirname(tc.driver.aro_name) orelse "";
|
||||
const candidate = try std.fs.path.join(allocator, &.{ aro_dir, "..", name });
|
||||
if (tc.filesystem.exists(candidate)) {
|
||||
return tc.arena.dupe(u8, candidate);
|
||||
return arena.dupe(u8, candidate);
|
||||
}
|
||||
|
||||
if (tc.searchPaths(&fib, sysroot, tc.library_paths.items, name)) |path| {
|
||||
return tc.arena.dupe(u8, path);
|
||||
return arena.dupe(u8, path);
|
||||
}
|
||||
|
||||
if (tc.searchPaths(&fib, sysroot, tc.file_paths.items, name)) |path| {
|
||||
return try tc.arena.dupe(u8, path);
|
||||
return try arena.dupe(u8, path);
|
||||
}
|
||||
|
||||
return name;
|
||||
|
|
@ -299,7 +296,7 @@ const PathKind = enum {
|
|||
program,
|
||||
};
|
||||
|
||||
/// Join `components` into a path. If the path exists, dupe it into the toolchain arena and
|
||||
/// Join `components` into a path. If the path exists, dupe it into the Compilation arena and
|
||||
/// add it to the specified path list.
|
||||
pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void {
|
||||
var path_buf: [std.fs.max_path_bytes]u8 = undefined;
|
||||
|
|
@ -308,7 +305,7 @@ pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind
|
|||
const candidate = try std.fs.path.join(fib.allocator(), components);
|
||||
|
||||
if (tc.filesystem.exists(candidate)) {
|
||||
const duped = try tc.arena.dupe(u8, candidate);
|
||||
const duped = try tc.driver.comp.arena.dupe(u8, candidate);
|
||||
const dest = switch (dest_kind) {
|
||||
.library => &tc.library_paths,
|
||||
.file => &tc.file_paths,
|
||||
|
|
@ -318,10 +315,10 @@ pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind
|
|||
}
|
||||
}
|
||||
|
||||
/// Join `components` using the toolchain arena and add the resulting path to `dest_kind`. Does not check
|
||||
/// Join `components` using the Compilation arena and add the resulting path to `dest_kind`. Does not check
|
||||
/// whether the path actually exists
|
||||
pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void {
|
||||
const full_path = try std.fs.path.join(tc.arena, components);
|
||||
const full_path = try std.fs.path.join(tc.driver.comp.arena, components);
|
||||
const dest = switch (dest_kind) {
|
||||
.library => &tc.library_paths,
|
||||
.file => &tc.file_paths,
|
||||
|
|
@ -330,16 +327,6 @@ pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, des
|
|||
try dest.append(tc.driver.comp.gpa, full_path);
|
||||
}
|
||||
|
||||
/// Add linker args to `argv`. Does not add path to linker executable as first item; that must be handled separately
|
||||
/// Items added to `argv` will be string literals or owned by `tc.arena` so they must not be individually freed
|
||||
pub fn buildLinkerArgs(tc: *Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
|
||||
return switch (tc.inner) {
|
||||
.uninitialized => unreachable,
|
||||
.linux => |*linux| linux.buildLinkerArgs(tc, argv),
|
||||
.unknown => @panic("This toolchain does not support linking yet"),
|
||||
};
|
||||
}
|
||||
|
||||
fn getDefaultRuntimeLibKind(tc: *const Toolchain) RuntimeLibKind {
|
||||
if (tc.getTarget().abi.isAndroid()) {
|
||||
return .compiler_rt;
|
||||
|
|
@ -396,7 +383,7 @@ fn getUnwindLibKind(tc: *const Toolchain) !UnwindLibKind {
|
|||
return .libgcc;
|
||||
} else if (mem.eql(u8, libname, "libunwind")) {
|
||||
if (tc.getRuntimeLibKind() == .libgcc) {
|
||||
try tc.driver.comp.addDiagnostic(.{ .tag = .incompatible_unwindlib }, &.{});
|
||||
try tc.driver.err("--rtlib=libgcc requires --unwindlib=libgcc", .{});
|
||||
}
|
||||
return .compiler_rt;
|
||||
} else {
|
||||
|
|
@ -412,7 +399,7 @@ fn getAsNeededOption(is_solaris: bool, needed: bool) []const u8 {
|
|||
}
|
||||
}
|
||||
|
||||
fn addUnwindLibrary(tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
|
||||
fn addUnwindLibrary(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
|
||||
const unw = try tc.getUnwindLibKind();
|
||||
const target = tc.getTarget();
|
||||
if ((target.abi.isAndroid() and unw == .libgcc) or
|
||||
|
|
@ -422,46 +409,49 @@ fn addUnwindLibrary(tc: *const Toolchain, argv: *std.array_list.Managed([]const
|
|||
|
||||
const lgk = tc.getLibGCCKind();
|
||||
const as_needed = lgk == .unspecified and !target.abi.isAndroid() and !target_util.isCygwinMinGW(target) and target.os.tag != .aix;
|
||||
|
||||
try argv.ensureUnusedCapacity(tc.driver.comp.gpa, 3);
|
||||
if (as_needed) {
|
||||
try argv.append(getAsNeededOption(target.os.tag == .solaris, true));
|
||||
argv.appendAssumeCapacity(getAsNeededOption(target.os.tag == .solaris, true));
|
||||
}
|
||||
switch (unw) {
|
||||
.none => return,
|
||||
.libgcc => if (lgk == .static) try argv.append("-lgcc_eh") else try argv.append("-lgcc_s"),
|
||||
.libgcc => argv.appendAssumeCapacity(if (lgk == .static) "-lgcc_eh" else "-lgcc_s"),
|
||||
.compiler_rt => if (target.os.tag == .aix) {
|
||||
if (lgk != .static) {
|
||||
try argv.append("-lunwind");
|
||||
argv.appendAssumeCapacity("-lunwind");
|
||||
}
|
||||
} else if (lgk == .static) {
|
||||
try argv.append("-l:libunwind.a");
|
||||
argv.appendAssumeCapacity("-l:libunwind.a");
|
||||
} else if (lgk == .shared) {
|
||||
if (target_util.isCygwinMinGW(target)) {
|
||||
try argv.append("-l:libunwind.dll.a");
|
||||
argv.appendAssumeCapacity("-l:libunwind.dll.a");
|
||||
} else {
|
||||
try argv.append("-l:libunwind.so");
|
||||
argv.appendAssumeCapacity("-l:libunwind.so");
|
||||
}
|
||||
} else {
|
||||
try argv.append("-lunwind");
|
||||
argv.appendAssumeCapacity("-lunwind");
|
||||
},
|
||||
}
|
||||
|
||||
if (as_needed) {
|
||||
try argv.append(getAsNeededOption(target.os.tag == .solaris, false));
|
||||
argv.appendAssumeCapacity(getAsNeededOption(target.os.tag == .solaris, false));
|
||||
}
|
||||
}
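The reserve-then-append pattern above trades three fallible `append` calls for one `ensureUnusedCapacity`; a standalone sketch of the same std.ArrayList pattern (`gpa` is an assumed allocator, and the strings are placeholders):

    var list: std.ArrayList([]const u8) = .empty;
    defer list.deinit(gpa);
    try list.ensureUnusedCapacity(gpa, 3); // single fallible call
    list.appendAssumeCapacity("--as-needed"); // these appends can no longer fail
    list.appendAssumeCapacity("-lunwind");
    list.appendAssumeCapacity("--no-as-needed");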
|
||||
|
||||
fn addLibGCC(tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
|
||||
fn addLibGCC(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
|
||||
const gpa = tc.driver.comp.gpa;
|
||||
const libgcc_kind = tc.getLibGCCKind();
|
||||
if (libgcc_kind == .static or libgcc_kind == .unspecified) {
|
||||
try argv.append("-lgcc");
|
||||
try argv.append(gpa, "-lgcc");
|
||||
}
|
||||
try tc.addUnwindLibrary(argv);
|
||||
if (libgcc_kind == .shared) {
|
||||
try argv.append("-lgcc");
|
||||
try argv.append(gpa, "-lgcc");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
|
||||
pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
|
||||
const target = tc.getTarget();
|
||||
const rlt = tc.getRuntimeLibKind();
|
||||
switch (rlt) {
|
||||
|
|
@ -472,7 +462,7 @@ pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.array_list.Managed([]cons
|
|||
if (target_util.isKnownWindowsMSVCEnvironment(target)) {
|
||||
const rtlib_str = tc.driver.rtlib orelse system_defaults.rtlib;
|
||||
if (!mem.eql(u8, rtlib_str, "platform")) {
|
||||
try tc.driver.comp.addDiagnostic(.{ .tag = .unsupported_rtlib_gcc, .extra = .{ .str = "MSVC" } }, &.{});
|
||||
try tc.driver.err("unsupported runtime library 'libgcc' for platform 'MSVC'", .{});
|
||||
}
|
||||
} else {
|
||||
try tc.addLibGCC(argv);
|
||||
|
|
@ -481,20 +471,19 @@ pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.array_list.Managed([]cons
|
|||
}
|
||||
|
||||
if (target.abi.isAndroid() and !tc.driver.static and !tc.driver.static_pie) {
|
||||
try argv.append("-ldl");
|
||||
try argv.append(tc.driver.comp.gpa, "-ldl");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn defineSystemIncludes(tc: *Toolchain) !void {
|
||||
return switch (tc.inner) {
|
||||
.uninitialized => unreachable,
|
||||
.linux => |*linux| linux.defineSystemIncludes(tc),
|
||||
.unknown => {
|
||||
if (tc.driver.nostdinc) return;
|
||||
|
||||
const comp = tc.driver.comp;
|
||||
if (!tc.driver.nobuiltininc) {
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name);
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name, tc.driver.resource_dir);
|
||||
}
|
||||
|
||||
if (!tc.driver.nostdlibinc) {
|
||||
4198 lib/compiler/aro/aro/Tree.zig vendored
File diff suppressed because it is too large

2676 lib/compiler/aro/aro/Type.zig vendored
File diff suppressed because it is too large

3151 lib/compiler/aro/aro/TypeStore.zig vendored Normal file
File diff suppressed because it is too large

378 lib/compiler/aro/aro/Value.zig vendored
@ -2,14 +2,14 @@ const std = @import("std");
|
|||
const assert = std.debug.assert;
|
||||
const BigIntConst = std.math.big.int.Const;
|
||||
const BigIntMutable = std.math.big.int.Mutable;
|
||||
const backend = @import("../backend.zig");
|
||||
const Interner = backend.Interner;
|
||||
|
||||
const Interner = @import("../backend.zig").Interner;
|
||||
const BigIntSpace = Interner.Tag.Int.BigIntSpace;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Type = @import("Type.zig");
|
||||
const target_util = @import("target.zig");
|
||||
|
||||
const annex_g = @import("annex_g.zig");
|
||||
const Writer = std.Io.Writer;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const target_util = @import("target.zig");
|
||||
const QualType = @import("TypeStore.zig").QualType;
|
||||
|
||||
const Value = @This();
|
||||
|
||||
|
|
@ -33,11 +33,19 @@ pub fn int(i: anytype, comp: *Compilation) !Value {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn pointer(r: Interner.Key.Pointer, comp: *Compilation) !Value {
|
||||
return intern(comp, .{ .pointer = r });
|
||||
}
|
||||
|
||||
pub fn ref(v: Value) Interner.Ref {
|
||||
std.debug.assert(v.opt_ref != .none);
|
||||
return @enumFromInt(@intFromEnum(v.opt_ref));
|
||||
}
|
||||
|
||||
pub fn fromRef(r: Interner.Ref) Value {
|
||||
return .{ .opt_ref = @enumFromInt(@intFromEnum(r)) };
|
||||
}
|
||||
|
||||
pub fn is(v: Value, tag: std.meta.Tag(Interner.Key), comp: *const Compilation) bool {
|
||||
if (v.opt_ref == .none) return false;
|
||||
return comp.interner.get(v.ref()) == tag;
|
||||
|
|
@ -68,7 +76,11 @@ test "minUnsignedBits" {
|
|||
}
|
||||
};
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena_state.deinit();
|
||||
const arena = arena_state.allocator();
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
|
||||
comp.target = try std.zig.system.resolveTargetQuery(target_query);
|
||||
|
|
@ -103,7 +115,11 @@ test "minSignedBits" {
|
|||
}
|
||||
};
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena_state.deinit();
|
||||
const arena = arena_state.allocator();
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
|
||||
comp.target = try std.zig.system.resolveTargetQuery(target_query);
|
||||
|
|
@ -133,24 +149,27 @@ pub const FloatToIntChangeKind = enum {
|
|||
|
||||
/// Converts the stored value from a float to an integer.
|
||||
/// `.none` value remains unchanged.
|
||||
pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChangeKind {
|
||||
pub fn floatToInt(v: *Value, dest_ty: QualType, comp: *Compilation) !FloatToIntChangeKind {
|
||||
if (v.opt_ref == .none) return .none;
|
||||
|
||||
const float_val = v.toFloat(f128, comp);
|
||||
const was_zero = float_val == 0;
|
||||
|
||||
if (dest_ty.is(.bool)) {
|
||||
if (dest_ty.is(comp, .bool)) {
|
||||
const was_one = float_val == 1.0;
|
||||
v.* = fromBool(!was_zero);
|
||||
if (was_zero or was_one) return .none;
|
||||
return .value_changed;
|
||||
} else if (dest_ty.isUnsignedInt(comp) and float_val < 0) {
|
||||
} else if (dest_ty.signedness(comp) == .unsigned and float_val < 0) {
|
||||
v.* = zero;
|
||||
return .out_of_range;
|
||||
} else if (!std.math.isFinite(float_val)) {
|
||||
v.* = .{};
|
||||
return .overflow;
|
||||
}
|
||||
|
||||
const signedness = dest_ty.signedness(comp);
|
||||
const bits: usize = @intCast(dest_ty.bitSizeof(comp).?);
|
||||
const bits: usize = @intCast(dest_ty.bitSizeof(comp));
|
||||
|
||||
var big_int: std.math.big.int.Mutable = .{
|
||||
.limbs = try comp.gpa.alloc(std.math.big.Limb, @max(
|
||||
|
|
@ -160,6 +179,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
|
|||
.len = undefined,
|
||||
.positive = undefined,
|
||||
};
|
||||
defer comp.gpa.free(big_int.limbs);
|
||||
const had_fraction = switch (big_int.setFloat(float_val, .trunc)) {
|
||||
.inexact => true,
|
||||
.exact => false,
|
||||
|
|
@ -177,11 +197,11 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
|
|||
|
||||
/// Converts the stored value from an integer to a float.
|
||||
/// `.none` value remains unchanged.
|
||||
pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void {
|
||||
pub fn intToFloat(v: *Value, dest_ty: QualType, comp: *Compilation) !void {
|
||||
if (v.opt_ref == .none) return;
|
||||
|
||||
if (dest_ty.isComplex()) {
|
||||
const bits = dest_ty.bitSizeof(comp).?;
|
||||
if (dest_ty.is(comp, .complex)) {
|
||||
const bits = dest_ty.bitSizeof(comp);
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = .{ v.toFloat(f16, comp), 0 } },
|
||||
64 => .{ .cf32 = .{ v.toFloat(f32, comp), 0 } },
|
||||
|
|
@ -193,7 +213,7 @@ pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void {
|
|||
v.* = try intern(comp, .{ .complex = cf });
|
||||
return;
|
||||
}
|
||||
const bits = dest_ty.bitSizeof(comp).?;
|
||||
const bits = dest_ty.bitSizeof(comp);
|
||||
return switch (comp.interner.get(v.ref()).int) {
|
||||
inline .u64, .i64 => |data| {
|
||||
const f: Interner.Key.Float = switch (bits) {
|
||||
|
|
@ -232,14 +252,16 @@ pub const IntCastChangeKind = enum {
|
|||
|
||||
/// Truncates or extends bits based on type.
|
||||
/// `.none` value remains unchanged.
|
||||
pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind {
|
||||
pub fn intCast(v: *Value, dest_ty: QualType, comp: *Compilation) !IntCastChangeKind {
|
||||
if (v.opt_ref == .none) return .none;
|
||||
const key = comp.interner.get(v.ref());
|
||||
if (key == .pointer or key == .bytes) return .none;
|
||||
|
||||
const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp).?);
|
||||
const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp));
|
||||
const dest_signed = dest_ty.signedness(comp) == .signed;
|
||||
|
||||
var space: BigIntSpace = undefined;
|
||||
const big = v.toBigInt(&space, comp);
|
||||
const big = key.toBigInt(&space);
|
||||
const value_bits = big.bitCountTwosComp();
|
||||
|
||||
// if big is negative, then is signed.
|
||||
|
|
@ -269,10 +291,10 @@ pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind
|
|||
|
||||
/// Converts the stored value to a float of the specified type
|
||||
/// `.none` value remains unchanged.
|
||||
pub fn floatCast(v: *Value, dest_ty: Type, comp: *Compilation) !void {
|
||||
pub fn floatCast(v: *Value, dest_ty: QualType, comp: *Compilation) !void {
|
||||
if (v.opt_ref == .none) return;
|
||||
const bits = dest_ty.bitSizeof(comp).?;
|
||||
if (dest_ty.isComplex()) {
|
||||
const bits = dest_ty.bitSizeof(comp);
|
||||
if (dest_ty.is(comp, .complex)) {
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = .{ v.toFloat(f16, comp), v.imag(f16, comp) } },
|
||||
64 => .{ .cf32 = .{ v.toFloat(f32, comp), v.imag(f32, comp) } },
|
||||
|
|
@ -370,11 +392,8 @@ fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn toBigInt(val: Value, space: *BigIntSpace, comp: *const Compilation) BigIntConst {
|
||||
return switch (comp.interner.get(val.ref()).int) {
|
||||
inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
|
||||
.big_int => |b| b,
|
||||
};
|
||||
fn toBigInt(val: Value, space: *BigIntSpace, comp: *const Compilation) BigIntConst {
|
||||
return comp.interner.get(val.ref()).toBigInt(space);
|
||||
}
|
||||
|
||||
pub fn isZero(v: Value, comp: *const Compilation) bool {
|
||||
|
|
@ -398,6 +417,7 @@ pub fn isZero(v: Value, comp: *const Compilation) bool {
|
|||
inline else => |data| return data[0] == 0.0 and data[1] == 0.0,
|
||||
},
|
||||
.bytes => return false,
|
||||
.pointer => return false,
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
|
@ -461,12 +481,19 @@ pub fn toBool(v: Value, comp: *const Compilation) bool {
|
|||
|
||||
pub fn toInt(v: Value, comptime T: type, comp: *const Compilation) ?T {
|
||||
if (v.opt_ref == .none) return null;
|
||||
if (comp.interner.get(v.ref()) != .int) return null;
|
||||
const key = comp.interner.get(v.ref());
|
||||
if (key != .int) return null;
|
||||
var space: BigIntSpace = undefined;
|
||||
const big_int = v.toBigInt(&space, comp);
|
||||
const big_int = key.toBigInt(&space);
|
||||
return big_int.toInt(T) catch null;
|
||||
}
|
||||
|
||||
pub fn toBytes(v: Value, comp: *const Compilation) []const u8 {
|
||||
assert(v.opt_ref != .none);
|
||||
const key = comp.interner.get(v.ref());
|
||||
return key.bytes;
|
||||
}
|
||||
|
||||
const ComplexOp = enum {
|
||||
add,
|
||||
sub,
|
||||
|
|
@ -492,10 +519,11 @@ fn complexAddSub(lhs: Value, rhs: Value, comptime T: type, op: ComplexOp, comp:
|
|||
};
|
||||
}
|
||||
|
||||
pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
if (ty.isFloat()) {
|
||||
if (ty.isComplex()) {
|
||||
pub fn add(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const scalar_kind = qt.scalarKind(comp);
|
||||
if (scalar_kind.isFloat()) {
|
||||
if (scalar_kind == .complex_float) {
|
||||
res.* = switch (bits) {
|
||||
32 => try complexAddSub(lhs, rhs, f16, .add, comp),
|
||||
64 => try complexAddSub(lhs, rhs, f32, .add, comp),
|
||||
|
|
@ -516,29 +544,60 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
|||
};
|
||||
res.* = try intern(comp, .{ .float = f });
|
||||
return false;
|
||||
} else {
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
|
||||
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
);
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits);
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
return overflowed;
|
||||
}
|
||||
const lhs_key = comp.interner.get(lhs.ref());
|
||||
const rhs_key = comp.interner.get(rhs.ref());
|
||||
if (lhs_key == .bytes or rhs_key == .bytes) {
|
||||
res.* = .{};
|
||||
return false;
|
||||
}
|
||||
if (lhs_key == .pointer or rhs_key == .pointer) {
|
||||
const rel, const index = if (lhs_key == .pointer)
|
||||
.{ lhs_key.pointer, rhs }
|
||||
else
|
||||
.{ rhs_key.pointer, lhs };
|
||||
|
||||
const elem_size = try int(qt.childType(comp).sizeofOrNull(comp) orelse 1, comp);
|
||||
var total_offset: Value = undefined;
|
||||
const mul_overflow = try total_offset.mul(elem_size, index, comp.type_store.ptrdiff, comp);
|
||||
const old_offset = fromRef(rel.offset);
|
||||
const add_overflow = try total_offset.add(total_offset, old_offset, comp.type_store.ptrdiff, comp);
|
||||
_ = try total_offset.intCast(comp.type_store.ptrdiff, comp);
|
||||
res.* = try pointer(.{ .node = rel.node, .offset = total_offset.ref() }, comp);
|
||||
return mul_overflow or add_overflow;
|
||||
}
|
||||
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs_key.toBigInt(&lhs_space);
|
||||
const rhs_bigint = rhs_key.toBigInt(&rhs_space);
|
||||
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
);
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, qt.signedness(comp), bits);
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
return overflowed;
|
||||
}
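To make the new pointer branch concrete: adding an integer to a pointer constant folds to `old_offset + index * elem_size` in `ptrdiff` arithmetic. A hedged call-site sketch; `ptr_val` and `int_ptr_qt` (the pointer's QualType) are illustrative assumptions:

    // Fold `p + 3` where `p` has a 4-byte element type and offset 0:
    var res: Value = undefined;
    _ = try res.add(ptr_val, try Value.int(3, comp), int_ptr_qt, comp); // returns whether the offset math overflowed
    // res is now a .pointer on the same node with offset 0 + 3 * 4 = 12.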
|
||||
|
||||
pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
if (ty.isFloat()) {
|
||||
if (ty.isComplex()) {
|
||||
pub fn negate(res: *Value, val: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
return res.sub(zero, val, qt, undefined, comp);
|
||||
}
|
||||
|
||||
pub fn decrement(res: *Value, val: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
return res.sub(val, one, qt, undefined, comp);
|
||||
}
|
||||
|
||||
/// elem_size is only used when subtracting two pointers, so we can scale the result by the size of the element type
|
||||
pub fn sub(res: *Value, lhs: Value, rhs: Value, qt: QualType, elem_size: u64, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const scalar_kind = qt.scalarKind(comp);
|
||||
if (scalar_kind.isFloat()) {
|
||||
if (scalar_kind == .complex_float) {
|
||||
res.* = switch (bits) {
|
||||
32 => try complexAddSub(lhs, rhs, f16, .sub, comp),
|
||||
64 => try complexAddSub(lhs, rhs, f32, .sub, comp),
|
||||
|
|
@ -559,29 +618,61 @@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
|||
};
|
||||
res.* = try intern(comp, .{ .float = f });
|
||||
return false;
|
||||
} else {
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
|
||||
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
);
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits);
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
return overflowed;
|
||||
}
|
||||
const lhs_key = comp.interner.get(lhs.ref());
|
||||
const rhs_key = comp.interner.get(rhs.ref());
|
||||
if (lhs_key == .bytes or rhs_key == .bytes) {
|
||||
res.* = .{};
|
||||
return false;
|
||||
}
|
||||
if (lhs_key == .pointer and rhs_key == .pointer) {
|
||||
const lhs_pointer = lhs_key.pointer;
|
||||
const rhs_pointer = rhs_key.pointer;
|
||||
if (lhs_pointer.node != rhs_pointer.node) {
|
||||
res.* = .{};
|
||||
return false;
|
||||
}
|
||||
const lhs_offset = fromRef(lhs_pointer.offset);
|
||||
const rhs_offset = fromRef(rhs_pointer.offset);
|
||||
const overflowed = try res.sub(lhs_offset, rhs_offset, comp.type_store.ptrdiff, undefined, comp);
|
||||
const rhs_size = try int(elem_size, comp);
|
||||
_ = try res.div(res.*, rhs_size, comp.type_store.ptrdiff, comp);
|
||||
return overflowed;
|
||||
} else if (lhs_key == .pointer) {
|
||||
const rel = lhs_key.pointer;
|
||||
|
||||
const lhs_size = try int(elem_size, comp);
|
||||
var total_offset: Value = undefined;
|
||||
const mul_overflow = try total_offset.mul(lhs_size, rhs, comp.type_store.ptrdiff, comp);
|
||||
const old_offset = fromRef(rel.offset);
|
||||
const add_overflow = try total_offset.sub(old_offset, total_offset, comp.type_store.ptrdiff, undefined, comp);
|
||||
_ = try total_offset.intCast(comp.type_store.ptrdiff, comp);
|
||||
res.* = try pointer(.{ .node = rel.node, .offset = total_offset.ref() }, comp);
|
||||
return mul_overflow or add_overflow;
|
||||
}
|
||||
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs_key.toBigInt(&lhs_space);
|
||||
const rhs_bigint = rhs_key.toBigInt(&rhs_space);
|
||||
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
);
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, qt.signedness(comp), bits);
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
return overflowed;
|
||||
}
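Likewise, subtracting two pointers into the same object folds to `(lhs.offset - rhs.offset) / elem_size`; a sketch under the same assumptions (`ptr_a` and `ptr_b` are illustrative pointer Values):

    // &arr[5] - &arr[2] with 4-byte elements: offsets 20 and 8 give (20 - 8) / 4 = 3.
    var diff: Value = undefined;
    _ = try diff.sub(ptr_a, ptr_b, comp.type_store.ptrdiff, 4, comp);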
|
||||
|
||||
pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
if (ty.isFloat()) {
|
||||
if (ty.isComplex()) {
|
||||
pub fn mul(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const scalar_kind = qt.scalarKind(comp);
|
||||
if (scalar_kind.isFloat()) {
|
||||
if (scalar_kind == .complex_float) {
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = annex_g.complexFloatMul(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) },
|
||||
64 => .{ .cf32 = annex_g.complexFloatMul(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) },
|
||||
|
|
@ -624,7 +715,7 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
|||
|
||||
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, comp.gpa);
|
||||
|
||||
const signedness = ty.signedness(comp);
|
||||
const signedness = qt.signedness(comp);
|
||||
const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits);
|
||||
if (overflowed) {
|
||||
result_bigint.truncate(result_bigint.toConst(), signedness, bits);
|
||||
|
|
@ -635,10 +726,11 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
|||
}
|
||||
|
||||
/// caller guarantees rhs != 0
|
||||
pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
if (ty.isFloat()) {
|
||||
if (ty.isComplex()) {
|
||||
pub fn div(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const scalar_kind = qt.scalarKind(comp);
|
||||
if (scalar_kind.isFloat()) {
|
||||
if (scalar_kind == .complex_float) {
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = annex_g.complexFloatDiv(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) },
|
||||
64 => .{ .cf32 = annex_g.complexFloatDiv(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) },
|
||||
|
|
@ -689,22 +781,21 @@ pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
|||
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
|
||||
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_q.toConst() } });
|
||||
return !result_q.toConst().fitsInTwosComp(ty.signedness(comp), bits);
|
||||
return !result_q.toConst().fitsInTwosComp(qt.signedness(comp), bits);
|
||||
}
|
||||
}
|
||||
|
||||
/// caller guarantees rhs != 0
|
||||
/// caller guarantees lhs != std.math.minInt(T) OR rhs != -1
|
||||
pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
||||
pub fn rem(lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !Value {
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
|
||||
|
||||
const signedness = ty.signedness(comp);
|
||||
if (signedness == .signed) {
|
||||
if (qt.signedness(comp) == .signed) {
|
||||
var spaces: [2]BigIntSpace = undefined;
|
||||
const min_val = try Value.minInt(ty, comp);
|
||||
const min_val = try Value.minInt(qt, comp);
|
||||
const negative = BigIntMutable.init(&spaces[0].limbs, -1).toConst();
|
||||
const big_one = BigIntMutable.init(&spaces[1].limbs, 1).toConst();
|
||||
if (lhs.compare(.eq, min_val, comp) and rhs_bigint.eql(negative)) {
|
||||
|
|
@ -712,9 +803,9 @@ pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
|||
} else if (rhs_bigint.order(big_one).compare(.lt)) {
|
||||
// lhs - @divTrunc(lhs, rhs) * rhs
|
||||
var tmp: Value = undefined;
|
||||
_ = try tmp.div(lhs, rhs, ty, comp);
|
||||
_ = try tmp.mul(tmp, rhs, ty, comp);
|
||||
_ = try tmp.sub(lhs, tmp, ty, comp);
|
||||
_ = try tmp.div(lhs, rhs, qt, comp);
|
||||
_ = try tmp.mul(tmp, rhs, qt, comp);
|
||||
_ = try tmp.sub(lhs, tmp, qt, undefined, comp);
|
||||
return tmp;
|
||||
}
|
||||
}
|
||||
|
|
@ -801,8 +892,8 @@ pub fn bitAnd(lhs: Value, rhs: Value, comp: *Compilation) !Value {
|
|||
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
}
|
||||
|
||||
pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
pub fn bitNot(val: Value, qt: QualType, comp: *Compilation) !Value {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
var val_space: Value.BigIntSpace = undefined;
|
||||
const val_bigint = val.toBigInt(&val_space, comp);
|
||||
|
||||
|
|
@ -813,21 +904,21 @@ pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value {
|
|||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
result_bigint.bitNotWrap(val_bigint, ty.signedness(comp), bits);
|
||||
result_bigint.bitNotWrap(val_bigint, qt.signedness(comp), bits);
|
||||
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
}
|
||||
|
||||
pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
pub fn shl(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const shift = rhs.toInt(usize, comp) orelse std.math.maxInt(usize);
|
||||
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
if (shift > bits) {
|
||||
if (lhs_bigint.positive) {
|
||||
res.* = try Value.maxInt(ty, comp);
|
||||
res.* = try Value.maxInt(qt, comp);
|
||||
} else {
|
||||
res.* = try Value.minInt(ty, comp);
|
||||
res.* = try Value.minInt(qt, comp);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
|
@ -840,7 +931,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
|||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
result_bigint.shiftLeft(lhs_bigint, shift);
|
||||
const signedness = ty.signedness(comp);
|
||||
const signedness = qt.signedness(comp);
|
||||
const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits);
|
||||
if (overflowed) {
|
||||
result_bigint.truncate(result_bigint.toConst(), signedness, bits);
|
||||
|
|
@ -849,7 +940,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
|||
return overflowed;
|
||||
}
|
||||
|
||||
pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
||||
pub fn shr(lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !Value {
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const shift = rhs.toInt(usize, comp) orelse return zero;
|
||||
|
|
@ -865,7 +956,7 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
|||
}
|
||||
}
|
||||
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
|
|
@ -877,8 +968,8 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
|||
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
}
|
||||
|
||||
pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value {
|
||||
const bits = ty.bitSizeof(comp).?;
|
||||
pub fn complexConj(val: Value, qt: QualType, comp: *Compilation) !Value {
|
||||
const bits = qt.bitSizeof(comp);
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = .{ val.toFloat(f16, comp), -val.imag(f16, comp) } },
|
||||
64 => .{ .cf32 = .{ val.toFloat(f32, comp), -val.imag(f32, comp) } },
|
||||
|
|
@ -890,12 +981,17 @@ pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value {
|
|||
return intern(comp, .{ .complex = cf });
|
||||
}
|
||||
|
||||
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool {
|
||||
fn shallowCompare(lhs: Value, op: std.math.CompareOperator, rhs: Value) ?bool {
|
||||
if (op == .eq) {
|
||||
return lhs.opt_ref == rhs.opt_ref;
|
||||
} else if (lhs.opt_ref == rhs.opt_ref) {
|
||||
return std.math.Order.eq.compare(op);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool {
|
||||
if (lhs.shallowCompare(op, rhs)) |val| return val;
|
||||
|
||||
const lhs_key = comp.interner.get(lhs.ref());
|
||||
const rhs_key = comp.interner.get(rhs.ref());
|
||||
|
|
@ -918,10 +1014,33 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons
|
|||
return lhs_bigint.order(rhs_bigint).compare(op);
|
||||
}
|
||||
|
||||
fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *Compilation) !Value {
|
||||
const signedness = ty.signedness(comp);
|
||||
/// Returns null for values that cannot be compared at compile time (e.g. `&x < &y`) for globals `x` and `y`.
|
||||
pub fn comparePointers(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) ?bool {
|
||||
if (lhs.shallowCompare(op, rhs)) |val| return val;
|
||||
|
||||
const lhs_key = comp.interner.get(lhs.ref());
|
||||
const rhs_key = comp.interner.get(rhs.ref());
|
||||
|
||||
if (lhs_key == .pointer and rhs_key == .pointer) {
|
||||
const lhs_pointer = lhs_key.pointer;
|
||||
const rhs_pointer = rhs_key.pointer;
|
||||
switch (op) {
|
||||
.eq => if (lhs_pointer.node != rhs_pointer.node) return false,
|
||||
.neq => if (lhs_pointer.node != rhs_pointer.node) return true,
|
||||
else => if (lhs_pointer.node != rhs_pointer.node) return null,
|
||||
}
|
||||
|
||||
const lhs_offset = fromRef(lhs_pointer.offset);
|
||||
const rhs_offset = fromRef(rhs_pointer.offset);
|
||||
return lhs_offset.compare(op, rhs_offset, comp);
|
||||
}
|
||||
return null;
|
||||
}
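Note the asymmetry encoded above: pointers into different objects still fold for equality and inequality, but ordered comparisons give up and return null. Illustratively (`addr_x` and `addr_y` are assumed pointer Values into distinct globals):

    const eq = addr_x.comparePointers(.eq, addr_y, comp); // false for distinct objects
    const lt = addr_x.comparePointers(.lt, addr_y, comp); // null: not a compile-time constant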
|
||||
|
||||
fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, qt: QualType, comp: *Compilation) !Value {
|
||||
const signedness = qt.signedness(comp);
|
||||
if (limit == .min and signedness == .unsigned) return Value.zero;
|
||||
const mag_bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
const mag_bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
switch (mag_bits) {
|
||||
inline 8, 16, 32, 64 => |bits| {
|
||||
if (limit == .min) return Value.int(@as(i64, std.math.minInt(std.meta.Int(.signed, bits))), comp);
|
||||
|
|
@ -946,44 +1065,63 @@ fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *C
|
|||
return Value.intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
}
|
||||
|
||||
pub fn minInt(ty: Type, comp: *Compilation) !Value {
|
||||
return twosCompIntLimit(.min, ty, comp);
|
||||
pub fn minInt(qt: QualType, comp: *Compilation) !Value {
|
||||
return twosCompIntLimit(.min, qt, comp);
|
||||
}
|
||||
|
||||
pub fn maxInt(ty: Type, comp: *Compilation) !Value {
|
||||
return twosCompIntLimit(.max, ty, comp);
|
||||
pub fn maxInt(qt: QualType, comp: *Compilation) !Value {
|
||||
return twosCompIntLimit(.max, qt, comp);
|
||||
}
|
||||
|
||||
pub fn print(v: Value, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void {
|
||||
if (ty.is(.bool)) {
|
||||
return w.writeAll(if (v.isZero(comp)) "false" else "true");
|
||||
const NestedPrint = union(enum) {
|
||||
pointer: struct {
|
||||
node: u32,
|
||||
offset: Value,
|
||||
},
|
||||
};
|
||||
|
||||
pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void {
|
||||
try w.writeByte('&');
|
||||
try w.writeAll(base);
|
||||
if (!offset.isZero(comp)) {
|
||||
const maybe_nested = try offset.print(comp.type_store.ptrdiff, comp, w);
|
||||
std.debug.assert(maybe_nested == null);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!?NestedPrint {
|
||||
if (qt.is(comp, .bool)) {
|
||||
try w.writeAll(if (v.isZero(comp)) "false" else "true");
|
||||
return null;
|
||||
}
|
||||
const key = comp.interner.get(v.ref());
|
||||
switch (key) {
|
||||
.null => return w.writeAll("nullptr_t"),
|
||||
.null => try w.writeAll("nullptr_t"),
|
||||
.int => |repr| switch (repr) {
|
||||
inline .u64, .i64, .big_int => |x| return w.print("{d}", .{x}),
|
||||
inline else => |x| try w.print("{d}", .{x}),
|
||||
},
|
||||
.float => |repr| switch (repr) {
|
||||
.f16 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}),
|
||||
.f32 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000000) / 1000000}),
|
||||
inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}),
|
||||
.f16 => |x| try w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}),
|
||||
.f32 => |x| try w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000000) / 1000000}),
|
||||
inline else => |x| try w.print("{d}", .{@as(f64, @floatCast(x))}),
|
||||
},
|
||||
.bytes => |b| return printString(b, ty, comp, w),
|
||||
.bytes => |b| try printString(b, qt, comp, w),
|
||||
.complex => |repr| switch (repr) {
|
||||
.cf32 => |components| return w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }),
|
||||
inline else => |components| return w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }),
|
||||
.cf32 => |components| try w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }),
|
||||
inline else => |components| try w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }),
|
||||
},
|
||||
.pointer => |ptr| return .{ .pointer = .{ .node = ptr.node, .offset = fromRef(ptr.offset) } },
|
||||
else => unreachable, // not a value
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void {
|
||||
const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?);
|
||||
pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void {
|
||||
const size: Compilation.CharUnitSize = @enumFromInt(qt.childType(comp).sizeof(comp));
|
||||
const without_null = bytes[0 .. bytes.len - @intFromEnum(size)];
|
||||
try w.writeByte('"');
|
||||
switch (size) {
|
||||
.@"1" => try w.print("{f}", .{std.zig.fmtString(without_null)}),
|
||||
.@"1" => try std.zig.stringEscape(without_null, w),
|
||||
.@"2" => {
|
||||
var items: [2]u16 = undefined;
|
||||
var i: usize = 0;
|
||||
|
80 lib/compiler/aro/aro/char_info.zig vendored
@ -442,48 +442,48 @@ pub fn isInvisible(codepoint: u21) bool {
|
|||
}
|
||||
|
||||
/// Checks for identifier characters which resemble non-identifier characters
|
||||
pub fn homoglyph(codepoint: u21) ?u21 {
|
||||
pub fn homoglyph(codepoint: u21) ?[]const u8 {
|
||||
assert(codepoint > 0x7F);
|
||||
return switch (codepoint) {
|
||||
0x01c3 => '!', // LATIN LETTER RETROFLEX CLICK
|
||||
0x037e => ';', // GREEK QUESTION MARK
|
||||
0x2212 => '-', // MINUS SIGN
|
||||
0x2215 => '/', // DIVISION SLASH
|
||||
0x2216 => '\\', // SET MINUS
|
||||
0x2217 => '*', // ASTERISK OPERATOR
|
||||
0x2223 => '|', // DIVIDES
|
||||
0x2227 => '^', // LOGICAL AND
|
||||
0x2236 => ':', // RATIO
|
||||
0x223c => '~', // TILDE OPERATOR
|
||||
0xa789 => ':', // MODIFIER LETTER COLON
|
||||
0xff01 => '!', // FULLWIDTH EXCLAMATION MARK
|
||||
0xff03 => '#', // FULLWIDTH NUMBER SIGN
|
||||
0xff04 => '$', // FULLWIDTH DOLLAR SIGN
|
||||
0xff05 => '%', // FULLWIDTH PERCENT SIGN
|
||||
0xff06 => '&', // FULLWIDTH AMPERSAND
|
||||
0xff08 => '(', // FULLWIDTH LEFT PARENTHESIS
|
||||
0xff09 => ')', // FULLWIDTH RIGHT PARENTHESIS
|
||||
0xff0a => '*', // FULLWIDTH ASTERISK
|
||||
0xff0b => '+', // FULLWIDTH ASTERISK
|
||||
0xff0c => ',', // FULLWIDTH COMMA
|
||||
0xff0d => '-', // FULLWIDTH HYPHEN-MINUS
|
||||
0xff0e => '.', // FULLWIDTH FULL STOP
|
||||
0xff0f => '/', // FULLWIDTH SOLIDUS
|
||||
0xff1a => ':', // FULLWIDTH COLON
|
||||
0xff1b => ';', // FULLWIDTH SEMICOLON
|
||||
0xff1c => '<', // FULLWIDTH LESS-THAN SIGN
|
||||
0xff1d => '=', // FULLWIDTH EQUALS SIGN
|
||||
0xff1e => '>', // FULLWIDTH GREATER-THAN SIGN
|
||||
0xff1f => '?', // FULLWIDTH QUESTION MARK
|
||||
0xff20 => '@', // FULLWIDTH COMMERCIAL AT
|
||||
0xff3b => '[', // FULLWIDTH LEFT SQUARE BRACKET
|
||||
0xff3c => '\\', // FULLWIDTH REVERSE SOLIDUS
|
||||
0xff3d => ']', // FULLWIDTH RIGHT SQUARE BRACKET
|
||||
0xff3e => '^', // FULLWIDTH CIRCUMFLEX ACCENT
|
||||
0xff5b => '{', // FULLWIDTH LEFT CURLY BRACKET
|
||||
0xff5c => '|', // FULLWIDTH VERTICAL LINE
|
||||
0xff5d => '}', // FULLWIDTH RIGHT CURLY BRACKET
|
||||
0xff5e => '~', // FULLWIDTH TILDE
|
||||
0x01c3 => "!", // LATIN LETTER RETROFLEX CLICK
|
||||
0x037e => ";", // GREEK QUESTION MARK
|
||||
0x2212 => "-", // MINUS SIGN
|
||||
0x2215 => "/", // DIVISION SLASH
|
||||
0x2216 => "\\", // SET MINUS
|
||||
0x2217 => "*", // ASTERISK OPERATOR
|
||||
0x2223 => "|", // DIVIDES
|
||||
0x2227 => "^", // LOGICAL AND
|
||||
0x2236 => ":", // RATIO
|
||||
0x223c => "~", // TILDE OPERATOR
|
||||
0xa789 => ":", // MODIFIER LETTER COLON
|
||||
0xff01 => "!", // FULLWIDTH EXCLAMATION MARK
|
||||
0xff03 => "#", // FULLWIDTH NUMBER SIGN
|
||||
0xff04 => "$", // FULLWIDTH DOLLAR SIGN
|
||||
0xff05 => "%", // FULLWIDTH PERCENT SIGN
|
||||
0xff06 => "&", // FULLWIDTH AMPERSAND
|
||||
0xff08 => "(", // FULLWIDTH LEFT PARENTHESIS
|
||||
0xff09 => ")", // FULLWIDTH RIGHT PARENTHESIS
|
||||
0xff0a => "*", // FULLWIDTH ASTERISK
|
||||
0xff0b => "+", // FULLWIDTH ASTERISK
|
||||
0xff0c => ",", // FULLWIDTH COMMA
|
||||
0xff0d => "-", // FULLWIDTH HYPHEN-MINUS
|
||||
0xff0e => ".", // FULLWIDTH FULL STOP
|
||||
0xff0f => "/", // FULLWIDTH SOLIDUS
|
||||
0xff1a => ":", // FULLWIDTH COLON
|
||||
0xff1b => ";", // FULLWIDTH SEMICOLON
|
||||
0xff1c => "<", // FULLWIDTH LESS-THAN SIGN
|
||||
0xff1d => "=", // FULLWIDTH EQUALS SIGN
|
||||
0xff1e => ">", // FULLWIDTH GREATER-THAN SIGN
|
||||
0xff1f => "?", // FULLWIDTH QUESTION MARK
|
||||
0xff20 => "@", // FULLWIDTH COMMERCIAL AT
|
||||
0xff3b => "[", // FULLWIDTH LEFT SQUARE BRACKET
|
||||
0xff3c => "\\", // FULLWIDTH REVERSE SOLIDUS
|
||||
0xff3d => "]", // FULLWIDTH RIGHT SQUARE BRACKET
|
||||
0xff3e => "^", // FULLWIDTH CIRCUMFLEX ACCENT
|
||||
0xff5b => "{", // FULLWIDTH LEFT CURLY BRACKET
|
||||
0xff5c => "|", // FULLWIDTH VERTICAL LINE
|
||||
0xff5d => "}", // FULLWIDTH RIGHT CURLY BRACKET
|
||||
0xff5e => "~", // FULLWIDTH TILDE
|
||||
else => null,
|
||||
};
|
||||
}
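Since the helper now returns the ASCII look-alike as a string, a diagnostic can splice it directly with `{s}`; a hedged usage sketch:

    if (char_info.homoglyph(0x2212)) |ascii| { // MINUS SIGN
        std.debug.print("U+2212 looks like '{s}'\n", .{ascii});
    }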
4 lib/compiler/aro/aro/features.zig vendored
@@ -57,13 +57,13 @@ pub fn hasExtension(comp: *Compilation, ext: []const u8) bool {
    // C11 features
    .c_alignas = true,
    .c_alignof = true,
    .c_atomic = false, // TODO
    .c_atomic = true,
    .c_generic_selections = true,
    .c_static_assert = true,
    .c_thread_local = target_util.isTlsSupported(comp.target),
    // misc
    .overloadable_unmarked = false, // TODO
    .statement_attributes_with_gnu_syntax = false, // TODO
    .statement_attributes_with_gnu_syntax = true,
    .gnu_asm = true,
    .gnu_asm_goto_with_outputs = true,
    .matrix_types = false, // TODO
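In effect, `__has_extension` style queries for C11 atomics now answer true; schematically, assuming an initialized `comp` and that the query name matches the field above:

    const supported = features.hasExtension(comp, "c_atomic"); // now true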
82 lib/compiler/aro/aro/pragmas/gcc.zig vendored
@ -1,10 +1,11 @@
|
|||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Diagnostics = @import("../Diagnostics.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Parser = @import("../Parser.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
|
||||
const GCC = @This();
|
||||
|
|
@ -18,8 +19,8 @@ pragma: Pragma = .{
|
|||
.parserHandler = parserHandler,
|
||||
.preserveTokens = preserveTokens,
|
||||
},
|
||||
original_options: Diagnostics.Options = .{},
|
||||
options_stack: std.ArrayListUnmanaged(Diagnostics.Options) = .empty,
|
||||
original_state: Diagnostics.State = .{},
|
||||
state_stack: std.ArrayList(Diagnostics.State) = .empty,
|
||||
|
||||
const Directive = enum {
|
||||
warning,
|
||||
|
|
@ -38,19 +39,19 @@ const Directive = enum {
|
|||
|
||||
fn beforePreprocess(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *GCC = @fieldParentPtr("pragma", pragma);
|
||||
self.original_options = comp.diagnostics.options;
|
||||
self.original_state = comp.diagnostics.state;
|
||||
}
|
||||
|
||||
fn beforeParse(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *GCC = @fieldParentPtr("pragma", pragma);
|
||||
comp.diagnostics.options = self.original_options;
|
||||
self.options_stack.items.len = 0;
|
||||
comp.diagnostics.state = self.original_state;
|
||||
self.state_stack.items.len = 0;
|
||||
}
|
||||
|
||||
fn afterParse(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *GCC = @fieldParentPtr("pragma", pragma);
|
||||
comp.diagnostics.options = self.original_options;
|
||||
self.options_stack.items.len = 0;
|
||||
comp.diagnostics.state = self.original_state;
|
||||
self.state_stack.items.len = 0;
|
||||
}
|
||||
|
||||
pub fn init(allocator: mem.Allocator) !*Pragma {
|
||||
|
|
@ -61,7 +62,7 @@ pub fn init(allocator: mem.Allocator) !*Pragma {
|
|||
|
||||
fn deinit(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *GCC = @fieldParentPtr("pragma", pragma);
|
||||
self.options_stack.deinit(comp.gpa);
|
||||
self.state_stack.deinit(comp.gpa);
|
||||
comp.gpa.destroy(self);
|
||||
}
|
||||
|
||||
|
|
@ -76,23 +77,14 @@ fn diagnosticHandler(self: *GCC, pp: *Preprocessor, start_idx: TokenIndex) Pragm
|
|||
.ignored, .warning, .@"error", .fatal => {
|
||||
const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) {
|
||||
error.ExpectedStringLiteral => {
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_requires_string_literal,
|
||||
.loc = diagnostic_tok.loc,
|
||||
.extra = .{ .str = "GCC diagnostic" },
|
||||
}, pp.expansionSlice(start_idx));
|
||||
return Pragma.err(pp, start_idx, .pragma_requires_string_literal, .{"GCC diagnostic"});
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
if (!mem.startsWith(u8, str, "-W")) {
|
||||
const next = pp.tokens.get(start_idx + 1);
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .malformed_warning_check,
|
||||
.loc = next.loc,
|
||||
.extra = .{ .str = "GCC diagnostic" },
|
||||
}, pp.expansionSlice(start_idx + 1));
|
||||
return Pragma.err(pp, start_idx + 1, .malformed_warning_check, .{"GCC diagnostic"});
|
||||
}
|
||||
const new_kind: Diagnostics.Kind = switch (diagnostic) {
|
||||
const new_kind: Diagnostics.Message.Kind = switch (diagnostic) {
|
||||
.ignored => .off,
|
||||
.warning => .warning,
|
||||
.@"error" => .@"error",
|
||||
|
|
@ -100,10 +92,10 @@ fn diagnosticHandler(self: *GCC, pp: *Preprocessor, start_idx: TokenIndex) Pragm
|
|||
else => unreachable,
|
||||
};
|
||||
|
||||
try pp.comp.diagnostics.set(str[2..], new_kind);
|
||||
try pp.diagnostics.set(str[2..], new_kind);
|
||||
},
|
||||
.push => try self.options_stack.append(pp.comp.gpa, pp.comp.diagnostics.options),
|
||||
.pop => pp.comp.diagnostics.options = self.options_stack.pop() orelse self.original_options,
|
||||
.push => try self.state_stack.append(pp.comp.gpa, pp.diagnostics.state),
|
||||
.pop => pp.diagnostics.state = self.state_stack.pop() orelse self.original_state,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -112,38 +104,24 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
|
|||
const directive_tok = pp.tokens.get(start_idx + 1);
|
||||
if (directive_tok.id == .nl) return;
|
||||
|
||||
const gcc_pragma = std.meta.stringToEnum(Directive, pp.expandedSlice(directive_tok)) orelse
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .unknown_gcc_pragma,
|
||||
.loc = directive_tok.loc,
|
||||
}, pp.expansionSlice(start_idx + 1));
|
||||
const gcc_pragma = std.meta.stringToEnum(Directive, pp.expandedSlice(directive_tok)) orelse {
|
||||
return Pragma.err(pp, start_idx + 1, .unknown_gcc_pragma, .{});
|
||||
};
|
||||
|
||||
switch (gcc_pragma) {
|
||||
.warning, .@"error" => {
|
||||
const text = Pragma.pasteTokens(pp, start_idx + 2) catch |err| switch (err) {
|
||||
error.ExpectedStringLiteral => {
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_requires_string_literal,
|
||||
.loc = directive_tok.loc,
|
||||
.extra = .{ .str = @tagName(gcc_pragma) },
|
||||
}, pp.expansionSlice(start_idx + 1));
|
||||
return Pragma.err(pp, start_idx + 1, .pragma_requires_string_literal, .{@tagName(gcc_pragma)});
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
const extra = Diagnostics.Message.Extra{ .str = try pp.comp.diagnostics.arena.allocator().dupe(u8, text) };
|
||||
const diagnostic_tag: Diagnostics.Tag = if (gcc_pragma == .warning) .pragma_warning_message else .pragma_error_message;
|
||||
return pp.comp.addDiagnostic(
|
||||
.{ .tag = diagnostic_tag, .loc = directive_tok.loc, .extra = extra },
|
||||
pp.expansionSlice(start_idx + 1),
|
||||
);
|
||||
|
||||
return Pragma.err(pp, start_idx + 1, if (gcc_pragma == .warning) .pragma_warning_message else .pragma_error_message, .{text});
|
||||
},
|
||||
.diagnostic => return self.diagnosticHandler(pp, start_idx + 2) catch |err| switch (err) {
|
||||
error.UnknownPragma => {
|
||||
const tok = pp.tokens.get(start_idx + 2);
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .unknown_gcc_pragma_directive,
|
||||
.loc = tok.loc,
|
||||
}, pp.expansionSlice(start_idx + 2));
|
||||
return Pragma.err(pp, start_idx + 2, .unknown_gcc_pragma_directive, .{});
|
||||
},
|
||||
else => |e| return e,
|
||||
},
|
||||
|
|
@ -154,19 +132,13 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
|
|||
if (tok.id == .nl) break;
|
||||
|
||||
if (!tok.id.isMacroIdentifier()) {
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_poison_identifier,
|
||||
.loc = tok.loc,
|
||||
}, pp.expansionSlice(start_idx + i));
|
||||
return Pragma.err(pp, start_idx + i, .pragma_poison_identifier, .{});
|
||||
}
|
||||
const str = pp.expandedSlice(tok);
|
||||
if (pp.defines.get(str) != null) {
|
||||
try pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_poison_macro,
|
||||
.loc = tok.loc,
|
||||
}, pp.expansionSlice(start_idx + i));
|
||||
try Pragma.err(pp, start_idx + i, .pragma_poison_macro, .{});
|
||||
}
|
||||
try pp.poisoned_identifiers.put(str, {});
|
||||
try pp.poisoned_identifiers.put(pp.comp.gpa, str, {});
|
||||
}
|
||||
return;
|
||||
},
35 lib/compiler/aro/aro/pragmas/message.zig vendored
@ -1,12 +1,13 @@
|
|||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Diagnostics = @import("../Diagnostics.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Parser = @import("../Parser.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Source = @import("../Source.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
|
||||
const Message = @This();
|
||||
|
||||
|
|
@ -27,24 +28,32 @@ fn deinit(pragma: *Pragma, comp: *Compilation) void {
|
|||
}
|
||||
|
||||
fn preprocessorHandler(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
|
||||
const message_tok = pp.tokens.get(start_idx);
|
||||
const message_expansion_locs = pp.expansionSlice(start_idx);
|
||||
|
||||
const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) {
|
||||
error.ExpectedStringLiteral => {
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_requires_string_literal,
|
||||
.loc = message_tok.loc,
|
||||
.extra = .{ .str = "message" },
|
||||
}, message_expansion_locs);
|
||||
return Pragma.err(pp, start_idx, .pragma_requires_string_literal, .{"message"});
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
const message_tok = pp.tokens.get(start_idx);
|
||||
const message_expansion_locs = pp.expansionSlice(start_idx);
|
||||
const loc = if (message_expansion_locs.len != 0)
|
||||
message_expansion_locs[message_expansion_locs.len - 1]
|
||||
else
|
||||
message_tok.loc;
|
||||
const extra = Diagnostics.Message.Extra{ .str = try pp.comp.diagnostics.arena.allocator().dupe(u8, str) };
|
||||
return pp.comp.addDiagnostic(.{ .tag = .pragma_message, .loc = loc, .extra = extra }, &.{});
|
||||
|
||||
const diagnostic: Pragma.Diagnostic = .pragma_message;
|
||||
|
||||
var sf = std.heap.stackFallback(1024, pp.comp.gpa);
|
||||
var allocating: std.Io.Writer.Allocating = .init(sf.get());
|
||||
defer allocating.deinit();
|
||||
|
||||
Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, .{str}) catch return error.OutOfMemory;
|
||||
|
||||
try pp.diagnostics.add(.{
|
||||
.text = allocating.written(),
|
||||
.kind = diagnostic.kind,
|
||||
.opt = diagnostic.opt,
|
||||
.location = loc.expand(pp.comp),
|
||||
});
|
||||
}
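The new path formats the message through a stack-fallback buffer before handing it to the diagnostics engine; a standalone sketch of that std pattern (`gpa` is an assumed allocator and the format string is a placeholder):

    var sf = std.heap.stackFallback(1024, gpa);
    var allocating: std.Io.Writer.Allocating = .init(sf.get());
    defer allocating.deinit();
    try allocating.writer.print("#pragma message: {s}", .{"hello"});
    const text = allocating.written(); // short messages never touch the heap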
34 lib/compiler/aro/aro/pragmas/once.zig vendored
@ -1,12 +1,13 @@
|
|||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Diagnostics = @import("../Diagnostics.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Parser = @import("../Parser.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Source = @import("../Source.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
|
||||
const Once = @This();
|
||||
|
||||
|
|
@ -14,15 +15,14 @@ pragma: Pragma = .{
|
|||
.afterParse = afterParse,
|
||||
.deinit = deinit,
|
||||
.preprocessorHandler = preprocessorHandler,
|
||||
.preserveTokens = preserveTokens,
|
||||
},
|
||||
pragma_once: std.AutoHashMap(Source.Id, void),
|
||||
pragma_once: std.AutoHashMapUnmanaged(Source.Id, void) = .empty,
|
||||
preprocess_count: u32 = 0,
|
||||
|
||||
pub fn init(allocator: mem.Allocator) !*Pragma {
|
||||
var once = try allocator.create(Once);
|
||||
once.* = .{
|
||||
.pragma_once = std.AutoHashMap(Source.Id, void).init(allocator),
|
||||
};
|
||||
once.* = .{};
|
||||
return &once.pragma;
|
||||
}
|
||||
|
||||
|
|
@ -33,8 +33,9 @@ fn afterParse(pragma: *Pragma, _: *Compilation) void {
|
|||
|
||||
fn deinit(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *Once = @fieldParentPtr("pragma", pragma);
|
||||
self.pragma_once.deinit();
|
||||
self.pragma_once.deinit(comp.gpa);
|
||||
comp.gpa.destroy(self);
|
||||
pragma.* = undefined;
|
||||
}
|
||||
|
||||
fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
|
||||
|
|
@ -42,15 +43,22 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
|
|||
const name_tok = pp.tokens.get(start_idx);
|
||||
const next = pp.tokens.get(start_idx + 1);
|
||||
if (next.id != .nl) {
|
||||
try pp.comp.addDiagnostic(.{
|
||||
.tag = .extra_tokens_directive_end,
|
||||
.loc = name_tok.loc,
|
||||
}, pp.expansionSlice(start_idx + 1));
|
||||
const diagnostic: Preprocessor.Diagnostic = .extra_tokens_directive_end;
|
||||
return pp.diagnostics.addWithLocation(pp.comp, .{
|
||||
.text = diagnostic.fmt,
|
||||
.kind = diagnostic.kind,
|
||||
.opt = diagnostic.opt,
|
||||
.location = name_tok.loc.expand(pp.comp),
|
||||
}, pp.expansionSlice(start_idx + 1), true);
|
||||
}
|
||||
const seen = self.preprocess_count == pp.preprocess_count;
|
||||
const prev = try self.pragma_once.fetchPut(name_tok.loc.id, {});
|
||||
const prev = try self.pragma_once.fetchPut(pp.comp.gpa, name_tok.loc.id, {});
|
||||
if (prev != null and !seen) {
|
||||
return error.StopPreprocessing;
|
||||
}
|
||||
self.preprocess_count = pp.preprocess_count;
|
||||
}
|
||||
|
||||
fn preserveTokens(_: *Pragma, _: *Preprocessor, _: TokenIndex) bool {
|
||||
return false;
|
||||
}
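The map is now unmanaged, so every mutating call passes the allocator explicitly; a minimal sketch of the same pattern outside the pragma (`gpa` is an assumed allocator):

    var seen: std.AutoHashMapUnmanaged(u32, void) = .empty;
    defer seen.deinit(gpa);
    const prev = try seen.fetchPut(gpa, 42, {});
    if (prev != null) {
        // already recorded
    }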
41 lib/compiler/aro/aro/pragmas/pack.zig vendored
@ -1,10 +1,11 @@
|
|||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig");
const Pragma = @import("../Pragma.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Tree = @import("../Tree.zig");
const TokenIndex = Tree.TokenIndex;

@ -13,9 +14,8 @@ const Pack = @This();
pragma: Pragma = .{
    .deinit = deinit,
    .parserHandler = parserHandler,
    .preserveTokens = preserveTokens,
},
stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .empty,
stack: std.ArrayList(struct { label: []const u8, val: u8 }) = .empty,

pub fn init(allocator: mem.Allocator) !*Pragma {
    var pack = try allocator.create(Pack);

@ -34,10 +34,7 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
    var idx = start_idx + 1;
    const l_paren = p.pp.tokens.get(idx);
    if (l_paren.id != .l_paren) {
        return p.comp.addDiagnostic(.{
            .tag = .pragma_pack_lparen,
            .loc = l_paren.loc,
        }, p.pp.expansionSlice(idx));
        return Pragma.err(p.pp, idx, .pragma_pack_lparen, .{});
    }
    idx += 1;

@ -54,11 +51,11 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
        pop,
    };
    const action = std.meta.stringToEnum(Action, p.tokSlice(arg)) orelse {
        return p.errTok(.pragma_pack_unknown_action, arg);
        return Pragma.err(p.pp, arg, .pragma_pack_unknown_action, .{});
    };
    switch (action) {
        .show => {
            try p.errExtra(.pragma_pack_show, arg, .{ .unsigned = p.pragma_pack orelse 8 });
            return Pragma.err(p.pp, arg, .pragma_pack_show, .{p.pragma_pack orelse 8});
        },
        .push, .pop => {
            var new_val: ?u8 = null;

@ -75,21 +72,23 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
                        idx += 1;
                        const int = idx;
                        idx += 1;
                        if (tok_ids[int] != .pp_num) return p.errTok(.pragma_pack_int_ident, int);
                        if (tok_ids[int] != .pp_num) {
                            return Pragma.err(p.pp, int, .pragma_pack_int_ident, .{});
                        }
                        new_val = (try packInt(p, int)) orelse return;
                    }
                },
                else => return p.errTok(.pragma_pack_int_ident, next),
                else => return Pragma.err(p.pp, next, .pragma_pack_int_ident, .{}),
            }
        }
        if (action == .push) {
            try pack.stack.append(p.gpa, .{ .label = label orelse "", .val = p.pragma_pack orelse 8 });
            try pack.stack.append(p.comp.gpa, .{ .label = label orelse "", .val = p.pragma_pack orelse 8 });
        } else {
            pack.pop(p, label);
            if (new_val != null) {
                try p.errTok(.pragma_pack_undefined_pop, arg);
                try Pragma.err(p.pp, arg, .pragma_pack_undefined_pop, .{});
            } else if (pack.stack.items.len == 0) {
                try p.errTok(.pragma_pack_empty_stack, arg);
                try Pragma.err(p.pp, arg, .pragma_pack_empty_stack, .{});
            }
        }
        if (new_val) |some| {

@ -115,14 +114,14 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
    }

    if (tok_ids[idx] != .r_paren) {
        return p.errTok(.pragma_pack_rparen, idx);
        return Pragma.err(p.pp, idx, .pragma_pack_rparen, .{});
    }
}

fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 {
    const res = p.parseNumberToken(tok_i) catch |err| switch (err) {
        error.ParsingFailed => {
            try p.errTok(.pragma_pack_int, tok_i);
            try Pragma.err(p.pp, tok_i, .pragma_pack_int, .{});
            return null;
        },
        else => |e| return e,

@ -131,7 +130,7 @@ fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 {
    switch (int) {
        1, 2, 4, 8, 16 => return @intCast(int),
        else => {
            try p.errTok(.pragma_pack_int, tok_i);
            try Pragma.err(p.pp, tok_i, .pragma_pack_int, .{});
            return null;
        },
    }

@ -156,9 +155,3 @@ fn pop(pack: *Pack, p: *Parser, maybe_label: ?[]const u8) void {
        p.pragma_pack = prev.val;
    }
}

fn preserveTokens(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
    _ = pp;
    _ = start_idx;
    return true;
}
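
As context for the pack.zig diff above: the handler keeps a stack of (label, value) pairs and pushes or pops it when it sees #pragma pack(push, ...) or #pragma pack(pop, ...). Below is a minimal, self-contained sketch of that push/pop-by-label behaviour; the names (PackEntry, PackStack) are hypothetical, not aro's API, and the sketch only illustrates the semantics the handler implements.

const std = @import("std");

// Hypothetical stand-in for the (label, val) entries the pragma handler stores.
const PackEntry = struct { label: []const u8, val: u8 };

const PackStack = struct {
    buf: [32]PackEntry = undefined,
    len: usize = 0,

    fn push(s: *PackStack, label: []const u8, val: u8) void {
        std.debug.assert(s.len < s.buf.len);
        s.buf[s.len] = .{ .label = label, .val = val };
        s.len += 1;
    }

    // pop with no label restores the most recent entry; pop with a label unwinds
    // to the most recent entry whose label matches (MSVC-style semantics).
    fn pop(s: *PackStack, maybe_label: ?[]const u8) ?u8 {
        if (maybe_label) |label| {
            var i = s.len;
            while (i > 0) : (i -= 1) {
                if (std.mem.eql(u8, s.buf[i - 1].label, label)) {
                    s.len = i - 1;
                    return s.buf[i - 1].val;
                }
            }
            return null; // label was never pushed
        }
        if (s.len == 0) return null; // empty stack, the .pragma_pack_empty_stack case
        s.len -= 1;
        return s.buf[s.len].val;
    }
};

test "pack stack push/pop" {
    var s: PackStack = .{};
    s.push("", 8);
    s.push("vectors", 1);
    try std.testing.expectEqual(@as(?u8, 1), s.pop("vectors"));
    try std.testing.expectEqual(@as(?u8, 8), s.pop(null));
    try std.testing.expectEqual(@as(?u8, null), s.pop(null));
}
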

162 lib/compiler/aro/aro/record_layout.zig vendored

@ -2,15 +2,18 @@
//! Licensed under MIT license: https://github.com/mahkoh/repr-c/tree/master/repc/facade

const std = @import("std");
const Type = @import("Type.zig");

const Attribute = @import("Attribute.zig");
const Compilation = @import("Compilation.zig");
const Parser = @import("Parser.zig");
const target_util = @import("target.zig");
const TypeStore = @import("TypeStore.zig");
const QualType = TypeStore.QualType;
const Type = TypeStore.Type;
const Record = Type.Record;
const Field = Record.Field;
const TypeLayout = Type.TypeLayout;
const FieldLayout = Type.FieldLayout;
const target_util = @import("target.zig");
const RecordLayout = Type.Record.Layout;
const FieldLayout = Type.Record.Field.Layout;

const BITS_PER_BYTE = 8;

@ -42,36 +45,33 @@ const SysVContext = struct {
|
|||
|
||||
comp: *const Compilation,
|
||||
|
||||
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext {
|
||||
fn init(qt: QualType, comp: *const Compilation, pragma_pack: ?u8) SysVContext {
|
||||
const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null;
|
||||
const req_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
|
||||
const req_align = @as(u32, (qt.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
|
||||
return SysVContext{
|
||||
.attr_packed = ty.hasAttribute(.@"packed"),
|
||||
.attr_packed = qt.hasAttribute(comp, .@"packed"),
|
||||
.max_field_align_bits = pack_value,
|
||||
.aligned_bits = req_align,
|
||||
.is_union = ty.is(.@"union"),
|
||||
.is_union = qt.is(comp, .@"union"),
|
||||
.size_bits = 0,
|
||||
.comp = comp,
|
||||
.ongoing_bitfield = null,
|
||||
};
|
||||
}
|
||||
|
||||
fn layoutFields(self: *SysVContext, rec: *const Record) !void {
|
||||
for (rec.fields, 0..) |*fld, fld_indx| {
|
||||
if (fld.ty.specifier == .invalid) continue;
|
||||
const type_layout = computeLayout(fld.ty, self.comp);
|
||||
fn layoutFields(self: *SysVContext, fields: []Type.Record.Field) !void {
|
||||
for (fields) |*field| {
|
||||
if (field.qt.isInvalid()) continue;
|
||||
const type_layout = computeLayout(field.qt, self.comp);
|
||||
|
||||
var field_attrs: ?[]const Attribute = null;
|
||||
if (rec.field_attributes) |attrs| {
|
||||
field_attrs = attrs[fld_indx];
|
||||
}
|
||||
const attributes = field.attributes(self.comp);
|
||||
if (self.comp.target.isMinGW()) {
|
||||
fld.layout = try self.layoutMinGWField(fld, field_attrs, type_layout);
|
||||
field.layout = try self.layoutMinGWField(field, attributes, type_layout);
|
||||
} else {
|
||||
if (fld.isRegularField()) {
|
||||
fld.layout = try self.layoutRegularField(field_attrs, type_layout);
|
||||
if (field.bit_width.unpack()) |bit_width| {
|
||||
field.layout = try self.layoutBitField(attributes, type_layout, field.name_tok != 0, bit_width);
|
||||
} else {
|
||||
fld.layout = try self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth());
|
||||
field.layout = try self.layoutRegularField(attributes, type_layout);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -83,7 +83,7 @@ const SysVContext = struct {
|
|||
/// - the field is a bit-field and the previous field was a non-zero-sized bit-field with the same type size
|
||||
/// - the field is a zero-sized bit-field and the previous field was not a non-zero-sized bit-field
|
||||
/// See test case 0068.
|
||||
fn ignoreTypeAlignment(is_attr_packed: bool, bit_width: ?u32, ongoing_bitfield: ?OngoingBitfield, fld_layout: TypeLayout) bool {
|
||||
fn ignoreTypeAlignment(is_attr_packed: bool, bit_width: ?u32, ongoing_bitfield: ?OngoingBitfield, fld_layout: RecordLayout) bool {
|
||||
if (is_attr_packed) return true;
|
||||
if (bit_width) |width| {
|
||||
if (ongoing_bitfield) |ongoing| {
|
||||
|
|
@ -98,12 +98,12 @@ const SysVContext = struct {
|
|||
fn layoutMinGWField(
|
||||
self: *SysVContext,
|
||||
field: *const Field,
|
||||
field_attrs: ?[]const Attribute,
|
||||
field_layout: TypeLayout,
|
||||
field_attrs: []const Attribute,
|
||||
field_layout: RecordLayout,
|
||||
) !FieldLayout {
|
||||
const annotation_alignment_bits = BITS_PER_BYTE * @as(u32, (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1));
|
||||
const annotation_alignment_bits = BITS_PER_BYTE * (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1);
|
||||
const is_attr_packed = self.attr_packed or isPacked(field_attrs);
|
||||
const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout);
|
||||
const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width.unpack(), self.ongoing_bitfield, field_layout);
|
||||
|
||||
var field_alignment_bits: u64 = field_layout.field_alignment_bits;
|
||||
if (ignore_type_alignment) {
|
||||
|
|
@ -120,16 +120,16 @@ const SysVContext = struct {
|
|||
// - the field is a non-zero-width bit-field and not packed.
|
||||
// See test case 0069.
|
||||
const update_record_alignment =
|
||||
field.isRegularField() or
|
||||
(field.specifiedBitWidth() == 0 and self.ongoing_bitfield != null) or
|
||||
(field.specifiedBitWidth() != 0 and !is_attr_packed);
|
||||
field.bit_width == .null or
|
||||
(field.bit_width.unpack().? == 0 and self.ongoing_bitfield != null) or
|
||||
(field.bit_width.unpack().? != 0 and !is_attr_packed);
|
||||
|
||||
// If a field affects the alignment of a record, the alignment is calculated in the
|
||||
// usual way except that __attribute__((packed)) is ignored on a zero-width bit-field.
|
||||
// See test case 0068.
|
||||
if (update_record_alignment) {
|
||||
var ty_alignment_bits = field_layout.field_alignment_bits;
|
||||
if (is_attr_packed and (field.isRegularField() or field.specifiedBitWidth() != 0)) {
|
||||
if (is_attr_packed and (field.bit_width == .null or field.bit_width.unpack().? != 0)) {
|
||||
ty_alignment_bits = BITS_PER_BYTE;
|
||||
}
|
||||
ty_alignment_bits = @max(ty_alignment_bits, annotation_alignment_bits);
|
||||
|
|
@ -145,10 +145,10 @@ const SysVContext = struct {
|
|||
// @attr_packed _ { size: 64, alignment: 64 }long long:0,
|
||||
// { offset: 8, size: 8 }d { size: 8, alignment: 8 }char,
|
||||
// }
|
||||
if (field.isRegularField()) {
|
||||
return self.layoutRegularFieldMinGW(field_layout.size_bits, field_alignment_bits);
|
||||
if (field.bit_width.unpack()) |bit_width| {
|
||||
return self.layoutBitFieldMinGW(field_layout.size_bits, field_alignment_bits, field.name_tok != 0, bit_width);
|
||||
} else {
|
||||
return self.layoutBitFieldMinGW(field_layout.size_bits, field_alignment_bits, field.isNamed(), field.specifiedBitWidth());
|
||||
return self.layoutRegularFieldMinGW(field_layout.size_bits, field_alignment_bits);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -227,8 +227,8 @@ const SysVContext = struct {
|
|||
|
||||
fn layoutRegularField(
|
||||
self: *SysVContext,
|
||||
fld_attrs: ?[]const Attribute,
|
||||
fld_layout: TypeLayout,
|
||||
fld_attrs: []const Attribute,
|
||||
fld_layout: RecordLayout,
|
||||
) !FieldLayout {
|
||||
var fld_align_bits = fld_layout.field_alignment_bits;
|
||||
|
||||
|
|
@ -240,7 +240,7 @@ const SysVContext = struct {
|
|||
|
||||
// The field alignment can be increased by __attribute__((aligned)) annotations on the
|
||||
// field. See test case 0085.
|
||||
if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
|
||||
if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
|
||||
fld_align_bits = @max(fld_align_bits, @as(u32, anno) * BITS_PER_BYTE);
|
||||
}
|
||||
|
||||
|
|
@ -268,8 +268,8 @@ const SysVContext = struct {
|
|||
|
||||
fn layoutBitField(
|
||||
self: *SysVContext,
|
||||
fld_attrs: ?[]const Attribute,
|
||||
fld_layout: TypeLayout,
|
||||
fld_attrs: []const Attribute,
|
||||
fld_layout: RecordLayout,
|
||||
is_named: bool,
|
||||
bit_width: u64,
|
||||
) !FieldLayout {
|
||||
|
|
@ -302,7 +302,7 @@ const SysVContext = struct {
|
|||
const attr_packed = self.attr_packed or isPacked(fld_attrs);
|
||||
const has_packing_annotation = attr_packed or self.max_field_align_bits != null;
|
||||
|
||||
const annotation_alignment = if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1;
|
||||
const annotation_alignment = if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1;
|
||||
|
||||
const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits;
|
||||
var field_align_bits: u64 = 1;
|
||||
|
|
@ -403,9 +403,9 @@ const MsvcContext = struct {
|
|||
is_union: bool,
|
||||
comp: *const Compilation,
|
||||
|
||||
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) MsvcContext {
|
||||
fn init(qt: QualType, comp: *const Compilation, pragma_pack: ?u8) MsvcContext {
|
||||
var pack_value: ?u32 = null;
|
||||
if (ty.hasAttribute(.@"packed")) {
|
||||
if (qt.hasAttribute(comp, .@"packed")) {
|
||||
// __attribute__((packed)) behaves like #pragma pack(1) in clang. See test case 0056.
|
||||
pack_value = BITS_PER_BYTE;
|
||||
}
|
||||
|
|
@ -420,8 +420,8 @@ const MsvcContext = struct {
|
|||
|
||||
// The required alignment can be increased by adding a __declspec(align)
|
||||
// annotation. See test case 0023.
|
||||
const must_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
|
||||
return MsvcContext{
|
||||
const must_align = @as(u32, (qt.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
|
||||
return .{
|
||||
.req_align_bits = must_align,
|
||||
.pointer_align_bits = must_align,
|
||||
.field_align_bits = must_align,
|
||||
|
|
@ -429,26 +429,26 @@ const MsvcContext = struct {
|
|||
.max_field_align_bits = pack_value,
|
||||
.ongoing_bitfield = null,
|
||||
.contains_non_bitfield = false,
|
||||
.is_union = ty.is(.@"union"),
|
||||
.is_union = qt.is(comp, .@"union"),
|
||||
.comp = comp,
|
||||
};
|
||||
}
|
||||
|
||||
fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) !FieldLayout {
|
||||
const type_layout = computeLayout(fld.ty, self.comp);
|
||||
fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: []const Attribute) !FieldLayout {
|
||||
const type_layout = computeLayout(fld.qt, self.comp);
|
||||
|
||||
// The required alignment of the field is the maximum of the required alignment of the
|
||||
// underlying type and the __declspec(align) annotation on the field itself.
|
||||
// See test case 0028.
|
||||
var req_align = type_layout.required_alignment_bits;
|
||||
if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
|
||||
if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
|
||||
req_align = @max(@as(u32, anno) * BITS_PER_BYTE, req_align);
|
||||
}
|
||||
|
||||
// The required alignment of a record is the maximum of the required alignments of its
|
||||
// fields except that the required alignment of bitfields is ignored.
|
||||
// See test case 0029.
|
||||
if (fld.isRegularField()) {
|
||||
if (fld.bit_width == .null) {
|
||||
self.req_align_bits = @max(self.req_align_bits, req_align);
|
||||
}
|
||||
|
||||
|
|
@ -459,7 +459,7 @@ const MsvcContext = struct {
|
|||
fld_align_bits = @min(fld_align_bits, max_align);
|
||||
}
|
||||
// check the requested alignment of the field type.
|
||||
if (fld.ty.requestedAlignment(self.comp)) |type_req_align| {
|
||||
if (fld.qt.requestedAlignment(self.comp)) |type_req_align| {
|
||||
fld_align_bits = @max(fld_align_bits, type_req_align * 8);
|
||||
}
|
||||
|
||||
|
|
@ -471,10 +471,10 @@ const MsvcContext = struct {
|
|||
// __attribute__((packed)) on a field is a clang extension. It behaves as if #pragma
|
||||
// pack(1) had been applied only to this field. See test case 0057.
|
||||
fld_align_bits = @max(fld_align_bits, req_align);
|
||||
if (fld.isRegularField()) {
|
||||
return self.layoutRegularField(type_layout.size_bits, fld_align_bits);
|
||||
if (fld.bit_width.unpack()) |bit_width| {
|
||||
return self.layoutBitField(type_layout.size_bits, fld_align_bits, bit_width);
|
||||
} else {
|
||||
return self.layoutBitField(type_layout.size_bits, fld_align_bits, fld.specifiedBitWidth());
|
||||
return self.layoutRegularField(type_layout.size_bits, fld_align_bits);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -567,16 +567,16 @@ const MsvcContext = struct {
|
|||
}
|
||||
};
|
||||
|
||||
pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) Error!void {
|
||||
pub fn compute(fields: []Type.Record.Field, qt: QualType, comp: *const Compilation, pragma_pack: ?u8) Error!Type.Record.Layout {
|
||||
switch (comp.langopts.emulate) {
|
||||
.gcc, .clang => {
|
||||
var context = SysVContext.init(ty, comp, pragma_pack);
|
||||
var context = SysVContext.init(qt, comp, pragma_pack);
|
||||
|
||||
try context.layoutFields(rec);
|
||||
try context.layoutFields(fields);
|
||||
|
||||
context.size_bits = try alignForward(context.size_bits, context.aligned_bits);
|
||||
|
||||
rec.type_layout = .{
|
||||
return .{
|
||||
.size_bits = context.size_bits,
|
||||
.field_alignment_bits = context.aligned_bits,
|
||||
.pointer_alignment_bits = context.aligned_bits,
|
||||
|
|
@ -584,15 +584,10 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
|
|||
};
|
||||
},
|
||||
.msvc => {
|
||||
var context = MsvcContext.init(ty, comp, pragma_pack);
|
||||
for (rec.fields, 0..) |*fld, fld_indx| {
|
||||
if (fld.ty.specifier == .invalid) continue;
|
||||
var field_attrs: ?[]const Attribute = null;
|
||||
if (rec.field_attributes) |attrs| {
|
||||
field_attrs = attrs[fld_indx];
|
||||
}
|
||||
|
||||
fld.layout = try context.layoutField(fld, field_attrs);
|
||||
var context = MsvcContext.init(qt, comp, pragma_pack);
|
||||
for (fields) |*field| {
|
||||
if (field.qt.isInvalid()) continue;
|
||||
field.layout = try context.layoutField(field, field.attributes(comp));
|
||||
}
|
||||
if (context.size_bits == 0) {
|
||||
// As an extension, MSVC allows records that only contain zero-sized bitfields and empty
|
||||
|
|
@ -601,7 +596,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
|
|||
context.handleZeroSizedRecord();
|
||||
}
|
||||
context.size_bits = try alignForward(context.size_bits, context.pointer_align_bits);
|
||||
rec.type_layout = .{
|
||||
return .{
|
||||
.size_bits = context.size_bits,
|
||||
.field_alignment_bits = context.field_align_bits,
|
||||
.pointer_alignment_bits = context.pointer_align_bits,
|
||||
|
|
@ -611,23 +606,26 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
|
|||
}
|
||||
}
|
||||
|
||||
fn computeLayout(ty: Type, comp: *const Compilation) TypeLayout {
|
||||
if (ty.getRecord()) |rec| {
|
||||
const requested = BITS_PER_BYTE * (ty.requestedAlignment(comp) orelse 0);
|
||||
return .{
|
||||
.size_bits = rec.type_layout.size_bits,
|
||||
.pointer_alignment_bits = @max(requested, rec.type_layout.pointer_alignment_bits),
|
||||
.field_alignment_bits = @max(requested, rec.type_layout.field_alignment_bits),
|
||||
.required_alignment_bits = rec.type_layout.required_alignment_bits,
|
||||
};
|
||||
} else {
|
||||
const type_align = ty.alignof(comp) * BITS_PER_BYTE;
|
||||
return .{
|
||||
.size_bits = ty.bitSizeof(comp) orelse 0,
|
||||
.pointer_alignment_bits = type_align,
|
||||
.field_alignment_bits = type_align,
|
||||
.required_alignment_bits = BITS_PER_BYTE,
|
||||
};
|
||||
fn computeLayout(qt: QualType, comp: *const Compilation) RecordLayout {
|
||||
switch (qt.base(comp).type) {
|
||||
.@"struct", .@"union" => |record| {
|
||||
const requested = BITS_PER_BYTE * (qt.requestedAlignment(comp) orelse 0);
|
||||
return .{
|
||||
.size_bits = record.layout.?.size_bits,
|
||||
.pointer_alignment_bits = @max(requested, record.layout.?.pointer_alignment_bits),
|
||||
.field_alignment_bits = @max(requested, record.layout.?.field_alignment_bits),
|
||||
.required_alignment_bits = record.layout.?.required_alignment_bits,
|
||||
};
|
||||
},
|
||||
else => {
|
||||
const type_align = qt.alignof(comp) * BITS_PER_BYTE;
|
||||
return .{
|
||||
.size_bits = qt.bitSizeofOrNull(comp) orelse 0,
|
||||
.pointer_alignment_bits = type_align,
|
||||
.field_alignment_bits = type_align,
|
||||
.required_alignment_bits = BITS_PER_BYTE,
|
||||
};
|
||||
},
|
||||
}
|
||||
}
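
One detail of record_layout.zig worth spelling out: both the SysV and MSVC paths above finish by rounding the accumulated size up to the record alignment (the alignForward(context.size_bits, ...) calls). A minimal sketch of that rounding, using a hypothetical helper name rather than aro's actual function, which additionally reports overflow as an error:

const std = @import("std");

// Round a bit count up to the next multiple of the alignment, also in bits.
fn alignForwardBits(size_bits: u64, align_bits: u64) u64 {
    std.debug.assert(align_bits != 0);
    return (size_bits + align_bits - 1) / align_bits * align_bits;
}

test "record size is rounded up to its alignment" {
    // e.g. struct { int i; char c; } with a 32-bit, 4-byte-aligned int:
    // 40 bits of fields round up to 64 bits (8 bytes total).
    try std.testing.expectEqual(@as(u64, 64), alignForwardBits(40, 32));
    try std.testing.expectEqual(@as(u64, 64), alignForwardBits(64, 32));
}
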

391 lib/compiler/aro/aro/target.zig vendored

@ -1,15 +1,18 @@
const std = @import("std");

const backend = @import("../backend.zig");

const LangOpts = @import("LangOpts.zig");
const Type = @import("Type.zig");
const TargetSet = @import("Builtins/Properties.zig").TargetSet;
const QualType = @import("TypeStore.zig").QualType;

/// intmax_t for this target
pub fn intMaxType(target: std.Target) Type {
pub fn intMaxType(target: std.Target) QualType {
    switch (target.cpu.arch) {
        .aarch64,
        .aarch64_be,
        .sparc64,
        => if (target.os.tag != .openbsd) return .{ .specifier = .long },
        => if (target.os.tag != .openbsd) return .long,

        .bpfel,
        .bpfeb,

@ -19,28 +22,28 @@ pub fn intMaxType(target: std.Target) Type {
|
|||
.powerpc64,
|
||||
.powerpc64le,
|
||||
.ve,
|
||||
=> return .{ .specifier = .long },
|
||||
=> return .long,
|
||||
|
||||
.x86_64 => switch (target.os.tag) {
|
||||
.windows, .openbsd => {},
|
||||
else => switch (target.abi) {
|
||||
.gnux32, .muslx32 => {},
|
||||
else => return .{ .specifier = .long },
|
||||
else => return .long,
|
||||
},
|
||||
},
|
||||
|
||||
else => {},
|
||||
}
|
||||
return .{ .specifier = .long_long };
|
||||
return .long_long;
|
||||
}
|
||||
|
||||
/// intptr_t for this target
|
||||
pub fn intPtrType(target: std.Target) Type {
|
||||
if (target.os.tag == .haiku) return .{ .specifier = .long };
|
||||
pub fn intPtrType(target: std.Target) QualType {
|
||||
if (target.os.tag == .haiku) return .long;
|
||||
|
||||
switch (target.cpu.arch) {
|
||||
.aarch64, .aarch64_be => switch (target.os.tag) {
|
||||
.windows => return .{ .specifier = .long_long },
|
||||
.windows => return .long_long,
|
||||
else => {},
|
||||
},
|
||||
|
||||
|
|
@ -55,28 +58,28 @@ pub fn intPtrType(target: std.Target) Type {
|
|||
.spirv32,
|
||||
.arc,
|
||||
.avr,
|
||||
=> return .{ .specifier = .int },
|
||||
=> return .int,
|
||||
|
||||
.sparc => switch (target.os.tag) {
|
||||
.netbsd, .openbsd => {},
|
||||
else => return .{ .specifier = .int },
|
||||
else => return .int,
|
||||
},
|
||||
|
||||
.powerpc, .powerpcle => switch (target.os.tag) {
|
||||
.linux, .freebsd, .netbsd => return .{ .specifier = .int },
|
||||
.linux, .freebsd, .netbsd => return .int,
|
||||
else => {},
|
||||
},
|
||||
|
||||
// 32-bit x86 Darwin, OpenBSD, and RTEMS use long (the default); others use int
|
||||
.x86 => switch (target.os.tag) {
|
||||
.openbsd, .rtems => {},
|
||||
else => if (!target.os.tag.isDarwin()) return .{ .specifier = .int },
|
||||
else => if (!target.os.tag.isDarwin()) return .int,
|
||||
},
|
||||
|
||||
.x86_64 => switch (target.os.tag) {
|
||||
.windows => return .{ .specifier = .long_long },
|
||||
.windows => return .long_long,
|
||||
else => switch (target.abi) {
|
||||
.gnux32, .muslx32 => return .{ .specifier = .int },
|
||||
.gnux32, .muslx32 => return .int,
|
||||
else => {},
|
||||
},
|
||||
},
|
||||
|
|
@ -84,29 +87,29 @@ pub fn intPtrType(target: std.Target) Type {
|
|||
else => {},
|
||||
}
|
||||
|
||||
return .{ .specifier = .long };
|
||||
return .long;
|
||||
}
|
||||
|
||||
/// int16_t for this target
|
||||
pub fn int16Type(target: std.Target) Type {
|
||||
pub fn int16Type(target: std.Target) QualType {
|
||||
return switch (target.cpu.arch) {
|
||||
.avr => .{ .specifier = .int },
|
||||
else => .{ .specifier = .short },
|
||||
.avr => .int,
|
||||
else => .short,
|
||||
};
|
||||
}
|
||||
|
||||
/// sig_atomic_t for this target
|
||||
pub fn sigAtomicType(target: std.Target) Type {
|
||||
if (target.cpu.arch.isWasm()) return .{ .specifier = .long };
|
||||
pub fn sigAtomicType(target: std.Target) QualType {
|
||||
if (target.cpu.arch.isWasm()) return .long;
|
||||
return switch (target.cpu.arch) {
|
||||
.avr => .{ .specifier = .schar },
|
||||
.msp430 => .{ .specifier = .long },
|
||||
else => .{ .specifier = .int },
|
||||
.avr => .schar,
|
||||
.msp430 => .long,
|
||||
else => .int,
|
||||
};
|
||||
}
|
||||
|
||||
/// int64_t for this target
|
||||
pub fn int64Type(target: std.Target) Type {
|
||||
pub fn int64Type(target: std.Target) QualType {
|
||||
switch (target.cpu.arch) {
|
||||
.loongarch64,
|
||||
.ve,
|
||||
|
|
@ -116,20 +119,20 @@ pub fn int64Type(target: std.Target) Type {
|
|||
.powerpc64le,
|
||||
.bpfel,
|
||||
.bpfeb,
|
||||
=> return .{ .specifier = .long },
|
||||
=> return .long,
|
||||
|
||||
.sparc64 => return intMaxType(target),
|
||||
|
||||
.x86, .x86_64 => if (!target.os.tag.isDarwin()) return intMaxType(target),
|
||||
.aarch64, .aarch64_be => if (!target.os.tag.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .{ .specifier = .long },
|
||||
.aarch64, .aarch64_be => if (!target.os.tag.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .long,
|
||||
else => {},
|
||||
}
|
||||
return .{ .specifier = .long_long };
|
||||
return .long_long;
|
||||
}
|
||||
|
||||
pub fn float80Type(target: std.Target) ?Type {
|
||||
pub fn float80Type(target: std.Target) ?QualType {
|
||||
switch (target.cpu.arch) {
|
||||
.x86, .x86_64 => return .{ .specifier = .long_double },
|
||||
.x86, .x86_64 => return .long_double,
|
||||
else => {},
|
||||
}
|
||||
return null;
|
||||
|
|
@ -165,7 +168,7 @@ pub fn ignoreNonZeroSizedBitfieldTypeAlignment(target: std.Target) bool {
|
|||
switch (target.cpu.arch) {
|
||||
.avr => return true,
|
||||
.arm => {
|
||||
if (target.cpu.has(.arm, .has_v7)) {
|
||||
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
|
||||
switch (target.os.tag) {
|
||||
.ios => return true,
|
||||
else => return false,
|
||||
|
|
@ -188,7 +191,7 @@ pub fn minZeroWidthBitfieldAlignment(target: std.Target) ?u29 {
|
|||
switch (target.cpu.arch) {
|
||||
.avr => return 8,
|
||||
.arm => {
|
||||
if (target.cpu.has(.arm, .has_v7)) {
|
||||
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
|
||||
switch (target.os.tag) {
|
||||
.ios => return 32,
|
||||
else => return null,
|
||||
|
|
@ -206,7 +209,7 @@ pub fn unnamedFieldAffectsAlignment(target: std.Target) bool {
|
|||
return true;
|
||||
},
|
||||
.armeb => {
|
||||
if (target.cpu.has(.arm, .has_v7)) {
|
||||
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
|
||||
if (std.Target.Abi.default(target.cpu.arch, target.os.tag) == .eabi) return true;
|
||||
}
|
||||
},
|
||||
|
|
@ -233,7 +236,7 @@ pub fn defaultAlignment(target: std.Target) u29 {
|
|||
switch (target.cpu.arch) {
|
||||
.avr => return 1,
|
||||
.arm => if (target.abi.isAndroid() or target.os.tag == .ios) return 16 else return 8,
|
||||
.sparc => if (target.cpu.has(.sparc, .v9)) return 16 else return 8,
|
||||
.sparc => if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) return 16 else return 8,
|
||||
.mips, .mipsel => switch (target.abi) {
|
||||
.none, .gnuabi64 => return 16,
|
||||
else => return 8,
|
||||
|
|
@ -245,7 +248,8 @@ pub fn defaultAlignment(target: std.Target) u29 {
|
|||
pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
|
||||
// Android is linux but not gcc, so these checks go first
|
||||
// the rest for documentation as fn returns .clang
|
||||
if (target.abi.isAndroid() or
|
||||
if (target.os.tag.isDarwin() or
|
||||
target.abi.isAndroid() or
|
||||
target.os.tag.isBSD() or
|
||||
target.os.tag == .fuchsia or
|
||||
target.os.tag == .solaris or
|
||||
|
|
@ -271,7 +275,7 @@ pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
|
|||
pub fn hasFloat128(target: std.Target) bool {
|
||||
if (target.cpu.arch.isWasm()) return true;
|
||||
if (target.os.tag.isDarwin()) return false;
|
||||
if (target.cpu.arch.isPowerPC()) return target.cpu.has(.powerpc, .float128);
|
||||
if (target.cpu.arch.isPowerPC()) return std.Target.powerpc.featureSetHas(target.cpu.features, .float128);
|
||||
return switch (target.os.tag) {
|
||||
.dragonfly,
|
||||
.haiku,
|
||||
|
|
@ -339,7 +343,7 @@ pub const FPSemantics = enum {
|
|||
.spirv32,
|
||||
.spirv64,
|
||||
=> return .IEEEHalf,
|
||||
.x86, .x86_64 => if (target.cpu.has(.x86, .sse2)) return .IEEEHalf,
|
||||
.x86, .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .sse2)) return .IEEEHalf,
|
||||
else => {},
|
||||
}
|
||||
return null;
|
||||
|
|
@ -374,6 +378,10 @@ pub fn isCygwinMinGW(target: std.Target) bool {
|
|||
return target.os.tag == .windows and (target.abi == .gnu or target.abi == .cygnus);
|
||||
}
|
||||
|
||||
pub fn isPS(target: std.Target) bool {
|
||||
return (target.os.tag == .ps4 or target.os.tag == .ps5) and target.cpu.arch == .x86_64;
|
||||
}
|
||||
|
||||
pub fn builtinEnabled(target: std.Target, enabled_for: TargetSet) bool {
|
||||
var it = enabled_for.iterator();
|
||||
while (it.next()) |val| {
|
||||
|
|
@ -404,7 +412,7 @@ pub fn defaultFpEvalMethod(target: std.Target) LangOpts.FPEvalMethod {
|
|||
return .double;
|
||||
}
|
||||
}
|
||||
if (target.cpu.has(.x86, .sse)) {
|
||||
if (std.Target.x86.featureSetHas(target.cpu.features, .sse)) {
|
||||
return .source;
|
||||
}
|
||||
return .extended;
|
||||
|
|
@ -497,6 +505,8 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
|
|||
.spirv32,
|
||||
.loongarch32,
|
||||
.xtensa,
|
||||
.propeller,
|
||||
.or1k,
|
||||
=> {}, // Already 32 bit
|
||||
|
||||
.aarch64 => copy.cpu.arch = .arm,
|
||||
|
|
@ -530,6 +540,8 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
|
|||
.msp430,
|
||||
.xcore,
|
||||
.xtensa,
|
||||
.propeller,
|
||||
.or1k,
|
||||
=> return null,
|
||||
|
||||
.aarch64,
|
||||
|
|
@ -621,11 +633,14 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
|
|||
.nvptx64 => "nvptx64",
|
||||
.spirv32 => "spirv32",
|
||||
.spirv64 => "spirv64",
|
||||
.kalimba => "kalimba",
|
||||
.lanai => "lanai",
|
||||
.wasm32 => "wasm32",
|
||||
.wasm64 => "wasm64",
|
||||
.ve => "ve",
|
||||
// Note: propeller1, kalimba and or1k are not supported in LLVM; this is the Zig arch name
|
||||
.kalimba => "kalimba",
|
||||
.propeller => "propeller",
|
||||
.or1k => "or1k",
|
||||
};
|
||||
writer.writeAll(llvm_arch) catch unreachable;
|
||||
writer.writeByte('-') catch unreachable;
|
||||
|
|
@ -666,10 +681,12 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
|
|||
.driverkit => "driverkit",
|
||||
.visionos => "xros",
|
||||
.serenity => "serenity",
|
||||
.vulkan => "vulkan",
|
||||
.managarm => "managarm",
|
||||
.@"3ds",
|
||||
.vita,
|
||||
.opencl,
|
||||
.opengl,
|
||||
.vulkan,
|
||||
.plan9,
|
||||
.other,
|
||||
=> "unknown",
|
||||
|
|
@ -721,64 +738,262 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
|
|||
return writer.buffered();
|
||||
}
|
||||
|
||||
pub const DefaultPIStatus = enum { yes, no, depends_on_linker };
|
||||
|
||||
pub fn isPIEDefault(target: std.Target) DefaultPIStatus {
|
||||
return switch (target.os.tag) {
|
||||
.aix,
|
||||
.haiku,
|
||||
|
||||
.macos,
|
||||
.ios,
|
||||
.tvos,
|
||||
.watchos,
|
||||
.visionos,
|
||||
.driverkit,
|
||||
|
||||
.dragonfly,
|
||||
.netbsd,
|
||||
.freebsd,
|
||||
.solaris,
|
||||
|
||||
.cuda,
|
||||
.amdhsa,
|
||||
.amdpal,
|
||||
.mesa3d,
|
||||
|
||||
.ps4,
|
||||
.ps5,
|
||||
|
||||
.hurd,
|
||||
.zos,
|
||||
=> .no,
|
||||
|
||||
.openbsd,
|
||||
.fuchsia,
|
||||
=> .yes,
|
||||
|
||||
.linux => {
|
||||
if (target.abi == .ohos)
|
||||
return .yes;
|
||||
|
||||
switch (target.cpu.arch) {
|
||||
.ve => return .no,
|
||||
else => return if (target.os.tag == .linux or target.abi.isAndroid() or target.abi.isMusl()) .yes else .no,
|
||||
}
|
||||
},
|
||||
|
||||
.windows => {
|
||||
if (target.isMinGW())
|
||||
return .no;
|
||||
|
||||
if (target.abi == .itanium)
|
||||
return if (target.cpu.arch == .x86_64) .yes else .no;
|
||||
|
||||
if (target.abi == .msvc or target.abi == .none)
|
||||
return .depends_on_linker;
|
||||
|
||||
return .no;
|
||||
},
|
||||
|
||||
else => {
|
||||
switch (target.cpu.arch) {
|
||||
.hexagon => {
|
||||
// CLANG_DEFAULT_PIE_ON_LINUX
|
||||
return if (target.os.tag == .linux or target.abi.isAndroid() or target.abi.isMusl()) .yes else .no;
|
||||
},
|
||||
|
||||
else => return .no,
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isPICdefault(target: std.Target) DefaultPIStatus {
|
||||
return switch (target.os.tag) {
|
||||
.aix,
|
||||
.haiku,
|
||||
|
||||
.macos,
|
||||
.ios,
|
||||
.tvos,
|
||||
.watchos,
|
||||
.visionos,
|
||||
.driverkit,
|
||||
|
||||
.amdhsa,
|
||||
.amdpal,
|
||||
.mesa3d,
|
||||
|
||||
.ps4,
|
||||
.ps5,
|
||||
=> .yes,
|
||||
|
||||
.fuchsia,
|
||||
.cuda,
|
||||
.zos,
|
||||
=> .no,
|
||||
|
||||
.dragonfly,
|
||||
.openbsd,
|
||||
.netbsd,
|
||||
.freebsd,
|
||||
.solaris,
|
||||
.hurd,
|
||||
=> {
|
||||
return switch (target.cpu.arch) {
|
||||
.mips64, .mips64el => .yes,
|
||||
else => .no,
|
||||
};
|
||||
},
|
||||
|
||||
.linux => {
|
||||
if (target.abi == .ohos)
|
||||
return .no;
|
||||
|
||||
return switch (target.cpu.arch) {
|
||||
.mips64, .mips64el => .yes,
|
||||
else => .no,
|
||||
};
|
||||
},
|
||||
|
||||
.windows => {
|
||||
if (target.isMinGW())
|
||||
return if (target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64) .yes else .no;
|
||||
|
||||
if (target.abi == .itanium)
|
||||
return if (target.cpu.arch == .x86_64) .yes else .no;
|
||||
|
||||
if (target.abi == .msvc or target.abi == .none)
|
||||
return .depends_on_linker;
|
||||
|
||||
if (target.ofmt == .macho)
|
||||
return .yes;
|
||||
|
||||
return switch (target.cpu.arch) {
|
||||
.x86_64, .mips64, .mips64el => .yes,
|
||||
else => .no,
|
||||
};
|
||||
},
|
||||
|
||||
else => {
|
||||
if (target.ofmt == .macho)
|
||||
return .yes;
|
||||
|
||||
return switch (target.cpu.arch) {
|
||||
.mips64, .mips64el => .yes,
|
||||
else => .no,
|
||||
};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isPICDefaultForced(target: std.Target) DefaultPIStatus {
|
||||
return switch (target.os.tag) {
|
||||
.aix, .amdhsa, .amdpal, .mesa3d => .yes,
|
||||
|
||||
.haiku,
|
||||
.dragonfly,
|
||||
.openbsd,
|
||||
.netbsd,
|
||||
.freebsd,
|
||||
.solaris,
|
||||
.cuda,
|
||||
.ps4,
|
||||
.ps5,
|
||||
.hurd,
|
||||
.linux,
|
||||
.fuchsia,
|
||||
.zos,
|
||||
=> .no,
|
||||
|
||||
.windows => {
|
||||
if (target.isMinGW())
|
||||
return .yes;
|
||||
|
||||
if (target.abi == .itanium)
|
||||
return if (target.cpu.arch == .x86_64) .yes else .no;
|
||||
|
||||
// if (bfd) return target.cpu.arch == .x86_64 else target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64;
|
||||
if (target.abi == .msvc or target.abi == .none)
|
||||
return .depends_on_linker;
|
||||
|
||||
if (target.ofmt == .macho)
|
||||
return if (target.cpu.arch == .aarch64 or target.cpu.arch == .x86_64) .yes else .no;
|
||||
|
||||
return if (target.cpu.arch == .x86_64) .yes else .no;
|
||||
},
|
||||
|
||||
.macos,
|
||||
.ios,
|
||||
.tvos,
|
||||
.watchos,
|
||||
.visionos,
|
||||
.driverkit,
|
||||
=> if (target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64) .yes else .no,
|
||||
|
||||
else => {
|
||||
return switch (target.cpu.arch) {
|
||||
.hexagon,
|
||||
.lanai,
|
||||
.avr,
|
||||
.riscv32,
|
||||
.riscv64,
|
||||
.csky,
|
||||
.xcore,
|
||||
.wasm32,
|
||||
.wasm64,
|
||||
.ve,
|
||||
.spirv32,
|
||||
.spirv64,
|
||||
=> .no,
|
||||
|
||||
.msp430 => .yes,
|
||||
|
||||
else => {
|
||||
if (target.ofmt == .macho)
|
||||
return if (target.cpu.arch == .aarch64 or target.cpu.arch == .x86_64) .yes else .no;
|
||||
return .no;
|
||||
},
|
||||
};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
test "alignment functions - smoke test" {
|
||||
var target: std.Target = undefined;
|
||||
const x86 = std.Target.Cpu.Arch.x86_64;
|
||||
target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none);
|
||||
target.cpu = std.Target.Cpu.baseline(x86, target.os);
|
||||
target.abi = std.Target.Abi.default(x86, target.os.tag);
|
||||
const linux: std.Target.Os = .{ .tag = .linux, .version_range = .{ .none = {} } };
|
||||
const x86_64_target: std.Target = .{
|
||||
.abi = std.Target.Abi.default(.x86_64, linux.tag),
|
||||
.cpu = std.Target.Cpu.Model.generic(.x86_64).toCpu(.x86_64),
|
||||
.os = linux,
|
||||
.ofmt = .elf,
|
||||
};
|
||||
|
||||
try std.testing.expect(isTlsSupported(target));
|
||||
try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(target));
|
||||
try std.testing.expect(minZeroWidthBitfieldAlignment(target) == null);
|
||||
try std.testing.expect(!unnamedFieldAffectsAlignment(target));
|
||||
try std.testing.expect(defaultAlignment(target) == 16);
|
||||
try std.testing.expect(!packAllEnums(target));
|
||||
try std.testing.expect(systemCompiler(target) == .gcc);
|
||||
|
||||
const arm = std.Target.Cpu.Arch.arm;
|
||||
target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none);
|
||||
target.cpu = std.Target.Cpu.baseline(arm, target.os);
|
||||
target.abi = std.Target.Abi.default(arm, target.os.tag);
|
||||
|
||||
try std.testing.expect(!isTlsSupported(target));
|
||||
try std.testing.expect(ignoreNonZeroSizedBitfieldTypeAlignment(target));
|
||||
try std.testing.expectEqual(@as(?u29, 32), minZeroWidthBitfieldAlignment(target));
|
||||
try std.testing.expect(unnamedFieldAffectsAlignment(target));
|
||||
try std.testing.expect(defaultAlignment(target) == 16);
|
||||
try std.testing.expect(!packAllEnums(target));
|
||||
try std.testing.expect(systemCompiler(target) == .clang);
|
||||
try std.testing.expect(isTlsSupported(x86_64_target));
|
||||
try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(x86_64_target));
|
||||
try std.testing.expect(minZeroWidthBitfieldAlignment(x86_64_target) == null);
|
||||
try std.testing.expect(!unnamedFieldAffectsAlignment(x86_64_target));
|
||||
try std.testing.expect(defaultAlignment(x86_64_target) == 16);
|
||||
try std.testing.expect(!packAllEnums(x86_64_target));
|
||||
try std.testing.expect(systemCompiler(x86_64_target) == .gcc);
|
||||
}
|
||||
|
||||
test "target size/align tests" {
|
||||
var comp: @import("Compilation.zig") = undefined;
|
||||
|
||||
const x86 = std.Target.Cpu.Arch.x86;
|
||||
comp.target.cpu.arch = x86;
|
||||
comp.target.cpu.model = &std.Target.x86.cpu.i586;
|
||||
comp.target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none);
|
||||
comp.target.abi = std.Target.Abi.gnu;
|
||||
|
||||
const tt: Type = .{
|
||||
.specifier = .long_long,
|
||||
const linux: std.Target.Os = .{ .tag = .linux, .version_range = .{ .none = {} } };
|
||||
const x86_target: std.Target = .{
|
||||
.abi = std.Target.Abi.default(.x86, linux.tag),
|
||||
.cpu = std.Target.Cpu.Model.generic(.x86).toCpu(.x86),
|
||||
.os = linux,
|
||||
.ofmt = .elf,
|
||||
};
|
||||
comp.target = x86_target;
|
||||
|
||||
try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp).?);
|
||||
const tt: QualType = .long_long;
|
||||
|
||||
try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp));
|
||||
try std.testing.expectEqual(@as(u64, 4), tt.alignof(&comp));
|
||||
|
||||
const arm = std.Target.Cpu.Arch.arm;
|
||||
comp.target.cpu = std.Target.Cpu.Model.toCpu(&std.Target.arm.cpu.cortex_r4, arm);
|
||||
comp.target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none);
|
||||
comp.target.abi = std.Target.Abi.none;
|
||||
|
||||
const ct: Type = .{
|
||||
.specifier = .char,
|
||||
};
|
||||
|
||||
try std.testing.expectEqual(true, comp.target.cpu.has(.arm, .has_v7));
|
||||
try std.testing.expectEqual(@as(u64, 1), ct.sizeof(&comp).?);
|
||||
try std.testing.expectEqual(@as(u64, 1), ct.alignof(&comp));
|
||||
try std.testing.expectEqual(true, ignoreNonZeroSizedBitfieldTypeAlignment(comp.target));
|
||||
}
|
||||
|
||||
/// The canonical integer representation of nullptr_t.

368 lib/compiler/aro/aro/text_literal.zig vendored

@ -1,11 +1,13 @@
//! Parsing and classification of string and character literals

const std = @import("std");
const mem = std.mem;

const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const Diagnostics = @import("Diagnostics.zig");
const Tokenizer = @import("Tokenizer.zig");
const mem = std.mem;
const QualType = @import("TypeStore.zig").QualType;
const Source = @import("Source.zig");

pub const Item = union(enum) {
    /// decoded hex or character escape

@ -18,11 +20,6 @@ pub const Item = union(enum) {
|
|||
utf8_text: std.unicode.Utf8View,
|
||||
};
|
||||
|
||||
const CharDiagnostic = struct {
|
||||
tag: Diagnostics.Tag,
|
||||
extra: Diagnostics.Message.Extra,
|
||||
};
|
||||
|
||||
pub const Kind = enum {
|
||||
char,
|
||||
wide,
|
||||
|
|
@ -91,13 +88,13 @@ pub const Kind = enum {
|
|||
}
|
||||
|
||||
/// The C type of a character literal of this kind
|
||||
pub fn charLiteralType(kind: Kind, comp: *const Compilation) Type {
|
||||
pub fn charLiteralType(kind: Kind, comp: *const Compilation) QualType {
|
||||
return switch (kind) {
|
||||
.char => Type.int,
|
||||
.wide => comp.types.wchar,
|
||||
.utf_8 => .{ .specifier = .uchar },
|
||||
.utf_16 => comp.types.uint_least16_t,
|
||||
.utf_32 => comp.types.uint_least32_t,
|
||||
.char => .int,
|
||||
.wide => comp.type_store.wchar,
|
||||
.utf_8 => .uchar,
|
||||
.utf_16 => comp.type_store.uint_least16_t,
|
||||
.utf_32 => comp.type_store.uint_least32_t,
|
||||
.unterminated => unreachable,
|
||||
};
|
||||
}
|
||||
|
|
@ -120,7 +117,7 @@ pub const Kind = enum {
|
|||
pub fn charUnitSize(kind: Kind, comp: *const Compilation) Compilation.CharUnitSize {
|
||||
return switch (kind) {
|
||||
.char => .@"1",
|
||||
.wide => switch (comp.types.wchar.sizeof(comp).?) {
|
||||
.wide => switch (comp.type_store.wchar.sizeof(comp)) {
|
||||
2 => .@"2",
|
||||
4 => .@"4",
|
||||
else => unreachable,
|
||||
|
|
@ -140,37 +137,52 @@ pub const Kind = enum {
|
|||
}
|
||||
|
||||
/// The C type of an element of a string literal of this kind
|
||||
pub fn elementType(kind: Kind, comp: *const Compilation) Type {
|
||||
pub fn elementType(kind: Kind, comp: *const Compilation) QualType {
|
||||
return switch (kind) {
|
||||
.unterminated => unreachable,
|
||||
.char => .{ .specifier = .char },
|
||||
.utf_8 => if (comp.langopts.hasChar8_T()) .{ .specifier = .uchar } else .{ .specifier = .char },
|
||||
.char => .char,
|
||||
.utf_8 => if (comp.langopts.hasChar8_T()) .uchar else .char,
|
||||
else => kind.charLiteralType(comp),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const Ascii = struct {
|
||||
val: u7,
|
||||
|
||||
pub fn init(val: anytype) Ascii {
|
||||
return .{ .val = @intCast(val) };
|
||||
}
|
||||
|
||||
pub fn format(ctx: Ascii, w: *std.Io.Writer, fmt: []const u8) !usize {
|
||||
const i = Diagnostics.templateIndex(w, fmt, "{c}");
|
||||
if (std.ascii.isPrint(ctx.val)) {
|
||||
try w.writeByte(ctx.val);
|
||||
} else {
|
||||
try w.print("x{x:0>2}", .{ctx.val});
|
||||
}
|
||||
return i;
|
||||
}
|
||||
};
|
||||
|
||||
pub const Parser = struct {
|
||||
comp: *const Compilation,
|
||||
literal: []const u8,
|
||||
i: usize = 0,
|
||||
kind: Kind,
|
||||
max_codepoint: u21,
|
||||
loc: Source.Location,
|
||||
/// Offset added to `loc.byte_offset` when emitting an error.
|
||||
offset: u32 = 0,
|
||||
expansion_locs: []const Source.Location,
|
||||
/// We only want to issue a max of 1 error per char literal
|
||||
errored: bool = false,
|
||||
errors_buffer: [4]CharDiagnostic,
|
||||
errors_len: usize,
|
||||
comp: *const Compilation,
|
||||
|
||||
pub fn init(literal: []const u8, kind: Kind, max_codepoint: u21, comp: *const Compilation) Parser {
|
||||
return .{
|
||||
.literal = literal,
|
||||
.comp = comp,
|
||||
.kind = kind,
|
||||
.max_codepoint = max_codepoint,
|
||||
.errors_buffer = undefined,
|
||||
.errors_len = 0,
|
||||
};
|
||||
}
|
||||
/// Makes incorrect encoding always an error.
|
||||
/// Used when concatenating string literals.
|
||||
incorrect_encoding_is_error: bool = false,
|
||||
/// If this is false, do not issue any diagnostics for incorrect character encoding
|
||||
/// Incorrect encoding is allowed if we are unescaping an identifier in the preprocessor
|
||||
diagnose_incorrect_encoding: bool = true,
|
||||
|
||||
fn prefixLen(self: *const Parser) usize {
|
||||
return switch (self.kind) {
|
||||
|
|
@ -181,65 +193,204 @@ pub const Parser = struct {
|
|||
};
|
||||
}
|
||||
|
||||
pub fn errors(p: *Parser) []CharDiagnostic {
|
||||
return p.errors_buffer[0..p.errors_len];
|
||||
const Diagnostic = struct {
|
||||
fmt: []const u8,
|
||||
kind: Diagnostics.Message.Kind,
|
||||
opt: ?Diagnostics.Option = null,
|
||||
extension: bool = false,
|
||||
|
||||
pub const illegal_char_encoding_error: Diagnostic = .{
|
||||
.fmt = "illegal character encoding in character literal",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const illegal_char_encoding_warning: Diagnostic = .{
|
||||
.fmt = "illegal character encoding in character literal",
|
||||
.kind = .warning,
|
||||
.opt = .@"invalid-source-encoding",
|
||||
};
|
||||
|
||||
pub const missing_hex_escape: Diagnostic = .{
|
||||
.fmt = "\\{c} used with no following hex digits",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const escape_sequence_overflow: Diagnostic = .{
|
||||
.fmt = "escape sequence out of range",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const incomplete_universal_character: Diagnostic = .{
|
||||
.fmt = "incomplete universal character name",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const invalid_universal_character: Diagnostic = .{
|
||||
.fmt = "invalid universal character",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const char_too_large: Diagnostic = .{
|
||||
.fmt = "character too large for enclosing character literal type",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const ucn_basic_char_error: Diagnostic = .{
|
||||
.fmt = "character '{c}' cannot be specified by a universal character name",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const ucn_basic_char_warning: Diagnostic = .{
|
||||
.fmt = "specifying character '{c}' with a universal character name is incompatible with C standards before C23",
|
||||
.kind = .off,
|
||||
.opt = .@"pre-c23-compat",
|
||||
};
|
||||
|
||||
pub const ucn_control_char_error: Diagnostic = .{
|
||||
.fmt = "universal character name refers to a control character",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const ucn_control_char_warning: Diagnostic = .{
|
||||
.fmt = "universal character name referring to a control character is incompatible with C standards before C23",
|
||||
.kind = .off,
|
||||
.opt = .@"pre-c23-compat",
|
||||
};
|
||||
|
||||
pub const c89_ucn_in_literal: Diagnostic = .{
|
||||
.fmt = "universal character names are only valid in C99 or later",
|
||||
.kind = .warning,
|
||||
.opt = .unicode,
|
||||
};
|
||||
|
||||
const non_standard_escape_char: Diagnostic = .{
|
||||
.fmt = "use of non-standard escape character '\\{c}'",
|
||||
.kind = .off,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const unknown_escape_sequence: Diagnostic = .{
|
||||
.fmt = "unknown escape sequence '\\{c}'",
|
||||
.kind = .warning,
|
||||
.opt = .@"unknown-escape-sequence",
|
||||
};
|
||||
|
||||
pub const four_char_char_literal: Diagnostic = .{
|
||||
.fmt = "multi-character character constant",
|
||||
.opt = .@"four-char-constants",
|
||||
.kind = .off,
|
||||
};
|
||||
|
||||
pub const multichar_literal_warning: Diagnostic = .{
|
||||
.fmt = "multi-character character constant",
|
||||
.kind = .warning,
|
||||
.opt = .multichar,
|
||||
};
|
||||
|
||||
pub const invalid_multichar_literal: Diagnostic = .{
|
||||
.fmt = "{s} character literals may not contain multiple characters",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const char_lit_too_wide: Diagnostic = .{
|
||||
.fmt = "character constant too long for its type",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
// pub const wide_multichar_literal: Diagnostic = .{
|
||||
// .fmt = "extraneous characters in character constant ignored",
|
||||
// .kind = .warning,
|
||||
// };
|
||||
};
|
||||
|
||||
pub fn err(p: *Parser, diagnostic: Diagnostic, args: anytype) !void {
|
||||
defer p.offset = 0;
|
||||
if (p.errored) return;
|
||||
defer p.errored = true;
|
||||
try p.warn(diagnostic, args);
|
||||
}
|
||||
|
||||
pub fn err(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void {
|
||||
if (self.errored) return;
|
||||
self.errored = true;
|
||||
const diagnostic: CharDiagnostic = .{ .tag = tag, .extra = extra };
|
||||
if (self.errors_len == self.errors_buffer.len) {
|
||||
self.errors_buffer[self.errors_buffer.len - 1] = diagnostic;
|
||||
} else {
|
||||
self.errors_buffer[self.errors_len] = diagnostic;
|
||||
self.errors_len += 1;
|
||||
pub fn warn(p: *Parser, diagnostic: Diagnostic, args: anytype) Compilation.Error!void {
|
||||
defer p.offset = 0;
|
||||
if (p.errored) return;
|
||||
if (p.comp.diagnostics.effectiveKind(diagnostic) == .off) return;
|
||||
|
||||
var sf = std.heap.stackFallback(1024, p.comp.gpa);
|
||||
var allocating: std.Io.Writer.Allocating = .init(sf.get());
|
||||
defer allocating.deinit();
|
||||
|
||||
formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory;
|
||||
|
||||
var offset_location = p.loc;
|
||||
offset_location.byte_offset += p.offset;
|
||||
try p.comp.diagnostics.addWithLocation(p.comp, .{
|
||||
.kind = diagnostic.kind,
|
||||
.text = allocating.written(),
|
||||
.opt = diagnostic.opt,
|
||||
.extension = diagnostic.extension,
|
||||
.location = offset_location.expand(p.comp),
|
||||
}, p.expansion_locs, true);
|
||||
}
|
||||
|
||||
fn formatArgs(w: *std.Io.Writer, fmt: []const u8, args: anytype) !void {
|
||||
var i: usize = 0;
|
||||
inline for (std.meta.fields(@TypeOf(args))) |arg_info| {
|
||||
const arg = @field(args, arg_info.name);
|
||||
i += switch (@TypeOf(arg)) {
|
||||
[]const u8 => try Diagnostics.formatString(w, fmt[i..], arg),
|
||||
Ascii => try arg.format(w, fmt[i..]),
|
||||
else => switch (@typeInfo(@TypeOf(arg))) {
|
||||
.int, .comptime_int => try Diagnostics.formatInt(w, fmt[i..], arg),
|
||||
.pointer => try Diagnostics.formatString(w, fmt[i..], arg),
|
||||
else => comptime unreachable,
|
||||
},
|
||||
};
|
||||
}
|
||||
try w.writeAll(fmt[i..]);
|
||||
}
|
||||
|
||||
pub fn warn(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void {
|
||||
if (self.errored) return;
|
||||
if (self.errors_len < self.errors_buffer.len) {
|
||||
self.errors_buffer[self.errors_len] = .{ .tag = tag, .extra = extra };
|
||||
self.errors_len += 1;
|
||||
}
|
||||
}
|
||||
pub fn next(p: *Parser) !?Item {
|
||||
if (p.i >= p.literal.len) return null;
|
||||
|
||||
pub fn next(self: *Parser) ?Item {
|
||||
if (self.i >= self.literal.len) return null;
|
||||
|
||||
const start = self.i;
|
||||
if (self.literal[start] != '\\') {
|
||||
self.i = mem.indexOfScalarPos(u8, self.literal, start + 1, '\\') orelse self.literal.len;
|
||||
const unescaped_slice = self.literal[start..self.i];
|
||||
const start = p.i;
|
||||
if (p.literal[start] != '\\') {
|
||||
p.i = mem.indexOfScalarPos(u8, p.literal, start + 1, '\\') orelse p.literal.len;
|
||||
const unescaped_slice = p.literal[start..p.i];
|
||||
|
||||
const view = std.unicode.Utf8View.init(unescaped_slice) catch {
|
||||
if (self.kind != .char) {
|
||||
self.err(.illegal_char_encoding_error, .{ .none = {} });
|
||||
if (!p.diagnose_incorrect_encoding) {
|
||||
return .{ .improperly_encoded = p.literal[start..p.i] };
|
||||
}
|
||||
if (p.incorrect_encoding_is_error) {
|
||||
try p.warn(.illegal_char_encoding_error, .{});
|
||||
return .{ .improperly_encoded = p.literal[start..p.i] };
|
||||
}
|
||||
if (p.kind != .char) {
|
||||
try p.err(.illegal_char_encoding_error, .{});
|
||||
return null;
|
||||
}
|
||||
self.warn(.illegal_char_encoding_warning, .{ .none = {} });
|
||||
return .{ .improperly_encoded = self.literal[start..self.i] };
|
||||
try p.warn(.illegal_char_encoding_warning, .{});
|
||||
return .{ .improperly_encoded = p.literal[start..p.i] };
|
||||
};
|
||||
return .{ .utf8_text = view };
|
||||
}
|
||||
switch (self.literal[start + 1]) {
|
||||
'u', 'U' => return self.parseUnicodeEscape(),
|
||||
else => return self.parseEscapedChar(),
|
||||
switch (p.literal[start + 1]) {
|
||||
'u', 'U' => return try p.parseUnicodeEscape(),
|
||||
else => return try p.parseEscapedChar(),
|
||||
}
|
||||
}
|
||||
|
||||
fn parseUnicodeEscape(self: *Parser) ?Item {
|
||||
const start = self.i;
|
||||
fn parseUnicodeEscape(p: *Parser) !?Item {
|
||||
const start = p.i;
|
||||
|
||||
std.debug.assert(self.literal[self.i] == '\\');
|
||||
std.debug.assert(p.literal[p.i] == '\\');
|
||||
|
||||
const kind = self.literal[self.i + 1];
|
||||
const kind = p.literal[p.i + 1];
|
||||
std.debug.assert(kind == 'u' or kind == 'U');
|
||||
|
||||
self.i += 2;
|
||||
if (self.i >= self.literal.len or !std.ascii.isHex(self.literal[self.i])) {
|
||||
self.err(.missing_hex_escape, .{ .ascii = @intCast(kind) });
|
||||
p.i += 2;
|
||||
if (p.i >= p.literal.len or !std.ascii.isHex(p.literal[p.i])) {
|
||||
try p.err(.missing_hex_escape, .{Ascii.init(kind)});
|
||||
return null;
|
||||
}
|
||||
const expected_len: usize = if (kind == 'u') 4 else 8;
|
||||
|
|
@ -247,66 +398,66 @@ pub const Parser = struct {
|
|||
var count: usize = 0;
|
||||
var val: u32 = 0;
|
||||
|
||||
for (self.literal[self.i..], 0..) |c, i| {
|
||||
for (p.literal[p.i..], 0..) |c, i| {
|
||||
if (i == expected_len) break;
|
||||
|
||||
const char = std.fmt.charToDigit(c, 16) catch {
|
||||
break;
|
||||
};
|
||||
const char = std.fmt.charToDigit(c, 16) catch break;
|
||||
|
||||
val, const overflow = @shlWithOverflow(val, 4);
|
||||
overflowed = overflowed or overflow != 0;
|
||||
val |= char;
|
||||
count += 1;
|
||||
}
|
||||
self.i += expected_len;
|
||||
p.i += expected_len;
|
||||
|
||||
if (overflowed) {
|
||||
self.err(.escape_sequence_overflow, .{ .offset = start + self.prefixLen() });
|
||||
p.offset += @intCast(start + p.prefixLen());
|
||||
try p.err(.escape_sequence_overflow, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
if (count != expected_len) {
|
||||
self.err(.incomplete_universal_character, .{ .none = {} });
|
||||
try p.err(.incomplete_universal_character, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
if (val > std.math.maxInt(u21) or !std.unicode.utf8ValidCodepoint(@intCast(val))) {
|
||||
self.err(.invalid_universal_character, .{ .offset = start + self.prefixLen() });
|
||||
p.offset += @intCast(start + p.prefixLen());
|
||||
try p.err(.invalid_universal_character, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
if (val > self.max_codepoint) {
|
||||
self.err(.char_too_large, .{ .none = {} });
|
||||
if (val > p.max_codepoint) {
|
||||
try p.err(.char_too_large, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
if (val < 0xA0 and (val != '$' and val != '@' and val != '`')) {
|
||||
const is_error = !self.comp.langopts.standard.atLeast(.c23);
|
||||
const is_error = !p.comp.langopts.standard.atLeast(.c23);
|
||||
if (val >= 0x20 and val <= 0x7F) {
|
||||
if (is_error) {
|
||||
self.err(.ucn_basic_char_error, .{ .ascii = @intCast(val) });
|
||||
} else {
|
||||
self.warn(.ucn_basic_char_warning, .{ .ascii = @intCast(val) });
|
||||
try p.err(.ucn_basic_char_error, .{Ascii.init(val)});
|
||||
} else if (!p.comp.langopts.standard.atLeast(.c23)) {
|
||||
try p.warn(.ucn_basic_char_warning, .{Ascii.init(val)});
|
||||
}
|
||||
} else {
|
||||
if (is_error) {
|
||||
self.err(.ucn_control_char_error, .{ .none = {} });
|
||||
} else {
|
||||
self.warn(.ucn_control_char_warning, .{ .none = {} });
|
||||
try p.err(.ucn_control_char_error, .{});
|
||||
} else if (!p.comp.langopts.standard.atLeast(.c23)) {
|
||||
try p.warn(.ucn_control_char_warning, .{});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.warn(.c89_ucn_in_literal, .{ .none = {} });
|
||||
if (!p.comp.langopts.standard.atLeast(.c99)) try p.warn(.c89_ucn_in_literal, .{});
|
||||
return .{ .codepoint = @intCast(val) };
|
||||
}
|
||||
|
||||
fn parseEscapedChar(self: *Parser) Item {
|
||||
self.i += 1;
|
||||
const c = self.literal[self.i];
|
||||
fn parseEscapedChar(p: *Parser) !Item {
|
||||
p.i += 1;
|
||||
const c = p.literal[p.i];
|
||||
defer if (c != 'x' and (c < '0' or c > '7')) {
|
||||
self.i += 1;
|
||||
p.i += 1;
|
||||
};
|
||||
|
||||
switch (c) {
|
||||
|
|
@ -319,36 +470,40 @@ pub const Parser = struct {
|
|||
'a' => return .{ .value = 0x07 },
|
||||
'b' => return .{ .value = 0x08 },
|
||||
'e', 'E' => {
|
||||
self.warn(.non_standard_escape_char, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } });
|
||||
p.offset += @intCast(p.i);
|
||||
try p.warn(.non_standard_escape_char, .{Ascii.init(c)});
|
||||
return .{ .value = 0x1B };
|
||||
},
|
||||
'(', '{', '[', '%' => {
|
||||
self.warn(.non_standard_escape_char, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } });
|
||||
p.offset += @intCast(p.i);
|
||||
try p.warn(.non_standard_escape_char, .{Ascii.init(c)});
|
||||
return .{ .value = c };
|
||||
},
|
||||
'f' => return .{ .value = 0x0C },
|
||||
'v' => return .{ .value = 0x0B },
|
||||
'x' => return .{ .value = self.parseNumberEscape(.hex) },
|
||||
'0'...'7' => return .{ .value = self.parseNumberEscape(.octal) },
|
||||
'x' => return .{ .value = try p.parseNumberEscape(.hex) },
|
||||
'0'...'7' => return .{ .value = try p.parseNumberEscape(.octal) },
|
||||
'u', 'U' => unreachable, // handled by parseUnicodeEscape
|
||||
else => {
|
||||
self.warn(.unknown_escape_sequence, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } });
|
||||
p.offset += @intCast(p.i);
|
||||
try p.warn(.unknown_escape_sequence, .{Ascii.init(c)});
|
||||
return .{ .value = c };
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn parseNumberEscape(self: *Parser, base: EscapeBase) u32 {
|
||||
fn parseNumberEscape(p: *Parser, base: EscapeBase) !u32 {
|
||||
var val: u32 = 0;
|
||||
var count: usize = 0;
|
||||
var overflowed = false;
|
||||
const start = self.i;
|
||||
defer self.i += count;
|
||||
const start = p.i;
|
||||
defer p.i += count;
|
||||
|
||||
const slice = switch (base) {
|
||||
.octal => self.literal[self.i..@min(self.literal.len, self.i + 3)], // max 3 chars
|
||||
.octal => p.literal[p.i..@min(p.literal.len, p.i + 3)], // max 3 chars
|
||||
.hex => blk: {
|
||||
self.i += 1;
|
||||
break :blk self.literal[self.i..]; // skip over 'x'; could have an arbitrary number of chars
|
||||
p.i += 1;
|
||||
break :blk p.literal[p.i..]; // skip over 'x'; could have an arbitrary number of chars
|
||||
},
|
||||
};
|
||||
for (slice) |c| {
|
||||
|
|
@ -358,13 +513,14 @@ pub const Parser = struct {
|
|||
val += char;
|
||||
count += 1;
|
||||
}
|
||||
if (overflowed or val > self.kind.maxInt(self.comp)) {
|
||||
self.err(.escape_sequence_overflow, .{ .offset = start + self.prefixLen() });
|
||||
if (overflowed or val > p.kind.maxInt(p.comp)) {
|
||||
p.offset += @intCast(start + p.prefixLen());
|
||||
try p.err(.escape_sequence_overflow, .{});
|
||||
return 0;
|
||||
}
|
||||
if (count == 0) {
|
||||
std.debug.assert(base == .hex);
|
||||
self.err(.missing_hex_escape, .{ .ascii = 'x' });
|
||||
try p.err(.missing_hex_escape, .{Ascii.init('x')});
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
|
|
|||
517
lib/compiler/aro/aro/toolchains/Linux.zig
vendored
|
|
@ -1,517 +0,0 @@
|
|||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const GCCDetector = @import("../Driver/GCCDetector.zig");
|
||||
const Toolchain = @import("../Toolchain.zig");
|
||||
const Driver = @import("../Driver.zig");
|
||||
const Distro = @import("../Driver/Distro.zig");
|
||||
const target_util = @import("../target.zig");
|
||||
const system_defaults = @import("system_defaults");
|
||||
|
||||
const Linux = @This();
|
||||
|
||||
distro: Distro.Tag = .unknown,
|
||||
extra_opts: std.ArrayListUnmanaged([]const u8) = .empty,
|
||||
gcc_detector: GCCDetector = .{},
|
||||
|
||||
pub fn discover(self: *Linux, tc: *Toolchain) !void {
|
||||
self.distro = Distro.detect(tc.getTarget(), tc.filesystem);
|
||||
try self.gcc_detector.discover(tc);
|
||||
tc.selected_multilib = self.gcc_detector.selected;
|
||||
|
||||
try self.gcc_detector.appendToolPath(tc);
|
||||
try self.buildExtraOpts(tc);
|
||||
try self.findPaths(tc);
|
||||
}
|
||||
|
||||
fn buildExtraOpts(self: *Linux, tc: *const Toolchain) !void {
|
||||
const gpa = tc.driver.comp.gpa;
|
||||
const target = tc.getTarget();
|
||||
const is_android = target.abi.isAndroid();
|
||||
if (self.distro.isAlpine() or is_android) {
|
||||
try self.extra_opts.ensureUnusedCapacity(gpa, 2);
|
||||
self.extra_opts.appendAssumeCapacity("-z");
|
||||
self.extra_opts.appendAssumeCapacity("now");
|
||||
}
|
||||
|
||||
if (self.distro.isOpenSUSE() or self.distro.isUbuntu() or self.distro.isAlpine() or is_android) {
|
||||
try self.extra_opts.ensureUnusedCapacity(gpa, 2);
|
||||
self.extra_opts.appendAssumeCapacity("-z");
|
||||
self.extra_opts.appendAssumeCapacity("relro");
|
||||
}
|
||||
|
||||
if ((target.cpu.arch.isArm() and !target.cpu.arch.isThumb()) or target.cpu.arch.isAARCH64() or is_android) {
|
||||
try self.extra_opts.ensureUnusedCapacity(gpa, 2);
|
||||
self.extra_opts.appendAssumeCapacity("-z");
|
||||
self.extra_opts.appendAssumeCapacity("max-page-size=4096");
|
||||
}
|
||||
|
||||
if (target.cpu.arch == .arm or target.cpu.arch == .thumb) {
|
||||
try self.extra_opts.append(gpa, "-X");
|
||||
}
|
||||
|
||||
if (!target.cpu.arch.isMIPS() and target.cpu.arch != .hexagon) {
|
||||
const hash_style = if (is_android) .both else self.distro.getHashStyle();
|
||||
try self.extra_opts.append(gpa, switch (hash_style) {
|
||||
inline else => |tag| "--hash-style=" ++ @tagName(tag),
|
||||
});
|
||||
}
|
||||
|
||||
if (system_defaults.enable_linker_build_id) {
|
||||
try self.extra_opts.append(gpa, "--build-id");
|
||||
}
|
||||
}
|
||||
|
||||
fn addMultiLibPaths(self: *Linux, tc: *Toolchain, sysroot: []const u8, os_lib_dir: []const u8) !void {
|
||||
if (!self.gcc_detector.is_valid) return;
|
||||
const gcc_triple = self.gcc_detector.gcc_triple;
|
||||
const lib_path = self.gcc_detector.parent_lib_path;
|
||||
|
||||
// Add lib/gcc/$triple/$version, with an optional /multilib suffix.
|
||||
try tc.addPathIfExists(&.{ self.gcc_detector.install_path, tc.selected_multilib.gcc_suffix }, .file);
|
||||
|
||||
// Add lib/gcc/$triple/$libdir
|
||||
// For GCC built with --enable-version-specific-runtime-libs.
|
||||
try tc.addPathIfExists(&.{ self.gcc_detector.install_path, "..", os_lib_dir }, .file);
|
||||
|
||||
try tc.addPathIfExists(&.{ lib_path, "..", gcc_triple, "lib", "..", os_lib_dir, tc.selected_multilib.os_suffix }, .file);
|
||||
|
||||
// If the GCC installation we found is inside of the sysroot, we want to
|
||||
// prefer libraries installed in the parent prefix of the GCC installation.
|
||||
// It is important to *not* use these paths when the GCC installation is
|
||||
// outside of the system root as that can pick up unintended libraries.
|
||||
// This usually happens when there is an external cross compiler on the
|
||||
// host system, and a more minimal sysroot available that is the target of
|
||||
// the cross. Note that GCC does include some of these directories in some
|
||||
// configurations but this seems somewhere between questionable and simply
|
||||
// a bug.
|
||||
if (mem.startsWith(u8, lib_path, sysroot)) {
|
||||
try tc.addPathIfExists(&.{ lib_path, "..", os_lib_dir }, .file);
|
||||
}
|
||||
}
|
||||
|
||||
fn addMultiArchPaths(self: *Linux, tc: *Toolchain) !void {
|
||||
if (!self.gcc_detector.is_valid) return;
|
||||
const lib_path = self.gcc_detector.parent_lib_path;
|
||||
const gcc_triple = self.gcc_detector.gcc_triple;
|
||||
const multilib = self.gcc_detector.selected;
|
||||
try tc.addPathIfExists(&.{ lib_path, "..", gcc_triple, "lib", multilib.os_suffix }, .file);
|
||||
}
|
||||
|
||||
/// TODO: Very incomplete
|
||||
fn findPaths(self: *Linux, tc: *Toolchain) !void {
|
||||
const target = tc.getTarget();
|
||||
const sysroot = tc.getSysroot();
|
||||
|
||||
var output: [64]u8 = undefined;
|
||||
|
||||
const os_lib_dir = getOSLibDir(target);
|
||||
const multiarch_triple = getMultiarchTriple(target) orelse target_util.toLLVMTriple(target, &output);
|
||||
|
||||
try self.addMultiLibPaths(tc, sysroot, os_lib_dir);
|
||||
|
||||
try tc.addPathIfExists(&.{ sysroot, "/lib", multiarch_triple }, .file);
|
||||
try tc.addPathIfExists(&.{ sysroot, "/lib", "..", os_lib_dir }, .file);
|
||||
|
||||
if (target.abi.isAndroid()) {
|
||||
// TODO
|
||||
}
|
||||
try tc.addPathIfExists(&.{ sysroot, "/usr", "lib", multiarch_triple }, .file);
|
||||
try tc.addPathIfExists(&.{ sysroot, "/usr", "lib", "..", os_lib_dir }, .file);
|
||||
|
||||
try self.addMultiArchPaths(tc);
|
||||
|
||||
try tc.addPathIfExists(&.{ sysroot, "/lib" }, .file);
|
||||
try tc.addPathIfExists(&.{ sysroot, "/usr", "lib" }, .file);
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Linux, allocator: std.mem.Allocator) void {
|
||||
self.extra_opts.deinit(allocator);
|
||||
}
|
||||
|
||||
fn isPIEDefault(self: *const Linux) bool {
|
||||
_ = self;
|
||||
return false;
|
||||
}
|
||||
|
||||
fn getPIE(self: *const Linux, d: *const Driver) bool {
|
||||
if (d.shared or d.static or d.relocatable or d.static_pie) {
|
||||
return false;
|
||||
}
|
||||
return d.pie orelse self.isPIEDefault();
|
||||
}
|
||||
|
||||
fn getStaticPIE(self: *const Linux, d: *Driver) !bool {
|
||||
_ = self;
|
||||
if (d.static_pie and d.pie != null) {
|
||||
try d.err("cannot specify 'nopie' along with 'static-pie'");
|
||||
}
|
||||
return d.static_pie;
|
||||
}
|
||||
|
||||
fn getStatic(self: *const Linux, d: *const Driver) bool {
|
||||
_ = self;
|
||||
return d.static and !d.static_pie;
|
||||
}
|
||||
|
||||
pub fn getDefaultLinker(self: *const Linux, target: std.Target) []const u8 {
|
||||
_ = self;
|
||||
if (target.abi.isAndroid()) {
|
||||
return "ld.lld";
|
||||
}
|
||||
return "ld";
|
||||
}
|
||||
|
||||
pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) Compilation.Error!void {
|
||||
const d = tc.driver;
|
||||
const target = tc.getTarget();
|
||||
|
||||
const is_pie = self.getPIE(d);
|
||||
const is_static_pie = try self.getStaticPIE(d);
|
||||
const is_static = self.getStatic(d);
|
||||
const is_android = target.abi.isAndroid();
|
||||
const is_ve = target.cpu.arch == .ve;
|
||||
const has_crt_begin_end_files = target.abi != .none; // TODO: clang checks for MIPS vendor
|
||||
|
||||
if (is_pie) {
|
||||
try argv.append("-pie");
|
||||
}
|
||||
if (is_static_pie) {
|
||||
try argv.appendSlice(&.{ "-static", "-pie", "--no-dynamic-linker", "-z", "text" });
|
||||
}
|
||||
|
||||
if (d.rdynamic) {
|
||||
try argv.append("-export-dynamic");
|
||||
}
|
||||
|
||||
if (d.strip) {
|
||||
try argv.append("-s");
|
||||
}
|
||||
|
||||
try argv.appendSlice(self.extra_opts.items);
|
||||
try argv.append("--eh-frame-hdr");
|
||||
|
||||
// Todo: Driver should parse `-EL`/`-EB` for arm to set endianness for arm targets
|
||||
if (target_util.ldEmulationOption(d.comp.target, null)) |emulation| {
|
||||
try argv.appendSlice(&.{ "-m", emulation });
|
||||
} else {
|
||||
try d.err("Unknown target triple");
|
||||
return;
|
||||
}
|
||||
if (d.comp.target.cpu.arch.isRISCV()) {
|
||||
try argv.append("-X");
|
||||
}
|
||||
if (d.shared) {
|
||||
try argv.append("-shared");
|
||||
}
|
||||
if (is_static) {
|
||||
try argv.append("-static");
|
||||
} else {
|
||||
if (d.rdynamic) {
|
||||
try argv.append("-export-dynamic");
|
||||
}
|
||||
if (!d.shared and !is_static_pie and !d.relocatable) {
|
||||
const dynamic_linker = d.comp.target.standardDynamicLinkerPath();
|
||||
// todo: check for --dyld-prefix
|
||||
if (dynamic_linker.get()) |path| {
|
||||
try argv.appendSlice(&.{ "-dynamic-linker", try tc.arena.dupe(u8, path) });
|
||||
} else {
|
||||
try d.err("Could not find dynamic linker path");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
try argv.appendSlice(&.{ "-o", d.output_name orelse "a.out" });
|
||||
|
||||
if (!d.nostdlib and !d.nostartfiles and !d.relocatable) {
|
||||
if (!is_android) {
|
||||
if (!d.shared) {
|
||||
const crt1 = if (is_pie)
|
||||
"Scrt1.o"
|
||||
else if (is_static_pie)
|
||||
"rcrt1.o"
|
||||
else
|
||||
"crt1.o";
|
||||
try argv.append(try tc.getFilePath(crt1));
|
||||
}
|
||||
try argv.append(try tc.getFilePath("crti.o"));
|
||||
}
|
||||
if (is_ve) {
|
||||
try argv.appendSlice(&.{ "-z", "max-page-size=0x4000000" });
|
||||
}
|
||||
|
||||
if (has_crt_begin_end_files) {
|
||||
var path: []const u8 = "";
|
||||
if (tc.getRuntimeLibKind() == .compiler_rt and !is_android) {
|
||||
const crt_begin = try tc.getCompilerRt("crtbegin", .object);
|
||||
if (tc.filesystem.exists(crt_begin)) {
|
||||
path = crt_begin;
|
||||
}
|
||||
}
|
||||
if (path.len == 0) {
|
||||
const crt_begin = if (tc.driver.shared)
|
||||
if (is_android) "crtbegin_so.o" else "crtbeginS.o"
|
||||
else if (is_static)
|
||||
if (is_android) "crtbegin_static.o" else "crtbeginT.o"
|
||||
else if (is_pie or is_static_pie)
|
||||
if (is_android) "crtbegin_dynamic.o" else "crtbeginS.o"
|
||||
else if (is_android) "crtbegin_dynamic.o" else "crtbegin.o";
|
||||
path = try tc.getFilePath(crt_begin);
|
||||
}
|
||||
try argv.append(path);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO add -L opts
|
||||
// TODO add -u opts
|
||||
|
||||
try tc.addFilePathLibArgs(argv);
|
||||
|
||||
// TODO handle LTO
|
||||
|
||||
try argv.appendSlice(d.link_objects.items);
|
||||
|
||||
if (!d.nostdlib and !d.relocatable) {
|
||||
if (!d.nodefaultlibs) {
|
||||
if (is_static or is_static_pie) {
|
||||
try argv.append("--start-group");
|
||||
}
|
||||
try tc.addRuntimeLibs(argv);
|
||||
|
||||
// TODO: add pthread if needed
|
||||
if (!d.nolibc) {
|
||||
try argv.append("-lc");
|
||||
}
|
||||
if (is_static or is_static_pie) {
|
||||
try argv.append("--end-group");
|
||||
} else {
|
||||
try tc.addRuntimeLibs(argv);
|
||||
}
|
||||
}
|
||||
if (!d.nostartfiles) {
|
||||
if (has_crt_begin_end_files) {
|
||||
var path: []const u8 = "";
|
||||
if (tc.getRuntimeLibKind() == .compiler_rt and !is_android) {
|
||||
const crt_end = try tc.getCompilerRt("crtend", .object);
|
||||
if (tc.filesystem.exists(crt_end)) {
|
||||
path = crt_end;
|
||||
}
|
||||
}
|
||||
if (path.len == 0) {
|
||||
const crt_end = if (d.shared)
|
||||
if (is_android) "crtend_so.o" else "crtendS.o"
|
||||
else if (is_pie or is_static_pie)
|
||||
if (is_android) "crtend_android.o" else "crtendS.o"
|
||||
else if (is_android) "crtend_android.o" else "crtend.o";
|
||||
path = try tc.getFilePath(crt_end);
|
||||
}
|
||||
try argv.append(path);
|
||||
}
|
||||
if (!is_android) {
|
||||
try argv.append(try tc.getFilePath("crtn.o"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO add -T args
|
||||
}
|
||||
|
||||
fn getMultiarchTriple(target: std.Target) ?[]const u8 {
|
||||
const is_android = target.abi.isAndroid();
|
||||
const is_mips_r6 = target.cpu.has(.mips, .mips32r6);
|
||||
return switch (target.cpu.arch) {
|
||||
.arm, .thumb => if (is_android) "arm-linux-androideabi" else if (target.abi == .gnueabihf) "arm-linux-gnueabihf" else "arm-linux-gnueabi",
|
||||
.armeb, .thumbeb => if (target.abi == .gnueabihf) "armeb-linux-gnueabihf" else "armeb-linux-gnueabi",
|
||||
.aarch64 => if (is_android) "aarch64-linux-android" else "aarch64-linux-gnu",
|
||||
.aarch64_be => "aarch64_be-linux-gnu",
|
||||
.x86 => if (is_android) "i686-linux-android" else "i386-linux-gnu",
|
||||
.x86_64 => if (is_android) "x86_64-linux-android" else if (target.abi == .gnux32) "x86_64-linux-gnux32" else "x86_64-linux-gnu",
|
||||
.m68k => "m68k-linux-gnu",
|
||||
.mips => if (is_mips_r6) "mipsisa32r6-linux-gnu" else "mips-linux-gnu",
|
||||
.mipsel => if (is_android) "mipsel-linux-android" else if (is_mips_r6) "mipsisa32r6el-linux-gnu" else "mipsel-linux-gnu",
|
||||
.powerpcle => "powerpcle-linux-gnu",
|
||||
.powerpc64 => "powerpc64-linux-gnu",
|
||||
.powerpc64le => "powerpc64le-linux-gnu",
|
||||
.riscv64 => "riscv64-linux-gnu",
|
||||
.sparc => "sparc-linux-gnu",
|
||||
.sparc64 => "sparc64-linux-gnu",
|
||||
.s390x => "s390x-linux-gnu",
|
||||
|
||||
// TODO: expand this
|
||||
else => null,
|
||||
};
|
||||
}
|
||||
|
||||
fn getOSLibDir(target: std.Target) []const u8 {
|
||||
switch (target.cpu.arch) {
|
||||
.x86,
|
||||
.powerpc,
|
||||
.powerpcle,
|
||||
.sparc,
|
||||
=> return "lib32",
|
||||
else => {},
|
||||
}
|
||||
if (target.cpu.arch == .x86_64 and (target.abi == .gnux32 or target.abi == .muslx32)) {
|
||||
return "libx32";
|
||||
}
|
||||
if (target.cpu.arch == .riscv32) {
|
||||
return "lib32";
|
||||
}
|
||||
if (target.ptrBitWidth() == 32) {
|
||||
return "lib";
|
||||
}
|
||||
return "lib64";
|
||||
}
|
||||
|
||||
pub fn defineSystemIncludes(self: *const Linux, tc: *const Toolchain) !void {
|
||||
if (tc.driver.nostdinc) return;
|
||||
|
||||
const comp = tc.driver.comp;
|
||||
const target = tc.getTarget();
|
||||
|
||||
// musl prefers /usr/include before builtin includes, so musl targets will add builtins
|
||||
// at the end of this function (unless disabled with nostdlibinc)
|
||||
if (!tc.driver.nobuiltininc and (!target.abi.isMusl() or tc.driver.nostdlibinc)) {
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name);
|
||||
}
|
||||
|
||||
if (tc.driver.nostdlibinc) return;
|
||||
|
||||
const sysroot = tc.getSysroot();
|
||||
const local_include = try std.fmt.allocPrint(comp.gpa, "{s}{s}", .{ sysroot, "/usr/local/include" });
|
||||
defer comp.gpa.free(local_include);
|
||||
try comp.addSystemIncludeDir(local_include);
|
||||
|
||||
if (self.gcc_detector.is_valid) {
|
||||
const gcc_include_path = try std.fs.path.join(comp.gpa, &.{ self.gcc_detector.parent_lib_path, "..", self.gcc_detector.gcc_triple, "include" });
|
||||
defer comp.gpa.free(gcc_include_path);
|
||||
try comp.addSystemIncludeDir(gcc_include_path);
|
||||
}
|
||||
|
||||
if (getMultiarchTriple(target)) |triple| {
|
||||
const joined = try std.fs.path.join(comp.gpa, &.{ sysroot, "usr", "include", triple });
|
||||
defer comp.gpa.free(joined);
|
||||
if (tc.filesystem.exists(joined)) {
|
||||
try comp.addSystemIncludeDir(joined);
|
||||
}
|
||||
}
|
||||
|
||||
if (target.os.tag == .rtems) return;
|
||||
|
||||
try comp.addSystemIncludeDir("/include");
|
||||
try comp.addSystemIncludeDir("/usr/include");
|
||||
|
||||
std.debug.assert(!tc.driver.nostdlibinc);
|
||||
if (!tc.driver.nobuiltininc and target.abi.isMusl()) {
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name);
|
||||
}
|
||||
}
|
||||
|
||||
test Linux {
|
||||
if (@import("builtin").os.tag == .windows) return error.SkipZigTest;
|
||||
|
||||
var arena_instance = std.heap.ArenaAllocator.init(std.testing.allocator);
|
||||
defer arena_instance.deinit();
|
||||
const arena = arena_instance.allocator();
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
comp.environment = .{
|
||||
.path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
};
|
||||
defer comp.environment = .{};
|
||||
|
||||
const raw_triple = "x86_64-linux-gnu";
|
||||
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = raw_triple });
|
||||
comp.target = try std.zig.system.resolveTargetQuery(target_query);
|
||||
comp.langopts.setEmulatedCompiler(.gcc);
|
||||
|
||||
var driver: Driver = .{ .comp = &comp };
|
||||
defer driver.deinit();
|
||||
driver.raw_target_triple = raw_triple;
|
||||
|
||||
const link_obj = try driver.comp.gpa.dupe(u8, "/tmp/foo.o");
|
||||
try driver.link_objects.append(driver.comp.gpa, link_obj);
|
||||
driver.temp_file_count += 1;
|
||||
|
||||
var toolchain: Toolchain = .{ .driver = &driver, .arena = arena, .filesystem = .{ .fake = &.{
|
||||
.{ .path = "/tmp" },
|
||||
.{ .path = "/usr" },
|
||||
.{ .path = "/usr/lib64" },
|
||||
.{ .path = "/usr/bin" },
|
||||
.{ .path = "/usr/bin/ld", .executable = true },
|
||||
.{ .path = "/lib" },
|
||||
.{ .path = "/lib/x86_64-linux-gnu" },
|
||||
.{ .path = "/lib/x86_64-linux-gnu/crt1.o" },
|
||||
.{ .path = "/lib/x86_64-linux-gnu/crti.o" },
|
||||
.{ .path = "/lib/x86_64-linux-gnu/crtn.o" },
|
||||
.{ .path = "/lib64" },
|
||||
.{ .path = "/usr/lib" },
|
||||
.{ .path = "/usr/lib/gcc" },
|
||||
.{ .path = "/usr/lib/gcc/x86_64-linux-gnu" },
|
||||
.{ .path = "/usr/lib/gcc/x86_64-linux-gnu/9" },
|
||||
.{ .path = "/usr/lib/gcc/x86_64-linux-gnu/9/crtbegin.o" },
|
||||
.{ .path = "/usr/lib/gcc/x86_64-linux-gnu/9/crtend.o" },
|
||||
.{ .path = "/usr/lib/x86_64-linux-gnu" },
|
||||
.{ .path = "/etc/lsb-release", .contents =
|
||||
\\DISTRIB_ID=Ubuntu
|
||||
\\DISTRIB_RELEASE=20.04
|
||||
\\DISTRIB_CODENAME=focal
|
||||
\\DISTRIB_DESCRIPTION="Ubuntu 20.04.6 LTS"
|
||||
\\
|
||||
},
|
||||
} } };
|
||||
defer toolchain.deinit();
|
||||
|
||||
try toolchain.discover();
|
||||
|
||||
var argv = std.array_list.Managed([]const u8).init(driver.comp.gpa);
|
||||
defer argv.deinit();
|
||||
|
||||
var linker_path_buf: [std.fs.max_path_bytes]u8 = undefined;
|
||||
const linker_path = try toolchain.getLinkerPath(&linker_path_buf);
|
||||
try argv.append(linker_path);
|
||||
|
||||
try toolchain.buildLinkerArgs(&argv);
|
||||
|
||||
const expected = [_][]const u8{
|
||||
"/usr/bin/ld",
|
||||
"-z",
|
||||
"relro",
|
||||
"--hash-style=gnu",
|
||||
"--eh-frame-hdr",
|
||||
"-m",
|
||||
"elf_x86_64",
|
||||
"-dynamic-linker",
|
||||
"/lib64/ld-linux-x86-64.so.2",
|
||||
"-o",
|
||||
"a.out",
|
||||
"/lib/x86_64-linux-gnu/crt1.o",
|
||||
"/lib/x86_64-linux-gnu/crti.o",
|
||||
"/usr/lib/gcc/x86_64-linux-gnu/9/crtbegin.o",
|
||||
"-L/usr/lib/gcc/x86_64-linux-gnu/9",
|
||||
"-L/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib64",
|
||||
"-L/lib/x86_64-linux-gnu",
|
||||
"-L/lib/../lib64",
|
||||
"-L/usr/lib/x86_64-linux-gnu",
|
||||
"-L/usr/lib/../lib64",
|
||||
"-L/lib",
|
||||
"-L/usr/lib",
|
||||
link_obj,
|
||||
"-lgcc",
|
||||
"--as-needed",
|
||||
"-lgcc_s",
|
||||
"--no-as-needed",
|
||||
"-lc",
|
||||
"-lgcc",
|
||||
"--as-needed",
|
||||
"-lgcc_s",
|
||||
"--no-as-needed",
|
||||
"/usr/lib/gcc/x86_64-linux-gnu/9/crtend.o",
|
||||
"/lib/x86_64-linux-gnu/crtn.o",
|
||||
};
|
||||
try std.testing.expectEqual(expected.len, argv.items.len);
|
||||
for (expected, argv.items) |expected_item, actual_item| {
|
||||
try std.testing.expectEqualStrings(expected_item, actual_item);
|
||||
}
|
||||
}
|
||||
12
lib/compiler/aro/assembly_backend.zig
vendored
Normal file
@ -0,0 +1,12 @@
const std = @import("std");

const aro = @import("aro");

pub const x86_64 = @import("assembly_backend/x86_64.zig");

pub fn genAsm(target: std.Target, tree: *const aro.Tree) aro.Compilation.Error!aro.Assembly {
    return switch (target.cpu.arch) {
        .x86_64 => x86_64.genAsm(tree),
        else => std.debug.panic("genAsm not implemented: {s}", .{@tagName(target.cpu.arch)}),
    };
}
255
lib/compiler/aro/assembly_backend/x86_64.zig
vendored
Normal file
|
|
@ -0,0 +1,255 @@
|
|||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const aro = @import("aro");
|
||||
const Assembly = aro.Assembly;
|
||||
const Compilation = aro.Compilation;
|
||||
const Node = Tree.Node;
|
||||
const Source = aro.Source;
|
||||
const Tree = aro.Tree;
|
||||
const QualType = aro.QualType;
|
||||
const Value = aro.Value;
|
||||
|
||||
const AsmCodeGen = @This();
|
||||
const Error = aro.Compilation.Error;
|
||||
|
||||
tree: *const Tree,
|
||||
comp: *Compilation,
|
||||
text: *std.Io.Writer,
|
||||
data: *std.Io.Writer,
|
||||
|
||||
const StorageUnit = enum(u8) {
|
||||
byte = 8,
|
||||
short = 16,
|
||||
long = 32,
|
||||
quad = 64,
|
||||
|
||||
fn trunc(self: StorageUnit, val: u64) u64 {
|
||||
return switch (self) {
|
||||
.byte => @as(u8, @truncate(val)),
|
||||
.short => @as(u16, @truncate(val)),
|
||||
.long => @as(u32, @truncate(val)),
|
||||
.quad => val,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
fn serializeInt(value: u64, storage_unit: StorageUnit, w: *std.Io.Writer) !void {
|
||||
try w.print(" .{s} 0x{x}\n", .{ @tagName(storage_unit), storage_unit.trunc(value) });
|
||||
}
|
||||
|
||||
fn serializeFloat(comptime T: type, value: T, w: *std.Io.Writer) !void {
|
||||
switch (T) {
|
||||
f128 => {
|
||||
const bytes = std.mem.asBytes(&value);
|
||||
const first = std.mem.bytesToValue(u64, bytes[0..8]);
|
||||
try serializeInt(first, .quad, w);
|
||||
const second = std.mem.bytesToValue(u64, bytes[8..16]);
|
||||
return serializeInt(second, .quad, w);
|
||||
},
|
||||
f80 => {
|
||||
const bytes = std.mem.asBytes(&value);
|
||||
const first = std.mem.bytesToValue(u64, bytes[0..8]);
|
||||
try serializeInt(first, .quad, w);
|
||||
const second = std.mem.bytesToValue(u16, bytes[8..10]);
|
||||
try serializeInt(second, .short, w);
|
||||
return w.writeAll(" .zero 6\n");
|
||||
},
|
||||
else => {
|
||||
const size = @bitSizeOf(T);
|
||||
const storage_unit = std.meta.intToEnum(StorageUnit, size) catch unreachable;
|
||||
const IntTy = @Type(.{ .int = .{ .signedness = .unsigned, .bits = size } });
|
||||
const int_val: IntTy = @bitCast(value);
|
||||
return serializeInt(int_val, storage_unit, w);
|
||||
},
|
||||
}
|
||||
}
|
||||
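For illustration, a standalone sketch (not part of this commit) of the bit-cast-and-emit scheme used by serializeInt/serializeFloat above: the float's bytes are reinterpreted as an unsigned integer of the same width and emitted as data directives (f80 and f128 are split into a quad plus a trailing chunk). The 0x400c... constant is simply the IEEE-754 encoding of 3.5.
const std = @import("std");

pub fn main() void {
    // Reinterpret the float's bits as an unsigned integer of the same width,
    // then emit it as a single .quad directive.
    const value: f64 = 3.5;
    const bits: u64 = @bitCast(value);
    std.debug.print("    .quad 0x{x}\n", .{bits}); // prints: .quad 0x400c000000000000
}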
|
||||
pub fn todo(c: *AsmCodeGen, msg: []const u8, tok: Tree.TokenIndex) Error {
|
||||
const loc: Source.Location = c.tree.tokens.items(.loc)[tok];
|
||||
|
||||
var sf = std.heap.stackFallback(1024, c.comp.gpa);
|
||||
const allocator = sf.get();
|
||||
var buf: std.ArrayList(u8) = .empty;
|
||||
defer buf.deinit(allocator);
|
||||
|
||||
try buf.print(allocator, "TODO: {s}", .{msg});
|
||||
try c.comp.diagnostics.add(.{
|
||||
.text = buf.items,
|
||||
.kind = .@"error",
|
||||
.location = loc.expand(c.comp),
|
||||
});
|
||||
return error.FatalError;
|
||||
}
|
||||
|
||||
fn emitAggregate(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
|
||||
_ = qt;
|
||||
return c.todo("Codegen aggregates", node.tok(c.tree));
|
||||
}
|
||||
|
||||
fn emitSingleValue(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
|
||||
const value = c.tree.value_map.get(node) orelse return;
|
||||
const bit_size = qt.bitSizeof(c.comp);
|
||||
const scalar_kind = qt.scalarKind(c.comp);
|
||||
if (!scalar_kind.isReal()) {
|
||||
return c.todo("Codegen _Complex values", node.tok(c.tree));
|
||||
} else if (scalar_kind.isInt()) {
|
||||
const storage_unit = std.meta.intToEnum(StorageUnit, bit_size) catch return c.todo("Codegen _BitInt values", node.tok(c.tree));
|
||||
try c.data.print(" .{s} ", .{@tagName(storage_unit)});
|
||||
_ = try value.print(qt, c.comp, c.data);
|
||||
try c.data.writeByte('\n');
|
||||
} else if (scalar_kind.isFloat()) {
|
||||
switch (bit_size) {
|
||||
16 => return serializeFloat(f16, value.toFloat(f16, c.comp), c.data),
|
||||
32 => return serializeFloat(f32, value.toFloat(f32, c.comp), c.data),
|
||||
64 => return serializeFloat(f64, value.toFloat(f64, c.comp), c.data),
|
||||
80 => return serializeFloat(f80, value.toFloat(f80, c.comp), c.data),
|
||||
128 => return serializeFloat(f128, value.toFloat(f128, c.comp), c.data),
|
||||
else => unreachable,
|
||||
}
|
||||
} else if (scalar_kind.isPointer()) {
|
||||
return c.todo("Codegen pointer", node.tok(c.tree));
|
||||
} else if (qt.is(c.comp, .array)) {
|
||||
// Todo:
|
||||
// Handle truncated initializers e.g. char x[3] = "hello";
|
||||
// Zero out remaining bytes if initializer is shorter than storage capacity
|
||||
// Handle non-char strings
|
||||
const bytes = value.toBytes(c.comp);
|
||||
const directive = if (bytes.len > bit_size / 8) "ascii" else "string";
|
||||
try c.data.print(" .{s} ", .{directive});
|
||||
try Value.printString(bytes, qt, c.comp, c.data);
|
||||
|
||||
try c.data.writeByte('\n');
|
||||
} else unreachable;
|
||||
}
|
||||
|
||||
fn emitValue(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
|
||||
switch (node.get(c.tree)) {
|
||||
.array_init_expr,
|
||||
.struct_init_expr,
|
||||
.union_init_expr,
|
||||
=> return c.todo("Codegen multiple inits", node.tok(c.tree)),
|
||||
else => return c.emitSingleValue(qt, node),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn genAsm(tree: *const Tree) Error!Assembly {
|
||||
var data: std.Io.Writer.Allocating = .init(tree.comp.gpa);
|
||||
defer data.deinit();
|
||||
|
||||
var text: std.Io.Writer.Allocating = .init(tree.comp.gpa);
|
||||
defer text.deinit();
|
||||
|
||||
var codegen: AsmCodeGen = .{
|
||||
.tree = tree,
|
||||
.comp = tree.comp,
|
||||
.text = &text.writer,
|
||||
.data = &data.writer,
|
||||
};
|
||||
|
||||
codegen.genDecls() catch |err| switch (err) {
|
||||
error.WriteFailed => return error.OutOfMemory,
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.FatalError => return error.FatalError,
|
||||
};
|
||||
|
||||
const text_slice = try text.toOwnedSlice();
|
||||
errdefer tree.comp.gpa.free(text_slice);
|
||||
const data_slice = try data.toOwnedSlice();
|
||||
return .{
|
||||
.text = text_slice,
|
||||
.data = data_slice,
|
||||
};
|
||||
}
|
||||
|
||||
fn genDecls(c: *AsmCodeGen) !void {
|
||||
if (c.tree.comp.code_gen_options.debug != .strip) {
|
||||
const sources = c.tree.comp.sources.values();
|
||||
for (sources) |source| {
|
||||
try c.data.print(" .file {d} \"{s}\"\n", .{ @intFromEnum(source.id) - 1, source.path });
|
||||
}
|
||||
}
|
||||
|
||||
for (c.tree.root_decls.items) |decl| {
|
||||
switch (decl.get(c.tree)) {
|
||||
.static_assert,
|
||||
.typedef,
|
||||
.struct_decl,
|
||||
.union_decl,
|
||||
.enum_decl,
|
||||
=> {},
|
||||
|
||||
.function => |function| {
|
||||
if (function.body == null) continue;
|
||||
try c.genFn(function);
|
||||
},
|
||||
|
||||
.variable => |variable| try c.genVar(variable),
|
||||
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
try c.text.writeAll(" .section .note.GNU-stack,\"\",@progbits\n");
|
||||
}
|
||||
|
||||
fn genFn(c: *AsmCodeGen, function: Node.Function) !void {
|
||||
return c.todo("Codegen functions", function.name_tok);
|
||||
}
|
||||
|
||||
fn genVar(c: *AsmCodeGen, variable: Node.Variable) !void {
|
||||
const comp = c.comp;
|
||||
const qt = variable.qt;
|
||||
|
||||
const is_tentative = variable.initializer == null;
|
||||
const size = qt.sizeofOrNull(comp) orelse blk: {
|
||||
// tentative array definition assumed to have one element
|
||||
std.debug.assert(is_tentative and qt.is(c.comp, .array));
|
||||
break :blk qt.childType(c.comp).sizeof(comp);
|
||||
};
|
||||
|
||||
const name = c.tree.tokSlice(variable.name_tok);
|
||||
const nat_align = qt.alignof(comp);
|
||||
const alignment = if (qt.is(c.comp, .array) and size >= 16) @max(16, nat_align) else nat_align;
|
||||
|
||||
if (variable.storage_class == .static) {
|
||||
try c.data.print(" .local \"{s}\"\n", .{name});
|
||||
} else {
|
||||
try c.data.print(" .globl \"{s}\"\n", .{name});
|
||||
}
|
||||
|
||||
if (is_tentative and comp.code_gen_options.common) {
|
||||
try c.data.print(" .comm \"{s}\", {d}, {d}\n", .{ name, size, alignment });
|
||||
return;
|
||||
}
|
||||
if (variable.initializer) |init| {
|
||||
if (variable.thread_local and comp.code_gen_options.data_sections) {
|
||||
try c.data.print(" .section .tdata.\"{s}\",\"awT\",@progbits\n", .{name});
|
||||
} else if (variable.thread_local) {
|
||||
try c.data.writeAll(" .section .tdata,\"awT\",@progbits\n");
|
||||
} else if (comp.code_gen_options.data_sections) {
|
||||
try c.data.print(" .section .data.\"{s}\",\"aw\",@progbits\n", .{name});
|
||||
} else {
|
||||
try c.data.writeAll(" .data\n");
|
||||
}
|
||||
|
||||
try c.data.print(" .type \"{s}\", @object\n", .{name});
|
||||
try c.data.print(" .size \"{s}\", {d}\n", .{ name, size });
|
||||
try c.data.print(" .align {d}\n", .{alignment});
|
||||
try c.data.print("\"{s}\":\n", .{name});
|
||||
try c.emitValue(qt, init);
|
||||
return;
|
||||
}
|
||||
if (variable.thread_local and comp.code_gen_options.data_sections) {
|
||||
try c.data.print(" .section .tbss.\"{s}\",\"awT\",@nobits\n", .{name});
|
||||
} else if (variable.thread_local) {
|
||||
try c.data.writeAll(" .section .tbss,\"awT\",@nobits\n");
|
||||
} else if (comp.code_gen_options.data_sections) {
|
||||
try c.data.print(" .section .bss.\"{s}\",\"aw\",@nobits\n", .{name});
|
||||
} else {
|
||||
try c.data.writeAll(" .bss\n");
|
||||
}
|
||||
try c.data.print(" .align {d}\n", .{alignment});
|
||||
try c.data.print("\"{s}\":\n", .{name});
|
||||
try c.data.print(" .zero {d}\n", .{size});
|
||||
}
|
||||
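For orientation, a hypothetical example of what this genVar path produces for a file-scope `int x = 5;` with default options (no data_sections, non-static, assuming the interned value prints in decimal) would be roughly:
    .globl "x"
    .data
    .type "x", @object
    .size "x", 4
    .align 4
"x":
    .long 5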
13
lib/compiler/aro/backend.zig
vendored
@ -1,12 +1,23 @@
pub const Assembly = @import("backend/Assembly.zig");
pub const CodeGenOptions = @import("backend/CodeGenOptions.zig");
pub const Interner = @import("backend/Interner.zig");
pub const Ir = @import("backend/Ir.zig");
pub const Object = @import("backend/Object.zig");

pub const CallingConvention = enum {
    C,
    c,
    stdcall,
    thiscall,
    vectorcall,
    fastcall,
    regcall,
    riscv_vector,
    aarch64_sve_pcs,
    aarch64_vector_pcs,
    arm_aapcs,
    arm_aapcs_vfp,
    x86_64_sysv,
    x86_64_win,
};

pub const version_str = "aro-zig";
20
lib/compiler/aro/backend/Assembly.zig
vendored
Normal file
@ -0,0 +1,20 @@
const std = @import("std");
const Allocator = std.mem.Allocator;

data: []const u8,
text: []const u8,

const Assembly = @This();

pub fn deinit(self: *const Assembly, gpa: Allocator) void {
    gpa.free(self.data);
    gpa.free(self.text);
}

pub fn writeToFile(self: Assembly, file: std.fs.File) !void {
    var vec: [2]std.posix.iovec_const = .{
        .{ .base = self.data.ptr, .len = self.data.len },
        .{ .base = self.text.ptr, .len = self.text.len },
    };
    return file.writevAll(&vec);
}
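A small usage sketch under assumed names (the import path, function name, and output file name are illustrative, not from the diff):
const std = @import("std");
const Assembly = @import("backend/Assembly.zig");

fn emitToDisk(gpa: std.mem.Allocator, result: Assembly) !void {
    defer result.deinit(gpa); // frees both owned slices
    const file = try std.fs.cwd().createFile("out.s", .{});
    defer file.close();
    try result.writeToFile(file); // writes .data then .text in one writev call
}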
94
lib/compiler/aro/backend/CodeGenOptions.zig
vendored
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
const std = @import("std");
|
||||
|
||||
/// place uninitialized global variables in a common block
|
||||
common: bool,
|
||||
/// Place each function into its own section in the output file if the target supports arbitrary sections
|
||||
func_sections: bool,
|
||||
/// Place each data item into its own section in the output file if the target supports arbitrary sections
|
||||
data_sections: bool,
|
||||
pic_level: PicLevel,
|
||||
/// Generate position-independent code that can only be linked into executables
|
||||
is_pie: bool,
|
||||
optimization_level: OptimizationLevel,
|
||||
/// Generate debug information
|
||||
debug: DebugFormat,
|
||||
dwarf_version: DwarfVersion,
|
||||
|
||||
pub const DebugFormat = union(enum) {
|
||||
strip,
|
||||
dwarf: std.dwarf.Format,
|
||||
code_view,
|
||||
};
|
||||
|
||||
pub const DwarfVersion = enum(u3) {
|
||||
@"0" = 0,
|
||||
@"2" = 2,
|
||||
@"3" = 3,
|
||||
@"4" = 4,
|
||||
@"5" = 5,
|
||||
};
|
||||
|
||||
pub const PicLevel = enum(u8) {
|
||||
/// Do not generate position-independent code
|
||||
none = 0,
|
||||
/// Generate position-independent code (PIC) suitable for use in a shared library, if supported for the target machine.
|
||||
one = 1,
|
||||
/// If supported for the target machine, emit position-independent code, suitable for dynamic linking and avoiding
|
||||
/// any limit on the size of the global offset table.
|
||||
two = 2,
|
||||
};
|
||||
|
||||
pub const OptimizationLevel = enum {
|
||||
@"0",
|
||||
@"1",
|
||||
@"2",
|
||||
@"3",
|
||||
/// Optimize for size
|
||||
s,
|
||||
/// Disregard strict standards compliance
|
||||
fast,
|
||||
/// Optimize debugging experience
|
||||
g,
|
||||
/// Optimize aggressively for size rather than speed
|
||||
z,
|
||||
|
||||
const level_map = std.StaticStringMap(OptimizationLevel).initComptime(.{
|
||||
.{ "0", .@"0" },
|
||||
.{ "1", .@"1" },
|
||||
.{ "2", .@"2" },
|
||||
.{ "3", .@"3" },
|
||||
.{ "s", .s },
|
||||
.{ "fast", .fast },
|
||||
.{ "g", .g },
|
||||
.{ "z", .z },
|
||||
});
|
||||
|
||||
pub fn fromString(str: []const u8) ?OptimizationLevel {
|
||||
return level_map.get(str);
|
||||
}
|
||||
|
||||
pub fn isSizeOptimized(self: OptimizationLevel) bool {
|
||||
return switch (self) {
|
||||
.s, .z => true,
|
||||
.@"0", .@"1", .@"2", .@"3", .fast, .g => false,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn hasAnyOptimizations(self: OptimizationLevel) bool {
|
||||
return switch (self) {
|
||||
.@"0" => false,
|
||||
.@"1", .@"2", .@"3", .s, .fast, .g, .z => true,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const default: @This() = .{
|
||||
.common = false,
|
||||
.func_sections = false,
|
||||
.data_sections = false,
|
||||
.pic_level = .none,
|
||||
.is_pie = false,
|
||||
.optimization_level = .@"0",
|
||||
.debug = .strip,
|
||||
.dwarf_version = .@"0",
|
||||
};
|
||||
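A brief sketch of how a driver might consume these options; the import path and helper name are assumptions for illustration only:
const std = @import("std");
const CodeGenOptions = @import("backend/CodeGenOptions.zig");

fn applyOptArg(opts: *CodeGenOptions, arg: []const u8) void {
    // arg is the text after "-O", e.g. "2", "s" or "fast"
    if (CodeGenOptions.OptimizationLevel.fromString(arg)) |level| {
        opts.optimization_level = level;
    }
}

test applyOptArg {
    var opts: CodeGenOptions = .default;
    applyOptArg(&opts, "z");
    try std.testing.expectEqual(CodeGenOptions.OptimizationLevel.z, opts.optimization_level);
}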
42
lib/compiler/aro/backend/Interner.zig
vendored
|
|
@ -12,10 +12,10 @@ map: std.AutoArrayHashMapUnmanaged(void, void) = .empty,
|
|||
items: std.MultiArrayList(struct {
|
||||
tag: Tag,
|
||||
data: u32,
|
||||
}) = .{},
|
||||
extra: std.ArrayListUnmanaged(u32) = .empty,
|
||||
limbs: std.ArrayListUnmanaged(Limb) = .empty,
|
||||
strings: std.ArrayListUnmanaged(u8) = .empty,
|
||||
}) = .empty,
|
||||
extra: std.ArrayList(u32) = .empty,
|
||||
limbs: std.ArrayList(Limb) = .empty,
|
||||
strings: std.ArrayList(u8) = .empty,
|
||||
|
||||
const KeyAdapter = struct {
|
||||
interner: *const Interner,
|
||||
|
|
@ -65,6 +65,7 @@ pub const Key = union(enum) {
|
|||
float: Float,
|
||||
complex: Complex,
|
||||
bytes: []const u8,
|
||||
pointer: Pointer,
|
||||
|
||||
pub const Float = union(enum) {
|
||||
f16: f16,
|
||||
|
|
@ -80,6 +81,12 @@ pub const Key = union(enum) {
|
|||
cf80: [2]f80,
|
||||
cf128: [2]f128,
|
||||
};
|
||||
pub const Pointer = struct {
|
||||
/// NodeIndex of decl or compound literal whose address we are offsetting from
|
||||
node: u32,
|
||||
/// Offset in bytes
|
||||
offset: Ref,
|
||||
};
|
||||
|
||||
pub fn hash(key: Key) u32 {
|
||||
var hasher = Hash.init(0);
|
||||
|
|
@ -199,6 +206,10 @@ pub const Key = union(enum) {
|
|||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn toBigInt(key: Key, space: *Tag.Int.BigIntSpace) BigIntConst {
|
||||
return key.int.toBigInt(space);
|
||||
}
|
||||
};
|
||||
|
||||
pub const Ref = enum(u32) {
|
||||
|
|
@ -303,6 +314,8 @@ pub const Tag = enum(u8) {
|
|||
bytes,
|
||||
/// `data` is `Record`
|
||||
record_ty,
|
||||
/// `data` is Pointer
|
||||
pointer,
|
||||
|
||||
pub const Array = struct {
|
||||
len0: u32,
|
||||
|
|
@ -322,6 +335,11 @@ pub const Tag = enum(u8) {
|
|||
child: Ref,
|
||||
};
|
||||
|
||||
pub const Pointer = struct {
|
||||
node: u32,
|
||||
offset: Ref,
|
||||
};
|
||||
|
||||
pub const Int = struct {
|
||||
limbs_index: u32,
|
||||
limbs_len: u32,
|
||||
|
|
@ -606,6 +624,15 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
|
|||
}),
|
||||
});
|
||||
},
|
||||
.pointer => |info| {
|
||||
i.items.appendAssumeCapacity(.{
|
||||
.tag = .pointer,
|
||||
.data = try i.addExtra(gpa, Tag.Pointer{
|
||||
.node = info.node,
|
||||
.offset = info.offset,
|
||||
}),
|
||||
});
|
||||
},
|
||||
.int => |repr| int: {
|
||||
var space: Tag.Int.BigIntSpace = undefined;
|
||||
const big = repr.toBigInt(&space);
|
||||
|
|
@ -792,6 +819,13 @@ pub fn get(i: *const Interner, ref: Ref) Key {
|
|||
.child = vector_ty.child,
|
||||
} };
|
||||
},
|
||||
.pointer => {
|
||||
const pointer = i.extraData(Tag.Pointer, data);
|
||||
return .{ .pointer = .{
|
||||
.node = pointer.node,
|
||||
.offset = pointer.offset,
|
||||
} };
|
||||
},
|
||||
.u32 => .{ .int = .{ .u64 = data } },
|
||||
.i32 => .{ .int = .{ .i64 = @as(i32, @bitCast(data)) } },
|
||||
.int_positive, .int_negative => {
|
||||
|
|
|
|||
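A hedged round-trip sketch for the new pointer key added above (the node id is arbitrary, and `offset` must already be an interned Ref; the import path is assumed):
const std = @import("std");
const Interner = @import("backend/Interner.zig");

fn internPointer(interner: *Interner, gpa: std.mem.Allocator, offset: Interner.Ref) !Interner.Ref {
    const ref = try interner.put(gpa, .{ .pointer = .{ .node = 42, .offset = offset } });
    std.debug.assert(interner.get(ref).pointer.node == 42);
    return ref;
}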
53
lib/compiler/aro/backend/Ir.zig
vendored
|
|
@ -11,7 +11,7 @@ decls: std.StringArrayHashMapUnmanaged(Decl),
|
|||
|
||||
pub const Decl = struct {
|
||||
instructions: std.MultiArrayList(Inst),
|
||||
body: std.ArrayListUnmanaged(Ref),
|
||||
body: std.ArrayList(Ref),
|
||||
arena: std.heap.ArenaAllocator.State,
|
||||
|
||||
pub fn deinit(decl: *Decl, gpa: Allocator) void {
|
||||
|
|
@ -27,8 +27,8 @@ pub const Builder = struct {
|
|||
interner: *Interner,
|
||||
|
||||
decls: std.StringArrayHashMapUnmanaged(Decl) = .empty,
|
||||
instructions: std.MultiArrayList(Ir.Inst) = .{},
|
||||
body: std.ArrayListUnmanaged(Ref) = .empty,
|
||||
instructions: std.MultiArrayList(Ir.Inst) = .empty,
|
||||
body: std.ArrayList(Ref) = .empty,
|
||||
alloc_count: u32 = 0,
|
||||
arg_count: u32 = 0,
|
||||
current_label: Ref = undefined,
|
||||
|
|
@ -380,23 +380,24 @@ const REF = std.Io.tty.Color.bright_blue;
|
|||
const LITERAL = std.Io.tty.Color.bright_green;
|
||||
const ATTRIBUTE = std.Io.tty.Color.bright_yellow;
|
||||
|
||||
const RefMap = std.AutoArrayHashMap(Ref, void);
|
||||
const RefMap = std.AutoArrayHashMapUnmanaged(Ref, void);
|
||||
|
||||
pub fn dump(ir: *const Ir, gpa: Allocator, config: std.Io.tty.Config, w: anytype) !void {
|
||||
pub fn dump(ir: *const Ir, gpa: Allocator, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
for (ir.decls.keys(), ir.decls.values()) |name, *decl| {
|
||||
try ir.dumpDecl(decl, gpa, name, config, w);
|
||||
}
|
||||
try w.flush();
|
||||
}
|
||||
|
||||
fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
const tags = decl.instructions.items(.tag);
|
||||
const data = decl.instructions.items(.data);
|
||||
|
||||
var ref_map = RefMap.init(gpa);
|
||||
defer ref_map.deinit();
|
||||
var ref_map: RefMap = .empty;
|
||||
defer ref_map.deinit(gpa);
|
||||
|
||||
var label_map = RefMap.init(gpa);
|
||||
defer label_map.deinit();
|
||||
var label_map: RefMap = .empty;
|
||||
defer label_map.deinit(gpa);
|
||||
|
||||
const ret_inst = decl.body.items[decl.body.items.len - 1];
|
||||
const ret_operand = data[@intFromEnum(ret_inst)].un;
|
||||
|
|
@ -412,14 +413,14 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
const ref = decl.body.items[arg_count];
|
||||
if (tags[@intFromEnum(ref)] != .arg) break;
|
||||
if (arg_count != 0) try w.writeAll(", ");
|
||||
try ref_map.put(ref, {});
|
||||
try ref_map.put(gpa, ref, {});
|
||||
try ir.writeRef(decl, &ref_map, ref, config, w);
|
||||
try config.setColor(w, .reset);
|
||||
}
|
||||
try w.writeAll(") {\n");
|
||||
for (decl.body.items[arg_count..]) |ref| {
|
||||
switch (tags[@intFromEnum(ref)]) {
|
||||
.label => try label_map.put(ref, {}),
|
||||
.label => try label_map.put(gpa, ref, {}),
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
|
@ -460,7 +461,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
},
|
||||
.select => {
|
||||
const br = data[i].branch;
|
||||
try ir.writeNewRef(decl, &ref_map, ref, config, w);
|
||||
try ir.writeNewRef(gpa, decl, &ref_map, ref, config, w);
|
||||
try w.writeAll("select ");
|
||||
try ir.writeRef(decl, &ref_map, br.cond, config, w);
|
||||
try config.setColor(w, .reset);
|
||||
|
|
@ -500,7 +501,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
},
|
||||
.call => {
|
||||
const call = data[i].call;
|
||||
try ir.writeNewRef(decl, &ref_map, ref, config, w);
|
||||
try ir.writeNewRef(gpa, decl, &ref_map, ref, config, w);
|
||||
try w.writeAll("call ");
|
||||
try ir.writeRef(decl, &ref_map, call.func, config, w);
|
||||
try config.setColor(w, .reset);
|
||||
|
|
@ -514,7 +515,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
},
|
||||
.alloc => {
|
||||
const alloc = data[i].alloc;
|
||||
try ir.writeNewRef(decl, &ref_map, ref, config, w);
|
||||
try ir.writeNewRef(gpa, decl, &ref_map, ref, config, w);
|
||||
try w.writeAll("alloc ");
|
||||
try config.setColor(w, ATTRIBUTE);
|
||||
try w.writeAll("size ");
|
||||
|
|
@ -527,7 +528,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
try w.writeByte('\n');
|
||||
},
|
||||
.phi => {
|
||||
try ir.writeNewRef(decl, &ref_map, ref, config, w);
|
||||
try ir.writeNewRef(gpa, decl, &ref_map, ref, config, w);
|
||||
try w.writeAll("phi");
|
||||
try config.setColor(w, .reset);
|
||||
try w.writeAll(" {");
|
||||
|
|
@ -559,7 +560,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
try w.writeByte('\n');
|
||||
},
|
||||
.load => {
|
||||
try ir.writeNewRef(decl, &ref_map, ref, config, w);
|
||||
try ir.writeNewRef(gpa, decl, &ref_map, ref, config, w);
|
||||
try w.writeAll("load ");
|
||||
try ir.writeRef(decl, &ref_map, data[i].un, config, w);
|
||||
try w.writeByte('\n');
|
||||
|
|
@ -582,7 +583,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
.mod,
|
||||
=> {
|
||||
const bin = data[i].bin;
|
||||
try ir.writeNewRef(decl, &ref_map, ref, config, w);
|
||||
try ir.writeNewRef(gpa, decl, &ref_map, ref, config, w);
|
||||
try w.print("{s} ", .{@tagName(tag)});
|
||||
try ir.writeRef(decl, &ref_map, bin.lhs, config, w);
|
||||
try config.setColor(w, .reset);
|
||||
|
|
@ -597,7 +598,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
.sext,
|
||||
=> {
|
||||
const un = data[i].un;
|
||||
try ir.writeNewRef(decl, &ref_map, ref, config, w);
|
||||
try ir.writeNewRef(gpa, decl, &ref_map, ref, config, w);
|
||||
try w.print("{s} ", .{@tagName(tag)});
|
||||
try ir.writeRef(decl, &ref_map, un, config, w);
|
||||
try w.writeByte('\n');
|
||||
|
|
@ -609,7 +610,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
|||
try w.writeAll("}\n\n");
|
||||
}
|
||||
|
||||
fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
const ty = ir.interner.get(ty_ref);
|
||||
try config.setColor(w, TYPE);
|
||||
switch (ty) {
|
||||
|
|
@ -639,7 +640,7 @@ fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: anytype
|
|||
}
|
||||
}
|
||||
|
||||
fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
try config.setColor(w, LITERAL);
|
||||
const key = ir.interner.get(val);
|
||||
switch (key) {
|
||||
|
|
@ -650,12 +651,12 @@ fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: anytype)
|
|||
.float => |repr| switch (repr) {
|
||||
inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}),
|
||||
},
|
||||
.bytes => |b| return std.zig.stringEscape(b, "", .{}, w),
|
||||
.bytes => |b| return std.zig.stringEscape(b, w),
|
||||
else => unreachable, // not a value
|
||||
}
|
||||
}
|
||||
|
||||
fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
assert(ref != .none);
|
||||
const index = @intFromEnum(ref);
|
||||
const ty_ref = decl.instructions.items(.ty)[index];
|
||||
|
|
@ -678,8 +679,8 @@ fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.I
|
|||
try w.print(" %{d}", .{ref_index});
|
||||
}
|
||||
|
||||
fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
try ref_map.put(ref, {});
|
||||
fn writeNewRef(ir: Ir, gpa: Allocator, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
try ref_map.put(gpa, ref, {});
|
||||
try w.writeAll(" ");
|
||||
try ir.writeRef(decl, ref_map, ref, config, w);
|
||||
try config.setColor(w, .reset);
|
||||
|
|
@ -687,7 +688,7 @@ fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: st
|
|||
try config.setColor(w, INST);
|
||||
}
|
||||
|
||||
fn writeLabel(decl: *const Decl, label_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeLabel(decl: *const Decl, label_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
assert(ref != .none);
|
||||
const index = @intFromEnum(ref);
|
||||
const label = decl.instructions.items(.data)[index].label;
|
||||
|
|
|
|||
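The managed-to-unmanaged hash-map migration applied in this file, shown in isolation as a sketch (map type and key are arbitrary): the unmanaged variant stores no allocator, so every allocating call takes one explicitly.
const std = @import("std");

fn example(gpa: std.mem.Allocator) !void {
    var map: std.AutoArrayHashMapUnmanaged(u32, void) = .empty;
    defer map.deinit(gpa); // allocator supplied at the call site
    try map.put(gpa, 123, {});
}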
6
lib/compiler/aro/backend/Object.zig
vendored
@ -30,7 +30,7 @@ pub const Section = union(enum) {
    custom: []const u8,
};

pub fn getSection(obj: *Object, section: Section) !*std.array_list.Managed(u8) {
pub fn getSection(obj: *Object, section: Section) !*std.ArrayList(u8) {
    switch (obj.format) {
        .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).getSection(section),
        else => unreachable,

@ -65,9 +65,9 @@ pub fn addRelocation(obj: *Object, name: []const u8, section: Section, address:
    }
}

pub fn finish(obj: *Object, file: std.fs.File) !void {
pub fn finish(obj: *Object, w: *std.Io.Writer) !void {
    switch (obj.format) {
        .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(file),
        .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(w),
        else => unreachable,
    }
}
49
lib/compiler/aro/backend/Object/Elf.zig
vendored
|
|
@ -4,8 +4,8 @@ const Target = std.Target;
|
|||
const Object = @import("../Object.zig");
|
||||
|
||||
const Section = struct {
|
||||
data: std.array_list.Managed(u8),
|
||||
relocations: std.ArrayListUnmanaged(Relocation) = .empty,
|
||||
data: std.ArrayList(u8) = .empty,
|
||||
relocations: std.ArrayList(Relocation) = .empty,
|
||||
flags: u64,
|
||||
type: u32,
|
||||
index: u16 = undefined,
|
||||
|
|
@ -58,7 +58,7 @@ pub fn deinit(elf: *Elf) void {
|
|||
{
|
||||
var it = elf.sections.valueIterator();
|
||||
while (it.next()) |sect| {
|
||||
sect.*.data.deinit();
|
||||
sect.*.data.deinit(gpa);
|
||||
sect.*.relocations.deinit(gpa);
|
||||
}
|
||||
}
|
||||
|
|
@ -80,12 +80,12 @@ fn sectionString(sec: Object.Section) []const u8 {
|
|||
};
|
||||
}
|
||||
|
||||
pub fn getSection(elf: *Elf, section_kind: Object.Section) !*std.array_list.Managed(u8) {
|
||||
pub fn getSection(elf: *Elf, section_kind: Object.Section) !*std.ArrayList(u8) {
|
||||
const section_name = sectionString(section_kind);
|
||||
const section = elf.sections.get(section_name) orelse blk: {
|
||||
const section = try elf.arena.allocator().create(Section);
|
||||
section.* = .{
|
||||
.data = std.array_list.Managed(u8).init(elf.arena.child_allocator),
|
||||
.data = std.ArrayList(u8).init(elf.arena.child_allocator),
|
||||
.type = std.elf.SHT_PROGBITS,
|
||||
.flags = switch (section_kind) {
|
||||
.func, .custom => std.elf.SHF_ALLOC + std.elf.SHF_EXECINSTR,
|
||||
|
|
@ -170,12 +170,8 @@ pub fn addRelocation(elf: *Elf, name: []const u8, section_kind: Object.Section,
|
|||
/// relocations
|
||||
/// strtab
|
||||
/// section headers
|
||||
pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
var file_buffer: [1024]u8 = undefined;
|
||||
var file_writer = file.writer(&file_buffer);
|
||||
const w = &file_writer.interface;
|
||||
|
||||
var num_sections: std.elf.Elf64_Half = additional_sections;
|
||||
pub fn finish(elf: *Elf, w: *std.Io.Writer) !void {
|
||||
var num_sections: std.elf.Half = additional_sections;
|
||||
var relocations_len: std.elf.Elf64_Off = 0;
|
||||
var sections_len: std.elf.Elf64_Off = 0;
|
||||
{
|
||||
|
|
@ -196,8 +192,9 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
const strtab_offset = rela_offset + relocations_len;
|
||||
const sh_offset = strtab_offset + elf.strtab_len;
|
||||
const sh_offset_aligned = std.mem.alignForward(u64, sh_offset, 16);
|
||||
const endian = elf.obj.target.cpu.arch.endian();
|
||||
|
||||
const elf_header = std.elf.Elf64_Ehdr{
|
||||
const elf_header: std.elf.Elf64_Ehdr = .{
|
||||
.e_ident = .{ 0x7F, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
|
||||
.e_type = std.elf.ET.REL, // we only produce relocatables
|
||||
.e_machine = elf.obj.target.toElfMachine(),
|
||||
|
|
@ -213,7 +210,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
.e_shnum = num_sections,
|
||||
.e_shstrndx = strtab_index,
|
||||
};
|
||||
try w.writeStruct(elf_header);
|
||||
try w.writeStruct(elf_header, endian);
|
||||
|
||||
// write contents of sections
|
||||
{
|
||||
|
|
@ -222,13 +219,13 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
}
|
||||
|
||||
// pad to 8 bytes
|
||||
try w.writeByteNTimes(0, @intCast(symtab_offset_aligned - symtab_offset));
|
||||
try w.splatByteAll(0, @intCast(symtab_offset_aligned - symtab_offset));
|
||||
|
||||
var name_offset: u32 = strtab_default.len;
|
||||
// write symbols
|
||||
{
|
||||
// first symbol must be null
|
||||
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Sym));
|
||||
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Sym), endian);
|
||||
|
||||
var sym_index: u16 = 1;
|
||||
var it = elf.local_symbols.iterator();
|
||||
|
|
@ -241,7 +238,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
.st_shndx = if (sym.section) |some| some.index else 0,
|
||||
.st_value = sym.offset,
|
||||
.st_size = sym.size,
|
||||
});
|
||||
}, endian);
|
||||
sym.index = sym_index;
|
||||
sym_index += 1;
|
||||
name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte
|
||||
|
|
@ -256,7 +253,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
.st_shndx = if (sym.section) |some| some.index else 0,
|
||||
.st_value = sym.offset,
|
||||
.st_size = sym.size,
|
||||
});
|
||||
}, endian);
|
||||
sym.index = sym_index;
|
||||
sym_index += 1;
|
||||
name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte
|
||||
|
|
@ -272,7 +269,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
.r_offset = rela.offset,
|
||||
.r_addend = rela.addend,
|
||||
.r_info = (@as(u64, rela.symbol.index) << 32) | rela.type,
|
||||
});
|
||||
}, endian);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -294,13 +291,13 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
}
|
||||
|
||||
// pad to 16 bytes
|
||||
try w.writeByteNTimes(0, @intCast(sh_offset_aligned - sh_offset));
|
||||
try w.splatByteAll(0, @intCast(sh_offset_aligned - sh_offset));
|
||||
// mandatory null header
|
||||
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Shdr));
|
||||
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Shdr), endian);
|
||||
|
||||
// write strtab section header
|
||||
{
|
||||
const sect_header = std.elf.Elf64_Shdr{
|
||||
const sect_header: std.elf.Elf64_Shdr = .{
|
||||
.sh_name = strtab_name,
|
||||
.sh_type = std.elf.SHT_STRTAB,
|
||||
.sh_flags = 0,
|
||||
|
|
@ -312,12 +309,12 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
.sh_addralign = 1,
|
||||
.sh_entsize = 0,
|
||||
};
|
||||
try w.writeStruct(sect_header);
|
||||
try w.writeStruct(sect_header, endian);
|
||||
}
|
||||
|
||||
// write symtab section header
|
||||
{
|
||||
const sect_header = std.elf.Elf64_Shdr{
|
||||
const sect_header: std.elf.Elf64_Shdr = .{
|
||||
.sh_name = symtab_name,
|
||||
.sh_type = std.elf.SHT_SYMTAB,
|
||||
.sh_flags = 0,
|
||||
|
|
@ -329,7 +326,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
.sh_addralign = 8,
|
||||
.sh_entsize = @sizeOf(std.elf.Elf64_Sym),
|
||||
};
|
||||
try w.writeStruct(sect_header);
|
||||
try w.writeStruct(sect_header, endian);
|
||||
}
|
||||
|
||||
// remaining section headers
|
||||
|
|
@ -352,7 +349,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
.sh_info = 0,
|
||||
.sh_addralign = if (sect.flags & std.elf.SHF_EXECINSTR != 0) 16 else 1,
|
||||
.sh_entsize = 0,
|
||||
});
|
||||
}, endian);
|
||||
|
||||
if (rela_count != 0) {
|
||||
const size = rela_count * @sizeOf(std.elf.Elf64_Rela);
|
||||
|
|
@ -367,7 +364,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
|||
.sh_info = sect.index,
|
||||
.sh_addralign = 8,
|
||||
.sh_entsize = @sizeOf(std.elf.Elf64_Rela),
|
||||
});
|
||||
}, endian);
|
||||
rela_sect_offset += size;
|
||||
}
|
||||
|
||||
|
|
|
|||
126
lib/compiler/aro/include/float.h
vendored
Normal file
|
|
@ -0,0 +1,126 @@
|
|||
/* <float.h> for the Aro C compiler */
|
||||
|
||||
#pragma once
|
||||
|
||||
#undef FLT_RADIX
|
||||
#define FLT_RADIX __FLT_RADIX__
|
||||
|
||||
#undef FLT_MANT_DIG
|
||||
#define FLT_MANT_DIG __FLT_MANT_DIG__
|
||||
|
||||
#undef DBL_MANT_DIG
|
||||
#define DBL_MANT_DIG __DBL_MANT_DIG__
|
||||
|
||||
#undef LDBL_MANT_DIG
|
||||
#define LDBL_MANT_DIG __LDBL_MANT_DIG__
|
||||
|
||||
#if __STDC_VERSION__ >= 199901L
|
||||
#undef FLT_EVAL_METHOD
|
||||
#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
|
||||
|
||||
#undef DECIMAL_DIG
|
||||
#define DECIMAL_DIG __DECIMAL_DIG__
|
||||
#endif /* __STDC_VERSION__ >= 199901L */
|
||||
|
||||
#undef FLT_DIG
|
||||
#define FLT_DIG __FLT_DIG__
|
||||
|
||||
#undef DBL_DIG
|
||||
#define DBL_DIG __DBL_DIG__
|
||||
|
||||
#undef LDBL_DIG
|
||||
#define LDBL_DIG __LDBL_DIG__
|
||||
|
||||
#undef FLT_MIN_EXP
|
||||
#define FLT_MIN_EXP __FLT_MIN_EXP__
|
||||
|
||||
#undef DBL_MIN_EXP
|
||||
#define DBL_MIN_EXP __DBL_MIN_EXP__
|
||||
|
||||
#undef LDBL_MIN_EXP
|
||||
#define LDBL_MIN_EXP __LDBL_MIN_EXP__
|
||||
|
||||
#undef FLT_MIN_10_EXP
|
||||
#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
|
||||
|
||||
#undef DBL_MIN_10_EXP
|
||||
#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
|
||||
|
||||
#undef LDBL_MIN_10_EXP
|
||||
#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
|
||||
|
||||
#undef FLT_MAX_EXP
|
||||
#define FLT_MAX_EXP __FLT_MAX_EXP__
|
||||
|
||||
#undef DBL_MAX_EXP
|
||||
#define DBL_MAX_EXP __DBL_MAX_EXP__
|
||||
|
||||
#undef LDBL_MAX_EXP
|
||||
#define LDBL_MAX_EXP __LDBL_MAX_EXP__
|
||||
|
||||
#undef FLT_MAX_10_EXP
|
||||
#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
|
||||
|
||||
#undef DBL_MAX_10_EXP
|
||||
#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
|
||||
|
||||
#undef LDBL_MAX_10_EXP
|
||||
#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
|
||||
|
||||
#undef FLT_MAX
|
||||
#define FLT_MAX __FLT_MAX__
|
||||
|
||||
#undef DBL_MAX
|
||||
#define DBL_MAX __DBL_MAX__
|
||||
|
||||
#undef LDBL_MAX
|
||||
#define LDBL_MAX __LDBL_MAX__
|
||||
|
||||
#undef FLT_EPSILON
|
||||
#define FLT_EPSILON __FLT_EPSILON__
|
||||
|
||||
#undef DBL_EPSILON
|
||||
#define DBL_EPSILON __DBL_EPSILON__
|
||||
|
||||
#undef LDBL_EPSILON
|
||||
#define LDBL_EPSILON __LDBL_EPSILON__
|
||||
|
||||
#undef FLT_MIN
|
||||
#define FLT_MIN __FLT_MIN__
|
||||
|
||||
#undef DBL_MIN
|
||||
#define DBL_MIN __DBL_MIN__
|
||||
|
||||
#undef LDBL_MIN
|
||||
#define LDBL_MIN __LDBL_MIN__
|
||||
|
||||
#if __STDC_VERSION__ >= 201112L
|
||||
|
||||
#undef FLT_TRUE_MIN
|
||||
#define FLT_TRUE_MIN __FLT_DENORM_MIN__
|
||||
|
||||
#undef DBL_TRUE_MIN
|
||||
#define DBL_TRUE_MIN __DBL_DENORM_MIN__
|
||||
|
||||
#undef LDBL_TRUE_MIN
|
||||
#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
|
||||
|
||||
#undef FLT_DECIMAL_DIG
|
||||
#define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
|
||||
|
||||
#undef DBL_DECIMAL_DIG
|
||||
#define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
|
||||
|
||||
#undef LDBL_DECIMAL_DIG
|
||||
#define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
|
||||
|
||||
#undef FLT_HAS_SUBNORM
|
||||
#define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
|
||||
|
||||
#undef DBL_HAS_SUBNORM
|
||||
#define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
|
||||
|
||||
#undef LDBL_HAS_SUBNORM
|
||||
#define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
|
||||
|
||||
#endif /* __STDC_VERSION__ >= 201112L */
15 lib/compiler/aro/include/iso646.h vendored Normal file
@@ -0,0 +1,15 @@
/* <iso646.h> for the Aro C compiler */

#pragma once

#define and &&
#define and_eq &=
#define bitand &
#define bitor |
#define compl ~
#define not !
#define not_eq !=
#define or ||
#define or_eq |=
#define xor ^
#define xor_eq ^=
124 lib/compiler/aro/include/limits.h vendored Normal file
@@ -0,0 +1,124 @@
/* <limits.h> for the Aro C compiler */
|
||||
|
||||
#pragma once
|
||||
|
||||
/* GlibC will try to include_next GCC's limits.h which will fail.
|
||||
Define _GCC_LIMITS_H_ to prevent it. */
|
||||
#if defined __GNUC__ && !defined _GCC_LIMITS_H_
|
||||
#define _GCC_LIMITS_H_
|
||||
#endif
|
||||
|
||||
/* Include the system's limits.h */
|
||||
#if __STDC_HOSTED__ && __has_include_next(<limits.h>)
|
||||
#include_next <limits.h>
|
||||
#endif
|
||||
|
||||
#undef SCHAR_MAX
|
||||
#define SCHAR_MAX __SCHAR_MAX__
|
||||
|
||||
#undef SHRT_MAX
|
||||
#define SHRT_MAX __SHRT_MAX__
|
||||
|
||||
#undef INT_MAX
|
||||
#define INT_MAX __INT_MAX__
|
||||
|
||||
#undef LONG_MAX
|
||||
#define LONG_MAX __LONG_MAX__
|
||||
|
||||
#undef SCHAR_MIN
|
||||
#define SCHAR_MIN (-__SCHAR_MAX__-1)
|
||||
|
||||
#undef SHRT_MIN
|
||||
#define SHRT_MIN (-__SHRT_MAX__ -1)
|
||||
|
||||
#undef INT_MIN
|
||||
#define INT_MIN (-__INT_MAX__ -1)
|
||||
|
||||
#undef LONG_MIN
|
||||
#define LONG_MIN (-__LONG_MAX__ -1L)
|
||||
|
||||
#undef UCHAR_MAX
|
||||
#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
|
||||
|
||||
#undef USHRT_MAX
|
||||
#define USHRT_MAX (__SHRT_MAX__ *2 +1)
|
||||
|
||||
#undef UINT_MAX
|
||||
#define UINT_MAX (__INT_MAX__ *2U +1U)
|
||||
|
||||
#undef ULONG_MAX
|
||||
#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
|
||||
|
||||
#ifndef MB_LEN_MAX
|
||||
#define MB_LEN_MAX 1
|
||||
#endif
|
||||
|
||||
#undef CHAR_BIT
|
||||
#define CHAR_BIT __CHAR_BIT__
|
||||
|
||||
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
|
||||
|
||||
#undef BOOL_WIDTH
|
||||
#define BOOL_WIDTH __BOOL_WIDTH__
|
||||
|
||||
#undef CHAR_WIDTH
|
||||
#define CHAR_WIDTH CHAR_BIT
|
||||
|
||||
#undef SCHAR_WIDTH
|
||||
#define SCHAR_WIDTH CHAR_BIT
|
||||
|
||||
#undef UCHAR_WIDTH
|
||||
#define UCHAR_WIDTH CHAR_BIT
|
||||
|
||||
#undef USHRT_WIDTH
|
||||
#define USHRT_WIDTH __SHRT_WIDTH__
|
||||
|
||||
#undef SHRT_WIDTH
|
||||
#define SHRT_WIDTH __SHRT_WIDTH__
|
||||
|
||||
#undef UINT_WIDTH
|
||||
#define UINT_WIDTH __INT_WIDTH__
|
||||
|
||||
#undef INT_WIDTH
|
||||
#define INT_WIDTH __INT_WIDTH__
|
||||
|
||||
#undef ULONG_WIDTH
|
||||
#define ULONG_WIDTH __LONG_WIDTH__
|
||||
|
||||
#undef LONG_WIDTH
|
||||
#define LONG_WIDTH __LONG_WIDTH__
|
||||
|
||||
#undef ULLONG_WIDTH
|
||||
#define ULLONG_WIDTH __LLONG_WIDTH__
|
||||
|
||||
#undef LLONG_WIDTH
|
||||
#define LLONG_WIDTH __LLONG_WIDTH__
|
||||
|
||||
#undef BITINT_MAXWIDTH
|
||||
#define BITINT_MAXWIDTH __BITINT_MAXWIDTH__
|
||||
|
||||
#endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L */
|
||||
|
||||
#undef CHAR_MIN
|
||||
#undef CHAR_MAX
|
||||
#ifdef __CHAR_UNSIGNED__
|
||||
#define CHAR_MIN 0
|
||||
#define CHAR_MAX UCHAR_MAX
|
||||
#else
|
||||
#define CHAR_MIN SCHAR_MIN
|
||||
#define CHAR_MAX __SCHAR_MAX__
|
||||
#endif
|
||||
|
||||
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
|
||||
|
||||
#undef LLONG_MIN
|
||||
#define LLONG_MIN (-__LONG_LONG_MAX__-1LL)
|
||||
|
||||
#undef LLONG_MAX
|
||||
#define LLONG_MAX __LONG_LONG_MAX__
|
||||
|
||||
#undef ULLONG_MAX
|
||||
#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
|
||||
|
||||
#endif
11 lib/compiler/aro/include/stdalign.h vendored Normal file
@@ -0,0 +1,11 @@
/* <stdalign.h> for the Aro C compiler */

#pragma once
#if __STDC_VERSION__ < 202311L

#define alignas _Alignas
#define alignof _Alignof

#define __alignas_is_defined 1
#define __alignof_is_defined 1
#endif
28 lib/compiler/aro/include/stdarg.h vendored Normal file
@@ -0,0 +1,28 @@
/* <stdarg.h> for the Aro C compiler */

#pragma once
/* Todo: Set to 202311L once header is compliant with C23 */
#define __STDC_VERSION_STDARG_H__ 0

typedef __builtin_va_list va_list;
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
/* C23 no longer requires the second parameter */
#define va_start(ap, ...) __builtin_va_start(ap, __VA_ARGS__)
#else
#define va_start(ap, param) __builtin_va_start(ap, param)
#endif
#define va_end(ap) __builtin_va_end(ap)
#define va_arg(ap, type) __builtin_va_arg(ap, type)

/* GCC and Clang always define __va_copy */
#define __va_copy(d, s) __builtin_va_copy(d, s)

/* but va_copy only on c99+ or when strict ansi mode is turned off */
#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__)
#define va_copy(d, s) __builtin_va_copy(d, s)
#endif

#ifndef __GNUC_VA_LIST
#define __GNUC_VA_LIST 1
typedef __builtin_va_list __gnuc_va_list;
#endif
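As an illustration of how the va_list macros above are typically consumed, here is a minimal C sketch; the `sum_ints` helper is hypothetical and only shows the intended use of va_start/va_arg/va_end:

#include <stdarg.h>

/* Sums `count` trailing int arguments. */
static int sum_ints(int count, ...) {
    va_list ap;
    va_start(ap, count);
    int total = 0;
    for (int i = 0; i < count; i++) total += va_arg(ap, int);
    va_end(ap);
    return total;
}

/* sum_ints(3, 1, 2, 3) evaluates to 6. */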
138 lib/compiler/aro/include/stdatomic.h vendored Normal file
@@ -0,0 +1,138 @@
/* <stdatomic.h> for the Aro C compiler */
|
||||
|
||||
#pragma once
|
||||
|
||||
#define __STDC_VERSION_STDATOMIC_H__ 202311L
|
||||
|
||||
#if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>)
|
||||
#include_next <stdatomic.h>
|
||||
#else
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#define ATOMIC_BOOL_LOCK_FREE __ATOMIC_BOOL_LOCK_FREE
|
||||
#define ATOMIC_CHAR_LOCK_FREE __ATOMIC_CHAR_LOCK_FREE
|
||||
#define ATOMIC_CHAR16_T_LOCK_FREE __ATOMIC_CHAR16_T_LOCK_FREE
|
||||
#define ATOMIC_CHAR32_T_LOCK_FREE __ATOMIC_CHAR32_T_LOCK_FREE
|
||||
#define ATOMIC_WCHAR_T_LOCK_FREE __ATOMIC_WCHAR_T_LOCK_FREE
|
||||
#define ATOMIC_SHORT_LOCK_FREE __ATOMIC_SHORT_LOCK_FREE
|
||||
#define ATOMIC_INT_LOCK_FREE __ATOMIC_INT_LOCK_FREE
|
||||
#define ATOMIC_LONG_LOCK_FREE __ATOMIC_LONG_LOCK_FREE
|
||||
#define ATOMIC_LLONG_LOCK_FREE __ATOMIC_LLONG_LOCK_FREE
|
||||
#define ATOMIC_POINTER_LOCK_FREE __ATOMIC_POINTER_LOCK_FREE
|
||||
#if defined(__ATOMIC_CHAR8_T_LOCK_FREE)
|
||||
#define ATOMIC_CHAR8_T_LOCK_FREE __ATOMIC_CHAR8_T_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#if __STDC_VERSION__ < 202311L
|
||||
/* ATOMIC_VAR_INIT was removed in C23 */
|
||||
#define ATOMIC_VAR_INIT(value) (value)
|
||||
#endif
|
||||
|
||||
#define atomic_init __c11_atomic_init
|
||||
|
||||
typedef enum memory_order {
|
||||
memory_order_relaxed = __ATOMIC_RELAXED,
|
||||
memory_order_consume = __ATOMIC_CONSUME,
|
||||
memory_order_acquire = __ATOMIC_ACQUIRE,
|
||||
memory_order_release = __ATOMIC_RELEASE,
|
||||
memory_order_acq_rel = __ATOMIC_ACQ_REL,
|
||||
memory_order_seq_cst = __ATOMIC_SEQ_CST
|
||||
} memory_order;
|
||||
|
||||
#define kill_dependency(y) (y)
|
||||
|
||||
void atomic_thread_fence(memory_order);
|
||||
void atomic_signal_fence(memory_order);
|
||||
|
||||
#define atomic_thread_fence(order) __c11_atomic_thread_fence(order)
|
||||
#define atomic_signal_fence(order) __c11_atomic_signal_fence(order)
|
||||
|
||||
#define atomic_is_lock_free(obj) __c11_atomic_is_lock_free(sizeof(*(obj)))
|
||||
|
||||
typedef _Atomic(_Bool) atomic_bool;
|
||||
typedef _Atomic(char) atomic_char;
|
||||
typedef _Atomic(signed char) atomic_schar;
|
||||
typedef _Atomic(unsigned char) atomic_uchar;
|
||||
typedef _Atomic(short) atomic_short;
|
||||
typedef _Atomic(unsigned short) atomic_ushort;
|
||||
typedef _Atomic(int) atomic_int;
|
||||
typedef _Atomic(unsigned int) atomic_uint;
|
||||
typedef _Atomic(long) atomic_long;
|
||||
typedef _Atomic(unsigned long) atomic_ulong;
|
||||
typedef _Atomic(long long) atomic_llong;
|
||||
typedef _Atomic(unsigned long long) atomic_ullong;
|
||||
typedef _Atomic(uint_least16_t) atomic_char16_t;
|
||||
typedef _Atomic(uint_least32_t) atomic_char32_t;
|
||||
typedef _Atomic(wchar_t) atomic_wchar_t;
|
||||
typedef _Atomic(int_least8_t) atomic_int_least8_t;
|
||||
typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
|
||||
typedef _Atomic(int_least16_t) atomic_int_least16_t;
|
||||
typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
|
||||
typedef _Atomic(int_least32_t) atomic_int_least32_t;
|
||||
typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
|
||||
typedef _Atomic(int_least64_t) atomic_int_least64_t;
|
||||
typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
|
||||
typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
|
||||
typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
|
||||
typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
|
||||
typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
|
||||
typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
|
||||
typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
|
||||
typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
|
||||
typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
|
||||
typedef _Atomic(intptr_t) atomic_intptr_t;
|
||||
typedef _Atomic(uintptr_t) atomic_uintptr_t;
|
||||
typedef _Atomic(size_t) atomic_size_t;
|
||||
typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
|
||||
typedef _Atomic(intmax_t) atomic_intmax_t;
|
||||
typedef _Atomic(uintmax_t) atomic_uintmax_t;
|
||||
|
||||
#define atomic_store(object, desired) __c11_atomic_store(object, desired, __ATOMIC_SEQ_CST)
|
||||
#define atomic_store_explicit __c11_atomic_store
|
||||
|
||||
#define atomic_load(object) __c11_atomic_load(object, __ATOMIC_SEQ_CST)
|
||||
#define atomic_load_explicit __c11_atomic_load
|
||||
|
||||
#define atomic_exchange(object, desired) __c11_atomic_exchange(object, desired, __ATOMIC_SEQ_CST)
|
||||
#define atomic_exchange_explicit __c11_atomic_exchange
|
||||
|
||||
#define atomic_compare_exchange_strong(object, expected, desired) __c11_atomic_compare_exchange_strong(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
|
||||
#define atomic_compare_exchange_strong_explicit __c11_atomic_compare_exchange_strong
|
||||
|
||||
#define atomic_compare_exchange_weak(object, expected, desired) __c11_atomic_compare_exchange_weak(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
|
||||
#define atomic_compare_exchange_weak_explicit __c11_atomic_compare_exchange_weak
|
||||
|
||||
#define atomic_fetch_add(object, operand) __c11_atomic_fetch_add(object, operand, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_add_explicit __c11_atomic_fetch_add
|
||||
|
||||
#define atomic_fetch_sub(object, operand) __c11_atomic_fetch_sub(object, operand, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_sub_explicit __c11_atomic_fetch_sub
|
||||
|
||||
#define atomic_fetch_or(object, operand) __c11_atomic_fetch_or(object, operand, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_or_explicit __c11_atomic_fetch_or
|
||||
|
||||
#define atomic_fetch_xor(object, operand) __c11_atomic_fetch_xor(object, operand, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_xor_explicit __c11_atomic_fetch_xor
|
||||
|
||||
#define atomic_fetch_and(object, operand) __c11_atomic_fetch_and(object, operand, __ATOMIC_SEQ_CST)
|
||||
#define atomic_fetch_and_explicit __c11_atomic_fetch_and
|
||||
|
||||
typedef struct atomic_flag { atomic_bool _Value; } atomic_flag;
|
||||
|
||||
#define ATOMIC_FLAG_INIT { 0 }
|
||||
|
||||
_Bool atomic_flag_test_and_set(volatile atomic_flag *);
|
||||
_Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
|
||||
void atomic_flag_clear(volatile atomic_flag *);
|
||||
void atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);
|
||||
|
||||
#define atomic_flag_test_and_set(object) __c11_atomic_exchange(&(object)->_Value, 1, __ATOMIC_SEQ_CST)
|
||||
#define atomic_flag_test_and_set_explicit(object, order) __c11_atomic_exchange(&(object)->_Value, 1, order)
|
||||
|
||||
#define atomic_flag_clear(object) __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST)
|
||||
#define atomic_flag_clear_explicit(object, order) __c11_atomic_store(&(object)->_Value, 0, order)
|
||||
|
||||
|
||||
#endif
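As a rough illustration of the C11 API that the macros in this header forward to the __c11 builtins, a minimal usage sketch (the `counter` example is hypothetical):

#include <stdatomic.h>
#include <stdio.h>

int main(void) {
    atomic_int counter = 0;
    atomic_fetch_add(&counter, 1);                                 /* seq_cst ordering by default */
    atomic_fetch_add_explicit(&counter, 2, memory_order_relaxed);  /* explicit ordering */
    printf("%d\n", atomic_load(&counter));                         /* prints 3 */
    return 0;
}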
13 lib/compiler/aro/include/stdbool.h vendored Normal file
@@ -0,0 +1,13 @@
/* <stdbool.h> for the Aro C compiler */

#pragma once

#if __STDC_VERSION__ < 202311L
#define bool _Bool

#define true 1
#define false 0

#define __bool_true_false_are_defined 1

#endif
9 lib/compiler/aro/include/stdckdint.h vendored Normal file
@@ -0,0 +1,9 @@
/* <stdckdint.h> for the Aro C compiler */

#pragma once

#define __STDC_VERSION_STDCKDINT_H__ 202311L

#define ckd_add(result, a, b) __builtin_add_overflow(a, b, result)
#define ckd_sub(result, a, b) __builtin_sub_overflow(a, b, result)
#define ckd_mul(result, a, b) __builtin_mul_overflow(a, b, result)
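For reference, the C23 checked-arithmetic macros above report overflow through their boolean return value; a minimal, hypothetical example:

#include <stdckdint.h>
#include <stdio.h>

int main(void) {
    int sum;
    /* ckd_add returns true when the mathematically exact result does not fit in `sum`. */
    if (ckd_add(&sum, 2000000000, 2000000000)) {
        puts("overflow detected");
    } else {
        printf("sum = %d\n", sum);
    }
    return 0;
}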
31 lib/compiler/aro/include/stddef.h vendored Normal file
@@ -0,0 +1,31 @@
/* <stddef.h> for the Aro C compiler */

#pragma once

#define __STDC_VERSION_STDDEF_H__ 202311L

typedef __PTRDIFF_TYPE__ ptrdiff_t;
typedef __SIZE_TYPE__ size_t;
typedef __WCHAR_TYPE__ wchar_t;

/* define max_align_t to match GCC and Clang */
typedef struct {
  long long __aro_max_align_ll;
  long double __aro_max_align_ld;
} max_align_t;

#define NULL ((void*)0)
#define offsetof(T, member) __builtin_offsetof(T, member)

#if __STDC_VERSION__ >= 202311L
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wpre-c23-compat"
typedef typeof(nullptr) nullptr_t;
# pragma GCC diagnostic pop

# if defined unreachable
# error unreachable() is a standard macro in C23
# else
# define unreachable() __builtin_unreachable()
# endif
#endif
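As a brief illustration of offsetof and max_align_t as defined above (the `struct packet` type is hypothetical):

#include <stddef.h>
#include <stdio.h>

struct packet {
    char tag;
    int payload;
};

int main(void) {
    /* offsetof expands to __builtin_offsetof(T, member) with this header. */
    printf("payload offset: %zu\n", offsetof(struct packet, payload));
    printf("max_align_t alignment: %zu\n", _Alignof(max_align_t));
    return 0;
}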
289 lib/compiler/aro/include/stdint.h vendored Normal file
@@ -0,0 +1,289 @@
/* <stdint.h> for the Aro C compiler */
|
||||
|
||||
#pragma once
|
||||
|
||||
|
||||
#if __STDC_HOSTED__ && __has_include_next(<stdint.h>)
|
||||
|
||||
# include_next <stdint.h>
|
||||
|
||||
#else
|
||||
|
||||
#define __stdint_int_c_cat(X, Y) X ## Y
|
||||
#define __stdint_int_c(V, SUFFIX) __stdint_int_c_cat(V, SUFFIX)
|
||||
#define __stdint_uint_c(V, SUFFIX) __stdint_int_c_cat(V##U, SUFFIX)
|
||||
|
||||
#define INTPTR_MIN (-__INTPTR_MAX__-1)
|
||||
#define INTPTR_MAX __INTPTR_MAX__
|
||||
#define UINTPTR_MAX __UINTPTR_MAX__
|
||||
#define PTRDIFF_MIN (-__PTRDIFF_MAX__-1)
|
||||
#define PTRDIFF_MAX __PTRDIFF_MAX__
|
||||
#define SIZE_MAX __SIZE_MAX__
|
||||
#define INTMAX_MIN (-__INTMAX_MAX__-1)
|
||||
#define INTMAX_MAX __INTMAX_MAX__
|
||||
#define UINTMAX_MAX __UINTMAX_MAX__
|
||||
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
|
||||
# define INTPTR_WIDTH __INTPTR_WIDTH__
|
||||
# define UINTPTR_WIDTH __UINTPTR_WIDTH__
|
||||
# define INTMAX_WIDTH __INTMAX_WIDTH__
|
||||
# define UINTMAX_WIDTH __UINTMAX_WIDTH__
|
||||
# define PTRDIFF_WIDTH __PTRDIFF_WIDTH__
|
||||
# define SIZE_WIDTH __SIZE_WIDTH__
|
||||
# define WCHAR_WIDTH __WCHAR_WIDTH__
|
||||
#endif
|
||||
|
||||
typedef __INTMAX_TYPE__ intmax_t;
|
||||
typedef __UINTMAX_TYPE__ uintmax_t;
|
||||
|
||||
#ifndef _INTPTR_T
|
||||
# ifndef __intptr_t_defined
|
||||
typedef __INTPTR_TYPE__ intptr_t;
|
||||
# define __intptr_t_defined
|
||||
# define _INTPTR_T
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifndef _UINTPTR_T
|
||||
typedef __UINTPTR_TYPE__ uintptr_t;
|
||||
# define _UINTPTR_T
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __INT64_TYPE__
|
||||
# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t*/
|
||||
typedef __INT64_TYPE__ int64_t;
|
||||
# endif /* __int8_t_defined */
|
||||
typedef __UINT64_TYPE__ uint64_t;
|
||||
|
||||
# undef __int64_c_suffix
|
||||
# undef __int32_c_suffix
|
||||
# undef __int16_c_suffix
|
||||
# undef __int8_c_suffix
|
||||
# ifdef __INT64_C_SUFFIX__
|
||||
# define __int64_c_suffix __INT64_C_SUFFIX__
|
||||
# define __int32_c_suffix __INT64_C_SUFFIX__
|
||||
# define __int16_c_suffix __INT64_C_SUFFIX__
|
||||
# define __int8_c_suffix __INT64_C_SUFFIX__
|
||||
# endif /* __INT64_C_SUFFIX__ */
|
||||
|
||||
# ifdef __int64_c_suffix
|
||||
# define INT64_C(v) (__stdint_int_c(v, __int64_c_suffix))
|
||||
# define UINT64_C(v) (__stdint_uint_c(v, __int64_c_suffix))
|
||||
# else
|
||||
# define INT64_C(v) (v)
|
||||
# define UINT64_C(v) (v ## U)
|
||||
# endif /* __int64_c_suffix */
|
||||
|
||||
# define INT64_MAX INT64_C( 9223372036854775807)
|
||||
# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
|
||||
# define UINT64_MAX UINT64_C(18446744073709551615)
|
||||
# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
|
||||
# define UINT64_WIDTH 64
|
||||
# define INT64_WIDTH UINT64_WIDTH
|
||||
# endif /* __STDC_VERSION__ */
|
||||
|
||||
#endif /* __INT64_TYPE__ */
|
||||
|
||||
#ifdef __INT32_TYPE__
|
||||
# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t*/
|
||||
typedef __INT32_TYPE__ int32_t;
|
||||
# endif /* __int8_t_defined */
|
||||
typedef __UINT32_TYPE__ uint32_t;
|
||||
|
||||
# undef __int32_c_suffix
|
||||
# undef __int16_c_suffix
|
||||
# undef __int8_c_suffix
|
||||
# ifdef __INT32_C_SUFFIX__
|
||||
# define __int32_c_suffix __INT32_C_SUFFIX__
|
||||
# define __int16_c_suffix __INT32_C_SUFFIX__
|
||||
# define __int8_c_suffix __INT32_C_SUFFIX__
|
||||
# endif /* __INT32_C_SUFFIX__ */
|
||||
|
||||
# ifdef __int32_c_suffix
|
||||
# define INT32_C(v) (__stdint_int_c(v, __int32_c_suffix))
|
||||
# define UINT32_C(v) (__stdint_uint_c(v, __int32_c_suffix))
|
||||
# else
|
||||
# define INT32_C(v) (v)
|
||||
# define UINT32_C(v) (v ## U)
|
||||
# endif /* __int32_c_suffix */
|
||||
|
||||
# define INT32_MAX INT32_C( 2147483647)
|
||||
# define INT32_MIN (-INT32_C( 2147483647)-1)
|
||||
# define UINT32_MAX UINT32_C(4294967295)
|
||||
# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
|
||||
# define UINT32_WIDTH 32
|
||||
# define INT32_WIDTH UINT32_WIDTH
|
||||
# endif /* __STDC_VERSION__ */
|
||||
|
||||
#endif /* __INT32_TYPE__ */
|
||||
|
||||
#ifdef __INT16_TYPE__
|
||||
# ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t*/
|
||||
typedef __INT16_TYPE__ int16_t;
|
||||
# endif /* __int8_t_defined */
|
||||
typedef __UINT16_TYPE__ uint16_t;
|
||||
|
||||
# undef __int16_c_suffix
|
||||
# undef __int8_c_suffix
|
||||
# ifdef __INT16_C_SUFFIX__
|
||||
# define __int16_c_suffix __INT16_C_SUFFIX__
|
||||
# define __int8_c_suffix __INT16_C_SUFFIX__
|
||||
# endif /* __INT16_C_SUFFIX__ */
|
||||
|
||||
# ifdef __int16_c_suffix
|
||||
# define INT16_C(v) (__stdint_int_c(v, __int16_c_suffix))
|
||||
# define UINT16_C(v) (__stdint_uint_c(v, __int16_c_suffix))
|
||||
# else
|
||||
# define INT16_C(v) (v)
|
||||
# define UINT16_C(v) (v ## U)
|
||||
# endif /* __int16_c_suffix */
|
||||
|
||||
# define INT16_MAX INT16_C( 32767)
|
||||
# define INT16_MIN (-INT16_C( 32767)-1)
|
||||
# define UINT16_MAX UINT16_C(65535)
|
||||
# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
|
||||
# define UINT16_WIDTH 16
|
||||
# define INT16_WIDTH UINT16_WIDTH
|
||||
# endif /* __STDC_VERSION__ */
|
||||
|
||||
#endif /* __INT16_TYPE__ */
|
||||
|
||||
#ifdef __INT8_TYPE__
|
||||
# ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t*/
|
||||
typedef __INT8_TYPE__ int8_t;
|
||||
# endif /* __int8_t_defined */
|
||||
typedef __UINT8_TYPE__ uint8_t;
|
||||
|
||||
# undef __int8_c_suffix
|
||||
# ifdef __INT8_C_SUFFIX__
|
||||
# define __int8_c_suffix __INT8_C_SUFFIX__
|
||||
# endif /* __INT8_C_SUFFIX__ */
|
||||
|
||||
# ifdef __int8_c_suffix
|
||||
# define INT8_C(v) (__stdint_int_c(v, __int8_c_suffix))
|
||||
# define UINT8_C(v) (__stdint_uint_c(v, __int8_c_suffix))
|
||||
# else
|
||||
# define INT8_C(v) (v)
|
||||
# define UINT8_C(v) (v ## U)
|
||||
# endif /* __int8_c_suffix */
|
||||
|
||||
# define INT8_MAX INT8_C(127)
|
||||
# define INT8_MIN (-INT8_C(127)-1)
|
||||
# define UINT8_MAX UINT8_C(255)
|
||||
# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
|
||||
# define UINT8_WIDTH 8
|
||||
# define INT8_WIDTH UINT8_WIDTH
|
||||
# endif /* __STDC_VERSION__ */
|
||||
|
||||
#endif /* __INT8_TYPE__ */
|
||||
|
||||
typedef __INT_LEAST64_TYPE__ int_least64_t;
|
||||
typedef __INT_LEAST32_TYPE__ int_least32_t;
|
||||
typedef __INT_LEAST16_TYPE__ int_least16_t;
|
||||
typedef __INT_LEAST8_TYPE__ int_least8_t;
|
||||
|
||||
typedef __UINT_LEAST64_TYPE__ uint_least64_t;
|
||||
typedef __UINT_LEAST32_TYPE__ uint_least32_t;
|
||||
typedef __UINT_LEAST16_TYPE__ uint_least16_t;
|
||||
typedef __UINT_LEAST8_TYPE__ uint_least8_t;
|
||||
|
||||
#define INT_LEAST8_MAX __INT_LEAST8_MAX__
|
||||
#define INT_LEAST8_MIN (-__INT_LEAST8_MAX__-1)
|
||||
#define UINT_LEAST8_MAX __UINT_LEAST8_MAX__
|
||||
|
||||
#define INT_LEAST16_MAX __INT_LEAST16_MAX__
|
||||
#define INT_LEAST16_MIN (-__INT_LEAST16_MAX__-1)
|
||||
#define UINT_LEAST16_MAX __UINT_LEAST16_MAX__
|
||||
|
||||
#define INT_LEAST32_MAX __INT_LEAST32_MAX__
|
||||
#define INT_LEAST32_MIN (-__INT_LEAST32_MAX__-1)
|
||||
#define UINT_LEAST32_MAX __UINT_LEAST32_MAX__
|
||||
|
||||
#define INT_LEAST64_MAX __INT_LEAST64_MAX__
|
||||
#define INT_LEAST64_MIN (-__INT_LEAST64_MAX__-1)
|
||||
#define UINT_LEAST64_MAX __UINT_LEAST64_MAX__
|
||||
|
||||
|
||||
typedef __INT_FAST64_TYPE__ int_fast64_t;
|
||||
typedef __INT_FAST32_TYPE__ int_fast32_t;
|
||||
typedef __INT_FAST16_TYPE__ int_fast16_t;
|
||||
typedef __INT_FAST8_TYPE__ int_fast8_t;
|
||||
|
||||
typedef __UINT_FAST64_TYPE__ uint_fast64_t;
|
||||
typedef __UINT_FAST32_TYPE__ uint_fast32_t;
|
||||
typedef __UINT_FAST16_TYPE__ uint_fast16_t;
|
||||
typedef __UINT_FAST8_TYPE__ uint_fast8_t;
|
||||
|
||||
#define INT_FAST8_MAX __INT_FAST8_MAX__
|
||||
#define INT_FAST8_MIN (-__INT_FAST8_MAX__-1)
|
||||
#define UINT_FAST8_MAX __UINT_FAST8_MAX__
|
||||
|
||||
#define INT_FAST16_MAX __INT_FAST16_MAX__
|
||||
#define INT_FAST16_MIN (-__INT_FAST16_MAX__-1)
|
||||
#define UINT_FAST16_MAX __UINT_FAST16_MAX__
|
||||
|
||||
#define INT_FAST32_MAX __INT_FAST32_MAX__
|
||||
#define INT_FAST32_MIN (-__INT_FAST32_MAX__-1)
|
||||
#define UINT_FAST32_MAX __UINT_FAST32_MAX__
|
||||
|
||||
#define INT_FAST64_MAX __INT_FAST64_MAX__
|
||||
#define INT_FAST64_MIN (-__INT_FAST64_MAX__-1)
|
||||
#define UINT_FAST64_MAX __UINT_FAST64_MAX__
|
||||
|
||||
|
||||
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
|
||||
|
||||
#define INT_FAST8_WIDTH __INT_FAST8_WIDTH__
|
||||
#define UINT_FAST8_WIDTH __INT_FAST8_WIDTH__
|
||||
#define INT_LEAST8_WIDTH __INT_LEAST8_WIDTH__
|
||||
#define UINT_LEAST8_WIDTH __INT_LEAST8_WIDTH__
|
||||
|
||||
#define INT_FAST16_WIDTH __INT_FAST16_WIDTH__
|
||||
#define UINT_FAST16_WIDTH __INT_FAST16_WIDTH__
|
||||
#define INT_LEAST16_WIDTH __INT_LEAST16_WIDTH__
|
||||
#define UINT_LEAST16_WIDTH __INT_LEAST16_WIDTH__
|
||||
|
||||
#define INT_FAST32_WIDTH __INT_FAST32_WIDTH__
|
||||
#define UINT_FAST32_WIDTH __INT_FAST32_WIDTH__
|
||||
#define INT_LEAST32_WIDTH __INT_LEAST32_WIDTH__
|
||||
#define UINT_LEAST32_WIDTH __INT_LEAST32_WIDTH__
|
||||
|
||||
#define INT_FAST64_WIDTH __INT_FAST64_WIDTH__
|
||||
#define UINT_FAST64_WIDTH __INT_FAST64_WIDTH__
|
||||
#define INT_LEAST64_WIDTH __INT_LEAST64_WIDTH__
|
||||
#define UINT_LEAST64_WIDTH __INT_LEAST64_WIDTH__
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __SIZEOF_INT128__
|
||||
typedef signed __int128 int128_t;
|
||||
typedef unsigned __int128 uint128_t;
|
||||
typedef signed __int128 int_fast128_t;
|
||||
typedef unsigned __int128 uint_fast128_t;
|
||||
typedef signed __int128 int_least128_t;
|
||||
typedef unsigned __int128 uint_least128_t;
|
||||
# define UINT128_MAX ((uint128_t)-1)
|
||||
# define INT128_MAX ((int128_t)+(UINT128_MAX/2))
|
||||
# define INT128_MIN (-INT128_MAX-1)
|
||||
# define UINT_LEAST128_MAX UINT128_MAX
|
||||
# define INT_LEAST128_MAX INT128_MAX
|
||||
# define INT_LEAST128_MIN INT128_MIN
|
||||
# define UINT_FAST128_MAX UINT128_MAX
|
||||
# define INT_FAST128_MAX INT128_MAX
|
||||
# define INT_FAST128_MIN INT128_MIN
|
||||
# define INT128_WIDTH 128
|
||||
# define UINT128_WIDTH 128
|
||||
# define INT_LEAST128_WIDTH 128
|
||||
# define UINT_LEAST128_WIDTH 128
|
||||
# define INT_FAST128_WIDTH 128
|
||||
# define UINT_FAST128_WIDTH 128
|
||||
# if UINT128_WIDTH > __LLONG_WIDTH__
|
||||
# define INT128_C(N) ((int_least128_t)+N ## WB)
|
||||
# define UINT128_C(N) ((uint_least128_t)+N ## WBU)
|
||||
# else
|
||||
# define INT128_C(N) ((int_least128_t)+N ## LL)
|
||||
# define UINT128_C(N) ((uint_least128_t)+N ## LLU)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif /* __STDC_HOSTED__ && __has_include_next(<stdint.h>) */
6 lib/compiler/aro/include/stdnoreturn.h vendored Normal file
@@ -0,0 +1,6 @@
/* <stdnoreturn.h> for the Aro C compiler */

#pragma once

#define noreturn _Noreturn
#define __noreturn_is_defined 1
3 lib/compiler/aro/include/varargs.h vendored Normal file
@@ -0,0 +1,3 @@
/* <varargs.h> for the Aro C compiler */
#pragma once
#error please use <stdarg.h> instead of <varargs.h>
80 lib/compiler/aro/main.zig vendored Normal file
@@ -0,0 +1,80 @@
const std = @import("std");
|
||||
const Allocator = mem.Allocator;
|
||||
const mem = std.mem;
|
||||
const process = std.process;
|
||||
const aro = @import("aro");
|
||||
const Compilation = aro.Compilation;
|
||||
const Diagnostics = aro.Diagnostics;
|
||||
const Driver = aro.Driver;
|
||||
const Toolchain = aro.Toolchain;
|
||||
const assembly_backend = @import("assembly_backend");
|
||||
|
||||
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
|
||||
pub fn main() u8 {
|
||||
const gpa = if (@import("builtin").link_libc)
|
||||
std.heap.raw_c_allocator
|
||||
else
|
||||
general_purpose_allocator.allocator();
|
||||
defer if (!@import("builtin").link_libc) {
|
||||
_ = general_purpose_allocator.deinit();
|
||||
};
|
||||
|
||||
var arena_instance = std.heap.ArenaAllocator.init(gpa);
|
||||
defer arena_instance.deinit();
|
||||
const arena = arena_instance.allocator();
|
||||
|
||||
const fast_exit = @import("builtin").mode != .Debug;
|
||||
|
||||
const args = process.argsAlloc(arena) catch {
|
||||
std.debug.print("out of memory\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
};
|
||||
|
||||
const aro_name = std.fs.selfExePathAlloc(gpa) catch {
|
||||
std.debug.print("unable to find Aro executable path\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
};
|
||||
defer gpa.free(aro_name);
|
||||
|
||||
var stderr_buf: [1024]u8 = undefined;
|
||||
var stderr = std.fs.File.stderr().writer(&stderr_buf);
|
||||
var diagnostics: Diagnostics = .{
|
||||
.output = .{ .to_writer = .{
|
||||
.color = .detect(stderr.file),
|
||||
.writer = &stderr.interface,
|
||||
} },
|
||||
};
|
||||
|
||||
var comp = Compilation.initDefault(gpa, arena, &diagnostics, std.fs.cwd()) catch |er| switch (er) {
|
||||
error.OutOfMemory => {
|
||||
std.debug.print("out of memory\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
};
|
||||
defer comp.deinit();
|
||||
|
||||
var driver: Driver = .{ .comp = &comp, .aro_name = aro_name, .diagnostics = &diagnostics };
|
||||
defer driver.deinit();
|
||||
|
||||
var toolchain: Toolchain = .{ .driver = &driver, .filesystem = .{ .real = comp.cwd } };
|
||||
defer toolchain.deinit();
|
||||
|
||||
driver.main(&toolchain, args, fast_exit, assembly_backend.genAsm) catch |er| switch (er) {
|
||||
error.OutOfMemory => {
|
||||
std.debug.print("out of memory\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
error.FatalError => {
|
||||
driver.printDiagnosticsStats();
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
};
|
||||
if (fast_exit) process.exit(@intFromBool(comp.diagnostics.errors != 0));
|
||||
return @intFromBool(diagnostics.errors != 0);
|
||||
}
|
||||
File diff suppressed because it is too large
|
|
@ -112,7 +112,7 @@ pub fn main() !void {
|
|||
var steps_menu = false;
|
||||
var output_tmp_nonce: ?[16]u8 = null;
|
||||
var watch = false;
|
||||
var fuzz = false;
|
||||
var fuzz: ?std.Build.Fuzz.Mode = null;
|
||||
var debounce_interval_ms: u16 = 50;
|
||||
var webui_listen: ?std.net.Address = null;
|
||||
|
||||
|
|
@ -274,10 +274,44 @@ pub fn main() !void {
|
|||
webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
|
||||
}
|
||||
} else if (mem.eql(u8, arg, "--fuzz")) {
|
||||
fuzz = true;
|
||||
fuzz = .{ .forever = undefined };
|
||||
if (webui_listen == null) {
|
||||
webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
|
||||
}
|
||||
} else if (mem.startsWith(u8, arg, "--fuzz=")) {
|
||||
const value = arg["--fuzz=".len..];
|
||||
if (value.len == 0) fatal("missing argument to --fuzz", .{});
|
||||
|
||||
const unit: u8 = value[value.len - 1];
|
||||
const digits = switch (unit) {
|
||||
'0'...'9' => value,
|
||||
'K', 'M', 'G' => value[0 .. value.len - 1],
|
||||
else => fatal(
|
||||
"invalid argument to --fuzz, expected a positive number optionally suffixed by one of: [KMG]",
|
||||
.{},
|
||||
),
|
||||
};
|
||||
|
||||
const amount = std.fmt.parseInt(u64, digits, 10) catch {
|
||||
fatal(
|
||||
"invalid argument to --fuzz, expected a positive number optionally suffixed by one of: [KMG]",
|
||||
.{},
|
||||
);
|
||||
};
|
||||
|
||||
const normalized_amount = std.math.mul(u64, amount, switch (unit) {
|
||||
else => unreachable,
|
||||
'0'...'9' => 1,
|
||||
'K' => 1000,
|
||||
'M' => 1_000_000,
|
||||
'G' => 1_000_000_000,
|
||||
}) catch fatal("fuzzing limit amount overflows u64", .{});
|
||||
|
||||
fuzz = .{
|
||||
.limit = .{
|
||||
.amount = normalized_amount,
|
||||
},
|
||||
};
|
||||
} else if (mem.eql(u8, arg, "-fincremental")) {
|
||||
graph.incremental = true;
|
||||
} else if (mem.eql(u8, arg, "-fno-incremental")) {
|
||||
|
|
@ -476,6 +510,7 @@ pub fn main() !void {
|
|||
targets.items,
|
||||
main_progress_node,
|
||||
&run,
|
||||
fuzz,
|
||||
) catch |err| switch (err) {
|
||||
error.UncleanExit => {
|
||||
assert(!run.watch and run.web_server == null);
|
||||
|
|
@ -485,7 +520,12 @@ pub fn main() !void {
|
|||
};
|
||||
|
||||
if (run.web_server) |*web_server| {
|
||||
web_server.finishBuild(.{ .fuzz = fuzz });
|
||||
if (fuzz) |mode| if (mode != .forever) fatal(
|
||||
"error: limited fuzzing is not implemented yet for --webui",
|
||||
.{},
|
||||
);
|
||||
|
||||
web_server.finishBuild(.{ .fuzz = fuzz != null });
|
||||
}
|
||||
|
||||
if (!watch and run.web_server == null) {
|
||||
|
|
@ -651,6 +691,7 @@ fn runStepNames(
|
|||
step_names: []const []const u8,
|
||||
parent_prog_node: std.Progress.Node,
|
||||
run: *Run,
|
||||
fuzz: ?std.Build.Fuzz.Mode,
|
||||
) !void {
|
||||
const gpa = run.gpa;
|
||||
const step_stack = &run.step_stack;
|
||||
|
|
@ -676,6 +717,7 @@ fn runStepNames(
|
|||
});
|
||||
}
|
||||
}
|
||||
|
||||
assert(run.memory_blocked_steps.items.len == 0);
|
||||
|
||||
var test_skip_count: usize = 0;
|
||||
|
|
@ -724,6 +766,45 @@ fn runStepNames(
|
|||
}
|
||||
}
|
||||
|
||||
const ttyconf = run.ttyconf;
|
||||
|
||||
if (fuzz) |mode| blk: {
|
||||
switch (builtin.os.tag) {
|
||||
// Current implementation depends on two things that need to be ported to Windows:
|
||||
// * Memory-mapping to share data between the fuzzer and build runner.
|
||||
// * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving
|
||||
// many addresses to source locations).
|
||||
.windows => fatal("--fuzz not yet implemented for {s}", .{@tagName(builtin.os.tag)}),
|
||||
else => {},
|
||||
}
|
||||
if (@bitSizeOf(usize) != 64) {
|
||||
// Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
|
||||
// being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case
|
||||
// on 32-bit platforms.
|
||||
// Affects or affected by issues #5185, #22523, and #22464.
|
||||
fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});
|
||||
}
|
||||
|
||||
switch (mode) {
|
||||
.forever => break :blk,
|
||||
.limit => {},
|
||||
}
|
||||
|
||||
assert(mode == .limit);
|
||||
var f = std.Build.Fuzz.init(
|
||||
gpa,
|
||||
thread_pool,
|
||||
step_stack.keys(),
|
||||
parent_prog_node,
|
||||
ttyconf,
|
||||
mode,
|
||||
) catch |err| fatal("failed to start fuzzer: {s}", .{@errorName(err)});
|
||||
defer f.deinit();
|
||||
|
||||
f.start();
|
||||
f.waitAndPrintReport();
|
||||
}
|
||||
|
||||
// A proper command line application defaults to silently succeeding.
|
||||
// The user may request verbose mode if they have a different preference.
|
||||
const failures_only = switch (run.summary) {
|
||||
|
|
@ -737,8 +818,6 @@ fn runStepNames(
|
|||
std.Progress.setStatus(.failure);
|
||||
}
|
||||
|
||||
const ttyconf = run.ttyconf;
|
||||
|
||||
if (run.summary != .none) {
|
||||
const w = std.debug.lockStderrWriter(&stdio_buffer_allocation);
|
||||
defer std.debug.unlockStderrWriter();
|
||||
|
|
@@ -1366,7 +1445,10 @@ fn printUsage(b: *std.Build, w: *Writer) !void {
\\ --watch Continuously rebuild when source files are modified
\\ --debounce <ms> Delay before rebuilding after changed file detected
\\ --webui[=ip] Enable the web interface on the given IP address
\\ --fuzz Continuously search for unit test failures (implies '--webui')
\\ --fuzz[=limit] Continuously search for unit test failures with an optional
\\ limit to the max number of iterations. The argument supports
\\ an optional 'K', 'M', or 'G' suffix (e.g. '10K'). Implies
\\ '--webui' when no limit is specified.
\\ --time-report Force full rebuild and provide detailed information on
\\ compilation time of Zig source code (implies '--webui')
\\ -fincremental Enable incremental compilation
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@ pub fn main() !void {
|
|||
const arg = args[i];
|
||||
if (mem.startsWith(u8, arg, "-")) {
|
||||
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
|
||||
const stdout = std.fs.File.stdout().deprecatedWriter();
|
||||
const stdout = std.fs.File.stdout();
|
||||
try stdout.writeAll(usage);
|
||||
return std.process.cleanExit();
|
||||
} else if (mem.eql(u8, arg, "--")) {
|
||||
|
|
|
|||
|
|
@ -120,7 +120,20 @@ pub fn main() !void {
|
|||
defer aro_arena_state.deinit();
|
||||
const aro_arena = aro_arena_state.allocator();
|
||||
|
||||
var comp = aro.Compilation.init(aro_arena, std.fs.cwd());
|
||||
var stderr_buf: [512]u8 = undefined;
|
||||
var stderr_writer = stderr.writer(&stderr_buf);
|
||||
var diagnostics: aro.Diagnostics = switch (zig_integration) {
|
||||
false => .{ .output = .{ .to_writer = .{
|
||||
.writer = &stderr_writer.interface,
|
||||
.color = stderr_config,
|
||||
} } },
|
||||
true => .{ .output = .{ .to_list = .{
|
||||
.arena = .init(allocator),
|
||||
} } },
|
||||
};
|
||||
defer diagnostics.deinit();
|
||||
|
||||
var comp = aro.Compilation.init(aro_arena, aro_arena, &diagnostics, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
|
||||
var argv: std.ArrayList([]const u8) = .empty;
|
||||
|
|
@ -145,18 +158,22 @@ pub fn main() !void {
|
|||
|
||||
preprocess.preprocess(&comp, &preprocessed_buf.writer, argv.items, maybe_dependencies) catch |err| switch (err) {
|
||||
error.GeneratedSourceError => {
|
||||
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessor setup (this is always a bug):", &comp);
|
||||
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessor setup (this is always a bug)", &comp);
|
||||
std.process.exit(1);
|
||||
},
|
||||
// ArgError can occur if e.g. the .rc file is not found
|
||||
error.ArgError, error.PreprocessError => {
|
||||
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessing:", &comp);
|
||||
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessing", &comp);
|
||||
std.process.exit(1);
|
||||
},
|
||||
error.StreamTooLong => {
|
||||
error.FileTooBig => {
|
||||
try error_handler.emitMessage(allocator, .err, "failed during preprocessing: maximum file size exceeded", .{});
|
||||
std.process.exit(1);
|
||||
},
|
||||
error.WriteFailed => {
|
||||
try error_handler.emitMessage(allocator, .err, "failed during preprocessing: error writing the preprocessed output", .{});
|
||||
std.process.exit(1);
|
||||
},
|
||||
error.OutOfMemory => |e| return e,
|
||||
};
|
||||
|
||||
|
|
@ -660,11 +677,10 @@ const ErrorHandler = union(enum) {
|
|||
try server.serveErrorBundle(error_bundle);
|
||||
},
|
||||
.tty => {
|
||||
// extra newline to separate this line from the aro errors
|
||||
// aro errors have already been emitted
|
||||
const stderr = std.debug.lockStderrWriter(&.{});
|
||||
defer std.debug.unlockStderrWriter();
|
||||
try renderErrorMessage(stderr, self.tty, .err, "{s}\n", .{fail_msg});
|
||||
aro.Diagnostics.render(comp, self.tty);
|
||||
try renderErrorMessage(stderr, self.tty, .err, "{s}", .{fail_msg});
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -883,12 +899,10 @@ fn aroDiagnosticsToErrorBundle(
|
|||
.msg = try bundle.addString(fail_msg),
|
||||
});
|
||||
|
||||
var msg_writer = MsgWriter.init(gpa);
|
||||
defer msg_writer.deinit();
|
||||
var cur_err: ?ErrorBundle.ErrorMessage = null;
|
||||
var cur_notes: std.ArrayList(ErrorBundle.ErrorMessage) = .empty;
|
||||
defer cur_notes.deinit(gpa);
|
||||
for (comp.diagnostics.list.items) |msg| {
|
||||
for (comp.diagnostics.output.to_list.messages.items) |msg| {
|
||||
switch (msg.kind) {
|
||||
// Clear the current error so that notes don't bleed into unassociated errors
|
||||
.off, .warning => {
|
||||
|
|
@ -897,28 +911,19 @@ fn aroDiagnosticsToErrorBundle(
|
|||
},
|
||||
.note => if (cur_err == null) continue,
|
||||
.@"fatal error", .@"error" => {},
|
||||
.default => unreachable,
|
||||
}
|
||||
msg_writer.resetRetainingCapacity();
|
||||
aro.Diagnostics.renderMessage(comp, &msg_writer, msg);
|
||||
|
||||
const src_loc = src_loc: {
|
||||
if (msg_writer.path) |src_path| {
|
||||
var src_loc: ErrorBundle.SourceLocation = .{
|
||||
.src_path = try bundle.addString(src_path),
|
||||
.line = msg_writer.line - 1, // 1-based -> 0-based
|
||||
.column = msg_writer.col - 1, // 1-based -> 0-based
|
||||
.span_start = 0,
|
||||
.span_main = 0,
|
||||
.span_end = 0,
|
||||
};
|
||||
if (msg_writer.source_line) |source_line| {
|
||||
src_loc.span_start = msg_writer.span_main;
|
||||
src_loc.span_main = msg_writer.span_main;
|
||||
src_loc.span_end = msg_writer.span_main;
|
||||
src_loc.source_line = try bundle.addString(source_line);
|
||||
}
|
||||
break :src_loc try bundle.addSourceLocation(src_loc);
|
||||
if (msg.location) |location| {
|
||||
break :src_loc try bundle.addSourceLocation(.{
|
||||
.src_path = try bundle.addString(location.path),
|
||||
.line = location.line_no - 1, // 1-based -> 0-based
|
||||
.column = location.col - 1, // 1-based -> 0-based
|
||||
.span_start = location.width,
|
||||
.span_main = location.width,
|
||||
.span_end = location.width,
|
||||
.source_line = try bundle.addString(location.line),
|
||||
});
|
||||
}
|
||||
break :src_loc ErrorBundle.SourceLocationIndex.none;
|
||||
};
|
||||
|
|
@ -929,7 +934,7 @@ fn aroDiagnosticsToErrorBundle(
|
|||
try flushErrorMessageIntoBundle(&bundle, err, cur_notes.items);
|
||||
}
|
||||
cur_err = .{
|
||||
.msg = try bundle.addString(msg_writer.buf.items),
|
||||
.msg = try bundle.addString(msg.text),
|
||||
.src_loc = src_loc,
|
||||
};
|
||||
cur_notes.clearRetainingCapacity();
|
||||
|
|
@ -937,11 +942,11 @@ fn aroDiagnosticsToErrorBundle(
|
|||
.note => {
|
||||
cur_err.?.notes_len += 1;
|
||||
try cur_notes.append(gpa, .{
|
||||
.msg = try bundle.addString(msg_writer.buf.items),
|
||||
.msg = try bundle.addString(msg.text),
|
||||
.src_loc = src_loc,
|
||||
});
|
||||
},
|
||||
.off, .warning, .default => unreachable,
|
||||
.off, .warning => unreachable,
|
||||
}
|
||||
}
|
||||
if (cur_err) |err| {
|
||||
|
|
@ -950,63 +955,3 @@ fn aroDiagnosticsToErrorBundle(
|
|||
|
||||
return try bundle.toOwnedBundle("");
|
||||
}
|
||||
|
||||
// Similar to aro.Diagnostics.MsgWriter but:
|
||||
// - Writers to an ArrayList
|
||||
// - Only prints the message itself (no location, source line, error: prefix, etc)
|
||||
// - Keeps track of source path/line/col instead
|
||||
const MsgWriter = struct {
|
||||
buf: std.array_list.Managed(u8),
|
||||
path: ?[]const u8 = null,
|
||||
// 1-indexed
|
||||
line: u32 = undefined,
|
||||
col: u32 = undefined,
|
||||
source_line: ?[]const u8 = null,
|
||||
span_main: u32 = undefined,
|
||||
|
||||
fn init(allocator: std.mem.Allocator) MsgWriter {
|
||||
return .{
|
||||
.buf = std.array_list.Managed(u8).init(allocator),
|
||||
};
|
||||
}
|
||||
|
||||
fn deinit(m: *MsgWriter) void {
|
||||
m.buf.deinit();
|
||||
}
|
||||
|
||||
fn resetRetainingCapacity(m: *MsgWriter) void {
|
||||
m.buf.clearRetainingCapacity();
|
||||
m.path = null;
|
||||
m.source_line = null;
|
||||
}
|
||||
|
||||
pub fn print(m: *MsgWriter, comptime fmt: []const u8, args: anytype) void {
|
||||
m.buf.print(fmt, args) catch {};
|
||||
}
|
||||
|
||||
pub fn write(m: *MsgWriter, msg: []const u8) void {
|
||||
m.buf.appendSlice(msg) catch {};
|
||||
}
|
||||
|
||||
pub fn setColor(m: *MsgWriter, color: std.Io.tty.Color) void {
|
||||
_ = m;
|
||||
_ = color;
|
||||
}
|
||||
|
||||
pub fn location(m: *MsgWriter, path: []const u8, line: u32, col: u32) void {
|
||||
m.path = path;
|
||||
m.line = line;
|
||||
m.col = col;
|
||||
}
|
||||
|
||||
pub fn start(m: *MsgWriter, kind: aro.Diagnostics.Kind) void {
|
||||
_ = m;
|
||||
_ = kind;
|
||||
}
|
||||
|
||||
pub fn end(m: *MsgWriter, maybe_line: ?[]const u8, col: u32, end_with_splice: bool) void {
|
||||
_ = end_with_splice;
|
||||
m.source_line = maybe_line;
|
||||
m.span_main = col;
|
||||
}
|
||||
};
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ const cli = @import("cli.zig");
|
|||
const Dependencies = @import("compile.zig").Dependencies;
|
||||
const aro = @import("aro");
|
||||
|
||||
const PreprocessError = error{ ArgError, GeneratedSourceError, PreprocessError, StreamTooLong, OutOfMemory };
|
||||
const PreprocessError = error{ ArgError, GeneratedSourceError, PreprocessError, FileTooBig, OutOfMemory, WriteFailed };
|
||||
|
||||
pub fn preprocess(
|
||||
comp: *aro.Compilation,
|
||||
|
|
@ -16,18 +16,18 @@ pub fn preprocess(
|
|||
) PreprocessError!void {
|
||||
try comp.addDefaultPragmaHandlers();
|
||||
|
||||
var driver: aro.Driver = .{ .comp = comp, .aro_name = "arocc" };
|
||||
var driver: aro.Driver = .{ .comp = comp, .diagnostics = comp.diagnostics, .aro_name = "arocc" };
|
||||
defer driver.deinit();
|
||||
|
||||
var macro_buf: std.Io.Writer.Allocating = .init(comp.gpa);
|
||||
defer macro_buf.deinit();
|
||||
var macro_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer macro_buf.deinit(comp.gpa);
|
||||
|
||||
var trash: [64]u8 = undefined;
|
||||
var discarding: std.Io.Writer.Discarding = .init(&trash);
|
||||
_ = driver.parseArgs(&discarding.writer, ¯o_buf.writer, argv) catch |err| switch (err) {
|
||||
var discard_buffer: [64]u8 = undefined;
|
||||
var discarding: std.Io.Writer.Discarding = .init(&discard_buffer);
|
||||
_ = driver.parseArgs(&discarding.writer, ¯o_buf, argv) catch |err| switch (err) {
|
||||
error.FatalError => return error.ArgError,
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.WriteFailed => return error.OutOfMemory,
|
||||
error.WriteFailed => unreachable,
|
||||
};
|
||||
|
||||
if (hasAnyErrors(comp)) return error.ArgError;
|
||||
|
|
@ -37,7 +37,7 @@ pub fn preprocess(
|
|||
error.FatalError => return error.GeneratedSourceError,
|
||||
else => |e| return e,
|
||||
};
|
||||
const user_macros = comp.addSourceFromBuffer("<command line>", macro_buf.written()) catch |err| switch (err) {
|
||||
const user_macros = comp.addSourceFromBuffer("<command line>", macro_buf.items) catch |err| switch (err) {
|
||||
error.FatalError => return error.GeneratedSourceError,
|
||||
else => |e| return e,
|
||||
};
|
||||
|
|
@ -46,7 +46,10 @@ pub fn preprocess(
|
|||
if (hasAnyErrors(comp)) return error.GeneratedSourceError;
|
||||
|
||||
comp.generated_buf.items.len = 0;
|
||||
var pp = try aro.Preprocessor.initDefault(comp);
|
||||
var pp = aro.Preprocessor.initDefault(comp) catch |err| switch (err) {
|
||||
error.FatalError => return error.GeneratedSourceError,
|
||||
error.OutOfMemory => |e| return e,
|
||||
};
|
||||
defer pp.deinit();
|
||||
|
||||
if (comp.langopts.ms_extensions) {
|
||||
|
|
@ -79,16 +82,7 @@ pub fn preprocess(
|
|||
}
|
||||
|
||||
fn hasAnyErrors(comp: *aro.Compilation) bool {
|
||||
// In theory we could just check Diagnostics.errors != 0, but that only
|
||||
// gets set during rendering of the error messages, see:
|
||||
// https://github.com/Vexu/arocc/issues/603
|
||||
for (comp.diagnostics.list.items) |msg| {
|
||||
switch (msg.kind) {
|
||||
.@"fatal error", .@"error" => return true,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
return false;
|
||||
return comp.diagnostics.errors != 0;
|
||||
}
|
||||
|
||||
/// `arena` is used for temporary -D argument strings and the INCLUDE environment variable.
|
||||
|
|
@ -98,6 +92,7 @@ pub fn appendAroArgs(arena: Allocator, argv: *std.ArrayList([]const u8), options
|
|||
"-E",
|
||||
"--comments",
|
||||
"-fuse-line-directives",
|
||||
"-fgnuc-version=4.2.1",
|
||||
"--target=x86_64-windows-msvc",
|
||||
"--emulate=msvc",
|
||||
"-nostdinc",
|
||||
|
|
|
|||
|
|
@ -2,8 +2,10 @@
|
|||
const builtin = @import("builtin");
|
||||
|
||||
const std = @import("std");
|
||||
const fatal = std.process.fatal;
|
||||
const testing = std.testing;
|
||||
const assert = std.debug.assert;
|
||||
const fuzz_abi = std.Build.abi.fuzz;
|
||||
|
||||
pub const std_options: std.Options = .{
|
||||
.logFn = log,
|
||||
|
|
@ -54,12 +56,13 @@ pub fn main() void {
|
|||
}
|
||||
}
|
||||
|
||||
fba.reset();
|
||||
if (builtin.fuzz) {
|
||||
const cache_dir = opt_cache_dir orelse @panic("missing --cache-dir=[path] argument");
|
||||
fuzzer_init(FuzzerSlice.fromSlice(cache_dir));
|
||||
fuzz_abi.fuzzer_init(.fromSlice(cache_dir));
|
||||
}
|
||||
|
||||
fba.reset();
|
||||
|
||||
if (listen) {
|
||||
return mainServer() catch @panic("internal test runner failure");
|
||||
} else {
|
||||
|
|
@ -78,8 +81,13 @@ fn mainServer() !void {
|
|||
});
|
||||
|
||||
if (builtin.fuzz) {
|
||||
const coverage_id = fuzzer_coverage_id();
|
||||
try server.serveU64Message(.coverage_id, coverage_id);
|
||||
const coverage = fuzz_abi.fuzzer_coverage();
|
||||
try server.serveCoverageIdMessage(
|
||||
coverage.id,
|
||||
coverage.runs,
|
||||
coverage.unique,
|
||||
coverage.seen,
|
||||
);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
|
|
@ -152,26 +160,38 @@ fn mainServer() !void {
|
|||
});
|
||||
},
|
||||
.start_fuzzing => {
|
||||
// This ensures that this code won't be analyzed and hence reference fuzzer symbols
|
||||
// since they are not present.
|
||||
if (!builtin.fuzz) unreachable;
|
||||
|
||||
const index = try server.receiveBody_u32();
|
||||
const mode: fuzz_abi.LimitKind = @enumFromInt(try server.receiveBody_u8());
|
||||
const amount_or_instance = try server.receiveBody_u64();
|
||||
|
||||
const test_fn = builtin.test_functions[index];
|
||||
const entry_addr = @intFromPtr(test_fn.func);
|
||||
|
||||
try server.serveU64Message(.fuzz_start_addr, entry_addr);
|
||||
defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1);
|
||||
is_fuzz_test = false;
|
||||
fuzzer_set_name(test_fn.name.ptr, test_fn.name.len);
|
||||
fuzz_test_index = index;
|
||||
fuzz_mode = mode;
|
||||
fuzz_amount_or_instance = amount_or_instance;
|
||||
|
||||
test_fn.func() catch |err| switch (err) {
|
||||
error.SkipZigTest => return,
|
||||
else => {
|
||||
if (@errorReturnTrace()) |trace| {
|
||||
std.debug.dumpStackTrace(trace.*);
|
||||
}
|
||||
std.debug.print("failed with error.{s}\n", .{@errorName(err)});
|
||||
std.debug.print("failed with error.{t}\n", .{err});
|
||||
std.process.exit(1);
|
||||
},
|
||||
};
|
||||
if (!is_fuzz_test) @panic("missed call to std.testing.fuzz");
|
||||
if (log_err_count != 0) @panic("error logs detected");
|
||||
assert(mode != .forever);
|
||||
std.process.exit(0);
|
||||
},
|
||||
|
||||
else => {
|
||||
|
|
@ -184,6 +204,8 @@ fn mainServer() !void {
|
|||
|
||||
fn mainTerminal() void {
|
||||
@disableInstrumentation();
|
||||
if (builtin.fuzz) @panic("fuzz test requires server");
|
||||
|
||||
const test_fn_list = builtin.test_functions;
|
||||
var ok_count: usize = 0;
|
||||
var skip_count: usize = 0;
|
||||
|
|
@ -232,11 +254,11 @@ fn mainTerminal() void {
|
|||
else => {
|
||||
fail_count += 1;
|
||||
if (have_tty) {
|
||||
std.debug.print("{d}/{d} {s}...FAIL ({s})\n", .{
|
||||
i + 1, test_fn_list.len, test_fn.name, @errorName(err),
|
||||
std.debug.print("{d}/{d} {s}...FAIL ({t})\n", .{
|
||||
i + 1, test_fn_list.len, test_fn.name, err,
|
||||
});
|
||||
} else {
|
||||
std.debug.print("FAIL ({s})\n", .{@errorName(err)});
|
||||
std.debug.print("FAIL ({t})\n", .{err});
|
||||
}
|
||||
if (@errorReturnTrace()) |trace| {
|
||||
std.debug.dumpStackTrace(trace.*);
|
||||
|
|
@ -333,28 +355,10 @@ pub fn mainSimple() anyerror!void {
|
|||
if (failed != 0) std.process.exit(1);
|
||||
}
|
||||
|
||||
const FuzzerSlice = extern struct {
|
||||
ptr: [*]const u8,
|
||||
len: usize,
|
||||
|
||||
/// Inline to avoid fuzzer instrumentation.
|
||||
inline fn toSlice(s: FuzzerSlice) []const u8 {
|
||||
return s.ptr[0..s.len];
|
||||
}
|
||||
|
||||
/// Inline to avoid fuzzer instrumentation.
|
||||
inline fn fromSlice(s: []const u8) FuzzerSlice {
|
||||
return .{ .ptr = s.ptr, .len = s.len };
|
||||
}
|
||||
};
|
||||
|
||||
var is_fuzz_test: bool = undefined;
|
||||
|
||||
extern fn fuzzer_set_name(name_ptr: [*]const u8, name_len: usize) void;
|
||||
extern fn fuzzer_init(cache_dir: FuzzerSlice) void;
|
||||
extern fn fuzzer_init_corpus_elem(input_ptr: [*]const u8, input_len: usize) void;
|
||||
extern fn fuzzer_start(testOne: *const fn ([*]const u8, usize) callconv(.c) void) void;
|
||||
extern fn fuzzer_coverage_id() u64;
|
||||
var fuzz_test_index: u32 = undefined;
|
||||
var fuzz_mode: fuzz_abi.LimitKind = undefined;
|
||||
var fuzz_amount_or_instance: u64 = undefined;
|
||||
|
||||
pub fn fuzz(
|
||||
context: anytype,
|
||||
|
|
@ -385,17 +389,17 @@ pub fn fuzz(
|
|||
const global = struct {
|
||||
var ctx: @TypeOf(context) = undefined;
|
||||
|
||||
fn fuzzer_one(input_ptr: [*]const u8, input_len: usize) callconv(.c) void {
|
||||
fn test_one(input: fuzz_abi.Slice) callconv(.c) void {
|
||||
@disableInstrumentation();
|
||||
testing.allocator_instance = .{};
|
||||
defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1);
|
||||
log_err_count = 0;
|
||||
testOne(ctx, input_ptr[0..input_len]) catch |err| switch (err) {
|
||||
testOne(ctx, input.toSlice()) catch |err| switch (err) {
|
||||
error.SkipZigTest => return,
|
||||
else => {
|
||||
std.debug.lockStdErr();
|
||||
if (@errorReturnTrace()) |trace| std.debug.dumpStackTrace(trace.*);
|
||||
std.debug.print("failed with error.{s}\n", .{@errorName(err)});
|
||||
std.debug.print("failed with error.{t}\n", .{err});
|
||||
std.process.exit(1);
|
||||
},
|
||||
};
|
||||
|
|
@ -411,10 +415,13 @@ pub fn fuzz(
|
|||
testing.allocator_instance = .{};
|
||||
defer testing.allocator_instance = prev_allocator_state;
|
||||
|
||||
for (options.corpus) |elem| fuzzer_init_corpus_elem(elem.ptr, elem.len);
|
||||
|
||||
global.ctx = context;
|
||||
fuzzer_start(&global.fuzzer_one);
|
||||
fuzz_abi.fuzzer_init_test(&global.test_one, .fromSlice(builtin.test_functions[fuzz_test_index].name));
|
||||
|
||||
for (options.corpus) |elem|
|
||||
fuzz_abi.fuzzer_new_input(.fromSlice(elem));
|
||||
|
||||
fuzz_abi.fuzzer_main(fuzz_mode, fuzz_amount_or_instance);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
|||
1314 lib/compiler/translate-c/MacroTranslator.zig Normal file
File diff suppressed because it is too large
288 lib/compiler/translate-c/PatternList.zig Normal file
@@ -0,0 +1,288 @@
|
|||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const aro = @import("aro");
|
||||
const CToken = aro.Tokenizer.Token;
|
||||
|
||||
const helpers = @import("helpers.zig");
|
||||
const Translator = @import("Translator.zig");
|
||||
const Error = Translator.Error;
|
||||
pub const MacroProcessingError = Error || error{UnexpectedMacroToken};
|
||||
|
||||
const Impl = std.meta.DeclEnum(std.zig.c_translation.helpers);
|
||||
const Template = struct { []const u8, Impl };
|
||||
|
||||
/// Templates must be function-like macros
|
||||
/// first element is macro source, second element is the name of the function
|
||||
/// in __helpers which implements it
|
||||
const templates = [_]Template{
|
||||
.{ "f_SUFFIX(X) (X ## f)", .F_SUFFIX },
|
||||
.{ "F_SUFFIX(X) (X ## F)", .F_SUFFIX },
|
||||
|
||||
.{ "u_SUFFIX(X) (X ## u)", .U_SUFFIX },
|
||||
.{ "U_SUFFIX(X) (X ## U)", .U_SUFFIX },
|
||||
|
||||
.{ "l_SUFFIX(X) (X ## l)", .L_SUFFIX },
|
||||
.{ "L_SUFFIX(X) (X ## L)", .L_SUFFIX },
|
||||
|
||||
.{ "ul_SUFFIX(X) (X ## ul)", .UL_SUFFIX },
|
||||
.{ "uL_SUFFIX(X) (X ## uL)", .UL_SUFFIX },
|
||||
.{ "Ul_SUFFIX(X) (X ## Ul)", .UL_SUFFIX },
|
||||
.{ "UL_SUFFIX(X) (X ## UL)", .UL_SUFFIX },
|
||||
|
||||
.{ "ll_SUFFIX(X) (X ## ll)", .LL_SUFFIX },
|
||||
.{ "LL_SUFFIX(X) (X ## LL)", .LL_SUFFIX },
|
||||
|
||||
.{ "ull_SUFFIX(X) (X ## ull)", .ULL_SUFFIX },
|
||||
.{ "uLL_SUFFIX(X) (X ## uLL)", .ULL_SUFFIX },
|
||||
.{ "Ull_SUFFIX(X) (X ## Ull)", .ULL_SUFFIX },
|
||||
.{ "ULL_SUFFIX(X) (X ## ULL)", .ULL_SUFFIX },
|
||||
|
||||
.{ "f_SUFFIX(X) X ## f", .F_SUFFIX },
|
||||
.{ "F_SUFFIX(X) X ## F", .F_SUFFIX },
|
||||
|
||||
.{ "u_SUFFIX(X) X ## u", .U_SUFFIX },
|
||||
.{ "U_SUFFIX(X) X ## U", .U_SUFFIX },
|
||||
|
||||
.{ "l_SUFFIX(X) X ## l", .L_SUFFIX },
|
||||
.{ "L_SUFFIX(X) X ## L", .L_SUFFIX },
|
||||
|
||||
.{ "ul_SUFFIX(X) X ## ul", .UL_SUFFIX },
|
||||
.{ "uL_SUFFIX(X) X ## uL", .UL_SUFFIX },
|
||||
.{ "Ul_SUFFIX(X) X ## Ul", .UL_SUFFIX },
|
||||
.{ "UL_SUFFIX(X) X ## UL", .UL_SUFFIX },
|
||||
|
||||
.{ "ll_SUFFIX(X) X ## ll", .LL_SUFFIX },
|
||||
.{ "LL_SUFFIX(X) X ## LL", .LL_SUFFIX },
|
||||
|
||||
.{ "ull_SUFFIX(X) X ## ull", .ULL_SUFFIX },
|
||||
.{ "uLL_SUFFIX(X) X ## uLL", .ULL_SUFFIX },
|
||||
.{ "Ull_SUFFIX(X) X ## Ull", .ULL_SUFFIX },
|
||||
.{ "ULL_SUFFIX(X) X ## ULL", .ULL_SUFFIX },
|
||||
|
||||
.{ "CAST_OR_CALL(X, Y) (X)(Y)", .CAST_OR_CALL },
|
||||
.{ "CAST_OR_CALL(X, Y) ((X)(Y))", .CAST_OR_CALL },
|
||||
|
||||
.{
|
||||
\\wl_container_of(ptr, sample, member) \
|
||||
\\(__typeof__(sample))((char *)(ptr) - \
|
||||
\\ offsetof(__typeof__(*sample), member))
|
||||
,
|
||||
.WL_CONTAINER_OF,
|
||||
},
|
||||
|
||||
.{ "IGNORE_ME(X) ((void)(X))", .DISCARD },
|
||||
.{ "IGNORE_ME(X) (void)(X)", .DISCARD },
|
||||
.{ "IGNORE_ME(X) ((const void)(X))", .DISCARD },
|
||||
.{ "IGNORE_ME(X) (const void)(X)", .DISCARD },
|
||||
.{ "IGNORE_ME(X) ((volatile void)(X))", .DISCARD },
|
||||
.{ "IGNORE_ME(X) (volatile void)(X)", .DISCARD },
|
||||
.{ "IGNORE_ME(X) ((const volatile void)(X))", .DISCARD },
|
||||
.{ "IGNORE_ME(X) (const volatile void)(X)", .DISCARD },
|
||||
.{ "IGNORE_ME(X) ((volatile const void)(X))", .DISCARD },
|
||||
.{ "IGNORE_ME(X) (volatile const void)(X)", .DISCARD },
|
||||
};
|
||||
|
||||
const Pattern = struct {
|
||||
slicer: MacroSlicer,
|
||||
impl: Impl,
|
||||
|
||||
fn init(pl: *Pattern, allocator: mem.Allocator, template: Template) Error!void {
|
||||
const source = template[0];
|
||||
const impl = template[1];
|
||||
var tok_list: std.ArrayList(CToken) = .empty;
|
||||
defer tok_list.deinit(allocator);
|
||||
|
||||
pl.* = .{
|
||||
.slicer = try tokenizeMacro(allocator, source, &tok_list),
|
||||
.impl = impl,
|
||||
};
|
||||
}
|
||||
|
||||
fn deinit(pl: *Pattern, allocator: mem.Allocator) void {
|
||||
allocator.free(pl.slicer.tokens);
|
||||
pl.* = undefined;
|
||||
}
|
||||
|
||||
/// This function assumes that `ms` has already been validated to contain a function-like
|
||||
/// macro, and that the parsed template macro in `pl` also contains a function-like
|
||||
/// macro. Please review this logic carefully if changing that assumption. Two
|
||||
/// function-like macros are considered equivalent if and only if they contain the same
|
||||
/// list of tokens, modulo parameter names.
|
||||
fn matches(pat: Pattern, ms: MacroSlicer) bool {
|
||||
if (ms.params != pat.slicer.params) return false;
|
||||
if (ms.tokens.len != pat.slicer.tokens.len) return false;
|
||||
|
||||
for (ms.tokens, pat.slicer.tokens) |macro_tok, pat_tok| {
|
||||
if (macro_tok.id != pat_tok.id) return false;
|
||||
switch (macro_tok.id) {
|
||||
.macro_param, .macro_param_no_expand => {
|
||||
// `.end` is the parameter index.
|
||||
if (macro_tok.end != pat_tok.end) return false;
|
||||
},
|
||||
.identifier, .extended_identifier, .string_literal, .char_literal, .pp_num => {
|
||||
const macro_bytes = ms.slice(macro_tok);
|
||||
const pattern_bytes = pat.slicer.slice(pat_tok);
|
||||
|
||||
if (!mem.eql(u8, pattern_bytes, macro_bytes)) return false;
|
||||
},
|
||||
else => {
|
||||
// other tags correspond to keywords and operators that do not contain a "payload"
|
||||
// that can vary
|
||||
},
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
const PatternList = @This();
|
||||
|
||||
patterns: []Pattern,
|
||||
|
||||
pub const MacroSlicer = struct {
|
||||
source: []const u8,
|
||||
tokens: []const CToken,
|
||||
params: u32,
|
||||
|
||||
fn slice(pl: MacroSlicer, token: CToken) []const u8 {
|
||||
return pl.source[token.start..token.end];
|
||||
}
|
||||
};
|
||||
|
||||
pub fn init(allocator: mem.Allocator) Error!PatternList {
|
||||
const patterns = try allocator.alloc(Pattern, templates.len);
|
||||
for (patterns, templates) |*pattern, template| {
|
||||
try pattern.init(allocator, template);
|
||||
}
|
||||
return .{ .patterns = patterns };
|
||||
}
|
||||
|
||||
pub fn deinit(pl: *PatternList, allocator: mem.Allocator) void {
|
||||
for (pl.patterns) |*pattern| pattern.deinit(allocator);
|
||||
allocator.free(pl.patterns);
|
||||
pl.* = undefined;
|
||||
}
|
||||
|
||||
pub fn match(pl: PatternList, ms: MacroSlicer) Error!?Impl {
|
||||
for (pl.patterns) |pattern| if (pattern.matches(ms)) return pattern.impl;
|
||||
return null;
|
||||
}
|
||||
|
||||
fn tokenizeMacro(allocator: mem.Allocator, source: []const u8, tok_list: *std.ArrayList(CToken)) Error!MacroSlicer {
|
||||
var param_count: u32 = 0;
|
||||
var param_buf: [8][]const u8 = undefined;
|
||||
|
||||
var tokenizer: aro.Tokenizer = .{
|
||||
.buf = source,
|
||||
.source = .unused,
|
||||
.langopts = .{},
|
||||
};
|
||||
{
|
||||
const name_tok = tokenizer.nextNoWS();
|
||||
assert(name_tok.id == .identifier);
|
||||
const l_paren = tokenizer.nextNoWS();
|
||||
assert(l_paren.id == .l_paren);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
const param = tokenizer.nextNoWS();
|
||||
if (param.id == .r_paren) break;
|
||||
assert(param.id == .identifier);
|
||||
const slice = source[param.start..param.end];
|
||||
param_buf[param_count] = slice;
|
||||
param_count += 1;
|
||||
|
||||
const comma = tokenizer.nextNoWS();
|
||||
if (comma.id == .r_paren) break;
|
||||
assert(comma.id == .comma);
|
||||
}
|
||||
|
||||
outer: while (true) {
|
||||
const tok = tokenizer.next();
|
||||
switch (tok.id) {
|
||||
.whitespace, .comment => continue,
|
||||
.identifier => {
|
||||
const slice = source[tok.start..tok.end];
|
||||
for (param_buf[0..param_count], 0..) |param, i| {
|
||||
if (std.mem.eql(u8, param, slice)) {
|
||||
try tok_list.append(allocator, .{
|
||||
.id = .macro_param,
|
||||
.source = .unused,
|
||||
.end = @intCast(i),
|
||||
});
|
||||
continue :outer;
|
||||
}
|
||||
}
|
||||
},
|
||||
.hash_hash => {
|
||||
if (tok_list.items[tok_list.items.len - 1].id == .macro_param) {
|
||||
tok_list.items[tok_list.items.len - 1].id = .macro_param_no_expand;
|
||||
}
|
||||
},
|
||||
.nl, .eof => break,
|
||||
else => {},
|
||||
}
|
||||
try tok_list.append(allocator, tok);
|
||||
}
|
||||
|
||||
return .{
|
||||
.source = source,
|
||||
.tokens = try tok_list.toOwnedSlice(allocator),
|
||||
.params = param_count,
|
||||
};
|
||||
}
|
||||
|
||||
test "Macro matching" {
|
||||
const testing = std.testing;
|
||||
const helper = struct {
|
||||
fn checkMacro(
|
||||
allocator: mem.Allocator,
|
||||
pattern_list: PatternList,
|
||||
source: []const u8,
|
||||
comptime expected_match: ?Impl,
|
||||
) !void {
|
||||
var tok_list: std.ArrayList(CToken) = .empty;
|
||||
defer tok_list.deinit(allocator);
|
||||
const ms = try tokenizeMacro(allocator, source, &tok_list);
|
||||
defer allocator.free(ms.tokens);
|
||||
|
||||
const matched = try pattern_list.match(ms);
|
||||
if (expected_match) |expected| {
|
||||
try testing.expectEqual(expected, matched);
|
||||
} else {
|
||||
try testing.expectEqual(@as(@TypeOf(matched), null), matched);
|
||||
}
|
||||
}
|
||||
};
|
||||
const allocator = std.testing.allocator;
|
||||
var pattern_list = try PatternList.init(allocator);
|
||||
defer pattern_list.deinit(allocator);
|
||||
|
||||
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## F)", .F_SUFFIX);
|
||||
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## U)", .U_SUFFIX);
|
||||
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## L)", .L_SUFFIX);
|
||||
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## LL)", .LL_SUFFIX);
|
||||
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## UL)", .UL_SUFFIX);
|
||||
try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## ULL)", .ULL_SUFFIX);
|
||||
try helper.checkMacro(allocator, pattern_list,
|
||||
\\container_of(a, b, c) \
|
||||
\\(__typeof__(b))((char *)(a) - \
|
||||
\\ offsetof(__typeof__(*b), c))
|
||||
, .WL_CONTAINER_OF);
|
||||
|
||||
try helper.checkMacro(allocator, pattern_list, "NO_MATCH(X, Y) (X + Y)", null);
|
||||
try helper.checkMacro(allocator, pattern_list, "CAST_OR_CALL(X, Y) (X)(Y)", .CAST_OR_CALL);
|
||||
try helper.checkMacro(allocator, pattern_list, "CAST_OR_CALL(X, Y) ((X)(Y))", .CAST_OR_CALL);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (void)(X)", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((void)(X))", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (const void)(X)", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((const void)(X))", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (volatile void)(X)", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((volatile void)(X))", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (const volatile void)(X)", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((const volatile void)(X))", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (volatile const void)(X)", .DISCARD);
|
||||
try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((volatile const void)(X))", .DISCARD);
|
||||
}
|
||||
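A brief illustration (not part of this commit) of how the API above fits together when the translator meets a user macro. The names `gpa` and `MY_CAST` are assumptions; the macro's own name is skipped by tokenizeMacro and parameters are compared positionally, so a differently named macro with the same body shape still maps onto .CAST_OR_CALL:

    // Sketch only: `gpa` stands in for whatever allocator the caller owns.
    var patterns = try PatternList.init(gpa);
    defer patterns.deinit(gpa);

    var tok_list: std.ArrayList(CToken) = .empty;
    defer tok_list.deinit(gpa);
    const ms = try tokenizeMacro(gpa, "MY_CAST(T, v) ((T)(v))", &tok_list);
    defer gpa.free(ms.tokens);

    if (try patterns.match(ms)) |impl| {
        // Same token shape as the CAST_OR_CALL template, so this is .CAST_OR_CALL.
        std.debug.assert(impl == .CAST_OR_CALL);
    }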
399 lib/compiler/translate-c/Scope.zig Normal file
@@ -0,0 +1,399 @@
const std = @import("std");
|
||||
|
||||
const aro = @import("aro");
|
||||
|
||||
const ast = @import("ast.zig");
|
||||
const Translator = @import("Translator.zig");
|
||||
|
||||
const Scope = @This();
|
||||
|
||||
pub const SymbolTable = std.StringArrayHashMapUnmanaged(ast.Node);
|
||||
pub const AliasList = std.ArrayListUnmanaged(struct {
|
||||
alias: []const u8,
|
||||
name: []const u8,
|
||||
});
|
||||
|
||||
/// Associates a container (structure or union) with its relevant member functions.
|
||||
pub const ContainerMemberFns = struct {
|
||||
container_decl_ptr: *ast.Node,
|
||||
member_fns: std.ArrayListUnmanaged(*ast.Payload.Func) = .empty,
|
||||
};
|
||||
pub const ContainerMemberFnsHashMap = std.AutoArrayHashMapUnmanaged(aro.QualType, ContainerMemberFns);
|
||||
|
||||
id: Id,
|
||||
parent: ?*Scope,
|
||||
|
||||
pub const Id = enum {
|
||||
block,
|
||||
root,
|
||||
condition,
|
||||
loop,
|
||||
do_loop,
|
||||
};
|
||||
|
||||
/// Used for the scope of condition expressions, for example `if (cond)`.
|
||||
/// The block is lazily initialized because it is only needed for rare
|
||||
/// cases of comma operators being used.
|
||||
pub const Condition = struct {
|
||||
base: Scope,
|
||||
block: ?Block = null,
|
||||
|
||||
fn getBlockScope(cond: *Condition, t: *Translator) !*Block {
|
||||
if (cond.block) |*b| return b;
|
||||
cond.block = try Block.init(t, &cond.base, true);
|
||||
return &cond.block.?;
|
||||
}
|
||||
|
||||
pub fn deinit(cond: *Condition) void {
|
||||
if (cond.block) |*b| b.deinit();
|
||||
}
|
||||
};
|
||||
|
||||
/// Represents an in-progress Node.Block. This struct is stack-allocated.
|
||||
/// When it is deinitialized, it produces an Node.Block which is allocated
|
||||
/// into the main arena.
|
||||
pub const Block = struct {
|
||||
base: Scope,
|
||||
translator: *Translator,
|
||||
statements: std.ArrayListUnmanaged(ast.Node),
|
||||
variables: AliasList,
|
||||
mangle_count: u32 = 0,
|
||||
label: ?[]const u8 = null,
|
||||
|
||||
/// By default all variables are discarded, since we do not know in advance if they
|
||||
/// will be used. This maps the variable's name to the Discard payload, so that if
|
||||
/// the variable is subsequently referenced we can indicate that the discard should
|
||||
/// be skipped during the intermediate AST -> Zig AST render step.
|
||||
variable_discards: std.StringArrayHashMapUnmanaged(*ast.Payload.Discard),
|
||||
|
||||
/// When the block corresponds to a function, keep track of the return type
|
||||
/// so that the return expression can be cast, if necessary
|
||||
return_type: ?aro.QualType = null,
|
||||
|
||||
/// C static local variables are wrapped in a block-local struct. The struct
|
||||
/// is named `mangle(static_local_ + name)` and the Zig variable within the
|
||||
/// struct keeps the name of the C variable.
|
||||
pub const static_local_prefix = "static_local";
|
||||
|
||||
/// C extern local variables are wrapped in a block-local struct. The struct
|
||||
/// is named `mangle(extern_local + name)` and the Zig variable within the
|
||||
/// struct keeps the name of the C variable.
|
||||
pub const extern_local_prefix = "extern_local";
|
||||
|
||||
pub fn init(t: *Translator, parent: *Scope, labeled: bool) !Block {
|
||||
var blk: Block = .{
|
||||
.base = .{
|
||||
.id = .block,
|
||||
.parent = parent,
|
||||
},
|
||||
.translator = t,
|
||||
.statements = .empty,
|
||||
.variables = .empty,
|
||||
.variable_discards = .empty,
|
||||
};
|
||||
if (labeled) {
|
||||
blk.label = try blk.makeMangledName("blk");
|
||||
}
|
||||
return blk;
|
||||
}
|
||||
|
||||
pub fn deinit(block: *Block) void {
|
||||
block.statements.deinit(block.translator.gpa);
|
||||
block.variables.deinit(block.translator.gpa);
|
||||
block.variable_discards.deinit(block.translator.gpa);
|
||||
block.* = undefined;
|
||||
}
|
||||
|
||||
pub fn complete(block: *Block) !ast.Node {
|
||||
const arena = block.translator.arena;
|
||||
if (block.base.parent.?.id == .do_loop) {
|
||||
// We reserve 1 extra statement if the parent is a do_loop. This is in case of
|
||||
// do while, we want to put `if (cond) break;` at the end.
|
||||
const alloc_len = block.statements.items.len + @intFromBool(block.base.parent.?.id == .do_loop);
|
||||
var stmts = try arena.alloc(ast.Node, alloc_len);
|
||||
stmts.len = block.statements.items.len;
|
||||
@memcpy(stmts[0..block.statements.items.len], block.statements.items);
|
||||
return ast.Node.Tag.block.create(arena, .{
|
||||
.label = block.label,
|
||||
.stmts = stmts,
|
||||
});
|
||||
}
|
||||
if (block.statements.items.len == 0) return ast.Node.Tag.empty_block.init();
|
||||
return ast.Node.Tag.block.create(arena, .{
|
||||
.label = block.label,
|
||||
.stmts = try arena.dupe(ast.Node, block.statements.items),
|
||||
});
|
||||
}
|
||||
|
||||
/// Given the desired name, return a name that does not shadow anything from outer scopes.
|
||||
/// Inserts the returned name into the scope.
|
||||
/// The name will not be visible to callers of getAlias.
|
||||
pub fn reserveMangledName(block: *Block, name: []const u8) ![]const u8 {
|
||||
return block.createMangledName(name, true, null);
|
||||
}
|
||||
|
||||
/// Same as reserveMangledName, but enables the alias immediately.
|
||||
pub fn makeMangledName(block: *Block, name: []const u8) ![]const u8 {
|
||||
return block.createMangledName(name, false, null);
|
||||
}
|
||||
|
||||
pub fn createMangledName(block: *Block, name: []const u8, reservation: bool, prefix_opt: ?[]const u8) ![]const u8 {
|
||||
const arena = block.translator.arena;
|
||||
const name_copy = try arena.dupe(u8, name);
|
||||
const alias_base = if (prefix_opt) |prefix|
|
||||
try std.fmt.allocPrint(arena, "{s}_{s}", .{ prefix, name })
|
||||
else
|
||||
name;
|
||||
var proposed_name = alias_base;
|
||||
while (block.contains(proposed_name)) {
|
||||
block.mangle_count += 1;
|
||||
proposed_name = try std.fmt.allocPrint(arena, "{s}_{d}", .{ alias_base, block.mangle_count });
|
||||
}
|
||||
const new_mangle = try block.variables.addOne(block.translator.gpa);
|
||||
if (reservation) {
|
||||
new_mangle.* = .{ .name = name_copy, .alias = name_copy };
|
||||
} else {
|
||||
new_mangle.* = .{ .name = name_copy, .alias = proposed_name };
|
||||
}
|
||||
return proposed_name;
|
||||
}
|
||||
|
||||
fn getAlias(block: *Block, name: []const u8) ?[]const u8 {
|
||||
for (block.variables.items) |p| {
|
||||
if (std.mem.eql(u8, p.name, name))
|
||||
return p.alias;
|
||||
}
|
||||
return block.base.parent.?.getAlias(name);
|
||||
}
|
||||
|
||||
fn localContains(block: *Block, name: []const u8) bool {
|
||||
for (block.variables.items) |p| {
|
||||
if (std.mem.eql(u8, p.alias, name))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
fn contains(block: *Block, name: []const u8) bool {
|
||||
if (block.localContains(name))
|
||||
return true;
|
||||
return block.base.parent.?.contains(name);
|
||||
}
|
||||
|
||||
pub fn discardVariable(block: *Block, name: []const u8) Translator.Error!void {
|
||||
const gpa = block.translator.gpa;
|
||||
const arena = block.translator.arena;
|
||||
const name_node = try ast.Node.Tag.identifier.create(arena, name);
|
||||
const discard = try ast.Node.Tag.discard.create(arena, .{ .should_skip = false, .value = name_node });
|
||||
try block.statements.append(gpa, discard);
|
||||
try block.variable_discards.putNoClobber(gpa, name, discard.castTag(.discard).?);
|
||||
}
|
||||
};
|
||||
|
||||
pub const Root = struct {
|
||||
base: Scope,
|
||||
translator: *Translator,
|
||||
sym_table: SymbolTable,
|
||||
blank_macros: std.StringArrayHashMapUnmanaged(void),
|
||||
nodes: std.ArrayListUnmanaged(ast.Node),
|
||||
container_member_fns_map: ContainerMemberFnsHashMap,
|
||||
|
||||
pub fn init(t: *Translator) Root {
|
||||
return .{
|
||||
.base = .{
|
||||
.id = .root,
|
||||
.parent = null,
|
||||
},
|
||||
.translator = t,
|
||||
.sym_table = .empty,
|
||||
.blank_macros = .empty,
|
||||
.nodes = .empty,
|
||||
.container_member_fns_map = .empty,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(root: *Root) void {
|
||||
root.sym_table.deinit(root.translator.gpa);
|
||||
root.blank_macros.deinit(root.translator.gpa);
|
||||
root.nodes.deinit(root.translator.gpa);
|
||||
for (root.container_member_fns_map.values()) |*members| {
|
||||
members.member_fns.deinit(root.translator.gpa);
|
||||
}
|
||||
root.container_member_fns_map.deinit(root.translator.gpa);
|
||||
}
|
||||
|
||||
/// Check if the global scope contains this name, without looking into the "future", e.g.
|
||||
/// ignore the preprocessed decl and macro names.
|
||||
pub fn containsNow(root: *Root, name: []const u8) bool {
|
||||
return root.sym_table.contains(name);
|
||||
}
|
||||
|
||||
/// Check if the global scope contains the name, includes all decls that haven't been translated yet.
|
||||
pub fn contains(root: *Root, name: []const u8) bool {
|
||||
return root.containsNow(name) or root.translator.global_names.contains(name) or root.translator.weak_global_names.contains(name);
|
||||
}
|
||||
|
||||
pub fn addMemberFunction(root: *Root, func_ty: aro.Type.Func, func: *ast.Payload.Func) !void {
|
||||
std.debug.assert(func.data.name != null);
|
||||
if (func_ty.params.len == 0) return;
|
||||
|
||||
const param1_base = func_ty.params[0].qt.base(root.translator.comp);
|
||||
const container_qt = if (param1_base.type == .pointer)
|
||||
param1_base.type.pointer.child.base(root.translator.comp).qt
|
||||
else
|
||||
param1_base.qt;
|
||||
|
||||
if (root.container_member_fns_map.getPtr(container_qt)) |members| {
|
||||
try members.member_fns.append(root.translator.gpa, func);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn processContainerMemberFns(root: *Root) !void {
|
||||
const gpa = root.translator.gpa;
|
||||
const arena = root.translator.arena;
|
||||
|
||||
var member_names: std.StringArrayHashMapUnmanaged(u32) = .empty;
|
||||
defer member_names.deinit(gpa);
|
||||
for (root.container_member_fns_map.values()) |members| {
|
||||
member_names.clearRetainingCapacity();
|
||||
const decls_ptr = switch (members.container_decl_ptr.tag()) {
|
||||
.@"struct", .@"union" => blk_record: {
|
||||
const payload: *ast.Payload.Container = @alignCast(@fieldParentPtr("base", members.container_decl_ptr.ptr_otherwise));
|
||||
// Avoid duplication with field names
|
||||
for (payload.data.fields) |field| {
|
||||
try member_names.put(gpa, field.name, 0);
|
||||
}
|
||||
break :blk_record &payload.data.decls;
|
||||
},
|
||||
.opaque_literal => blk_opaque: {
|
||||
const container_decl = try ast.Node.Tag.@"opaque".create(arena, .{
|
||||
.layout = .none,
|
||||
.fields = &.{},
|
||||
.decls = &.{},
|
||||
});
|
||||
members.container_decl_ptr.* = container_decl;
|
||||
break :blk_opaque &container_decl.castTag(.@"opaque").?.data.decls;
|
||||
},
|
||||
else => return,
|
||||
};
|
||||
|
||||
const old_decls = decls_ptr.*;
|
||||
const new_decls = try arena.alloc(ast.Node, old_decls.len + members.member_fns.items.len);
|
||||
@memcpy(new_decls[0..old_decls.len], old_decls);
|
||||
// Assume the allocator of payload.data.decls is arena,
|
||||
// so don't add arena.free(old_variables).
|
||||
const func_ref_vars = new_decls[old_decls.len..];
|
||||
var count: u32 = 0;
|
||||
for (members.member_fns.items) |func| {
|
||||
const func_name = func.data.name.?;
|
||||
|
||||
const last_index = std.mem.lastIndexOf(u8, func_name, "_");
|
||||
const last_name = if (last_index) |index| func_name[index + 1 ..] else continue;
|
||||
var same_count: u32 = 0;
|
||||
const gop = try member_names.getOrPutValue(gpa, last_name, same_count);
|
||||
if (gop.found_existing) {
|
||||
gop.value_ptr.* += 1;
|
||||
same_count = gop.value_ptr.*;
|
||||
}
|
||||
const var_name = if (same_count == 0)
|
||||
last_name
|
||||
else
|
||||
try std.fmt.allocPrint(arena, "{s}{d}", .{ last_name, same_count });
|
||||
|
||||
func_ref_vars[count] = try ast.Node.Tag.pub_var_simple.create(arena, .{
|
||||
.name = var_name,
|
||||
.init = try ast.Node.Tag.identifier.create(arena, func_name),
|
||||
});
|
||||
count += 1;
|
||||
}
|
||||
decls_ptr.* = new_decls[0 .. old_decls.len + count];
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub fn findBlockScope(inner: *Scope, t: *Translator) !*Block {
|
||||
var scope = inner;
|
||||
while (true) {
|
||||
switch (scope.id) {
|
||||
.root => unreachable,
|
||||
.block => return @fieldParentPtr("base", scope),
|
||||
.condition => return @as(*Condition, @fieldParentPtr("base", scope)).getBlockScope(t),
|
||||
else => scope = scope.parent.?,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn findBlockReturnType(inner: *Scope) aro.QualType {
|
||||
var scope = inner;
|
||||
while (true) {
|
||||
switch (scope.id) {
|
||||
.root => unreachable,
|
||||
.block => {
|
||||
const block: *Block = @fieldParentPtr("base", scope);
|
||||
if (block.return_type) |qt| return qt;
|
||||
scope = scope.parent.?;
|
||||
},
|
||||
else => scope = scope.parent.?,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn getAlias(scope: *Scope, name: []const u8) ?[]const u8 {
|
||||
return switch (scope.id) {
|
||||
.root => null,
|
||||
.block => @as(*Block, @fieldParentPtr("base", scope)).getAlias(name),
|
||||
.loop, .do_loop, .condition => scope.parent.?.getAlias(name),
|
||||
};
|
||||
}
|
||||
|
||||
fn contains(scope: *Scope, name: []const u8) bool {
|
||||
return switch (scope.id) {
|
||||
.root => @as(*Root, @fieldParentPtr("base", scope)).contains(name),
|
||||
.block => @as(*Block, @fieldParentPtr("base", scope)).contains(name),
|
||||
.loop, .do_loop, .condition => scope.parent.?.contains(name),
|
||||
};
|
||||
}
|
||||
|
||||
/// Appends a node to the first block scope if inside a function, or to the root tree if not.
|
||||
pub fn appendNode(inner: *Scope, node: ast.Node) !void {
|
||||
var scope = inner;
|
||||
while (true) {
|
||||
switch (scope.id) {
|
||||
.root => {
|
||||
const root: *Root = @fieldParentPtr("base", scope);
|
||||
return root.nodes.append(root.translator.gpa, node);
|
||||
},
|
||||
.block => {
|
||||
const block: *Block = @fieldParentPtr("base", scope);
|
||||
return block.statements.append(block.translator.gpa, node);
|
||||
},
|
||||
else => scope = scope.parent.?,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn skipVariableDiscard(inner: *Scope, name: []const u8) void {
|
||||
if (true) {
|
||||
// TODO: due to 'local variable is never mutated' errors, we can
|
||||
// only skip discards if a variable is used as an lvalue, which
|
||||
// we don't currently have detection for in translate-c.
|
||||
// Once #17584 is completed, perhaps we can do away with this
|
||||
// logic entirely, and instead rely on render to fixup code.
|
||||
return;
|
||||
}
|
||||
var scope = inner;
|
||||
while (true) {
|
||||
switch (scope.id) {
|
||||
.root => return,
|
||||
.block => {
|
||||
const block: *Block = @fieldParentPtr("base", scope);
|
||||
if (block.variable_discards.get(name)) |discard| {
|
||||
discard.data.should_skip = true;
|
||||
return;
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
scope = scope.parent.?;
|
||||
}
|
||||
}
|
||||
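As an aside on `createMangledName` above: the collision rule is simply "bump a counter and retry with `name_N` until nothing in the scope chain claims the proposed alias". A stand-alone sketch of that rule, using a plain string set instead of the real Scope machinery, so the types here are illustrative only:

    const std = @import("std");

    test "mangled-name collision rule (illustration)" {
        var taken = std.BufSet.init(std.testing.allocator);
        defer taken.deinit();
        try taken.insert("x"); // pretend an outer scope already uses `x`

        var buf: [32]u8 = undefined;
        var count: u32 = 0;
        var proposed: []const u8 = "x";
        while (taken.contains(proposed)) {
            count += 1;
            proposed = try std.fmt.bufPrint(&buf, "{s}_{d}", .{ "x", count });
        }
        try std.testing.expectEqualStrings("x_1", proposed);
    }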
4174 lib/compiler/translate-c/Translator.zig Normal file
File diff suppressed because it is too large
76 lib/compiler/translate-c/builtins.zig Normal file
@@ -0,0 +1,76 @@
const std = @import("std");
|
||||
|
||||
const ast = @import("ast.zig");
|
||||
|
||||
/// All builtins need to have a source so that macros can reference them
|
||||
/// but for some it is possible to directly call an equivalent Zig builtin
|
||||
/// which is preferrable.
|
||||
pub const Builtin = struct {
|
||||
/// The name of the builtin in `c_builtins.zig`.
|
||||
name: []const u8,
|
||||
tag: ?ast.Node.Tag = null,
|
||||
};
|
||||
|
||||
pub const map = std.StaticStringMap(Builtin).initComptime([_]struct { []const u8, Builtin }{
|
||||
.{ "__builtin_abs", .{ .name = "abs" } },
|
||||
.{ "__builtin_assume", .{ .name = "assume" } },
|
||||
.{ "__builtin_bswap16", .{ .name = "bswap16", .tag = .byte_swap } },
|
||||
.{ "__builtin_bswap32", .{ .name = "bswap32", .tag = .byte_swap } },
|
||||
.{ "__builtin_bswap64", .{ .name = "bswap64", .tag = .byte_swap } },
|
||||
.{ "__builtin_ceilf", .{ .name = "ceilf", .tag = .ceil } },
|
||||
.{ "__builtin_ceil", .{ .name = "ceil", .tag = .ceil } },
|
||||
.{ "__builtin_clz", .{ .name = "clz" } },
|
||||
.{ "__builtin_constant_p", .{ .name = "constant_p" } },
|
||||
.{ "__builtin_cosf", .{ .name = "cosf", .tag = .cos } },
|
||||
.{ "__builtin_cos", .{ .name = "cos", .tag = .cos } },
|
||||
.{ "__builtin_ctz", .{ .name = "ctz" } },
|
||||
.{ "__builtin_exp2f", .{ .name = "exp2f", .tag = .exp2 } },
|
||||
.{ "__builtin_exp2", .{ .name = "exp2", .tag = .exp2 } },
|
||||
.{ "__builtin_expf", .{ .name = "expf", .tag = .exp } },
|
||||
.{ "__builtin_exp", .{ .name = "exp", .tag = .exp } },
|
||||
.{ "__builtin_expect", .{ .name = "expect" } },
|
||||
.{ "__builtin_fabsf", .{ .name = "fabsf", .tag = .abs } },
|
||||
.{ "__builtin_fabs", .{ .name = "fabs", .tag = .abs } },
|
||||
.{ "__builtin_floorf", .{ .name = "floorf", .tag = .floor } },
|
||||
.{ "__builtin_floor", .{ .name = "floor", .tag = .floor } },
|
||||
.{ "__builtin_huge_valf", .{ .name = "huge_valf" } },
|
||||
.{ "__builtin_inff", .{ .name = "inff" } },
|
||||
.{ "__builtin_isinf_sign", .{ .name = "isinf_sign" } },
|
||||
.{ "__builtin_isinf", .{ .name = "isinf" } },
|
||||
.{ "__builtin_isnan", .{ .name = "isnan" } },
|
||||
.{ "__builtin_labs", .{ .name = "labs" } },
|
||||
.{ "__builtin_llabs", .{ .name = "llabs" } },
|
||||
.{ "__builtin_log10f", .{ .name = "log10f", .tag = .log10 } },
|
||||
.{ "__builtin_log10", .{ .name = "log10", .tag = .log10 } },
|
||||
.{ "__builtin_log2f", .{ .name = "log2f", .tag = .log2 } },
|
||||
.{ "__builtin_log2", .{ .name = "log2", .tag = .log2 } },
|
||||
.{ "__builtin_logf", .{ .name = "logf", .tag = .log } },
|
||||
.{ "__builtin_log", .{ .name = "log", .tag = .log } },
|
||||
.{ "__builtin___memcpy_chk", .{ .name = "memcpy_chk" } },
|
||||
.{ "__builtin_memcpy", .{ .name = "memcpy" } },
|
||||
.{ "__builtin___memset_chk", .{ .name = "memset_chk" } },
|
||||
.{ "__builtin_memset", .{ .name = "memset" } },
|
||||
.{ "__builtin_mul_overflow", .{ .name = "mul_overflow" } },
|
||||
.{ "__builtin_nanf", .{ .name = "nanf" } },
|
||||
.{ "__builtin_object_size", .{ .name = "object_size" } },
|
||||
.{ "__builtin_popcount", .{ .name = "popcount" } },
|
||||
.{ "__builtin_roundf", .{ .name = "roundf", .tag = .round } },
|
||||
.{ "__builtin_round", .{ .name = "round", .tag = .round } },
|
||||
.{ "__builtin_signbitf", .{ .name = "signbitf" } },
|
||||
.{ "__builtin_signbit", .{ .name = "signbit" } },
|
||||
.{ "__builtin_sinf", .{ .name = "sinf", .tag = .sin } },
|
||||
.{ "__builtin_sin", .{ .name = "sin", .tag = .sin } },
|
||||
.{ "__builtin_sqrtf", .{ .name = "sqrtf", .tag = .sqrt } },
|
||||
.{ "__builtin_sqrt", .{ .name = "sqrt", .tag = .sqrt } },
|
||||
.{ "__builtin_strcmp", .{ .name = "strcmp" } },
|
||||
.{ "__builtin_strlen", .{ .name = "strlen" } },
|
||||
.{ "__builtin_truncf", .{ .name = "truncf", .tag = .trunc } },
|
||||
.{ "__builtin_trunc", .{ .name = "trunc", .tag = .trunc } },
|
||||
.{ "__builtin_unreachable", .{ .name = "unreachable", .tag = .@"unreachable" } },
|
||||
.{ "__has_builtin", .{ .name = "has_builtin" } },
|
||||
|
||||
// __builtin_alloca_with_align is not currently implemented.
|
||||
// It is used in a run and a translate test to ensure that non-implemented
|
||||
// builtins are correctly demoted. If you implement __builtin_alloca_with_align,
|
||||
// please update the tests to use a different non-implemented builtin.
|
||||
});
|
||||
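For orientation, the map above is keyed by the C builtin's spelling; a sketch of the lookup the translator presumably performs (a fragment that assumes the surrounding file scope, not taken from the diff):

    // A non-null `tag` means the call can be lowered straight to a Zig builtin;
    // otherwise the translator falls back to the named function in c_builtins.zig.
    const info = map.get("__builtin_bswap32").?;
    std.debug.assert(std.mem.eql(u8, info.name, "bswap32"));
    std.debug.assert(info.tag.? == .byte_swap);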
327 lib/compiler/translate-c/helpers.zig Normal file
@@ -0,0 +1,327 @@
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const testing = std.testing;
|
||||
const math = std.math;
|
||||
|
||||
const helpers = @import("helpers");
|
||||
|
||||
const cast = helpers.cast;
|
||||
|
||||
test cast {
|
||||
var i = @as(i64, 10);
|
||||
|
||||
try testing.expect(cast(*u8, 16) == @as(*u8, @ptrFromInt(16)));
|
||||
try testing.expect(cast(*u64, &i).* == @as(u64, 10));
|
||||
try testing.expect(cast(*i64, @as(?*align(1) i64, &i)) == &i);
|
||||
|
||||
try testing.expect(cast(?*u8, 2) == @as(*u8, @ptrFromInt(2)));
|
||||
try testing.expect(cast(?*i64, @as(*align(1) i64, &i)) == &i);
|
||||
try testing.expect(cast(?*i64, @as(?*align(1) i64, &i)) == &i);
|
||||
|
||||
try testing.expectEqual(@as(u32, 4), cast(u32, @as(*u32, @ptrFromInt(4))));
|
||||
try testing.expectEqual(@as(u32, 4), cast(u32, @as(?*u32, @ptrFromInt(4))));
|
||||
try testing.expectEqual(@as(u32, 10), cast(u32, @as(u64, 10)));
|
||||
|
||||
try testing.expectEqual(@as(i32, @bitCast(@as(u32, 0x8000_0000))), cast(i32, @as(u32, 0x8000_0000)));
|
||||
|
||||
try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*const u8, @ptrFromInt(2))));
|
||||
try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*volatile u8, @ptrFromInt(2))));
|
||||
|
||||
try testing.expectEqual(@as(?*anyopaque, @ptrFromInt(2)), cast(?*anyopaque, @as(*u8, @ptrFromInt(2))));
|
||||
|
||||
var foo: c_int = -1;
|
||||
_ = &foo;
|
||||
try testing.expect(cast(*anyopaque, -1) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
|
||||
try testing.expect(cast(*anyopaque, foo) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
|
||||
try testing.expect(cast(?*anyopaque, -1) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
|
||||
try testing.expect(cast(?*anyopaque, foo) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
|
||||
|
||||
const FnPtr = ?*align(1) const fn (*anyopaque) void;
|
||||
try testing.expect(cast(FnPtr, 0) == @as(FnPtr, @ptrFromInt(@as(usize, 0))));
|
||||
try testing.expect(cast(FnPtr, foo) == @as(FnPtr, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
|
||||
|
||||
const complexFunction = struct {
|
||||
fn f(_: ?*anyopaque, _: c_uint, _: ?*const fn (?*anyopaque) callconv(.c) c_uint, _: ?*anyopaque, _: c_uint, _: [*c]c_uint) callconv(.c) usize {
|
||||
return 0;
|
||||
}
|
||||
}.f;
|
||||
|
||||
const SDL_FunctionPointer = ?*const fn () callconv(.c) void;
|
||||
const fn_ptr = cast(SDL_FunctionPointer, complexFunction);
|
||||
try testing.expect(fn_ptr != null);
|
||||
}
|
||||
|
||||
const sizeof = helpers.sizeof;
|
||||
|
||||
test sizeof {
|
||||
const S = extern struct { a: u32 };
|
||||
|
||||
const ptr_size = @sizeOf(*anyopaque);
|
||||
|
||||
try testing.expect(sizeof(u32) == 4);
|
||||
try testing.expect(sizeof(@as(u32, 2)) == 4);
|
||||
try testing.expect(sizeof(2) == @sizeOf(c_int));
|
||||
|
||||
try testing.expect(sizeof(2.0) == @sizeOf(f64));
|
||||
|
||||
try testing.expect(sizeof(S) == 4);
|
||||
|
||||
try testing.expect(sizeof([_]u32{ 4, 5, 6 }) == 12);
|
||||
try testing.expect(sizeof([3]u32) == 12);
|
||||
try testing.expect(sizeof([3:0]u32) == 16);
|
||||
try testing.expect(sizeof(&[_]u32{ 4, 5, 6 }) == ptr_size);
|
||||
|
||||
try testing.expect(sizeof(*u32) == ptr_size);
|
||||
try testing.expect(sizeof([*]u32) == ptr_size);
|
||||
try testing.expect(sizeof([*c]u32) == ptr_size);
|
||||
try testing.expect(sizeof(?*u32) == ptr_size);
|
||||
try testing.expect(sizeof(?[*]u32) == ptr_size);
|
||||
try testing.expect(sizeof(*anyopaque) == ptr_size);
|
||||
try testing.expect(sizeof(*void) == ptr_size);
|
||||
try testing.expect(sizeof(null) == ptr_size);
|
||||
|
||||
try testing.expect(sizeof("foobar") == 7);
|
||||
try testing.expect(sizeof(&[_:0]u16{ 'f', 'o', 'o', 'b', 'a', 'r' }) == 14);
|
||||
try testing.expect(sizeof(*const [4:0]u8) == 5);
|
||||
try testing.expect(sizeof(*[4:0]u8) == ptr_size);
|
||||
try testing.expect(sizeof([*]const [4:0]u8) == ptr_size);
|
||||
try testing.expect(sizeof(*const *const [4:0]u8) == ptr_size);
|
||||
try testing.expect(sizeof(*const [4]u8) == ptr_size);
|
||||
|
||||
if (false) { // TODO
|
||||
try testing.expect(sizeof(&sizeof) == @sizeOf(@TypeOf(&sizeof)));
|
||||
try testing.expect(sizeof(sizeof) == 1);
|
||||
}
|
||||
|
||||
try testing.expect(sizeof(void) == 1);
|
||||
try testing.expect(sizeof(anyopaque) == 1);
|
||||
}
|
||||
|
||||
const promoteIntLiteral = helpers.promoteIntLiteral;
|
||||
|
||||
test promoteIntLiteral {
|
||||
const signed_hex = promoteIntLiteral(c_int, math.maxInt(c_int) + 1, .hex);
|
||||
try testing.expectEqual(c_uint, @TypeOf(signed_hex));
|
||||
|
||||
if (math.maxInt(c_longlong) == math.maxInt(c_int)) return;
|
||||
|
||||
const signed_decimal = promoteIntLiteral(c_int, math.maxInt(c_int) + 1, .decimal);
|
||||
const unsigned = promoteIntLiteral(c_uint, math.maxInt(c_uint) + 1, .hex);
|
||||
|
||||
if (math.maxInt(c_long) > math.maxInt(c_int)) {
|
||||
try testing.expectEqual(c_long, @TypeOf(signed_decimal));
|
||||
try testing.expectEqual(c_ulong, @TypeOf(unsigned));
|
||||
} else {
|
||||
try testing.expectEqual(c_longlong, @TypeOf(signed_decimal));
|
||||
try testing.expectEqual(c_ulonglong, @TypeOf(unsigned));
|
||||
}
|
||||
}
|
||||
|
||||
const shuffleVectorIndex = helpers.shuffleVectorIndex;
|
||||
|
||||
test shuffleVectorIndex {
|
||||
const vector_len: usize = 4;
|
||||
|
||||
_ = shuffleVectorIndex(-1, vector_len);
|
||||
|
||||
try testing.expect(shuffleVectorIndex(0, vector_len) == 0);
|
||||
try testing.expect(shuffleVectorIndex(1, vector_len) == 1);
|
||||
try testing.expect(shuffleVectorIndex(2, vector_len) == 2);
|
||||
try testing.expect(shuffleVectorIndex(3, vector_len) == 3);
|
||||
|
||||
try testing.expect(shuffleVectorIndex(4, vector_len) == -1);
|
||||
try testing.expect(shuffleVectorIndex(5, vector_len) == -2);
|
||||
try testing.expect(shuffleVectorIndex(6, vector_len) == -3);
|
||||
try testing.expect(shuffleVectorIndex(7, vector_len) == -4);
|
||||
}
|
||||
|
||||
const FlexibleArrayType = helpers.FlexibleArrayType;
|
||||
|
||||
test FlexibleArrayType {
|
||||
const Container = extern struct {
|
||||
size: usize,
|
||||
};
|
||||
|
||||
try testing.expectEqual(FlexibleArrayType(*Container, c_int), [*c]c_int);
|
||||
try testing.expectEqual(FlexibleArrayType(*const Container, c_int), [*c]const c_int);
|
||||
try testing.expectEqual(FlexibleArrayType(*volatile Container, c_int), [*c]volatile c_int);
|
||||
try testing.expectEqual(FlexibleArrayType(*const volatile Container, c_int), [*c]const volatile c_int);
|
||||
}
|
||||
|
||||
const signedRemainder = helpers.signedRemainder;
|
||||
|
||||
test signedRemainder {
|
||||
// TODO add test
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
|
||||
const ArithmeticConversion = helpers.ArithmeticConversion;
|
||||
|
||||
test ArithmeticConversion {
|
||||
// Promotions not necessarily the same for other platforms
|
||||
if (builtin.target.cpu.arch != .x86_64 or builtin.target.os.tag != .linux) return error.SkipZigTest;
|
||||
|
||||
const Test = struct {
|
||||
/// Order of operands should not matter for arithmetic conversions
|
||||
fn checkPromotion(comptime A: type, comptime B: type, comptime Expected: type) !void {
|
||||
try std.testing.expect(ArithmeticConversion(A, B) == Expected);
|
||||
try std.testing.expect(ArithmeticConversion(B, A) == Expected);
|
||||
}
|
||||
};
|
||||
|
||||
try Test.checkPromotion(c_longdouble, c_int, c_longdouble);
|
||||
try Test.checkPromotion(c_int, f64, f64);
|
||||
try Test.checkPromotion(f32, bool, f32);
|
||||
|
||||
try Test.checkPromotion(bool, c_short, c_int);
|
||||
try Test.checkPromotion(c_int, c_int, c_int);
|
||||
try Test.checkPromotion(c_short, c_int, c_int);
|
||||
|
||||
try Test.checkPromotion(c_int, c_long, c_long);
|
||||
|
||||
try Test.checkPromotion(c_ulonglong, c_uint, c_ulonglong);
|
||||
|
||||
try Test.checkPromotion(c_uint, c_int, c_uint);
|
||||
|
||||
try Test.checkPromotion(c_uint, c_long, c_long);
|
||||
|
||||
try Test.checkPromotion(c_ulong, c_longlong, c_ulonglong);
|
||||
|
||||
// stdint.h
|
||||
try Test.checkPromotion(u8, i8, c_int);
|
||||
try Test.checkPromotion(u16, i16, c_int);
|
||||
try Test.checkPromotion(i32, c_int, c_int);
|
||||
try Test.checkPromotion(u32, c_int, c_uint);
|
||||
try Test.checkPromotion(i64, c_int, c_long);
|
||||
try Test.checkPromotion(u64, c_int, c_ulong);
|
||||
try Test.checkPromotion(isize, c_int, c_long);
|
||||
try Test.checkPromotion(usize, c_int, c_ulong);
|
||||
}
|
||||
|
||||
const F_SUFFIX = helpers.F_SUFFIX;
|
||||
|
||||
test F_SUFFIX {
|
||||
try testing.expect(@TypeOf(F_SUFFIX(1)) == f32);
|
||||
}
|
||||
|
||||
const U_SUFFIX = helpers.U_SUFFIX;
|
||||
|
||||
test U_SUFFIX {
|
||||
try testing.expect(@TypeOf(U_SUFFIX(1)) == c_uint);
|
||||
if (math.maxInt(c_ulong) > math.maxInt(c_uint)) {
|
||||
try testing.expect(@TypeOf(U_SUFFIX(math.maxInt(c_uint) + 1)) == c_ulong);
|
||||
}
|
||||
if (math.maxInt(c_ulonglong) > math.maxInt(c_ulong)) {
|
||||
try testing.expect(@TypeOf(U_SUFFIX(math.maxInt(c_ulong) + 1)) == c_ulonglong);
|
||||
}
|
||||
}
|
||||
|
||||
const L_SUFFIX = helpers.L_SUFFIX;
|
||||
|
||||
test L_SUFFIX {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(1)) == c_long);
|
||||
if (math.maxInt(c_long) > math.maxInt(c_int)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_int) + 1)) == c_long);
|
||||
}
|
||||
if (math.maxInt(c_longlong) > math.maxInt(c_long)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_long) + 1)) == c_longlong);
|
||||
}
|
||||
}
|
||||
const UL_SUFFIX = helpers.UL_SUFFIX;
|
||||
|
||||
test UL_SUFFIX {
|
||||
try testing.expect(@TypeOf(UL_SUFFIX(1)) == c_ulong);
|
||||
if (math.maxInt(c_ulonglong) > math.maxInt(c_ulong)) {
|
||||
try testing.expect(@TypeOf(UL_SUFFIX(math.maxInt(c_ulong) + 1)) == c_ulonglong);
|
||||
}
|
||||
}
|
||||
const LL_SUFFIX = helpers.LL_SUFFIX;
|
||||
|
||||
test LL_SUFFIX {
|
||||
try testing.expect(@TypeOf(LL_SUFFIX(1)) == c_longlong);
|
||||
}
|
||||
const ULL_SUFFIX = helpers.ULL_SUFFIX;
|
||||
|
||||
test ULL_SUFFIX {
|
||||
try testing.expect(@TypeOf(ULL_SUFFIX(1)) == c_ulonglong);
|
||||
}
|
||||
|
||||
test "Extended C ABI casting" {
|
||||
if (math.maxInt(c_long) > math.maxInt(c_char)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_char, math.maxInt(c_char) - 1))) == c_long); // c_char
|
||||
}
|
||||
if (math.maxInt(c_long) > math.maxInt(c_short)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_short, math.maxInt(c_short) - 1))) == c_long); // c_short
|
||||
}
|
||||
|
||||
if (math.maxInt(c_long) > math.maxInt(c_ushort)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_ushort, math.maxInt(c_ushort) - 1))) == c_long); //c_ushort
|
||||
}
|
||||
|
||||
if (math.maxInt(c_long) > math.maxInt(c_int)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_int, math.maxInt(c_int) - 1))) == c_long); // c_int
|
||||
}
|
||||
|
||||
if (math.maxInt(c_long) > math.maxInt(c_uint)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_uint, math.maxInt(c_uint) - 1))) == c_long); // c_uint
|
||||
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_uint) + 1)) == c_long); // comptime_int -> c_long
|
||||
}
|
||||
|
||||
if (math.maxInt(c_longlong) > math.maxInt(c_long)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_long, math.maxInt(c_long) - 1))) == c_long); // c_long
|
||||
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_long) + 1)) == c_longlong); // comptime_int -> c_longlong
|
||||
}
|
||||
}
|
||||
|
||||
const WL_CONTAINER_OF = helpers.WL_CONTAINER_OF;
|
||||
|
||||
test WL_CONTAINER_OF {
|
||||
const S = struct {
|
||||
a: u32 = 0,
|
||||
b: u32 = 0,
|
||||
};
|
||||
const x = S{};
|
||||
const y = S{};
|
||||
const ptr = WL_CONTAINER_OF(&x.b, &y, "b");
|
||||
try testing.expectEqual(&x, ptr);
|
||||
}
|
||||
|
||||
const CAST_OR_CALL = helpers.CAST_OR_CALL;
|
||||
|
||||
test "CAST_OR_CALL casting" {
|
||||
const arg: c_int = 1000;
|
||||
const casted = CAST_OR_CALL(u8, arg);
|
||||
try testing.expectEqual(cast(u8, arg), casted);
|
||||
|
||||
const S = struct {
|
||||
x: u32 = 0,
|
||||
};
|
||||
var s: S = .{};
|
||||
const casted_ptr = CAST_OR_CALL(*u8, &s);
|
||||
try testing.expectEqual(cast(*u8, &s), casted_ptr);
|
||||
}
|
||||
|
||||
test "CAST_OR_CALL calling" {
|
||||
const Helper = struct {
|
||||
var last_val: bool = false;
|
||||
fn returnsVoid(val: bool) void {
|
||||
last_val = val;
|
||||
}
|
||||
fn returnsBool(f: f32) bool {
|
||||
return f > 0;
|
||||
}
|
||||
fn identity(self: c_uint) c_uint {
|
||||
return self;
|
||||
}
|
||||
};
|
||||
|
||||
CAST_OR_CALL(Helper.returnsVoid, true);
|
||||
try testing.expectEqual(true, Helper.last_val);
|
||||
CAST_OR_CALL(Helper.returnsVoid, false);
|
||||
try testing.expectEqual(false, Helper.last_val);
|
||||
|
||||
try testing.expectEqual(Helper.returnsBool(1), CAST_OR_CALL(Helper.returnsBool, @as(f32, 1)));
|
||||
try testing.expectEqual(Helper.returnsBool(-1), CAST_OR_CALL(Helper.returnsBool, @as(f32, -1)));
|
||||
|
||||
try testing.expectEqual(Helper.identity(@as(c_uint, 100)), CAST_OR_CALL(Helper.identity, @as(c_uint, 100)));
|
||||
}
|
||||
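The tests above exercise the helper functions in isolation; for context, the kind of Zig that translate-c is expected to emit against them looks roughly like the sketch below. The exact output shape and the `__helpers` import name are assumptions, not taken from this diff:

    // C input:
    //   #define BIG 4000000000
    //   #define FLAG 1u
    // Translated output (approximate):
    pub const BIG = __helpers.promoteIntLiteral(c_int, 4000000000, .decimal);
    pub const FLAG = __helpers.U_SUFFIX(1);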
222 lib/compiler/translate-c/main.zig Normal file
@@ -0,0 +1,222 @@
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const mem = std.mem;
|
||||
const process = std.process;
|
||||
const aro = @import("aro");
|
||||
const Translator = @import("Translator.zig");
|
||||
|
||||
const fast_exit = @import("builtin").mode != .Debug;
|
||||
|
||||
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
|
||||
|
||||
pub fn main() u8 {
|
||||
const gpa = general_purpose_allocator.allocator();
|
||||
defer _ = general_purpose_allocator.deinit();
|
||||
|
||||
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
|
||||
defer arena_instance.deinit();
|
||||
const arena = arena_instance.allocator();
|
||||
|
||||
const args = process.argsAlloc(arena) catch {
|
||||
std.debug.print("ran out of memory allocating arguments\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
};
|
||||
|
||||
var stderr_buf: [1024]u8 = undefined;
|
||||
var stderr = std.fs.File.stderr().writer(&stderr_buf);
|
||||
var diagnostics: aro.Diagnostics = .{
|
||||
.output = .{ .to_writer = .{
|
||||
.color = .detect(stderr.file),
|
||||
.writer = &stderr.interface,
|
||||
} },
|
||||
};
|
||||
|
||||
var comp = aro.Compilation.initDefault(gpa, arena, &diagnostics, std.fs.cwd()) catch |err| switch (err) {
|
||||
error.OutOfMemory => {
|
||||
std.debug.print("ran out of memory initializing C compilation\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
};
|
||||
defer comp.deinit();
|
||||
|
||||
var driver: aro.Driver = .{ .comp = &comp, .diagnostics = &diagnostics, .aro_name = "aro" };
|
||||
defer driver.deinit();
|
||||
|
||||
var toolchain: aro.Toolchain = .{ .driver = &driver, .filesystem = .{ .real = comp.cwd } };
|
||||
defer toolchain.deinit();
|
||||
|
||||
translate(&driver, &toolchain, args) catch |err| switch (err) {
|
||||
error.OutOfMemory => {
|
||||
std.debug.print("ran out of memory translating\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
error.FatalError => {
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
error.WriteFailed => {
|
||||
std.debug.print("unable to write to stdout\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
};
|
||||
if (fast_exit) process.exit(@intFromBool(comp.diagnostics.errors != 0));
|
||||
return @intFromBool(comp.diagnostics.errors != 0);
|
||||
}
|
||||
|
||||
pub const usage =
|
||||
\\Usage {s}: [options] file [CC options]
|
||||
\\
|
||||
\\Options:
|
||||
\\ --help Print this message
|
||||
\\ --version Print translate-c version
|
||||
\\ -fmodule-libs Import libraries as modules
|
||||
\\ -fno-module-libs (default) Install libraries next to output file
|
||||
\\
|
||||
\\
|
||||
;
|
||||
|
||||
fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8) !void {
|
||||
const gpa = d.comp.gpa;
|
||||
|
||||
const aro_args = args: {
|
||||
var i: usize = 0;
|
||||
for (args) |arg| {
|
||||
args[i] = arg;
|
||||
if (mem.eql(u8, arg, "--help")) {
|
||||
var stdout_buf: [512]u8 = undefined;
|
||||
var stdout = std.fs.File.stdout().writer(&stdout_buf);
|
||||
try stdout.interface.print(usage, .{args[0]});
|
||||
try stdout.interface.flush();
|
||||
return;
|
||||
} else if (mem.eql(u8, arg, "--version")) {
|
||||
var stdout_buf: [512]u8 = undefined;
|
||||
var stdout = std.fs.File.stdout().writer(&stdout_buf);
|
||||
// TODO add version
|
||||
try stdout.interface.writeAll("0.0.0-dev\n");
|
||||
try stdout.interface.flush();
|
||||
return;
|
||||
} else {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
break :args args[0..i];
|
||||
};
|
||||
const user_macros = macros: {
|
||||
var macro_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer macro_buf.deinit(gpa);
|
||||
|
||||
var discard_buf: [256]u8 = undefined;
|
||||
var discarding: std.Io.Writer.Discarding = .init(&discard_buf);
|
||||
assert(!try d.parseArgs(&discarding.writer, ¯o_buf, aro_args));
|
||||
if (macro_buf.items.len > std.math.maxInt(u32)) {
|
||||
return d.fatal("user provided macro source exceeded max size", .{});
|
||||
}
|
||||
|
||||
const content = try macro_buf.toOwnedSlice(gpa);
|
||||
errdefer gpa.free(content);
|
||||
|
||||
break :macros try d.comp.addSourceFromOwnedBuffer("<command line>", content, .user);
|
||||
};
|
||||
|
||||
if (d.inputs.items.len != 1) {
|
||||
return d.fatal("expected exactly one input file", .{});
|
||||
}
|
||||
const source = d.inputs.items[0];
|
||||
|
||||
tc.discover() catch |er| switch (er) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.TooManyMultilibs => return d.fatal("found more than one multilib with the same priority", .{}),
|
||||
};
|
||||
tc.defineSystemIncludes() catch |er| switch (er) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AroIncludeNotFound => return d.fatal("unable to find Aro builtin headers", .{}),
|
||||
};
|
||||
|
||||
const builtin_macros = d.comp.generateBuiltinMacros(.include_system_defines) catch |err| switch (err) {
|
||||
error.FileTooBig => return d.fatal("builtin macro source exceeded max size", .{}),
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
var pp = try aro.Preprocessor.initDefault(d.comp);
|
||||
defer pp.deinit();
|
||||
|
||||
var name_buf: [std.fs.max_name_bytes]u8 = undefined;
|
||||
// Omit the source file from the dep file so that it can be tracked separately.
|
||||
// In the Zig compiler we want to omit it from the cache hash since it will
|
||||
// be written to a tmp file then renamed into place, meaning the path will be
|
||||
// wrong as soon as the work is done.
|
||||
var opt_dep_file = try d.initDepFile(source, &name_buf, true);
|
||||
defer if (opt_dep_file) |*dep_file| dep_file.deinit(gpa);
|
||||
|
||||
if (opt_dep_file) |*dep_file| pp.dep_file = dep_file;
|
||||
|
||||
try pp.preprocessSources(&.{ source, builtin_macros, user_macros });
|
||||
|
||||
var c_tree = try pp.parse();
|
||||
defer c_tree.deinit();
|
||||
|
||||
if (d.diagnostics.errors != 0) {
|
||||
if (fast_exit) process.exit(1);
|
||||
return error.FatalError;
|
||||
}
|
||||
|
||||
var out_buf: [4096]u8 = undefined;
|
||||
if (opt_dep_file) |dep_file| {
|
||||
const dep_file_name = try d.getDepFileName(source, out_buf[0..std.fs.max_name_bytes]);
|
||||
|
||||
const file = if (dep_file_name) |path|
|
||||
d.comp.cwd.createFile(path, .{}) catch |er|
|
||||
return d.fatal("unable to create dependency file '{s}': {s}", .{ path, aro.Driver.errorDescription(er) })
|
||||
else
|
||||
std.fs.File.stdout();
|
||||
defer if (dep_file_name != null) file.close();
|
||||
|
||||
var file_writer = file.writer(&out_buf);
|
||||
dep_file.write(&file_writer.interface) catch
|
||||
return d.fatal("unable to write dependency file: {s}", .{aro.Driver.errorDescription(file_writer.err.?)});
|
||||
}
|
||||
|
||||
const rendered_zig = try Translator.translate(.{
|
||||
.gpa = gpa,
|
||||
.comp = d.comp,
|
||||
.pp = &pp,
|
||||
.tree = &c_tree,
|
||||
});
|
||||
defer gpa.free(rendered_zig);
|
||||
|
||||
var close_out_file = false;
|
||||
var out_file_path: []const u8 = "<stdout>";
|
||||
var out_file: std.fs.File = .stdout();
|
||||
defer if (close_out_file) out_file.close();
|
||||
|
||||
if (d.output_name) |path| blk: {
|
||||
if (std.mem.eql(u8, path, "-")) break :blk;
|
||||
if (std.fs.path.dirname(path)) |dirname| {
|
||||
std.fs.cwd().makePath(dirname) catch |err|
|
||||
return d.fatal("failed to create path to '{s}': {s}", .{ path, aro.Driver.errorDescription(err) });
|
||||
}
|
||||
out_file = std.fs.cwd().createFile(path, .{}) catch |err| {
|
||||
return d.fatal("failed to create output file '{s}': {s}", .{ path, aro.Driver.errorDescription(err) });
|
||||
};
|
||||
close_out_file = true;
|
||||
out_file_path = path;
|
||||
}
|
||||
|
||||
var out_writer = out_file.writer(&out_buf);
|
||||
out_writer.interface.writeAll(rendered_zig) catch {};
|
||||
out_writer.interface.flush() catch {};
|
||||
if (out_writer.err) |write_err|
|
||||
return d.fatal("failed to write result to '{s}': {s}", .{ out_file_path, aro.Driver.errorDescription(write_err) });
|
||||
|
||||
if (fast_exit) process.exit(0);
|
||||
}
|
||||
|
||||
test {
|
||||
_ = Translator;
|
||||
_ = @import("helpers.zig");
|
||||
_ = @import("PatternList.zig");
|
||||
}
|
||||
|
|
@@ -3,6 +3,7 @@
//!
//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceilf.c
//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceil.c
//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceill.c

const std = @import("std");
const builtin = @import("builtin");
@@ -27,8 +28,23 @@ comptime {
}

pub fn __ceilh(x: f16) callconv(.c) f16 {
    // TODO: more efficient implementation
    return @floatCast(ceilf(x));
    var u: u16 = @bitCast(x);
    const e = @as(i16, @intCast((u >> 10) & 31)) - 15;
    var m: u16 = undefined;

    if (e >= 10) return x;

    if (e >= 0) {
        m = @as(u16, 0x03FF) >> @intCast(e);
        if (u & m == 0) return x;
        if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
        if (u >> 15 == 0) u += m;
        u &= ~m;
        return @bitCast(u);
    } else {
        if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
        return if (u >> 15 != 0) -0.0 else if (u << 1 != 0) 1.0 else x;
    }
}

pub fn ceilf(x: f32) callconv(.c) f32 {
@@ -36,31 +52,18 @@ pub fn ceilf(x: f32) callconv(.c) f32 {
    const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
    var m: u32 = undefined;

    // TODO: Shouldn't need this explicit check.
    if (x == 0.0) {
        return x;
    }
    if (e >= 23) return x;

    if (e >= 23) {
        return x;
    } else if (e >= 0) {
    if (e >= 0) {
        m = @as(u32, 0x007FFFFF) >> @intCast(e);
        if (u & m == 0) {
            return x;
        }
        if (u & m == 0) return x;
        if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
        if (u >> 31 == 0) {
            u += m;
        }
        if (u >> 31 == 0) u += m;
        u &= ~m;
        return @bitCast(u);
    } else {
        if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
        if (u >> 31 != 0) {
            return -0.0;
        } else {
            return 1.0;
        }
        return if (u >> 31 != 0) -0.0 else if (u << 1 != 0) 1.0 else x;
    }
}

@@ -96,8 +99,32 @@ pub fn ceil(x: f64) callconv(.c) f64 {
}

pub fn __ceilx(x: f80) callconv(.c) f80 {
    // TODO: more efficient implementation
    return @floatCast(ceilq(x));
    const f80_toint = 1.0 / math.floatEps(f80);

    const u: u80 = @bitCast(x);
    const e = (u >> 64) & 0x7FFF;
    var y: f80 = undefined;

    if (e >= 0x3FFF + 64 or x == 0) return x;

    if (u >> 79 != 0) {
        y = x - f80_toint + f80_toint - x;
    } else {
        y = x + f80_toint - f80_toint - x;
    }

    if (e <= 0x3FFF - 1) {
        if (common.want_float_exceptions) mem.doNotOptimizeAway(y);
        if (u >> 79 != 0) {
            return -0.0;
        } else {
            return 1.0;
        }
    } else if (y < 0) {
        return x + y + 1;
    } else {
        return x + y;
    }
}

pub fn ceilq(x: f128) callconv(.c) f128 {
@@ -140,6 +167,12 @@ pub fn ceill(x: c_longdouble) callconv(.c) c_longdouble {
    }
}

test "ceil16" {
    try expect(__ceilh(1.3) == 2.0);
    try expect(__ceilh(-1.3) == -1.0);
    try expect(__ceilh(0.2) == 1.0);
}

test "ceil32" {
    try expect(ceilf(1.3) == 2.0);
    try expect(ceilf(-1.3) == -1.0);
@@ -152,12 +185,26 @@ test "ceil64" {
    try expect(ceil(0.2) == 1.0);
}

test "ceil80" {
    try expect(__ceilx(1.3) == 2.0);
    try expect(__ceilx(-1.3) == -1.0);
    try expect(__ceilx(0.2) == 1.0);
}

test "ceil128" {
    try expect(ceilq(1.3) == 2.0);
    try expect(ceilq(-1.3) == -1.0);
    try expect(ceilq(0.2) == 1.0);
}

test "ceil16.special" {
    try expect(__ceilh(0.0) == 0.0);
    try expect(__ceilh(-0.0) == -0.0);
    try expect(math.isPositiveInf(__ceilh(math.inf(f16))));
    try expect(math.isNegativeInf(__ceilh(-math.inf(f16))));
    try expect(math.isNan(__ceilh(math.nan(f16))));
}

test "ceil32.special" {
    try expect(ceilf(0.0) == 0.0);
    try expect(ceilf(-0.0) == -0.0);
@@ -174,6 +221,14 @@ test "ceil64.special" {
    try expect(math.isNan(ceil(math.nan(f64))));
}

test "ceil80.special" {
    try expect(__ceilx(0.0) == 0.0);
    try expect(__ceilx(-0.0) == -0.0);
    try expect(math.isPositiveInf(__ceilx(math.inf(f80))));
    try expect(math.isNegativeInf(__ceilx(-math.inf(f80))));
    try expect(math.isNan(__ceilx(math.nan(f80))));
}

test "ceil128.special" {
    try expect(ceilq(0.0) == 0.0);
    try expect(ceilq(-0.0) == -0.0);
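The new `__ceilh` body above replaces the `@floatCast(ceilf(x))` fallback with the musl-style bit manipulation: for 0 <= e < 10 the mask `0x03FF >> e` covers exactly the fractional mantissa bits, so adding the mask (for positive inputs) and then clearing it rounds up to the next integer. A self-contained walk-through of one value, kept separate from the diff itself:

    const std = @import("std");

    test "__ceilh mask trick, walked through for 2.5 (illustration)" {
        const x: f16 = 2.5; // exponent e = 1, fractional bits present
        var u: u16 = @bitCast(x);
        const e = @as(i16, @intCast((u >> 10) & 31)) - 15;
        const m = @as(u16, 0x03FF) >> @intCast(e);
        try std.testing.expect(u & m != 0); // not an integer yet
        u += m; // positive input: push past the next integer...
        u &= ~m; // ...then drop the fractional bits
        try std.testing.expectEqual(@as(f16, 3.0), @as(f16, @bitCast(u)));
    }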
@ -3,6 +3,7 @@
|
|||
//!
|
||||
//! https://git.musl-libc.org/cgit/musl/tree/src/math/floorf.c
|
||||
//! https://git.musl-libc.org/cgit/musl/tree/src/math/floor.c
|
||||
//! https://git.musl-libc.org/cgit/musl/tree/src/math/floorl.c
|
||||
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
|
|
@ -31,32 +32,17 @@ pub fn __floorh(x: f16) callconv(.c) f16 {
|
|||
const e = @as(i16, @intCast((u >> 10) & 31)) - 15;
|
||||
var m: u16 = undefined;
|
||||
|
||||
// TODO: Shouldn't need this explicit check.
|
||||
if (x == 0.0) {
|
||||
return x;
|
||||
}
|
||||
|
||||
if (e >= 10) {
|
||||
return x;
|
||||
}
|
||||
if (e >= 10) return x;
|
||||
|
||||
if (e >= 0) {
|
||||
m = @as(u16, 1023) >> @intCast(e);
|
||||
if (u & m == 0) {
|
||||
return x;
|
||||
}
|
||||
m = @as(u16, 0x03FF) >> @intCast(e);
|
||||
if (u & m == 0) return x;
|
||||
if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
|
||||
if (u >> 15 != 0) {
|
||||
u += m;
|
||||
}
|
||||
if (u >> 15 != 0) u += m;
|
||||
return @bitCast(u & ~m);
|
||||
} else {
|
||||
if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
|
||||
if (u >> 15 == 0) {
|
||||
return 0.0;
|
||||
} else {
|
||||
return -1.0;
|
||||
}
|
||||
return if (u >> 15 == 0) 0.0 else if (u << 1 != 0) -1.0 else x;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -65,32 +51,17 @@ pub fn floorf(x: f32) callconv(.c) f32 {
    const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
    var m: u32 = undefined;

    // TODO: Shouldn't need this explicit check.
    if (x == 0.0) {
        return x;
    }

    if (e >= 23) {
        return x;
    }
    if (e >= 23) return x;

    if (e >= 0) {
        m = @as(u32, 0x007FFFFF) >> @intCast(e);
        if (u & m == 0) {
            return x;
        }
        if (u & m == 0) return x;
        if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
        if (u >> 31 != 0) {
            u += m;
        }
        if (u >> 31 != 0) u += m;
        return @bitCast(u & ~m);
    } else {
        if (common.want_float_exceptions) mem.doNotOptimizeAway(x + 0x1.0p120);
        if (u >> 31 == 0) {
            return 0.0;
        } else {
            return -1.0;
        }
        return if (u >> 31 == 0) 0.0 else if (u << 1 != 0) -1.0 else x;
    }
}

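Note on the two hunks above: both simplified functions keep the original mask-based strategy. When the unbiased exponent is at least the mantissa width, the value is already integral; otherwise the fractional mantissa bits are cleared, with negative inputs first bumped by the mask so truncation rounds toward minus infinity. A self-contained sketch of that strategy for f32, for readers who want to run it outside compiler_rt (the function name floorViaBits is hypothetical, and the float-exception nudge is omitted):

const std = @import("std");

fn floorViaBits(x: f32) f32 {
    var u: u32 = @bitCast(x);
    const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
    if (e >= 23) return x; // no fractional bits left (also covers inf/nan)
    if (e >= 0) {
        const m = @as(u32, 0x007FFFFF) >> @intCast(e);
        if (u & m == 0) return x; // already an integer
        if (u >> 31 != 0) u += m; // negative: bump so truncation rounds down
        return @bitCast(u & ~m);
    }
    // |x| < 1: floor is 0 for non-negative x, -1 for negative non-zero x.
    return if (u >> 31 == 0) 0.0 else if (u << 1 != 0) -1.0 else x;
}

test "floorViaBits sketch" {
    try std.testing.expect(floorViaBits(1.3) == 1.0);
    try std.testing.expect(floorViaBits(-1.3) == -2.0);
    try std.testing.expect(floorViaBits(0.2) == 0.0);
    try std.testing.expect(floorViaBits(-0.2) == -1.0);
}
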
@ -126,8 +97,34 @@ pub fn floor(x: f64) callconv(.c) f64 {
}

pub fn __floorx(x: f80) callconv(.c) f80 {
    // TODO: more efficient implementation
    return @floatCast(floorq(x));
    const f80_toint = 1.0 / math.floatEps(f80);

    const u: u80 = @bitCast(x);
    const e = (u >> 64) & 0x7FFF;
    var y: f80 = undefined;

    if (e >= 0x3FFF + 64 or x == 0) {
        return x;
    }

    if (u >> 79 != 0) {
        y = x - f80_toint + f80_toint - x;
    } else {
        y = x + f80_toint - f80_toint - x;
    }

    if (e <= 0x3FFF - 1) {
        if (common.want_float_exceptions) mem.doNotOptimizeAway(y);
        if (u >> 79 != 0) {
            return -1.0;
        } else {
            return 0.0;
        }
    } else if (y > 0) {
        return x + y - 1;
    } else {
        return x + y;
    }
}

pub fn floorq(x: f128) callconv(.c) f128 {

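The new __floorx body mirrors the musl floorl approach referenced in the file's doc comment: adding and then subtracting 1.0 / floatEps (a power of two at which consecutive floats are exactly 1 apart) forces the value to round to an integer, and the residual y is then used to step down to the floor. A minimal runnable sketch of the same trick, written for f64 and a positive input purely for illustration (the toint constant and the test value are assumptions, not part of the diff):

const std = @import("std");

test "round-via-toint sketch (f64, positive input)" {
    // 1/eps is 2^52 for f64; at that magnitude the float spacing is 1,
    // so x + toint - toint rounds x to an integer in the current rounding mode.
    const toint = 1.0 / std.math.floatEps(f64);
    const x: f64 = 3.7;
    const y = x + toint - toint - x; // about +0.3: x was rounded up to 4.0
    const floored = if (y > 0) x + y - 1 else x + y;
    try std.testing.expect(floored == 3.0);
}
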
@ -188,6 +185,12 @@ test "floor64" {
    try expect(floor(0.2) == 0.0);
}

test "floor80" {
    try expect(__floorx(1.3) == 1.0);
    try expect(__floorx(-1.3) == -2.0);
    try expect(__floorx(0.2) == 0.0);
}

test "floor128" {
    try expect(floorq(1.3) == 1.0);
    try expect(floorq(-1.3) == -2.0);

@ -218,6 +221,14 @@ test "floor64.special" {
    try expect(math.isNan(floor(math.nan(f64))));
}

test "floor80.special" {
    try expect(__floorx(0.0) == 0.0);
    try expect(__floorx(-0.0) == -0.0);
    try expect(math.isPositiveInf(__floorx(math.inf(f80))));
    try expect(math.isNegativeInf(__floorx(-math.inf(f80))));
    try expect(math.isNan(__floorx(math.nan(f80))));
}

test "floor128.special" {
    try expect(floorq(0.0) == 0.0);
    try expect(floorq(-0.0) == -0.0);

@ -203,7 +203,7 @@ fn add_adjusted(a: f64, b: f64) f64 {
        if (uhii & 1 == 0) {
            // hibits += copysign(1.0, sum.hi, sum.lo)
            const uloi: u64 = @bitCast(sum.lo);
            uhii += 1 - ((uhii ^ uloi) >> 62);
            uhii = uhii + 1 - ((uhii ^ uloi) >> 62);
            sum.hi = @bitCast(uhii);
        }
    }

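The only change in add_adjusted and in the three related helpers below is rewriting "uhii += 1 - ..." as "uhii = uhii + 1 - ...". Presumably this is about evaluation order on an unsigned integer: when the signs of sum.hi and sum.lo differ, the shifted XOR term is greater than 1, so the sub-expression "1 - ..." on its own would underflow the unsigned type and trip Zig's integer-overflow safety check, while evaluating left to right keeps the intermediate values in range. A tiny illustrative sketch with made-up values:

const std = @import("std");

test "left-to-right evaluation avoids an unsigned intermediate underflow" {
    var a: u64 = 100;
    const b: u64 = 3;
    // "a += 1 - b;" would evaluate 1 - b first; for b > 1 that underflows
    // u64, which is a safety-checked integer overflow in safe builds.
    // Evaluating left to right keeps every intermediate value in range:
    a = a + 1 - b;
    try std.testing.expect(a == 98);
}
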
@ -217,7 +217,7 @@ fn add_and_denorm(a: f64, b: f64, scale: i32) f64 {
        const bits_lost = -@as(i32, @intCast((uhii >> 52) & 0x7FF)) - scale + 1;
        if ((bits_lost != 1) == (uhii & 1 != 0)) {
            const uloi: u64 = @bitCast(sum.lo);
            uhii += 1 - (((uhii ^ uloi) >> 62) & 2);
            uhii = uhii + 1 - (((uhii ^ uloi) >> 62) & 2);
            sum.hi = @bitCast(uhii);
        }
    }

@ -259,7 +259,7 @@ fn add_adjusted128(a: f128, b: f128) f128 {
        if (uhii & 1 == 0) {
            // hibits += copysign(1.0, sum.hi, sum.lo)
            const uloi: u128 = @bitCast(sum.lo);
            uhii += 1 - ((uhii ^ uloi) >> 126);
            uhii = uhii + 1 - ((uhii ^ uloi) >> 126);
            sum.hi = @bitCast(uhii);
        }
    }

@ -284,7 +284,7 @@ fn add_and_denorm128(a: f128, b: f128, scale: i32) f128 {
        const bits_lost = -@as(i32, @intCast((uhii >> 112) & 0x7FFF)) - scale + 1;
        if ((bits_lost != 1) == (uhii & 1 != 0)) {
            const uloi: u128 = @bitCast(sum.lo);
            uhii += 1 - (((uhii ^ uloi) >> 126) & 2);
            uhii = uhii + 1 - (((uhii ^ uloi) >> 126) & 2);
            sum.hi = @bitCast(uhii);
        }
    }

Some files were not shown because too many files have changed in this diff.