diff --git a/CMakeLists.txt b/CMakeLists.txt
index 963938ca78..3263725019 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -296,14 +296,12 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/meta/trait.zig"
"${CMAKE_SOURCE_DIR}/lib/std/multi_array_list.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/os/darwin.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux/errno/generic.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux/x86_64.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux/io_uring.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux/x86_64.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/os/posix_spawn.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/windows.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/windows/ntstatus.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/windows/win32error.zig"
@@ -507,7 +505,9 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/Thread.zig"
"${CMAKE_SOURCE_DIR}/lib/std/Thread/Futex.zig"
"${CMAKE_SOURCE_DIR}/lib/std/Thread/Mutex.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/Thread/Pool.zig"
"${CMAKE_SOURCE_DIR}/lib/std/Thread/ResetEvent.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/Thread/WaitGroup.zig"
"${CMAKE_SOURCE_DIR}/lib/std/time.zig"
"${CMAKE_SOURCE_DIR}/lib/std/treap.zig"
"${CMAKE_SOURCE_DIR}/lib/std/unicode.zig"
@@ -517,6 +517,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/zig/c_builtins.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/Parse.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/render.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/zig/Server.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/string_literal.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/system.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/system/NativePaths.zig"
@@ -531,9 +532,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/Package.zig"
"${CMAKE_SOURCE_DIR}/src/RangeSet.zig"
"${CMAKE_SOURCE_DIR}/src/Sema.zig"
- "${CMAKE_SOURCE_DIR}/src/ThreadPool.zig"
"${CMAKE_SOURCE_DIR}/src/TypedValue.zig"
- "${CMAKE_SOURCE_DIR}/src/WaitGroup.zig"
"${CMAKE_SOURCE_DIR}/src/Zir.zig"
"${CMAKE_SOURCE_DIR}/src/arch/aarch64/CodeGen.zig"
"${CMAKE_SOURCE_DIR}/src/arch/aarch64/Emit.zig"
@@ -560,9 +559,12 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/arch/wasm/Mir.zig"
"${CMAKE_SOURCE_DIR}/src/arch/x86_64/CodeGen.zig"
"${CMAKE_SOURCE_DIR}/src/arch/x86_64/Emit.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/x86_64/Encoding.zig"
"${CMAKE_SOURCE_DIR}/src/arch/x86_64/Mir.zig"
- "${CMAKE_SOURCE_DIR}/src/arch/x86_64/bits.zig"
"${CMAKE_SOURCE_DIR}/src/arch/x86_64/abi.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/x86_64/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/x86_64/encoder.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/x86_64/encodings.zig"
"${CMAKE_SOURCE_DIR}/src/clang.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
@@ -827,6 +829,12 @@ else()
set(ZIG_STATIC_ARG "")
endif()
+if(CMAKE_POSITION_INDEPENDENT_CODE)
+  set(ZIG_PIE_ARG "-Dpie")
+else()
+  set(ZIG_PIE_ARG "")
+endif()
+
set(ZIG_BUILD_ARGS
--zig-lib-dir "${CMAKE_SOURCE_DIR}/lib"
"-Dconfig_h=${ZIG_CONFIG_H_OUT}"
@@ -835,6 +843,7 @@ set(ZIG_BUILD_ARGS
${ZIG_STATIC_ARG}
${ZIG_NO_LIB_ARG}
${ZIG_SINGLE_THREADED_ARG}
+ ${ZIG_PIE_ARG}
"-Dtarget=${ZIG_TARGET_TRIPLE}"
"-Dcpu=${ZIG_TARGET_MCPU}"
"-Dversion-string=${RESOLVED_ZIG_VERSION}"
diff --git a/build.zig b/build.zig
index 87b1b797ca..72db9ba749 100644
--- a/build.zig
+++ b/build.zig
@@ -31,6 +31,11 @@ pub fn build(b: *std.Build) !void {
const use_zig_libcxx = b.option(bool, "use-zig-libcxx", "If libc++ is needed, use zig's bundled version, don't try to integrate with the system") orelse false;
const test_step = b.step("test", "Run all the tests");
+ const deprecated_skip_install_lib_files = b.option(bool, "skip-install-lib-files", "deprecated. see no-lib") orelse false;
+ if (deprecated_skip_install_lib_files) {
+ std.log.warn("-Dskip-install-lib-files is deprecated in favor of -Dno-lib", .{});
+ }
+ const skip_install_lib_files = b.option(bool, "no-lib", "skip copying of lib/ files and langref to installation prefix. Useful for development") orelse deprecated_skip_install_lib_files;
const docgen_exe = b.addExecutable(.{
.name = "docgen",
@@ -40,28 +45,35 @@ pub fn build(b: *std.Build) !void {
});
docgen_exe.single_threaded = single_threaded;
- const langref_out_path = try b.cache_root.join(b.allocator, &.{"langref.html"});
- const docgen_cmd = docgen_exe.run();
- docgen_cmd.addArgs(&[_][]const u8{
- "--zig",
- b.zig_exe,
- "doc" ++ fs.path.sep_str ++ "langref.html.in",
- langref_out_path,
- });
- docgen_cmd.step.dependOn(&docgen_exe.step);
+ const docgen_cmd = b.addRunArtifact(docgen_exe);
+ docgen_cmd.addArgs(&.{ "--zig", b.zig_exe });
+ if (b.zig_lib_dir) |p| {
+ docgen_cmd.addArgs(&.{ "--zig-lib-dir", p });
+ }
+ docgen_cmd.addFileSourceArg(.{ .path = "doc/langref.html.in" });
+ const langref_file = docgen_cmd.addOutputFileArg("langref.html");
+ const install_langref = b.addInstallFileWithDir(langref_file, .prefix, "doc/langref.html");
+ if (!skip_install_lib_files) {
+ b.getInstallStep().dependOn(&install_langref.step);
+ }
const docs_step = b.step("docs", "Build documentation");
docs_step.dependOn(&docgen_cmd.step);
- const test_cases = b.addTest(.{
- .root_source_file = .{ .path = "src/test.zig" },
+ // This is for legacy reasons, to be removed after our CI scripts are upgraded to use
+ // the file from the install prefix instead.
+ const legacy_write_to_cache = b.addWriteFiles();
+ legacy_write_to_cache.addCopyFileToSource(langref_file, "zig-cache/langref.html");
+ docs_step.dependOn(&legacy_write_to_cache.step);
+
+ const check_case_exe = b.addExecutable(.{
+ .name = "check-case",
+ .root_source_file = .{ .path = "test/src/Cases.zig" },
.optimize = optimize,
});
- test_cases.main_pkg_path = ".";
- test_cases.stack_size = stack_size;
- test_cases.single_threaded = single_threaded;
-
- const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"});
+ check_case_exe.main_pkg_path = ".";
+ check_case_exe.stack_size = stack_size;
+ check_case_exe.single_threaded = single_threaded;
const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false;
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
@@ -74,11 +86,6 @@ pub fn build(b: *std.Build) !void {
const skip_stage1 = b.option(bool, "skip-stage1", "Main test suite skips stage1 compile error tests") orelse false;
const skip_run_translated_c = b.option(bool, "skip-run-translated-c", "Main test suite skips run-translated-c tests") orelse false;
const skip_stage2_tests = b.option(bool, "skip-stage2-tests", "Main test suite skips self-hosted compiler tests") orelse false;
- const deprecated_skip_install_lib_files = b.option(bool, "skip-install-lib-files", "deprecated. see no-lib") orelse false;
- if (deprecated_skip_install_lib_files) {
- std.log.warn("-Dskip-install-lib-files is deprecated in favor of -Dno-lib", .{});
- }
- const skip_install_lib_files = b.option(bool, "no-lib", "skip copying of lib/ files to installation prefix. Useful for development") orelse deprecated_skip_install_lib_files;
const only_install_lib_files = b.option(bool, "lib-files-only", "Only install library files") orelse false;
@@ -157,6 +164,7 @@ pub fn build(b: *std.Build) !void {
const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse (enable_llvm or only_c);
const sanitize_thread = b.option(bool, "sanitize-thread", "Enable thread-sanitization") orelse false;
const strip = b.option(bool, "strip", "Omit debug information");
+ const pie = b.option(bool, "pie", "Produce a Position Independent Executable");
const value_tracing = b.option(bool, "value-tracing", "Enable extra state tracking to help troubleshoot bugs in the compiler (using the std.debug.Trace API)") orelse false;
const mem_leak_frames: u32 = b.option(u32, "mem-leak-frames", "How many stack frames to print when a memory leak occurs. Tests get 2x this amount.") orelse blk: {
@@ -167,6 +175,7 @@ pub fn build(b: *std.Build) !void {
const exe = addCompilerStep(b, optimize, target);
exe.strip = strip;
+ exe.pie = pie;
exe.sanitize_thread = sanitize_thread;
exe.build_id = b.option(bool, "build-id", "Include a build id note") orelse false;
exe.install();
@@ -178,13 +187,12 @@ pub fn build(b: *std.Build) !void {
test_step.dependOn(&exe.step);
}
- b.default_step.dependOn(&exe.step);
exe.single_threaded = single_threaded;
if (target.isWindows() and target.getAbi() == .gnu) {
// LTO is currently broken on mingw, this can be removed when it's fixed.
exe.want_lto = false;
- test_cases.want_lto = false;
+ check_case_exe.want_lto = false;
}
const exe_options = b.addOptions();
@@ -199,11 +207,11 @@ pub fn build(b: *std.Build) !void {
exe_options.addOption(bool, "llvm_has_xtensa", llvm_has_xtensa);
exe_options.addOption(bool, "force_gpa", force_gpa);
exe_options.addOption(bool, "only_c", only_c);
- exe_options.addOption(bool, "omit_pkg_fetching_code", false);
+ exe_options.addOption(bool, "omit_pkg_fetching_code", only_c);
if (link_libc) {
exe.linkLibC();
- test_cases.linkLibC();
+ check_case_exe.linkLibC();
}
const is_debug = optimize == .Debug;
@@ -289,14 +297,14 @@ pub fn build(b: *std.Build) !void {
}
try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
- try addCmakeCfgOptionsToExe(b, cfg, test_cases, use_zig_libcxx);
+ try addCmakeCfgOptionsToExe(b, cfg, check_case_exe, use_zig_libcxx);
} else {
// Here we are -Denable-llvm but no cmake integration.
try addStaticLlvmOptionsToExe(exe);
- try addStaticLlvmOptionsToExe(test_cases);
+ try addStaticLlvmOptionsToExe(check_case_exe);
}
if (target.isWindows()) {
- inline for (.{ exe, test_cases }) |artifact| {
+ inline for (.{ exe, check_case_exe }) |artifact| {
artifact.linkSystemLibrary("version");
artifact.linkSystemLibrary("uuid");
artifact.linkSystemLibrary("ole32");
@@ -341,8 +349,9 @@ pub fn build(b: *std.Build) !void {
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
const test_cases_options = b.addOptions();
- test_cases.addOptions("build_options", test_cases_options);
+ check_case_exe.addOptions("build_options", test_cases_options);
+ test_cases_options.addOption(bool, "enable_tracy", false);
test_cases_options.addOption(bool, "enable_logging", enable_logging);
test_cases_options.addOption(bool, "enable_link_snapshots", enable_link_snapshots);
test_cases_options.addOption(bool, "skip_non_native", skip_non_native);
@@ -366,12 +375,6 @@ pub fn build(b: *std.Build) !void {
test_cases_options.addOption(std.SemanticVersion, "semver", semver);
test_cases_options.addOption(?[]const u8, "test_filter", test_filter);
- const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
- test_cases_step.dependOn(&test_cases.step);
- if (!skip_stage2_tests) {
- test_step.dependOn(test_cases_step);
- }
-
var chosen_opt_modes_buf: [4]builtin.Mode = undefined;
var chosen_mode_index: usize = 0;
if (!skip_debug) {
@@ -392,96 +395,101 @@ pub fn build(b: *std.Build) !void {
}
const optimization_modes = chosen_opt_modes_buf[0..chosen_mode_index];
- // run stage1 `zig fmt` on this build.zig file just to make sure it works
- test_step.dependOn(&fmt_build_zig.step);
- const fmt_step = b.step("test-fmt", "Run zig fmt against build.zig to make sure it works");
- fmt_step.dependOn(&fmt_build_zig.step);
+ const fmt_include_paths = &.{ "doc", "lib", "src", "test", "tools", "build.zig" };
+ const fmt_exclude_paths = &.{"test/cases"};
+ const do_fmt = b.addFmt(.{
+ .paths = fmt_include_paths,
+ .exclude_paths = fmt_exclude_paths,
+ });
- test_step.dependOn(tests.addPkgTests(
- b,
- test_filter,
- "test/behavior.zig",
- "behavior",
- "Run the behavior tests",
- optimization_modes,
- skip_single_threaded,
- skip_non_native,
- skip_libc,
- skip_stage1,
- skip_stage2_tests,
- ));
+ b.step("test-fmt", "Check source files having conforming formatting").dependOn(&b.addFmt(.{
+ .paths = fmt_include_paths,
+ .exclude_paths = fmt_exclude_paths,
+ .check = true,
+ }).step);
- test_step.dependOn(tests.addPkgTests(
- b,
- test_filter,
- "lib/compiler_rt.zig",
- "compiler-rt",
- "Run the compiler_rt tests",
- optimization_modes,
- true, // skip_single_threaded
- skip_non_native,
- true, // skip_libc
- skip_stage1,
- skip_stage2_tests or true, // TODO get these all passing
- ));
+ const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
+ try tests.addCases(b, test_cases_step, test_filter, check_case_exe);
+ if (!skip_stage2_tests) test_step.dependOn(test_cases_step);
- test_step.dependOn(tests.addPkgTests(
- b,
- test_filter,
- "lib/c.zig",
- "universal-libc",
- "Run the universal libc tests",
- optimization_modes,
- true, // skip_single_threaded
- skip_non_native,
- true, // skip_libc
- skip_stage1,
- skip_stage2_tests or true, // TODO get these all passing
- ));
+ test_step.dependOn(tests.addModuleTests(b, .{
+ .test_filter = test_filter,
+ .root_src = "test/behavior.zig",
+ .name = "behavior",
+ .desc = "Run the behavior tests",
+ .optimize_modes = optimization_modes,
+ .skip_single_threaded = skip_single_threaded,
+ .skip_non_native = skip_non_native,
+ .skip_libc = skip_libc,
+ .skip_stage1 = skip_stage1,
+ .skip_stage2 = skip_stage2_tests,
+ .max_rss = 1 * 1024 * 1024 * 1024,
+ }));
+
+ test_step.dependOn(tests.addModuleTests(b, .{
+ .test_filter = test_filter,
+ .root_src = "lib/compiler_rt.zig",
+ .name = "compiler-rt",
+ .desc = "Run the compiler_rt tests",
+ .optimize_modes = optimization_modes,
+ .skip_single_threaded = true,
+ .skip_non_native = skip_non_native,
+ .skip_libc = true,
+ .skip_stage1 = skip_stage1,
+ .skip_stage2 = true, // TODO get all these passing
+ }));
+
+ test_step.dependOn(tests.addModuleTests(b, .{
+ .test_filter = test_filter,
+ .root_src = "lib/c.zig",
+ .name = "universal-libc",
+ .desc = "Run the universal libc tests",
+ .optimize_modes = optimization_modes,
+ .skip_single_threaded = true,
+ .skip_non_native = skip_non_native,
+ .skip_libc = true,
+ .skip_stage1 = skip_stage1,
+ .skip_stage2 = true, // TODO get all these passing
+ }));
test_step.dependOn(tests.addCompareOutputTests(b, test_filter, optimization_modes));
test_step.dependOn(tests.addStandaloneTests(
b,
- test_filter,
optimization_modes,
- skip_non_native,
enable_macos_sdk,
- target,
skip_stage2_tests,
- b.enable_darling,
- b.enable_qemu,
- b.enable_rosetta,
- b.enable_wasmtime,
- b.enable_wine,
enable_symlinks_windows,
));
test_step.dependOn(tests.addCAbiTests(b, skip_non_native, skip_release));
- test_step.dependOn(tests.addLinkTests(b, test_filter, optimization_modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
+ test_step.dependOn(tests.addLinkTests(b, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
test_step.dependOn(tests.addStackTraceTests(b, test_filter, optimization_modes));
- test_step.dependOn(tests.addCliTests(b, test_filter, optimization_modes));
+ test_step.dependOn(tests.addCliTests(b));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, optimization_modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
if (!skip_run_translated_c) {
test_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
}
- // tests for this feature are disabled until we have the self-hosted compiler available
- // test_step.dependOn(tests.addGenHTests(b, test_filter));
- test_step.dependOn(tests.addPkgTests(
- b,
- test_filter,
- "lib/std/std.zig",
- "std",
- "Run the standard library tests",
- optimization_modes,
- skip_single_threaded,
- skip_non_native,
- skip_libc,
- skip_stage1,
- true, // TODO get these all passing
- ));
+ test_step.dependOn(tests.addModuleTests(b, .{
+ .test_filter = test_filter,
+ .root_src = "lib/std/std.zig",
+ .name = "std",
+ .desc = "Run the standard library tests",
+ .optimize_modes = optimization_modes,
+ .skip_single_threaded = skip_single_threaded,
+ .skip_non_native = skip_non_native,
+ .skip_libc = skip_libc,
+ .skip_stage1 = skip_stage1,
+ .skip_stage2 = true, // TODO get all these passing
+ // I observed a value of 3398275072 on my M1, and multiplied by 1.1 to
+ // get this amount:
+ .max_rss = 3738102579,
+ }));
try addWasiUpdateStep(b, version);
+
+ b.step("fmt", "Modify source files in place to have conforming formatting")
+ .dependOn(&do_fmt.step);
}
fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
@@ -510,6 +518,7 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
exe_options.addOption(bool, "enable_tracy_callstack", false);
exe_options.addOption(bool, "enable_tracy_allocation", false);
exe_options.addOption(bool, "value_tracing", false);
+ exe_options.addOption(bool, "omit_pkg_fetching_code", true);
const run_opt = b.addSystemCommand(&.{ "wasm-opt", "-Oz", "--enable-bulk-memory" });
run_opt.addArtifactArg(exe);
@@ -681,10 +690,7 @@ fn addCxxKnownPath(
) !void {
if (!std.process.can_spawn)
return error.RequiredLibraryNotFound;
- const path_padded = try b.exec(&[_][]const u8{
- ctx.cxx_compiler,
- b.fmt("-print-file-name={s}", .{objname}),
- });
+ const path_padded = b.exec(&.{ ctx.cxx_compiler, b.fmt("-print-file-name={s}", .{objname}) });
var tokenizer = mem.tokenize(u8, path_padded, "\r\n");
const path_unpadded = tokenizer.next().?;
if (mem.eql(u8, path_unpadded, objname)) {
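
The build.zig changes above replace tests.addPkgTests and its long positional parameter list with tests.addModuleTests taking an options struct. As a hedged illustration of that pattern only (the struct and function below are hypothetical sketches, not the actual test/tests.zig API), defaulted fields let a new knob such as max_rss be added without touching every call site:

    const std = @import("std");

    // Hypothetical options struct mirroring the call sites shown in the diff.
    const ModuleTestOptions = struct {
        test_filter: ?[]const u8 = null,
        root_src: []const u8,
        name: []const u8,
        desc: []const u8,
        optimize_modes: []const std.builtin.Mode = &.{},
        skip_single_threaded: bool = false,
        skip_non_native: bool = false,
        skip_libc: bool = false,
        max_rss: usize = 0, // 0 means "no declared upper bound"
    };

    // Hypothetical sketch: a defaulted field keeps existing callers compiling.
    fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *std.Build.Step {
        const step = b.step(options.name, options.desc);
        _ = options.root_src; // a real implementation would add test artifacts here
        return step;
    }
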
diff --git a/ci/aarch64-linux-debug.sh b/ci/aarch64-linux-debug.sh
index 2c0639763c..2fd831860e 100644
--- a/ci/aarch64-linux-debug.sh
+++ b/ci/aarch64-linux-debug.sh
@@ -67,7 +67,7 @@ stage3-debug/bin/zig build test docs \
--zig-lib-dir "$(pwd)/../lib"
# Look for HTML errors.
-tidy --drop-empty-elements no -qe "$ZIG_LOCAL_CACHE_DIR/langref.html"
+tidy --drop-empty-elements no -qe "stage3-debug/doc/langref.html"
# Produce the experimental std lib documentation.
stage3-debug/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib
@@ -98,3 +98,9 @@ unset CXX
ninja install
stage3/bin/zig test ../test/behavior.zig -I../test
+stage3/bin/zig build -p stage4 \
+ -Dstatic-llvm \
+ -Dtarget=native-native-musl \
+ --search-prefix "$PREFIX" \
+ --zig-lib-dir "$(pwd)/../lib"
+stage4/bin/zig test ../test/behavior.zig -I../test
diff --git a/ci/aarch64-linux-release.sh b/ci/aarch64-linux-release.sh
index 3992f62770..8eb9ed2523 100644
--- a/ci/aarch64-linux-release.sh
+++ b/ci/aarch64-linux-release.sh
@@ -67,7 +67,7 @@ stage3-release/bin/zig build test docs \
--zig-lib-dir "$(pwd)/../lib"
# Look for HTML errors.
-tidy --drop-empty-elements no -qe "$ZIG_LOCAL_CACHE_DIR/langref.html"
+tidy --drop-empty-elements no -qe "stage3-release/doc/langref.html"
# Produce the experimental std lib documentation.
stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib
@@ -98,3 +98,9 @@ unset CXX
ninja install
stage3/bin/zig test ../test/behavior.zig -I../test
+stage3/bin/zig build -p stage4 \
+ -Dstatic-llvm \
+ -Dtarget=native-native-musl \
+ --search-prefix "$PREFIX" \
+ --zig-lib-dir "$(pwd)/../lib"
+stage4/bin/zig test ../test/behavior.zig -I../test
diff --git a/ci/x86_64-linux-debug.sh b/ci/x86_64-linux-debug.sh
index ddcc76af7d..3ae72f7ef5 100755
--- a/ci/x86_64-linux-debug.sh
+++ b/ci/x86_64-linux-debug.sh
@@ -66,7 +66,7 @@ stage3-debug/bin/zig build test docs \
--zig-lib-dir "$(pwd)/../lib"
# Look for HTML errors.
-tidy --drop-empty-elements no -qe "$ZIG_LOCAL_CACHE_DIR/langref.html"
+tidy --drop-empty-elements no -qe "stage3-debug/doc/langref.html"
# Produce the experimental std lib documentation.
stage3-debug/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib
@@ -97,3 +97,9 @@ unset CXX
ninja install
stage3/bin/zig test ../test/behavior.zig -I../test
+stage3/bin/zig build -p stage4 \
+ -Dstatic-llvm \
+ -Dtarget=native-native-musl \
+ --search-prefix "$PREFIX" \
+ --zig-lib-dir "$(pwd)/../lib"
+stage4/bin/zig test ../test/behavior.zig -I../test
diff --git a/ci/x86_64-linux-release.sh b/ci/x86_64-linux-release.sh
index d3da4991e2..f8b94addcc 100755
--- a/ci/x86_64-linux-release.sh
+++ b/ci/x86_64-linux-release.sh
@@ -67,7 +67,7 @@ stage3-release/bin/zig build test docs \
--zig-lib-dir "$(pwd)/../lib"
# Look for HTML errors.
-tidy --drop-empty-elements no -qe "$ZIG_LOCAL_CACHE_DIR/langref.html"
+tidy --drop-empty-elements no -qe "stage3-release/doc/langref.html"
# Produce the experimental std lib documentation.
stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib
@@ -114,3 +114,9 @@ unset CXX
ninja install
stage3/bin/zig test ../test/behavior.zig -I../test
+stage3/bin/zig build -p stage4 \
+ -Dstatic-llvm \
+ -Dtarget=native-native-musl \
+ --search-prefix "$PREFIX" \
+ --zig-lib-dir "$(pwd)/../lib"
+stage4/bin/zig test ../test/behavior.zig -I../test
diff --git a/doc/docgen.zig b/doc/docgen.zig
index fae513f8c3..277316dd37 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -28,10 +28,10 @@ const usage =
\\
;
-fn errorf(comptime format: []const u8, args: anytype) noreturn {
+fn fatal(comptime format: []const u8, args: anytype) noreturn {
const stderr = io.getStdErr().writer();
- stderr.print("error: " ++ format, args) catch {};
+ stderr.print("error: " ++ format ++ "\n", args) catch {};
process.exit(1);
}
@@ -45,6 +45,7 @@ pub fn main() !void {
if (!args_it.skip()) @panic("expected self arg");
var zig_exe: []const u8 = "zig";
+ var opt_zig_lib_dir: ?[]const u8 = null;
var do_code_tests = true;
var files = [_][]const u8{ "", "" };
@@ -59,24 +60,29 @@ pub fn main() !void {
if (args_it.next()) |param| {
zig_exe = param;
} else {
- errorf("expected parameter after --zig\n", .{});
+ fatal("expected parameter after --zig", .{});
+ }
+ } else if (mem.eql(u8, arg, "--zig-lib-dir")) {
+ if (args_it.next()) |param| {
+ opt_zig_lib_dir = param;
+ } else {
+ fatal("expected parameter after --zig-lib-dir", .{});
}
} else if (mem.eql(u8, arg, "--skip-code-tests")) {
do_code_tests = false;
} else {
- errorf("unrecognized option: '{s}'\n", .{arg});
+ fatal("unrecognized option: '{s}'", .{arg});
}
} else {
if (i > 1) {
- errorf("too many arguments\n", .{});
+ fatal("too many arguments", .{});
}
files[i] = arg;
i += 1;
}
}
if (i < 2) {
- errorf("not enough arguments\n", .{});
- process.exit(1);
+ fatal("not enough arguments", .{});
}
var in_file = try fs.cwd().openFile(files[0], .{ .mode = .read_only });
@@ -95,7 +101,7 @@ pub fn main() !void {
try fs.cwd().makePath(tmp_dir_name);
defer fs.cwd().deleteTree(tmp_dir_name) catch {};
- try genHtml(allocator, &tokenizer, &toc, buffered_writer.writer(), zig_exe, do_code_tests);
+ try genHtml(allocator, &tokenizer, &toc, buffered_writer.writer(), zig_exe, opt_zig_lib_dir, do_code_tests);
try buffered_writer.flush();
}
@@ -1268,9 +1274,10 @@ fn genHtml(
toc: *Toc,
out: anytype,
zig_exe: []const u8,
+ opt_zig_lib_dir: ?[]const u8,
do_code_tests: bool,
) !void {
- var progress = Progress{};
+ var progress = Progress{ .dont_print_on_dumb = true };
const root_node = progress.start("Generating docgen examples", toc.nodes.len);
defer root_node.end();
@@ -1278,7 +1285,7 @@ fn genHtml(
try env_map.put("ZIG_DEBUG_COLOR", "1");
const host = try std.zig.system.NativeTargetInfo.detect(.{});
- const builtin_code = try getBuiltinCode(allocator, &env_map, zig_exe);
+ const builtin_code = try getBuiltinCode(allocator, &env_map, zig_exe, opt_zig_lib_dir);
for (toc.nodes) |node| {
defer root_node.completeOne();
@@ -1370,6 +1377,9 @@ fn genHtml(
"--color", "on",
"--enable-cache", tmp_source_file_name,
});
+ if (opt_zig_lib_dir) |zig_lib_dir| {
+ try build_args.appendSlice(&.{ "--zig-lib-dir", zig_lib_dir });
+ }
try shell_out.print("$ zig build-exe {s} ", .{name_plus_ext});
@@ -1512,8 +1522,12 @@ fn genHtml(
defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{
- zig_exe, "test", tmp_source_file_name,
+ zig_exe, "test",
+ tmp_source_file_name,
});
+ if (opt_zig_lib_dir) |zig_lib_dir| {
+ try test_args.appendSlice(&.{ "--zig-lib-dir", zig_lib_dir });
+ }
try shell_out.print("$ zig test {s}.zig ", .{code.name});
switch (code.mode) {
@@ -1564,12 +1578,13 @@ fn genHtml(
defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{
- zig_exe,
- "test",
- "--color",
- "on",
+ zig_exe, "test",
+ "--color", "on",
tmp_source_file_name,
});
+ if (opt_zig_lib_dir) |zig_lib_dir| {
+ try test_args.appendSlice(&.{ "--zig-lib-dir", zig_lib_dir });
+ }
try shell_out.print("$ zig test {s}.zig ", .{code.name});
switch (code.mode) {
@@ -1624,8 +1639,12 @@ fn genHtml(
defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{
- zig_exe, "test", tmp_source_file_name,
+ zig_exe, "test",
+ tmp_source_file_name,
});
+ if (opt_zig_lib_dir) |zig_lib_dir| {
+ try test_args.appendSlice(&.{ "--zig-lib-dir", zig_lib_dir });
+ }
var mode_arg: []const u8 = "";
switch (code.mode) {
.Debug => {},
@@ -1684,17 +1703,17 @@ fn genHtml(
defer build_args.deinit();
try build_args.appendSlice(&[_][]const u8{
- zig_exe,
- "build-obj",
+ zig_exe, "build-obj",
+ "--color", "on",
+ "--name", code.name,
tmp_source_file_name,
- "--color",
- "on",
- "--name",
- code.name,
try std.fmt.allocPrint(allocator, "-femit-bin={s}{c}{s}", .{
tmp_dir_name, fs.path.sep, name_plus_obj_ext,
}),
});
+ if (opt_zig_lib_dir) |zig_lib_dir| {
+ try build_args.appendSlice(&.{ "--zig-lib-dir", zig_lib_dir });
+ }
try shell_out.print("$ zig build-obj {s}.zig ", .{code.name});
@@ -1758,13 +1777,15 @@ fn genHtml(
defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{
- zig_exe,
- "build-lib",
+ zig_exe, "build-lib",
tmp_source_file_name,
try std.fmt.allocPrint(allocator, "-femit-bin={s}{s}{s}", .{
tmp_dir_name, fs.path.sep_str, bin_basename,
}),
});
+ if (opt_zig_lib_dir) |zig_lib_dir| {
+ try test_args.appendSlice(&.{ "--zig-lib-dir", zig_lib_dir });
+ }
try shell_out.print("$ zig build-lib {s}.zig ", .{code.name});
switch (code.mode) {
@@ -1829,9 +1850,23 @@ fn exec(allocator: Allocator, env_map: *process.EnvMap, args: []const []const u8
return result;
}
-fn getBuiltinCode(allocator: Allocator, env_map: *process.EnvMap, zig_exe: []const u8) ![]const u8 {
- const result = try exec(allocator, env_map, &[_][]const u8{ zig_exe, "build-obj", "--show-builtin" });
- return result.stdout;
+fn getBuiltinCode(
+ allocator: Allocator,
+ env_map: *process.EnvMap,
+ zig_exe: []const u8,
+ opt_zig_lib_dir: ?[]const u8,
+) ![]const u8 {
+ if (opt_zig_lib_dir) |zig_lib_dir| {
+ const result = try exec(allocator, env_map, &.{
+ zig_exe, "build-obj", "--show-builtin", "--zig-lib-dir", zig_lib_dir,
+ });
+ return result.stdout;
+ } else {
+ const result = try exec(allocator, env_map, &.{
+ zig_exe, "build-obj", "--show-builtin",
+ });
+ return result.stdout;
+ }
}
fn dumpArgs(args: []const []const u8) void {
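
getBuiltinCode above spells out the exec call twice, once per branch. Purely as an aside (runZig below stands in for docgen's exec helper and is not part of the patch), the same result can be had by building argv incrementally:

    const std = @import("std");

    fn showBuiltin(
        allocator: std.mem.Allocator,
        zig_exe: []const u8,
        opt_zig_lib_dir: ?[]const u8,
    ) ![]const u8 {
        var argv = std.ArrayList([]const u8).init(allocator);
        defer argv.deinit();
        try argv.appendSlice(&.{ zig_exe, "build-obj", "--show-builtin" });
        if (opt_zig_lib_dir) |dir| try argv.appendSlice(&.{ "--zig-lib-dir", dir });
        return runZig(allocator, argv.items);
    }

    // Stand-in for docgen's exec(); returns the child's stdout.
    fn runZig(allocator: std.mem.Allocator, argv: []const []const u8) ![]const u8 {
        _ = allocator;
        _ = argv;
        return "";
    }
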
diff --git a/doc/langref.html.in b/doc/langref.html.in
index e016ef13f8..991fd0c3e6 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -7457,20 +7457,20 @@ pub const STDOUT_FILENO = 1;
pub fn syscall1(number: usize, arg1: usize) usize {
return asm volatile ("syscall"
- : [ret] "={rax}" (-> usize)
+ : [ret] "={rax}" (-> usize),
: [number] "{rax}" (number),
- [arg1] "{rdi}" (arg1)
+ [arg1] "{rdi}" (arg1),
: "rcx", "r11"
);
}
pub fn syscall3(number: usize, arg1: usize, arg2: usize, arg3: usize) usize {
return asm volatile ("syscall"
- : [ret] "={rax}" (-> usize)
+ : [ret] "={rax}" (-> usize),
: [number] "{rax}" (number),
[arg1] "{rdi}" (arg1),
[arg2] "{rsi}" (arg2),
- [arg3] "{rdx}" (arg3)
+ [arg3] "{rdx}" (arg3),
: "rcx", "r11"
);
}
@@ -7519,14 +7519,14 @@ pub fn syscall1(number: usize, arg1: usize) usize {
// type is the result type of the inline assembly expression.
// If it is a value binding, then `%[ret]` syntax would be used
// to refer to the register bound to the value.
- (-> usize)
+ (-> usize),
// Next is the list of inputs.
// The constraint for these inputs means, "when the assembly code is
// executed, $rax shall have the value of `number` and $rdi shall have
// the value of `arg1`". Any number of input parameters is allowed,
// including none.
: [number] "{rax}" (number),
- [arg1] "{rdi}" (arg1)
+ [arg1] "{rdi}" (arg1),
// Next is the list of clobbers. These declare a set of registers whose
// values will not be preserved by the execution of this assembly code.
// These do not include output or input registers. The special clobber
@@ -7818,12 +7818,14 @@ comptime {
This function inserts a platform-specific debug trap instruction which causes
debuggers to break there.
+ Unlike for {#syntax#}@trap(){#endsyntax#}, execution may continue after this point if the program is resumed.
This function is only valid within function scope.
-
+ {#see_also|@trap#}
{#header_close#}
+
{#header_open|@mulAdd#}
{#syntax#}@mulAdd(comptime T: type, a: T, b: T, c: T) T{#endsyntax#}
@@ -9393,6 +9395,19 @@ fn List(comptime T: type) type {
{#header_close#}
+ {#header_open|@trap#}
+ {#syntax#}@trap() noreturn{#endsyntax#}
+
+ This function inserts a platform-specific trap/jam instruction which can be used to exit the program abnormally.
+ This may be implemented by explicitly emitting an invalid instruction which may cause an illegal instruction exception of some sort.
+ Unlike for {#syntax#}@breakpoint(){#endsyntax#}, execution does not continue after this point.
+
+
+ Outside function scope, this builtin causes a compile error.
+
+ {#see_also|@breakpoint#}
+ {#header_close#}
+
{#header_open|@truncate#}
{#syntax#}@truncate(comptime T: type, integer: anytype) T{#endsyntax#}
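
To make the distinction drawn by the @breakpoint and @trap entries above concrete, a minimal sketch (not taken from the langref samples; function names are invented):

    const std = @import("std");

    // A debugger can resume execution after this returns.
    pub fn pauseForDebugger() void {
        @breakpoint();
    }

    // Never returns; typically lowered to an invalid/trap instruction.
    pub fn abortWithTrap() noreturn {
        @trap();
    }

    pub fn main() void {
        std.debug.print("about to trap\n", .{});
        abortWithTrap();
    }
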
@@ -9565,9 +9580,10 @@ pub fn build(b: *std.Build) void {
This causes these options to be available:
- - -Drelease-safe=[bool] - Optimizations on and safety on
- - -Drelease-fast=[bool] - Optimizations on and safety off
- - -Drelease-small=[bool] - Size optimizations on and safety off
+ - -Doptimize=Debug - Optimizations off and safety on (default)
+ - -Doptimize=ReleaseSafe - Optimizations on and safety on
+ - -Doptimize=ReleaseFast - Optimizations on and safety off
+ - -Doptimize=ReleaseSmall - Size optimizations on and safety off
{#header_open|Debug#}
{#shell_samp#}$ zig build-exe example.zig{#end_shell_samp#}
@@ -9788,8 +9804,8 @@ pub fn main() !void {
{#header_close#}
{#header_open|Builtin Overflow Functions#}
- These builtins return a {#syntax#}bool{#endsyntax#} of whether or not overflow
- occurred, as well as returning the overflowed bits:
+ These builtins return a tuple containing whether there was an overflow
+ (as a {#syntax#}u1{#endsyntax#}) and the possibly overflowed bits of the operation:
- {#link|@addWithOverflow#}
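
The reworded paragraph above describes the overflow builtins' tuple return. A small hedged example of what that looks like in practice (assuming the two-argument form of @addWithOverflow current at the time of this change):

    const std = @import("std");

    test "@addWithOverflow returns a value/overflow-bit tuple" {
        const result = @addWithOverflow(@as(u8, 250), 10);
        // result[0] is the wrapped sum, result[1] is the u1 overflow bit.
        try std.testing.expectEqual(@as(u8, 4), result[0]);
        try std.testing.expectEqual(@as(u1, 1), result[1]);
    }
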
diff --git a/lib/build_runner.zig b/lib/build_runner.zig
index 64421d1031..5f5601a68d 100644
--- a/lib/build_runner.zig
+++ b/lib/build_runner.zig
@@ -1,12 +1,14 @@
const root = @import("@build");
const std = @import("std");
const builtin = @import("builtin");
+const assert = std.debug.assert;
const io = std.io;
const fmt = std.fmt;
const mem = std.mem;
const process = std.process;
const ArrayList = std.ArrayList;
const File = std.fs.File;
+const Step = std.Build.Step;
pub const dependencies = @import("@dependencies");
@@ -14,12 +16,15 @@ pub fn main() !void {
// Here we use an ArenaAllocator backed by a DirectAllocator because a build is a short-lived,
// one shot program. We don't need to waste time freeing memory and finding places to squish
// bytes into. So we free everything all at once at the very end.
- var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- defer arena.deinit();
+ var single_threaded_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+ defer single_threaded_arena.deinit();
- const allocator = arena.allocator();
- var args = try process.argsAlloc(allocator);
- defer process.argsFree(allocator, args);
+ var thread_safe_arena: std.heap.ThreadSafeAllocator = .{
+ .child_allocator = single_threaded_arena.allocator(),
+ };
+ const arena = thread_safe_arena.allocator();
+
+ var args = try process.argsAlloc(arena);
// skip my own exe name
var arg_idx: usize = 1;
@@ -59,18 +64,17 @@ pub fn main() !void {
};
var cache: std.Build.Cache = .{
- .gpa = allocator,
+ .gpa = arena,
.manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}),
};
cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
cache.addPrefix(build_root_directory);
cache.addPrefix(local_cache_directory);
cache.addPrefix(global_cache_directory);
-
- //cache.hash.addBytes(builtin.zig_version);
+ cache.hash.addBytes(builtin.zig_version_string);
const builder = try std.Build.create(
- allocator,
+ arena,
zig_exe,
build_root_directory,
local_cache_directory,
@@ -80,35 +84,34 @@ pub fn main() !void {
);
defer builder.destroy();
- var targets = ArrayList([]const u8).init(allocator);
- var debug_log_scopes = ArrayList([]const u8).init(allocator);
-
- const stderr_stream = io.getStdErr().writer();
- const stdout_stream = io.getStdOut().writer();
+ var targets = ArrayList([]const u8).init(arena);
+ var debug_log_scopes = ArrayList([]const u8).init(arena);
+ var thread_pool_options: std.Thread.Pool.Options = .{ .allocator = arena };
var install_prefix: ?[]const u8 = null;
var dir_list = std.Build.DirList{};
+ var enable_summary: ?bool = null;
+ var max_rss: usize = 0;
+ var color: Color = .auto;
- // before arg parsing, check for the NO_COLOR environment variable
- // if it exists, default the color setting to .off
- // explicit --color arguments will still override this setting.
- builder.color = if (std.process.hasEnvVarConstant("NO_COLOR")) .off else .auto;
+ const stderr_stream = io.getStdErr().writer();
+ const stdout_stream = io.getStdOut().writer();
while (nextArg(args, &arg_idx)) |arg| {
if (mem.startsWith(u8, arg, "-D")) {
const option_contents = arg[2..];
if (option_contents.len == 0) {
std.debug.print("Expected option name after '-D'\n\n", .{});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
}
if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
const option_name = option_contents[0..name_end];
const option_value = option_contents[name_end + 1 ..];
if (try builder.addUserInputOption(option_name, option_value))
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
} else {
if (try builder.addUserInputFlag(option_contents))
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
}
} else if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "--verbose")) {
@@ -118,69 +121,83 @@ pub fn main() !void {
} else if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--prefix")) {
install_prefix = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after {s}\n\n", .{arg});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
} else if (mem.eql(u8, arg, "-l") or mem.eql(u8, arg, "--list-steps")) {
return steps(builder, false, stdout_stream);
} else if (mem.eql(u8, arg, "--prefix-lib-dir")) {
dir_list.lib_dir = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after {s}\n\n", .{arg});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
} else if (mem.eql(u8, arg, "--prefix-exe-dir")) {
dir_list.exe_dir = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after {s}\n\n", .{arg});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
} else if (mem.eql(u8, arg, "--prefix-include-dir")) {
dir_list.include_dir = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after {s}\n\n", .{arg});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
} else if (mem.eql(u8, arg, "--sysroot")) {
const sysroot = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after --sysroot\n\n", .{});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
builder.sysroot = sysroot;
+ } else if (mem.eql(u8, arg, "--maxrss")) {
+ const max_rss_text = nextArg(args, &arg_idx) orelse {
+                std.debug.print("Expected argument after --maxrss\n\n", .{});
+ usageAndErr(builder, false, stderr_stream);
+ };
+ // TODO: support shorthand such as "2GiB", "2GB", or "2G"
+ max_rss = std.fmt.parseInt(usize, max_rss_text, 10) catch |err| {
+ std.debug.print("invalid byte size: '{s}': {s}\n", .{
+ max_rss_text, @errorName(err),
+ });
+ process.exit(1);
+ };
} else if (mem.eql(u8, arg, "--search-prefix")) {
const search_prefix = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after --search-prefix\n\n", .{});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
builder.addSearchPrefix(search_prefix);
} else if (mem.eql(u8, arg, "--libc")) {
const libc_file = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after --libc\n\n", .{});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
builder.libc_file = libc_file;
} else if (mem.eql(u8, arg, "--color")) {
const next_arg = nextArg(args, &arg_idx) orelse {
std.debug.print("expected [auto|on|off] after --color", .{});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
- builder.color = std.meta.stringToEnum(@TypeOf(builder.color), next_arg) orelse {
+ color = std.meta.stringToEnum(Color, next_arg) orelse {
std.debug.print("expected [auto|on|off] after --color, found '{s}'", .{next_arg});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
} else if (mem.eql(u8, arg, "--zig-lib-dir")) {
builder.zig_lib_dir = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after --zig-lib-dir\n\n", .{});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
} else if (mem.eql(u8, arg, "--debug-log")) {
const next_arg = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after {s}\n\n", .{arg});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
try debug_log_scopes.append(next_arg);
+ } else if (mem.eql(u8, arg, "--debug-pkg-config")) {
+ builder.debug_pkg_config = true;
} else if (mem.eql(u8, arg, "--debug-compile-errors")) {
builder.debug_compile_errors = true;
} else if (mem.eql(u8, arg, "--glibc-runtimes")) {
builder.glibc_runtimes_dir = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected argument after --glibc-runtimes\n\n", .{});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
};
} else if (mem.eql(u8, arg, "--verbose-link")) {
builder.verbose_link = true;
@@ -194,8 +211,6 @@ pub fn main() !void {
builder.verbose_cc = true;
} else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) {
builder.verbose_llvm_cpu_features = true;
- } else if (mem.eql(u8, arg, "--prominent-compile-errors")) {
- builder.prominent_compile_errors = true;
} else if (mem.eql(u8, arg, "-fwine")) {
builder.enable_wine = true;
} else if (mem.eql(u8, arg, "-fno-wine")) {
@@ -216,6 +231,10 @@ pub fn main() !void {
builder.enable_darling = true;
} else if (mem.eql(u8, arg, "-fno-darling")) {
builder.enable_darling = false;
+ } else if (mem.eql(u8, arg, "-fsummary")) {
+ enable_summary = true;
+ } else if (mem.eql(u8, arg, "-fno-summary")) {
+ enable_summary = false;
} else if (mem.eql(u8, arg, "-freference-trace")) {
builder.reference_trace = 256;
} else if (mem.startsWith(u8, arg, "-freference-trace=")) {
@@ -226,39 +245,639 @@ pub fn main() !void {
};
} else if (mem.eql(u8, arg, "-fno-reference-trace")) {
builder.reference_trace = null;
+ } else if (mem.startsWith(u8, arg, "-j")) {
+ const num = arg["-j".len..];
+ const n_jobs = std.fmt.parseUnsigned(u32, num, 10) catch |err| {
+ std.debug.print("unable to parse jobs count '{s}': {s}", .{
+ num, @errorName(err),
+ });
+ process.exit(1);
+ };
+ if (n_jobs < 1) {
+ std.debug.print("number of jobs must be at least 1\n", .{});
+ process.exit(1);
+ }
+ thread_pool_options.n_jobs = n_jobs;
} else if (mem.eql(u8, arg, "--")) {
builder.args = argsRest(args, arg_idx);
break;
} else {
std.debug.print("Unrecognized argument: {s}\n\n", .{arg});
- return usageAndErr(builder, false, stderr_stream);
+ usageAndErr(builder, false, stderr_stream);
}
} else {
try targets.append(arg);
}
}
+ const stderr = std.io.getStdErr();
+ const ttyconf = get_tty_conf(color, stderr);
+ switch (ttyconf) {
+ .no_color => try builder.env_map.put("NO_COLOR", "1"),
+ .escape_codes => try builder.env_map.put("ZIG_DEBUG_COLOR", "1"),
+ .windows_api => {},
+ }
+
+ var progress: std.Progress = .{ .dont_print_on_dumb = true };
+ const main_progress_node = progress.start("", 0);
+
builder.debug_log_scopes = debug_log_scopes.items;
builder.resolveInstallPrefix(install_prefix, dir_list);
- try builder.runBuild(root);
+ {
+ var prog_node = main_progress_node.start("user build.zig logic", 0);
+ defer prog_node.end();
+ try builder.runBuild(root);
+ }
if (builder.validateUserInputDidItFail())
- return usageAndErr(builder, true, stderr_stream);
+ usageAndErr(builder, true, stderr_stream);
- builder.make(targets.items) catch |err| {
- switch (err) {
- error.InvalidStepName => {
- return usageAndErr(builder, true, stderr_stream);
- },
- error.UncleanExit => process.exit(1),
- // This error is intended to indicate that the step has already
- // logged an error message and so printing the error return trace
- // here would be unwanted extra information, unless the user opts
- // into it with a debug flag.
- error.StepFailed => process.exit(1),
- else => return err,
- }
+ var run: Run = .{
+ .max_rss = max_rss,
+ .max_rss_is_default = false,
+ .max_rss_mutex = .{},
+ .memory_blocked_steps = std.ArrayList(*Step).init(arena),
+
+ .claimed_rss = 0,
+ .enable_summary = enable_summary,
+ .ttyconf = ttyconf,
+ .stderr = stderr,
};
+
+ if (run.max_rss == 0) {
+ run.max_rss = process.totalSystemMemory() catch std.math.maxInt(usize);
+ run.max_rss_is_default = true;
+ }
+
+ runStepNames(
+ arena,
+ builder,
+ targets.items,
+ main_progress_node,
+ thread_pool_options,
+ &run,
+ ) catch |err| switch (err) {
+ error.UncleanExit => process.exit(1),
+ else => return err,
+ };
+}
+
+const Run = struct {
+ max_rss: usize,
+ max_rss_is_default: bool,
+ max_rss_mutex: std.Thread.Mutex,
+ memory_blocked_steps: std.ArrayList(*Step),
+
+ claimed_rss: usize,
+ enable_summary: ?bool,
+ ttyconf: std.debug.TTY.Config,
+ stderr: std.fs.File,
+};
+
+fn runStepNames(
+ arena: std.mem.Allocator,
+ b: *std.Build,
+ step_names: []const []const u8,
+ parent_prog_node: *std.Progress.Node,
+ thread_pool_options: std.Thread.Pool.Options,
+ run: *Run,
+) !void {
+ const gpa = b.allocator;
+ var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{};
+ defer step_stack.deinit(gpa);
+
+ if (step_names.len == 0) {
+ try step_stack.put(gpa, b.default_step, {});
+ } else {
+ try step_stack.ensureUnusedCapacity(gpa, step_names.len);
+ for (0..step_names.len) |i| {
+ const step_name = step_names[step_names.len - i - 1];
+ const s = b.top_level_steps.get(step_name) orelse {
+ std.debug.print("no step named '{s}'. Access the help menu with 'zig build -h'\n", .{step_name});
+ process.exit(1);
+ };
+ step_stack.putAssumeCapacity(&s.step, {});
+ }
+ }
+
+ const starting_steps = try arena.dupe(*Step, step_stack.keys());
+ for (starting_steps) |s| {
+ checkForDependencyLoop(b, s, &step_stack) catch |err| switch (err) {
+ error.DependencyLoopDetected => return error.UncleanExit,
+ else => |e| return e,
+ };
+ }
+
+ {
+ // Check that we have enough memory to complete the build.
+ var any_problems = false;
+ for (step_stack.keys()) |s| {
+ if (s.max_rss == 0) continue;
+ if (s.max_rss > run.max_rss) {
+ std.debug.print("{s}{s}: this step declares an upper bound of {d} bytes of memory, exceeding the available {d} bytes of memory\n", .{
+ s.owner.dep_prefix, s.name, s.max_rss, run.max_rss,
+ });
+ any_problems = true;
+ }
+ }
+ if (any_problems) {
+ if (run.max_rss_is_default) {
+                std.debug.print("note: use --maxrss to override the default\n", .{});
+ }
+ return error.UncleanExit;
+ }
+ }
+
+ var thread_pool: std.Thread.Pool = undefined;
+ try thread_pool.init(thread_pool_options);
+ defer thread_pool.deinit();
+
+ {
+ defer parent_prog_node.end();
+
+ var step_prog = parent_prog_node.start("steps", step_stack.count());
+ defer step_prog.end();
+
+ var wait_group: std.Thread.WaitGroup = .{};
+ defer wait_group.wait();
+
+ // Here we spawn the initial set of tasks with a nice heuristic -
+ // dependency order. Each worker when it finishes a step will then
+ // check whether it should run any dependants.
+ const steps_slice = step_stack.keys();
+ for (0..steps_slice.len) |i| {
+ const step = steps_slice[steps_slice.len - i - 1];
+
+ wait_group.start();
+ thread_pool.spawn(workerMakeOneStep, .{
+ &wait_group, &thread_pool, b, step, &step_prog, run,
+ }) catch @panic("OOM");
+ }
+ }
+ assert(run.memory_blocked_steps.items.len == 0);
+
+ var test_skip_count: usize = 0;
+ var test_fail_count: usize = 0;
+ var test_pass_count: usize = 0;
+ var test_leak_count: usize = 0;
+ var test_count: usize = 0;
+
+ var success_count: usize = 0;
+ var skipped_count: usize = 0;
+ var failure_count: usize = 0;
+ var pending_count: usize = 0;
+ var total_compile_errors: usize = 0;
+ var compile_error_steps: std.ArrayListUnmanaged(*Step) = .{};
+ defer compile_error_steps.deinit(gpa);
+
+ for (step_stack.keys()) |s| {
+ test_fail_count += s.test_results.fail_count;
+ test_skip_count += s.test_results.skip_count;
+ test_leak_count += s.test_results.leak_count;
+ test_pass_count += s.test_results.passCount();
+ test_count += s.test_results.test_count;
+
+ switch (s.state) {
+ .precheck_unstarted => unreachable,
+ .precheck_started => unreachable,
+ .running => unreachable,
+ .precheck_done => {
+ // precheck_done is equivalent to dependency_failure in the case of
+ // transitive dependencies. For example:
+ // A -> B -> C (failure)
+ // B will be marked as dependency_failure, while A may never be queued, and thus
+ // remain in the initial state of precheck_done.
+ s.state = .dependency_failure;
+ pending_count += 1;
+ },
+ .dependency_failure => pending_count += 1,
+ .success => success_count += 1,
+ .skipped => skipped_count += 1,
+ .failure => {
+ failure_count += 1;
+ const compile_errors_len = s.result_error_bundle.errorMessageCount();
+ if (compile_errors_len > 0) {
+ total_compile_errors += compile_errors_len;
+ try compile_error_steps.append(gpa, s);
+ }
+ },
+ }
+ }
+
+ // A proper command line application defaults to silently succeeding.
+ // The user may request verbose mode if they have a different preference.
+ if (failure_count == 0 and run.enable_summary != true) return cleanExit();
+
+ const ttyconf = run.ttyconf;
+ const stderr = run.stderr;
+
+ if (run.enable_summary != false) {
+ const total_count = success_count + failure_count + pending_count + skipped_count;
+ ttyconf.setColor(stderr, .Cyan) catch {};
+ stderr.writeAll("Build Summary:") catch {};
+ ttyconf.setColor(stderr, .Reset) catch {};
+ stderr.writer().print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
+ if (skipped_count > 0) stderr.writer().print("; {d} skipped", .{skipped_count}) catch {};
+ if (failure_count > 0) stderr.writer().print("; {d} failed", .{failure_count}) catch {};
+
+ if (test_count > 0) stderr.writer().print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
+ if (test_skip_count > 0) stderr.writer().print("; {d} skipped", .{test_skip_count}) catch {};
+ if (test_fail_count > 0) stderr.writer().print("; {d} failed", .{test_fail_count}) catch {};
+ if (test_leak_count > 0) stderr.writer().print("; {d} leaked", .{test_leak_count}) catch {};
+
+ if (run.enable_summary == null) {
+ ttyconf.setColor(stderr, .Dim) catch {};
+ stderr.writeAll(" (disable with -fno-summary)") catch {};
+ ttyconf.setColor(stderr, .Reset) catch {};
+ }
+ stderr.writeAll("\n") catch {};
+
+ // Print a fancy tree with build results.
+ var print_node: PrintNode = .{ .parent = null };
+ if (step_names.len == 0) {
+ print_node.last = true;
+ printTreeStep(b, b.default_step, stderr, ttyconf, &print_node, &step_stack) catch {};
+ } else {
+ for (step_names, 0..) |step_name, i| {
+ const tls = b.top_level_steps.get(step_name).?;
+ print_node.last = i + 1 == b.top_level_steps.count();
+ printTreeStep(b, &tls.step, stderr, ttyconf, &print_node, &step_stack) catch {};
+ }
+ }
+ }
+
+ if (failure_count == 0) return cleanExit();
+
+ // Finally, render compile errors at the bottom of the terminal.
+ // We use a separate compile_error_steps array list because step_stack is destructively
+ // mutated in printTreeStep above.
+ if (total_compile_errors > 0) {
+ for (compile_error_steps.items) |s| {
+ if (s.result_error_bundle.errorMessageCount() > 0) {
+ s.result_error_bundle.renderToStdErr(renderOptions(ttyconf));
+ }
+ }
+
+ // Signal to parent process that we have printed compile errors. The
+ // parent process may choose to omit the "following command failed"
+ // line in this case.
+ process.exit(2);
+ }
+
+ process.exit(1);
+}
+
+const PrintNode = struct {
+ parent: ?*PrintNode,
+ last: bool = false,
+};
+
+fn printPrefix(node: *PrintNode, stderr: std.fs.File, ttyconf: std.debug.TTY.Config) !void {
+ const parent = node.parent orelse return;
+ if (parent.parent == null) return;
+ try printPrefix(parent, stderr, ttyconf);
+ if (parent.last) {
+ try stderr.writeAll(" ");
+ } else {
+ try stderr.writeAll(switch (ttyconf) {
+ .no_color, .windows_api => "| ",
+ .escape_codes => "\x1B\x28\x30\x78\x1B\x28\x42 ", // │
+ });
+ }
+}
+
+fn printTreeStep(
+ b: *std.Build,
+ s: *Step,
+ stderr: std.fs.File,
+ ttyconf: std.debug.TTY.Config,
+ parent_node: *PrintNode,
+ step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
+) !void {
+ const first = step_stack.swapRemove(s);
+ try printPrefix(parent_node, stderr, ttyconf);
+
+ if (!first) try ttyconf.setColor(stderr, .Dim);
+ if (parent_node.parent != null) {
+ if (parent_node.last) {
+ try stderr.writeAll(switch (ttyconf) {
+ .no_color, .windows_api => "+- ",
+ .escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", // └─
+ });
+ } else {
+ try stderr.writeAll(switch (ttyconf) {
+ .no_color, .windows_api => "+- ",
+ .escape_codes => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", // ├─
+ });
+ }
+ }
+
+ // dep_prefix omitted here because it is redundant with the tree.
+ try stderr.writeAll(s.name);
+
+ if (first) {
+ switch (s.state) {
+ .precheck_unstarted => unreachable,
+ .precheck_started => unreachable,
+ .precheck_done => unreachable,
+ .running => unreachable,
+
+ .dependency_failure => {
+ try ttyconf.setColor(stderr, .Dim);
+ try stderr.writeAll(" transitive failure\n");
+ try ttyconf.setColor(stderr, .Reset);
+ },
+
+ .success => {
+ try ttyconf.setColor(stderr, .Green);
+ if (s.result_cached) {
+ try stderr.writeAll(" cached");
+ } else if (s.test_results.test_count > 0) {
+ const pass_count = s.test_results.passCount();
+ try stderr.writer().print(" {d} passed", .{pass_count});
+ if (s.test_results.skip_count > 0) {
+ try ttyconf.setColor(stderr, .Yellow);
+ try stderr.writer().print(" {d} skipped", .{s.test_results.skip_count});
+ }
+ } else {
+ try stderr.writeAll(" success");
+ }
+ try ttyconf.setColor(stderr, .Reset);
+ if (s.result_duration_ns) |ns| {
+ try ttyconf.setColor(stderr, .Dim);
+ if (ns >= std.time.ns_per_min) {
+ try stderr.writer().print(" {d}m", .{ns / std.time.ns_per_min});
+ } else if (ns >= std.time.ns_per_s) {
+ try stderr.writer().print(" {d}s", .{ns / std.time.ns_per_s});
+ } else if (ns >= std.time.ns_per_ms) {
+ try stderr.writer().print(" {d}ms", .{ns / std.time.ns_per_ms});
+ } else if (ns >= std.time.ns_per_us) {
+ try stderr.writer().print(" {d}us", .{ns / std.time.ns_per_us});
+ } else {
+ try stderr.writer().print(" {d}ns", .{ns});
+ }
+ try ttyconf.setColor(stderr, .Reset);
+ }
+ if (s.result_peak_rss != 0) {
+ const rss = s.result_peak_rss;
+ try ttyconf.setColor(stderr, .Dim);
+ if (rss >= 1000_000_000) {
+ try stderr.writer().print(" MaxRSS:{d}G", .{rss / 1000_000_000});
+ } else if (rss >= 1000_000) {
+ try stderr.writer().print(" MaxRSS:{d}M", .{rss / 1000_000});
+ } else if (rss >= 1000) {
+ try stderr.writer().print(" MaxRSS:{d}K", .{rss / 1000});
+ } else {
+ try stderr.writer().print(" MaxRSS:{d}B", .{rss});
+ }
+ try ttyconf.setColor(stderr, .Reset);
+ }
+ try stderr.writeAll("\n");
+ },
+
+ .skipped => {
+ try ttyconf.setColor(stderr, .Yellow);
+ try stderr.writeAll(" skipped\n");
+ try ttyconf.setColor(stderr, .Reset);
+ },
+
+ .failure => {
+ if (s.result_error_bundle.errorMessageCount() > 0) {
+ try ttyconf.setColor(stderr, .Red);
+ try stderr.writer().print(" {d} errors\n", .{
+ s.result_error_bundle.errorMessageCount(),
+ });
+ try ttyconf.setColor(stderr, .Reset);
+ } else if (!s.test_results.isSuccess()) {
+ try stderr.writer().print(" {d}/{d} passed", .{
+ s.test_results.passCount(), s.test_results.test_count,
+ });
+ if (s.test_results.fail_count > 0) {
+ try stderr.writeAll(", ");
+ try ttyconf.setColor(stderr, .Red);
+ try stderr.writer().print("{d} failed", .{
+ s.test_results.fail_count,
+ });
+ try ttyconf.setColor(stderr, .Reset);
+ }
+ if (s.test_results.skip_count > 0) {
+ try stderr.writeAll(", ");
+ try ttyconf.setColor(stderr, .Yellow);
+ try stderr.writer().print("{d} skipped", .{
+ s.test_results.skip_count,
+ });
+ try ttyconf.setColor(stderr, .Reset);
+ }
+ if (s.test_results.leak_count > 0) {
+ try stderr.writeAll(", ");
+ try ttyconf.setColor(stderr, .Red);
+ try stderr.writer().print("{d} leaked", .{
+ s.test_results.leak_count,
+ });
+ try ttyconf.setColor(stderr, .Reset);
+ }
+ try stderr.writeAll("\n");
+ } else {
+ try ttyconf.setColor(stderr, .Red);
+ try stderr.writeAll(" failure\n");
+ try ttyconf.setColor(stderr, .Reset);
+ }
+ },
+ }
+
+ for (s.dependencies.items, 0..) |dep, i| {
+ var print_node: PrintNode = .{
+ .parent = parent_node,
+ .last = i == s.dependencies.items.len - 1,
+ };
+ try printTreeStep(b, dep, stderr, ttyconf, &print_node, step_stack);
+ }
+ } else {
+ if (s.dependencies.items.len == 0) {
+ try stderr.writeAll(" (reused)\n");
+ } else {
+ try stderr.writer().print(" (+{d} more reused dependencies)\n", .{
+ s.dependencies.items.len,
+ });
+ }
+ try ttyconf.setColor(stderr, .Reset);
+ }
+}
+
+fn checkForDependencyLoop(
+ b: *std.Build,
+ s: *Step,
+ step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
+) !void {
+ switch (s.state) {
+ .precheck_started => {
+ std.debug.print("dependency loop detected:\n {s}\n", .{s.name});
+ return error.DependencyLoopDetected;
+ },
+ .precheck_unstarted => {
+ s.state = .precheck_started;
+
+ try step_stack.ensureUnusedCapacity(b.allocator, s.dependencies.items.len);
+ for (s.dependencies.items) |dep| {
+ try step_stack.put(b.allocator, dep, {});
+ try dep.dependants.append(b.allocator, s);
+ checkForDependencyLoop(b, dep, step_stack) catch |err| {
+ if (err == error.DependencyLoopDetected) {
+ std.debug.print(" {s}\n", .{s.name});
+ }
+ return err;
+ };
+ }
+
+ s.state = .precheck_done;
+ },
+ .precheck_done => {},
+
+ // These don't happen until we actually run the step graph.
+ .dependency_failure => unreachable,
+ .running => unreachable,
+ .success => unreachable,
+ .failure => unreachable,
+ .skipped => unreachable,
+ }
+}
+
+fn workerMakeOneStep(
+ wg: *std.Thread.WaitGroup,
+ thread_pool: *std.Thread.Pool,
+ b: *std.Build,
+ s: *Step,
+ prog_node: *std.Progress.Node,
+ run: *Run,
+) void {
+ defer wg.finish();
+
+ // First, check the conditions for running this step. If they are not met,
+ // then we return without doing the step, relying on another worker to
+ // queue this step up again when dependencies are met.
+ for (s.dependencies.items) |dep| {
+ switch (@atomicLoad(Step.State, &dep.state, .SeqCst)) {
+ .success, .skipped => continue,
+ .failure, .dependency_failure => {
+ @atomicStore(Step.State, &s.state, .dependency_failure, .SeqCst);
+ return;
+ },
+ .precheck_done, .running => {
+ // dependency is not finished yet.
+ return;
+ },
+ .precheck_unstarted => unreachable,
+ .precheck_started => unreachable,
+ }
+ }
+
+ if (s.max_rss != 0) {
+ run.max_rss_mutex.lock();
+ defer run.max_rss_mutex.unlock();
+
+ // Avoid running steps twice.
+ if (s.state != .precheck_done) {
+ // Another worker got the job.
+ return;
+ }
+
+ const new_claimed_rss = run.claimed_rss + s.max_rss;
+ if (new_claimed_rss > run.max_rss) {
+ // Running this step right now could possibly exceed the allotted RSS.
+ // Add this step to the queue of memory-blocked steps.
+ run.memory_blocked_steps.append(s) catch @panic("OOM");
+ return;
+ }
+
+ run.claimed_rss = new_claimed_rss;
+ s.state = .running;
+ } else {
+ // Avoid running steps twice.
+ if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .SeqCst, .SeqCst) != null) {
+ // Another worker got the job.
+ return;
+ }
+ }
+
+ var sub_prog_node = prog_node.start(s.name, 0);
+ sub_prog_node.activate();
+ defer sub_prog_node.end();
+
+ const make_result = s.make(&sub_prog_node);
+
+ // No matter the result, we want to display error/warning messages.
+ if (s.result_error_msgs.items.len > 0) {
+ sub_prog_node.context.lock_stderr();
+ defer sub_prog_node.context.unlock_stderr();
+
+ const stderr = run.stderr;
+ const ttyconf = run.ttyconf;
+
+ for (s.result_error_msgs.items) |msg| {
+ // Sometimes it feels like you just can't catch a break. Finally,
+ // with Zig, you can.
+ ttyconf.setColor(stderr, .Bold) catch break;
+ stderr.writeAll(s.owner.dep_prefix) catch break;
+ stderr.writeAll(s.name) catch break;
+ stderr.writeAll(": ") catch break;
+ ttyconf.setColor(stderr, .Red) catch break;
+ stderr.writeAll("error: ") catch break;
+ ttyconf.setColor(stderr, .Reset) catch break;
+ stderr.writeAll(msg) catch break;
+ stderr.writeAll("\n") catch break;
+ }
+ }
+
+ handle_result: {
+ if (make_result) |_| {
+ @atomicStore(Step.State, &s.state, .success, .SeqCst);
+ } else |err| switch (err) {
+ error.MakeFailed => {
+ @atomicStore(Step.State, &s.state, .failure, .SeqCst);
+ break :handle_result;
+ },
+ error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .SeqCst),
+ }
+
+ // Successful completion of a step, so we queue up its dependants as well.
+ for (s.dependants.items) |dep| {
+ wg.start();
+ thread_pool.spawn(workerMakeOneStep, .{
+ wg, thread_pool, b, dep, prog_node, run,
+ }) catch @panic("OOM");
+ }
+ }
+
+ // If this is a step that claims resources, we must now queue up other
+ // steps that are waiting for resources.
+ if (s.max_rss != 0) {
+ run.max_rss_mutex.lock();
+ defer run.max_rss_mutex.unlock();
+
+ // Give the memory back to the scheduler.
+ run.claimed_rss -= s.max_rss;
+ // Avoid kicking off too many tasks that we already know will not have
+ // enough resources.
+ var remaining = run.max_rss - run.claimed_rss;
+ var i: usize = 0;
+ var j: usize = 0;
+ while (j < run.memory_blocked_steps.items.len) : (j += 1) {
+ const dep = run.memory_blocked_steps.items[j];
+ assert(dep.max_rss != 0);
+ if (dep.max_rss <= remaining) {
+ remaining -= dep.max_rss;
+
+ wg.start();
+ thread_pool.spawn(workerMakeOneStep, .{
+ wg, thread_pool, b, dep, prog_node, run,
+ }) catch @panic("OOM");
+ } else {
+ run.memory_blocked_steps.items[i] = dep;
+ i += 1;
+ }
+ }
+ run.memory_blocked_steps.shrinkRetainingCapacity(i);
+ }
}
fn steps(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void {
@@ -269,7 +888,7 @@ fn steps(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi
}
const allocator = builder.allocator;
- for (builder.top_level_steps.items) |top_level_step| {
+ for (builder.top_level_steps.values()) |top_level_step| {
const name = if (&top_level_step.step == builder.default_step)
try fmt.allocPrint(allocator, "{s} (default)", .{top_level_step.step.name})
else
@@ -327,6 +946,10 @@ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi
\\ --verbose Print commands before executing them
\\ --color [auto|off|on] Enable or disable colored error messages
\\ --prominent-compile-errors Output compile errors formatted for a human to read
+ \\ -fsummary Print the build summary, even on success
+ \\ -fno-summary Omit the build summary, even on failure
+ \\ -j Limit concurrent jobs (default is to use all CPU cores)
+ \\ --maxrss Limit memory usage (default is to use available memory)
\\
\\Project-Specific Options:
\\
@@ -364,6 +987,7 @@ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi
\\ --zig-lib-dir [arg] Override path to Zig lib directory
\\ --build-runner [file] Override path to build runner
\\ --debug-log [scope] Enable debugging the compiler
+ \\ --debug-pkg-config Fail if unknown pkg-config flags encountered
\\ --verbose-link Enable compiler debug output for linking
\\ --verbose-air Enable compiler debug output for Zig AIR
\\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
@@ -374,7 +998,7 @@ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi
);
}
-fn usageAndErr(builder: *std.Build, already_ran_build: bool, out_stream: anytype) void {
+fn usageAndErr(builder: *std.Build, already_ran_build: bool, out_stream: anytype) noreturn {
usage(builder, already_ran_build, out_stream) catch {};
process.exit(1);
}
@@ -389,3 +1013,28 @@ fn argsRest(args: [][]const u8, idx: usize) ?[][]const u8 {
if (idx >= args.len) return null;
return args[idx..];
}
+
+fn cleanExit() void {
+ // Perhaps in the future there could be an Advanced Options flag such as
+ // --debug-build-runner-leaks which would make this function return instead
+ // of calling exit.
+ process.exit(0);
+}
+
+const Color = enum { auto, off, on };
+
+fn get_tty_conf(color: Color, stderr: std.fs.File) std.debug.TTY.Config {
+ return switch (color) {
+ .auto => std.debug.detectTTYConfig(stderr),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
+}
+
+fn renderOptions(ttyconf: std.debug.TTY.Config) std.zig.ErrorBundle.RenderOptions {
+ return .{
+ .ttyconf = ttyconf,
+ .include_source_line = ttyconf != .no_color,
+ .include_reference_trace = ttyconf != .no_color,
+ };
+}
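
A note on the scheduling shown above in workerMakeOneStep: every worker that finishes a step re-queues that step's dependants, so several workers may observe the same step becoming ready. The winner is decided either under run.max_rss_mutex (when the step declares a max_rss) or by the lock-free compare-and-swap from .precheck_done to .running. The sketch below is not part of this patch; it is a minimal, self-contained illustration of that claim pattern with invented names.

    const std = @import("std");

    // Minimal illustration (not the build runner itself) of the claim used in
    // workerMakeOneStep: only the worker that wins the compare-and-swap from
    // .precheck_done to .running executes the step; the others back off.
    const State = enum(u8) { precheck_done, running, success };

    fn tryClaim(state: *State) bool {
        return @cmpxchgStrong(State, state, .precheck_done, .running, .SeqCst, .SeqCst) == null;
    }

    test "only one worker claims a step" {
        var state: State = .precheck_done;
        try std.testing.expect(tryClaim(&state)); // the first worker wins the claim
        try std.testing.expect(!tryClaim(&state)); // a racing worker returns without running it
    }
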
diff --git a/lib/compiler_rt/udivmodei4.zig b/lib/compiler_rt/udivmodei4.zig
index de2427b79f..38a9b66b78 100644
--- a/lib/compiler_rt/udivmodei4.zig
+++ b/lib/compiler_rt/udivmodei4.zig
@@ -79,16 +79,16 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
}
break;
}
- var carry: u64 = 0;
+ var carry: i64 = 0;
i = 0;
while (i <= n) : (i += 1) {
const p = qhat * limb(&vn, i);
const t = limb(&un, i + j) - carry - @truncate(u32, p);
- limb_set(&un, i + j, @truncate(u32, t));
- carry = @intCast(u64, p >> 32) - @intCast(u64, t >> 32);
+ limb_set(&un, i + j, @truncate(u32, @bitCast(u64, t)));
+ carry = @intCast(i64, p >> 32) - @intCast(i64, t >> 32);
}
- const t = limb(&un, j + n + 1) - carry;
- limb_set(&un, j + n + 1, @truncate(u32, t));
+ const t = limb(&un, j + n + 1) -% carry;
+ limb_set(&un, j + n + 1, @truncate(u32, @bitCast(u64, t)));
if (q) |q_| limb_set(q_, j, @truncate(u32, qhat));
if (t < 0) {
if (q) |q_| limb_set(q_, j, limb(q_, j) - 1);
@@ -99,7 +99,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
limb_set(&un, i + j, @truncate(u32, t2));
carry2 = t2 >> 32;
}
- limb_set(un, j + n + 1, @truncate(u32, limb(&un, j + n + 1) + carry2));
+ limb_set(&un, j + n + 1, @truncate(u32, limb(&un, j + n + 1) + carry2));
}
if (j == 0) break;
}
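
For context on the udivmodei4 change: the multiply-and-subtract inner loop of the long division can produce a negative borrow, which an unsigned carry cannot represent, so the patch switches the carry to i64 and uses wrapping subtraction plus bitcasts when storing limbs. The stand-alone snippet below is not from the patch; it only reuses the two-argument builtin forms seen above to show how the high bits of a wrapped subtraction encode that negative borrow.

    const std = @import("std");

    test "high bits of a wrapped subtraction encode the borrow" {
        // 5 - 9 wraps in u64; the upper 32 bits of the wrapped value are all ones.
        const t: u64 = 5 -% 9;
        try std.testing.expectEqual(@as(u32, 0xffff_ffff), @truncate(u32, t >> 32));
        // Reinterpreted as a signed value, those bits are the borrow of -1 that the
        // next iteration must subtract; an unsigned carry loses that sign.
        try std.testing.expectEqual(@as(i64, -1), @bitCast(i64, t) >> 32);
    }
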
diff --git a/lib/docs/main.js b/lib/docs/main.js
index a0647bbe61..fc99b2f861 100644
--- a/lib/docs/main.js
+++ b/lib/docs/main.js
@@ -1187,10 +1187,6 @@ const NAV_MODES = {
payloadHtml += "panic";
break;
}
- case "set_cold": {
- payloadHtml += "setCold";
- break;
- }
case "set_runtime_safety": {
payloadHtml += "setRuntimeSafety";
break;
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index 26919962e3..279dd765b5 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -32,14 +32,12 @@ pub const Step = @import("Build/Step.zig");
pub const CheckFileStep = @import("Build/CheckFileStep.zig");
pub const CheckObjectStep = @import("Build/CheckObjectStep.zig");
pub const ConfigHeaderStep = @import("Build/ConfigHeaderStep.zig");
-pub const EmulatableRunStep = @import("Build/EmulatableRunStep.zig");
pub const FmtStep = @import("Build/FmtStep.zig");
pub const InstallArtifactStep = @import("Build/InstallArtifactStep.zig");
pub const InstallDirStep = @import("Build/InstallDirStep.zig");
pub const InstallFileStep = @import("Build/InstallFileStep.zig");
pub const ObjCopyStep = @import("Build/ObjCopyStep.zig");
pub const CompileStep = @import("Build/CompileStep.zig");
-pub const LogStep = @import("Build/LogStep.zig");
pub const OptionsStep = @import("Build/OptionsStep.zig");
pub const RemoveDirStep = @import("Build/RemoveDirStep.zig");
pub const RunStep = @import("Build/RunStep.zig");
@@ -59,15 +57,12 @@ verbose_air: bool,
verbose_llvm_ir: bool,
verbose_cimport: bool,
verbose_llvm_cpu_features: bool,
-/// The purpose of executing the command is for a human to read compile errors from the terminal
-prominent_compile_errors: bool,
-color: enum { auto, on, off } = .auto,
reference_trace: ?u32 = null,
invalid_user_input: bool,
zig_exe: []const u8,
default_step: *Step,
env_map: *EnvMap,
-top_level_steps: ArrayList(*TopLevelStep),
+top_level_steps: std.StringArrayHashMapUnmanaged(*TopLevelStep),
install_prefix: []const u8,
dest_dir: ?[]const u8,
lib_dir: []const u8,
@@ -90,6 +85,7 @@ pkg_config_pkg_list: ?(PkgConfigError![]const PkgConfigPkg) = null,
args: ?[][]const u8 = null,
debug_log_scopes: []const []const u8 = &.{},
debug_compile_errors: bool = false,
+debug_pkg_config: bool = false,
/// Experimental. Use system Darling installation to run cross compiled macOS build artifacts.
enable_darling: bool = false,
@@ -198,7 +194,7 @@ pub fn create(
env_map.* = try process.getEnvMap(allocator);
const self = try allocator.create(Build);
- self.* = Build{
+ self.* = .{
.zig_exe = zig_exe,
.build_root = build_root,
.cache_root = cache_root,
@@ -211,13 +207,12 @@ pub fn create(
.verbose_llvm_ir = false,
.verbose_cimport = false,
.verbose_llvm_cpu_features = false,
- .prominent_compile_errors = false,
.invalid_user_input = false,
.allocator = allocator,
.user_input_options = UserInputOptionsMap.init(allocator),
.available_options_map = AvailableOptionsMap.init(allocator),
.available_options_list = ArrayList(AvailableOption).init(allocator),
- .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
+ .top_level_steps = .{},
.default_step = undefined,
.env_map = env_map,
.search_prefixes = ArrayList([]const u8).init(allocator),
@@ -227,12 +222,21 @@ pub fn create(
.h_dir = undefined,
.dest_dir = env_map.get("DESTDIR"),
.installed_files = ArrayList(InstalledFile).init(allocator),
- .install_tls = TopLevelStep{
- .step = Step.initNoOp(.top_level, "install", allocator),
+ .install_tls = .{
+ .step = Step.init(.{
+ .id = .top_level,
+ .name = "install",
+ .owner = self,
+ }),
.description = "Copy build artifacts to prefix path",
},
- .uninstall_tls = TopLevelStep{
- .step = Step.init(.top_level, "uninstall", allocator, makeUninstall),
+ .uninstall_tls = .{
+ .step = Step.init(.{
+ .id = .top_level,
+ .name = "uninstall",
+ .owner = self,
+ .makeFn = makeUninstall,
+ }),
.description = "Remove build artifacts from prefix path",
},
.zig_lib_dir = null,
@@ -241,8 +245,8 @@ pub fn create(
.host = host,
.modules = std.StringArrayHashMap(*Module).init(allocator),
};
- try self.top_level_steps.append(&self.install_tls);
- try self.top_level_steps.append(&self.uninstall_tls);
+ try self.top_level_steps.put(allocator, self.install_tls.step.name, &self.install_tls);
+ try self.top_level_steps.put(allocator, self.uninstall_tls.step.name, &self.uninstall_tls);
self.default_step = &self.install_tls.step;
return self;
}
@@ -264,11 +268,20 @@ fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Direc
child.* = .{
.allocator = allocator,
.install_tls = .{
- .step = Step.initNoOp(.top_level, "install", allocator),
+ .step = Step.init(.{
+ .id = .top_level,
+ .name = "install",
+ .owner = child,
+ }),
.description = "Copy build artifacts to prefix path",
},
.uninstall_tls = .{
- .step = Step.init(.top_level, "uninstall", allocator, makeUninstall),
+ .step = Step.init(.{
+ .id = .top_level,
+ .name = "uninstall",
+ .owner = child,
+ .makeFn = makeUninstall,
+ }),
.description = "Remove build artifacts from prefix path",
},
.user_input_options = UserInputOptionsMap.init(allocator),
@@ -281,14 +294,12 @@ fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Direc
.verbose_llvm_ir = parent.verbose_llvm_ir,
.verbose_cimport = parent.verbose_cimport,
.verbose_llvm_cpu_features = parent.verbose_llvm_cpu_features,
- .prominent_compile_errors = parent.prominent_compile_errors,
- .color = parent.color,
.reference_trace = parent.reference_trace,
.invalid_user_input = false,
.zig_exe = parent.zig_exe,
.default_step = undefined,
.env_map = parent.env_map,
- .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
+ .top_level_steps = .{},
.install_prefix = undefined,
.dest_dir = parent.dest_dir,
.lib_dir = parent.lib_dir,
@@ -306,6 +317,7 @@ fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Direc
.zig_lib_dir = parent.zig_lib_dir,
.debug_log_scopes = parent.debug_log_scopes,
.debug_compile_errors = parent.debug_compile_errors,
+ .debug_pkg_config = parent.debug_pkg_config,
.enable_darling = parent.enable_darling,
.enable_qemu = parent.enable_qemu,
.enable_rosetta = parent.enable_rosetta,
@@ -316,8 +328,8 @@ fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Direc
.dep_prefix = parent.fmt("{s}{s}.", .{ parent.dep_prefix, dep_name }),
.modules = std.StringArrayHashMap(*Module).init(allocator),
};
- try child.top_level_steps.append(&child.install_tls);
- try child.top_level_steps.append(&child.uninstall_tls);
+ try child.top_level_steps.put(allocator, child.install_tls.step.name, &child.install_tls);
+ try child.top_level_steps.put(allocator, child.uninstall_tls.step.name, &child.uninstall_tls);
child.default_step = &child.install_tls.step;
return child;
}
@@ -372,27 +384,24 @@ fn applyArgs(b: *Build, args: anytype) !void {
},
}
}
- const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);
+
+ // Create an installation directory local to this package. This will be used when
+ // dependant packages require a standard prefix, such as include directories for C headers.
+ var hash = b.cache.hash;
// Random bytes to make unique. Refresh this with new random bytes when
// implementation is modified in a non-backwards-compatible way.
- var hash = Hasher.init("ZaEsvQ5ClaA2IdH9");
- hash.update(b.dep_prefix);
+ hash.add(@as(u32, 0xd8cb0055));
+ hash.addBytes(b.dep_prefix);
// TODO additionally update the hash with `args`.
-
- var digest: [16]u8 = undefined;
- hash.final(&digest);
- var hash_basename: [digest.len * 2]u8 = undefined;
- _ = std.fmt.bufPrint(&hash_basename, "{s}", .{std.fmt.fmtSliceHexLower(&digest)}) catch
- unreachable;
-
- const install_prefix = try b.cache_root.join(b.allocator, &.{ "i", &hash_basename });
+ const digest = hash.final();
+ const install_prefix = try b.cache_root.join(b.allocator, &.{ "i", &digest });
b.resolveInstallPrefix(install_prefix, .{});
}
-pub fn destroy(self: *Build) void {
- self.env_map.deinit();
- self.top_level_steps.deinit();
- self.allocator.destroy(self);
+pub fn destroy(b: *Build) void {
+ b.env_map.deinit();
+ b.top_level_steps.deinit(b.allocator);
+ b.allocator.destroy(b);
}
/// This function is intended to be called by lib/build_runner.zig, not a build.zig file.
@@ -441,6 +450,7 @@ pub const ExecutableOptions = struct {
target: CrossTarget = .{},
optimize: std.builtin.Mode = .Debug,
linkage: ?CompileStep.Linkage = null,
+ max_rss: usize = 0,
};
pub fn addExecutable(b: *Build, options: ExecutableOptions) *CompileStep {
@@ -452,6 +462,7 @@ pub fn addExecutable(b: *Build, options: ExecutableOptions) *CompileStep {
.optimize = options.optimize,
.kind = .exe,
.linkage = options.linkage,
+ .max_rss = options.max_rss,
});
}
@@ -460,6 +471,7 @@ pub const ObjectOptions = struct {
root_source_file: ?FileSource = null,
target: CrossTarget,
optimize: std.builtin.Mode,
+ max_rss: usize = 0,
};
pub fn addObject(b: *Build, options: ObjectOptions) *CompileStep {
@@ -469,6 +481,7 @@ pub fn addObject(b: *Build, options: ObjectOptions) *CompileStep {
.target = options.target,
.optimize = options.optimize,
.kind = .obj,
+ .max_rss = options.max_rss,
});
}
@@ -478,6 +491,7 @@ pub const SharedLibraryOptions = struct {
version: ?std.builtin.Version = null,
target: CrossTarget,
optimize: std.builtin.Mode,
+ max_rss: usize = 0,
};
pub fn addSharedLibrary(b: *Build, options: SharedLibraryOptions) *CompileStep {
@@ -489,6 +503,7 @@ pub fn addSharedLibrary(b: *Build, options: SharedLibraryOptions) *CompileStep {
.version = options.version,
.target = options.target,
.optimize = options.optimize,
+ .max_rss = options.max_rss,
});
}
@@ -498,6 +513,7 @@ pub const StaticLibraryOptions = struct {
target: CrossTarget,
optimize: std.builtin.Mode,
version: ?std.builtin.Version = null,
+ max_rss: usize = 0,
};
pub fn addStaticLibrary(b: *Build, options: StaticLibraryOptions) *CompileStep {
@@ -509,25 +525,27 @@ pub fn addStaticLibrary(b: *Build, options: StaticLibraryOptions) *CompileStep {
.version = options.version,
.target = options.target,
.optimize = options.optimize,
+ .max_rss = options.max_rss,
});
}
pub const TestOptions = struct {
name: []const u8 = "test",
- kind: CompileStep.Kind = .@"test",
root_source_file: FileSource,
target: CrossTarget = .{},
optimize: std.builtin.Mode = .Debug,
version: ?std.builtin.Version = null,
+ max_rss: usize = 0,
};
pub fn addTest(b: *Build, options: TestOptions) *CompileStep {
return CompileStep.create(b, .{
.name = options.name,
- .kind = options.kind,
+ .kind = .@"test",
.root_source_file = options.root_source_file,
.target = options.target,
.optimize = options.optimize,
+ .max_rss = options.max_rss,
});
}
@@ -536,6 +554,7 @@ pub const AssemblyOptions = struct {
source_file: FileSource,
target: CrossTarget,
optimize: std.builtin.Mode,
+ max_rss: usize = 0,
};
pub fn addAssembly(b: *Build, options: AssemblyOptions) *CompileStep {
@@ -545,22 +564,19 @@ pub fn addAssembly(b: *Build, options: AssemblyOptions) *CompileStep {
.root_source_file = null,
.target = options.target,
.optimize = options.optimize,
+ .max_rss = options.max_rss,
});
obj_step.addAssemblyFileSource(options.source_file.dupe(b));
return obj_step;
}
-pub const AddModuleOptions = struct {
- name: []const u8,
- source_file: FileSource,
- dependencies: []const ModuleDependency = &.{},
-};
-
-pub fn addModule(b: *Build, options: AddModuleOptions) void {
- b.modules.put(b.dupe(options.name), b.createModule(.{
- .source_file = options.source_file,
- .dependencies = options.dependencies,
- })) catch @panic("OOM");
+/// This function creates a module and adds it to the package's module set, making
+/// it available to other packages which depend on this one.
+/// `createModule` can be used instead to create a private module.
+pub fn addModule(b: *Build, name: []const u8, options: CreateModuleOptions) *Module {
+ const module = b.createModule(options);
+ b.modules.put(b.dupe(name), module) catch @panic("OOM");
+ return module;
}
pub const ModuleDependency = struct {
@@ -573,8 +589,9 @@ pub const CreateModuleOptions = struct {
dependencies: []const ModuleDependency = &.{},
};
-/// Prefer to use `addModule` which will make the module available to other
-/// packages which depend on this package.
+/// This function creates a private module, to be used by the current package,
+/// but not exposed to other packages depending on this one.
+/// `addModule` can be used instead to create a public module.
pub fn createModule(b: *Build, options: CreateModuleOptions) *Module {
const module = b.allocator.create(Module) catch @panic("OOM");
module.* = .{
@@ -608,16 +625,15 @@ pub fn addSystemCommand(self: *Build, argv: []const []const u8) *RunStep {
/// Creates a `RunStep` with an executable built with `addExecutable`.
/// Add command line arguments with methods of `RunStep`.
pub fn addRunArtifact(b: *Build, exe: *CompileStep) *RunStep {
- assert(exe.kind == .exe or exe.kind == .test_exe);
-
// It doesn't have to be native. We catch that if you actually try to run it.
// Consider that this is declarative; the run step may not be run unless a user
// option is supplied.
- const run_step = RunStep.create(b, b.fmt("run {s}", .{exe.step.name}));
+ const run_step = RunStep.create(b, b.fmt("run {s}", .{exe.name}));
run_step.addArtifactArg(exe);
- if (exe.kind == .test_exe) {
- run_step.addArg(b.zig_exe);
+ if (exe.kind == .@"test") {
+ run_step.stdio = .zig_test;
+ run_step.addArgs(&.{"--listen=-"});
}
if (exe.vcpkg_bin_path) |path| {
@@ -637,7 +653,11 @@ pub fn addConfigHeader(
options: ConfigHeaderStep.Options,
values: anytype,
) *ConfigHeaderStep {
- const config_header_step = ConfigHeaderStep.create(b, options);
+ var options_copy = options;
+ if (options_copy.first_ret_addr == null)
+ options_copy.first_ret_addr = @returnAddress();
+
+ const config_header_step = ConfigHeaderStep.create(b, options_copy);
config_header_step.addValues(values);
return config_header_step;
}
@@ -674,17 +694,8 @@ pub fn addWriteFile(self: *Build, file_path: []const u8, data: []const u8) *Writ
return write_file_step;
}
-pub fn addWriteFiles(self: *Build) *WriteFileStep {
- const write_file_step = self.allocator.create(WriteFileStep) catch @panic("OOM");
- write_file_step.* = WriteFileStep.init(self);
- return write_file_step;
-}
-
-pub fn addLog(self: *Build, comptime format: []const u8, args: anytype) *LogStep {
- const data = self.fmt(format, args);
- const log_step = self.allocator.create(LogStep) catch @panic("OOM");
- log_step.* = LogStep.init(self, data);
- return log_step;
+pub fn addWriteFiles(b: *Build) *WriteFileStep {
+ return WriteFileStep.create(b);
}
pub fn addRemoveDirTree(self: *Build, dir_path: []const u8) *RemoveDirStep {
@@ -693,32 +704,14 @@ pub fn addRemoveDirTree(self: *Build, dir_path: []const u8) *RemoveDirStep {
return remove_dir_step;
}
-pub fn addFmt(self: *Build, paths: []const []const u8) *FmtStep {
- return FmtStep.create(self, paths);
+pub fn addFmt(b: *Build, options: FmtStep.Options) *FmtStep {
+ return FmtStep.create(b, options);
}
pub fn addTranslateC(self: *Build, options: TranslateCStep.Options) *TranslateCStep {
return TranslateCStep.create(self, options);
}
-pub fn make(self: *Build, step_names: []const []const u8) !void {
- var wanted_steps = ArrayList(*Step).init(self.allocator);
- defer wanted_steps.deinit();
-
- if (step_names.len == 0) {
- try wanted_steps.append(self.default_step);
- } else {
- for (step_names) |step_name| {
- const s = try self.getTopLevelStepByName(step_name);
- try wanted_steps.append(s);
- }
- }
-
- for (wanted_steps.items) |s| {
- try self.makeOneStep(s);
- }
-}
-
pub fn getInstallStep(self: *Build) *Step {
return &self.install_tls.step;
}
@@ -727,7 +720,8 @@ pub fn getUninstallStep(self: *Build) *Step {
return &self.uninstall_tls.step;
}
-fn makeUninstall(uninstall_step: *Step) anyerror!void {
+fn makeUninstall(uninstall_step: *Step, prog_node: *std.Progress.Node) anyerror!void {
+ _ = prog_node;
const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step);
const self = @fieldParentPtr(Build, "uninstall_tls", uninstall_tls);
@@ -742,37 +736,6 @@ fn makeUninstall(uninstall_step: *Step) anyerror!void {
// TODO remove empty directories
}
-fn makeOneStep(self: *Build, s: *Step) anyerror!void {
- if (s.loop_flag) {
- log.err("Dependency loop detected:\n {s}", .{s.name});
- return error.DependencyLoopDetected;
- }
- s.loop_flag = true;
-
- for (s.dependencies.items) |dep| {
- self.makeOneStep(dep) catch |err| {
- if (err == error.DependencyLoopDetected) {
- log.err(" {s}", .{s.name});
- }
- return err;
- };
- }
-
- s.loop_flag = false;
-
- try s.make();
-}
-
-fn getTopLevelStepByName(self: *Build, name: []const u8) !*Step {
- for (self.top_level_steps.items) |top_level_step| {
- if (mem.eql(u8, top_level_step.step.name, name)) {
- return &top_level_step.step;
- }
- }
- log.err("Cannot run step '{s}' because it does not exist", .{name});
- return error.InvalidStepName;
-}
-
pub fn option(self: *Build, comptime T: type, name_raw: []const u8, description_raw: []const u8) ?T {
const name = self.dupe(name_raw);
const description = self.dupe(description_raw);
@@ -909,11 +872,15 @@ pub fn option(self: *Build, comptime T: type, name_raw: []const u8, description_
pub fn step(self: *Build, name: []const u8, description: []const u8) *Step {
const step_info = self.allocator.create(TopLevelStep) catch @panic("OOM");
- step_info.* = TopLevelStep{
- .step = Step.initNoOp(.top_level, name, self.allocator),
+ step_info.* = .{
+ .step = Step.init(.{
+ .id = .top_level,
+ .name = name,
+ .owner = self,
+ }),
.description = self.dupe(description),
};
- self.top_level_steps.append(step_info) catch @panic("OOM");
+ self.top_level_steps.put(self.allocator, step_info.step.name, step_info) catch @panic("OOM");
return &step_info.step;
}
@@ -1181,50 +1148,18 @@ pub fn validateUserInputDidItFail(self: *Build) bool {
return self.invalid_user_input;
}
-pub fn spawnChild(self: *Build, argv: []const []const u8) !void {
- return self.spawnChildEnvMap(null, self.env_map, argv);
-}
-
-fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
- if (cwd) |yes_cwd| std.debug.print("cd {s} && ", .{yes_cwd});
+fn allocPrintCmd(ally: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) ![]u8 {
+ var buf = ArrayList(u8).init(ally);
+ if (opt_cwd) |cwd| try buf.writer().print("cd {s} && ", .{cwd});
for (argv) |arg| {
- std.debug.print("{s} ", .{arg});
+ try buf.writer().print("{s} ", .{arg});
}
- std.debug.print("\n", .{});
+ return buf.toOwnedSlice();
}
-pub fn spawnChildEnvMap(self: *Build, cwd: ?[]const u8, env_map: *const EnvMap, argv: []const []const u8) !void {
- if (self.verbose) {
- printCmd(cwd, argv);
- }
-
- if (!std.process.can_spawn)
- return error.ExecNotSupported;
-
- var child = std.ChildProcess.init(argv, self.allocator);
- child.cwd = cwd;
- child.env_map = env_map;
-
- const term = child.spawnAndWait() catch |err| {
- log.err("Unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
- return err;
- };
-
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- log.err("The following command exited with error code {}:", .{code});
- printCmd(cwd, argv);
- return error.UncleanExit;
- }
- },
- else => {
- log.err("The following command terminated unexpectedly:", .{});
- printCmd(cwd, argv);
-
- return error.UncleanExit;
- },
- }
+fn printCmd(ally: Allocator, cwd: ?[]const u8, argv: []const []const u8) void {
+ const text = allocPrintCmd(ally, cwd, argv) catch @panic("OOM");
+ std.debug.print("{s}\n", .{text});
}
pub fn installArtifact(self: *Build, artifact: *CompileStep) void {
@@ -1283,12 +1218,7 @@ pub fn addInstallFileWithDir(
install_dir: InstallDir,
dest_rel_path: []const u8,
) *InstallFileStep {
- if (dest_rel_path.len == 0) {
- panic("dest_rel_path must be non-empty", .{});
- }
- const install_step = self.allocator.create(InstallFileStep) catch @panic("OOM");
- install_step.* = InstallFileStep.init(self, source.dupe(self), install_dir, dest_rel_path);
- return install_step;
+ return InstallFileStep.create(self, source.dupe(self), install_dir, dest_rel_path);
}
pub fn addInstallDirectory(self: *Build, options: InstallDirectoryOptions) *InstallDirStep {
@@ -1297,6 +1227,14 @@ pub fn addInstallDirectory(self: *Build, options: InstallDirectoryOptions) *Inst
return install_step;
}
+pub fn addCheckFile(
+ b: *Build,
+ file_source: FileSource,
+ options: CheckFileStep.Options,
+) *CheckFileStep {
+ return CheckFileStep.create(b, file_source, options);
+}
+
pub fn pushInstalledFile(self: *Build, dir: InstallDir, dest_rel_path: []const u8) void {
const file = InstalledFile{
.dir = dir,
@@ -1305,18 +1243,6 @@ pub fn pushInstalledFile(self: *Build, dir: InstallDir, dest_rel_path: []const u
self.installed_files.append(file.dupe(self)) catch @panic("OOM");
}
-pub fn updateFile(self: *Build, source_path: []const u8, dest_path: []const u8) !void {
- if (self.verbose) {
- log.info("cp {s} {s} ", .{ source_path, dest_path });
- }
- const cwd = fs.cwd();
- const prev_status = try fs.Dir.updateFile(cwd, source_path, cwd, dest_path, .{});
- if (self.verbose) switch (prev_status) {
- .stale => log.info("# installed", .{}),
- .fresh => log.info("# up-to-date", .{}),
- };
-}
-
pub fn truncateFile(self: *Build, dest_path: []const u8) !void {
if (self.verbose) {
log.info("truncate {s}", .{dest_path});
@@ -1400,7 +1326,7 @@ pub fn execAllowFail(
) ExecError![]u8 {
assert(argv.len != 0);
- if (!std.process.can_spawn)
+ if (!process.can_spawn)
return error.ExecNotSupported;
const max_output_size = 400 * 1024;
@@ -1433,59 +1359,27 @@ pub fn execAllowFail(
}
}
-pub fn execFromStep(self: *Build, argv: []const []const u8, src_step: ?*Step) ![]u8 {
- assert(argv.len != 0);
-
- if (self.verbose) {
- printCmd(null, argv);
- }
-
- if (!std.process.can_spawn) {
- if (src_step) |s| log.err("{s}...", .{s.name});
- log.err("Unable to spawn the following command: cannot spawn child process", .{});
- printCmd(null, argv);
- std.os.abort();
+/// This is a helper function to be called from build.zig scripts, *not* from
+/// inside step make() functions. If any errors occur, it fails the build with
+/// a helpful message.
+pub fn exec(b: *Build, argv: []const []const u8) []u8 {
+ if (!process.can_spawn) {
+ std.debug.print("unable to spawn the following command: cannot spawn child process\n{s}\n", .{
+ try allocPrintCmd(b.allocator, null, argv),
+ });
+ process.exit(1);
}
var code: u8 = undefined;
- return self.execAllowFail(argv, &code, .Inherit) catch |err| switch (err) {
- error.ExecNotSupported => {
- if (src_step) |s| log.err("{s}...", .{s.name});
- log.err("Unable to spawn the following command: cannot spawn child process", .{});
- printCmd(null, argv);
- std.os.abort();
- },
- error.FileNotFound => {
- if (src_step) |s| log.err("{s}...", .{s.name});
- log.err("Unable to spawn the following command: file not found", .{});
- printCmd(null, argv);
- std.os.exit(@truncate(u8, code));
- },
- error.ExitCodeFailure => {
- if (src_step) |s| log.err("{s}...", .{s.name});
- if (self.prominent_compile_errors) {
- log.err("The step exited with error code {d}", .{code});
- } else {
- log.err("The following command exited with error code {d}:", .{code});
- printCmd(null, argv);
- }
-
- std.os.exit(@truncate(u8, code));
- },
- error.ProcessTerminated => {
- if (src_step) |s| log.err("{s}...", .{s.name});
- log.err("The following command terminated unexpectedly:", .{});
- printCmd(null, argv);
- std.os.exit(@truncate(u8, code));
- },
- else => |e| return e,
+ return b.execAllowFail(argv, &code, .Inherit) catch |err| {
+ const printed_cmd = allocPrintCmd(b.allocator, null, argv) catch @panic("OOM");
+ std.debug.print("unable to spawn the following command: {s}\n{s}\n", .{
+ @errorName(err), printed_cmd,
+ });
+ process.exit(1);
};
}
-pub fn exec(self: *Build, argv: []const []const u8) ![]u8 {
- return self.execFromStep(argv, null);
-}
-
pub fn addSearchPrefix(self: *Build, search_prefix: []const u8) void {
self.search_prefixes.append(self.dupePath(search_prefix)) catch @panic("OOM");
}
@@ -1550,10 +1444,29 @@ pub fn dependency(b: *Build, name: []const u8, args: anytype) *Dependency {
const full_path = b.pathFromRoot("build.zig.zon");
std.debug.print("no dependency named '{s}' in '{s}'. All packages used in build.zig must be declared in this file.\n", .{ name, full_path });
- std.process.exit(1);
+ process.exit(1);
}
-fn dependencyInner(
+pub fn anonymousDependency(
+ b: *Build,
+ /// The path to the directory containing the dependency's build.zig file,
+ /// relative to the current package's build.zig.
+ relative_build_root: []const u8,
+ /// A direct `@import` of the build.zig of the dependency.
+ comptime build_zig: type,
+ args: anytype,
+) *Dependency {
+ const arena = b.allocator;
+ const build_root = b.build_root.join(arena, &.{relative_build_root}) catch @panic("OOM");
+ const name = arena.dupe(u8, relative_build_root) catch @panic("OOM");
+ for (name) |*byte| switch (byte.*) {
+ '/', '\\' => byte.* = '.',
+ else => continue,
+ };
+ return dependencyInner(b, name, build_root, build_zig, args);
+}
+
+pub fn dependencyInner(
b: *Build,
name: []const u8,
build_root_string: []const u8,
@@ -1566,7 +1479,7 @@ fn dependencyInner(
std.debug.print("unable to open '{s}': {s}\n", .{
build_root_string, @errorName(err),
});
- std.process.exit(1);
+ process.exit(1);
},
};
const sub_builder = b.createChild(name, build_root, args) catch @panic("unhandled error");
@@ -1610,7 +1523,7 @@ pub const GeneratedFile = struct {
pub fn getPath(self: GeneratedFile) []const u8 {
return self.path orelse std.debug.panic(
- "getPath() was called on a GeneratedFile that wasn't build yet. Is there a missing Step dependency on step '{s}'?",
+ "getPath() was called on a GeneratedFile that wasn't built yet. Is there a missing Step dependency on step '{s}'?",
.{self.step.name},
);
}
@@ -1650,12 +1563,23 @@ pub const FileSource = union(enum) {
}
/// Should only be called during make(), returns a path relative to the build root or absolute.
- pub fn getPath(self: FileSource, builder: *Build) []const u8 {
- const path = switch (self) {
- .path => |p| builder.pathFromRoot(p),
- .generated => |gen| gen.getPath(),
- };
- return path;
+ pub fn getPath(self: FileSource, src_builder: *Build) []const u8 {
+ return getPath2(self, src_builder, null);
+ }
+
+ /// Should only be called during make(), returns a path relative to the build root or absolute.
+ /// asking_step is only used for debugging purposes; it's the step being run that is asking for
+ /// the path.
+ pub fn getPath2(self: FileSource, src_builder: *Build, asking_step: ?*Step) []const u8 {
+ switch (self) {
+ .path => |p| return src_builder.pathFromRoot(p),
+ .generated => |gen| return gen.path orelse {
+ std.debug.getStderrMutex().lock();
+ const stderr = std.io.getStdErr();
+ dumpBadGetPathHelp(gen.step, stderr, src_builder, asking_step) catch {};
+ @panic("misconfigured build script");
+ },
+ }
}
/// Duplicates the file source for a given builder.
@@ -1667,6 +1591,54 @@ pub const FileSource = union(enum) {
}
};
+/// In this function the stderr mutex has already been locked.
+fn dumpBadGetPathHelp(
+ s: *Step,
+ stderr: fs.File,
+ src_builder: *Build,
+ asking_step: ?*Step,
+) anyerror!void {
+ const w = stderr.writer();
+ try w.print(
+ \\getPath() was called on a GeneratedFile that wasn't built yet.
+ \\ source package path: {s}
+ \\ Is there a missing Step dependency on step '{s}'?
+ \\
+ , .{
+ src_builder.build_root.path orelse ".",
+ s.name,
+ });
+
+ const tty_config = std.debug.detectTTYConfig(stderr);
+ tty_config.setColor(w, .Red) catch {};
+ try stderr.writeAll(" The step was created by this stack trace:\n");
+ tty_config.setColor(w, .Reset) catch {};
+
+ const debug_info = std.debug.getSelfDebugInfo() catch |err| {
+ try w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)});
+ return;
+ };
+ const ally = debug_info.allocator;
+ std.debug.writeStackTrace(s.getStackTrace(), w, ally, debug_info, tty_config) catch |err| {
+ try stderr.writer().print("Unable to dump stack trace: {s}\n", .{@errorName(err)});
+ return;
+ };
+ if (asking_step) |as| {
+ tty_config.setColor(w, .Red) catch {};
+ try stderr.writeAll(" The step that is missing a dependency on the above step was created by this stack trace:\n");
+ tty_config.setColor(w, .Reset) catch {};
+
+ std.debug.writeStackTrace(as.getStackTrace(), w, ally, debug_info, tty_config) catch |err| {
+ try stderr.writer().print("Unable to dump stack trace: {s}\n", .{@errorName(err)});
+ return;
+ };
+ }
+
+ tty_config.setColor(w, .Red) catch {};
+ try stderr.writeAll(" Hope that helps. Proceeding to panic.\n");
+ tty_config.setColor(w, .Reset) catch {};
+}
+
/// Allocates a new string for assigning a value to a named macro.
/// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call.
@@ -1706,9 +1678,7 @@ pub const InstallDir = union(enum) {
/// Duplicates the install directory including the path if set to custom.
pub fn dupe(self: InstallDir, builder: *Build) InstallDir {
if (self == .custom) {
- // Written with this temporary to avoid RLS problems
- const duped_path = builder.dupe(self.custom);
- return .{ .custom = duped_path };
+ return .{ .custom = builder.dupe(self.custom) };
} else {
return self;
}
@@ -1756,17 +1726,45 @@ pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 {
}
}
+/// This function is intended to be called in the `configure` phase only.
+/// It returns an absolute directory path, which is potentially going to be a
+/// source of API breakage in the future, so keep that in mind when using this
+/// function.
+pub fn makeTempPath(b: *Build) []const u8 {
+ const rand_int = std.crypto.random.int(u64);
+ const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ hex64(rand_int);
+ const result_path = b.cache_root.join(b.allocator, &.{tmp_dir_sub_path}) catch @panic("OOM");
+ fs.cwd().makePath(result_path) catch |err| {
+ std.debug.print("unable to make tmp path '{s}': {s}\n", .{
+ result_path, @errorName(err),
+ });
+ };
+ return result_path;
+}
+
+/// There are a few copies of this function in miscellaneous places. Would be nice to find
+/// a home for them.
+fn hex64(x: u64) [16]u8 {
+ const hex_charset = "0123456789abcdef";
+ var result: [16]u8 = undefined;
+ var i: usize = 0;
+ while (i < 8) : (i += 1) {
+ const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
+ result[i * 2 + 0] = hex_charset[byte >> 4];
+ result[i * 2 + 1] = hex_charset[byte & 15];
+ }
+ return result;
+}
+
test {
_ = CheckFileStep;
_ = CheckObjectStep;
- _ = EmulatableRunStep;
_ = FmtStep;
_ = InstallArtifactStep;
_ = InstallDirStep;
_ = InstallFileStep;
_ = ObjCopyStep;
_ = CompileStep;
- _ = LogStep;
_ = OptionsStep;
_ = RemoveDirStep;
_ = RunStep;
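
Taken together with the build runner changes earlier in this diff, the new per-step max_rss option and the reworked addRunArtifact can be used from a build script roughly as follows. This is a sketch with invented paths and limits, not code from the patch; note that a test artifact is now run with --listen=- so the build runner can collect structured test results for the summary.

    const std = @import("std");

    pub fn build(b: *std.Build) void {
        const target = b.standardTargetOptions(.{});
        const optimize = b.standardOptimizeOption(.{});

        const unit_tests = b.addTest(.{
            .root_source_file = .{ .path = "src/main.zig" },
            .target = target,
            .optimize = optimize,
            // Advisory upper bound on resident memory; the runner defers this step
            // while already-claimed steps would push the total past --maxrss.
            .max_rss = 2 * 1024 * 1024 * 1024,
        });

        const run_unit_tests = b.addRunArtifact(unit_tests);
        b.step("test", "Run unit tests").dependOn(&run_unit_tests.step);
    }
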
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig
index d4dbe6ec14..b25e349168 100644
--- a/lib/std/Build/Cache.zig
+++ b/lib/std/Build/Cache.zig
@@ -7,27 +7,27 @@ pub const Directory = struct {
/// directly, but it is needed when passing the directory to a child process.
/// `null` means cwd.
path: ?[]const u8,
- handle: std.fs.Dir,
+ handle: fs.Dir,
pub fn join(self: Directory, allocator: Allocator, paths: []const []const u8) ![]u8 {
if (self.path) |p| {
// TODO clean way to do this with only 1 allocation
- const part2 = try std.fs.path.join(allocator, paths);
+ const part2 = try fs.path.join(allocator, paths);
defer allocator.free(part2);
- return std.fs.path.join(allocator, &[_][]const u8{ p, part2 });
+ return fs.path.join(allocator, &[_][]const u8{ p, part2 });
} else {
- return std.fs.path.join(allocator, paths);
+ return fs.path.join(allocator, paths);
}
}
pub fn joinZ(self: Directory, allocator: Allocator, paths: []const []const u8) ![:0]u8 {
if (self.path) |p| {
// TODO clean way to do this with only 1 allocation
- const part2 = try std.fs.path.join(allocator, paths);
+ const part2 = try fs.path.join(allocator, paths);
defer allocator.free(part2);
- return std.fs.path.joinZ(allocator, &[_][]const u8{ p, part2 });
+ return fs.path.joinZ(allocator, &[_][]const u8{ p, part2 });
} else {
- return std.fs.path.joinZ(allocator, paths);
+ return fs.path.joinZ(allocator, paths);
}
}
@@ -39,6 +39,20 @@ pub const Directory = struct {
if (self.path) |p| gpa.free(p);
self.* = undefined;
}
+
+ pub fn format(
+ self: Directory,
+ comptime fmt_string: []const u8,
+ options: fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ _ = options;
+ if (fmt_string.len != 0) fmt.invalidFmtError(fmt, self);
+ if (self.path) |p| {
+ try writer.writeAll(p);
+ try writer.writeAll(fs.path.sep_str);
+ }
+ }
};
gpa: Allocator,
@@ -243,10 +257,10 @@ pub const HashHelper = struct {
hh.hasher.final(&bin_digest);
var out_digest: [hex_digest_len]u8 = undefined;
- _ = std.fmt.bufPrint(
+ _ = fmt.bufPrint(
&out_digest,
"{s}",
- .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ .{fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
return out_digest;
}
@@ -365,10 +379,10 @@ pub const Manifest = struct {
var bin_digest: BinDigest = undefined;
self.hash.hasher.final(&bin_digest);
- _ = std.fmt.bufPrint(
+ _ = fmt.bufPrint(
&self.hex_digest,
"{s}",
- .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ .{fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
self.hash.hasher = hasher_init;
@@ -408,7 +422,11 @@ pub const Manifest = struct {
self.have_exclusive_lock = true;
return false; // cache miss; exclusive lock already held
} else |err| switch (err) {
- error.WouldBlock => continue,
+ // There are no dir components, so you would think
+ // that this was unreachable, however we have
+ // observed on macOS two processes racing to do
+ // openat() with O_CREAT manifest in ENOENT.
+ error.WouldBlock, error.FileNotFound => continue,
else => |e| return e,
}
},
@@ -425,7 +443,10 @@ pub const Manifest = struct {
self.manifest_file = manifest_file;
self.have_exclusive_lock = true;
} else |err| switch (err) {
- error.WouldBlock => {
+ // There are no dir components, so you would think that this was
+ // unreachable, however we have observed on macOS two processes racing
+ // to do openat() with O_CREAT manifest in ENOENT.
+ error.WouldBlock, error.FileNotFound => {
self.manifest_file = try self.cache.manifest_dir.openFile(&manifest_file_path, .{
.lock = .Shared,
});
@@ -469,7 +490,7 @@ pub const Manifest = struct {
cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
- _ = std.fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
+ _ = fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
const prefix = fmt.parseInt(u8, prefix_str, 10) catch return error.InvalidFormat;
if (prefix >= self.cache.prefixes_len) return error.InvalidFormat;
@@ -806,10 +827,10 @@ pub const Manifest = struct {
self.hash.hasher.final(&bin_digest);
var out_digest: [hex_digest_len]u8 = undefined;
- _ = std.fmt.bufPrint(
+ _ = fmt.bufPrint(
&out_digest,
"{s}",
- .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ .{fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
return out_digest;
@@ -831,10 +852,10 @@ pub const Manifest = struct {
var encoded_digest: [hex_digest_len]u8 = undefined;
for (self.files.items) |file| {
- _ = std.fmt.bufPrint(
+ _ = fmt.bufPrint(
&encoded_digest,
"{s}",
- .{std.fmt.fmtSliceHexLower(&file.bin_digest)},
+ .{fmt.fmtSliceHexLower(&file.bin_digest)},
) catch unreachable;
try writer.print("{d} {d} {d} {s} {d} {s}\n", .{
file.stat.size,
@@ -955,16 +976,16 @@ fn hashFile(file: fs.File, bin_digest: *[Hasher.mac_length]u8) !void {
}
// Create/Write a file, close it, then grab its stat.mtime timestamp.
-fn testGetCurrentFileTimestamp() !i128 {
+fn testGetCurrentFileTimestamp(dir: fs.Dir) !i128 {
const test_out_file = "test-filetimestamp.tmp";
- var file = try fs.cwd().createFile(test_out_file, .{
+ var file = try dir.createFile(test_out_file, .{
.read = true,
.truncate = true,
});
defer {
file.close();
- fs.cwd().deleteFile(test_out_file) catch {};
+ dir.deleteFile(test_out_file) catch {};
}
return (try file.stat()).mtime;
@@ -976,16 +997,17 @@ test "cache file and then recall it" {
return error.SkipZigTest;
}
- const cwd = fs.cwd();
+ var tmp = testing.tmpDir(.{});
+ defer tmp.cleanup();
const temp_file = "test.txt";
const temp_manifest_dir = "temp_manifest_dir";
- try cwd.writeFile(temp_file, "Hello, world!\n");
+ try tmp.dir.writeFile(temp_file, "Hello, world!\n");
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
+ const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
+ while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
}
@@ -995,9 +1017,9 @@ test "cache file and then recall it" {
{
var cache = Cache{
.gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = tmp.dir });
defer cache.manifest_dir.close();
{
@@ -1033,9 +1055,6 @@ test "cache file and then recall it" {
try testing.expectEqual(digest1, digest2);
}
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteFile(temp_file);
}
test "check that changing a file makes cache fail" {
@@ -1043,21 +1062,19 @@ test "check that changing a file makes cache fail" {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
- const cwd = fs.cwd();
+ var tmp = testing.tmpDir(.{});
+ defer tmp.cleanup();
const temp_file = "cache_hash_change_file_test.txt";
const temp_manifest_dir = "cache_hash_change_file_manifest_dir";
const original_temp_file_contents = "Hello, world!\n";
const updated_temp_file_contents = "Hello, world; but updated!\n";
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteTree(temp_file);
-
- try cwd.writeFile(temp_file, original_temp_file_contents);
+ try tmp.dir.writeFile(temp_file, original_temp_file_contents);
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
+ const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
+ while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
}
@@ -1067,9 +1084,9 @@ test "check that changing a file makes cache fail" {
{
var cache = Cache{
.gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = tmp.dir });
defer cache.manifest_dir.close();
{
@@ -1089,7 +1106,7 @@ test "check that changing a file makes cache fail" {
try ch.writeManifest();
}
- try cwd.writeFile(temp_file, updated_temp_file_contents);
+ try tmp.dir.writeFile(temp_file, updated_temp_file_contents);
{
var ch = cache.obtain();
@@ -1111,9 +1128,6 @@ test "check that changing a file makes cache fail" {
try testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
}
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteTree(temp_file);
}
test "no file inputs" {
@@ -1121,18 +1135,20 @@ test "no file inputs" {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
- const cwd = fs.cwd();
+
+ var tmp = testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const temp_manifest_dir = "no_file_inputs_manifest_dir";
- defer cwd.deleteTree(temp_manifest_dir) catch {};
var digest1: [hex_digest_len]u8 = undefined;
var digest2: [hex_digest_len]u8 = undefined;
var cache = Cache{
.gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = tmp.dir });
defer cache.manifest_dir.close();
{
@@ -1167,18 +1183,19 @@ test "Manifest with files added after initial hash work" {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
- const cwd = fs.cwd();
+ var tmp = testing.tmpDir(.{});
+ defer tmp.cleanup();
const temp_file1 = "cache_hash_post_file_test1.txt";
const temp_file2 = "cache_hash_post_file_test2.txt";
const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
- try cwd.writeFile(temp_file1, "Hello, world!\n");
- try cwd.writeFile(temp_file2, "Hello world the second!\n");
+ try tmp.dir.writeFile(temp_file1, "Hello, world!\n");
+ try tmp.dir.writeFile(temp_file2, "Hello world the second!\n");
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
+ const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
+ while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
}
@@ -1189,9 +1206,9 @@ test "Manifest with files added after initial hash work" {
{
var cache = Cache{
.gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = tmp.dir });
defer cache.manifest_dir.close();
{
@@ -1224,11 +1241,11 @@ test "Manifest with files added after initial hash work" {
try testing.expect(mem.eql(u8, &digest1, &digest2));
// Modify the file added after initial hash
- try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
+ try tmp.dir.writeFile(temp_file2, "Hello world the second, updated\n");
// Wait for file timestamps to tick
- const initial_time2 = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time2) {
+ const initial_time2 = try testGetCurrentFileTimestamp(tmp.dir);
+ while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time2) {
std.time.sleep(1);
}
@@ -1251,8 +1268,4 @@ test "Manifest with files added after initial hash work" {
try testing.expect(!mem.eql(u8, &digest1, &digest3));
}
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteFile(temp_file1);
- try cwd.deleteFile(temp_file2);
}
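
The new Directory.format method above lets a Cache.Directory be printed with "{}": nothing for a cwd-relative directory, otherwise the path followed by a path separator, so it can directly prefix a sub-path in diagnostics. A hypothetical caller (names invented):

    const std = @import("std");

    // Illustrative only; `dir` would come from e.g. b.cache_root in a build script.
    fn reportEmit(dir: std.Build.Cache.Directory, sub_path: []const u8) void {
        std.debug.print("emitting {}{s}\n", .{ dir, sub_path });
    }
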
diff --git a/lib/std/Build/CheckFileStep.zig b/lib/std/Build/CheckFileStep.zig
index b08a797e84..1c2b6b7786 100644
--- a/lib/std/Build/CheckFileStep.zig
+++ b/lib/std/Build/CheckFileStep.zig
@@ -1,51 +1,88 @@
-const std = @import("../std.zig");
-const Step = std.Build.Step;
-const fs = std.fs;
-const mem = std.mem;
-
-const CheckFileStep = @This();
-
-pub const base_id = .check_file;
+//! Fail the build step if a file does not match certain checks.
+//! TODO: make this more flexible, supporting more kinds of checks.
+//! TODO: generalize the code in std.testing.expectEqualStrings and make this
+//! CheckFileStep produce those helpful diagnostics when there is not a match.
step: Step,
-builder: *std.Build,
expected_matches: []const []const u8,
+expected_exact: ?[]const u8,
source: std.Build.FileSource,
max_bytes: usize = 20 * 1024 * 1024,
+pub const base_id = .check_file;
+
+pub const Options = struct {
+ expected_matches: []const []const u8 = &.{},
+ expected_exact: ?[]const u8 = null,
+};
+
pub fn create(
- builder: *std.Build,
+ owner: *std.Build,
source: std.Build.FileSource,
- expected_matches: []const []const u8,
+ options: Options,
) *CheckFileStep {
- const self = builder.allocator.create(CheckFileStep) catch @panic("OOM");
- self.* = CheckFileStep{
- .builder = builder,
- .step = Step.init(.check_file, "CheckFile", builder.allocator, make),
- .source = source.dupe(builder),
- .expected_matches = builder.dupeStrings(expected_matches),
+ const self = owner.allocator.create(CheckFileStep) catch @panic("OOM");
+ self.* = .{
+ .step = Step.init(.{
+ .id = .check_file,
+ .name = "CheckFile",
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .source = source.dupe(owner),
+ .expected_matches = owner.dupeStrings(options.expected_matches),
+ .expected_exact = options.expected_exact,
};
self.source.addStepDependencies(&self.step);
return self;
}
-fn make(step: *Step) !void {
+pub fn setName(self: *CheckFileStep, name: []const u8) void {
+ self.step.name = name;
+}
+
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const b = step.owner;
const self = @fieldParentPtr(CheckFileStep, "step", step);
- const src_path = self.source.getPath(self.builder);
- const contents = try fs.cwd().readFileAlloc(self.builder.allocator, src_path, self.max_bytes);
+ const src_path = self.source.getPath(b);
+ const contents = fs.cwd().readFileAlloc(b.allocator, src_path, self.max_bytes) catch |err| {
+ return step.fail("unable to read '{s}': {s}", .{
+ src_path, @errorName(err),
+ });
+ };
for (self.expected_matches) |expected_match| {
if (mem.indexOf(u8, contents, expected_match) == null) {
- std.debug.print(
+ return step.fail(
\\
- \\========= Expected to find: ===================
+ \\========= expected to find: ===================
\\{s}
- \\========= But file does not contain it: =======
+ \\========= but file does not contain it: =======
\\{s}
- \\
+ \\===============================================
, .{ expected_match, contents });
- return error.TestFailed;
+ }
+ }
+
+ if (self.expected_exact) |expected_exact| {
+ if (!mem.eql(u8, expected_exact, contents)) {
+ return step.fail(
+ \\
+ \\========= expected: =====================
+ \\{s}
+ \\========= but found: ====================
+ \\{s}
+ \\========= from the following file: ======
+ \\{s}
+ , .{ expected_exact, contents, src_path });
}
}
}
+
+const CheckFileStep = @This();
+const std = @import("../std.zig");
+const Step = std.Build.Step;
+const fs = std.fs;
+const mem = std.mem;
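
A hypothetical use of the reworked CheckFileStep through the addCheckFile helper added to Build.zig earlier in this diff (the path and expected text are invented); mismatches now surface through step.fail and the normal step error reporting instead of a bare error.TestFailed.

    const std = @import("std");

    pub fn build(b: *std.Build) void {
        const check = b.addCheckFile(
            .{ .path = "zig-out/share/version.txt" },
            .{ .expected_matches = &.{"1.0."} },
        );
        b.step("check-version", "Verify the generated version file")
            .dependOn(&check.step);
    }
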
diff --git a/lib/std/Build/CheckObjectStep.zig b/lib/std/Build/CheckObjectStep.zig
index 5cb096581f..7cac2d04ec 100644
--- a/lib/std/Build/CheckObjectStep.zig
+++ b/lib/std/Build/CheckObjectStep.zig
@@ -10,25 +10,31 @@ const CheckObjectStep = @This();
const Allocator = mem.Allocator;
const Step = std.Build.Step;
-const EmulatableRunStep = std.Build.EmulatableRunStep;
pub const base_id = .check_object;
step: Step,
-builder: *std.Build,
source: std.Build.FileSource,
max_bytes: usize = 20 * 1024 * 1024,
checks: std.ArrayList(Check),
dump_symtab: bool = false,
obj_format: std.Target.ObjectFormat,
-pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
- const gpa = builder.allocator;
+pub fn create(
+ owner: *std.Build,
+ source: std.Build.FileSource,
+ obj_format: std.Target.ObjectFormat,
+) *CheckObjectStep {
+ const gpa = owner.allocator;
const self = gpa.create(CheckObjectStep) catch @panic("OOM");
self.* = .{
- .builder = builder,
- .step = Step.init(.check_file, "CheckObject", gpa, make),
- .source = source.dupe(builder),
+ .step = Step.init(.{
+ .id = .check_file,
+ .name = "CheckObject",
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .source = source.dupe(owner),
.checks = std.ArrayList(Check).init(gpa),
.obj_format = obj_format,
};
@@ -38,14 +44,18 @@ pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std
/// Runs and (optionally) compares the output of a binary.
/// Asserts `self` was generated from an executable step.
-pub fn runAndCompare(self: *CheckObjectStep) *EmulatableRunStep {
+/// TODO this doesn't actually compare, and there's no apparent reason for it
+/// to depend on the check object step. I don't see why this function should exist,
+/// the caller could just add the run step directly.
+pub fn runAndCompare(self: *CheckObjectStep) *std.Build.RunStep {
const dependencies_len = self.step.dependencies.items.len;
assert(dependencies_len > 0);
const exe_step = self.step.dependencies.items[dependencies_len - 1];
const exe = exe_step.cast(std.Build.CompileStep).?;
- const emulatable_step = EmulatableRunStep.create(self.builder, "EmulatableRun", exe);
- emulatable_step.step.dependOn(&self.step);
- return emulatable_step;
+ const run = self.step.owner.addRunArtifact(exe);
+ run.skip_foreign_checks = true;
+ run.step.dependOn(&self.step);
+ return run;
}
/// There are two types of actions currently supported:
@@ -123,7 +133,8 @@ const Action = struct {
/// Will return true if the `phrase` is correctly parsed into an RPN program and
/// its reduced, computed value compares using `op` with the expected value, either
/// a literal or another extracted variable.
- fn computeCmp(act: Action, gpa: Allocator, global_vars: anytype) !bool {
+ fn computeCmp(act: Action, step: *Step, global_vars: anytype) !bool {
+ const gpa = step.owner.allocator;
var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa);
var values = std.ArrayList(u64).init(gpa);
@@ -140,11 +151,11 @@ const Action = struct {
} else {
const val = std.fmt.parseInt(u64, next, 0) catch blk: {
break :blk global_vars.get(next) orelse {
- std.debug.print(
+ try step.addError(
\\
- \\========= Variable was not extracted: ===========
+ \\========= variable was not extracted: ===========
\\{s}
- \\
+ \\=================================================
, .{next});
return error.UnknownVariable;
};
@@ -176,11 +187,11 @@ const Action = struct {
const exp_value = switch (act.expected.?.value) {
.variable => |name| global_vars.get(name) orelse {
- std.debug.print(
+ try step.addError(
\\
- \\========= Variable was not extracted: ===========
+ \\========= variable was not extracted: ===========
\\{s}
- \\
+ \\=================================================
, .{name});
return error.UnknownVariable;
},
@@ -249,7 +260,7 @@ const Check = struct {
/// Creates a new sequence of actions with `phrase` as the first anchor searched phrase.
pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void {
- var new_check = Check.create(self.builder);
+ var new_check = Check.create(self.step.owner);
new_check.match(phrase);
self.checks.append(new_check) catch @panic("OOM");
}
@@ -291,34 +302,34 @@ pub fn checkComputeCompare(
program: []const u8,
expected: ComputeCompareExpected,
) void {
- var new_check = Check.create(self.builder);
+ var new_check = Check.create(self.step.owner);
new_check.computeCmp(program, expected);
self.checks.append(new_check) catch @panic("OOM");
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const b = step.owner;
+ const gpa = b.allocator;
const self = @fieldParentPtr(CheckObjectStep, "step", step);
- const gpa = self.builder.allocator;
- const src_path = self.source.getPath(self.builder);
- const contents = try fs.cwd().readFileAllocOptions(
+ const src_path = self.source.getPath(b);
+ const contents = fs.cwd().readFileAllocOptions(
gpa,
src_path,
self.max_bytes,
null,
@alignOf(u64),
null,
- );
+ ) catch |err| return step.fail("unable to read '{s}': {s}", .{ src_path, @errorName(err) });
const output = switch (self.obj_format) {
- .macho => try MachODumper.parseAndDump(contents, .{
- .gpa = gpa,
+ .macho => try MachODumper.parseAndDump(step, contents, .{
.dump_symtab = self.dump_symtab,
}),
.elf => @panic("TODO elf parser"),
.coff => @panic("TODO coff parser"),
- .wasm => try WasmDumper.parseAndDump(contents, .{
- .gpa = gpa,
+ .wasm => try WasmDumper.parseAndDump(step, contents, .{
.dump_symtab = self.dump_symtab,
}),
else => unreachable,
@@ -334,54 +345,50 @@ fn make(step: *Step) !void {
while (it.next()) |line| {
if (try act.match(line, &vars)) break;
} else {
- std.debug.print(
+ return step.fail(
\\
- \\========= Expected to find: ==========================
+ \\========= expected to find: ==========================
\\{s}
- \\========= But parsed file does not contain it: =======
+ \\========= but parsed file does not contain it: =======
\\{s}
- \\
+ \\======================================================
, .{ act.phrase, output });
- return error.TestFailed;
}
},
.not_present => {
while (it.next()) |line| {
if (try act.match(line, &vars)) {
- std.debug.print(
+ return step.fail(
\\
- \\========= Expected not to find: ===================
+ \\========= expected not to find: ===================
\\{s}
- \\========= But parsed file does contain it: ========
+ \\========= but parsed file does contain it: ========
\\{s}
- \\
+ \\===================================================
, .{ act.phrase, output });
- return error.TestFailed;
}
}
},
.compute_cmp => {
- const res = act.computeCmp(gpa, vars) catch |err| switch (err) {
+ const res = act.computeCmp(step, vars) catch |err| switch (err) {
error.UnknownVariable => {
- std.debug.print(
- \\========= From parsed file: =====================
+ return step.fail(
+ \\========= from parsed file: =====================
\\{s}
- \\
+ \\=================================================
, .{output});
- return error.TestFailed;
},
else => |e| return e,
};
if (!res) {
- std.debug.print(
+ return step.fail(
\\
- \\========= Comparison failed for action: ===========
+ \\========= comparison failed for action: ===========
\\{s} {}
- \\========= From parsed file: =======================
+ \\========= from parsed file: =======================
\\{s}
- \\
+ \\===================================================
, .{ act.phrase, act.expected.?, output });
- return error.TestFailed;
}
},
}
@@ -390,7 +397,6 @@ fn make(step: *Step) !void {
}
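The migration above captures the new Step API in one place: Step.init now takes an options struct, makeFn has a (step, prog_node) signature, and failures are reported via step.fail instead of std.debug.print. A minimal sketch of a third-party custom step written against that API, assuming Step.Id's catch-all .custom tag (all names illustrative, not part of this change):

    const std = @import("std");
    const Step = std.Build.Step;

    const HelloStep = struct {
        step: Step,

        fn create(owner: *std.Build) *HelloStep {
            const self = owner.allocator.create(HelloStep) catch @panic("OOM");
            self.* = .{ .step = Step.init(.{
                .id = .custom, // catch-all Id for user-defined steps
                .name = "hello",
                .owner = owner,
                .makeFn = make,
            }) };
            return self;
        }

        fn make(step: *Step, prog_node: *std.Progress.Node) !void {
            _ = prog_node;
            // Errors surface through the step, not through direct printing.
            return step.fail("nothing to do for '{s}'", .{step.name});
        }
    };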
const Opts = struct {
- gpa: ?Allocator = null,
dump_symtab: bool = false,
};
@@ -398,8 +404,8 @@ const MachODumper = struct {
const LoadCommandIterator = macho.LoadCommandIterator;
const symtab_label = "symtab";
- fn parseAndDump(bytes: []align(@alignOf(u64)) const u8, opts: Opts) ![]const u8 {
- const gpa = opts.gpa orelse unreachable; // MachO dumper requires an allocator
+ fn parseAndDump(step: *Step, bytes: []align(@alignOf(u64)) const u8, opts: Opts) ![]const u8 {
+ const gpa = step.owner.allocator;
var stream = std.io.fixedBufferStream(bytes);
const reader = stream.reader();
@@ -681,8 +687,8 @@ const MachODumper = struct {
const WasmDumper = struct {
const symtab_label = "symbols";
- fn parseAndDump(bytes: []const u8, opts: Opts) ![]const u8 {
- const gpa = opts.gpa orelse unreachable; // Wasm dumper requires an allocator
+ fn parseAndDump(step: *Step, bytes: []const u8, opts: Opts) ![]const u8 {
+ const gpa = step.owner.allocator;
if (opts.dump_symtab) {
@panic("TODO: Implement symbol table parsing and dumping");
}
@@ -703,20 +709,24 @@ const WasmDumper = struct {
const writer = output.writer();
while (reader.readByte()) |current_byte| {
- const section = std.meta.intToEnum(std.wasm.Section, current_byte) catch |err| {
- std.debug.print("Found invalid section id '{d}'\n", .{current_byte});
- return err;
+ const section = std.meta.intToEnum(std.wasm.Section, current_byte) catch {
+ return step.fail("Found invalid section id '{d}'", .{current_byte});
};
const section_length = try std.leb.readULEB128(u32, reader);
- try parseAndDumpSection(section, bytes[fbs.pos..][0..section_length], writer);
+ try parseAndDumpSection(step, section, bytes[fbs.pos..][0..section_length], writer);
fbs.pos += section_length;
} else |_| {} // reached end of stream
return output.toOwnedSlice();
}
- fn parseAndDumpSection(section: std.wasm.Section, data: []const u8, writer: anytype) !void {
+ fn parseAndDumpSection(
+ step: *Step,
+ section: std.wasm.Section,
+ data: []const u8,
+ writer: anytype,
+ ) !void {
var fbs = std.io.fixedBufferStream(data);
const reader = fbs.reader();
@@ -739,7 +749,7 @@ const WasmDumper = struct {
=> {
const entries = try std.leb.readULEB128(u32, reader);
try writer.print("\nentries {d}\n", .{entries});
- try dumpSection(section, data[fbs.pos..], entries, writer);
+ try dumpSection(step, section, data[fbs.pos..], entries, writer);
},
.custom => {
const name_length = try std.leb.readULEB128(u32, reader);
@@ -748,7 +758,7 @@ const WasmDumper = struct {
try writer.print("\nname {s}\n", .{name});
if (mem.eql(u8, name, "name")) {
- try parseDumpNames(reader, writer, data);
+ try parseDumpNames(step, reader, writer, data);
} else if (mem.eql(u8, name, "producers")) {
try parseDumpProducers(reader, writer, data);
} else if (mem.eql(u8, name, "target_features")) {
@@ -764,7 +774,7 @@ const WasmDumper = struct {
}
}
- fn dumpSection(section: std.wasm.Section, data: []const u8, entries: u32, writer: anytype) !void {
+ fn dumpSection(step: *Step, section: std.wasm.Section, data: []const u8, entries: u32, writer: anytype) !void {
var fbs = std.io.fixedBufferStream(data);
const reader = fbs.reader();
@@ -774,19 +784,18 @@ const WasmDumper = struct {
while (i < entries) : (i += 1) {
const func_type = try reader.readByte();
if (func_type != std.wasm.function_type) {
- std.debug.print("Expected function type, found byte '{d}'\n", .{func_type});
- return error.UnexpectedByte;
+ return step.fail("expected function type, found byte '{d}'", .{func_type});
}
const params = try std.leb.readULEB128(u32, reader);
try writer.print("params {d}\n", .{params});
var index: u32 = 0;
while (index < params) : (index += 1) {
- try parseDumpType(std.wasm.Valtype, reader, writer);
+ try parseDumpType(step, std.wasm.Valtype, reader, writer);
} else index = 0;
const returns = try std.leb.readULEB128(u32, reader);
try writer.print("returns {d}\n", .{returns});
while (index < returns) : (index += 1) {
- try parseDumpType(std.wasm.Valtype, reader, writer);
+ try parseDumpType(step, std.wasm.Valtype, reader, writer);
}
}
},
@@ -800,9 +809,8 @@ const WasmDumper = struct {
const name = data[fbs.pos..][0..name_len];
fbs.pos += name_len;
- const kind = std.meta.intToEnum(std.wasm.ExternalKind, try reader.readByte()) catch |err| {
- std.debug.print("Invalid import kind\n", .{});
- return err;
+ const kind = std.meta.intToEnum(std.wasm.ExternalKind, try reader.readByte()) catch {
+ return step.fail("invalid import kind", .{});
};
try writer.print(
@@ -819,11 +827,11 @@ const WasmDumper = struct {
try parseDumpLimits(reader, writer);
},
.global => {
- try parseDumpType(std.wasm.Valtype, reader, writer);
+ try parseDumpType(step, std.wasm.Valtype, reader, writer);
try writer.print("mutable {}\n", .{0x01 == try std.leb.readULEB128(u32, reader)});
},
.table => {
- try parseDumpType(std.wasm.RefType, reader, writer);
+ try parseDumpType(step, std.wasm.RefType, reader, writer);
try parseDumpLimits(reader, writer);
},
}
@@ -838,7 +846,7 @@ const WasmDumper = struct {
.table => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
- try parseDumpType(std.wasm.RefType, reader, writer);
+ try parseDumpType(step, std.wasm.RefType, reader, writer);
try parseDumpLimits(reader, writer);
}
},
@@ -851,9 +859,9 @@ const WasmDumper = struct {
.global => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
- try parseDumpType(std.wasm.Valtype, reader, writer);
+ try parseDumpType(step, std.wasm.Valtype, reader, writer);
try writer.print("mutable {}\n", .{0x01 == try std.leb.readULEB128(u1, reader)});
- try parseDumpInit(reader, writer);
+ try parseDumpInit(step, reader, writer);
}
},
.@"export" => {
@@ -863,9 +871,8 @@ const WasmDumper = struct {
const name = data[fbs.pos..][0..name_len];
fbs.pos += name_len;
const kind_byte = try std.leb.readULEB128(u8, reader);
- const kind = std.meta.intToEnum(std.wasm.ExternalKind, kind_byte) catch |err| {
- std.debug.print("invalid export kind value '{d}'\n", .{kind_byte});
- return err;
+ const kind = std.meta.intToEnum(std.wasm.ExternalKind, kind_byte) catch {
+ return step.fail("invalid export kind value '{d}'", .{kind_byte});
};
const index = try std.leb.readULEB128(u32, reader);
try writer.print(
@@ -880,7 +887,7 @@ const WasmDumper = struct {
var i: u32 = 0;
while (i < entries) : (i += 1) {
try writer.print("table index {d}\n", .{try std.leb.readULEB128(u32, reader)});
- try parseDumpInit(reader, writer);
+ try parseDumpInit(step, reader, writer);
const function_indexes = try std.leb.readULEB128(u32, reader);
var function_index: u32 = 0;
@@ -896,7 +903,7 @@ const WasmDumper = struct {
while (i < entries) : (i += 1) {
const index = try std.leb.readULEB128(u32, reader);
try writer.print("memory index 0x{x}\n", .{index});
- try parseDumpInit(reader, writer);
+ try parseDumpInit(step, reader, writer);
const size = try std.leb.readULEB128(u32, reader);
try writer.print("size {d}\n", .{size});
try reader.skipBytes(size, .{}); // we do not care about the content of the segments
@@ -906,11 +913,10 @@ const WasmDumper = struct {
}
}
- fn parseDumpType(comptime WasmType: type, reader: anytype, writer: anytype) !void {
+ fn parseDumpType(step: *Step, comptime WasmType: type, reader: anytype, writer: anytype) !void {
const type_byte = try reader.readByte();
- const valtype = std.meta.intToEnum(WasmType, type_byte) catch |err| {
- std.debug.print("Invalid wasm type value '{d}'\n", .{type_byte});
- return err;
+ const valtype = std.meta.intToEnum(WasmType, type_byte) catch {
+ return step.fail("Invalid wasm type value '{d}'", .{type_byte});
};
try writer.print("type {s}\n", .{@tagName(valtype)});
}
@@ -925,11 +931,10 @@ const WasmDumper = struct {
}
}
- fn parseDumpInit(reader: anytype, writer: anytype) !void {
+ fn parseDumpInit(step: *Step, reader: anytype, writer: anytype) !void {
const byte = try std.leb.readULEB128(u8, reader);
- const opcode = std.meta.intToEnum(std.wasm.Opcode, byte) catch |err| {
- std.debug.print("invalid wasm opcode '{d}'\n", .{byte});
- return err;
+ const opcode = std.meta.intToEnum(std.wasm.Opcode, byte) catch {
+ return step.fail("invalid wasm opcode '{d}'", .{byte});
};
switch (opcode) {
.i32_const => try writer.print("i32.const {x}\n", .{try std.leb.readILEB128(i32, reader)}),
@@ -941,14 +946,13 @@ const WasmDumper = struct {
}
const end_opcode = try std.leb.readULEB128(u8, reader);
if (end_opcode != std.wasm.opcode(.end)) {
- std.debug.print("expected 'end' opcode in init expression\n", .{});
- return error.MissingEndOpcode;
+ return step.fail("expected 'end' opcode in init expression", .{});
}
}
- fn parseDumpNames(reader: anytype, writer: anytype, data: []const u8) !void {
+ fn parseDumpNames(step: *Step, reader: anytype, writer: anytype, data: []const u8) !void {
while (reader.context.pos < data.len) {
- try parseDumpType(std.wasm.NameSubsection, reader, writer);
+ try parseDumpType(step, std.wasm.NameSubsection, reader, writer);
const size = try std.leb.readULEB128(u32, reader);
const entries = try std.leb.readULEB128(u32, reader);
try writer.print(
diff --git a/lib/std/Build/CompileStep.zig b/lib/std/Build/CompileStep.zig
index ea2320cc89..d73e5d3b41 100644
--- a/lib/std/Build/CompileStep.zig
+++ b/lib/std/Build/CompileStep.zig
@@ -1,7 +1,6 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
const mem = std.mem;
-const log = std.log;
const fs = std.fs;
const assert = std.debug.assert;
const panic = std.debug.panic;
@@ -22,7 +21,6 @@ const InstallDir = std.Build.InstallDir;
const InstallArtifactStep = std.Build.InstallArtifactStep;
const GeneratedFile = std.Build.GeneratedFile;
const ObjCopyStep = std.Build.ObjCopyStep;
-const EmulatableRunStep = std.Build.EmulatableRunStep;
const CheckObjectStep = std.Build.CheckObjectStep;
const RunStep = std.Build.RunStep;
const OptionsStep = std.Build.OptionsStep;
@@ -32,7 +30,6 @@ const CompileStep = @This();
pub const base_id: Step.Id = .compile;
step: Step,
-builder: *std.Build,
name: []const u8,
target: CrossTarget,
target_info: NativeTargetInfo,
@@ -49,9 +46,9 @@ strip: ?bool,
unwind_tables: ?bool,
// keep in sync with src/link.zig:CompressDebugSections
compress_debug_sections: enum { none, zlib } = .none,
-lib_paths: ArrayList([]const u8),
-rpaths: ArrayList([]const u8),
-framework_dirs: ArrayList([]const u8),
+lib_paths: ArrayList(FileSource),
+rpaths: ArrayList(FileSource),
+framework_dirs: ArrayList(FileSource),
frameworks: StringHashMap(FrameworkLinkInfo),
verbose_link: bool,
verbose_cc: bool,
@@ -86,7 +83,6 @@ c_std: std.Build.CStd,
zig_lib_dir: ?[]const u8,
main_pkg_path: ?[]const u8,
exec_cmd_args: ?[]const ?[]const u8,
-name_prefix: []const u8,
filter: ?[]const u8,
test_evented_io: bool = false,
test_runner: ?[]const u8,
@@ -210,10 +206,17 @@ want_lto: ?bool = null,
use_llvm: ?bool = null,
use_lld: ?bool = null,
+/// This is an advanced setting that can change the intent of this CompileStep.
+/// If this slice has nonzero length, it means that this CompileStep exists to
+/// check for compile errors and return *success* if they match, and failure
+/// otherwise.
+expect_errors: []const []const u8 = &.{},
+
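For illustration, a build script could flip an existing compile step into an error-matching case like this (the error string is a made-up example, not from this change):

    // `case` is some *CompileStep created elsewhere in build.zig.
    case.expect_errors = &.{
        "error: expected type 'u32', found 'bool'",
    };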
output_path_source: GeneratedFile,
output_lib_path_source: GeneratedFile,
output_h_path_source: GeneratedFile,
output_pdb_path_source: GeneratedFile,
+output_dirname_source: GeneratedFile,
pub const CSourceFiles = struct {
files: []const []const u8,
@@ -277,6 +280,7 @@ pub const Options = struct {
kind: Kind,
linkage: ?Linkage = null,
version: ?std.builtin.Version = null,
+ max_rss: usize = 0,
};
pub const Kind = enum {
@@ -284,7 +288,6 @@ pub const Kind = enum {
lib,
obj,
@"test",
- test_exe,
};
pub const Linkage = enum { dynamic, static };
@@ -305,18 +308,35 @@ pub const EmitOption = union(enum) {
}
};
-pub fn create(builder: *std.Build, options: Options) *CompileStep {
- const name = builder.dupe(options.name);
- const root_src: ?FileSource = if (options.root_source_file) |rsrc| rsrc.dupe(builder) else null;
+pub fn create(owner: *std.Build, options: Options) *CompileStep {
+ const name = owner.dupe(options.name);
+ const root_src: ?FileSource = if (options.root_source_file) |rsrc| rsrc.dupe(owner) else null;
if (mem.indexOf(u8, name, "/") != null or mem.indexOf(u8, name, "\\") != null) {
panic("invalid name: '{s}'. It looks like a file path, but it is supposed to be the library or application name.", .{name});
}
- const self = builder.allocator.create(CompileStep) catch @panic("OOM");
+ // Avoid the common case of the step name looking like "zig test test".
+ const name_adjusted = if (options.kind == .@"test" and mem.eql(u8, name, "test"))
+ ""
+ else
+ owner.fmt("{s} ", .{name});
+
+ const step_name = owner.fmt("{s} {s}{s} {s}", .{
+ switch (options.kind) {
+ .exe => "zig build-exe",
+ .lib => "zig build-lib",
+ .obj => "zig build-obj",
+ .@"test" => "zig test",
+ },
+ name_adjusted,
+ @tagName(options.optimize),
+ options.target.zigTriple(owner.allocator) catch @panic("OOM"),
+ });
+
+ const self = owner.allocator.create(CompileStep) catch @panic("OOM");
self.* = CompileStep{
.strip = null,
.unwind_tables = null,
- .builder = builder,
.verbose_link = false,
.verbose_cc = false,
.optimize = options.optimize,
@@ -325,29 +345,34 @@ pub fn create(builder: *std.Build, options: Options) *CompileStep {
.kind = options.kind,
.root_src = root_src,
.name = name,
- .frameworks = StringHashMap(FrameworkLinkInfo).init(builder.allocator),
- .step = Step.init(base_id, name, builder.allocator, make),
+ .frameworks = StringHashMap(FrameworkLinkInfo).init(owner.allocator),
+ .step = Step.init(.{
+ .id = base_id,
+ .name = step_name,
+ .owner = owner,
+ .makeFn = make,
+ .max_rss = options.max_rss,
+ }),
.version = options.version,
.out_filename = undefined,
- .out_h_filename = builder.fmt("{s}.h", .{name}),
+ .out_h_filename = owner.fmt("{s}.h", .{name}),
.out_lib_filename = undefined,
- .out_pdb_filename = builder.fmt("{s}.pdb", .{name}),
+ .out_pdb_filename = owner.fmt("{s}.pdb", .{name}),
.major_only_filename = null,
.name_only_filename = null,
- .modules = std.StringArrayHashMap(*Module).init(builder.allocator),
- .include_dirs = ArrayList(IncludeDir).init(builder.allocator),
- .link_objects = ArrayList(LinkObject).init(builder.allocator),
- .c_macros = ArrayList([]const u8).init(builder.allocator),
- .lib_paths = ArrayList([]const u8).init(builder.allocator),
- .rpaths = ArrayList([]const u8).init(builder.allocator),
- .framework_dirs = ArrayList([]const u8).init(builder.allocator),
- .installed_headers = ArrayList(*Step).init(builder.allocator),
+ .modules = std.StringArrayHashMap(*Module).init(owner.allocator),
+ .include_dirs = ArrayList(IncludeDir).init(owner.allocator),
+ .link_objects = ArrayList(LinkObject).init(owner.allocator),
+ .c_macros = ArrayList([]const u8).init(owner.allocator),
+ .lib_paths = ArrayList(FileSource).init(owner.allocator),
+ .rpaths = ArrayList(FileSource).init(owner.allocator),
+ .framework_dirs = ArrayList(FileSource).init(owner.allocator),
+ .installed_headers = ArrayList(*Step).init(owner.allocator),
.object_src = undefined,
.c_std = std.Build.CStd.C99,
.zig_lib_dir = null,
.main_pkg_path = null,
.exec_cmd_args = null,
- .name_prefix = "",
.filter = null,
.test_runner = null,
.disable_stack_probing = false,
@@ -363,6 +388,7 @@ pub fn create(builder: *std.Build, options: Options) *CompileStep {
.output_lib_path_source = GeneratedFile{ .step = &self.step },
.output_h_path_source = GeneratedFile{ .step = &self.step },
.output_pdb_path_source = GeneratedFile{ .step = &self.step },
+ .output_dirname_source = GeneratedFile{ .step = &self.step },
.target_info = NativeTargetInfo.detect(self.target) catch @panic("unhandled error"),
};
@@ -372,15 +398,16 @@ pub fn create(builder: *std.Build, options: Options) *CompileStep {
}
fn computeOutFileNames(self: *CompileStep) void {
+ const b = self.step.owner;
const target = self.target_info.target;
- self.out_filename = std.zig.binNameAlloc(self.builder.allocator, .{
+ self.out_filename = std.zig.binNameAlloc(b.allocator, .{
.root_name = self.name,
.target = target,
.output_mode = switch (self.kind) {
.lib => .Lib,
.obj => .Obj,
- .exe, .@"test", .test_exe => .Exe,
+ .exe, .@"test" => .Exe,
},
.link_mode = if (self.linkage) |some| @as(std.builtin.LinkMode, switch (some) {
.dynamic => .Dynamic,
@@ -394,30 +421,30 @@ fn computeOutFileNames(self: *CompileStep) void {
self.out_lib_filename = self.out_filename;
} else if (self.version) |version| {
if (target.isDarwin()) {
- self.major_only_filename = self.builder.fmt("lib{s}.{d}.dylib", .{
+ self.major_only_filename = b.fmt("lib{s}.{d}.dylib", .{
self.name,
version.major,
});
- self.name_only_filename = self.builder.fmt("lib{s}.dylib", .{self.name});
+ self.name_only_filename = b.fmt("lib{s}.dylib", .{self.name});
self.out_lib_filename = self.out_filename;
} else if (target.os.tag == .windows) {
- self.out_lib_filename = self.builder.fmt("{s}.lib", .{self.name});
+ self.out_lib_filename = b.fmt("{s}.lib", .{self.name});
} else {
- self.major_only_filename = self.builder.fmt("lib{s}.so.{d}", .{ self.name, version.major });
- self.name_only_filename = self.builder.fmt("lib{s}.so", .{self.name});
+ self.major_only_filename = b.fmt("lib{s}.so.{d}", .{ self.name, version.major });
+ self.name_only_filename = b.fmt("lib{s}.so", .{self.name});
self.out_lib_filename = self.out_filename;
}
} else {
if (target.isDarwin()) {
self.out_lib_filename = self.out_filename;
} else if (target.os.tag == .windows) {
- self.out_lib_filename = self.builder.fmt("{s}.lib", .{self.name});
+ self.out_lib_filename = b.fmt("{s}.lib", .{self.name});
} else {
self.out_lib_filename = self.out_filename;
}
}
if (self.output_dir != null) {
- self.output_lib_path_source.path = self.builder.pathJoin(
+ self.output_lib_path_source.path = b.pathJoin(
&.{ self.output_dir.?, self.out_lib_filename },
);
}
@@ -425,17 +452,20 @@ fn computeOutFileNames(self: *CompileStep) void {
}
pub fn setOutputDir(self: *CompileStep, dir: []const u8) void {
- self.output_dir = self.builder.dupePath(dir);
+ const b = self.step.owner;
+ self.output_dir = b.dupePath(dir);
}
pub fn install(self: *CompileStep) void {
- self.builder.installArtifact(self);
+ const b = self.step.owner;
+ b.installArtifact(self);
}
-pub fn installHeader(a: *CompileStep, src_path: []const u8, dest_rel_path: []const u8) void {
- const install_file = a.builder.addInstallHeaderFile(src_path, dest_rel_path);
- a.builder.getInstallStep().dependOn(&install_file.step);
- a.installed_headers.append(&install_file.step) catch @panic("OOM");
+pub fn installHeader(cs: *CompileStep, src_path: []const u8, dest_rel_path: []const u8) void {
+ const b = cs.step.owner;
+ const install_file = b.addInstallHeaderFile(src_path, dest_rel_path);
+ b.getInstallStep().dependOn(&install_file.step);
+ cs.installed_headers.append(&install_file.step) catch @panic("OOM");
}
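Usage from build scripts is unchanged by the parameter rename; a quick sketch (paths and step names illustrative):

    // `lib` is a library *CompileStep; the header is installed alongside it.
    lib.installHeader("include/mylib.h", "mylib.h");
    // Re-export headers installed by a library dependency.
    exe_or_lib.installLibraryHeaders(lib);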
pub const InstallConfigHeaderOptions = struct {
@@ -449,13 +479,14 @@ pub fn installConfigHeader(
options: InstallConfigHeaderOptions,
) void {
const dest_rel_path = options.dest_rel_path orelse config_header.include_path;
- const install_file = cs.builder.addInstallFileWithDir(
+ const b = cs.step.owner;
+ const install_file = b.addInstallFileWithDir(
.{ .generated = &config_header.output_file },
options.install_dir,
dest_rel_path,
);
install_file.step.dependOn(&config_header.step);
- cs.builder.getInstallStep().dependOn(&install_file.step);
+ b.getInstallStep().dependOn(&install_file.step);
cs.installed_headers.append(&install_file.step) catch @panic("OOM");
}
@@ -472,91 +503,83 @@ pub fn installHeadersDirectory(
}
pub fn installHeadersDirectoryOptions(
- a: *CompileStep,
+ cs: *CompileStep,
options: std.Build.InstallDirStep.Options,
) void {
- const install_dir = a.builder.addInstallDirectory(options);
- a.builder.getInstallStep().dependOn(&install_dir.step);
- a.installed_headers.append(&install_dir.step) catch @panic("OOM");
+ const b = cs.step.owner;
+ const install_dir = b.addInstallDirectory(options);
+ b.getInstallStep().dependOn(&install_dir.step);
+ cs.installed_headers.append(&install_dir.step) catch @panic("OOM");
}
-pub fn installLibraryHeaders(a: *CompileStep, l: *CompileStep) void {
+pub fn installLibraryHeaders(cs: *CompileStep, l: *CompileStep) void {
assert(l.kind == .lib);
- const install_step = a.builder.getInstallStep();
+ const b = cs.step.owner;
+ const install_step = b.getInstallStep();
// Copy each element from installed_headers, modifying the builder
// to be the new parent's builder.
for (l.installed_headers.items) |step| {
const step_copy = switch (step.id) {
inline .install_file, .install_dir => |id| blk: {
const T = id.Type();
- const ptr = a.builder.allocator.create(T) catch @panic("OOM");
+ const ptr = b.allocator.create(T) catch @panic("OOM");
ptr.* = step.cast(T).?.*;
- ptr.override_source_builder = ptr.builder;
- ptr.builder = a.builder;
+ ptr.dest_builder = b;
break :blk &ptr.step;
},
else => unreachable,
};
- a.installed_headers.append(step_copy) catch @panic("OOM");
+ cs.installed_headers.append(step_copy) catch @panic("OOM");
install_step.dependOn(step_copy);
}
- a.installed_headers.appendSlice(l.installed_headers.items) catch @panic("OOM");
+ cs.installed_headers.appendSlice(l.installed_headers.items) catch @panic("OOM");
}
pub fn addObjCopy(cs: *CompileStep, options: ObjCopyStep.Options) *ObjCopyStep {
+ const b = cs.step.owner;
var copy = options;
if (copy.basename == null) {
if (options.format) |f| {
- copy.basename = cs.builder.fmt("{s}.{s}", .{ cs.name, @tagName(f) });
+ copy.basename = b.fmt("{s}.{s}", .{ cs.name, @tagName(f) });
} else {
copy.basename = cs.name;
}
}
- return cs.builder.addObjCopy(cs.getOutputSource(), copy);
+ return b.addObjCopy(cs.getOutputSource(), copy);
}
/// Deprecated: use `std.Build.addRunArtifact`
/// This function will run in the context of the package that created the executable,
/// which is undesirable when running an executable provided by a dependency package.
-pub fn run(exe: *CompileStep) *RunStep {
- return exe.builder.addRunArtifact(exe);
+pub fn run(cs: *CompileStep) *RunStep {
+ return cs.step.owner.addRunArtifact(cs);
}
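A sketch of the preferred pattern in a build.zig, using the builder-level helper instead of the deprecated method (step names illustrative):

    const run_cmd = b.addRunArtifact(exe);
    run_cmd.step.dependOn(b.getInstallStep());
    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);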
-/// Creates an `EmulatableRunStep` with an executable built with `addExecutable`.
-/// Allows running foreign binaries through emulation platforms such as Qemu or Rosetta.
-/// When a binary cannot be ran through emulation or the option is disabled, a warning
-/// will be printed and the binary will *NOT* be ran.
-pub fn runEmulatable(exe: *CompileStep) *EmulatableRunStep {
- assert(exe.kind == .exe or exe.kind == .test_exe);
-
- const run_step = EmulatableRunStep.create(exe.builder, exe.builder.fmt("run {s}", .{exe.step.name}), exe);
- if (exe.vcpkg_bin_path) |path| {
- RunStep.addPathDirInternal(&run_step.step, exe.builder, path);
- }
- return run_step;
-}
-
-pub fn checkObject(self: *CompileStep, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
- return CheckObjectStep.create(self.builder, self.getOutputSource(), obj_format);
+pub fn checkObject(self: *CompileStep) *CheckObjectStep {
+ return CheckObjectStep.create(self.step.owner, self.getOutputSource(), self.target_info.target.ofmt);
}
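With the object format now taken from the step's resolved target, a caller only needs the artifact; a sketch (the checked phrase is illustrative):

    const check = exe.checkObject();
    check.checkStart("cmd SEGMENT_64"); // first anchor phrase to search for
    const run = check.runAndCompare();
    test_step.dependOn(&run.step);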
pub fn setLinkerScriptPath(self: *CompileStep, source: FileSource) void {
- self.linker_script = source.dupe(self.builder);
+ const b = self.step.owner;
+ self.linker_script = source.dupe(b);
source.addStepDependencies(&self.step);
}
pub fn linkFramework(self: *CompileStep, framework_name: []const u8) void {
- self.frameworks.put(self.builder.dupe(framework_name), .{}) catch @panic("OOM");
+ const b = self.step.owner;
+ self.frameworks.put(b.dupe(framework_name), .{}) catch @panic("OOM");
}
pub fn linkFrameworkNeeded(self: *CompileStep, framework_name: []const u8) void {
- self.frameworks.put(self.builder.dupe(framework_name), .{
+ const b = self.step.owner;
+ self.frameworks.put(b.dupe(framework_name), .{
.needed = true,
}) catch @panic("OOM");
}
pub fn linkFrameworkWeak(self: *CompileStep, framework_name: []const u8) void {
- self.frameworks.put(self.builder.dupe(framework_name), .{
+ const b = self.step.owner;
+ self.frameworks.put(b.dupe(framework_name), .{
.weak = true,
}) catch @panic("OOM");
}
@@ -595,7 +618,7 @@ pub fn producesPdbFile(self: *CompileStep) bool {
if (!self.target.isWindows() and !self.target.isUefi()) return false;
if (self.target.getObjectFormat() == .c) return false;
if (self.strip == true) return false;
- return self.isDynamicLibrary() or self.kind == .exe or self.kind == .test_exe;
+ return self.isDynamicLibrary() or self.kind == .exe or self.kind == .@"test";
}
pub fn linkLibC(self: *CompileStep) void {
@@ -609,21 +632,24 @@ pub fn linkLibCpp(self: *CompileStep) void {
/// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call.
pub fn defineCMacro(self: *CompileStep, name: []const u8, value: ?[]const u8) void {
- const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
+ const b = self.step.owner;
+ const macro = std.Build.constructCMacro(b.allocator, name, value);
self.c_macros.append(macro) catch @panic("OOM");
}
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
pub fn defineCMacroRaw(self: *CompileStep, name_and_value: []const u8) void {
- self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
+ const b = self.step.owner;
+ self.c_macros.append(b.dupe(name_and_value)) catch @panic("OOM");
}
/// This one has no integration with anything, it just puts -lname on the command line.
/// Prefer to use `linkSystemLibrary` instead.
pub fn linkSystemLibraryName(self: *CompileStep, name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(name),
+ .name = b.dupe(name),
.needed = false,
.weak = false,
.use_pkg_config = .no,
@@ -634,9 +660,10 @@ pub fn linkSystemLibraryName(self: *CompileStep, name: []const u8) void {
/// This one has no integration with anything, it just puts -needed-lname on the command line.
/// Prefer to use `linkSystemLibraryNeeded` instead.
pub fn linkSystemLibraryNeededName(self: *CompileStep, name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(name),
+ .name = b.dupe(name),
.needed = true,
.weak = false,
.use_pkg_config = .no,
@@ -647,9 +674,10 @@ pub fn linkSystemLibraryNeededName(self: *CompileStep, name: []const u8) void {
/// Darwin-only. This one has no integration with anything, it just puts -weak-lname on the
/// command line. Prefer to use `linkSystemLibraryWeak` instead.
pub fn linkSystemLibraryWeakName(self: *CompileStep, name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(name),
+ .name = b.dupe(name),
.needed = false,
.weak = true,
.use_pkg_config = .no,
@@ -660,9 +688,10 @@ pub fn linkSystemLibraryWeakName(self: *CompileStep, name: []const u8) void {
/// This links against a system library, exclusively using pkg-config to find the library.
/// Prefer to use `linkSystemLibrary` instead.
pub fn linkSystemLibraryPkgConfigOnly(self: *CompileStep, lib_name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(lib_name),
+ .name = b.dupe(lib_name),
.needed = false,
.weak = false,
.use_pkg_config = .force,
@@ -673,9 +702,10 @@ pub fn linkSystemLibraryPkgConfigOnly(self: *CompileStep, lib_name: []const u8)
/// This links against a system library, exclusively using pkg-config to find the library.
/// Prefer to use `linkSystemLibraryNeeded` instead.
pub fn linkSystemLibraryNeededPkgConfigOnly(self: *CompileStep, lib_name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(lib_name),
+ .name = b.dupe(lib_name),
.needed = true,
.weak = false,
.use_pkg_config = .force,
@@ -685,14 +715,15 @@ pub fn linkSystemLibraryNeededPkgConfigOnly(self: *CompileStep, lib_name: []cons
/// Run pkg-config for the given library name and parse the output, returning the arguments
/// that should be passed to zig to link the given library.
-pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u8 {
+fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u8 {
+ const b = self.step.owner;
const pkg_name = match: {
// First we have to map the library name to pkg config name. Unfortunately,
// there are several examples where this is not straightforward:
// -lSDL2 -> pkg-config sdl2
// -lgdk-3 -> pkg-config gdk-3.0
// -latk-1.0 -> pkg-config atk
- const pkgs = try getPkgConfigList(self.builder);
+ const pkgs = try getPkgConfigList(b);
// Exact match means instant winner.
for (pkgs) |pkg| {
@@ -732,7 +763,7 @@ pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u
};
var code: u8 = undefined;
- const stdout = if (self.builder.execAllowFail(&[_][]const u8{
+ const stdout = if (b.execAllowFail(&[_][]const u8{
"pkg-config",
pkg_name,
"--cflags",
@@ -742,11 +773,10 @@ pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u
error.ExecNotSupported => return error.PkgConfigFailed,
error.ExitCodeFailure => return error.PkgConfigFailed,
error.FileNotFound => return error.PkgConfigNotInstalled,
- error.ChildExecFailed => return error.PkgConfigFailed,
else => return err,
};
- var zig_args = ArrayList([]const u8).init(self.builder.allocator);
+ var zig_args = ArrayList([]const u8).init(b.allocator);
defer zig_args.deinit();
var it = mem.tokenize(u8, stdout, " \r\n\t");
@@ -771,8 +801,8 @@ pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u
try zig_args.appendSlice(&[_][]const u8{ "-D", macro });
} else if (mem.startsWith(u8, tok, "-D")) {
try zig_args.append(tok);
- } else if (self.builder.verbose) {
- log.warn("Ignoring pkg-config flag '{s}'", .{tok});
+ } else if (b.debug_pkg_config) {
+ return self.step.fail("unknown pkg-config flag '{s}'", .{tok});
}
}
@@ -795,6 +825,7 @@ fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct {
needed: bool = false,
weak: bool = false,
}) void {
+ const b = self.step.owner;
if (isLibCLibrary(name)) {
self.linkLibC();
return;
@@ -806,7 +837,7 @@ fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct {
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(name),
+ .name = b.dupe(name),
.needed = opts.needed,
.weak = opts.weak,
.use_pkg_config = .yes,
@@ -814,27 +845,31 @@ fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct {
}) catch @panic("OOM");
}
-pub fn setNamePrefix(self: *CompileStep, text: []const u8) void {
- assert(self.kind == .@"test" or self.kind == .test_exe);
- self.name_prefix = self.builder.dupe(text);
+pub fn setName(self: *CompileStep, text: []const u8) void {
+ const b = self.step.owner;
+ assert(self.kind == .@"test");
+ self.name = b.dupe(text);
}
pub fn setFilter(self: *CompileStep, text: ?[]const u8) void {
- assert(self.kind == .@"test" or self.kind == .test_exe);
- self.filter = if (text) |t| self.builder.dupe(t) else null;
+ const b = self.step.owner;
+ assert(self.kind == .@"test");
+ self.filter = if (text) |t| b.dupe(t) else null;
}
pub fn setTestRunner(self: *CompileStep, path: ?[]const u8) void {
- assert(self.kind == .@"test" or self.kind == .test_exe);
- self.test_runner = if (path) |p| self.builder.dupePath(p) else null;
+ const b = self.step.owner;
+ assert(self.kind == .@"test");
+ self.test_runner = if (path) |p| b.dupePath(p) else null;
}
/// Handy when you have many C/C++ source files and want them all to have the same flags.
pub fn addCSourceFiles(self: *CompileStep, files: []const []const u8, flags: []const []const u8) void {
- const c_source_files = self.builder.allocator.create(CSourceFiles) catch @panic("OOM");
+ const b = self.step.owner;
+ const c_source_files = b.allocator.create(CSourceFiles) catch @panic("OOM");
- const files_copy = self.builder.dupeStrings(files);
- const flags_copy = self.builder.dupeStrings(flags);
+ const files_copy = b.dupeStrings(files);
+ const flags_copy = b.dupeStrings(flags);
c_source_files.* = .{
.files = files_copy,
@@ -851,8 +886,9 @@ pub fn addCSourceFile(self: *CompileStep, file: []const u8, flags: []const []con
}
pub fn addCSourceFileSource(self: *CompileStep, source: CSourceFile) void {
- const c_source_file = self.builder.allocator.create(CSourceFile) catch @panic("OOM");
- c_source_file.* = source.dupe(self.builder);
+ const b = self.step.owner;
+ const c_source_file = b.allocator.create(CSourceFile) catch @panic("OOM");
+ c_source_file.* = source.dupe(b);
self.link_objects.append(.{ .c_source_file = c_source_file }) catch @panic("OOM");
source.source.addStepDependencies(&self.step);
}
@@ -866,52 +902,61 @@ pub fn setVerboseCC(self: *CompileStep, value: bool) void {
}
pub fn overrideZigLibDir(self: *CompileStep, dir_path: []const u8) void {
- self.zig_lib_dir = self.builder.dupePath(dir_path);
+ const b = self.step.owner;
+ self.zig_lib_dir = b.dupePath(dir_path);
}
pub fn setMainPkgPath(self: *CompileStep, dir_path: []const u8) void {
- self.main_pkg_path = self.builder.dupePath(dir_path);
+ const b = self.step.owner;
+ self.main_pkg_path = b.dupePath(dir_path);
}
pub fn setLibCFile(self: *CompileStep, libc_file: ?FileSource) void {
- self.libc_file = if (libc_file) |f| f.dupe(self.builder) else null;
+ const b = self.step.owner;
+ self.libc_file = if (libc_file) |f| f.dupe(b) else null;
}
/// Returns the generated executable, library or object file.
/// To run an executable built with zig build, use `run`, or create an install step and invoke it.
pub fn getOutputSource(self: *CompileStep) FileSource {
- return FileSource{ .generated = &self.output_path_source };
+ return .{ .generated = &self.output_path_source };
+}
+
+pub fn getOutputDirectorySource(self: *CompileStep) FileSource {
+ return .{ .generated = &self.output_dirname_source };
}
/// Returns the generated import library. This function can only be called for libraries.
pub fn getOutputLibSource(self: *CompileStep) FileSource {
assert(self.kind == .lib);
- return FileSource{ .generated = &self.output_lib_path_source };
+ return .{ .generated = &self.output_lib_path_source };
}
/// Returns the generated header file.
/// This function can only be called for libraries or object files which have `emit_h` set.
pub fn getOutputHSource(self: *CompileStep) FileSource {
- assert(self.kind != .exe and self.kind != .test_exe and self.kind != .@"test");
+ assert(self.kind != .exe and self.kind != .@"test");
assert(self.emit_h);
- return FileSource{ .generated = &self.output_h_path_source };
+ return .{ .generated = &self.output_h_path_source };
}
/// Returns the generated PDB file. This function can only be called for Windows and UEFI.
pub fn getOutputPdbSource(self: *CompileStep) FileSource {
// TODO: Is this right? Isn't PDB for *any* PE/COFF file?
assert(self.target.isWindows() or self.target.isUefi());
- return FileSource{ .generated = &self.output_pdb_path_source };
+ return .{ .generated = &self.output_pdb_path_source };
}
pub fn addAssemblyFile(self: *CompileStep, path: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
- .assembly_file = .{ .path = self.builder.dupe(path) },
+ .assembly_file = .{ .path = b.dupe(path) },
}) catch @panic("OOM");
}
pub fn addAssemblyFileSource(self: *CompileStep, source: FileSource) void {
- const source_duped = source.dupe(self.builder);
+ const b = self.step.owner;
+ const source_duped = source.dupe(b);
self.link_objects.append(.{ .assembly_file = source_duped }) catch @panic("OOM");
source_duped.addStepDependencies(&self.step);
}
@@ -921,7 +966,8 @@ pub fn addObjectFile(self: *CompileStep, source_file: []const u8) void {
}
pub fn addObjectFileSource(self: *CompileStep, source: FileSource) void {
- self.link_objects.append(.{ .static_path = source.dupe(self.builder) }) catch @panic("OOM");
+ const b = self.step.owner;
+ self.link_objects.append(.{ .static_path = source.dupe(b) }) catch @panic("OOM");
source.addStepDependencies(&self.step);
}
@@ -936,11 +982,13 @@ pub const addLibPath = @compileError("deprecated, use addLibraryPath");
pub const addFrameworkDir = @compileError("deprecated, use addFrameworkPath");
pub fn addSystemIncludePath(self: *CompileStep, path: []const u8) void {
- self.include_dirs.append(IncludeDir{ .raw_path_system = self.builder.dupe(path) }) catch @panic("OOM");
+ const b = self.step.owner;
+ self.include_dirs.append(IncludeDir{ .raw_path_system = b.dupe(path) }) catch @panic("OOM");
}
pub fn addIncludePath(self: *CompileStep, path: []const u8) void {
- self.include_dirs.append(IncludeDir{ .raw_path = self.builder.dupe(path) }) catch @panic("OOM");
+ const b = self.step.owner;
+ self.include_dirs.append(IncludeDir{ .raw_path = b.dupe(path) }) catch @panic("OOM");
}
pub fn addConfigHeader(self: *CompileStep, config_header: *ConfigHeaderStep) void {
@@ -949,23 +997,42 @@ pub fn addConfigHeader(self: *CompileStep, config_header: *ConfigHeaderStep) voi
}
pub fn addLibraryPath(self: *CompileStep, path: []const u8) void {
- self.lib_paths.append(self.builder.dupe(path)) catch @panic("OOM");
+ const b = self.step.owner;
+ self.lib_paths.append(.{ .path = b.dupe(path) }) catch @panic("OOM");
+}
+
+pub fn addLibraryPathDirectorySource(self: *CompileStep, directory_source: FileSource) void {
+ self.lib_paths.append(directory_source) catch @panic("OOM");
+ directory_source.addStepDependencies(&self.step);
}
pub fn addRPath(self: *CompileStep, path: []const u8) void {
- self.rpaths.append(self.builder.dupe(path)) catch @panic("OOM");
+ const b = self.step.owner;
+ self.rpaths.append(.{ .path = b.dupe(path) }) catch @panic("OOM");
+}
+
+pub fn addRPathDirectorySource(self: *CompileStep, directory_source: FileSource) void {
+ self.rpaths.append(directory_source) catch @panic("OOM");
+ directory_source.addStepDependencies(&self.step);
}
pub fn addFrameworkPath(self: *CompileStep, dir_path: []const u8) void {
- self.framework_dirs.append(self.builder.dupe(dir_path)) catch @panic("OOM");
+ const b = self.step.owner;
+ self.framework_dirs.append(.{ .path = b.dupe(dir_path) }) catch @panic("OOM");
+}
+
+pub fn addFrameworkPathDirectorySource(self: *CompileStep, directory_source: FileSource) void {
+ self.framework_dirs.append(directory_source) catch @panic("OOM");
+ directory_source.addStepDependencies(&self.step);
}
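These *DirectorySource variants accept a generated directory, such as the one exposed by getOutputDirectorySource above, and register the producing step as a dependency automatically. A sketch, given `exe` and `lib` as *CompileStep values from elsewhere in build.zig:

    exe.addLibraryPathDirectorySource(lib.getOutputDirectorySource());
    exe.addRPathDirectorySource(lib.getOutputDirectorySource());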
/// Adds a module to be used with `@import` and exposing it in the current
/// package's module table using `name`.
pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void {
- cs.modules.put(cs.builder.dupe(name), module) catch @panic("OOM");
+ const b = cs.step.owner;
+ cs.modules.put(b.dupe(name), module) catch @panic("OOM");
- var done = std.AutoHashMap(*Module, void).init(cs.builder.allocator);
+ var done = std.AutoHashMap(*Module, void).init(b.allocator);
defer done.deinit();
cs.addRecursiveBuildDeps(module, &done) catch @panic("OOM");
}
@@ -973,7 +1040,8 @@ pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void {
/// Adds a module to be used with `@import` without exposing it in the current
/// package's module table.
pub fn addAnonymousModule(cs: *CompileStep, name: []const u8, options: std.Build.CreateModuleOptions) void {
- const module = cs.builder.createModule(options);
+ const b = cs.step.owner;
+ const module = b.createModule(options);
return addModule(cs, name, module);
}
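For reference, a sketch of wiring up a module in a build script; the `source_file` field name on CreateModuleOptions is assumed from the std.Build of this era and is not shown in this diff:

    const mod = b.createModule(.{
        .source_file = .{ .path = "src/mylib.zig" }, // assumed field name
    });
    exe.addModule("mylib", mod);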
@@ -993,12 +1061,13 @@ fn addRecursiveBuildDeps(cs: *CompileStep, module: *Module, done: *std.AutoHashM
/// If Vcpkg was found on the system, it will be added to include and lib
/// paths for the specified target.
pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void {
+ const b = self.step.owner;
// Ideally in the Unattempted case we would call the function recursively
// after findVcpkgRoot and have only one switch statement, but the compiler
// cannot resolve the error set.
- switch (self.builder.vcpkg_root) {
+ switch (b.vcpkg_root) {
.unattempted => {
- self.builder.vcpkg_root = if (try findVcpkgRoot(self.builder.allocator)) |root|
+ b.vcpkg_root = if (try findVcpkgRoot(b.allocator)) |root|
VcpkgRoot{ .found = root }
else
.not_found;
@@ -1007,31 +1076,32 @@ pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void {
.found => {},
}
- switch (self.builder.vcpkg_root) {
+ switch (b.vcpkg_root) {
.unattempted => unreachable,
.not_found => return error.VcpkgNotFound,
.found => |root| {
- const allocator = self.builder.allocator;
+ const allocator = b.allocator;
const triplet = try self.target.vcpkgTriplet(allocator, if (linkage == .static) .Static else .Dynamic);
- defer self.builder.allocator.free(triplet);
+ defer b.allocator.free(triplet);
- const include_path = self.builder.pathJoin(&.{ root, "installed", triplet, "include" });
+ const include_path = b.pathJoin(&.{ root, "installed", triplet, "include" });
errdefer allocator.free(include_path);
try self.include_dirs.append(IncludeDir{ .raw_path = include_path });
- const lib_path = self.builder.pathJoin(&.{ root, "installed", triplet, "lib" });
- try self.lib_paths.append(lib_path);
+ const lib_path = b.pathJoin(&.{ root, "installed", triplet, "lib" });
+ try self.lib_paths.append(.{ .path = lib_path });
- self.vcpkg_bin_path = self.builder.pathJoin(&.{ root, "installed", triplet, "bin" });
+ self.vcpkg_bin_path = b.pathJoin(&.{ root, "installed", triplet, "bin" });
},
}
}
pub fn setExecCmd(self: *CompileStep, args: []const ?[]const u8) void {
+ const b = self.step.owner;
assert(self.kind == .@"test");
- const duped_args = self.builder.allocator.alloc(?[]u8, args.len) catch @panic("OOM");
+ const duped_args = b.allocator.alloc(?[]u8, args.len) catch @panic("OOM");
for (args, 0..) |arg, i| {
- duped_args[i] = if (arg) |a| self.builder.dupe(a) else null;
+ duped_args[i] = if (arg) |a| b.dupe(a) else null;
}
self.exec_cmd_args = duped_args;
}
@@ -1040,22 +1110,27 @@ fn linkLibraryOrObject(self: *CompileStep, other: *CompileStep) void {
self.step.dependOn(&other.step);
self.link_objects.append(.{ .other_step = other }) catch @panic("OOM");
self.include_dirs.append(.{ .other_step = other }) catch @panic("OOM");
+
+ for (other.installed_headers.items) |install_step| {
+ self.step.dependOn(install_step);
+ }
}
fn appendModuleArgs(
cs: *CompileStep,
zig_args: *ArrayList([]const u8),
) error{OutOfMemory}!void {
+ const b = cs.step.owner;
// First, traverse the whole dependency graph and give every module a unique name, ideally one
// named after what it's called somewhere in the graph. It will help here to have both a mapping
// from module to name and a set of all the currently-used names.
- var mod_names = std.AutoHashMap(*Module, []const u8).init(cs.builder.allocator);
- var names = std.StringHashMap(void).init(cs.builder.allocator);
+ var mod_names = std.AutoHashMap(*Module, []const u8).init(b.allocator);
+ var names = std.StringHashMap(void).init(b.allocator);
var to_name = std.ArrayList(struct {
name: []const u8,
mod: *Module,
- }).init(cs.builder.allocator);
+ }).init(b.allocator);
{
var it = cs.modules.iterator();
while (it.next()) |kv| {
@@ -1076,7 +1151,7 @@ fn appendModuleArgs(
if (mod_names.contains(dep.mod)) continue;
// We'll use this buffer to store the name we decide on
- var buf = try cs.builder.allocator.alloc(u8, dep.name.len + 32);
+ var buf = try b.allocator.alloc(u8, dep.name.len + 32);
// First, try just the exposed dependency name
std.mem.copy(u8, buf, dep.name);
var name = buf[0..dep.name.len];
@@ -1113,15 +1188,15 @@ fn appendModuleArgs(
const mod = kv.key_ptr.*;
const name = kv.value_ptr.*;
- const deps_str = try constructDepString(cs.builder.allocator, mod_names, mod.dependencies);
+ const deps_str = try constructDepString(b.allocator, mod_names, mod.dependencies);
const src = mod.builder.pathFromRoot(mod.source_file.getPath(mod.builder));
try zig_args.append("--mod");
- try zig_args.append(try std.fmt.allocPrint(cs.builder.allocator, "{s}:{s}:{s}", .{ name, deps_str, src }));
+ try zig_args.append(try std.fmt.allocPrint(b.allocator, "{s}:{s}:{s}", .{ name, deps_str, src }));
}
}
// Lastly, output the root dependencies
- const deps_str = try constructDepString(cs.builder.allocator, mod_names, cs.modules);
+ const deps_str = try constructDepString(b.allocator, mod_names, cs.modules);
if (deps_str.len > 0) {
try zig_args.append("--deps");
try zig_args.append(deps_str);
@@ -1151,43 +1226,36 @@ fn constructDepString(
}
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ const b = step.owner;
const self = @fieldParentPtr(CompileStep, "step", step);
- const builder = self.builder;
if (self.root_src == null and self.link_objects.items.len == 0) {
- log.err("{s}: linker needs 1 or more objects to link", .{self.step.name});
- return error.NeedAnObject;
+ return step.fail("the linker needs one or more objects to link", .{});
}
- var zig_args = ArrayList([]const u8).init(builder.allocator);
+ var zig_args = ArrayList([]const u8).init(b.allocator);
defer zig_args.deinit();
- try zig_args.append(builder.zig_exe);
+ try zig_args.append(b.zig_exe);
const cmd = switch (self.kind) {
.lib => "build-lib",
.exe => "build-exe",
.obj => "build-obj",
.@"test" => "test",
- .test_exe => "test",
};
try zig_args.append(cmd);
- if (builder.color != .auto) {
- try zig_args.append("--color");
- try zig_args.append(@tagName(builder.color));
- }
-
- if (builder.reference_trace) |some| {
- try zig_args.append(try std.fmt.allocPrint(builder.allocator, "-freference-trace={d}", .{some}));
+ if (b.reference_trace) |some| {
+ try zig_args.append(try std.fmt.allocPrint(b.allocator, "-freference-trace={d}", .{some}));
}
try addFlag(&zig_args, "LLVM", self.use_llvm);
try addFlag(&zig_args, "LLD", self.use_lld);
if (self.target.ofmt) |ofmt| {
- try zig_args.append(try std.fmt.allocPrint(builder.allocator, "-ofmt={s}", .{@tagName(ofmt)}));
+ try zig_args.append(try std.fmt.allocPrint(b.allocator, "-ofmt={s}", .{@tagName(ofmt)}));
}
if (self.entry_symbol_name) |entry| {
@@ -1197,18 +1265,18 @@ fn make(step: *Step) !void {
if (self.stack_size) |stack_size| {
try zig_args.append("--stack");
- try zig_args.append(try std.fmt.allocPrint(builder.allocator, "{}", .{stack_size}));
+ try zig_args.append(try std.fmt.allocPrint(b.allocator, "{}", .{stack_size}));
}
- if (self.root_src) |root_src| try zig_args.append(root_src.getPath(builder));
+ if (self.root_src) |root_src| try zig_args.append(root_src.getPath(b));
// We will add link objects from transitive dependencies, but we want to keep
// all link objects in the same order provided.
// This array is used to keep self.link_objects immutable.
var transitive_deps: TransitiveDeps = .{
- .link_objects = ArrayList(LinkObject).init(builder.allocator),
- .seen_system_libs = StringHashMap(void).init(builder.allocator),
- .seen_steps = std.AutoHashMap(*const Step, void).init(builder.allocator),
+ .link_objects = ArrayList(LinkObject).init(b.allocator),
+ .seen_system_libs = StringHashMap(void).init(b.allocator),
+ .seen_steps = std.AutoHashMap(*const Step, void).init(b.allocator),
.is_linking_libcpp = self.is_linking_libcpp,
.is_linking_libc = self.is_linking_libc,
.frameworks = &self.frameworks,
@@ -1221,14 +1289,13 @@ fn make(step: *Step) !void {
for (transitive_deps.link_objects.items) |link_object| {
switch (link_object) {
- .static_path => |static_path| try zig_args.append(static_path.getPath(builder)),
+ .static_path => |static_path| try zig_args.append(static_path.getPath(b)),
.other_step => |other| switch (other.kind) {
.exe => @panic("Cannot link with an executable build artifact"),
- .test_exe => @panic("Cannot link with an executable build artifact"),
.@"test" => @panic("Cannot link with a test"),
.obj => {
- try zig_args.append(other.getOutputSource().getPath(builder));
+ try zig_args.append(other.getOutputSource().getPath(b));
},
.lib => l: {
if (self.isStaticLibrary() and other.isStaticLibrary()) {
@@ -1236,7 +1303,7 @@ fn make(step: *Step) !void {
break :l;
}
- const full_path_lib = other.getOutputLibSource().getPath(builder);
+ const full_path_lib = other.getOutputLibSource().getPath(b);
try zig_args.append(full_path_lib);
if (other.linkage == Linkage.dynamic and !self.target.isWindows()) {
@@ -1251,14 +1318,11 @@ fn make(step: *Step) !void {
.system_lib => |system_lib| {
const prefix: []const u8 = prefix: {
if (system_lib.needed) break :prefix "-needed-l";
- if (system_lib.weak) {
- if (self.target.isDarwin()) break :prefix "-weak-l";
- log.warn("Weak library import used for a non-darwin target, this will be converted to normally library import `-lname`", .{});
- }
+ if (system_lib.weak) break :prefix "-weak-l";
break :prefix "-l";
};
switch (system_lib.use_pkg_config) {
- .no => try zig_args.append(builder.fmt("{s}{s}", .{ prefix, system_lib.name })),
+ .no => try zig_args.append(b.fmt("{s}{s}", .{ prefix, system_lib.name })),
.yes, .force => {
if (self.runPkgConfig(system_lib.name)) |args| {
try zig_args.appendSlice(args);
@@ -1272,7 +1336,7 @@ fn make(step: *Step) !void {
.yes => {
// pkg-config failed, so fall back to linking the library
// by name directly.
- try zig_args.append(builder.fmt("{s}{s}", .{
+ try zig_args.append(b.fmt("{s}{s}", .{
prefix,
system_lib.name,
}));
@@ -1295,7 +1359,7 @@ fn make(step: *Step) !void {
try zig_args.append("--");
prev_has_extra_flags = false;
}
- try zig_args.append(asm_file.getPath(builder));
+ try zig_args.append(asm_file.getPath(b));
},
.c_source_file => |c_source_file| {
@@ -1312,7 +1376,7 @@ fn make(step: *Step) !void {
}
try zig_args.append("--");
}
- try zig_args.append(c_source_file.source.getPath(builder));
+ try zig_args.append(c_source_file.source.getPath(b));
},
.c_source_files => |c_source_files| {
@@ -1330,7 +1394,7 @@ fn make(step: *Step) !void {
try zig_args.append("--");
}
for (c_source_files.files) |file| {
- try zig_args.append(builder.pathFromRoot(file));
+ try zig_args.append(b.pathFromRoot(file));
}
},
}
@@ -1346,7 +1410,7 @@ fn make(step: *Step) !void {
if (self.image_base) |image_base| {
try zig_args.append("--image-base");
- try zig_args.append(builder.fmt("0x{x}", .{image_base}));
+ try zig_args.append(b.fmt("0x{x}", .{image_base}));
}
if (self.filter) |filter| {
@@ -1358,39 +1422,34 @@ fn make(step: *Step) !void {
try zig_args.append("--test-evented-io");
}
- if (self.name_prefix.len != 0) {
- try zig_args.append("--test-name-prefix");
- try zig_args.append(self.name_prefix);
- }
-
if (self.test_runner) |test_runner| {
try zig_args.append("--test-runner");
- try zig_args.append(builder.pathFromRoot(test_runner));
+ try zig_args.append(b.pathFromRoot(test_runner));
}
- for (builder.debug_log_scopes) |log_scope| {
+ for (b.debug_log_scopes) |log_scope| {
try zig_args.append("--debug-log");
try zig_args.append(log_scope);
}
- if (builder.debug_compile_errors) {
+ if (b.debug_compile_errors) {
try zig_args.append("--debug-compile-errors");
}
- if (builder.verbose_cimport) try zig_args.append("--verbose-cimport");
- if (builder.verbose_air) try zig_args.append("--verbose-air");
- if (builder.verbose_llvm_ir) try zig_args.append("--verbose-llvm-ir");
- if (builder.verbose_link or self.verbose_link) try zig_args.append("--verbose-link");
- if (builder.verbose_cc or self.verbose_cc) try zig_args.append("--verbose-cc");
- if (builder.verbose_llvm_cpu_features) try zig_args.append("--verbose-llvm-cpu-features");
+ if (b.verbose_cimport) try zig_args.append("--verbose-cimport");
+ if (b.verbose_air) try zig_args.append("--verbose-air");
+ if (b.verbose_llvm_ir) try zig_args.append("--verbose-llvm-ir");
+ if (b.verbose_link or self.verbose_link) try zig_args.append("--verbose-link");
+ if (b.verbose_cc or self.verbose_cc) try zig_args.append("--verbose-cc");
+ if (b.verbose_llvm_cpu_features) try zig_args.append("--verbose-llvm-cpu-features");
- if (self.emit_analysis.getArg(builder, "emit-analysis")) |arg| try zig_args.append(arg);
- if (self.emit_asm.getArg(builder, "emit-asm")) |arg| try zig_args.append(arg);
- if (self.emit_bin.getArg(builder, "emit-bin")) |arg| try zig_args.append(arg);
- if (self.emit_docs.getArg(builder, "emit-docs")) |arg| try zig_args.append(arg);
- if (self.emit_implib.getArg(builder, "emit-implib")) |arg| try zig_args.append(arg);
- if (self.emit_llvm_bc.getArg(builder, "emit-llvm-bc")) |arg| try zig_args.append(arg);
- if (self.emit_llvm_ir.getArg(builder, "emit-llvm-ir")) |arg| try zig_args.append(arg);
+ if (self.emit_analysis.getArg(b, "emit-analysis")) |arg| try zig_args.append(arg);
+ if (self.emit_asm.getArg(b, "emit-asm")) |arg| try zig_args.append(arg);
+ if (self.emit_bin.getArg(b, "emit-bin")) |arg| try zig_args.append(arg);
+ if (self.emit_docs.getArg(b, "emit-docs")) |arg| try zig_args.append(arg);
+ if (self.emit_implib.getArg(b, "emit-implib")) |arg| try zig_args.append(arg);
+ if (self.emit_llvm_bc.getArg(b, "emit-llvm-bc")) |arg| try zig_args.append(arg);
+ if (self.emit_llvm_ir.getArg(b, "emit-llvm-ir")) |arg| try zig_args.append(arg);
if (self.emit_h) try zig_args.append("-femit-h");
@@ -1431,31 +1490,31 @@ fn make(step: *Step) !void {
}
if (self.link_z_common_page_size) |size| {
try zig_args.append("-z");
- try zig_args.append(builder.fmt("common-page-size={d}", .{size}));
+ try zig_args.append(b.fmt("common-page-size={d}", .{size}));
}
if (self.link_z_max_page_size) |size| {
try zig_args.append("-z");
- try zig_args.append(builder.fmt("max-page-size={d}", .{size}));
+ try zig_args.append(b.fmt("max-page-size={d}", .{size}));
}
if (self.libc_file) |libc_file| {
try zig_args.append("--libc");
- try zig_args.append(libc_file.getPath(builder));
- } else if (builder.libc_file) |libc_file| {
+ try zig_args.append(libc_file.getPath(b));
+ } else if (b.libc_file) |libc_file| {
try zig_args.append("--libc");
try zig_args.append(libc_file);
}
switch (self.optimize) {
.Debug => {}, // Skip since it's the default.
- else => try zig_args.append(builder.fmt("-O{s}", .{@tagName(self.optimize)})),
+ else => try zig_args.append(b.fmt("-O{s}", .{@tagName(self.optimize)})),
}
try zig_args.append("--cache-dir");
- try zig_args.append(builder.cache_root.path orelse ".");
+ try zig_args.append(b.cache_root.path orelse ".");
try zig_args.append("--global-cache-dir");
- try zig_args.append(builder.global_cache_root.path orelse ".");
+ try zig_args.append(b.global_cache_root.path orelse ".");
try zig_args.append("--name");
try zig_args.append(self.name);
@@ -1467,11 +1526,11 @@ fn make(step: *Step) !void {
if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic) {
if (self.version) |version| {
try zig_args.append("--version");
- try zig_args.append(builder.fmt("{}", .{version}));
+ try zig_args.append(b.fmt("{}", .{version}));
}
if (self.target.isDarwin()) {
- const install_name = self.install_name orelse builder.fmt("@rpath/{s}{s}{s}", .{
+ const install_name = self.install_name orelse b.fmt("@rpath/{s}{s}{s}", .{
self.target.libPrefix(),
self.name,
self.target.dynamicLibSuffix(),
@@ -1485,7 +1544,7 @@ fn make(step: *Step) !void {
try zig_args.appendSlice(&[_][]const u8{ "--entitlements", entitlements });
}
if (self.pagezero_size) |pagezero_size| {
- const size = try std.fmt.allocPrint(builder.allocator, "{x}", .{pagezero_size});
+ const size = try std.fmt.allocPrint(b.allocator, "{x}", .{pagezero_size});
try zig_args.appendSlice(&[_][]const u8{ "-pagezero_size", size });
}
if (self.search_strategy) |strat| switch (strat) {
@@ -1493,7 +1552,7 @@ fn make(step: *Step) !void {
.dylibs_first => try zig_args.append("-search_dylibs_first"),
};
if (self.headerpad_size) |headerpad_size| {
- const size = try std.fmt.allocPrint(builder.allocator, "{x}", .{headerpad_size});
+ const size = try std.fmt.allocPrint(b.allocator, "{x}", .{headerpad_size});
try zig_args.appendSlice(&[_][]const u8{ "-headerpad", size });
}
if (self.headerpad_max_install_names) {
@@ -1541,16 +1600,16 @@ fn make(step: *Step) !void {
try zig_args.append("--export-table");
}
if (self.initial_memory) |initial_memory| {
- try zig_args.append(builder.fmt("--initial-memory={d}", .{initial_memory}));
+ try zig_args.append(b.fmt("--initial-memory={d}", .{initial_memory}));
}
if (self.max_memory) |max_memory| {
- try zig_args.append(builder.fmt("--max-memory={d}", .{max_memory}));
+ try zig_args.append(b.fmt("--max-memory={d}", .{max_memory}));
}
if (self.shared_memory) {
try zig_args.append("--shared-memory");
}
if (self.global_base) |global_base| {
- try zig_args.append(builder.fmt("--global-base={d}", .{global_base}));
+ try zig_args.append(b.fmt("--global-base={d}", .{global_base}));
}
if (self.code_model != .default) {
@@ -1558,16 +1617,16 @@ fn make(step: *Step) !void {
try zig_args.append(@tagName(self.code_model));
}
if (self.wasi_exec_model) |model| {
- try zig_args.append(builder.fmt("-mexec-model={s}", .{@tagName(model)}));
+ try zig_args.append(b.fmt("-mexec-model={s}", .{@tagName(model)}));
}
for (self.export_symbol_names) |symbol_name| {
- try zig_args.append(builder.fmt("--export={s}", .{symbol_name}));
+ try zig_args.append(b.fmt("--export={s}", .{symbol_name}));
}
if (!self.target.isNative()) {
try zig_args.appendSlice(&.{
- "-target", try self.target.zigTriple(builder.allocator),
- "-mcpu", try std.Build.serializeCpu(builder.allocator, self.target.getCpu()),
+ "-target", try self.target.zigTriple(b.allocator),
+ "-mcpu", try std.Build.serializeCpu(b.allocator, self.target.getCpu()),
});
if (self.target.dynamic_linker.get()) |dynamic_linker| {
@@ -1578,12 +1637,12 @@ fn make(step: *Step) !void {
if (self.linker_script) |linker_script| {
try zig_args.append("--script");
- try zig_args.append(linker_script.getPath(builder));
+ try zig_args.append(linker_script.getPath(b));
}
if (self.version_script) |version_script| {
try zig_args.append("--version-script");
- try zig_args.append(builder.pathFromRoot(version_script));
+ try zig_args.append(b.pathFromRoot(version_script));
}
if (self.kind == .@"test") {
@@ -1596,83 +1655,7 @@ fn make(step: *Step) !void {
try zig_args.append("--test-cmd-bin");
}
}
- } else {
- const need_cross_glibc = self.target.isGnuLibC() and transitive_deps.is_linking_libc;
-
- switch (builder.host.getExternalExecutor(self.target_info, .{
- .qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,
- .link_libc = transitive_deps.is_linking_libc,
- })) {
- .native => {},
- .bad_dl, .bad_os_or_cpu => {
- try zig_args.append("--test-no-exec");
- },
- .rosetta => if (builder.enable_rosetta) {
- try zig_args.append("--test-cmd-bin");
- } else {
- try zig_args.append("--test-no-exec");
- },
- .qemu => |bin_name| ok: {
- if (builder.enable_qemu) qemu: {
- const glibc_dir_arg = if (need_cross_glibc)
- builder.glibc_runtimes_dir orelse break :qemu
- else
- null;
- try zig_args.append("--test-cmd");
- try zig_args.append(bin_name);
- if (glibc_dir_arg) |dir| {
- // TODO look into making this a call to `linuxTriple`. This
- // needs the directory to be called "i686" rather than
- // "x86" which is why we do it manually here.
- const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
- const cpu_arch = self.target.getCpuArch();
- const os_tag = self.target.getOsTag();
- const abi = self.target.getAbi();
- const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
- "i686"
- else
- @tagName(cpu_arch);
- const full_dir = try std.fmt.allocPrint(builder.allocator, fmt_str, .{
- dir, cpu_arch_name, @tagName(os_tag), @tagName(abi),
- });
-
- try zig_args.append("--test-cmd");
- try zig_args.append("-L");
- try zig_args.append("--test-cmd");
- try zig_args.append(full_dir);
- }
- try zig_args.append("--test-cmd-bin");
- break :ok;
- }
- try zig_args.append("--test-no-exec");
- },
- .wine => |bin_name| if (builder.enable_wine) {
- try zig_args.append("--test-cmd");
- try zig_args.append(bin_name);
- try zig_args.append("--test-cmd-bin");
- } else {
- try zig_args.append("--test-no-exec");
- },
- .wasmtime => |bin_name| if (builder.enable_wasmtime) {
- try zig_args.append("--test-cmd");
- try zig_args.append(bin_name);
- try zig_args.append("--test-cmd");
- try zig_args.append("--dir=.");
- try zig_args.append("--test-cmd-bin");
- } else {
- try zig_args.append("--test-no-exec");
- },
- .darling => |bin_name| if (builder.enable_darling) {
- try zig_args.append("--test-cmd");
- try zig_args.append(bin_name);
- try zig_args.append("--test-cmd-bin");
- } else {
- try zig_args.append("--test-no-exec");
- },
- }
}
- } else if (self.kind == .test_exe) {
- try zig_args.append("--test-no-exec");
}
try self.appendModuleArgs(&zig_args);
@@ -1681,18 +1664,18 @@ fn make(step: *Step) !void {
switch (include_dir) {
.raw_path => |include_path| {
try zig_args.append("-I");
- try zig_args.append(builder.pathFromRoot(include_path));
+ try zig_args.append(b.pathFromRoot(include_path));
},
.raw_path_system => |include_path| {
- if (builder.sysroot != null) {
+ if (b.sysroot != null) {
try zig_args.append("-iwithsysroot");
} else {
try zig_args.append("-isystem");
}
- const resolved_include_path = builder.pathFromRoot(include_path);
+ const resolved_include_path = b.pathFromRoot(include_path);
- const common_include_path = if (builtin.os.tag == .windows and builder.sysroot != null and fs.path.isAbsolute(resolved_include_path)) blk: {
+ const common_include_path = if (builtin.os.tag == .windows and b.sysroot != null and fs.path.isAbsolute(resolved_include_path)) blk: {
// We need to check for disk designator and strip it out from dir path so
// that zig/clang can concat resolved_include_path with sysroot.
const disk_designator = fs.path.diskDesignatorWindows(resolved_include_path);
@@ -1708,17 +1691,14 @@ fn make(step: *Step) !void {
},
.other_step => |other| {
if (other.emit_h) {
- const h_path = other.getOutputHSource().getPath(builder);
+ const h_path = other.getOutputHSource().getPath(b);
try zig_args.append("-isystem");
try zig_args.append(fs.path.dirname(h_path).?);
}
if (other.installed_headers.items.len > 0) {
- for (other.installed_headers.items) |install_step| {
- try install_step.make();
- }
try zig_args.append("-I");
- try zig_args.append(builder.pathJoin(&.{
- other.builder.install_prefix, "include",
+ try zig_args.append(b.pathJoin(&.{
+ other.step.owner.install_prefix, "include",
}));
}
},
@@ -1730,33 +1710,35 @@ fn make(step: *Step) !void {
}
}
- for (self.lib_paths.items) |lib_path| {
- try zig_args.append("-L");
- try zig_args.append(lib_path);
- }
-
- for (self.rpaths.items) |rpath| {
- try zig_args.append("-rpath");
- try zig_args.append(rpath);
- }
-
for (self.c_macros.items) |c_macro| {
try zig_args.append("-D");
try zig_args.append(c_macro);
}
- if (self.target.isDarwin()) {
- for (self.framework_dirs.items) |dir| {
- if (builder.sysroot != null) {
- try zig_args.append("-iframeworkwithsysroot");
- } else {
- try zig_args.append("-iframework");
- }
- try zig_args.append(dir);
- try zig_args.append("-F");
- try zig_args.append(dir);
- }
+ try zig_args.ensureUnusedCapacity(2 * self.lib_paths.items.len);
+ for (self.lib_paths.items) |lib_path| {
+ zig_args.appendAssumeCapacity("-L");
+ zig_args.appendAssumeCapacity(lib_path.getPath2(b, step));
+ }
+ try zig_args.ensureUnusedCapacity(2 * self.rpaths.items.len);
+ for (self.rpaths.items) |rpath| {
+ zig_args.appendAssumeCapacity("-rpath");
+ zig_args.appendAssumeCapacity(rpath.getPath2(b, step));
+ }
+
+ for (self.framework_dirs.items) |directory_source| {
+ if (b.sysroot != null) {
+ try zig_args.append("-iframeworkwithsysroot");
+ } else {
+ try zig_args.append("-iframework");
+ }
+ try zig_args.append(directory_source.getPath2(b, step));
+ try zig_args.append("-F");
+ try zig_args.append(directory_source.getPath2(b, step));
+ }
+
+ {
var it = self.frameworks.iterator();
while (it.next()) |entry| {
const name = entry.key_ptr.*;
@@ -1770,29 +1752,45 @@ fn make(step: *Step) !void {
}
try zig_args.append(name);
}
- } else {
- if (self.framework_dirs.items.len > 0) {
- log.info("Framework directories have been added for a non-darwin target, this will have no affect on the build", .{});
- }
-
- if (self.frameworks.count() > 0) {
- log.info("Frameworks have been added for a non-darwin target, this will have no affect on the build", .{});
- }
}
- if (builder.sysroot) |sysroot| {
+ if (b.sysroot) |sysroot| {
try zig_args.appendSlice(&[_][]const u8{ "--sysroot", sysroot });
}
- for (builder.search_prefixes.items) |search_prefix| {
- try zig_args.append("-L");
- try zig_args.append(builder.pathJoin(&.{
- search_prefix, "lib",
- }));
- try zig_args.append("-I");
- try zig_args.append(builder.pathJoin(&.{
- search_prefix, "include",
- }));
+ for (b.search_prefixes.items) |search_prefix| {
+ var prefix_dir = fs.cwd().openDir(search_prefix, .{}) catch |err| {
+ return step.fail("unable to open prefix directory '{s}': {s}", .{
+ search_prefix, @errorName(err),
+ });
+ };
+ defer prefix_dir.close();
+
+ // Avoid passing -L and -I flags for nonexistent directories.
+ // This prevents a warning (which should probably be upgraded to an error in
+ // Zig's CLI parsing code) when the linker sees an -L directory that does not exist.
+
+ if (prefix_dir.accessZ("lib", .{})) |_| {
+ try zig_args.appendSlice(&.{
+ "-L", try fs.path.join(b.allocator, &.{ search_prefix, "lib" }),
+ });
+ } else |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| return step.fail("unable to access '{s}/lib' directory: {s}", .{
+ search_prefix, @errorName(e),
+ }),
+ }
+
+ if (prefix_dir.accessZ("include", .{})) |_| {
+ try zig_args.appendSlice(&.{
+ "-I", try fs.path.join(b.allocator, &.{ search_prefix, "include" }),
+ });
+ } else |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| return step.fail("unable to access '{s}/include' directory: {s}", .{
+ search_prefix, @errorName(e),
+ }),
+ }
}
try addFlag(&zig_args, "valgrind", self.valgrind_support);
@@ -1801,15 +1799,15 @@ fn make(step: *Step) !void {
if (self.zig_lib_dir) |dir| {
try zig_args.append("--zig-lib-dir");
- try zig_args.append(builder.pathFromRoot(dir));
- } else if (builder.zig_lib_dir) |dir| {
+ try zig_args.append(b.pathFromRoot(dir));
+ } else if (b.zig_lib_dir) |dir| {
try zig_args.append("--zig-lib-dir");
try zig_args.append(dir);
}
if (self.main_pkg_path) |dir| {
try zig_args.append("--main-pkg-path");
- try zig_args.append(builder.pathFromRoot(dir));
+ try zig_args.append(b.pathFromRoot(dir));
}
try addFlag(&zig_args, "PIC", self.force_pic);
@@ -1831,6 +1829,7 @@ fn make(step: *Step) !void {
}
try zig_args.append("--enable-cache");
+ try zig_args.append("--listen=-");
// Windows has an argument length limit of 32,766 characters, macOS 262,144 and Linux
// 2,097,152. If our args exceed 30 KiB, we instead write them to a "response file" and
@@ -1841,15 +1840,15 @@ fn make(step: *Step) !void {
args_length += arg.len + 1; // +1 to account for null terminator
}
if (args_length >= 30 * 1024) {
- try builder.cache_root.handle.makePath("args");
+ try b.cache_root.handle.makePath("args");
const args_to_escape = zig_args.items[2..];
- var escaped_args = try ArrayList([]const u8).initCapacity(builder.allocator, args_to_escape.len);
+ var escaped_args = try ArrayList([]const u8).initCapacity(b.allocator, args_to_escape.len);
arg_blk: for (args_to_escape) |arg| {
for (arg, 0..) |c, arg_idx| {
if (c == '\\' or c == '"') {
// Slow path for arguments that need to be escaped. We'll need to allocate and copy
- var escaped = try ArrayList(u8).initCapacity(builder.allocator, arg.len + 1);
+ var escaped = try ArrayList(u8).initCapacity(b.allocator, arg.len + 1);
const writer = escaped.writer();
try writer.writeAll(arg[0..arg_idx]);
for (arg[arg_idx..]) |to_escape| {
@@ -1865,8 +1864,8 @@ fn make(step: *Step) !void {
// Write the args to zig-cache/args/ to avoid conflicts with
// other zig build commands running in parallel.
- const partially_quoted = try std.mem.join(builder.allocator, "\" \"", escaped_args.items);
- const args = try std.mem.concat(builder.allocator, u8, &[_][]const u8{ "\"", partially_quoted, "\"" });
+ const partially_quoted = try std.mem.join(b.allocator, "\" \"", escaped_args.items);
+ const args = try std.mem.concat(b.allocator, u8, &[_][]const u8{ "\"", partially_quoted, "\"" });
var args_hash: [Sha256.digest_length]u8 = undefined;
Sha256.hash(args, &args_hash, .{});
@@ -1878,28 +1877,35 @@ fn make(step: *Step) !void {
);
const args_file = "args" ++ fs.path.sep_str ++ args_hex_hash;
- try builder.cache_root.handle.writeFile(args_file, args);
+ try b.cache_root.handle.writeFile(args_file, args);
- const resolved_args_file = try mem.concat(builder.allocator, u8, &.{
+ const resolved_args_file = try mem.concat(b.allocator, u8, &.{
"@",
- try builder.cache_root.join(builder.allocator, &.{args_file}),
+ try b.cache_root.join(b.allocator, &.{args_file}),
});
zig_args.shrinkRetainingCapacity(2);
try zig_args.append(resolved_args_file);
}
- const output_dir_nl = try builder.execFromStep(zig_args.items, &self.step);
- const build_output_dir = mem.trimRight(u8, output_dir_nl, "\r\n");
+ const output_bin_path = step.evalZigProcess(zig_args.items, prog_node) catch |err| switch (err) {
+ error.NeedCompileErrorCheck => {
+ assert(self.expect_errors.len != 0);
+ try checkCompileErrors(self);
+ return;
+ },
+ else => |e| return e,
+ };
+ const build_output_dir = fs.path.dirname(output_bin_path).?;
if (self.output_dir) |output_dir| {
- var src_dir = try std.fs.cwd().openIterableDir(build_output_dir, .{});
+ var src_dir = try fs.cwd().openIterableDir(build_output_dir, .{});
defer src_dir.close();
// Create the output directory if it doesn't exist.
- try std.fs.cwd().makePath(output_dir);
+ try fs.cwd().makePath(output_dir);
- var dest_dir = try std.fs.cwd().openDir(output_dir, .{});
+ var dest_dir = try fs.cwd().openDir(output_dir, .{});
defer dest_dir.close();
var it = src_dir.iterate();
@@ -1923,25 +1929,34 @@ fn make(step: *Step) !void {
// Update generated files
if (self.output_dir != null) {
- self.output_path_source.path = builder.pathJoin(
+ self.output_dirname_source.path = self.output_dir.?;
+
+ self.output_path_source.path = b.pathJoin(
&.{ self.output_dir.?, self.out_filename },
);
if (self.emit_h) {
- self.output_h_path_source.path = builder.pathJoin(
+ self.output_h_path_source.path = b.pathJoin(
&.{ self.output_dir.?, self.out_h_filename },
);
}
if (self.target.isWindows() or self.target.isUefi()) {
- self.output_pdb_path_source.path = builder.pathJoin(
+ self.output_pdb_path_source.path = b.pathJoin(
&.{ self.output_dir.?, self.out_pdb_filename },
);
}
}
- if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic and self.version != null and self.target.wantSharedLibSymLinks()) {
- try doAtomicSymLinks(builder.allocator, self.getOutputSource().getPath(builder), self.major_only_filename.?, self.name_only_filename.?);
+ if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic and
+ self.version != null and self.target.wantSharedLibSymLinks())
+ {
+ try doAtomicSymLinks(
+ step,
+ self.getOutputSource().getPath(b),
+ self.major_only_filename.?,
+ self.name_only_filename.?,
+ );
}
}
@@ -1983,30 +1998,27 @@ fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
}
pub fn doAtomicSymLinks(
- allocator: Allocator,
+ step: *Step,
output_path: []const u8,
filename_major_only: []const u8,
filename_name_only: []const u8,
) !void {
+ const arena = step.owner.allocator;
const out_dir = fs.path.dirname(output_path) orelse ".";
const out_basename = fs.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
- const major_only_path = try fs.path.join(
- allocator,
- &[_][]const u8{ out_dir, filename_major_only },
- );
- fs.atomicSymLink(allocator, out_basename, major_only_path) catch |err| {
- log.err("Unable to symlink {s} -> {s}", .{ major_only_path, out_basename });
- return err;
+ const major_only_path = try fs.path.join(arena, &.{ out_dir, filename_major_only });
+ fs.atomicSymLink(arena, out_basename, major_only_path) catch |err| {
+ return step.fail("unable to symlink {s} -> {s}: {s}", .{
+ major_only_path, out_basename, @errorName(err),
+ });
};
// sym link for libfoo.so to libfoo.so.1
- const name_only_path = try fs.path.join(
- allocator,
- &[_][]const u8{ out_dir, filename_name_only },
- );
- fs.atomicSymLink(allocator, filename_major_only, name_only_path) catch |err| {
- log.err("Unable to symlink {s} -> {s}", .{ name_only_path, filename_major_only });
- return err;
+ const name_only_path = try fs.path.join(arena, &.{ out_dir, filename_name_only });
+ fs.atomicSymLink(arena, filename_major_only, name_only_path) catch |err| {
+ return step.fail("Unable to symlink {s} -> {s}: {s}", .{
+ name_only_path, filename_major_only, @errorName(err),
+ });
};
}
@@ -2042,7 +2054,6 @@ fn getPkgConfigList(self: *std.Build) ![]const PkgConfigPkg {
error.FileNotFound => error.PkgConfigNotInstalled,
error.InvalidName => error.PkgConfigNotInstalled,
error.PkgConfigInvalidOutput => error.PkgConfigInvalidOutput,
- error.ChildExecFailed => error.PkgConfigFailed,
else => return err,
};
self.pkg_config_pkg_list = result;
@@ -2119,3 +2130,57 @@ const TransitiveDeps = struct {
}
}
};
+
+fn checkCompileErrors(self: *CompileStep) !void {
+ // Clear this field so that it does not get printed by the build runner.
+ const actual_eb = self.step.result_error_bundle;
+ self.step.result_error_bundle = std.zig.ErrorBundle.empty;
+
+ const arena = self.step.owner.allocator;
+
+ var actual_stderr_list = std.ArrayList(u8).init(arena);
+ try actual_eb.renderToWriter(.{
+ .ttyconf = .no_color,
+ .include_reference_trace = false,
+ .include_source_line = false,
+ }, actual_stderr_list.writer());
+ const actual_stderr = try actual_stderr_list.toOwnedSlice();
+
+ // Render the expected lines into a string that we can compare verbatim.
+ var expected_generated = std.ArrayList(u8).init(arena);
+
+ var actual_line_it = mem.split(u8, actual_stderr, "\n");
+ for (self.expect_errors) |expect_line| {
+ const actual_line = actual_line_it.next() orelse {
+ try expected_generated.appendSlice(expect_line);
+ try expected_generated.append('\n');
+ continue;
+ };
+ if (mem.endsWith(u8, actual_line, expect_line)) {
+ try expected_generated.appendSlice(actual_line);
+ try expected_generated.append('\n');
+ continue;
+ }
+ if (mem.startsWith(u8, expect_line, ":?:?: ")) {
+ if (mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..])) {
+ try expected_generated.appendSlice(actual_line);
+ try expected_generated.append('\n');
+ continue;
+ }
+ }
+ try expected_generated.appendSlice(expect_line);
+ try expected_generated.append('\n');
+ }
+
+ if (mem.eql(u8, expected_generated.items, actual_stderr)) return;
+
+ // TODO merge this with the testing.expectEqualStrings logic, and also CheckFile
+ return self.step.fail(
+ \\
+ \\========= expected: =====================
+ \\{s}
+ \\========= but found: ====================
+ \\{s}
+ \\=========================================
+ , .{ expected_generated.items, actual_stderr });
+}
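Aside (not part of the patch): the per-line matching rule implemented by checkCompileErrors above is a suffix match, with a leading ":?:?: " acting as a wildcard for the file:line:column prefix. A minimal self-contained sketch, with an illustrative function name:

const std = @import("std");
const mem = std.mem;

fn errorLineMatches(actual_line: []const u8, expect_line: []const u8) bool {
    // A plain expected line matches as a suffix of the rendered error line.
    if (mem.endsWith(u8, actual_line, expect_line)) return true;
    // ":?:?: message" ignores the source location and matches the message alone.
    if (mem.startsWith(u8, expect_line, ":?:?: "))
        return mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..]);
    return false;
}

test "wildcard location matches any source position" {
    try std.testing.expect(errorLineMatches(
        "src/main.zig:3:9: error: use of undeclared identifier 'x'",
        ":?:?: error: use of undeclared identifier 'x'",
    ));
}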
diff --git a/lib/std/Build/ConfigHeaderStep.zig b/lib/std/Build/ConfigHeaderStep.zig
index 595c1018f7..c1849b410e 100644
--- a/lib/std/Build/ConfigHeaderStep.zig
+++ b/lib/std/Build/ConfigHeaderStep.zig
@@ -1,9 +1,3 @@
-const std = @import("../std.zig");
-const ConfigHeaderStep = @This();
-const Step = std.Build.Step;
-
-pub const base_id: Step.Id = .config_header;
-
pub const Style = union(enum) {
/// The configure format supported by autotools. It uses `#undef foo` to
/// mark lines that can be substituted with different values.
@@ -34,7 +28,6 @@ pub const Value = union(enum) {
};
step: Step,
-builder: *std.Build,
values: std.StringArrayHashMap(Value),
output_file: std.Build.GeneratedFile,
@@ -42,43 +35,57 @@ style: Style,
max_bytes: usize,
include_path: []const u8,
+pub const base_id: Step.Id = .config_header;
+
pub const Options = struct {
style: Style = .blank,
max_bytes: usize = 2 * 1024 * 1024,
include_path: ?[]const u8 = null,
+ first_ret_addr: ?usize = null,
};
-pub fn create(builder: *std.Build, options: Options) *ConfigHeaderStep {
- const self = builder.allocator.create(ConfigHeaderStep) catch @panic("OOM");
- const name = if (options.style.getFileSource()) |s|
- builder.fmt("configure {s} header {s}", .{ @tagName(options.style), s.getDisplayName() })
- else
- builder.fmt("configure {s} header", .{@tagName(options.style)});
- self.* = .{
- .builder = builder,
- .step = Step.init(base_id, name, builder.allocator, make),
- .style = options.style,
- .values = std.StringArrayHashMap(Value).init(builder.allocator),
+pub fn create(owner: *std.Build, options: Options) *ConfigHeaderStep {
+ const self = owner.allocator.create(ConfigHeaderStep) catch @panic("OOM");
- .max_bytes = options.max_bytes,
- .include_path = "config.h",
- .output_file = .{ .step = &self.step },
- };
+ var include_path: []const u8 = "config.h";
if (options.style.getFileSource()) |s| switch (s) {
.path => |p| {
const basename = std.fs.path.basename(p);
if (std.mem.endsWith(u8, basename, ".h.in")) {
- self.include_path = basename[0 .. basename.len - 3];
+ include_path = basename[0 .. basename.len - 3];
}
},
else => {},
};
- if (options.include_path) |include_path| {
- self.include_path = include_path;
+ if (options.include_path) |p| {
+ include_path = p;
}
+ const name = if (options.style.getFileSource()) |s|
+ owner.fmt("configure {s} header {s} to {s}", .{
+ @tagName(options.style), s.getDisplayName(), include_path,
+ })
+ else
+ owner.fmt("configure {s} header to {s}", .{ @tagName(options.style), include_path });
+
+ self.* = .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = name,
+ .owner = owner,
+ .makeFn = make,
+ .first_ret_addr = options.first_ret_addr orelse @returnAddress(),
+ }),
+ .style = options.style,
+ .values = std.StringArrayHashMap(Value).init(owner.allocator),
+
+ .max_bytes = options.max_bytes,
+ .include_path = include_path,
+ .output_file = .{ .step = &self.step },
+ };
+
return self;
}
@@ -146,26 +153,20 @@ fn putValue(self: *ConfigHeaderStep, field_name: []const u8, comptime T: type, v
}
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const b = step.owner;
const self = @fieldParentPtr(ConfigHeaderStep, "step", step);
- const gpa = self.builder.allocator;
+ const gpa = b.allocator;
+ const arena = b.allocator;
- // The cache is used here not really as a way to speed things up - because writing
- // the data to a file would probably be very fast - but as a way to find a canonical
- // location to put build artifacts.
+ var man = b.cache.obtain();
+ defer man.deinit();
- // If, for example, a hard-coded path was used as the location to put ConfigHeaderStep
- // files, then two ConfigHeaderStep executing in parallel might clobber each other.
-
- // TODO port the cache system from the compiler to zig std lib. Until then
- // we construct the path directly, and no "cache hit" detection happens;
- // the files are always written.
- // Note there is very similar code over in WriteFileStep
- const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);
// Random bytes to make ConfigHeaderStep unique. Refresh this with new
// random bytes when ConfigHeaderStep implementation is modified in a
// non-backwards-compatible way.
- var hash = Hasher.init("PGuDTpidxyMqnkGM");
+ man.hash.add(@as(u32, 0xdef08d23));
var output = std.ArrayList(u8).init(gpa);
defer output.deinit();
@@ -177,15 +178,15 @@ fn make(step: *Step) !void {
switch (self.style) {
.autoconf => |file_source| {
try output.appendSlice(c_generated_line);
- const src_path = file_source.getPath(self.builder);
- const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
- try render_autoconf(contents, &output, self.values, src_path);
+ const src_path = file_source.getPath(b);
+ const contents = try std.fs.cwd().readFileAlloc(arena, src_path, self.max_bytes);
+ try render_autoconf(step, contents, &output, self.values, src_path);
},
.cmake => |file_source| {
try output.appendSlice(c_generated_line);
- const src_path = file_source.getPath(self.builder);
- const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
- try render_cmake(contents, &output, self.values, src_path);
+ const src_path = file_source.getPath(b);
+ const contents = try std.fs.cwd().readFileAlloc(arena, src_path, self.max_bytes);
+ try render_cmake(step, contents, &output, self.values, src_path);
},
.blank => {
try output.appendSlice(c_generated_line);
@@ -197,43 +198,44 @@ fn make(step: *Step) !void {
},
}
- hash.update(output.items);
+ man.hash.addBytes(output.items);
- var digest: [16]u8 = undefined;
- hash.final(&digest);
- var hash_basename: [digest.len * 2]u8 = undefined;
- _ = std.fmt.bufPrint(
- &hash_basename,
- "{s}",
- .{std.fmt.fmtSliceHexLower(&digest)},
- ) catch unreachable;
+ if (try step.cacheHit(&man)) {
+ const digest = man.final();
+ self.output_file.path = try b.cache_root.join(arena, &.{
+ "o", &digest, self.include_path,
+ });
+ return;
+ }
- const output_dir = try self.builder.cache_root.join(gpa, &.{ "o", &hash_basename });
+ const digest = man.final();
// If output_path has directory parts, deal with them. Example:
// output_dir is zig-cache/o/HASH
// output_path is libavutil/avconfig.h
// We want to open directory zig-cache/o/HASH/libavutil/
// but keep output_dir as zig-cache/o/HASH for -I include
- const sub_dir_path = if (std.fs.path.dirname(self.include_path)) |d|
- try std.fs.path.join(gpa, &.{ output_dir, d })
- else
- output_dir;
+ const sub_path = try std.fs.path.join(arena, &.{ "o", &digest, self.include_path });
+ const sub_path_dirname = std.fs.path.dirname(sub_path).?;
- var dir = std.fs.cwd().makeOpenPath(sub_dir_path, .{}) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
- return err;
+ b.cache_root.handle.makePath(sub_path_dirname) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, sub_path_dirname, @errorName(err),
+ });
};
- defer dir.close();
- try dir.writeFile(std.fs.path.basename(self.include_path), output.items);
+ b.cache_root.handle.writeFile(sub_path, output.items) catch |err| {
+ return step.fail("unable to write file '{}{s}': {s}", .{
+ b.cache_root, sub_path, @errorName(err),
+ });
+ };
- self.output_file.path = try std.fs.path.join(self.builder.allocator, &.{
- output_dir, self.include_path,
- });
+ self.output_file.path = try b.cache_root.join(arena, &.{sub_path});
+ try man.writeManifest();
}
fn render_autoconf(
+ step: *Step,
contents: []const u8,
output: *std.ArrayList(u8),
values: std.StringArrayHashMap(Value),
@@ -260,7 +262,7 @@ fn render_autoconf(
}
const name = it.rest();
const kv = values_copy.fetchSwapRemove(name) orelse {
- std.debug.print("{s}:{d}: error: unspecified config header value: '{s}'\n", .{
+ try step.addError("{s}:{d}: error: unspecified config header value: '{s}'", .{
src_path, line_index + 1, name,
});
any_errors = true;
@@ -270,15 +272,17 @@ fn render_autoconf(
}
for (values_copy.keys()) |name| {
- std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
+ try step.addError("{s}: error: config header value unused: '{s}'", .{ src_path, name });
+ any_errors = true;
}
if (any_errors) {
- return error.HeaderConfigFailed;
+ return error.MakeFailed;
}
}
fn render_cmake(
+ step: *Step,
contents: []const u8,
output: *std.ArrayList(u8),
values: std.StringArrayHashMap(Value),
@@ -304,14 +308,14 @@ fn render_cmake(
continue;
}
const name = it.next() orelse {
- std.debug.print("{s}:{d}: error: missing define name\n", .{
+ try step.addError("{s}:{d}: error: missing define name", .{
src_path, line_index + 1,
});
any_errors = true;
continue;
};
const kv = values_copy.fetchSwapRemove(name) orelse {
- std.debug.print("{s}:{d}: error: unspecified config header value: '{s}'\n", .{
+ try step.addError("{s}:{d}: error: unspecified config header value: '{s}'", .{
src_path, line_index + 1, name,
});
any_errors = true;
@@ -321,7 +325,8 @@ fn render_cmake(
}
for (values_copy.keys()) |name| {
- std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
+ try step.addError("{s}: error: config header value unused: '{s}'", .{ src_path, name });
+ any_errors = true;
}
if (any_errors) {
@@ -426,3 +431,7 @@ fn renderValueNasm(output: *std.ArrayList(u8), name: []const u8, value: Value) !
},
}
}
+
+const std = @import("../std.zig");
+const ConfigHeaderStep = @This();
+const Step = std.Build.Step;
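Usage note (illustrative, not part of the patch): a sketch of driving the step above from a build script, assuming the std.Build.addConfigHeader wrapper forwards ConfigHeaderStep.Options plus a values tuple, and that CompileStep.addConfigHeader wires up the resulting include path; file names and values are made up:

const config_h = b.addConfigHeader(.{
    .style = .{ .cmake = .{ .path = "config.h.in" } },
    .include_path = "mylib/config.h",
}, .{
    .VERSION = "1.2.3",
    .HAVE_THREADS = true,
});
// The generated header lands in the cache under o/<digest>/mylib/config.h.
exe.addConfigHeader(config_h);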
diff --git a/lib/std/Build/EmulatableRunStep.zig b/lib/std/Build/EmulatableRunStep.zig
deleted file mode 100644
index d4b5238524..0000000000
--- a/lib/std/Build/EmulatableRunStep.zig
+++ /dev/null
@@ -1,213 +0,0 @@
-//! Unlike `RunStep` this step will provide emulation, when enabled, to run foreign binaries.
-//! When a binary is foreign, but emulation for the target is disabled, the specified binary
-//! will not be run and therefore also not validated against its output.
-//! This step can be useful when wishing to run a built binary on multiple platforms,
-//! without having to verify if it's possible to be ran against.
-
-const std = @import("../std.zig");
-const Step = std.Build.Step;
-const CompileStep = std.Build.CompileStep;
-const RunStep = std.Build.RunStep;
-
-const fs = std.fs;
-const process = std.process;
-const EnvMap = process.EnvMap;
-
-const EmulatableRunStep = @This();
-
-pub const base_id = .emulatable_run;
-
-const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
-
-step: Step,
-builder: *std.Build,
-
-/// The artifact (executable) to be run by this step
-exe: *CompileStep,
-
-/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
-expected_term: ?std.ChildProcess.Term = .{ .Exited = 0 },
-
-/// Override this field to modify the environment
-env_map: ?*EnvMap,
-
-/// Set this to modify the current working directory
-cwd: ?[]const u8,
-
-stdout_action: RunStep.StdIoAction = .inherit,
-stderr_action: RunStep.StdIoAction = .inherit,
-
-/// When set to true, hides the warning of skipping a foreign binary which cannot be run on the host
-/// or through emulation.
-hide_foreign_binaries_warning: bool,
-
-/// Creates a step that will execute the given artifact. This step will allow running the
-/// binary through emulation when any of the emulation options such as `enable_rosetta` are set to true.
-/// When set to false, and the binary is foreign, running the executable is skipped.
-/// Asserts given artifact is an executable.
-pub fn create(builder: *std.Build, name: []const u8, artifact: *CompileStep) *EmulatableRunStep {
- std.debug.assert(artifact.kind == .exe or artifact.kind == .test_exe);
- const self = builder.allocator.create(EmulatableRunStep) catch @panic("OOM");
-
- const option_name = "hide-foreign-warnings";
- const hide_warnings = if (builder.available_options_map.get(option_name) == null) warn: {
- break :warn builder.option(bool, option_name, "Hide the warning when a foreign binary which is incompatible is skipped") orelse false;
- } else false;
-
- self.* = .{
- .builder = builder,
- .step = Step.init(.emulatable_run, name, builder.allocator, make),
- .exe = artifact,
- .env_map = null,
- .cwd = null,
- .hide_foreign_binaries_warning = hide_warnings,
- };
- self.step.dependOn(&artifact.step);
-
- return self;
-}
-
-fn make(step: *Step) !void {
- const self = @fieldParentPtr(EmulatableRunStep, "step", step);
- const host_info = self.builder.host;
-
- var argv_list = std.ArrayList([]const u8).init(self.builder.allocator);
- defer argv_list.deinit();
-
- const need_cross_glibc = self.exe.target.isGnuLibC() and self.exe.is_linking_libc;
- switch (host_info.getExternalExecutor(self.exe.target_info, .{
- .qemu_fixes_dl = need_cross_glibc and self.builder.glibc_runtimes_dir != null,
- .link_libc = self.exe.is_linking_libc,
- })) {
- .native => {},
- .rosetta => if (!self.builder.enable_rosetta) return warnAboutForeignBinaries(self),
- .wine => |bin_name| if (self.builder.enable_wine) {
- try argv_list.append(bin_name);
- } else return,
- .qemu => |bin_name| if (self.builder.enable_qemu) {
- const glibc_dir_arg = if (need_cross_glibc)
- self.builder.glibc_runtimes_dir orelse return
- else
- null;
- try argv_list.append(bin_name);
- if (glibc_dir_arg) |dir| {
- // TODO look into making this a call to `linuxTriple`. This
- // needs the directory to be called "i686" rather than
- // "x86" which is why we do it manually here.
- const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
- const cpu_arch = self.exe.target.getCpuArch();
- const os_tag = self.exe.target.getOsTag();
- const abi = self.exe.target.getAbi();
- const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
- "i686"
- else
- @tagName(cpu_arch);
- const full_dir = try std.fmt.allocPrint(self.builder.allocator, fmt_str, .{
- dir, cpu_arch_name, @tagName(os_tag), @tagName(abi),
- });
-
- try argv_list.append("-L");
- try argv_list.append(full_dir);
- }
- } else return warnAboutForeignBinaries(self),
- .darling => |bin_name| if (self.builder.enable_darling) {
- try argv_list.append(bin_name);
- } else return warnAboutForeignBinaries(self),
- .wasmtime => |bin_name| if (self.builder.enable_wasmtime) {
- try argv_list.append(bin_name);
- try argv_list.append("--dir=.");
- } else return warnAboutForeignBinaries(self),
- else => return warnAboutForeignBinaries(self),
- }
-
- if (self.exe.target.isWindows()) {
- // On Windows we don't have rpaths so we have to add .dll search paths to PATH
- RunStep.addPathForDynLibsInternal(&self.step, self.builder, self.exe);
- }
-
- const executable_path = self.exe.installed_path orelse self.exe.getOutputSource().getPath(self.builder);
- try argv_list.append(executable_path);
-
- try RunStep.runCommand(
- argv_list.items,
- self.builder,
- self.expected_term,
- self.stdout_action,
- self.stderr_action,
- .Inherit,
- self.env_map,
- self.cwd,
- false,
- );
-}
-
-pub fn expectStdErrEqual(self: *EmulatableRunStep, bytes: []const u8) void {
- self.stderr_action = .{ .expect_exact = self.builder.dupe(bytes) };
-}
-
-pub fn expectStdOutEqual(self: *EmulatableRunStep, bytes: []const u8) void {
- self.stdout_action = .{ .expect_exact = self.builder.dupe(bytes) };
-}
-
-fn warnAboutForeignBinaries(step: *EmulatableRunStep) void {
- if (step.hide_foreign_binaries_warning) return;
- const builder = step.builder;
- const artifact = step.exe;
-
- const host_name = builder.host.target.zigTriple(builder.allocator) catch @panic("unhandled error");
- const foreign_name = artifact.target.zigTriple(builder.allocator) catch @panic("unhandled error");
- const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch @panic("unhandled error");
- const need_cross_glibc = artifact.target.isGnuLibC() and artifact.is_linking_libc;
- switch (builder.host.getExternalExecutor(target_info, .{
- .qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,
- .link_libc = artifact.is_linking_libc,
- })) {
- .native => unreachable,
- .bad_dl => |foreign_dl| {
- const host_dl = builder.host.dynamic_linker.get() orelse "(none)";
- std.debug.print("the host system does not appear to be capable of executing binaries from the target because the host dynamic linker is '{s}', while the target dynamic linker is '{s}'. Consider setting the dynamic linker as '{s}'.\n", .{
- host_dl, foreign_dl, host_dl,
- });
- },
- .bad_os_or_cpu => {
- std.debug.print("the host system ({s}) does not appear to be capable of executing binaries from the target ({s}).\n", .{
- host_name, foreign_name,
- });
- },
- .darling => if (!builder.enable_darling) {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling darling.\n",
- .{ host_name, foreign_name },
- );
- },
- .rosetta => if (!builder.enable_rosetta) {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling rosetta.\n",
- .{ host_name, foreign_name },
- );
- },
- .wine => if (!builder.enable_wine) {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling wine.\n",
- .{ host_name, foreign_name },
- );
- },
- .qemu => if (!builder.enable_qemu) {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling qemu.\n",
- .{ host_name, foreign_name },
- );
- },
- .wasmtime => {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling wasmtime.\n",
- .{ host_name, foreign_name },
- );
- },
- }
-}
diff --git a/lib/std/Build/FmtStep.zig b/lib/std/Build/FmtStep.zig
index 4a5efde2bd..2a82342336 100644
--- a/lib/std/Build/FmtStep.zig
+++ b/lib/std/Build/FmtStep.zig
@@ -1,32 +1,73 @@
-const std = @import("../std.zig");
-const Step = std.Build.Step;
-const FmtStep = @This();
+//! This step has two modes:
+//! * Modify mode: directly modify source files, formatting them in place.
+//! * Check mode: fail the step if a non-conforming file is found.
+
+step: Step,
+paths: []const []const u8,
+exclude_paths: []const []const u8,
+check: bool,
pub const base_id = .fmt;
-step: Step,
-builder: *std.Build,
-argv: [][]const u8,
+pub const Options = struct {
+ paths: []const []const u8 = &.{},
+ exclude_paths: []const []const u8 = &.{},
+ /// If true, fails the build step when any non-conforming files are encountered.
+ check: bool = false,
+};
-pub fn create(builder: *std.Build, paths: []const []const u8) *FmtStep {
- const self = builder.allocator.create(FmtStep) catch @panic("OOM");
- const name = "zig fmt";
- self.* = FmtStep{
- .step = Step.init(.fmt, name, builder.allocator, make),
- .builder = builder,
- .argv = builder.allocator.alloc([]u8, paths.len + 2) catch @panic("OOM"),
+pub fn create(owner: *std.Build, options: Options) *FmtStep {
+ const self = owner.allocator.create(FmtStep) catch @panic("OOM");
+ const name = if (options.check) "zig fmt --check" else "zig fmt";
+ self.* = .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = name,
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .paths = options.paths,
+ .exclude_paths = options.exclude_paths,
+ .check = options.check,
};
-
- self.argv[0] = builder.zig_exe;
- self.argv[1] = "fmt";
- for (paths, 0..) |path, i| {
- self.argv[2 + i] = builder.pathFromRoot(path);
- }
return self;
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ // zig fmt is fast enough that no progress is needed.
+ _ = prog_node;
+
+ // TODO: if check=false, this means we are modifying source files in place, which
+ // is an operation that could race against other operations also modifying source files
+ // in place. In this case, this step should obtain a write lock while making those
+ // modifications.
+
+ const b = step.owner;
+ const arena = b.allocator;
const self = @fieldParentPtr(FmtStep, "step", step);
- return self.builder.spawnChild(self.argv);
+ var argv: std.ArrayListUnmanaged([]const u8) = .{};
+ try argv.ensureUnusedCapacity(arena, 2 + 1 + self.paths.len + 2 * self.exclude_paths.len);
+
+ argv.appendAssumeCapacity(b.zig_exe);
+ argv.appendAssumeCapacity("fmt");
+
+ if (self.check) {
+ argv.appendAssumeCapacity("--check");
+ }
+
+ for (self.paths) |p| {
+ argv.appendAssumeCapacity(b.pathFromRoot(p));
+ }
+
+ for (self.exclude_paths) |p| {
+ argv.appendAssumeCapacity("--exclude");
+ argv.appendAssumeCapacity(b.pathFromRoot(p));
+ }
+
+ return step.evalChildProcess(argv.items);
}
+
+const std = @import("../std.zig");
+const Step = std.Build.Step;
+const FmtStep = @This();
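Usage note (illustrative, not part of the patch): a sketch of the new check mode, assuming the b.addFmt wrapper forwards FmtStep.Options; the step name and paths are made up:

const fmt_check = b.addFmt(.{
    .paths = &.{ "src", "build.zig" },
    .exclude_paths = &.{"src/generated.zig"},
    .check = true,
});
// Fails the build when any listed file is not `zig fmt` formatted.
b.step("fmt-check", "Check source formatting").dependOn(&fmt_check.step);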
diff --git a/lib/std/Build/InstallArtifactStep.zig b/lib/std/Build/InstallArtifactStep.zig
index c419c85fdf..445f1e8ea8 100644
--- a/lib/std/Build/InstallArtifactStep.zig
+++ b/lib/std/Build/InstallArtifactStep.zig
@@ -3,83 +3,133 @@ const Step = std.Build.Step;
const CompileStep = std.Build.CompileStep;
const InstallDir = std.Build.InstallDir;
const InstallArtifactStep = @This();
+const fs = std.fs;
pub const base_id = .install_artifact;
step: Step,
-builder: *std.Build,
+dest_builder: *std.Build,
artifact: *CompileStep,
dest_dir: InstallDir,
pdb_dir: ?InstallDir,
h_dir: ?InstallDir,
+/// If non-null, adds additional path components relative to dest_dir, and
+/// overrides the basename of the CompileStep.
+dest_sub_path: ?[]const u8,
-pub fn create(builder: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
+pub fn create(owner: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
if (artifact.install_step) |s| return s;
- const self = builder.allocator.create(InstallArtifactStep) catch @panic("OOM");
+ const self = owner.allocator.create(InstallArtifactStep) catch @panic("OOM");
self.* = InstallArtifactStep{
- .builder = builder,
- .step = Step.init(.install_artifact, builder.fmt("install {s}", .{artifact.step.name}), builder.allocator, make),
+ .step = Step.init(.{
+ .id = base_id,
+ .name = owner.fmt("install {s}", .{artifact.name}),
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .dest_builder = owner,
.artifact = artifact,
.dest_dir = artifact.override_dest_dir orelse switch (artifact.kind) {
.obj => @panic("Cannot install a .obj build artifact."),
- .@"test" => @panic("Cannot install a .test build artifact, use .test_exe instead."),
- .exe, .test_exe => InstallDir{ .bin = {} },
+ .exe, .@"test" => InstallDir{ .bin = {} },
.lib => InstallDir{ .lib = {} },
},
.pdb_dir = if (artifact.producesPdbFile()) blk: {
- if (artifact.kind == .exe or artifact.kind == .test_exe) {
+ if (artifact.kind == .exe or artifact.kind == .@"test") {
break :blk InstallDir{ .bin = {} };
} else {
break :blk InstallDir{ .lib = {} };
}
} else null,
.h_dir = if (artifact.kind == .lib and artifact.emit_h) .header else null,
+ .dest_sub_path = null,
};
self.step.dependOn(&artifact.step);
artifact.install_step = self;
- builder.pushInstalledFile(self.dest_dir, artifact.out_filename);
+ owner.pushInstalledFile(self.dest_dir, artifact.out_filename);
if (self.artifact.isDynamicLibrary()) {
if (artifact.major_only_filename) |name| {
- builder.pushInstalledFile(.lib, name);
+ owner.pushInstalledFile(.lib, name);
}
if (artifact.name_only_filename) |name| {
- builder.pushInstalledFile(.lib, name);
+ owner.pushInstalledFile(.lib, name);
}
if (self.artifact.target.isWindows()) {
- builder.pushInstalledFile(.lib, artifact.out_lib_filename);
+ owner.pushInstalledFile(.lib, artifact.out_lib_filename);
}
}
if (self.pdb_dir) |pdb_dir| {
- builder.pushInstalledFile(pdb_dir, artifact.out_pdb_filename);
+ owner.pushInstalledFile(pdb_dir, artifact.out_pdb_filename);
}
if (self.h_dir) |h_dir| {
- builder.pushInstalledFile(h_dir, artifact.out_h_filename);
+ owner.pushInstalledFile(h_dir, artifact.out_h_filename);
}
return self;
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const src_builder = step.owner;
const self = @fieldParentPtr(InstallArtifactStep, "step", step);
- const builder = self.builder;
+ const dest_builder = self.dest_builder;
- const full_dest_path = builder.getInstallPath(self.dest_dir, self.artifact.out_filename);
- try builder.updateFile(self.artifact.getOutputSource().getPath(builder), full_dest_path);
- if (self.artifact.isDynamicLibrary() and self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) {
- try CompileStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
+ const dest_sub_path = if (self.dest_sub_path) |sub_path| sub_path else self.artifact.out_filename;
+ const full_dest_path = dest_builder.getInstallPath(self.dest_dir, dest_sub_path);
+ const cwd = fs.cwd();
+
+ var all_cached = true;
+
+ {
+ const full_src_path = self.artifact.getOutputSource().getPath(src_builder);
+ const p = fs.Dir.updateFile(cwd, full_src_path, cwd, full_dest_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_dest_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and p == .fresh;
}
- if (self.artifact.isDynamicLibrary() and self.artifact.target.isWindows() and self.artifact.emit_implib != .no_emit) {
- const full_implib_path = builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);
- try builder.updateFile(self.artifact.getOutputLibSource().getPath(builder), full_implib_path);
+
+ if (self.artifact.isDynamicLibrary() and
+ self.artifact.version != null and
+ self.artifact.target.wantSharedLibSymLinks())
+ {
+ try CompileStep.doAtomicSymLinks(step, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
+ }
+ if (self.artifact.isDynamicLibrary() and
+ self.artifact.target.isWindows() and
+ self.artifact.emit_implib != .no_emit)
+ {
+ const full_src_path = self.artifact.getOutputLibSource().getPath(src_builder);
+ const full_implib_path = dest_builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);
+ const p = fs.Dir.updateFile(cwd, full_src_path, cwd, full_implib_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_implib_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and p == .fresh;
}
if (self.pdb_dir) |pdb_dir| {
- const full_pdb_path = builder.getInstallPath(pdb_dir, self.artifact.out_pdb_filename);
- try builder.updateFile(self.artifact.getOutputPdbSource().getPath(builder), full_pdb_path);
+ const full_src_path = self.artifact.getOutputPdbSource().getPath(src_builder);
+ const full_pdb_path = dest_builder.getInstallPath(pdb_dir, self.artifact.out_pdb_filename);
+ const p = fs.Dir.updateFile(cwd, full_src_path, cwd, full_pdb_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_pdb_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and p == .fresh;
}
if (self.h_dir) |h_dir| {
- const full_h_path = builder.getInstallPath(h_dir, self.artifact.out_h_filename);
- try builder.updateFile(self.artifact.getOutputHSource().getPath(builder), full_h_path);
+ const full_src_path = self.artifact.getOutputHSource().getPath(src_builder);
+ const full_h_path = dest_builder.getInstallPath(h_dir, self.artifact.out_h_filename);
+ const p = fs.Dir.updateFile(cwd, full_src_path, cwd, full_h_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_h_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and p == .fresh;
}
self.artifact.installed_path = full_dest_path;
+ step.result_cached = all_cached;
}
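Usage note (illustrative, not part of the patch): a sketch of the new dest_sub_path field, using the create() signature shown above; the destination path is made up:

const install = InstallArtifactStep.create(b, exe);
// Installs to <prefix>/bin/tools/mytool instead of <prefix>/bin/<out_filename>.
install.dest_sub_path = "tools/mytool";
b.getInstallStep().dependOn(&install.step);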
diff --git a/lib/std/Build/InstallDirStep.zig b/lib/std/Build/InstallDirStep.zig
index 41dbb3e35a..d9ea248913 100644
--- a/lib/std/Build/InstallDirStep.zig
+++ b/lib/std/Build/InstallDirStep.zig
@@ -4,14 +4,12 @@ const fs = std.fs;
const Step = std.Build.Step;
const InstallDir = std.Build.InstallDir;
const InstallDirStep = @This();
-const log = std.log;
step: Step,
-builder: *std.Build,
options: Options,
/// This is used by the build system when a file being installed comes from one
/// package but is being installed by another.
-override_source_builder: ?*std.Build = null,
+dest_builder: *std.Build,
pub const base_id = .install_dir;
@@ -40,31 +38,35 @@ pub const Options = struct {
}
};
-pub fn init(
- builder: *std.Build,
- options: Options,
-) InstallDirStep {
- builder.pushInstalledFile(options.install_dir, options.install_subdir);
- return InstallDirStep{
- .builder = builder,
- .step = Step.init(.install_dir, builder.fmt("install {s}/", .{options.source_dir}), builder.allocator, make),
- .options = options.dupe(builder),
+pub fn init(owner: *std.Build, options: Options) InstallDirStep {
+ owner.pushInstalledFile(options.install_dir, options.install_subdir);
+ return .{
+ .step = Step.init(.{
+ .id = .install_dir,
+ .name = owner.fmt("install {s}/", .{options.source_dir}),
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .options = options.dupe(owner),
+ .dest_builder = owner,
};
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
const self = @fieldParentPtr(InstallDirStep, "step", step);
- const dest_prefix = self.builder.getInstallPath(self.options.install_dir, self.options.install_subdir);
- const src_builder = self.override_source_builder orelse self.builder;
- const full_src_dir = src_builder.pathFromRoot(self.options.source_dir);
- var src_dir = std.fs.cwd().openIterableDir(full_src_dir, .{}) catch |err| {
- log.err("InstallDirStep: unable to open source directory '{s}': {s}", .{
- full_src_dir, @errorName(err),
+ const dest_builder = self.dest_builder;
+ const arena = dest_builder.allocator;
+ const dest_prefix = dest_builder.getInstallPath(self.options.install_dir, self.options.install_subdir);
+ const src_builder = self.step.owner;
+ var src_dir = src_builder.build_root.handle.openIterableDir(self.options.source_dir, .{}) catch |err| {
+ return step.fail("unable to open source directory '{}{s}': {s}", .{
+ src_builder.build_root, self.options.source_dir, @errorName(err),
});
- return error.StepFailed;
};
defer src_dir.close();
- var it = try src_dir.walk(self.builder.allocator);
+ var it = try src_dir.walk(arena);
+ var all_cached = true;
next_entry: while (try it.next()) |entry| {
for (self.options.exclude_extensions) |ext| {
if (mem.endsWith(u8, entry.path, ext)) {
@@ -72,22 +74,37 @@ fn make(step: *Step) !void {
}
}
- const full_path = self.builder.pathJoin(&.{ full_src_dir, entry.path });
- const dest_path = self.builder.pathJoin(&.{ dest_prefix, entry.path });
+ // relative to src build root
+ const src_sub_path = try fs.path.join(arena, &.{ self.options.source_dir, entry.path });
+ const dest_path = try fs.path.join(arena, &.{ dest_prefix, entry.path });
+ const cwd = fs.cwd();
switch (entry.kind) {
- .Directory => try fs.cwd().makePath(dest_path),
+ .Directory => try cwd.makePath(dest_path),
.File => {
for (self.options.blank_extensions) |ext| {
if (mem.endsWith(u8, entry.path, ext)) {
- try self.builder.truncateFile(dest_path);
+ try dest_builder.truncateFile(dest_path);
continue :next_entry;
}
}
- try self.builder.updateFile(full_path, dest_path);
+ const prev_status = fs.Dir.updateFile(
+ src_builder.build_root.handle,
+ src_sub_path,
+ cwd,
+ dest_path,
+ .{},
+ ) catch |err| {
+ return step.fail("unable to update file from '{}{s}' to '{s}': {s}", .{
+ src_builder.build_root, src_sub_path, dest_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and prev_status == .fresh;
},
else => continue,
}
}
+
+ step.result_cached = all_cached;
}
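Usage note (illustrative, not part of the patch): a sketch of this step via the installDirectory wrapper, assuming it accepts InstallDirStep.Options in this version; directory names are made up:

b.installDirectory(.{
    .source_dir = "assets",
    .install_dir = .prefix,
    .install_subdir = "share/myapp",
    .exclude_extensions = &.{".tmp"},
});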
diff --git a/lib/std/Build/InstallFileStep.zig b/lib/std/Build/InstallFileStep.zig
index 8c8d8ad2d4..011ad48208 100644
--- a/lib/std/Build/InstallFileStep.zig
+++ b/lib/std/Build/InstallFileStep.zig
@@ -3,38 +3,55 @@ const Step = std.Build.Step;
const FileSource = std.Build.FileSource;
const InstallDir = std.Build.InstallDir;
const InstallFileStep = @This();
+const assert = std.debug.assert;
pub const base_id = .install_file;
step: Step,
-builder: *std.Build,
source: FileSource,
dir: InstallDir,
dest_rel_path: []const u8,
/// This is used by the build system when a file being installed comes from one
/// package but is being installed by another.
-override_source_builder: ?*std.Build = null,
+dest_builder: *std.Build,
-pub fn init(
- builder: *std.Build,
+pub fn create(
+ owner: *std.Build,
source: FileSource,
dir: InstallDir,
dest_rel_path: []const u8,
-) InstallFileStep {
- builder.pushInstalledFile(dir, dest_rel_path);
- return InstallFileStep{
- .builder = builder,
- .step = Step.init(.install_file, builder.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }), builder.allocator, make),
- .source = source.dupe(builder),
- .dir = dir.dupe(builder),
- .dest_rel_path = builder.dupePath(dest_rel_path),
+) *InstallFileStep {
+ assert(dest_rel_path.len != 0);
+ owner.pushInstalledFile(dir, dest_rel_path);
+ const self = owner.allocator.create(InstallFileStep) catch @panic("OOM");
+ self.* = .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = owner.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }),
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .source = source.dupe(owner),
+ .dir = dir.dupe(owner),
+ .dest_rel_path = owner.dupePath(dest_rel_path),
+ .dest_builder = owner,
};
+ source.addStepDependencies(&self.step);
+ return self;
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const src_builder = step.owner;
const self = @fieldParentPtr(InstallFileStep, "step", step);
- const src_builder = self.override_source_builder orelse self.builder;
- const full_src_path = self.source.getPath(src_builder);
- const full_dest_path = self.builder.getInstallPath(self.dir, self.dest_rel_path);
- try self.builder.updateFile(full_src_path, full_dest_path);
+ const dest_builder = self.dest_builder;
+ const full_src_path = self.source.getPath2(src_builder, step);
+ const full_dest_path = dest_builder.getInstallPath(self.dir, self.dest_rel_path);
+ const cwd = std.fs.cwd();
+ const prev = std.fs.Dir.updateFile(cwd, full_src_path, cwd, full_dest_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_dest_path, @errorName(err),
+ });
+ };
+ step.result_cached = prev == .fresh;
}
diff --git a/lib/std/Build/LogStep.zig b/lib/std/Build/LogStep.zig
deleted file mode 100644
index 6d51df8cbd..0000000000
--- a/lib/std/Build/LogStep.zig
+++ /dev/null
@@ -1,23 +0,0 @@
-const std = @import("../std.zig");
-const log = std.log;
-const Step = std.Build.Step;
-const LogStep = @This();
-
-pub const base_id = .log;
-
-step: Step,
-builder: *std.Build,
-data: []const u8,
-
-pub fn init(builder: *std.Build, data: []const u8) LogStep {
- return LogStep{
- .builder = builder,
- .step = Step.init(.log, builder.fmt("log {s}", .{data}), builder.allocator, make),
- .data = builder.dupe(data),
- };
-}
-
-fn make(step: *Step) anyerror!void {
- const self = @fieldParentPtr(LogStep, "step", step);
- log.info("{s}", .{self.data});
-}
diff --git a/lib/std/Build/ObjCopyStep.zig b/lib/std/Build/ObjCopyStep.zig
index aea5b8975c..608c56591f 100644
--- a/lib/std/Build/ObjCopyStep.zig
+++ b/lib/std/Build/ObjCopyStep.zig
@@ -21,7 +21,6 @@ pub const RawFormat = enum {
};
step: Step,
-builder: *std.Build,
file_source: std.Build.FileSource,
basename: []const u8,
output_file: std.Build.GeneratedFile,
@@ -38,19 +37,18 @@ pub const Options = struct {
};
pub fn create(
- builder: *std.Build,
+ owner: *std.Build,
file_source: std.Build.FileSource,
options: Options,
) *ObjCopyStep {
- const self = builder.allocator.create(ObjCopyStep) catch @panic("OOM");
+ const self = owner.allocator.create(ObjCopyStep) catch @panic("OOM");
self.* = ObjCopyStep{
- .step = Step.init(
- base_id,
- builder.fmt("objcopy {s}", .{file_source.getDisplayName()}),
- builder.allocator,
- make,
- ),
- .builder = builder,
+ .step = Step.init(.{
+ .id = base_id,
+ .name = owner.fmt("objcopy {s}", .{file_source.getDisplayName()}),
+ .owner = owner,
+ .makeFn = make,
+ }),
.file_source = file_source,
.basename = options.basename orelse file_source.getDisplayName(),
.output_file = std.Build.GeneratedFile{ .step = &self.step },
@@ -67,9 +65,9 @@ pub fn getOutputSource(self: *const ObjCopyStep) std.Build.FileSource {
return .{ .generated = &self.output_file };
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ const b = step.owner;
const self = @fieldParentPtr(ObjCopyStep, "step", step);
- const b = self.builder;
var man = b.cache.obtain();
defer man.deinit();
@@ -84,7 +82,7 @@ fn make(step: *Step) !void {
man.hash.addOptional(self.pad_to);
man.hash.addOptional(self.format);
- if (man.hit() catch |err| failWithCacheError(man, err)) {
+ if (try step.cacheHit(&man)) {
// Cache hit, skip subprocess execution.
const digest = man.final();
self.output_file.path = try b.cache_root.join(b.allocator, &.{
@@ -97,8 +95,7 @@ fn make(step: *Step) !void {
const full_dest_path = try b.cache_root.join(b.allocator, &.{ "o", &digest, self.basename });
const cache_path = "o" ++ fs.path.sep_str ++ digest;
b.cache_root.handle.makePath(cache_path) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ cache_path, @errorName(err) });
- return err;
+ return step.fail("unable to make path {s}: {s}", .{ cache_path, @errorName(err) });
};
var argv = std.ArrayList([]const u8).init(b.allocator);
@@ -116,23 +113,10 @@ fn make(step: *Step) !void {
};
try argv.appendSlice(&.{ full_src_path, full_dest_path });
- _ = try self.builder.execFromStep(argv.items, &self.step);
+
+ try argv.append("--listen=-");
+ _ = try step.evalZigProcess(argv.items, prog_node);
self.output_file.path = full_dest_path;
try man.writeManifest();
}
-
-/// TODO consolidate this with the same function in RunStep?
-/// Also properly deal with concurrency (see open PR)
-fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
- const i = man.failed_file_index orelse failWithSimpleError(err);
- const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
- const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
- std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
- std.process.exit(1);
-}
-
-fn failWithSimpleError(err: anyerror) noreturn {
- std.debug.print("{s}\n", .{@errorName(err)});
- std.process.exit(1);
-}
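// Usage sketch: driving the reworked ObjCopyStep from a build script. The
// firmware names and the .bin format are illustrative, the Options fields
// shown (basename, format) follow the values hashed in make() above, and the
// b.addInstallFileWithDir wrapper is assumed to be available.
const std = @import("std");

pub fn build(b: *std.Build) void {
    const elf = b.addExecutable(.{
        .name = "firmware",
        .root_source_file = .{ .path = "src/main.zig" },
    });
    const objcopy = std.Build.ObjCopyStep.create(b, elf.getOutputSource(), .{
        .basename = "firmware.bin",
        .format = .bin,
    });
    const install = b.addInstallFileWithDir(objcopy.getOutputSource(), .bin, "firmware.bin");
    b.getInstallStep().dependOn(&install.step);
}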
diff --git a/lib/std/Build/OptionsStep.zig b/lib/std/Build/OptionsStep.zig
index e5c3e23821..859d0b68c9 100644
--- a/lib/std/Build/OptionsStep.zig
+++ b/lib/std/Build/OptionsStep.zig
@@ -12,21 +12,24 @@ pub const base_id = .options;
step: Step,
generated_file: GeneratedFile,
-builder: *std.Build,
contents: std.ArrayList(u8),
artifact_args: std.ArrayList(OptionArtifactArg),
file_source_args: std.ArrayList(OptionFileSourceArg),
-pub fn create(builder: *std.Build) *OptionsStep {
- const self = builder.allocator.create(OptionsStep) catch @panic("OOM");
+pub fn create(owner: *std.Build) *OptionsStep {
+ const self = owner.allocator.create(OptionsStep) catch @panic("OOM");
self.* = .{
- .builder = builder,
- .step = Step.init(.options, "options", builder.allocator, make),
+ .step = Step.init(.{
+ .id = base_id,
+ .name = "options",
+ .owner = owner,
+ .makeFn = make,
+ }),
.generated_file = undefined,
- .contents = std.ArrayList(u8).init(builder.allocator),
- .artifact_args = std.ArrayList(OptionArtifactArg).init(builder.allocator),
- .file_source_args = std.ArrayList(OptionFileSourceArg).init(builder.allocator),
+ .contents = std.ArrayList(u8).init(owner.allocator),
+ .artifact_args = std.ArrayList(OptionArtifactArg).init(owner.allocator),
+ .file_source_args = std.ArrayList(OptionFileSourceArg).init(owner.allocator),
};
self.generated_file = .{ .step = &self.step };
@@ -192,7 +195,7 @@ pub fn addOptionFileSource(
) void {
self.file_source_args.append(.{
.name = name,
- .source = source.dupe(self.builder),
+ .source = source.dupe(self.step.owner),
}) catch @panic("OOM");
source.addStepDependencies(&self.step);
}
@@ -200,12 +203,12 @@ pub fn addOptionFileSource(
/// The value is the path in the cache dir.
/// Adds a dependency automatically.
pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *CompileStep) void {
- self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch @panic("OOM");
+ self.artifact_args.append(.{ .name = self.step.owner.dupe(name), .artifact = artifact }) catch @panic("OOM");
self.step.dependOn(&artifact.step);
}
pub fn createModule(self: *OptionsStep) *std.Build.Module {
- return self.builder.createModule(.{
+ return self.step.owner.createModule(.{
.source_file = self.getSource(),
.dependencies = &.{},
});
@@ -215,14 +218,18 @@ pub fn getSource(self: *OptionsStep) FileSource {
return .{ .generated = &self.generated_file };
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ // This step completes so quickly that no progress is necessary.
+ _ = prog_node;
+
+ const b = step.owner;
const self = @fieldParentPtr(OptionsStep, "step", step);
for (self.artifact_args.items) |item| {
self.addOption(
[]const u8,
item.name,
- self.builder.pathFromRoot(item.artifact.getOutputSource().getPath(self.builder)),
+ b.pathFromRoot(item.artifact.getOutputSource().getPath(b)),
);
}
@@ -230,20 +237,18 @@ fn make(step: *Step) !void {
self.addOption(
[]const u8,
item.name,
- item.source.getPath(self.builder),
+ item.source.getPath(b),
);
}
- var options_dir = try self.builder.cache_root.handle.makeOpenPath("options", .{});
+ var options_dir = try b.cache_root.handle.makeOpenPath("options", .{});
defer options_dir.close();
const basename = self.hashContentsToFileName();
try options_dir.writeFile(&basename, self.contents.items);
- self.generated_file.path = try self.builder.cache_root.join(self.builder.allocator, &.{
- "options", &basename,
- });
+ self.generated_file.path = try b.cache_root.join(b.allocator, &.{ "options", &basename });
}
fn hashContentsToFileName(self: *OptionsStep) [64]u8 {
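// Usage sketch: the typical consumer-side flow for OptionsStep, which is
// unchanged by this diff apart from the step now reaching the builder through
// step.owner. Option names and values are illustrative; the b.addOptions,
// exe.addOptions, and b.installArtifact wrappers are assumed to be available.
const std = @import("std");

pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "app",
        .root_source_file = .{ .path = "src/main.zig" },
    });

    const opts = b.addOptions(); // wraps OptionsStep.create(b)
    opts.addOption(bool, "enable_tracing", false);
    opts.addOption([]const u8, "release_channel", "nightly");

    // Exposes the generated source file as the "build_options" module of `exe`.
    exe.addOptions("build_options", opts);

    b.installArtifact(exe);
}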
diff --git a/lib/std/Build/RemoveDirStep.zig b/lib/std/Build/RemoveDirStep.zig
index f3b71dcec1..a5bf3c3256 100644
--- a/lib/std/Build/RemoveDirStep.zig
+++ b/lib/std/Build/RemoveDirStep.zig
@@ -1,5 +1,4 @@
const std = @import("../std.zig");
-const log = std.log;
const fs = std.fs;
const Step = std.Build.Step;
const RemoveDirStep = @This();
@@ -7,23 +6,37 @@ const RemoveDirStep = @This();
pub const base_id = .remove_dir;
step: Step,
-builder: *std.Build,
dir_path: []const u8,
-pub fn init(builder: *std.Build, dir_path: []const u8) RemoveDirStep {
+pub fn init(owner: *std.Build, dir_path: []const u8) RemoveDirStep {
return RemoveDirStep{
- .builder = builder,
- .step = Step.init(.remove_dir, builder.fmt("RemoveDir {s}", .{dir_path}), builder.allocator, make),
- .dir_path = builder.dupePath(dir_path),
+ .step = Step.init(.{
+ .id = .remove_dir,
+ .name = owner.fmt("RemoveDir {s}", .{dir_path}),
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .dir_path = owner.dupePath(dir_path),
};
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ // TODO update progress node while walking file system.
+ // Should the standard library support this use case??
+ _ = prog_node;
+
+ const b = step.owner;
const self = @fieldParentPtr(RemoveDirStep, "step", step);
- const full_path = self.builder.pathFromRoot(self.dir_path);
- fs.cwd().deleteTree(full_path) catch |err| {
- log.err("Unable to remove {s}: {s}", .{ full_path, @errorName(err) });
- return err;
+ b.build_root.handle.deleteTree(self.dir_path) catch |err| {
+ if (b.build_root.path) |base| {
+ return step.fail("unable to recursively delete path '{s}/{s}': {s}", .{
+ base, self.dir_path, @errorName(err),
+ });
+ } else {
+ return step.fail("unable to recursively delete path '{s}': {s}", .{
+ self.dir_path, @errorName(err),
+ });
+ }
};
}
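// Usage sketch: a "clean" top-level step built on RemoveDirStep, which after
// this change deletes the directory relative to the build root via
// b.build_root.handle. The step name and directory are illustrative and the
// b.addRemoveDirTree wrapper is assumed to be available.
const std = @import("std");

pub fn build(b: *std.Build) void {
    const clean_step = b.step("clean", "Delete generated output");
    clean_step.dependOn(&b.addRemoveDirTree("zig-out").step);
}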
diff --git a/lib/std/Build/RunStep.zig b/lib/std/Build/RunStep.zig
index 1aae37d2f3..feeb64f6ca 100644
--- a/lib/std/Build/RunStep.zig
+++ b/lib/std/Build/RunStep.zig
@@ -10,76 +10,136 @@ const ArrayList = std.ArrayList;
const EnvMap = process.EnvMap;
const Allocator = mem.Allocator;
const ExecError = std.Build.ExecError;
-
-const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
+const assert = std.debug.assert;
const RunStep = @This();
pub const base_id: Step.Id = .run;
step: Step,
-builder: *std.Build,
/// See also addArg and addArgs to modifying this directly
argv: ArrayList(Arg),
/// Set this to modify the current working directory
+/// TODO change this to a Build.Cache.Directory to better integrate with
+/// future child process cwd API.
cwd: ?[]const u8,
/// Override this field to modify the environment, or use setEnvironmentVariable
env_map: ?*EnvMap,
-stdout_action: StdIoAction = .inherit,
-stderr_action: StdIoAction = .inherit,
-
-stdin_behavior: std.ChildProcess.StdIo = .Inherit,
-
-/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
-expected_term: ?std.ChildProcess.Term = .{ .Exited = 0 },
-
-/// Print the command before running it
-print: bool,
-/// Controls whether execution is skipped if the output file is up-to-date.
-/// The default is to always run if there is no output file, and to skip
-/// running if all output files are up-to-date.
-condition: enum { output_outdated, always } = .output_outdated,
+/// Configures whether the RunStep is considered to have side-effects, and also
+/// whether the RunStep will inherit stdio streams, forwarding them to the
+/// parent process, in which case will require a global lock to prevent other
+/// steps from interfering with stdio while the subprocess associated with this
+/// RunStep is running.
+/// If the RunStep is determined to not have side-effects, then execution will
+/// be skipped if all output files are up-to-date and input files are
+/// unchanged.
+stdio: StdIo = .infer_from_args,
+/// This field must be `null` if stdio is `inherit`.
+stdin: ?[]const u8 = null,
/// Additional file paths relative to build.zig that, when modified, indicate
/// that the RunStep should be re-executed.
+/// If the RunStep is determined to have side-effects, this field is ignored
+/// and the RunStep is always executed when it appears in the build graph.
extra_file_dependencies: []const []const u8 = &.{},
-pub const StdIoAction = union(enum) {
+/// After adding an output argument, this step will by default rename itself
+/// for a better display name in the build summary.
+/// This can be disabled by setting this to false.
+rename_step_with_output_arg: bool = true,
+
+/// If this is true, a RunStep which is configured to check the output of the
+/// executed binary will not fail the build if the binary cannot be executed
+/// due to being a foreign binary to the host system which is running the
+/// build graph.
+/// Command-line arguments such as -fqemu and -fwasmtime may affect whether a
+/// binary is detected as foreign, as well as system configuration such as
+/// Rosetta (macOS) and binfmt_misc (Linux).
+/// If this RunStep is considered to have side-effects, then this flag does
+/// nothing.
+skip_foreign_checks: bool = false,
+
+/// If stderr or stdout exceeds this amount, the child process is killed and
+/// the step fails.
+max_stdio_size: usize = 10 * 1024 * 1024,
+
+captured_stdout: ?*Output = null,
+captured_stderr: ?*Output = null,
+
+has_side_effects: bool = false,
+
+pub const StdIo = union(enum) {
+ /// Whether the RunStep has side-effects will be determined by whether or not one
+ /// of the args is an output file (added with `addOutputFileArg`).
+ /// If the RunStep is determined to have side-effects, this is the same as `inherit`.
+ /// The step will fail if the subprocess crashes or returns a non-zero exit code.
+ infer_from_args,
+ /// Causes the RunStep to be considered to have side-effects, and therefore
+ /// always execute when it appears in the build graph.
+ /// It also means that this step will obtain a global lock to prevent other
+ /// steps from running in the meantime.
+ /// The step will fail if the subprocess crashes or returns a non-zero exit code.
inherit,
- ignore,
- expect_exact: []const u8,
- expect_matches: []const []const u8,
+ /// Causes the RunStep to be considered to *not* have side-effects. The
+ /// process will be re-executed if any of the input dependencies are
+ /// modified. The exit code and standard I/O streams will be checked for
+ /// certain conditions, and the step will succeed or fail based on these
+ /// conditions.
+ /// Note that an explicit check for exit code 0 needs to be added to this
+ /// list if such a check is desirable.
+ check: std.ArrayList(Check),
+ /// This RunStep is running a zig unit test binary and will communicate
+ /// extra metadata over the IPC protocol.
+ zig_test,
+
+ pub const Check = union(enum) {
+ expect_stderr_exact: []const u8,
+ expect_stderr_match: []const u8,
+ expect_stdout_exact: []const u8,
+ expect_stdout_match: []const u8,
+ expect_term: std.process.Child.Term,
+ };
};
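// Usage sketch (program name and expected output are illustrative; `b`,
// `test_step`, and `exe` come from the caller's build.zig): expectStdOutEqual
// switches stdio from .infer_from_args to .check, records the stdout
// expectation, and adds an exit-code-0 check because no termination check
// exists yet. skip_foreign_checks only takes effect once stdio is .check or
// .zig_test.
fn exampleCheckedRun(b: *std.Build, test_step: *Step, exe: *CompileStep) void {
    const run = RunStep.create(b, "run hello");
    run.addArtifactArg(exe);
    run.expectStdOutEqual("Hello, world!\n");
    run.skip_foreign_checks = true; // skip, rather than fail, when `exe` targets a foreign CPU/OS
    test_step.dependOn(&run.step);
}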
pub const Arg = union(enum) {
artifact: *CompileStep,
file_source: std.Build.FileSource,
+ directory_source: std.Build.FileSource,
bytes: []u8,
- output: Output,
-
- pub const Output = struct {
- generated_file: *std.Build.GeneratedFile,
- basename: []const u8,
- };
+ output: *Output,
};
-pub fn create(builder: *std.Build, name: []const u8) *RunStep {
- const self = builder.allocator.create(RunStep) catch @panic("OOM");
- self.* = RunStep{
- .builder = builder,
- .step = Step.init(base_id, name, builder.allocator, make),
- .argv = ArrayList(Arg).init(builder.allocator),
+pub const Output = struct {
+ generated_file: std.Build.GeneratedFile,
+ prefix: []const u8,
+ basename: []const u8,
+};
+
+pub fn create(owner: *std.Build, name: []const u8) *RunStep {
+ const self = owner.allocator.create(RunStep) catch @panic("OOM");
+ self.* = .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = name,
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .argv = ArrayList(Arg).init(owner.allocator),
.cwd = null,
.env_map = null,
- .print = builder.verbose,
};
return self;
}
+pub fn setName(self: *RunStep, name: []const u8) void {
+ self.step.name = name;
+ self.rename_step_with_output_arg = false;
+}
+
pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void {
self.argv.append(Arg{ .artifact = artifact }) catch @panic("OOM");
self.step.dependOn(&artifact.step);
@@ -89,25 +149,47 @@ pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void {
/// run, and returns a FileSource which can be used as inputs to other APIs
/// throughout the build system.
pub fn addOutputFileArg(rs: *RunStep, basename: []const u8) std.Build.FileSource {
- const generated_file = rs.builder.allocator.create(std.Build.GeneratedFile) catch @panic("OOM");
- generated_file.* = .{ .step = &rs.step };
- rs.argv.append(.{ .output = .{
- .generated_file = generated_file,
- .basename = rs.builder.dupe(basename),
- } }) catch @panic("OOM");
+ return addPrefixedOutputFileArg(rs, "", basename);
+}
- return .{ .generated = generated_file };
+pub fn addPrefixedOutputFileArg(
+ rs: *RunStep,
+ prefix: []const u8,
+ basename: []const u8,
+) std.Build.FileSource {
+ const b = rs.step.owner;
+
+ const output = b.allocator.create(Output) catch @panic("OOM");
+ output.* = .{
+ .prefix = prefix,
+ .basename = basename,
+ .generated_file = .{ .step = &rs.step },
+ };
+ rs.argv.append(.{ .output = output }) catch @panic("OOM");
+
+ if (rs.rename_step_with_output_arg) {
+ rs.setName(b.fmt("{s} ({s})", .{ rs.step.name, basename }));
+ }
+
+ return .{ .generated = &output.generated_file };
}
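// Usage sketch (tool and file names are illustrative; `b` comes from the
// caller's build.zig): an output argument makes the RunStep side-effect-free
// and cacheable, and the returned FileSource carries the dependency to any
// consumer. The b.addInstallFileWithDir wrapper is assumed to be available.
fn exampleGeneratedHeader(b: *std.Build) void {
    const gen = RunStep.create(b, "generate version header");
    gen.addArgs(&.{ "python3", "tools/gen_version.py", "-o" });
    // Also renames the step to "generate version header (version.h)" by default.
    const header = gen.addOutputFileArg("version.h");
    const install = b.addInstallFileWithDir(header, .header, "version.h");
    b.getInstallStep().dependOn(&install.step);
}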
pub fn addFileSourceArg(self: *RunStep, file_source: std.Build.FileSource) void {
- self.argv.append(Arg{
- .file_source = file_source.dupe(self.builder),
+ self.argv.append(.{
+ .file_source = file_source.dupe(self.step.owner),
}) catch @panic("OOM");
file_source.addStepDependencies(&self.step);
}
+pub fn addDirectorySourceArg(self: *RunStep, directory_source: std.Build.FileSource) void {
+ self.argv.append(.{
+ .directory_source = directory_source.dupe(self.step.owner),
+ }) catch @panic("OOM");
+ directory_source.addStepDependencies(&self.step);
+}
+
pub fn addArg(self: *RunStep, arg: []const u8) void {
- self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch @panic("OOM");
+ self.argv.append(.{ .bytes = self.step.owner.dupe(arg) }) catch @panic("OOM");
}
pub fn addArgs(self: *RunStep, args: []const []const u8) void {
@@ -117,102 +199,183 @@ pub fn addArgs(self: *RunStep, args: []const []const u8) void {
}
pub fn clearEnvironment(self: *RunStep) void {
- const new_env_map = self.builder.allocator.create(EnvMap) catch @panic("OOM");
- new_env_map.* = EnvMap.init(self.builder.allocator);
+ const b = self.step.owner;
+ const new_env_map = b.allocator.create(EnvMap) catch @panic("OOM");
+ new_env_map.* = EnvMap.init(b.allocator);
self.env_map = new_env_map;
}
pub fn addPathDir(self: *RunStep, search_path: []const u8) void {
- addPathDirInternal(&self.step, self.builder, search_path);
-}
-
-/// For internal use only, users of `RunStep` should use `addPathDir` directly.
-pub fn addPathDirInternal(step: *Step, builder: *std.Build, search_path: []const u8) void {
- const env_map = getEnvMapInternal(step, builder.allocator);
+ const b = self.step.owner;
+ const env_map = getEnvMapInternal(self);
const key = "PATH";
var prev_path = env_map.get(key);
if (prev_path) |pp| {
- const new_path = builder.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
+ const new_path = b.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
env_map.put(key, new_path) catch @panic("OOM");
} else {
- env_map.put(key, builder.dupePath(search_path)) catch @panic("OOM");
+ env_map.put(key, b.dupePath(search_path)) catch @panic("OOM");
}
}
pub fn getEnvMap(self: *RunStep) *EnvMap {
- return getEnvMapInternal(&self.step, self.builder.allocator);
+ return getEnvMapInternal(self);
}
-fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap {
- const maybe_env_map = switch (step.id) {
- .run => step.cast(RunStep).?.env_map,
- .emulatable_run => step.cast(std.Build.EmulatableRunStep).?.env_map,
- else => unreachable,
- };
- return maybe_env_map orelse {
- const env_map = allocator.create(EnvMap) catch @panic("OOM");
- env_map.* = process.getEnvMap(allocator) catch @panic("unhandled error");
- switch (step.id) {
- .run => step.cast(RunStep).?.env_map = env_map,
- .emulatable_run => step.cast(RunStep).?.env_map = env_map,
- else => unreachable,
- }
+fn getEnvMapInternal(self: *RunStep) *EnvMap {
+ const arena = self.step.owner.allocator;
+ return self.env_map orelse {
+ const env_map = arena.create(EnvMap) catch @panic("OOM");
+ env_map.* = process.getEnvMap(arena) catch @panic("unhandled error");
+ self.env_map = env_map;
return env_map;
};
}
pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8) void {
+ const b = self.step.owner;
const env_map = self.getEnvMap();
- env_map.put(
- self.builder.dupe(key),
- self.builder.dupe(value),
- ) catch @panic("unhandled error");
+ env_map.put(b.dupe(key), b.dupe(value)) catch @panic("unhandled error");
}
+pub fn removeEnvironmentVariable(self: *RunStep, key: []const u8) void {
+ self.getEnvMap().remove(key);
+}
+
+/// Adds a check for exact stderr match. Does not add any other checks.
pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
- self.stderr_action = .{ .expect_exact = self.builder.dupe(bytes) };
+ const new_check: StdIo.Check = .{ .expect_stderr_exact = self.step.owner.dupe(bytes) };
+ self.addCheck(new_check);
}
+/// Adds a check for exact stdout match as well as a check for exit code 0, if
+/// there is not already an expected termination check.
pub fn expectStdOutEqual(self: *RunStep, bytes: []const u8) void {
- self.stdout_action = .{ .expect_exact = self.builder.dupe(bytes) };
+ const new_check: StdIo.Check = .{ .expect_stdout_exact = self.step.owner.dupe(bytes) };
+ self.addCheck(new_check);
+ if (!self.hasTermCheck()) {
+ self.expectExitCode(0);
+ }
}
-fn stdIoActionToBehavior(action: StdIoAction) std.ChildProcess.StdIo {
- return switch (action) {
- .ignore => .Ignore,
- .inherit => .Inherit,
- .expect_exact, .expect_matches => .Pipe,
+pub fn expectExitCode(self: *RunStep, code: u8) void {
+ const new_check: StdIo.Check = .{ .expect_term = .{ .Exited = code } };
+ self.addCheck(new_check);
+}
+
+pub fn hasTermCheck(self: RunStep) bool {
+ for (self.stdio.check.items) |check| switch (check) {
+ .expect_term => return true,
+ else => continue,
+ };
+ return false;
+}
+
+pub fn addCheck(self: *RunStep, new_check: StdIo.Check) void {
+ switch (self.stdio) {
+ .infer_from_args => {
+ self.stdio = .{ .check = std.ArrayList(StdIo.Check).init(self.step.owner.allocator) };
+ self.stdio.check.append(new_check) catch @panic("OOM");
+ },
+ .check => |*checks| checks.append(new_check) catch @panic("OOM"),
+ else => @panic("illegal call to addCheck: conflicting helper method calls. Suggest to directly set stdio field of RunStep instead"),
+ }
+}
+
+pub fn captureStdErr(self: *RunStep) std.Build.FileSource {
+ assert(self.stdio != .inherit);
+
+ if (self.captured_stderr) |output| return .{ .generated = &output.generated_file };
+
+ const output = self.step.owner.allocator.create(Output) catch @panic("OOM");
+ output.* = .{
+ .prefix = "",
+ .basename = "stderr",
+ .generated_file = .{ .step = &self.step },
+ };
+ self.captured_stderr = output;
+ return .{ .generated = &output.generated_file };
+}
+
+pub fn captureStdOut(self: *RunStep) std.Build.FileSource {
+ assert(self.stdio != .inherit);
+
+ if (self.captured_stdout) |output| return .{ .generated = &output.generated_file };
+
+ const output = self.step.owner.allocator.create(Output) catch @panic("OOM");
+ output.* = .{
+ .prefix = "",
+ .basename = "stdout",
+ .generated_file = .{ .step = &self.step },
+ };
+ self.captured_stdout = output;
+ return .{ .generated = &output.generated_file };
+}
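// Usage sketch (command and file names are illustrative; `b` comes from the
// caller's build.zig): a captured stream becomes a cached output file, so a
// command's stdout can be installed or passed to another step without shell
// redirection. The b.addInstallFileWithDir wrapper is assumed to be available.
fn exampleCapturedStdOut(b: *std.Build) void {
    const render = RunStep.create(b, "render config");
    render.addArgs(&.{ "python3", "tools/render_config.py" });
    render.extra_file_dependencies = &.{"tools/render_config.py"};
    const rendered = render.captureStdOut();
    const install = b.addInstallFileWithDir(rendered, .prefix, "config.ini");
    b.getInstallStep().dependOn(&install.step);
}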
+
+/// Returns whether the RunStep has side effects *other than* updating the output arguments.
+fn hasSideEffects(self: RunStep) bool {
+ if (self.has_side_effects) return true;
+ return switch (self.stdio) {
+ .infer_from_args => !self.hasAnyOutputArgs(),
+ .inherit => true,
+ .check => false,
+ .zig_test => false,
};
}
-fn needOutputCheck(self: RunStep) bool {
- switch (self.condition) {
- .always => return false,
- .output_outdated => {},
- }
- if (self.extra_file_dependencies.len > 0) return true;
-
+fn hasAnyOutputArgs(self: RunStep) bool {
+ if (self.captured_stdout != null) return true;
+ if (self.captured_stderr != null) return true;
for (self.argv.items) |arg| switch (arg) {
.output => return true,
else => continue,
};
-
return false;
}
-fn make(step: *Step) !void {
- const self = @fieldParentPtr(RunStep, "step", step);
- const need_output_check = self.needOutputCheck();
+fn checksContainStdout(checks: []const StdIo.Check) bool {
+ for (checks) |check| switch (check) {
+ .expect_stderr_exact,
+ .expect_stderr_match,
+ .expect_term,
+ => continue,
- var argv_list = ArrayList([]const u8).init(self.builder.allocator);
+ .expect_stdout_exact,
+ .expect_stdout_match,
+ => return true,
+ };
+ return false;
+}
+
+fn checksContainStderr(checks: []const StdIo.Check) bool {
+ for (checks) |check| switch (check) {
+ .expect_stdout_exact,
+ .expect_stdout_match,
+ .expect_term,
+ => continue,
+
+ .expect_stderr_exact,
+ .expect_stderr_match,
+ => return true,
+ };
+ return false;
+}
+
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ const b = step.owner;
+ const arena = b.allocator;
+ const self = @fieldParentPtr(RunStep, "step", step);
+ const has_side_effects = self.hasSideEffects();
+
+ var argv_list = ArrayList([]const u8).init(arena);
var output_placeholders = ArrayList(struct {
index: usize,
- output: Arg.Output,
- }).init(self.builder.allocator);
+ output: *Output,
+ }).init(arena);
- var man = self.builder.cache.obtain();
+ var man = b.cache.obtain();
defer man.deinit();
for (self.argv.items) |arg| {
@@ -222,23 +385,29 @@ fn make(step: *Step) !void {
man.hash.addBytes(bytes);
},
.file_source => |file| {
- const file_path = file.getPath(self.builder);
+ const file_path = file.getPath(b);
try argv_list.append(file_path);
_ = try man.addFile(file_path, null);
},
+ .directory_source => |file| {
+ const file_path = file.getPath(b);
+ try argv_list.append(file_path);
+ man.hash.addBytes(file_path);
+ },
.artifact => |artifact| {
if (artifact.target.isWindows()) {
// On Windows we don't have rpaths so we have to add .dll search paths to PATH
self.addPathForDynLibs(artifact);
}
const file_path = artifact.installed_path orelse
- artifact.getOutputSource().getPath(self.builder);
+ artifact.getOutputSource().getPath(b);
try argv_list.append(file_path);
_ = try man.addFile(file_path, null);
},
.output => |output| {
+ man.hash.addBytes(output.prefix);
man.hash.addBytes(output.basename);
// Add a placeholder into the argument list because we need the
// manifest hash to be updated with all arguments before the
@@ -252,60 +421,77 @@ fn make(step: *Step) !void {
}
}
- if (need_output_check) {
- for (self.extra_file_dependencies) |file_path| {
- _ = try man.addFile(self.builder.pathFromRoot(file_path), null);
- }
+ if (self.captured_stdout) |output| {
+ man.hash.addBytes(output.basename);
+ }
- if (man.hit() catch |err| failWithCacheError(man, err)) {
- // cache hit, skip running command
- const digest = man.final();
- for (output_placeholders.items) |placeholder| {
- placeholder.output.generated_file.path = try self.builder.cache_root.join(
- self.builder.allocator,
- &.{ "o", &digest, placeholder.output.basename },
- );
- }
- return;
- }
+ if (self.captured_stderr) |output| {
+ man.hash.addBytes(output.basename);
+ }
+ hashStdIo(&man.hash, self.stdio);
+
+ if (has_side_effects) {
+ try runCommand(self, argv_list.items, has_side_effects, null, prog_node);
+ return;
+ }
+
+ for (self.extra_file_dependencies) |file_path| {
+ _ = try man.addFile(b.pathFromRoot(file_path), null);
+ }
+
+ if (try step.cacheHit(&man)) {
+ // cache hit, skip running command
const digest = man.final();
-
for (output_placeholders.items) |placeholder| {
- const output_path = try self.builder.cache_root.join(
- self.builder.allocator,
- &.{ "o", &digest, placeholder.output.basename },
- );
- const output_dir = fs.path.dirname(output_path).?;
- fs.cwd().makePath(output_dir) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
- return err;
- };
-
- placeholder.output.generated_file.path = output_path;
- argv_list.items[placeholder.index] = output_path;
+ placeholder.output.generated_file.path = try b.cache_root.join(arena, &.{
+ "o", &digest, placeholder.output.basename,
+ });
}
+
+ if (self.captured_stdout) |output| {
+ output.generated_file.path = try b.cache_root.join(arena, &.{
+ "o", &digest, output.basename,
+ });
+ }
+
+ if (self.captured_stderr) |output| {
+ output.generated_file.path = try b.cache_root.join(arena, &.{
+ "o", &digest, output.basename,
+ });
+ }
+
+ step.result_cached = true;
+ return;
}
- try runCommand(
- argv_list.items,
- self.builder,
- self.expected_term,
- self.stdout_action,
- self.stderr_action,
- self.stdin_behavior,
- self.env_map,
- self.cwd,
- self.print,
- );
+ const digest = man.final();
- if (need_output_check) {
- try man.writeManifest();
+ for (output_placeholders.items) |placeholder| {
+ const output_components = .{ "o", &digest, placeholder.output.basename };
+ const output_sub_path = try fs.path.join(arena, &output_components);
+ const output_sub_dir_path = fs.path.dirname(output_sub_path).?;
+ b.cache_root.handle.makePath(output_sub_dir_path) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, output_sub_dir_path, @errorName(err),
+ });
+ };
+ const output_path = try b.cache_root.join(arena, &output_components);
+ placeholder.output.generated_file.path = output_path;
+ const cli_arg = if (placeholder.output.prefix.len == 0)
+ output_path
+ else
+ b.fmt("{s}{s}", .{ placeholder.output.prefix, output_path });
+ argv_list.items[placeholder.index] = cli_arg;
}
+
+ try runCommand(self, argv_list.items, has_side_effects, &digest, prog_node);
+
+ try step.writeManifest(&man);
}
fn formatTerm(
- term: ?std.ChildProcess.Term,
+ term: ?std.process.Child.Term,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
@@ -321,11 +507,11 @@ fn formatTerm(
try writer.writeAll("exited with any code");
}
}
-fn fmtTerm(term: ?std.ChildProcess.Term) std.fmt.Formatter(formatTerm) {
+fn fmtTerm(term: ?std.process.Child.Term) std.fmt.Formatter(formatTerm) {
return .{ .data = term };
}
-fn termMatches(expected: ?std.ChildProcess.Term, actual: std.ChildProcess.Term) bool {
+fn termMatches(expected: ?std.process.Child.Term, actual: std.process.Child.Term) bool {
return if (expected) |e| switch (e) {
.Exited => |expected_code| switch (actual) {
.Exited => |actual_code| expected_code == actual_code,
@@ -349,183 +535,701 @@ fn termMatches(expected: ?std.ChildProcess.Term, actual: std.ChildProcess.Term)
};
}
-pub fn runCommand(
+fn runCommand(
+ self: *RunStep,
argv: []const []const u8,
- builder: *std.Build,
- expected_term: ?std.ChildProcess.Term,
- stdout_action: StdIoAction,
- stderr_action: StdIoAction,
- stdin_behavior: std.ChildProcess.StdIo,
- env_map: ?*EnvMap,
- maybe_cwd: ?[]const u8,
- print: bool,
+ has_side_effects: bool,
+ digest: ?*const [std.Build.Cache.hex_digest_len]u8,
+ prog_node: *std.Progress.Node,
) !void {
- const cwd = if (maybe_cwd) |cwd| builder.pathFromRoot(cwd) else builder.build_root.path;
+ const step = &self.step;
+ const b = step.owner;
+ const arena = b.allocator;
- if (!std.process.can_spawn) {
- const cmd = try std.mem.join(builder.allocator, " ", argv);
- std.debug.print("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{
- @tagName(builtin.os.tag), cmd,
- });
- builder.allocator.free(cmd);
- return ExecError.ExecNotSupported;
- }
+ try step.handleChildProcUnsupported(self.cwd, argv);
+ try Step.handleVerbose2(step.owner, self.cwd, self.env_map, argv);
- var child = std.ChildProcess.init(argv, builder.allocator);
- child.cwd = cwd;
- child.env_map = env_map orelse builder.env_map;
-
- child.stdin_behavior = stdin_behavior;
- child.stdout_behavior = stdIoActionToBehavior(stdout_action);
- child.stderr_behavior = stdIoActionToBehavior(stderr_action);
-
- if (print)
- printCmd(cwd, argv);
-
- child.spawn() catch |err| {
- std.debug.print("Unable to spawn {s}: {s}\n", .{ argv[0], @errorName(err) });
- return err;
+ const allow_skip = switch (self.stdio) {
+ .check, .zig_test => self.skip_foreign_checks,
+ else => false,
};
- // TODO need to poll to read these streams to prevent a deadlock (or rely on evented I/O).
+ var interp_argv = std.ArrayList([]const u8).init(b.allocator);
+ defer interp_argv.deinit();
- var stdout: ?[]const u8 = null;
- defer if (stdout) |s| builder.allocator.free(s);
+ const result = spawnChildAndCollect(self, argv, has_side_effects, prog_node) catch |err| term: {
+ // InvalidExe: cpu arch mismatch
+ // FileNotFound: can happen with a wrong dynamic linker path
+ if (err == error.InvalidExe or err == error.FileNotFound) interpret: {
+ // TODO: learn the target from the binary directly rather than from
+ // relying on it being a CompileStep. This will make this logic
+ // work even for the edge case that the binary was produced by a
+ // third party.
+ const exe = switch (self.argv.items[0]) {
+ .artifact => |exe| exe,
+ else => break :interpret,
+ };
+ switch (exe.kind) {
+ .exe, .@"test" => {},
+ else => break :interpret,
+ }
- switch (stdout_action) {
- .expect_exact, .expect_matches => {
- stdout = try child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
- },
- .inherit, .ignore => {},
- }
+ const need_cross_glibc = exe.target.isGnuLibC() and exe.is_linking_libc;
+ switch (b.host.getExternalExecutor(exe.target_info, .{
+ .qemu_fixes_dl = need_cross_glibc and b.glibc_runtimes_dir != null,
+ .link_libc = exe.is_linking_libc,
+ })) {
+ .native, .rosetta => {
+ if (allow_skip) return error.MakeSkipped;
+ break :interpret;
+ },
+ .wine => |bin_name| {
+ if (b.enable_wine) {
+ try interp_argv.append(bin_name);
+ try interp_argv.appendSlice(argv);
+ } else {
+ return failForeign(self, "-fwine", argv[0], exe);
+ }
+ },
+ .qemu => |bin_name| {
+ if (b.enable_qemu) {
+ const glibc_dir_arg = if (need_cross_glibc)
+ b.glibc_runtimes_dir orelse return
+ else
+ null;
- var stderr: ?[]const u8 = null;
- defer if (stderr) |s| builder.allocator.free(s);
+ try interp_argv.append(bin_name);
- switch (stderr_action) {
- .expect_exact, .expect_matches => {
- stderr = try child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
- },
- .inherit, .ignore => {},
- }
+ if (glibc_dir_arg) |dir| {
+ // TODO look into making this a call to `linuxTriple`. This
+ // needs the directory to be called "i686" rather than
+ // "x86" which is why we do it manually here.
+ const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
+ const cpu_arch = exe.target.getCpuArch();
+ const os_tag = exe.target.getOsTag();
+ const abi = exe.target.getAbi();
+ const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
+ "i686"
+ else
+ @tagName(cpu_arch);
+ const full_dir = try std.fmt.allocPrint(b.allocator, fmt_str, .{
+ dir, cpu_arch_name, @tagName(os_tag), @tagName(abi),
+ });
- const term = child.wait() catch |err| {
- std.debug.print("Unable to spawn {s}: {s}\n", .{ argv[0], @errorName(err) });
- return err;
- };
+ try interp_argv.append("-L");
+ try interp_argv.append(full_dir);
+ }
- if (!termMatches(expected_term, term)) {
- if (builder.prominent_compile_errors) {
- std.debug.print("Run step {} (expected {})\n", .{ fmtTerm(term), fmtTerm(expected_term) });
- } else {
- std.debug.print("The following command {} (expected {}):\n", .{ fmtTerm(term), fmtTerm(expected_term) });
- printCmd(cwd, argv);
+ try interp_argv.appendSlice(argv);
+ } else {
+ return failForeign(self, "-fqemu", argv[0], exe);
+ }
+ },
+ .darling => |bin_name| {
+ if (b.enable_darling) {
+ try interp_argv.append(bin_name);
+ try interp_argv.appendSlice(argv);
+ } else {
+ return failForeign(self, "-fdarling", argv[0], exe);
+ }
+ },
+ .wasmtime => |bin_name| {
+ if (b.enable_wasmtime) {
+ try interp_argv.append(bin_name);
+ try interp_argv.append("--dir=.");
+ try interp_argv.append(argv[0]);
+ try interp_argv.append("--");
+ try interp_argv.appendSlice(argv[1..]);
+ } else {
+ return failForeign(self, "-fwasmtime", argv[0], exe);
+ }
+ },
+ .bad_dl => |foreign_dl| {
+ if (allow_skip) return error.MakeSkipped;
+
+ const host_dl = b.host.dynamic_linker.get() orelse "(none)";
+
+ return step.fail(
+ \\the host system is unable to execute binaries from the target
+ \\ because the host dynamic linker is '{s}',
+ \\ while the target dynamic linker is '{s}'.
+ \\ consider setting the dynamic linker or enabling skip_foreign_checks in the Run step
+ , .{ host_dl, foreign_dl });
+ },
+ .bad_os_or_cpu => {
+ if (allow_skip) return error.MakeSkipped;
+
+ const host_name = try b.host.target.zigTriple(b.allocator);
+ const foreign_name = try exe.target.zigTriple(b.allocator);
+
+ return step.fail("the host system ({s}) is unable to execute binaries from the target ({s})", .{
+ host_name, foreign_name,
+ });
+ },
+ }
+
+ if (exe.target.isWindows()) {
+ // On Windows we don't have rpaths so we have to add .dll search paths to PATH
+ self.addPathForDynLibs(exe);
+ }
+
+ try Step.handleVerbose2(step.owner, self.cwd, self.env_map, interp_argv.items);
+
+ break :term spawnChildAndCollect(self, interp_argv.items, has_side_effects, prog_node) catch |e| {
+ return step.fail("unable to spawn interpreter {s}: {s}", .{
+ interp_argv.items[0], @errorName(e),
+ });
+ };
+ }
+
+ return step.fail("unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
+ };
+
+ step.result_duration_ns = result.elapsed_ns;
+ step.result_peak_rss = result.peak_rss;
+ step.test_results = result.stdio.test_results;
+
+ // Capture stdout and stderr to GeneratedFile objects.
+ const Stream = struct {
+ captured: ?*Output,
+ is_null: bool,
+ bytes: []const u8,
+ };
+ for ([_]Stream{
+ .{
+ .captured = self.captured_stdout,
+ .is_null = result.stdio.stdout_null,
+ .bytes = result.stdio.stdout,
+ },
+ .{
+ .captured = self.captured_stderr,
+ .is_null = result.stdio.stderr_null,
+ .bytes = result.stdio.stderr,
+ },
+ }) |stream| {
+ if (stream.captured) |output| {
+ assert(!stream.is_null);
+
+ const output_components = .{ "o", digest.?, output.basename };
+ const output_path = try b.cache_root.join(arena, &output_components);
+ output.generated_file.path = output_path;
+
+ const sub_path = try fs.path.join(arena, &output_components);
+ const sub_path_dirname = fs.path.dirname(sub_path).?;
+ b.cache_root.handle.makePath(sub_path_dirname) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, sub_path_dirname, @errorName(err),
+ });
+ };
+ b.cache_root.handle.writeFile(sub_path, stream.bytes) catch |err| {
+ return step.fail("unable to write file '{}{s}': {s}", .{
+ b.cache_root, sub_path, @errorName(err),
+ });
+ };
}
- return error.UnexpectedExit;
}
- switch (stderr_action) {
- .inherit, .ignore => {},
- .expect_exact => |expected_bytes| {
- if (!mem.eql(u8, expected_bytes, stderr.?)) {
- std.debug.print(
- \\
- \\========= Expected this stderr: =========
- \\{s}
- \\========= But found: ====================
- \\{s}
- \\
- , .{ expected_bytes, stderr.? });
- printCmd(cwd, argv);
- return error.TestFailed;
- }
- },
- .expect_matches => |matches| for (matches) |match| {
- if (mem.indexOf(u8, stderr.?, match) == null) {
- std.debug.print(
- \\
- \\========= Expected to find in stderr: =========
- \\{s}
- \\========= But stderr does not contain it: =====
- \\{s}
- \\
- , .{ match, stderr.? });
- printCmd(cwd, argv);
- return error.TestFailed;
- }
- },
- }
+ const final_argv = if (interp_argv.items.len == 0) argv else interp_argv.items;
- switch (stdout_action) {
- .inherit, .ignore => {},
- .expect_exact => |expected_bytes| {
- if (!mem.eql(u8, expected_bytes, stdout.?)) {
- std.debug.print(
- \\
- \\========= Expected this stdout: =========
- \\{s}
- \\========= But found: ====================
- \\{s}
- \\
- , .{ expected_bytes, stdout.? });
- printCmd(cwd, argv);
- return error.TestFailed;
+ switch (self.stdio) {
+ .check => |checks| for (checks.items) |check| switch (check) {
+ .expect_stderr_exact => |expected_bytes| {
+ assert(!result.stdio.stderr_null);
+ if (!mem.eql(u8, expected_bytes, result.stdio.stderr)) {
+ return step.fail(
+ \\
+ \\========= expected this stderr: =========
+ \\{s}
+ \\========= but found: ====================
+ \\{s}
+ \\========= from the following command: ===
+ \\{s}
+ , .{
+ expected_bytes,
+ result.stdio.stderr,
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ .expect_stderr_match => |match| {
+ assert(!result.stdio.stderr_null);
+ if (mem.indexOf(u8, result.stdio.stderr, match) == null) {
+ return step.fail(
+ \\
+ \\========= expected to find in stderr: =========
+ \\{s}
+ \\========= but stderr does not contain it: =====
+ \\{s}
+ \\========= from the following command: =========
+ \\{s}
+ , .{
+ match,
+ result.stdio.stderr,
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ .expect_stdout_exact => |expected_bytes| {
+ assert(!result.stdio.stdout_null);
+ if (!mem.eql(u8, expected_bytes, result.stdio.stdout)) {
+ return step.fail(
+ \\
+ \\========= expected this stdout: =========
+ \\{s}
+ \\========= but found: ====================
+ \\{s}
+ \\========= from the following command: ===
+ \\{s}
+ , .{
+ expected_bytes,
+ result.stdio.stdout,
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ .expect_stdout_match => |match| {
+ assert(!result.stdio.stdout_null);
+ if (mem.indexOf(u8, result.stdio.stdout, match) == null) {
+ return step.fail(
+ \\
+ \\========= expected to find in stdout: =========
+ \\{s}
+ \\========= but stdout does not contain it: =====
+ \\{s}
+ \\========= from the following command: =========
+ \\{s}
+ , .{
+ match,
+ result.stdio.stdout,
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ .expect_term => |expected_term| {
+ if (!termMatches(expected_term, result.term)) {
+ return step.fail("the following command {} (expected {}):\n{s}", .{
+ fmtTerm(result.term),
+ fmtTerm(expected_term),
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ },
+ .zig_test => {
+ const expected_term: std.process.Child.Term = .{ .Exited = 0 };
+ if (!termMatches(expected_term, result.term)) {
+ return step.fail("the following command {} (expected {}):\n{s}", .{
+ fmtTerm(result.term),
+ fmtTerm(expected_term),
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ if (!result.stdio.test_results.isSuccess()) {
+ return step.fail(
+ "the following test command failed:\n{s}",
+ .{try Step.allocPrintCmd(arena, self.cwd, final_argv)},
+ );
}
},
- .expect_matches => |matches| for (matches) |match| {
- if (mem.indexOf(u8, stdout.?, match) == null) {
- std.debug.print(
- \\
- \\========= Expected to find in stdout: =========
- \\{s}
- \\========= But stdout does not contain it: =====
- \\{s}
- \\
- , .{ match, stdout.? });
- printCmd(cwd, argv);
- return error.TestFailed;
- }
+ else => {
+ try step.handleChildProcessTerm(result.term, self.cwd, final_argv);
},
}
}
-fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
- const i = man.failed_file_index orelse failWithSimpleError(err);
- const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
- const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
- std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
- std.process.exit(1);
-}
+const ChildProcResult = struct {
+ term: std.process.Child.Term,
+ elapsed_ns: u64,
+ peak_rss: usize,
-fn failWithSimpleError(err: anyerror) noreturn {
- std.debug.print("{s}\n", .{@errorName(err)});
- std.process.exit(1);
-}
+ stdio: StdIoResult,
+};
-fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
- if (cwd) |yes_cwd| std.debug.print("cd {s} && ", .{yes_cwd});
- for (argv) |arg| {
- std.debug.print("{s} ", .{arg});
+fn spawnChildAndCollect(
+ self: *RunStep,
+ argv: []const []const u8,
+ has_side_effects: bool,
+ prog_node: *std.Progress.Node,
+) !ChildProcResult {
+ const b = self.step.owner;
+ const arena = b.allocator;
+
+ var child = std.process.Child.init(argv, arena);
+ if (self.cwd) |cwd| {
+ child.cwd = b.pathFromRoot(cwd);
+ } else {
+ child.cwd = b.build_root.path;
+ child.cwd_dir = b.build_root.handle;
}
- std.debug.print("\n", .{});
+ child.env_map = self.env_map orelse b.env_map;
+ child.request_resource_usage_statistics = true;
+
+ child.stdin_behavior = switch (self.stdio) {
+ .infer_from_args => if (has_side_effects) .Inherit else .Close,
+ .inherit => .Inherit,
+ .check => .Close,
+ .zig_test => .Pipe,
+ };
+ child.stdout_behavior = switch (self.stdio) {
+ .infer_from_args => if (has_side_effects) .Inherit else .Ignore,
+ .inherit => .Inherit,
+ .check => |checks| if (checksContainStdout(checks.items)) .Pipe else .Ignore,
+ .zig_test => .Pipe,
+ };
+ child.stderr_behavior = switch (self.stdio) {
+ .infer_from_args => if (has_side_effects) .Inherit else .Pipe,
+ .inherit => .Inherit,
+ .check => .Pipe,
+ .zig_test => .Pipe,
+ };
+ if (self.captured_stdout != null) child.stdout_behavior = .Pipe;
+ if (self.captured_stderr != null) child.stderr_behavior = .Pipe;
+ if (self.stdin != null) {
+ assert(child.stdin_behavior != .Inherit);
+ child.stdin_behavior = .Pipe;
+ }
+
+ try child.spawn();
+ var timer = try std.time.Timer.start();
+
+ const result = if (self.stdio == .zig_test)
+ evalZigTest(self, &child, prog_node)
+ else
+ evalGeneric(self, &child);
+
+ const term = try child.wait();
+ const elapsed_ns = timer.read();
+
+ return .{
+ .stdio = try result,
+ .term = term,
+ .elapsed_ns = elapsed_ns,
+ .peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0,
+ };
+}
+
+const StdIoResult = struct {
+ // These use boolean flags instead of optionals as a workaround for
+ // https://github.com/ziglang/zig/issues/14783
+ stdout: []const u8,
+ stderr: []const u8,
+ stdout_null: bool,
+ stderr_null: bool,
+ test_results: Step.TestResults,
+};
+
+fn evalZigTest(
+ self: *RunStep,
+ child: *std.process.Child,
+ prog_node: *std.Progress.Node,
+) !StdIoResult {
+ const gpa = self.step.owner.allocator;
+ const arena = self.step.owner.allocator;
+
+ var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
+ .stdout = child.stdout.?,
+ .stderr = child.stderr.?,
+ });
+ defer poller.deinit();
+
+ try sendMessage(child.stdin.?, .query_test_metadata);
+
+ const Header = std.zig.Server.Message.Header;
+
+ const stdout = poller.fifo(.stdout);
+ const stderr = poller.fifo(.stderr);
+
+ var fail_count: u32 = 0;
+ var skip_count: u32 = 0;
+ var leak_count: u32 = 0;
+ var test_count: u32 = 0;
+
+ var metadata: ?TestMetadata = null;
+
+ var sub_prog_node: ?std.Progress.Node = null;
+ defer if (sub_prog_node) |*n| n.end();
+
+ poll: while (true) {
+ while (stdout.readableLength() < @sizeOf(Header)) {
+ if (!(try poller.poll())) break :poll;
+ }
+ const header = stdout.reader().readStruct(Header) catch unreachable;
+ while (stdout.readableLength() < header.bytes_len) {
+ if (!(try poller.poll())) break :poll;
+ }
+ const body = stdout.readableSliceOfLen(header.bytes_len);
+
+ switch (header.tag) {
+ .zig_version => {
+ if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
+ return self.step.fail(
+ "zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
+ .{ builtin.zig_version_string, body },
+ );
+ }
+ },
+ .test_metadata => {
+ const TmHdr = std.zig.Server.Message.TestMetadata;
+ const tm_hdr = @ptrCast(*align(1) const TmHdr, body);
+ test_count = tm_hdr.tests_len;
+
+ const names_bytes = body[@sizeOf(TmHdr)..][0 .. test_count * @sizeOf(u32)];
+ const async_frame_lens_bytes = body[@sizeOf(TmHdr) + names_bytes.len ..][0 .. test_count * @sizeOf(u32)];
+ const expected_panic_msgs_bytes = body[@sizeOf(TmHdr) + names_bytes.len + async_frame_lens_bytes.len ..][0 .. test_count * @sizeOf(u32)];
+ const string_bytes = body[@sizeOf(TmHdr) + names_bytes.len + async_frame_lens_bytes.len + expected_panic_msgs_bytes.len ..][0..tm_hdr.string_bytes_len];
+
+ const names = std.mem.bytesAsSlice(u32, names_bytes);
+ const async_frame_lens = std.mem.bytesAsSlice(u32, async_frame_lens_bytes);
+ const expected_panic_msgs = std.mem.bytesAsSlice(u32, expected_panic_msgs_bytes);
+ const names_aligned = try arena.alloc(u32, names.len);
+ for (names_aligned, names) |*dest, src| dest.* = src;
+
+ const async_frame_lens_aligned = try arena.alloc(u32, async_frame_lens.len);
+ for (async_frame_lens_aligned, async_frame_lens) |*dest, src| dest.* = src;
+
+ const expected_panic_msgs_aligned = try arena.alloc(u32, expected_panic_msgs.len);
+ for (expected_panic_msgs_aligned, expected_panic_msgs) |*dest, src| dest.* = src;
+
+ prog_node.setEstimatedTotalItems(names.len);
+ metadata = .{
+ .string_bytes = try arena.dupe(u8, string_bytes),
+ .names = names_aligned,
+ .async_frame_lens = async_frame_lens_aligned,
+ .expected_panic_msgs = expected_panic_msgs_aligned,
+ .next_index = 0,
+ .prog_node = prog_node,
+ };
+
+ try requestNextTest(child.stdin.?, &metadata.?, &sub_prog_node);
+ },
+ .test_results => {
+ const md = metadata.?;
+
+ const TrHdr = std.zig.Server.Message.TestResults;
+ const tr_hdr = @ptrCast(*align(1) const TrHdr, body);
+ fail_count += @boolToInt(tr_hdr.flags.fail);
+ skip_count += @boolToInt(tr_hdr.flags.skip);
+ leak_count += @boolToInt(tr_hdr.flags.leak);
+
+ if (tr_hdr.flags.fail or tr_hdr.flags.leak) {
+ const name = std.mem.sliceTo(md.string_bytes[md.names[tr_hdr.index]..], 0);
+ const msg = std.mem.trim(u8, stderr.readableSlice(0), "\n");
+ const label = if (tr_hdr.flags.fail) "failed" else "leaked";
+ if (msg.len > 0) {
+ try self.step.addError("'{s}' {s}: {s}", .{ name, label, msg });
+ } else {
+ try self.step.addError("'{s}' {s}", .{ name, label });
+ }
+ stderr.discard(msg.len);
+ }
+
+ try requestNextTest(child.stdin.?, &metadata.?, &sub_prog_node);
+ },
+ else => {}, // ignore other messages
+ }
+
+ stdout.discard(body.len);
+ }
+
+ if (stderr.readableLength() > 0) {
+ const msg = std.mem.trim(u8, try stderr.toOwnedSlice(), "\n");
+ if (msg.len > 0) try self.step.result_error_msgs.append(arena, msg);
+ }
+
+ // Send EOF to stdin.
+ child.stdin.?.close();
+ child.stdin = null;
+
+ return .{
+ .stdout = &.{},
+ .stderr = &.{},
+ .stdout_null = true,
+ .stderr_null = true,
+ .test_results = .{
+ .test_count = test_count,
+ .fail_count = fail_count,
+ .skip_count = skip_count,
+ .leak_count = leak_count,
+ },
+ };
+}
+
+const TestMetadata = struct {
+ names: []const u32,
+ async_frame_lens: []const u32,
+ expected_panic_msgs: []const u32,
+ string_bytes: []const u8,
+ next_index: u32,
+ prog_node: *std.Progress.Node,
+
+ fn testName(tm: TestMetadata, index: u32) []const u8 {
+ return std.mem.sliceTo(tm.string_bytes[tm.names[index]..], 0);
+ }
+};
+
+fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Progress.Node) !void {
+ while (metadata.next_index < metadata.names.len) {
+ const i = metadata.next_index;
+ metadata.next_index += 1;
+
+ if (metadata.async_frame_lens[i] != 0) continue;
+ if (metadata.expected_panic_msgs[i] != 0) continue;
+
+ const name = metadata.testName(i);
+ if (sub_prog_node.*) |*n| n.end();
+ sub_prog_node.* = metadata.prog_node.start(name, 0);
+
+ try sendRunTestMessage(in, i);
+ return;
+ } else {
+ try sendMessage(in, .exit);
+ }
+}
+
+fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
+ const header: std.zig.Client.Message.Header = .{
+ .tag = tag,
+ .bytes_len = 0,
+ };
+ try file.writeAll(std.mem.asBytes(&header));
+}
+
+fn sendRunTestMessage(file: std.fs.File, index: u32) !void {
+ const header: std.zig.Client.Message.Header = .{
+ .tag = .run_test,
+ .bytes_len = 4,
+ };
+ const full_msg = std.mem.asBytes(&header) ++ std.mem.asBytes(&index);
+ try file.writeAll(full_msg);
+}
+
+fn evalGeneric(self: *RunStep, child: *std.process.Child) !StdIoResult {
+ const arena = self.step.owner.allocator;
+
+ if (self.stdin) |stdin| {
+ child.stdin.?.writeAll(stdin) catch |err| {
+ return self.step.fail("unable to write stdin: {s}", .{@errorName(err)});
+ };
+ child.stdin.?.close();
+ child.stdin = null;
+ }
+
+ // These are not optionals, as a workaround for
+ // https://github.com/ziglang/zig/issues/14783
+ var stdout_bytes: []const u8 = undefined;
+ var stderr_bytes: []const u8 = undefined;
+ var stdout_null = true;
+ var stderr_null = true;
+
+ if (child.stdout) |stdout| {
+ if (child.stderr) |stderr| {
+ var poller = std.io.poll(arena, enum { stdout, stderr }, .{
+ .stdout = stdout,
+ .stderr = stderr,
+ });
+ defer poller.deinit();
+
+ while (try poller.poll()) {
+ if (poller.fifo(.stdout).count > self.max_stdio_size)
+ return error.StdoutStreamTooLong;
+ if (poller.fifo(.stderr).count > self.max_stdio_size)
+ return error.StderrStreamTooLong;
+ }
+
+ stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
+ stderr_bytes = try poller.fifo(.stderr).toOwnedSlice();
+ stdout_null = false;
+ stderr_null = false;
+ } else {
+ stdout_bytes = try stdout.reader().readAllAlloc(arena, self.max_stdio_size);
+ stdout_null = false;
+ }
+ } else if (child.stderr) |stderr| {
+ stderr_bytes = try stderr.reader().readAllAlloc(arena, self.max_stdio_size);
+ stderr_null = false;
+ }
+
+ if (!stderr_null and stderr_bytes.len > 0) {
+ // Treat stderr as an error message.
+ const stderr_is_diagnostic = self.captured_stderr == null and switch (self.stdio) {
+ .check => |checks| !checksContainStderr(checks.items),
+ else => true,
+ };
+ if (stderr_is_diagnostic) {
+ try self.step.result_error_msgs.append(arena, stderr_bytes);
+ }
+ }
+
+ return .{
+ .stdout = stdout_bytes,
+ .stderr = stderr_bytes,
+ .stdout_null = stdout_null,
+ .stderr_null = stderr_null,
+ .test_results = .{},
+ };
}
fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void {
- addPathForDynLibsInternal(&self.step, self.builder, artifact);
-}
-
-/// This should only be used for internal usage, this is called automatically
-/// for the user.
-pub fn addPathForDynLibsInternal(step: *Step, builder: *std.Build, artifact: *CompileStep) void {
+ const b = self.step.owner;
for (artifact.link_objects.items) |link_object| {
switch (link_object) {
.other_step => |other| {
if (other.target.isWindows() and other.isDynamicLibrary()) {
- addPathDirInternal(step, builder, fs.path.dirname(other.getOutputSource().getPath(builder)).?);
- addPathForDynLibsInternal(step, builder, other);
+ addPathDir(self, fs.path.dirname(other.getOutputSource().getPath(b)).?);
+ addPathForDynLibs(self, other);
}
},
else => {},
}
}
}
+
+fn failForeign(
+ self: *RunStep,
+ suggested_flag: []const u8,
+ argv0: []const u8,
+ exe: *CompileStep,
+) error{ MakeFailed, MakeSkipped, OutOfMemory } {
+ switch (self.stdio) {
+ .check, .zig_test => {
+ if (self.skip_foreign_checks)
+ return error.MakeSkipped;
+
+ const b = self.step.owner;
+ const host_name = try b.host.target.zigTriple(b.allocator);
+ const foreign_name = try exe.target.zigTriple(b.allocator);
+
+ return self.step.fail(
+ \\unable to spawn foreign binary '{s}' ({s}) on host system ({s})
+ \\ consider using {s} or enabling skip_foreign_checks in the Run step
+ , .{ argv0, foreign_name, host_name, suggested_flag });
+ },
+ else => {
+ return self.step.fail("unable to spawn foreign binary '{s}'", .{argv0});
+ },
+ }
+}
+
+fn hashStdIo(hh: *std.Build.Cache.HashHelper, stdio: StdIo) void {
+ switch (stdio) {
+ .infer_from_args, .inherit, .zig_test => {},
+ .check => |checks| for (checks.items) |check| {
+ hh.add(@as(std.meta.Tag(StdIo.Check), check));
+ switch (check) {
+ .expect_stderr_exact,
+ .expect_stderr_match,
+ .expect_stdout_exact,
+ .expect_stdout_match,
+ => |s| hh.addBytes(s),
+
+ .expect_term => |term| {
+ hh.add(@as(std.meta.Tag(std.process.Child.Term), term));
+ switch (term) {
+ .Exited => |x| hh.add(x),
+ .Signal, .Stopped, .Unknown => |x| hh.add(x),
+ }
+ },
+ }
+ },
+ }
+}
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 82c39ac2cc..88580a6cbc 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -1,9 +1,77 @@
id: Id,
name: []const u8,
-makeFn: *const fn (self: *Step) anyerror!void,
+owner: *Build,
+makeFn: MakeFn,
+
dependencies: std.ArrayList(*Step),
-loop_flag: bool,
-done_flag: bool,
+/// This field is empty during execution of the user's build script, and
+/// then populated during dependency loop checking in the build runner.
+dependants: std.ArrayListUnmanaged(*Step),
+state: State,
+/// Set this field to declare an upper bound on the number of bytes of memory
+/// the step will take to run. Zero means no limit.
+///
+/// The idea is to annotate steps that might use a high amount of RAM with an
+/// upper bound. For example, perhaps a particular set of unit tests requires 4
+/// GiB of RAM, and those tests will be run under 4 different build
+/// configurations at once. This would potentially require 16 GiB of memory on
+/// the system if all 4 steps executed simultaneously, which could easily be
+/// greater than what is actually available, potentially causing the system to
+/// crash when using `zig build` at the default concurrency level.
+///
+/// This field causes the build runner to do two things:
+/// 1. ulimit child processes, so that they will fail if they would exceed this
+/// memory limit. This serves to enforce that this upper bound value is
+/// correct.
+/// 2. Ensure that the set of concurrent steps at any given time has a total
+/// max_rss value that does not exceed the `max_total_rss` value of the build
+/// runner. This value is configurable on the command line, and defaults to the
+/// total system memory available.
+max_rss: usize,
+
+result_error_msgs: std.ArrayListUnmanaged([]const u8),
+result_error_bundle: std.zig.ErrorBundle,
+result_cached: bool,
+result_duration_ns: ?u64,
+/// 0 means unavailable or not reported.
+result_peak_rss: usize,
+test_results: TestResults,
+
+/// The return address associated with creation of this step, which can be useful
+/// to print along with debugging messages.
+debug_stack_trace: [n_debug_stack_frames]usize,
+
+pub const TestResults = struct {
+ fail_count: u32 = 0,
+ skip_count: u32 = 0,
+ leak_count: u32 = 0,
+ test_count: u32 = 0,
+
+ pub fn isSuccess(tr: TestResults) bool {
+ return tr.fail_count == 0 and tr.leak_count == 0;
+ }
+
+ pub fn passCount(tr: TestResults) u32 {
+ return tr.test_count - tr.fail_count - tr.skip_count;
+ }
+};
+
+pub const MakeFn = *const fn (self: *Step, prog_node: *std.Progress.Node) anyerror!void;
+
+const n_debug_stack_frames = 4;
+
+pub const State = enum {
+ precheck_unstarted,
+ precheck_started,
+ precheck_done,
+ running,
+ dependency_failure,
+ success,
+ failure,
+    /// This state indicates that the step did not complete; however, it also did not fail,
+ /// and it is safe to continue executing its dependencies.
+ skipped,
+};
pub const Id = enum {
top_level,
@@ -17,7 +85,6 @@ pub const Id = enum {
translate_c,
write_file,
run,
- emulatable_run,
check_file,
check_object,
config_header,
@@ -38,7 +105,6 @@ pub const Id = enum {
.translate_c => Build.TranslateCStep,
.write_file => Build.WriteFileStep,
.run => Build.RunStep,
- .emulatable_run => Build.EmulatableRunStep,
.check_file => Build.CheckFileStep,
.check_object => Build.CheckObjectStep,
.config_header => Build.ConfigHeaderStep,
@@ -49,39 +115,99 @@ pub const Id = enum {
}
};
-pub fn init(
+pub const Options = struct {
id: Id,
name: []const u8,
- allocator: Allocator,
- makeFn: *const fn (self: *Step) anyerror!void,
-) Step {
- return Step{
- .id = id,
- .name = allocator.dupe(u8, name) catch @panic("OOM"),
- .makeFn = makeFn,
- .dependencies = std.ArrayList(*Step).init(allocator),
- .loop_flag = false,
- .done_flag = false,
+ owner: *Build,
+ makeFn: MakeFn = makeNoOp,
+ first_ret_addr: ?usize = null,
+ max_rss: usize = 0,
+};
+
+pub fn init(options: Options) Step {
+ const arena = options.owner.allocator;
+
+ var addresses = [1]usize{0} ** n_debug_stack_frames;
+ const first_ret_addr = options.first_ret_addr orelse @returnAddress();
+ var stack_trace = std.builtin.StackTrace{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(first_ret_addr, &stack_trace);
+
+ return .{
+ .id = options.id,
+ .name = arena.dupe(u8, options.name) catch @panic("OOM"),
+ .owner = options.owner,
+ .makeFn = options.makeFn,
+ .dependencies = std.ArrayList(*Step).init(arena),
+ .dependants = .{},
+ .state = .precheck_unstarted,
+ .max_rss = options.max_rss,
+ .debug_stack_trace = addresses,
+ .result_error_msgs = .{},
+ .result_error_bundle = std.zig.ErrorBundle.empty,
+ .result_cached = false,
+ .result_duration_ns = null,
+ .result_peak_rss = 0,
+ .test_results = .{},
};
}
-pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
- return init(id, name, allocator, makeNoOp);
-}
+/// If the Step's `make` function reports `error.MakeFailed`, it indicates the step
+/// has already reported the error. Otherwise, we add a simple error report
+/// here.
+pub fn make(s: *Step, prog_node: *std.Progress.Node) error{ MakeFailed, MakeSkipped }!void {
+ const arena = s.owner.allocator;
-pub fn make(self: *Step) !void {
- if (self.done_flag) return;
+ s.makeFn(s, prog_node) catch |err| switch (err) {
+ error.MakeFailed => return error.MakeFailed,
+ error.MakeSkipped => return error.MakeSkipped,
+ else => {
+ s.result_error_msgs.append(arena, @errorName(err)) catch @panic("OOM");
+ return error.MakeFailed;
+ },
+ };
- try self.makeFn(self);
- self.done_flag = true;
+ if (!s.test_results.isSuccess()) {
+ return error.MakeFailed;
+ }
+
+ if (s.max_rss != 0 and s.result_peak_rss > s.max_rss) {
+ const msg = std.fmt.allocPrint(arena, "memory usage peaked at {d} bytes, exceeding the declared upper bound of {d}", .{
+ s.result_peak_rss, s.max_rss,
+ }) catch @panic("OOM");
+ s.result_error_msgs.append(arena, msg) catch @panic("OOM");
+ return error.MakeFailed;
+ }
}
pub fn dependOn(self: *Step, other: *Step) void {
self.dependencies.append(other) catch @panic("OOM");
}
-fn makeNoOp(self: *Step) anyerror!void {
- _ = self;
+pub fn getStackTrace(s: *Step) std.builtin.StackTrace {
+ const stack_addresses = &s.debug_stack_trace;
+ var len: usize = 0;
+ while (len < n_debug_stack_frames and stack_addresses[len] != 0) {
+ len += 1;
+ }
+ return .{
+ .instruction_addresses = stack_addresses,
+ .index = len,
+ };
+}
+
+fn makeNoOp(step: *Step, prog_node: *std.Progress.Node) anyerror!void {
+ _ = prog_node;
+
+ var all_cached = true;
+
+ for (step.dependencies.items) |dep| {
+ all_cached = all_cached and dep.result_cached;
+ }
+
+ step.result_cached = all_cached;
}
pub fn cast(step: *Step, comptime T: type) ?*T {
@@ -91,7 +217,323 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
return null;
}
+/// For debugging purposes, prints identifying information about this Step.
+pub fn dump(step: *Step) void {
+ std.debug.getStderrMutex().lock();
+ defer std.debug.getStderrMutex().unlock();
+
+ const stderr = std.io.getStdErr();
+ const w = stderr.writer();
+ const tty_config = std.debug.detectTTYConfig(stderr);
+ const debug_info = std.debug.getSelfDebugInfo() catch |err| {
+ w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
+ @errorName(err),
+ }) catch {};
+ return;
+ };
+ const ally = debug_info.allocator;
+ w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
+ std.debug.writeStackTrace(step.getStackTrace(), w, ally, debug_info, tty_config) catch |err| {
+ stderr.writer().print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch {};
+ return;
+ };
+}
+
const Step = @This();
const std = @import("../std.zig");
const Build = std.Build;
const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const builtin = @import("builtin");
+
+pub fn evalChildProcess(s: *Step, argv: []const []const u8) !void {
+ const arena = s.owner.allocator;
+
+ try handleChildProcUnsupported(s, null, argv);
+ try handleVerbose(s.owner, null, argv);
+
+ const result = std.ChildProcess.exec(.{
+ .allocator = arena,
+ .argv = argv,
+ }) catch |err| return s.fail("unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
+
+ if (result.stderr.len > 0) {
+ try s.result_error_msgs.append(arena, result.stderr);
+ }
+
+ try handleChildProcessTerm(s, result.term, null, argv);
+}
+
+pub fn fail(step: *Step, comptime fmt: []const u8, args: anytype) error{ OutOfMemory, MakeFailed } {
+ try step.addError(fmt, args);
+ return error.MakeFailed;
+}
+
+pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
+ const arena = step.owner.allocator;
+ const msg = try std.fmt.allocPrint(arena, fmt, args);
+ try step.result_error_msgs.append(arena, msg);
+}
+
+/// Assumes that argv contains `--listen=-` and that the process being spawned
+/// is the zig compiler - the same version that compiled the build runner.
+pub fn evalZigProcess(
+ s: *Step,
+ argv: []const []const u8,
+ prog_node: *std.Progress.Node,
+) ![]const u8 {
+ assert(argv.len != 0);
+ const b = s.owner;
+ const arena = b.allocator;
+ const gpa = arena;
+
+ try handleChildProcUnsupported(s, null, argv);
+ try handleVerbose(s.owner, null, argv);
+
+ var child = std.ChildProcess.init(argv, arena);
+ child.env_map = b.env_map;
+ child.stdin_behavior = .Pipe;
+ child.stdout_behavior = .Pipe;
+ child.stderr_behavior = .Pipe;
+ child.request_resource_usage_statistics = true;
+
+ child.spawn() catch |err| return s.fail("unable to spawn {s}: {s}", .{
+ argv[0], @errorName(err),
+ });
+ var timer = try std.time.Timer.start();
+
+ var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
+ .stdout = child.stdout.?,
+ .stderr = child.stderr.?,
+ });
+ defer poller.deinit();
+
+ try sendMessage(child.stdin.?, .update);
+ try sendMessage(child.stdin.?, .exit);
+
+ const Header = std.zig.Server.Message.Header;
+ var result: ?[]const u8 = null;
+
+ var node_name: std.ArrayListUnmanaged(u8) = .{};
+ defer node_name.deinit(gpa);
+ var sub_prog_node = prog_node.start("", 0);
+ defer sub_prog_node.end();
+
+ const stdout = poller.fifo(.stdout);
+
+ poll: while (true) {
+ while (stdout.readableLength() < @sizeOf(Header)) {
+ if (!(try poller.poll())) break :poll;
+ }
+ const header = stdout.reader().readStruct(Header) catch unreachable;
+ while (stdout.readableLength() < header.bytes_len) {
+ if (!(try poller.poll())) break :poll;
+ }
+ const body = stdout.readableSliceOfLen(header.bytes_len);
+
+ switch (header.tag) {
+ .zig_version => {
+ if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
+ return s.fail(
+ "zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
+ .{ builtin.zig_version_string, body },
+ );
+ }
+ },
+ .error_bundle => {
+ const EbHdr = std.zig.Server.Message.ErrorBundle;
+ const eb_hdr = @ptrCast(*align(1) const EbHdr, body);
+ const extra_bytes =
+ body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
+ const string_bytes =
+ body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
+ // TODO: use @ptrCast when the compiler supports it
+ const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
+ const extra_array = try arena.alloc(u32, unaligned_extra.len);
+ // TODO: use @memcpy when it supports slices
+ for (extra_array, unaligned_extra) |*dst, src| dst.* = src;
+ s.result_error_bundle = .{
+ .string_bytes = try arena.dupe(u8, string_bytes),
+ .extra = extra_array,
+ };
+ },
+ .progress => {
+ node_name.clearRetainingCapacity();
+ try node_name.appendSlice(gpa, body);
+ sub_prog_node.setName(node_name.items);
+ },
+ .emit_bin_path => {
+ const EbpHdr = std.zig.Server.Message.EmitBinPath;
+ const ebp_hdr = @ptrCast(*align(1) const EbpHdr, body);
+ s.result_cached = ebp_hdr.flags.cache_hit;
+ result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
+ },
+ else => {}, // ignore other messages
+ }
+
+ stdout.discard(body.len);
+ }
+
+ const stderr = poller.fifo(.stderr);
+ if (stderr.readableLength() > 0) {
+ try s.result_error_msgs.append(arena, try stderr.toOwnedSlice());
+ }
+
+ // Send EOF to stdin.
+ child.stdin.?.close();
+ child.stdin = null;
+
+ const term = child.wait() catch |err| {
+ return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(err) });
+ };
+ s.result_duration_ns = timer.read();
+ s.result_peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0;
+
+ // Special handling for CompileStep that is expecting compile errors.
+ if (s.cast(Build.CompileStep)) |compile| switch (term) {
+ .Exited => {
+ // Note that the exit code may be 0 in this case due to the
+ // compiler server protocol.
+ if (compile.expect_errors.len != 0 and s.result_error_bundle.errorMessageCount() > 0) {
+ return error.NeedCompileErrorCheck;
+ }
+ },
+ else => {},
+ };
+
+ try handleChildProcessTerm(s, term, null, argv);
+
+ if (s.result_error_bundle.errorMessageCount() > 0) {
+ return s.fail("the following command failed with {d} compilation errors:\n{s}", .{
+ s.result_error_bundle.errorMessageCount(),
+ try allocPrintCmd(arena, null, argv),
+ });
+ }
+
+ return result orelse return s.fail(
+ "the following command failed to communicate the compilation result:\n{s}",
+ .{try allocPrintCmd(arena, null, argv)},
+ );
+}
+
+fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
+ const header: std.zig.Client.Message.Header = .{
+ .tag = tag,
+ .bytes_len = 0,
+ };
+ try file.writeAll(std.mem.asBytes(&header));
+}
+
+pub fn handleVerbose(
+ b: *Build,
+ opt_cwd: ?[]const u8,
+ argv: []const []const u8,
+) error{OutOfMemory}!void {
+ return handleVerbose2(b, opt_cwd, null, argv);
+}
+
+pub fn handleVerbose2(
+ b: *Build,
+ opt_cwd: ?[]const u8,
+ opt_env: ?*const std.process.EnvMap,
+ argv: []const []const u8,
+) error{OutOfMemory}!void {
+ if (b.verbose) {
+        // The intention of verbose is to print all sub-process command lines to
+ // stderr before spawning them.
+ const text = try allocPrintCmd2(b.allocator, opt_cwd, opt_env, argv);
+ std.debug.print("{s}\n", .{text});
+ }
+}
+
+pub inline fn handleChildProcUnsupported(
+ s: *Step,
+ opt_cwd: ?[]const u8,
+ argv: []const []const u8,
+) error{ OutOfMemory, MakeFailed }!void {
+ if (!std.process.can_spawn) {
+ return s.fail(
+ "unable to execute the following command: host cannot spawn child processes\n{s}",
+ .{try allocPrintCmd(s.owner.allocator, opt_cwd, argv)},
+ );
+ }
+}
+
+pub fn handleChildProcessTerm(
+ s: *Step,
+ term: std.ChildProcess.Term,
+ opt_cwd: ?[]const u8,
+ argv: []const []const u8,
+) error{ MakeFailed, OutOfMemory }!void {
+ const arena = s.owner.allocator;
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ return s.fail(
+ "the following command exited with error code {d}:\n{s}",
+ .{ code, try allocPrintCmd(arena, opt_cwd, argv) },
+ );
+ }
+ },
+ .Signal, .Stopped, .Unknown => {
+ return s.fail(
+ "the following command terminated unexpectedly:\n{s}",
+ .{try allocPrintCmd(arena, opt_cwd, argv)},
+ );
+ },
+ }
+}
+
+pub fn allocPrintCmd(
+ arena: Allocator,
+ opt_cwd: ?[]const u8,
+ argv: []const []const u8,
+) Allocator.Error![]u8 {
+ return allocPrintCmd2(arena, opt_cwd, null, argv);
+}
+
+pub fn allocPrintCmd2(
+ arena: Allocator,
+ opt_cwd: ?[]const u8,
+ opt_env: ?*const std.process.EnvMap,
+ argv: []const []const u8,
+) Allocator.Error![]u8 {
+ var buf: std.ArrayListUnmanaged(u8) = .{};
+ if (opt_cwd) |cwd| try buf.writer(arena).print("cd {s} && ", .{cwd});
+ if (opt_env) |env| {
+ const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
+ var it = env.iterator();
+ while (it.next()) |entry| {
+ const key = entry.key_ptr.*;
+ const value = entry.value_ptr.*;
+ if (process_env_map.get(key)) |process_value| {
+ if (std.mem.eql(u8, value, process_value)) continue;
+ }
+ try buf.writer(arena).print("{s}={s} ", .{ key, value });
+ }
+ }
+ for (argv) |arg| {
+ try buf.writer(arena).print("{s} ", .{arg});
+ }
+ return buf.toOwnedSlice(arena);
+}
+
+pub fn cacheHit(s: *Step, man: *std.Build.Cache.Manifest) !bool {
+ s.result_cached = man.hit() catch |err| return failWithCacheError(s, man, err);
+ return s.result_cached;
+}
+
+fn failWithCacheError(s: *Step, man: *const std.Build.Cache.Manifest, err: anyerror) anyerror {
+ const i = man.failed_file_index orelse return err;
+ const pp = man.files.items[i].prefixed_path orelse return err;
+ const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
+ return s.fail("{s}: {s}/{s}", .{ @errorName(err), prefix, pp.sub_path });
+}
+
+pub fn writeManifest(s: *Step, man: *std.Build.Cache.Manifest) !void {
+ if (s.test_results.isSuccess()) {
+ man.writeManifest() catch |err| {
+ try s.addError("unable to write cache manifest: {s}", .{@errorName(err)});
+ };
+ }
+}
diff --git a/lib/std/Build/TranslateCStep.zig b/lib/std/Build/TranslateCStep.zig
index fb0adfd0ae..0cfd5d85a8 100644
--- a/lib/std/Build/TranslateCStep.zig
+++ b/lib/std/Build/TranslateCStep.zig
@@ -11,7 +11,6 @@ const TranslateCStep = @This();
pub const base_id = .translate_c;
step: Step,
-builder: *std.Build,
source: std.Build.FileSource,
include_dirs: std.ArrayList([]const u8),
c_macros: std.ArrayList([]const u8),
@@ -26,15 +25,19 @@ pub const Options = struct {
optimize: std.builtin.OptimizeMode,
};
-pub fn create(builder: *std.Build, options: Options) *TranslateCStep {
- const self = builder.allocator.create(TranslateCStep) catch @panic("OOM");
- const source = options.source_file.dupe(builder);
+pub fn create(owner: *std.Build, options: Options) *TranslateCStep {
+ const self = owner.allocator.create(TranslateCStep) catch @panic("OOM");
+ const source = options.source_file.dupe(owner);
self.* = TranslateCStep{
- .step = Step.init(.translate_c, "translate-c", builder.allocator, make),
- .builder = builder,
+ .step = Step.init(.{
+ .id = .translate_c,
+ .name = "translate-c",
+ .owner = owner,
+ .makeFn = make,
+ }),
.source = source,
- .include_dirs = std.ArrayList([]const u8).init(builder.allocator),
- .c_macros = std.ArrayList([]const u8).init(builder.allocator),
+ .include_dirs = std.ArrayList([]const u8).init(owner.allocator),
+ .c_macros = std.ArrayList([]const u8).init(owner.allocator),
.out_basename = undefined,
.target = options.target,
.optimize = options.optimize,
@@ -54,7 +57,7 @@ pub const AddExecutableOptions = struct {
/// Creates a step to build an executable from the translated source.
pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *CompileStep {
- return self.builder.addExecutable(.{
+ return self.step.owner.addExecutable(.{
.root_source_file = .{ .generated = &self.output_file },
.name = options.name orelse "translated_c",
.version = options.version,
@@ -65,43 +68,49 @@ pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *Comp
}
pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void {
- self.include_dirs.append(self.builder.dupePath(include_dir)) catch @panic("OOM");
+ self.include_dirs.append(self.step.owner.dupePath(include_dir)) catch @panic("OOM");
}
pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) *CheckFileStep {
- return CheckFileStep.create(self.builder, .{ .generated = &self.output_file }, self.builder.dupeStrings(expected_matches));
+ return CheckFileStep.create(
+ self.step.owner,
+ .{ .generated = &self.output_file },
+ .{ .expected_matches = expected_matches },
+ );
}
/// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call.
pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void {
- const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
+ const macro = std.Build.constructCMacro(self.step.owner.allocator, name, value);
self.c_macros.append(macro) catch @panic("OOM");
}
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void {
- self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
+ self.c_macros.append(self.step.owner.dupe(name_and_value)) catch @panic("OOM");
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ const b = step.owner;
const self = @fieldParentPtr(TranslateCStep, "step", step);
- var argv_list = std.ArrayList([]const u8).init(self.builder.allocator);
- try argv_list.append(self.builder.zig_exe);
+ var argv_list = std.ArrayList([]const u8).init(b.allocator);
+ try argv_list.append(b.zig_exe);
try argv_list.append("translate-c");
try argv_list.append("-lc");
try argv_list.append("--enable-cache");
+ try argv_list.append("--listen=-");
if (!self.target.isNative()) {
try argv_list.append("-target");
- try argv_list.append(try self.target.zigTriple(self.builder.allocator));
+ try argv_list.append(try self.target.zigTriple(b.allocator));
}
switch (self.optimize) {
.Debug => {}, // Skip since it's the default.
- else => try argv_list.append(self.builder.fmt("-O{s}", .{@tagName(self.optimize)})),
+ else => try argv_list.append(b.fmt("-O{s}", .{@tagName(self.optimize)})),
}
for (self.include_dirs.items) |include_dir| {
@@ -114,16 +123,15 @@ fn make(step: *Step) !void {
try argv_list.append(c_macro);
}
- try argv_list.append(self.source.getPath(self.builder));
+ try argv_list.append(self.source.getPath(b));
- const output_path_nl = try self.builder.execFromStep(argv_list.items, &self.step);
- const output_path = mem.trimRight(u8, output_path_nl, "\r\n");
+ const output_path = try step.evalZigProcess(argv_list.items, prog_node);
self.out_basename = fs.path.basename(output_path);
const output_dir = fs.path.dirname(output_path).?;
self.output_file.path = try fs.path.join(
- self.builder.allocator,
+ b.allocator,
&[_][]const u8{ output_dir, self.out_basename },
);
}
diff --git a/lib/std/Build/WriteFileStep.zig b/lib/std/Build/WriteFileStep.zig
index 3a30aba190..dee79af5be 100644
--- a/lib/std/Build/WriteFileStep.zig
+++ b/lib/std/Build/WriteFileStep.zig
@@ -10,11 +10,11 @@
//! control.
step: Step,
-builder: *std.Build,
/// The elements here are pointers because we need stable pointers for the
/// GeneratedFile field.
files: std.ArrayListUnmanaged(*File),
output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
+generated_directory: std.Build.GeneratedFile,
pub const base_id = .write_file;
@@ -34,24 +34,34 @@ pub const Contents = union(enum) {
copy: std.Build.FileSource,
};
-pub fn init(builder: *std.Build) WriteFileStep {
- return .{
- .builder = builder,
- .step = Step.init(.write_file, "writefile", builder.allocator, make),
+pub fn create(owner: *std.Build) *WriteFileStep {
+ const wf = owner.allocator.create(WriteFileStep) catch @panic("OOM");
+ wf.* = .{
+ .step = Step.init(.{
+ .id = .write_file,
+ .name = "WriteFile",
+ .owner = owner,
+ .makeFn = make,
+ }),
.files = .{},
.output_source_files = .{},
+ .generated_directory = .{ .step = &wf.step },
};
+ return wf;
}
pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void {
- const gpa = wf.builder.allocator;
+ const b = wf.step.owner;
+ const gpa = b.allocator;
const file = gpa.create(File) catch @panic("OOM");
file.* = .{
.generated_file = .{ .step = &wf.step },
- .sub_path = wf.builder.dupePath(sub_path),
- .contents = .{ .bytes = wf.builder.dupe(bytes) },
+ .sub_path = b.dupePath(sub_path),
+ .contents = .{ .bytes = b.dupe(bytes) },
};
wf.files.append(gpa, file) catch @panic("OOM");
+
+ wf.maybeUpdateName();
}
/// Place the file into the generated directory within the local cache,
@@ -62,14 +72,18 @@ pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void {
/// required sub-path exists.
/// This is the option expected to be used most commonly with `addCopyFile`.
pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void {
- const gpa = wf.builder.allocator;
+ const b = wf.step.owner;
+ const gpa = b.allocator;
const file = gpa.create(File) catch @panic("OOM");
file.* = .{
.generated_file = .{ .step = &wf.step },
- .sub_path = wf.builder.dupePath(sub_path),
+ .sub_path = b.dupePath(sub_path),
.contents = .{ .copy = source },
};
wf.files.append(gpa, file) catch @panic("OOM");
+
+ wf.maybeUpdateName();
+ source.addStepDependencies(&wf.step);
}
/// A path relative to the package root.
@@ -79,10 +93,26 @@ pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: [
/// those changes to version control.
/// A file added this way is not available with `getFileSource`.
pub fn addCopyFileToSource(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void {
- wf.output_source_files.append(wf.builder.allocator, .{
+ const b = wf.step.owner;
+ wf.output_source_files.append(b.allocator, .{
.contents = .{ .copy = source },
.sub_path = sub_path,
}) catch @panic("OOM");
+ source.addStepDependencies(&wf.step);
+}
+
+/// A path relative to the package root.
+/// Be careful with this because it updates source files. This should not be
+/// used as part of the normal build process, but as a utility occasionally
+/// run by a developer with intent to modify source files and then commit
+/// those changes to version control.
+/// A file added this way is not available with `getFileSource`.
+pub fn addBytesToSource(wf: *WriteFileStep, bytes: []const u8, sub_path: []const u8) void {
+ const b = wf.step.owner;
+ wf.output_source_files.append(b.allocator, .{
+ .contents = .{ .bytes = bytes },
+ .sub_path = sub_path,
+ }) catch @panic("OOM");
}
/// Gets a file source for the given sub_path. If the file does not exist, returns `null`.
@@ -95,21 +125,63 @@ pub fn getFileSource(wf: *WriteFileStep, sub_path: []const u8) ?std.Build.FileSo
return null;
}
-fn make(step: *Step) !void {
+/// Returns a `FileSource` representing the base directory that contains all the
+/// files from this `WriteFileStep`.
+pub fn getDirectorySource(wf: *WriteFileStep) std.Build.FileSource {
+ return .{ .generated = &wf.generated_directory };
+}
+
+fn maybeUpdateName(wf: *WriteFileStep) void {
+ if (wf.files.items.len == 1) {
+ // First time adding a file; update name.
+ if (std.mem.eql(u8, wf.step.name, "WriteFile")) {
+ wf.step.name = wf.step.owner.fmt("WriteFile {s}", .{wf.files.items[0].sub_path});
+ }
+ }
+}
+
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const b = step.owner;
const wf = @fieldParentPtr(WriteFileStep, "step", step);
// Writing to source files is kind of an extra capability of this
// WriteFileStep - arguably it should be a different step. But anyway here
// it is, it happens unconditionally and does not interact with the other
// files here.
+ var any_miss = false;
for (wf.output_source_files.items) |output_source_file| {
- const basename = fs.path.basename(output_source_file.sub_path);
if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
- var dir = try wf.builder.build_root.handle.makeOpenPath(dirname, .{});
- defer dir.close();
- try writeFile(wf, dir, output_source_file.contents, basename);
- } else {
- try writeFile(wf, wf.builder.build_root.handle, output_source_file.contents, basename);
+ b.build_root.handle.makePath(dirname) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.build_root, dirname, @errorName(err),
+ });
+ };
+ }
+ switch (output_source_file.contents) {
+ .bytes => |bytes| {
+ b.build_root.handle.writeFile(output_source_file.sub_path, bytes) catch |err| {
+ return step.fail("unable to write file '{}{s}': {s}", .{
+ b.build_root, output_source_file.sub_path, @errorName(err),
+ });
+ };
+ any_miss = true;
+ },
+ .copy => |file_source| {
+ const source_path = file_source.getPath(b);
+ const prev_status = fs.Dir.updateFile(
+ fs.cwd(),
+ source_path,
+ b.build_root.handle,
+ output_source_file.sub_path,
+ .{},
+ ) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{}{s}': {s}", .{
+ source_path, b.build_root, output_source_file.sub_path, @errorName(err),
+ });
+ };
+ any_miss = any_miss or prev_status == .stale;
+ },
}
}
@@ -120,7 +192,7 @@ fn make(step: *Step) !void {
// If, for example, a hard-coded path was used as the location to put WriteFileStep
// files, then two WriteFileSteps executing in parallel might clobber each other.
- var man = wf.builder.cache.obtain();
+ var man = b.cache.obtain();
defer man.deinit();
// Random bytes to make WriteFileStep unique. Refresh this with
@@ -135,76 +207,82 @@ fn make(step: *Step) !void {
man.hash.addBytes(bytes);
},
.copy => |file_source| {
- _ = try man.addFile(file_source.getPath(wf.builder), null);
+ _ = try man.addFile(file_source.getPath(b), null);
},
}
}
- if (man.hit() catch |err| failWithCacheError(man, err)) {
- // Cache hit, skip writing file data.
+ if (try step.cacheHit(&man)) {
const digest = man.final();
for (wf.files.items) |file| {
- file.generated_file.path = try wf.builder.cache_root.join(
- wf.builder.allocator,
- &.{ "o", &digest, file.sub_path },
- );
+ file.generated_file.path = try b.cache_root.join(b.allocator, &.{
+ "o", &digest, file.sub_path,
+ });
}
+ wf.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest });
return;
}
const digest = man.final();
const cache_path = "o" ++ fs.path.sep_str ++ digest;
- var cache_dir = wf.builder.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ cache_path, @errorName(err) });
- return err;
+ wf.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest });
+
+ var cache_dir = b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, cache_path, @errorName(err),
+ });
};
defer cache_dir.close();
for (wf.files.items) |file| {
- const basename = fs.path.basename(file.sub_path);
if (fs.path.dirname(file.sub_path)) |dirname| {
- var dir = try wf.builder.cache_root.handle.makeOpenPath(dirname, .{});
- defer dir.close();
- try writeFile(wf, dir, file.contents, basename);
- } else {
- try writeFile(wf, cache_dir, file.contents, basename);
+ cache_dir.makePath(dirname) catch |err| {
+ return step.fail("unable to make path '{}{s}{c}{s}': {s}", .{
+ b.cache_root, cache_path, fs.path.sep, dirname, @errorName(err),
+ });
+ };
+ }
+ switch (file.contents) {
+ .bytes => |bytes| {
+ cache_dir.writeFile(file.sub_path, bytes) catch |err| {
+ return step.fail("unable to write file '{}{s}{c}{s}': {s}", .{
+ b.cache_root, cache_path, fs.path.sep, file.sub_path, @errorName(err),
+ });
+ };
+ },
+ .copy => |file_source| {
+ const source_path = file_source.getPath(b);
+ const prev_status = fs.Dir.updateFile(
+ fs.cwd(),
+ source_path,
+ cache_dir,
+ file.sub_path,
+ .{},
+ ) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{}{s}{c}{s}': {s}", .{
+ source_path,
+ b.cache_root,
+ cache_path,
+ fs.path.sep,
+ file.sub_path,
+ @errorName(err),
+ });
+ };
+                // At this point the step is already considered a cache miss,
+                // even though it is kind of a partial cache hit since individual
+                // file copies may be avoided. For now, this information is
+                // discarded.
+ _ = prev_status;
+ },
}
- file.generated_file.path = try wf.builder.cache_root.join(
- wf.builder.allocator,
- &.{ cache_path, file.sub_path },
- );
+ file.generated_file.path = try b.cache_root.join(b.allocator, &.{
+ cache_path, file.sub_path,
+ });
}
- try man.writeManifest();
-}
-
-fn writeFile(wf: *WriteFileStep, dir: fs.Dir, contents: Contents, basename: []const u8) !void {
- // TODO after landing concurrency PR, improve error reporting here
- switch (contents) {
- .bytes => |bytes| return dir.writeFile(basename, bytes),
- .copy => |file_source| {
- const source_path = file_source.getPath(wf.builder);
- const prev_status = try fs.Dir.updateFile(fs.cwd(), source_path, dir, basename, .{});
- _ = prev_status; // TODO logging (affected by open PR regarding concurrency)
- },
- }
-}
-
-/// TODO consolidate this with the same function in RunStep?
-/// Also properly deal with concurrency (see open PR)
-fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
- const i = man.failed_file_index orelse failWithSimpleError(err);
- const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
- const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
- std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
- std.process.exit(1);
-}
-
-fn failWithSimpleError(err: anyerror) noreturn {
- std.debug.print("{s}\n", .{@errorName(err)});
- std.process.exit(1);
+ try step.writeManifest(&man);
}
const std = @import("../std.zig");
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index f0b0e2dbd5..dba7166398 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -126,6 +126,21 @@ pub const Node = struct {
}
}
+ /// Thread-safe.
+ pub fn setName(self: *Node, name: []const u8) void {
+ const progress = self.context;
+ progress.update_mutex.lock();
+ defer progress.update_mutex.unlock();
+ self.name = name;
+ if (self.parent) |parent| {
+ @atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+ if (parent.parent) |grand_parent| {
+ @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .Release);
+ }
+ if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer);
+ }
+ }
+
/// Thread-safe. 0 means unknown.
pub fn setEstimatedTotalItems(self: *Node, count: usize) void {
@atomicStore(usize, &self.unprotected_estimated_total_items, count, .Monotonic);
@@ -174,16 +189,20 @@ pub fn maybeRefresh(self: *Progress) void {
if (self.timer) |*timer| {
if (!self.update_mutex.tryLock()) return;
defer self.update_mutex.unlock();
- const now = timer.read();
- if (now < self.initial_delay_ns) return;
- // TODO I have observed this to happen sometimes. I think we need to follow Rust's
- // lead and guarantee monotonically increasing times in the std lib itself.
- if (now < self.prev_refresh_timestamp) return;
- if (now - self.prev_refresh_timestamp < self.refresh_rate_ns) return;
- return self.refreshWithHeldLock();
+ maybeRefreshWithHeldLock(self, timer);
}
}
+fn maybeRefreshWithHeldLock(self: *Progress, timer: *std.time.Timer) void {
+ const now = timer.read();
+ if (now < self.initial_delay_ns) return;
+ // TODO I have observed this to happen sometimes. I think we need to follow Rust's
+ // lead and guarantee monotonically increasing times in the std lib itself.
+ if (now < self.prev_refresh_timestamp) return;
+ if (now - self.prev_refresh_timestamp < self.refresh_rate_ns) return;
+ return self.refreshWithHeldLock();
+}
+
/// Updates the terminal and resets `self.next_refresh_timestamp`. Thread-safe.
pub fn refresh(self: *Progress) void {
if (!self.update_mutex.tryLock()) return;
@@ -192,32 +211,28 @@ pub fn refresh(self: *Progress) void {
return self.refreshWithHeldLock();
}
-fn refreshWithHeldLock(self: *Progress) void {
- const is_dumb = !self.supports_ansi_escape_codes and !self.is_windows_terminal;
- if (is_dumb and self.dont_print_on_dumb) return;
-
- const file = self.terminal orelse return;
-
- var end: usize = 0;
- if (self.columns_written > 0) {
+fn clearWithHeldLock(p: *Progress, end_ptr: *usize) void {
+ const file = p.terminal orelse return;
+ var end = end_ptr.*;
+ if (p.columns_written > 0) {
// restore the cursor position by moving the cursor
// `columns_written` cells to the left, then clear the rest of the
// line
- if (self.supports_ansi_escape_codes) {
- end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[{d}D", .{self.columns_written}) catch unreachable).len;
- end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len;
+ if (p.supports_ansi_escape_codes) {
+ end += (std.fmt.bufPrint(p.output_buffer[end..], "\x1b[{d}D", .{p.columns_written}) catch unreachable).len;
+ end += (std.fmt.bufPrint(p.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len;
} else if (builtin.os.tag == .windows) winapi: {
- std.debug.assert(self.is_windows_terminal);
+ std.debug.assert(p.is_windows_terminal);
var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) {
// stop trying to write to this file
- self.terminal = null;
+ p.terminal = null;
break :winapi;
}
var cursor_pos = windows.COORD{
- .X = info.dwCursorPosition.X - @intCast(windows.SHORT, self.columns_written),
+ .X = info.dwCursorPosition.X - @intCast(windows.SHORT, p.columns_written),
.Y = info.dwCursorPosition.Y,
};
@@ -235,7 +250,7 @@ fn refreshWithHeldLock(self: *Progress) void {
&written,
) != windows.TRUE) {
// stop trying to write to this file
- self.terminal = null;
+ p.terminal = null;
break :winapi;
}
if (windows.kernel32.FillConsoleOutputCharacterW(
@@ -246,22 +261,33 @@ fn refreshWithHeldLock(self: *Progress) void {
&written,
) != windows.TRUE) {
// stop trying to write to this file
- self.terminal = null;
+ p.terminal = null;
break :winapi;
}
if (windows.kernel32.SetConsoleCursorPosition(file.handle, cursor_pos) != windows.TRUE) {
// stop trying to write to this file
- self.terminal = null;
+ p.terminal = null;
break :winapi;
}
} else {
// we are in a "dumb" terminal like in acme or writing to a file
- self.output_buffer[end] = '\n';
+ p.output_buffer[end] = '\n';
end += 1;
}
- self.columns_written = 0;
+ p.columns_written = 0;
}
+ end_ptr.* = end;
+}
+
+fn refreshWithHeldLock(self: *Progress) void {
+ const is_dumb = !self.supports_ansi_escape_codes and !self.is_windows_terminal;
+ if (is_dumb and self.dont_print_on_dumb) return;
+
+ const file = self.terminal orelse return;
+
+ var end: usize = 0;
+ clearWithHeldLock(self, &end);
if (!self.done) {
var need_ellipse = false;
@@ -318,6 +344,26 @@ pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void {
self.columns_written = 0;
}
+/// Allows the caller to freely write to stderr until unlock_stderr() is called.
+/// During the lock, the progress information is cleared from the terminal.
+pub fn lock_stderr(p: *Progress) void {
+ p.update_mutex.lock();
+ if (p.terminal) |file| {
+ var end: usize = 0;
+ clearWithHeldLock(p, &end);
+ _ = file.write(p.output_buffer[0..end]) catch {
+ // stop trying to write to this file
+ p.terminal = null;
+ };
+ }
+ std.debug.getStderrMutex().lock();
+}
+
+pub fn unlock_stderr(p: *Progress) void {
+ std.debug.getStderrMutex().unlock();
+ p.update_mutex.unlock();
+}
+
fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void {
if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| {
const amt = written.len;
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 8004f94d7f..e3345e4a42 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -16,6 +16,8 @@ pub const Mutex = @import("Thread/Mutex.zig");
pub const Semaphore = @import("Thread/Semaphore.zig");
pub const Condition = @import("Thread/Condition.zig");
pub const RwLock = @import("Thread/RwLock.zig");
+pub const Pool = @import("Thread/Pool.zig");
+pub const WaitGroup = @import("Thread/WaitGroup.zig");
pub const use_pthreads = target.os.tag != .windows and target.os.tag != .wasi and builtin.link_libc;
const is_gnu = target.abi.isGnu();
@@ -945,6 +947,7 @@ const LinuxThreadImpl = struct {
// map all memory needed without read/write permissions
// to avoid committing the whole region right away
+ // anonymous mapping ensures file descriptor limits are not exceeded
const mapped = os.mmap(
null,
map_bytes,
@@ -956,6 +959,8 @@ const LinuxThreadImpl = struct {
error.MemoryMappingNotSupported => unreachable,
error.AccessDenied => unreachable,
error.PermissionDenied => unreachable,
+ error.ProcessFdQuotaExceeded => unreachable,
+ error.SystemFdQuotaExceeded => unreachable,
else => |e| return e,
};
assert(mapped.len >= map_bytes);
diff --git a/src/ThreadPool.zig b/lib/std/Thread/Pool.zig
similarity index 86%
rename from src/ThreadPool.zig
rename to lib/std/Thread/Pool.zig
index fde5ed27db..ed1a4dc052 100644
--- a/src/ThreadPool.zig
+++ b/lib/std/Thread/Pool.zig
@@ -1,6 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
-const ThreadPool = @This();
+const Pool = @This();
const WaitGroup = @import("WaitGroup.zig");
mutex: std.Thread.Mutex = .{},
@@ -17,7 +17,14 @@ const Runnable = struct {
const RunProto = *const fn (*Runnable) void;
-pub fn init(pool: *ThreadPool, allocator: std.mem.Allocator) !void {
+pub const Options = struct {
+ allocator: std.mem.Allocator,
+ n_jobs: ?u32 = null,
+};
+
+pub fn init(pool: *Pool, options: Options) !void {
+ const allocator = options.allocator;
+
pool.* = .{
.allocator = allocator,
.threads = &[_]std.Thread{},
@@ -27,7 +34,7 @@ pub fn init(pool: *ThreadPool, allocator: std.mem.Allocator) !void {
return;
}
- const thread_count = std.math.max(1, std.Thread.getCpuCount() catch 1);
+ const thread_count = options.n_jobs orelse @max(1, std.Thread.getCpuCount() catch 1);
pool.threads = try allocator.alloc(std.Thread, thread_count);
errdefer allocator.free(pool.threads);
@@ -41,12 +48,12 @@ pub fn init(pool: *ThreadPool, allocator: std.mem.Allocator) !void {
}
}
-pub fn deinit(pool: *ThreadPool) void {
+pub fn deinit(pool: *Pool) void {
pool.join(pool.threads.len); // kill and join all threads.
pool.* = undefined;
}
-fn join(pool: *ThreadPool, spawned: usize) void {
+fn join(pool: *Pool, spawned: usize) void {
if (builtin.single_threaded) {
return;
}
@@ -69,7 +76,7 @@ fn join(pool: *ThreadPool, spawned: usize) void {
pool.allocator.free(pool.threads);
}
-pub fn spawn(pool: *ThreadPool, comptime func: anytype, args: anytype) !void {
+pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void {
if (builtin.single_threaded) {
@call(.auto, func, args);
return;
@@ -78,7 +85,7 @@ pub fn spawn(pool: *ThreadPool, comptime func: anytype, args: anytype) !void {
const Args = @TypeOf(args);
const Closure = struct {
arguments: Args,
- pool: *ThreadPool,
+ pool: *Pool,
run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
fn runFn(runnable: *Runnable) void {
@@ -112,7 +119,7 @@ pub fn spawn(pool: *ThreadPool, comptime func: anytype, args: anytype) !void {
pool.cond.signal();
}
-fn worker(pool: *ThreadPool) void {
+fn worker(pool: *Pool) void {
pool.mutex.lock();
defer pool.mutex.unlock();
@@ -135,7 +142,7 @@ fn worker(pool: *ThreadPool) void {
}
}
-pub fn waitAndWork(pool: *ThreadPool, wait_group: *WaitGroup) void {
+pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void {
while (!wait_group.isDone()) {
if (blk: {
pool.mutex.lock();
diff --git a/src/WaitGroup.zig b/lib/std/Thread/WaitGroup.zig
similarity index 100%
rename from src/WaitGroup.zig
rename to lib/std/Thread/WaitGroup.zig
diff --git a/lib/std/Uri.zig b/lib/std/Uri.zig
index 015b6c34f6..eb6311a19b 100644
--- a/lib/std/Uri.zig
+++ b/lib/std/Uri.zig
@@ -16,15 +16,27 @@ fragment: ?[]const u8,
/// Applies URI encoding and replaces all reserved characters with their respective %XX code.
pub fn escapeString(allocator: std.mem.Allocator, input: []const u8) error{OutOfMemory}![]const u8 {
+ return escapeStringWithFn(allocator, input, isUnreserved);
+}
+
+pub fn escapePath(allocator: std.mem.Allocator, input: []const u8) error{OutOfMemory}![]const u8 {
+ return escapeStringWithFn(allocator, input, isPathChar);
+}
+
+pub fn escapeQuery(allocator: std.mem.Allocator, input: []const u8) error{OutOfMemory}![]const u8 {
+ return escapeStringWithFn(allocator, input, isQueryChar);
+}
+
+pub fn escapeStringWithFn(allocator: std.mem.Allocator, input: []const u8, comptime keepUnescaped: fn (c: u8) bool) std.mem.Allocator.Error![]const u8 {
var outsize: usize = 0;
for (input) |c| {
- outsize += if (isUnreserved(c)) @as(usize, 1) else 3;
+ outsize += if (keepUnescaped(c)) @as(usize, 1) else 3;
}
var output = try allocator.alloc(u8, outsize);
var outptr: usize = 0;
for (input) |c| {
- if (isUnreserved(c)) {
+ if (keepUnescaped(c)) {
output[outptr] = c;
outptr += 1;
} else {
@@ -94,13 +106,14 @@ pub fn unescapeString(allocator: std.mem.Allocator, input: []const u8) error{Out
pub const ParseError = error{ UnexpectedCharacter, InvalidFormat, InvalidPort };
-/// Parses the URI or returns an error.
+/// Parses the URI or returns an error. This function is not compliant, but is required to parse
+/// some forms of URIs in the wild, such as HTTP Location headers.
/// The return value will contain unescaped strings pointing into the
/// original `text`. Each component that is provided, will be non-`null`.
-pub fn parse(text: []const u8) ParseError!Uri {
+pub fn parseWithoutScheme(text: []const u8) ParseError!Uri {
var reader = SliceReader{ .slice = text };
var uri = Uri{
- .scheme = reader.readWhile(isSchemeChar),
+ .scheme = "",
.user = null,
.password = null,
.host = null,
@@ -110,14 +123,6 @@ pub fn parse(text: []const u8) ParseError!Uri {
.fragment = null,
};
- // after the scheme, a ':' must appear
- if (reader.get()) |c| {
- if (c != ':')
- return error.UnexpectedCharacter;
- } else {
- return error.InvalidFormat;
- }
-
if (reader.peekPrefix("//")) { // authority part
std.debug.assert(reader.get().? == '/');
std.debug.assert(reader.get().? == '/');
@@ -179,6 +184,76 @@ pub fn parse(text: []const u8) ParseError!Uri {
return uri;
}
+/// Parses the URI or returns an error.
+/// The return value will contain unescaped strings pointing into the
+/// original `text`. Each component that is provided will be non-`null`.
+pub fn parse(text: []const u8) ParseError!Uri {
+ var reader = SliceReader{ .slice = text };
+ const scheme = reader.readWhile(isSchemeChar);
+
+ // after the scheme, a ':' must appear
+ if (reader.get()) |c| {
+ if (c != ':')
+ return error.UnexpectedCharacter;
+ } else {
+ return error.InvalidFormat;
+ }
+
+ var uri = try parseWithoutScheme(reader.readUntilEof());
+ uri.scheme = scheme;
+
+ return uri;
+}
+
+/// Resolves a URI against a base URI, conforming to RFC 3986, Section 5.
+/// arena owns any memory allocated by this function.
+pub fn resolve(Base: Uri, R: Uri, strict: bool, arena: std.mem.Allocator) !Uri {
+ var T: Uri = undefined;
+
+ if (R.scheme.len > 0 and !((!strict) and (std.mem.eql(u8, R.scheme, Base.scheme)))) {
+ T.scheme = R.scheme;
+ T.user = R.user;
+ T.host = R.host;
+ T.port = R.port;
+ T.path = try std.fs.path.resolvePosix(arena, &.{ "/", R.path });
+ T.query = R.query;
+ } else {
+ if (R.host) |host| {
+ T.user = R.user;
+ T.host = host;
+ T.port = R.port;
+ T.path = R.path;
+ T.path = try std.fs.path.resolvePosix(arena, &.{ "/", R.path });
+ T.query = R.query;
+ } else {
+ if (R.path.len == 0) {
+ T.path = Base.path;
+ if (R.query) |query| {
+ T.query = query;
+ } else {
+ T.query = Base.query;
+ }
+ } else {
+ if (R.path[0] == '/') {
+ T.path = try std.fs.path.resolvePosix(arena, &.{ "/", R.path });
+ } else {
+ T.path = try std.fs.path.resolvePosix(arena, &.{ "/", Base.path, R.path });
+ }
+ T.query = R.query;
+ }
+
+ T.user = Base.user;
+ T.host = Base.host;
+ T.port = Base.port;
+ }
+ T.scheme = Base.scheme;
+ }
+
+ T.fragment = R.fragment;
+
+ return T;
+}
+
const SliceReader = struct {
const Self = @This();
@@ -284,6 +359,14 @@ fn isPathSeparator(c: u8) bool {
};
}
+fn isPathChar(c: u8) bool {
+ return isUnreserved(c) or isSubLimit(c) or c == '/' or c == ':' or c == '@';
+}
+
+fn isQueryChar(c: u8) bool {
+ return isPathChar(c) or c == '?';
+}
+
fn isQuerySeparator(c: u8) bool {
return switch (c) {
'#' => true,
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 13aad53019..fb11e2e755 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -141,11 +141,21 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return cloned;
}
- /// Insert `item` at index `n` by moving `list[n .. list.len]` to make room.
+ /// Insert `item` at index `n`. Moves `list[n .. list.len]` to higher indices to make room.
+    /// If `n` is equal to the length of the list, this operation is equivalent to append.
/// This operation is O(N).
/// Invalidates pointers if additional memory is needed.
pub fn insert(self: *Self, n: usize, item: T) Allocator.Error!void {
try self.ensureUnusedCapacity(1);
+ self.insertAssumeCapacity(n, item);
+ }
+
+ /// Insert `item` at index `n`. Moves `list[n .. list.len]` to higher indices to make room.
+    /// If `n` is equal to the length of the list, this operation is equivalent to append.
+ /// This operation is O(N).
+ /// Asserts that there is enough capacity for the new item.
+ pub fn insertAssumeCapacity(self: *Self, n: usize, item: T) void {
+ assert(self.items.len < self.capacity);
self.items.len += 1;
mem.copyBackwards(T, self.items[n + 1 .. self.items.len], self.items[n .. self.items.len - 1]);
@@ -609,12 +619,21 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
return cloned;
}
- /// Insert `item` at index `n`. Moves `list[n .. list.len]`
- /// to higher indices to make room.
+ /// Insert `item` at index `n`. Moves `list[n .. list.len]` to higher indices to make room.
+    /// If `n` is equal to the length of the list, this operation is equivalent to append.
/// This operation is O(N).
/// Invalidates pointers if additional memory is needed.
pub fn insert(self: *Self, allocator: Allocator, n: usize, item: T) Allocator.Error!void {
try self.ensureUnusedCapacity(allocator, 1);
+ self.insertAssumeCapacity(n, item);
+ }
+
+ /// Insert `item` at index `n`. Moves `list[n .. list.len]` to higher indices to make room.
+    /// If `n` is equal to the length of the list, this operation is equivalent to append.
+ /// This operation is O(N).
+ /// Asserts that there is enough capacity for the new item.
+ pub fn insertAssumeCapacity(self: *Self, n: usize, item: T) void {
+ assert(self.items.len < self.capacity);
self.items.len += 1;
mem.copyBackwards(T, self.items[n + 1 .. self.items.len], self.items[n .. self.items.len - 1]);
@@ -1309,9 +1328,9 @@ test "std.ArrayList/ArrayListUnmanaged.insert" {
var list = ArrayList(i32).init(a);
defer list.deinit();
- try list.append(1);
+ try list.insert(0, 1);
try list.append(2);
- try list.append(3);
+ try list.insert(2, 3);
try list.insert(0, 5);
try testing.expect(list.items[0] == 5);
try testing.expect(list.items[1] == 1);
@@ -1322,9 +1341,9 @@ test "std.ArrayList/ArrayListUnmanaged.insert" {
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
- try list.append(a, 1);
+ try list.insert(a, 0, 1);
try list.append(a, 2);
- try list.append(a, 3);
+ try list.insert(a, 2, 3);
try list.insert(a, 0, 5);
try testing.expect(list.items[0] == 5);
try testing.expect(list.items[1] == 1);
diff --git a/lib/std/c.zig b/lib/std/c.zig
index 9fc3b1d57e..a0b65c31c8 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -153,7 +153,8 @@ pub extern "c" fn linkat(oldfd: c.fd_t, oldpath: [*:0]const u8, newfd: c.fd_t, n
pub extern "c" fn unlink(path: [*:0]const u8) c_int;
pub extern "c" fn unlinkat(dirfd: c.fd_t, path: [*:0]const u8, flags: c_uint) c_int;
pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;
-pub extern "c" fn waitpid(pid: c.pid_t, stat_loc: ?*c_int, options: c_int) c.pid_t;
+pub extern "c" fn waitpid(pid: c.pid_t, status: ?*c_int, options: c_int) c.pid_t;
+pub extern "c" fn wait4(pid: c.pid_t, status: ?*c_int, options: c_int, ru: ?*c.rusage) c.pid_t;
pub extern "c" fn fork() c_int;
pub extern "c" fn access(path: [*:0]const u8, mode: c_uint) c_int;
pub extern "c" fn faccessat(dirfd: c.fd_t, path: [*:0]const u8, mode: c_uint, flags: c_uint) c_int;
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index 9c5ac1e93a..75267cc171 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -8,6 +8,7 @@ const iovec_const = std.os.iovec_const;
pub const aarch64 = @import("darwin/aarch64.zig");
pub const x86_64 = @import("darwin/x86_64.zig");
+pub const cssm = @import("darwin/cssm.zig");
const arch_bits = switch (native_arch) {
.aarch64 => @import("darwin/aarch64.zig"),
@@ -2179,6 +2180,14 @@ pub fn getKernError(err: kern_return_t) KernE {
return @intToEnum(KernE, @truncate(u32, @intCast(usize, err)));
}
+pub fn unexpectedKernError(err: KernE) std.os.UnexpectedError {
+ if (std.os.unexpected_error_tracing) {
+ std.debug.print("unexpected errno: {d}\n", .{@enumToInt(err)});
+ std.debug.dumpCurrentStackTrace(null);
+ }
+ return error.Unexpected;
+}
+
/// Kernel return values
pub const KernE = enum(u32) {
SUCCESS = 0,
@@ -3085,3 +3094,471 @@ pub const PT_DENY_ATTACH = 31;
pub const caddr_t = ?[*]u8;
pub extern "c" fn ptrace(request: c_int, pid: pid_t, addr: caddr_t, data: c_int) c_int;
+
+pub const MachError = error{
+ /// Not enough permissions held to perform the requested kernel
+ /// call.
+ PermissionDenied,
+} || std.os.UnexpectedError;
+
+pub const MachTask = extern struct {
+ port: mach_port_name_t,
+
+ pub fn isValid(self: MachTask) bool {
+ return self.port != TASK_NULL;
+ }
+
+ pub fn pidForTask(self: MachTask) MachError!std.os.pid_t {
+ var pid: std.os.pid_t = undefined;
+ switch (getKernError(pid_for_task(self.port, &pid))) {
+ .SUCCESS => return pid,
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub fn allocatePort(self: MachTask, right: MACH_PORT_RIGHT) MachError!MachTask {
+ var out_port: mach_port_name_t = undefined;
+ switch (getKernError(mach_port_allocate(
+ self.port,
+ @enumToInt(right),
+ &out_port,
+ ))) {
+ .SUCCESS => return .{ .port = out_port },
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub fn deallocatePort(self: MachTask, port: MachTask) void {
+ _ = getKernError(mach_port_deallocate(self.port, port.port));
+ }
+
+ pub fn insertRight(self: MachTask, port: MachTask, msg: MACH_MSG_TYPE) !void {
+ switch (getKernError(mach_port_insert_right(
+ self.port,
+ port.port,
+ port.port,
+ @enumToInt(msg),
+ ))) {
+ .SUCCESS => return,
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub const PortInfo = struct {
+ mask: exception_mask_t,
+ masks: [EXC_TYPES_COUNT]exception_mask_t,
+ ports: [EXC_TYPES_COUNT]mach_port_t,
+ behaviors: [EXC_TYPES_COUNT]exception_behavior_t,
+ flavors: [EXC_TYPES_COUNT]thread_state_flavor_t,
+ count: mach_msg_type_number_t,
+ };
+
+ pub fn getExceptionPorts(self: MachTask, mask: exception_mask_t) !PortInfo {
+ var info = PortInfo{
+ .mask = mask,
+ .masks = undefined,
+ .ports = undefined,
+ .behaviors = undefined,
+ .flavors = undefined,
+ .count = 0,
+ };
+ info.count = info.ports.len / @sizeOf(mach_port_t);
+
+ switch (getKernError(task_get_exception_ports(
+ self.port,
+ info.mask,
+ &info.masks,
+ &info.count,
+ &info.ports,
+ &info.behaviors,
+ &info.flavors,
+ ))) {
+ .SUCCESS => return info,
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub fn setExceptionPorts(
+ self: MachTask,
+ mask: exception_mask_t,
+ new_port: MachTask,
+ behavior: exception_behavior_t,
+ new_flavor: thread_state_flavor_t,
+ ) !void {
+ switch (getKernError(task_set_exception_ports(
+ self.port,
+ mask,
+ new_port.port,
+ behavior,
+ new_flavor,
+ ))) {
+ .SUCCESS => return,
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub const RegionInfo = struct {
+ pub const Tag = enum {
+ basic,
+ extended,
+ top,
+ };
+
+ base_addr: u64,
+ tag: Tag,
+ info: union {
+ basic: vm_region_basic_info_64,
+ extended: vm_region_extended_info,
+ top: vm_region_top_info,
+ },
+ };
+
+ pub fn getRegionInfo(
+ task: MachTask,
+ address: u64,
+ len: usize,
+ tag: RegionInfo.Tag,
+ ) MachError!RegionInfo {
+ var info: RegionInfo = .{
+ .base_addr = address,
+ .tag = tag,
+ .info = undefined,
+ };
+ switch (tag) {
+ .basic => info.info = .{ .basic = undefined },
+ .extended => info.info = .{ .extended = undefined },
+ .top => info.info = .{ .top = undefined },
+ }
+ var base_len: mach_vm_size_t = if (len == 1) 2 else len;
+ var objname: mach_port_t = undefined;
+ var count: mach_msg_type_number_t = switch (tag) {
+ .basic => VM_REGION_BASIC_INFO_COUNT,
+ .extended => VM_REGION_EXTENDED_INFO_COUNT,
+ .top => VM_REGION_TOP_INFO_COUNT,
+ };
+ switch (getKernError(mach_vm_region(
+ task.port,
+ &info.base_addr,
+ &base_len,
+ switch (tag) {
+ .basic => VM_REGION_BASIC_INFO_64,
+ .extended => VM_REGION_EXTENDED_INFO,
+ .top => VM_REGION_TOP_INFO,
+ },
+ switch (tag) {
+ .basic => @ptrCast(vm_region_info_t, &info.info.basic),
+ .extended => @ptrCast(vm_region_info_t, &info.info.extended),
+ .top => @ptrCast(vm_region_info_t, &info.info.top),
+ },
+ &count,
+ &objname,
+ ))) {
+ .SUCCESS => return info,
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub const RegionSubmapInfo = struct {
+ pub const Tag = enum {
+ short,
+ full,
+ };
+
+ tag: Tag,
+ base_addr: u64,
+ info: union {
+ short: vm_region_submap_short_info_64,
+ full: vm_region_submap_info_64,
+ },
+ };
+
+ pub fn getRegionSubmapInfo(
+ task: MachTask,
+ address: u64,
+ len: usize,
+ nesting_depth: u32,
+ tag: RegionSubmapInfo.Tag,
+ ) MachError!RegionSubmapInfo {
+ var info: RegionSubmapInfo = .{
+ .base_addr = address,
+ .tag = tag,
+ .info = undefined,
+ };
+ switch (tag) {
+ .short => info.info = .{ .short = undefined },
+ .full => info.info = .{ .full = undefined },
+ }
+ var nesting = nesting_depth;
+ var base_len: mach_vm_size_t = if (len == 1) 2 else len;
+ var count: mach_msg_type_number_t = switch (tag) {
+ .short => VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
+ .full => VM_REGION_SUBMAP_INFO_COUNT_64,
+ };
+ switch (getKernError(mach_vm_region_recurse(
+ task.port,
+ &info.base_addr,
+ &base_len,
+ &nesting,
+ switch (tag) {
+ .short => @ptrCast(vm_region_recurse_info_t, &info.info.short),
+ .full => @ptrCast(vm_region_recurse_info_t, &info.info.full),
+ },
+ &count,
+ ))) {
+ .SUCCESS => return info,
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub fn getCurrProtection(task: MachTask, address: u64, len: usize) MachError!vm_prot_t {
+ const info = try task.getRegionSubmapInfo(address, len, 0, .short);
+ return info.info.short.protection;
+ }
+
+ pub fn setMaxProtection(task: MachTask, address: u64, len: usize, prot: vm_prot_t) MachError!void {
+ return task.setProtectionImpl(address, len, true, prot);
+ }
+
+ pub fn setCurrProtection(task: MachTask, address: u64, len: usize, prot: vm_prot_t) MachError!void {
+ return task.setProtectionImpl(address, len, false, prot);
+ }
+
+ fn setProtectionImpl(task: MachTask, address: u64, len: usize, set_max: bool, prot: vm_prot_t) MachError!void {
+ switch (getKernError(mach_vm_protect(task.port, address, len, @boolToInt(set_max), prot))) {
+ .SUCCESS => return,
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+    /// Will write to VM even if current protection attributes specifically prohibit
+    /// us from doing so, by temporarily setting the protection level to one that includes
+    /// the VM_PROT_COPY variant, and resetting it after a successful or unsuccessful write.
+ pub fn writeMemProtected(task: MachTask, address: u64, buf: []const u8, arch: std.Target.Cpu.Arch) MachError!usize {
+ const curr_prot = try task.getCurrProtection(address, buf.len);
+ try task.setCurrProtection(
+ address,
+ buf.len,
+ PROT.READ | PROT.WRITE | PROT.COPY,
+ );
+ defer {
+ task.setCurrProtection(address, buf.len, curr_prot) catch {};
+ }
+ return task.writeMem(address, buf, arch);
+ }
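// A minimal sketch of using the helper documented above to patch code in a task
// whose pages are currently read/execute only. `task` and `addr` are placeholder
// values, not part of this patch; the bytes below encode a single aarch64 `nop`.
fn patchWithNop(task: MachTask, addr: u64) !void {
    const nop = [_]u8{ 0x1f, 0x20, 0x03, 0xd5 };
    _ = try task.writeMemProtected(addr, &nop, .aarch64);
}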
+
+ pub fn writeMem(task: MachTask, address: u64, buf: []const u8, arch: std.Target.Cpu.Arch) MachError!usize {
+ const count = buf.len;
+ var total_written: usize = 0;
+ var curr_addr = address;
+ const page_size = try getPageSize(task); // TODO we probably can assume value here
+ var out_buf = buf[0..];
+
+ while (total_written < count) {
+ const curr_size = maxBytesLeftInPage(page_size, curr_addr, count - total_written);
+ switch (getKernError(mach_vm_write(
+ task.port,
+ curr_addr,
+ @ptrToInt(out_buf.ptr),
+ @intCast(mach_msg_type_number_t, curr_size),
+ ))) {
+ .SUCCESS => {},
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+
+ switch (arch) {
+ .aarch64 => {
+ var mattr_value: vm_machine_attribute_val_t = MATTR_VAL_CACHE_FLUSH;
+ switch (getKernError(vm_machine_attribute(
+ task.port,
+ curr_addr,
+ curr_size,
+ MATTR_CACHE,
+ &mattr_value,
+ ))) {
+ .SUCCESS => {},
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ },
+ .x86_64 => {},
+ else => unreachable,
+ }
+
+ out_buf = out_buf[curr_size..];
+ total_written += curr_size;
+ curr_addr += curr_size;
+ }
+
+ return total_written;
+ }
+
+ pub fn readMem(task: MachTask, address: u64, buf: []u8) MachError!usize {
+ const count = buf.len;
+ var total_read: usize = 0;
+ var curr_addr = address;
+ const page_size = try getPageSize(task); // TODO we probably can assume value here
+ var out_buf = buf[0..];
+
+ while (total_read < count) {
+ const curr_size = maxBytesLeftInPage(page_size, curr_addr, count - total_read);
+ var curr_bytes_read: mach_msg_type_number_t = 0;
+ var vm_memory: vm_offset_t = undefined;
+ switch (getKernError(mach_vm_read(task.port, curr_addr, curr_size, &vm_memory, &curr_bytes_read))) {
+ .SUCCESS => {},
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+
+ @memcpy(out_buf[0..].ptr, @intToPtr([*]const u8, vm_memory), curr_bytes_read);
+ _ = vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
+
+ out_buf = out_buf[curr_bytes_read..];
+ curr_addr += curr_bytes_read;
+ total_read += curr_bytes_read;
+ }
+
+ return total_read;
+ }
+
+ fn maxBytesLeftInPage(page_size: usize, address: u64, count: usize) usize {
+ var left = count;
+ if (page_size > 0) {
+ const page_offset = address % page_size;
+ const bytes_left_in_page = page_size - page_offset;
+ if (count > bytes_left_in_page) {
+ left = bytes_left_in_page;
+ }
+ }
+ return left;
+ }
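// Worked example of the clamp above (page size and address are made-up values,
// and this assumes `std` is imported at the top of this file): with
// page_size = 0x4000 and address = 0x100003ff0, page_offset = 0x3ff0, so
// bytes_left_in_page = 0x10 and a 64-byte request is clamped to 16 bytes.
test "maxBytesLeftInPage clamps to the page boundary" {
    try std.testing.expectEqual(@as(usize, 16), maxBytesLeftInPage(0x4000, 0x100003ff0, 64));
    try std.testing.expectEqual(@as(usize, 64), maxBytesLeftInPage(0x4000, 0x100000000, 64));
}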
+
+ fn getPageSize(task: MachTask) MachError!usize {
+ if (task.isValid()) {
+ var info_count = TASK_VM_INFO_COUNT;
+ var vm_info: task_vm_info_data_t = undefined;
+ switch (getKernError(task_info(
+ task.port,
+ TASK_VM_INFO,
+ @ptrCast(task_info_t, &vm_info),
+ &info_count,
+ ))) {
+ .SUCCESS => return @intCast(usize, vm_info.page_size),
+ else => {},
+ }
+ }
+ var page_size: vm_size_t = undefined;
+ switch (getKernError(_host_page_size(mach_host_self(), &page_size))) {
+ .SUCCESS => return page_size,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub fn basicTaskInfo(task: MachTask) MachError!mach_task_basic_info {
+ var info: mach_task_basic_info = undefined;
+ var count = MACH_TASK_BASIC_INFO_COUNT;
+ switch (getKernError(task_info(
+ task.port,
+ MACH_TASK_BASIC_INFO,
+ @ptrCast(task_info_t, &info),
+ &count,
+ ))) {
+ .SUCCESS => return info,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub fn @"resume"(task: MachTask) MachError!void {
+ switch (getKernError(task_resume(task.port))) {
+ .SUCCESS => {},
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub fn @"suspend"(task: MachTask) MachError!void {
+ switch (getKernError(task_suspend(task.port))) {
+ .SUCCESS => {},
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ const ThreadList = struct {
+ buf: []MachThread,
+
+ pub fn deinit(list: ThreadList) void {
+ const self_task = machTaskForSelf();
+ _ = vm_deallocate(
+ self_task.port,
+ @ptrToInt(list.buf.ptr),
+ @intCast(vm_size_t, list.buf.len * @sizeOf(mach_port_t)),
+ );
+ }
+ };
+
+ pub fn getThreads(task: MachTask) MachError!ThreadList {
+ var thread_list: mach_port_array_t = undefined;
+ var thread_count: mach_msg_type_number_t = undefined;
+ switch (getKernError(task_threads(task.port, &thread_list, &thread_count))) {
+ .SUCCESS => return ThreadList{ .buf = @ptrCast([*]MachThread, thread_list)[0..thread_count] },
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+};
+
+pub const MachThread = extern struct {
+ port: mach_port_t,
+
+ pub fn isValid(thread: MachThread) bool {
+ return thread.port != THREAD_NULL;
+ }
+
+ pub fn getBasicInfo(thread: MachThread) MachError!thread_basic_info {
+ var info: thread_basic_info = undefined;
+ var count = THREAD_BASIC_INFO_COUNT;
+ switch (getKernError(thread_info(
+ thread.port,
+ THREAD_BASIC_INFO,
+ @ptrCast(thread_info_t, &info),
+ &count,
+ ))) {
+ .SUCCESS => return info,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+
+ pub fn getIdentifierInfo(thread: MachThread) MachError!thread_identifier_info {
+ var info: thread_identifier_info = undefined;
+ var count = THREAD_IDENTIFIER_INFO_COUNT;
+ switch (getKernError(thread_info(
+ thread.port,
+ THREAD_IDENTIFIER_INFO,
+ @ptrCast(thread_info_t, &info),
+ &count,
+ ))) {
+ .SUCCESS => return info,
+ else => |err| return unexpectedKernError(err),
+ }
+ }
+};
+
+pub fn machTaskForPid(pid: std.os.pid_t) MachError!MachTask {
+ var port: mach_port_name_t = undefined;
+ switch (getKernError(task_for_pid(mach_task_self(), pid, &port))) {
+ .SUCCESS => {},
+ .FAILURE => return error.PermissionDenied,
+ else => |err| return unexpectedKernError(err),
+ }
+ return MachTask{ .port = port };
+}
+
+pub fn machTaskForSelf() MachTask {
+ return .{ .port = mach_task_self() };
+}
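// A short end-to-end sketch of the task API added above: attach to a process by
// pid and read a few bytes of its memory. The pid and address are placeholders,
// and on macOS task_for_pid additionally requires appropriate entitlements or
// root privileges.
fn dumpRemoteBytes(pid: std.os.pid_t, address: u64) ![16]u8 {
    const task = try machTaskForPid(pid);
    var buf: [16]u8 = undefined;
    _ = try task.readMem(address, &buf);
    return buf;
}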
diff --git a/lib/std/os/darwin/cssm.zig b/lib/std/c/darwin/cssm.zig
similarity index 100%
rename from lib/std/os/darwin/cssm.zig
rename to lib/std/c/darwin/cssm.zig
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index dba92ab998..3748ca6877 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -17,10 +17,12 @@ const Os = std.builtin.Os;
const TailQueue = std.TailQueue;
const maxInt = std.math.maxInt;
const assert = std.debug.assert;
+const is_darwin = builtin.target.isDarwin();
pub const ChildProcess = struct {
pub const Id = switch (builtin.os.tag) {
.windows => windows.HANDLE,
+ .wasi => void,
else => os.pid_t,
};
@@ -70,6 +72,43 @@ pub const ChildProcess = struct {
/// Darwin-only. Start child process in suspended state as if SIGSTOP was sent.
start_suspended: bool = false,
+ /// Set to true to obtain rusage information for the child process.
+ /// Depending on the target platform and implementation status, the
+ /// requested statistics may or may not be available. If they are
+ /// available, then the `resource_usage_statistics` field will be populated
+ /// after calling `wait`.
+ /// On Linux, this obtains rusage statistics from wait4().
+ request_resource_usage_statistics: bool = false,
+
+ /// This is available after calling wait if
+ /// `request_resource_usage_statistics` was set to `true` before calling
+ /// `spawn`.
+ resource_usage_statistics: ResourceUsageStatistics = .{},
+
+ pub const ResourceUsageStatistics = struct {
+ rusage: @TypeOf(rusage_init) = rusage_init,
+
+ /// Returns the peak resident set size of the child process, in bytes,
+ /// if available.
+ pub inline fn getMaxRss(rus: ResourceUsageStatistics) ?usize {
+ switch (builtin.os.tag) {
+ .linux => {
+ if (rus.rusage) |ru| {
+ return @intCast(usize, ru.maxrss) * 1024;
+ } else {
+ return null;
+ }
+ },
+ else => return null,
+ }
+ }
+
+ const rusage_init = switch (builtin.os.tag) {
+ .linux => @as(?std.os.rusage, null),
+ else => {},
+ };
+ };
+
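// A minimal sketch of the rusage plumbing described above: request statistics
// before spawning, then read the peak RSS after wait(). The argv value is
// illustrative only; on platforms where rusage is not wired up, getMaxRss()
// returns null.
const std = @import("std");

fn peakRssOfTrue(allocator: std.mem.Allocator) !?usize {
    var child = std.ChildProcess.init(&[_][]const u8{"/usr/bin/true"}, allocator);
    child.request_resource_usage_statistics = true;
    try child.spawn();
    _ = try child.wait();
    return child.resource_usage_statistics.getMaxRss();
}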
pub const Arg0Expand = os.Arg0Expand;
pub const SpawnError = error{
@@ -90,8 +129,7 @@ pub const ChildProcess = struct {
os.SetIdError ||
os.ChangeCurDirError ||
windows.CreateProcessError ||
- windows.WaitForSingleObjectError ||
- os.posix_spawn.Error;
+ windows.WaitForSingleObjectError;
pub const Term = union(enum) {
Exited: u8,
@@ -143,10 +181,6 @@ pub const ChildProcess = struct {
@compileError("the target operating system cannot spawn processes");
}
- if (comptime builtin.target.isDarwin()) {
- return self.spawnMacos();
- }
-
if (builtin.os.tag == .windows) {
return self.spawnWindows();
} else {
@@ -337,10 +371,16 @@ pub const ChildProcess = struct {
}
fn waitUnwrapped(self: *ChildProcess) !void {
- const res: os.WaitPidResult = if (comptime builtin.target.isDarwin())
- try os.posix_spawn.waitpid(self.id, 0)
- else
- os.waitpid(self.id, 0);
+ const res: os.WaitPidResult = res: {
+ if (builtin.os.tag == .linux and self.request_resource_usage_statistics) {
+ var ru: std.os.rusage = undefined;
+ const res = os.wait4(self.id, 0, &ru);
+ self.resource_usage_statistics.rusage = ru;
+ break :res res;
+ }
+
+ break :res os.waitpid(self.id, 0);
+ };
const status = res.status;
self.cleanupStreams();
self.handleWaitResult(status);
@@ -416,121 +456,6 @@ pub const ChildProcess = struct {
Term{ .Unknown = status };
}
- fn spawnMacos(self: *ChildProcess) SpawnError!void {
- const pipe_flags = if (io.is_async) os.O.NONBLOCK else 0;
- const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined;
- errdefer if (self.stdin_behavior == StdIo.Pipe) destroyPipe(stdin_pipe);
-
- const stdout_pipe = if (self.stdout_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined;
- errdefer if (self.stdout_behavior == StdIo.Pipe) destroyPipe(stdout_pipe);
-
- const stderr_pipe = if (self.stderr_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined;
- errdefer if (self.stderr_behavior == StdIo.Pipe) destroyPipe(stderr_pipe);
-
- const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
- const dev_null_fd = if (any_ignore)
- os.openZ("/dev/null", os.O.RDWR, 0) catch |err| switch (err) {
- error.PathAlreadyExists => unreachable,
- error.NoSpaceLeft => unreachable,
- error.FileTooBig => unreachable,
- error.DeviceBusy => unreachable,
- error.FileLocksNotSupported => unreachable,
- error.BadPathName => unreachable, // Windows-only
- error.InvalidHandle => unreachable, // WASI-only
- error.WouldBlock => unreachable,
- else => |e| return e,
- }
- else
- undefined;
- defer if (any_ignore) os.close(dev_null_fd);
-
- var attr = try os.posix_spawn.Attr.init();
- defer attr.deinit();
- var flags: u16 = os.darwin.POSIX_SPAWN_SETSIGDEF | os.darwin.POSIX_SPAWN_SETSIGMASK;
- if (self.disable_aslr) {
- flags |= os.darwin._POSIX_SPAWN_DISABLE_ASLR;
- }
- if (self.start_suspended) {
- flags |= os.darwin.POSIX_SPAWN_START_SUSPENDED;
- }
- try attr.set(flags);
-
- var actions = try os.posix_spawn.Actions.init();
- defer actions.deinit();
-
- try setUpChildIoPosixSpawn(self.stdin_behavior, &actions, stdin_pipe, os.STDIN_FILENO, dev_null_fd);
- try setUpChildIoPosixSpawn(self.stdout_behavior, &actions, stdout_pipe, os.STDOUT_FILENO, dev_null_fd);
- try setUpChildIoPosixSpawn(self.stderr_behavior, &actions, stderr_pipe, os.STDERR_FILENO, dev_null_fd);
-
- if (self.cwd_dir) |cwd| {
- try actions.fchdir(cwd.fd);
- } else if (self.cwd) |cwd| {
- try actions.chdir(cwd);
- }
-
- var arena_allocator = std.heap.ArenaAllocator.init(self.allocator);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
- const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null);
- for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
-
- const envp = if (self.env_map) |env_map| m: {
- const envp_buf = try createNullDelimitedEnvMap(arena, env_map);
- break :m envp_buf.ptr;
- } else std.c.environ;
-
- const pid = try os.posix_spawn.spawnp(self.argv[0], actions, attr, argv_buf, envp);
-
- if (self.stdin_behavior == StdIo.Pipe) {
- self.stdin = File{ .handle = stdin_pipe[1] };
- } else {
- self.stdin = null;
- }
- if (self.stdout_behavior == StdIo.Pipe) {
- self.stdout = File{ .handle = stdout_pipe[0] };
- } else {
- self.stdout = null;
- }
- if (self.stderr_behavior == StdIo.Pipe) {
- self.stderr = File{ .handle = stderr_pipe[0] };
- } else {
- self.stderr = null;
- }
-
- self.id = pid;
- self.term = null;
-
- if (self.stdin_behavior == StdIo.Pipe) {
- os.close(stdin_pipe[0]);
- }
- if (self.stdout_behavior == StdIo.Pipe) {
- os.close(stdout_pipe[1]);
- }
- if (self.stderr_behavior == StdIo.Pipe) {
- os.close(stderr_pipe[1]);
- }
- }
-
- fn setUpChildIoPosixSpawn(
- stdio: StdIo,
- actions: *os.posix_spawn.Actions,
- pipe_fd: [2]i32,
- std_fileno: i32,
- dev_null_fd: i32,
- ) !void {
- switch (stdio) {
- .Pipe => {
- const idx: usize = if (std_fileno == 0) 0 else 1;
- try actions.dup2(pipe_fd[idx], std_fileno);
- try actions.close(pipe_fd[1 - idx]);
- },
- .Close => try actions.close(std_fileno),
- .Inherit => {},
- .Ignore => try actions.dup2(dev_null_fd, std_fileno),
- }
- }
-
fn spawnPosix(self: *ChildProcess) SpawnError!void {
const pipe_flags = if (io.is_async) os.O.NONBLOCK else 0;
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined;
diff --git a/lib/std/crypto.zig b/lib/std/crypto.zig
index f46e7b1022..9b995480aa 100644
--- a/lib/std/crypto.zig
+++ b/lib/std/crypto.zig
@@ -1,3 +1,5 @@
+const root = @import("root");
+
/// Authenticated Encryption with Associated Data
pub const aead = struct {
pub const aegis = struct {
@@ -66,6 +68,11 @@ pub const dh = struct {
pub const X25519 = @import("crypto/25519/x25519.zig").X25519;
};
+/// Key Encapsulation Mechanisms.
+pub const kem = struct {
+ pub const kyber_d00 = @import("crypto/kyber_d00.zig");
+};
+
/// Elliptic-curve arithmetic.
pub const ecc = struct {
pub const Curve25519 = @import("crypto/25519/curve25519.zig").Curve25519;
@@ -183,6 +190,27 @@ pub const errors = @import("crypto/errors.zig");
pub const tls = @import("crypto/tls.zig");
pub const Certificate = @import("crypto/Certificate.zig");
+/// Side-channel mitigations.
+pub const SideChannelsMitigations = enum {
+ /// No additional side-channel mitigations are applied.
+ /// This is the fastest mode.
+ none,
+ /// The `basic` mode protects against most practical attacks, provided that the
+ /// application implements proper defenses against brute-force attacks.
+ /// It offers a good balance between performance and security.
+ basic,
+ /// The `medium` mode offers increased resilience against side-channel attacks,
+ /// making most attacks impractical even on shared/low-latency environments.
+ /// This is the default mode.
+ medium,
+ /// The `full` mode offers the highest level of protection against side-channel attacks.
+ /// Note that this doesn't cover all possible attacks (especially power analysis or
+ /// thread-local attacks such as cachebleed), and that the performance impact is significant.
+ full,
+};
+
+pub const default_side_channels_mitigations = .medium;
+
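// Sketch of how an application might pick a different mitigation level. This
// assumes the root-level `std_options` override mechanism and the
// `side_channels_mitigations` option name read by the software AES code later
// in this change, so treat it as illustrative rather than a guaranteed API.
const std = @import("std");

pub const std_options = struct {
    pub const side_channels_mitigations: std.crypto.SideChannelsMitigations = .full;
};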
test {
_ = aead.aegis.Aegis128L;
_ = aead.aegis.Aegis256;
@@ -217,6 +245,8 @@ test {
_ = dh.X25519;
+ _ = kem.kyber_d00;
+
_ = ecc.Curve25519;
_ = ecc.Edwards25519;
_ = ecc.P256;
diff --git a/lib/std/crypto/25519/field.zig b/lib/std/crypto/25519/field.zig
index 66a50bee70..1885f9286e 100644
--- a/lib/std/crypto/25519/field.zig
+++ b/lib/std/crypto/25519/field.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const builtin = @import("builtin");
const crypto = std.crypto;
const readIntLittle = std.mem.readIntLittle;
const writeIntLittle = std.mem.writeIntLittle;
@@ -6,6 +7,12 @@ const writeIntLittle = std.mem.writeIntLittle;
const NonCanonicalError = crypto.errors.NonCanonicalError;
const NotSquareError = crypto.errors.NotSquareError;
+// Inline conditionally, when it can result in large code generation.
+const bloaty_inline = switch (builtin.mode) {
+ .ReleaseSafe, .ReleaseFast => .Inline,
+ .Debug, .ReleaseSmall => .Unspecified,
+};
+
pub const Fe = struct {
limbs: [5]u64,
@@ -264,7 +271,7 @@ pub const Fe = struct {
}
/// Multiply two field elements
- pub inline fn mul(a: Fe, b: Fe) Fe {
+ pub fn mul(a: Fe, b: Fe) callconv(bloaty_inline) Fe {
var ax: [5]u128 = undefined;
var bx: [5]u128 = undefined;
var a19: [5]u128 = undefined;
diff --git a/lib/std/crypto/aes/armcrypto.zig b/lib/std/crypto/aes/armcrypto.zig
index 3f4faf1b14..a6574c372a 100644
--- a/lib/std/crypto/aes/armcrypto.zig
+++ b/lib/std/crypto/aes/armcrypto.zig
@@ -32,62 +32,54 @@ pub const Block = struct {
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
return Block{
- .repr = asm (
+ .repr = (asm (
\\ mov %[out].16b, %[in].16b
\\ aese %[out].16b, %[zero].16b
\\ aesmc %[out].16b, %[out].16b
- \\ eor %[out].16b, %[out].16b, %[rk].16b
: [out] "=&x" (-> BlockVec),
: [in] "x" (block.repr),
- [rk] "x" (round_key.repr),
[zero] "x" (zero),
- ),
+ )) ^ round_key.repr,
};
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
return Block{
- .repr = asm (
+ .repr = (asm (
\\ mov %[out].16b, %[in].16b
\\ aese %[out].16b, %[zero].16b
- \\ eor %[out].16b, %[out].16b, %[rk].16b
: [out] "=&x" (-> BlockVec),
: [in] "x" (block.repr),
- [rk] "x" (round_key.repr),
[zero] "x" (zero),
- ),
+ )) ^ round_key.repr,
};
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, inv_round_key: Block) Block {
return Block{
- .repr = asm (
+ .repr = (asm (
\\ mov %[out].16b, %[in].16b
\\ aesd %[out].16b, %[zero].16b
\\ aesimc %[out].16b, %[out].16b
- \\ eor %[out].16b, %[out].16b, %[rk].16b
: [out] "=&x" (-> BlockVec),
: [in] "x" (block.repr),
- [rk] "x" (inv_round_key.repr),
[zero] "x" (zero),
- ),
+ )) ^ inv_round_key.repr,
};
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, inv_round_key: Block) Block {
return Block{
- .repr = asm (
+ .repr = (asm (
\\ mov %[out].16b, %[in].16b
\\ aesd %[out].16b, %[zero].16b
- \\ eor %[out].16b, %[out].16b, %[rk].16b
: [out] "=&x" (-> BlockVec),
: [in] "x" (block.repr),
- [rk] "x" (inv_round_key.repr),
[zero] "x" (zero),
- ),
+ )) ^ inv_round_key.repr,
};
}
diff --git a/lib/std/crypto/aes/soft.zig b/lib/std/crypto/aes/soft.zig
index d8bd3d4ac0..7a8e7ff0ec 100644
--- a/lib/std/crypto/aes/soft.zig
+++ b/lib/std/crypto/aes/soft.zig
@@ -1,11 +1,11 @@
-// Based on Go stdlib implementation
-
const std = @import("../../std.zig");
const math = std.math;
const mem = std.mem;
const BlockVec = [4]u32;
+const side_channels_mitigations = std.options.side_channels_mitigations;
+
/// A single AES block.
pub const Block = struct {
pub const block_length: usize = 16;
@@ -15,20 +15,20 @@ pub const Block = struct {
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
- const s0 = mem.readIntBig(u32, bytes[0..4]);
- const s1 = mem.readIntBig(u32, bytes[4..8]);
- const s2 = mem.readIntBig(u32, bytes[8..12]);
- const s3 = mem.readIntBig(u32, bytes[12..16]);
+ const s0 = mem.readIntLittle(u32, bytes[0..4]);
+ const s1 = mem.readIntLittle(u32, bytes[4..8]);
+ const s2 = mem.readIntLittle(u32, bytes[8..12]);
+ const s3 = mem.readIntLittle(u32, bytes[12..16]);
return Block{ .repr = BlockVec{ s0, s1, s2, s3 } };
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
var bytes: [16]u8 = undefined;
- mem.writeIntBig(u32, bytes[0..4], block.repr[0]);
- mem.writeIntBig(u32, bytes[4..8], block.repr[1]);
- mem.writeIntBig(u32, bytes[8..12], block.repr[2]);
- mem.writeIntBig(u32, bytes[12..16], block.repr[3]);
+ mem.writeIntLittle(u32, bytes[0..4], block.repr[0]);
+ mem.writeIntLittle(u32, bytes[4..8], block.repr[1]);
+ mem.writeIntLittle(u32, bytes[8..12], block.repr[2]);
+ mem.writeIntLittle(u32, bytes[12..16], block.repr[3]);
return bytes;
}
@@ -50,32 +50,93 @@ pub const Block = struct {
const s2 = block.repr[2];
const s3 = block.repr[3];
- const t0 = round_key.repr[0] ^ table_encrypt[0][@truncate(u8, s0 >> 24)] ^ table_encrypt[1][@truncate(u8, s1 >> 16)] ^ table_encrypt[2][@truncate(u8, s2 >> 8)] ^ table_encrypt[3][@truncate(u8, s3)];
- const t1 = round_key.repr[1] ^ table_encrypt[0][@truncate(u8, s1 >> 24)] ^ table_encrypt[1][@truncate(u8, s2 >> 16)] ^ table_encrypt[2][@truncate(u8, s3 >> 8)] ^ table_encrypt[3][@truncate(u8, s0)];
- const t2 = round_key.repr[2] ^ table_encrypt[0][@truncate(u8, s2 >> 24)] ^ table_encrypt[1][@truncate(u8, s3 >> 16)] ^ table_encrypt[2][@truncate(u8, s0 >> 8)] ^ table_encrypt[3][@truncate(u8, s1)];
- const t3 = round_key.repr[3] ^ table_encrypt[0][@truncate(u8, s3 >> 24)] ^ table_encrypt[1][@truncate(u8, s0 >> 16)] ^ table_encrypt[2][@truncate(u8, s1 >> 8)] ^ table_encrypt[3][@truncate(u8, s2)];
+ var x: [4]u32 = undefined;
+ x = table_lookup(&table_encrypt, @truncate(u8, s0), @truncate(u8, s1 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 24));
+ var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = table_lookup(&table_encrypt, @truncate(u8, s1), @truncate(u8, s2 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 24));
+ var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = table_lookup(&table_encrypt, @truncate(u8, s2), @truncate(u8, s3 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 24));
+ var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = table_lookup(&table_encrypt, @truncate(u8, s3), @truncate(u8, s0 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 24));
+ var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
+
+ t0 ^= round_key.repr[0];
+ t1 ^= round_key.repr[1];
+ t2 ^= round_key.repr[2];
+ t3 ^= round_key.repr[3];
+
+ return Block{ .repr = BlockVec{ t0, t1, t2, t3 } };
+ }
+
+ /// Encrypt a block with a round key *WITHOUT ANY PROTECTION AGAINST SIDE CHANNELS*
+ pub inline fn encryptUnprotected(block: Block, round_key: Block) Block {
+ const s0 = block.repr[0];
+ const s1 = block.repr[1];
+ const s2 = block.repr[2];
+ const s3 = block.repr[3];
+
+ var x: [4]u32 = undefined;
+ x = .{
+ table_encrypt[0][@truncate(u8, s0)],
+ table_encrypt[1][@truncate(u8, s1 >> 8)],
+ table_encrypt[2][@truncate(u8, s2 >> 16)],
+ table_encrypt[3][@truncate(u8, s3 >> 24)],
+ };
+ var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = .{
+ table_encrypt[0][@truncate(u8, s1)],
+ table_encrypt[1][@truncate(u8, s2 >> 8)],
+ table_encrypt[2][@truncate(u8, s3 >> 16)],
+ table_encrypt[3][@truncate(u8, s0 >> 24)],
+ };
+ var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = .{
+ table_encrypt[0][@truncate(u8, s2)],
+ table_encrypt[1][@truncate(u8, s3 >> 8)],
+ table_encrypt[2][@truncate(u8, s0 >> 16)],
+ table_encrypt[3][@truncate(u8, s1 >> 24)],
+ };
+ var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = .{
+ table_encrypt[0][@truncate(u8, s3)],
+ table_encrypt[1][@truncate(u8, s0 >> 8)],
+ table_encrypt[2][@truncate(u8, s1 >> 16)],
+ table_encrypt[3][@truncate(u8, s2 >> 24)],
+ };
+ var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
+
+ t0 ^= round_key.repr[0];
+ t1 ^= round_key.repr[1];
+ t2 ^= round_key.repr[2];
+ t3 ^= round_key.repr[3];
return Block{ .repr = BlockVec{ t0, t1, t2, t3 } };
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
- const t0 = block.repr[0];
- const t1 = block.repr[1];
- const t2 = block.repr[2];
- const t3 = block.repr[3];
+ const s0 = block.repr[0];
+ const s1 = block.repr[1];
+ const s2 = block.repr[2];
+ const s3 = block.repr[3];
// Last round uses s-box directly and XORs to produce output.
- var s0 = @as(u32, sbox_encrypt[t0 >> 24]) << 24 | @as(u32, sbox_encrypt[t1 >> 16 & 0xff]) << 16 | @as(u32, sbox_encrypt[t2 >> 8 & 0xff]) << 8 | @as(u32, sbox_encrypt[t3 & 0xff]);
- var s1 = @as(u32, sbox_encrypt[t1 >> 24]) << 24 | @as(u32, sbox_encrypt[t2 >> 16 & 0xff]) << 16 | @as(u32, sbox_encrypt[t3 >> 8 & 0xff]) << 8 | @as(u32, sbox_encrypt[t0 & 0xff]);
- var s2 = @as(u32, sbox_encrypt[t2 >> 24]) << 24 | @as(u32, sbox_encrypt[t3 >> 16 & 0xff]) << 16 | @as(u32, sbox_encrypt[t0 >> 8 & 0xff]) << 8 | @as(u32, sbox_encrypt[t1 & 0xff]);
- var s3 = @as(u32, sbox_encrypt[t3 >> 24]) << 24 | @as(u32, sbox_encrypt[t0 >> 16 & 0xff]) << 16 | @as(u32, sbox_encrypt[t1 >> 8 & 0xff]) << 8 | @as(u32, sbox_encrypt[t2 & 0xff]);
- s0 ^= round_key.repr[0];
- s1 ^= round_key.repr[1];
- s2 ^= round_key.repr[2];
- s3 ^= round_key.repr[3];
+ var x: [4]u8 = undefined;
+ x = sbox_lookup(&sbox_encrypt, @truncate(u8, s3 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s0));
+ var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
+ x = sbox_lookup(&sbox_encrypt, @truncate(u8, s0 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s1));
+ var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
+ x = sbox_lookup(&sbox_encrypt, @truncate(u8, s1 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s2));
+ var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
+ x = sbox_lookup(&sbox_encrypt, @truncate(u8, s2 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s3));
+ var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
- return Block{ .repr = BlockVec{ s0, s1, s2, s3 } };
+ t0 ^= round_key.repr[0];
+ t1 ^= round_key.repr[1];
+ t2 ^= round_key.repr[2];
+ t3 ^= round_key.repr[3];
+
+ return Block{ .repr = BlockVec{ t0, t1, t2, t3 } };
}
/// Decrypt a block with a round key.
@@ -85,32 +146,93 @@ pub const Block = struct {
const s2 = block.repr[2];
const s3 = block.repr[3];
- const t0 = round_key.repr[0] ^ table_decrypt[0][@truncate(u8, s0 >> 24)] ^ table_decrypt[1][@truncate(u8, s3 >> 16)] ^ table_decrypt[2][@truncate(u8, s2 >> 8)] ^ table_decrypt[3][@truncate(u8, s1)];
- const t1 = round_key.repr[1] ^ table_decrypt[0][@truncate(u8, s1 >> 24)] ^ table_decrypt[1][@truncate(u8, s0 >> 16)] ^ table_decrypt[2][@truncate(u8, s3 >> 8)] ^ table_decrypt[3][@truncate(u8, s2)];
- const t2 = round_key.repr[2] ^ table_decrypt[0][@truncate(u8, s2 >> 24)] ^ table_decrypt[1][@truncate(u8, s1 >> 16)] ^ table_decrypt[2][@truncate(u8, s0 >> 8)] ^ table_decrypt[3][@truncate(u8, s3)];
- const t3 = round_key.repr[3] ^ table_decrypt[0][@truncate(u8, s3 >> 24)] ^ table_decrypt[1][@truncate(u8, s2 >> 16)] ^ table_decrypt[2][@truncate(u8, s1 >> 8)] ^ table_decrypt[3][@truncate(u8, s0)];
+ var x: [4]u32 = undefined;
+ x = table_lookup(&table_decrypt, @truncate(u8, s0), @truncate(u8, s3 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 24));
+ var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = table_lookup(&table_decrypt, @truncate(u8, s1), @truncate(u8, s0 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 24));
+ var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = table_lookup(&table_decrypt, @truncate(u8, s2), @truncate(u8, s1 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 24));
+ var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = table_lookup(&table_decrypt, @truncate(u8, s3), @truncate(u8, s2 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 24));
+ var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
+
+ t0 ^= round_key.repr[0];
+ t1 ^= round_key.repr[1];
+ t2 ^= round_key.repr[2];
+ t3 ^= round_key.repr[3];
+
+ return Block{ .repr = BlockVec{ t0, t1, t2, t3 } };
+ }
+
+ /// Decrypt a block with a round key *WITHOUT ANY PROTECTION AGAINST SIDE CHANNELS*
+ pub inline fn decryptUnprotected(block: Block, round_key: Block) Block {
+ const s0 = block.repr[0];
+ const s1 = block.repr[1];
+ const s2 = block.repr[2];
+ const s3 = block.repr[3];
+
+ var x: [4]u32 = undefined;
+ x = .{
+ table_decrypt[0][@truncate(u8, s0)],
+ table_decrypt[1][@truncate(u8, s3 >> 8)],
+ table_decrypt[2][@truncate(u8, s2 >> 16)],
+ table_decrypt[3][@truncate(u8, s1 >> 24)],
+ };
+ var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = .{
+ table_decrypt[0][@truncate(u8, s1)],
+ table_decrypt[1][@truncate(u8, s0 >> 8)],
+ table_decrypt[2][@truncate(u8, s3 >> 16)],
+ table_decrypt[3][@truncate(u8, s2 >> 24)],
+ };
+ var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = .{
+ table_decrypt[0][@truncate(u8, s2)],
+ table_decrypt[1][@truncate(u8, s1 >> 8)],
+ table_decrypt[2][@truncate(u8, s0 >> 16)],
+ table_decrypt[3][@truncate(u8, s3 >> 24)],
+ };
+ var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
+ x = .{
+ table_decrypt[0][@truncate(u8, s3)],
+ table_decrypt[1][@truncate(u8, s2 >> 8)],
+ table_decrypt[2][@truncate(u8, s1 >> 16)],
+ table_decrypt[3][@truncate(u8, s0 >> 24)],
+ };
+ var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
+
+ t0 ^= round_key.repr[0];
+ t1 ^= round_key.repr[1];
+ t2 ^= round_key.repr[2];
+ t3 ^= round_key.repr[3];
return Block{ .repr = BlockVec{ t0, t1, t2, t3 } };
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, round_key: Block) Block {
- const t0 = block.repr[0];
- const t1 = block.repr[1];
- const t2 = block.repr[2];
- const t3 = block.repr[3];
+ const s0 = block.repr[0];
+ const s1 = block.repr[1];
+ const s2 = block.repr[2];
+ const s3 = block.repr[3];
// Last round uses s-box directly and XORs to produce output.
- var s0 = @as(u32, sbox_decrypt[t0 >> 24]) << 24 | @as(u32, sbox_decrypt[t3 >> 16 & 0xff]) << 16 | @as(u32, sbox_decrypt[t2 >> 8 & 0xff]) << 8 | @as(u32, sbox_decrypt[t1 & 0xff]);
- var s1 = @as(u32, sbox_decrypt[t1 >> 24]) << 24 | @as(u32, sbox_decrypt[t0 >> 16 & 0xff]) << 16 | @as(u32, sbox_decrypt[t3 >> 8 & 0xff]) << 8 | @as(u32, sbox_decrypt[t2 & 0xff]);
- var s2 = @as(u32, sbox_decrypt[t2 >> 24]) << 24 | @as(u32, sbox_decrypt[t1 >> 16 & 0xff]) << 16 | @as(u32, sbox_decrypt[t0 >> 8 & 0xff]) << 8 | @as(u32, sbox_decrypt[t3 & 0xff]);
- var s3 = @as(u32, sbox_decrypt[t3 >> 24]) << 24 | @as(u32, sbox_decrypt[t2 >> 16 & 0xff]) << 16 | @as(u32, sbox_decrypt[t1 >> 8 & 0xff]) << 8 | @as(u32, sbox_decrypt[t0 & 0xff]);
- s0 ^= round_key.repr[0];
- s1 ^= round_key.repr[1];
- s2 ^= round_key.repr[2];
- s3 ^= round_key.repr[3];
+ var x: [4]u8 = undefined;
+ x = sbox_lookup(&sbox_decrypt, @truncate(u8, s1 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s0));
+ var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
+ x = sbox_lookup(&sbox_decrypt, @truncate(u8, s2 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s1));
+ var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
+ x = sbox_lookup(&sbox_decrypt, @truncate(u8, s3 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s2));
+ var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
+ x = sbox_lookup(&sbox_decrypt, @truncate(u8, s0 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s3));
+ var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
- return Block{ .repr = BlockVec{ s0, s1, s2, s3 } };
+ t0 ^= round_key.repr[0];
+ t1 ^= round_key.repr[1];
+ t2 ^= round_key.repr[2];
+ t3 ^= round_key.repr[3];
+
+ return Block{ .repr = BlockVec{ t0, t1, t2, t3 } };
}
/// Apply the bitwise XOR operation to the content of two blocks.
@@ -226,7 +348,8 @@ fn KeySchedule(comptime Aes: type) type {
const subw = struct {
// Apply sbox_encrypt to each byte in w.
fn func(w: u32) u32 {
- return @as(u32, sbox_encrypt[w >> 24]) << 24 | @as(u32, sbox_encrypt[w >> 16 & 0xff]) << 16 | @as(u32, sbox_encrypt[w >> 8 & 0xff]) << 8 | @as(u32, sbox_encrypt[w & 0xff]);
+ const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, w), @truncate(u8, w >> 8), @truncate(u8, w >> 16), @truncate(u8, w >> 24));
+ return @as(u32, x[3]) << 24 | @as(u32, x[2]) << 16 | @as(u32, x[1]) << 8 | @as(u32, x[0]);
}
}.func;
@@ -244,6 +367,10 @@ fn KeySchedule(comptime Aes: type) type {
}
round_keys[i / 4].repr[i % 4] = round_keys[(i - words_in_key) / 4].repr[(i - words_in_key) % 4] ^ t;
}
+ i = 0;
+ inline while (i < round_keys.len * 4) : (i += 1) {
+ round_keys[i / 4].repr[i % 4] = @byteSwap(round_keys[i / 4].repr[i % 4]);
+ }
return Self{ .round_keys = round_keys };
}
@@ -257,11 +384,13 @@ fn KeySchedule(comptime Aes: type) type {
const ei = total_words - i - 4;
comptime var j: usize = 0;
inline while (j < 4) : (j += 1) {
- var x = round_keys[(ei + j) / 4].repr[(ei + j) % 4];
+ var rk = round_keys[(ei + j) / 4].repr[(ei + j) % 4];
if (i > 0 and i + 4 < total_words) {
- x = table_decrypt[0][sbox_encrypt[x >> 24]] ^ table_decrypt[1][sbox_encrypt[x >> 16 & 0xff]] ^ table_decrypt[2][sbox_encrypt[x >> 8 & 0xff]] ^ table_decrypt[3][sbox_encrypt[x & 0xff]];
+ const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, rk >> 24), @truncate(u8, rk >> 16), @truncate(u8, rk >> 8), @truncate(u8, rk));
+ const y = table_lookup(&table_decrypt, x[3], x[2], x[1], x[0]);
+ rk = y[0] ^ y[1] ^ y[2] ^ y[3];
}
- inv_round_keys[(i + j) / 4].repr[(i + j) % 4] = x;
+ inv_round_keys[(i + j) / 4].repr[(i + j) % 4] = rk;
}
}
return Self{ .round_keys = inv_round_keys };
@@ -293,7 +422,17 @@ pub fn AesEncryptCtx(comptime Aes: type) type {
const round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(src).xorBlocks(round_keys[0]);
comptime var i = 1;
- inline while (i < rounds) : (i += 1) {
+ if (side_channels_mitigations == .full) {
+ inline while (i < rounds) : (i += 1) {
+ t = t.encrypt(round_keys[i]);
+ }
+ } else {
+ inline while (i < 5) : (i += 1) {
+ t = t.encrypt(round_keys[i]);
+ }
+ inline while (i < rounds - 1) : (i += 1) {
+ t = t.encryptUnprotected(round_keys[i]);
+ }
t = t.encrypt(round_keys[i]);
}
t = t.encryptLast(round_keys[rounds]);
@@ -305,7 +444,17 @@ pub fn AesEncryptCtx(comptime Aes: type) type {
const round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(&counter).xorBlocks(round_keys[0]);
comptime var i = 1;
- inline while (i < rounds) : (i += 1) {
+ if (side_channels_mitigations == .full) {
+ inline while (i < rounds) : (i += 1) {
+ t = t.encrypt(round_keys[i]);
+ }
+ } else {
+ inline while (i < 5) : (i += 1) {
+ t = t.encrypt(round_keys[i]);
+ }
+ inline while (i < rounds - 1) : (i += 1) {
+ t = t.encryptUnprotected(round_keys[i]);
+ }
t = t.encrypt(round_keys[i]);
}
t = t.encryptLast(round_keys[rounds]);
@@ -359,7 +508,17 @@ pub fn AesDecryptCtx(comptime Aes: type) type {
const inv_round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(src).xorBlocks(inv_round_keys[0]);
comptime var i = 1;
- inline while (i < rounds) : (i += 1) {
+ if (side_channels_mitigations == .full) {
+ inline while (i < rounds) : (i += 1) {
+ t = t.decrypt(inv_round_keys[i]);
+ }
+ } else {
+ inline while (i < 5) : (i += 1) {
+ t = t.decrypt(inv_round_keys[i]);
+ }
+ inline while (i < rounds - 1) : (i += 1) {
+ t = t.decryptUnprotected(inv_round_keys[i]);
+ }
t = t.decrypt(inv_round_keys[i]);
}
t = t.decryptLast(inv_round_keys[rounds]);
@@ -428,10 +587,11 @@ const powx = init: {
break :init array;
};
-const sbox_encrypt align(64) = generateSbox(false);
-const sbox_decrypt align(64) = generateSbox(true);
-const table_encrypt align(64) = generateTable(false);
-const table_decrypt align(64) = generateTable(true);
+const sbox_encrypt align(64) = generateSbox(false); // S-box for encryption
+const sbox_key_schedule align(64) = generateSbox(false); // S-box only for the key schedule, so that it uses distinct L1 cache entries from the S-box used for encryption
+const sbox_decrypt align(64) = generateSbox(true); // S-box for decryption
+const table_encrypt align(64) = generateTable(false); // 4-byte LUTs for encryption
+const table_decrypt align(64) = generateTable(true); // 4-byte LUTs for decryption
// Generate S-box substitution values.
fn generateSbox(invert: bool) [256]u8 {
@@ -472,14 +632,14 @@ fn generateTable(invert: bool) [4][256]u32 {
var table: [4][256]u32 = undefined;
for (generateSbox(invert), 0..) |value, index| {
- table[0][index] = mul(value, if (invert) 0xb else 0x3);
- table[0][index] |= math.shl(u32, mul(value, if (invert) 0xd else 0x1), 8);
- table[0][index] |= math.shl(u32, mul(value, if (invert) 0x9 else 0x1), 16);
- table[0][index] |= math.shl(u32, mul(value, if (invert) 0xe else 0x2), 24);
+ table[0][index] = math.shl(u32, mul(value, if (invert) 0xb else 0x3), 24);
+ table[0][index] |= math.shl(u32, mul(value, if (invert) 0xd else 0x1), 16);
+ table[0][index] |= math.shl(u32, mul(value, if (invert) 0x9 else 0x1), 8);
+ table[0][index] |= mul(value, if (invert) 0xe else 0x2);
- table[1][index] = math.rotr(u32, table[0][index], 8);
- table[2][index] = math.rotr(u32, table[0][index], 16);
- table[3][index] = math.rotr(u32, table[0][index], 24);
+ table[1][index] = math.rotl(u32, table[0][index], 8);
+ table[2][index] = math.rotl(u32, table[0][index], 16);
+ table[3][index] = math.rotl(u32, table[0][index], 24);
}
return table;
@@ -506,3 +666,82 @@ fn mul(a: u8, b: u8) u8 {
return @truncate(u8, s);
}
+
+const cache_line_bytes = 64;
+
+inline fn sbox_lookup(sbox: *align(64) const [256]u8, idx0: u8, idx1: u8, idx2: u8, idx3: u8) [4]u8 {
+ if (side_channels_mitigations == .none) {
+ return [4]u8{
+ sbox[idx0],
+ sbox[idx1],
+ sbox[idx2],
+ sbox[idx3],
+ };
+ } else {
+ const stride = switch (side_channels_mitigations) {
+ .none => unreachable,
+ .basic => sbox.len / 4,
+ .medium => sbox.len / (sbox.len / cache_line_bytes) * 2,
+ .full => sbox.len / (sbox.len / cache_line_bytes),
+ };
+ const of0 = idx0 % stride;
+ const of1 = idx1 % stride;
+ const of2 = idx2 % stride;
+ const of3 = idx3 % stride;
+ var t: [4][sbox.len / stride]u8 align(64) = undefined;
+ var i: usize = 0;
+ while (i < t[0].len) : (i += 1) {
+ const tx = sbox[i * stride ..];
+ t[0][i] = tx[of0];
+ t[1][i] = tx[of1];
+ t[2][i] = tx[of2];
+ t[3][i] = tx[of3];
+ }
+ std.mem.doNotOptimizeAway(t);
+ return [4]u8{
+ t[0][idx0 / stride],
+ t[1][idx1 / stride],
+ t[2][idx2 / stride],
+ t[3][idx3 / stride],
+ };
+ }
+}
+
+inline fn table_lookup(table: *align(64) const [4][256]u32, idx0: u8, idx1: u8, idx2: u8, idx3: u8) [4]u32 {
+ if (side_channels_mitigations == .none) {
+ return [4]u32{
+ table[0][idx0],
+ table[1][idx1],
+ table[2][idx2],
+ table[3][idx3],
+ };
+ } else {
+ const table_bytes = @sizeOf(@TypeOf(table[0]));
+ const stride = switch (side_channels_mitigations) {
+ .none => unreachable,
+ .basic => table[0].len / 4,
+ .medium => table[0].len / (table_bytes / cache_line_bytes) * 2,
+ .full => table[0].len / (table_bytes / cache_line_bytes),
+ };
+ const of0 = idx0 % stride;
+ const of1 = idx1 % stride;
+ const of2 = idx2 % stride;
+ const of3 = idx3 % stride;
+ var t: [4][table[0].len / stride]u32 align(64) = undefined;
+ var i: usize = 0;
+ while (i < t[0].len) : (i += 1) {
+ const tx = table[0][i * stride ..];
+ t[0][i] = tx[of0];
+ t[1][i] = tx[of1];
+ t[2][i] = tx[of2];
+ t[3][i] = tx[of3];
+ }
+ std.mem.doNotOptimizeAway(t);
+ return [4]u32{
+ t[0][idx0 / stride],
+ math.rotl(u32, t[1][idx1 / stride], 8),
+ math.rotl(u32, t[2][idx2 / stride], 16),
+ math.rotl(u32, t[3][idx3 / stride], 24),
+ };
+ }
+}
diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig
index a95e75e538..0112e81c6a 100644
--- a/lib/std/crypto/argon2.zig
+++ b/lib/std/crypto/argon2.zig
@@ -138,40 +138,39 @@ fn initHash(
}
fn blake2bLong(out: []u8, in: []const u8) void {
- var b2 = Blake2b512.init(.{ .expected_out_bits = math.min(512, out.len * 8) });
+ const H = Blake2b512;
+ var outlen_bytes: [4]u8 = undefined;
+ mem.writeIntLittle(u32, &outlen_bytes, @intCast(u32, out.len));
- var buffer: [Blake2b512.digest_length]u8 = undefined;
- mem.writeIntLittle(u32, buffer[0..4], @intCast(u32, out.len));
- b2.update(buffer[0..4]);
- b2.update(in);
- b2.final(&buffer);
+ var out_buf: [H.digest_length]u8 = undefined;
- if (out.len <= Blake2b512.digest_length) {
- mem.copy(u8, out, buffer[0..out.len]);
+ if (out.len <= H.digest_length) {
+ var h = H.init(.{ .expected_out_bits = out.len * 8 });
+ h.update(&outlen_bytes);
+ h.update(in);
+ h.final(&out_buf);
+ mem.copy(u8, out, out_buf[0..out.len]);
return;
}
- b2 = Blake2b512.init(.{});
- mem.copy(u8, out, buffer[0..32]);
- var out_slice = out[32..];
- while (out_slice.len > Blake2b512.digest_length) : ({
- out_slice = out_slice[32..];
- b2 = Blake2b512.init(.{});
- }) {
- b2.update(&buffer);
- b2.final(&buffer);
- mem.copy(u8, out_slice, buffer[0..32]);
- }
+ var h = H.init(.{});
+ h.update(&outlen_bytes);
+ h.update(in);
+ h.final(&out_buf);
+ var out_slice = out;
+ mem.copy(u8, out_slice, out_buf[0 .. H.digest_length / 2]);
+ out_slice = out_slice[H.digest_length / 2 ..];
- var r = Blake2b512.digest_length;
- if (out.len % Blake2b512.digest_length > 0) {
- r = ((out.len + 31) / 32) - 2;
- b2 = Blake2b512.init(.{ .expected_out_bits = r * 8 });
+ var in_buf: [H.digest_length]u8 = undefined;
+ while (out_slice.len > H.digest_length) {
+ mem.copy(u8, &in_buf, &out_buf);
+ H.hash(&in_buf, &out_buf, .{});
+ mem.copy(u8, out_slice, out_buf[0 .. H.digest_length / 2]);
+ out_slice = out_slice[H.digest_length / 2 ..];
}
-
- b2.update(&buffer);
- b2.final(&buffer);
- mem.copy(u8, out_slice, buffer[0..r]);
+ mem.copy(u8, &in_buf, &out_buf);
+ H.hash(&in_buf, &out_buf, .{ .expected_out_bits = out_slice.len * 8 });
+ mem.copy(u8, out_slice, out_buf[0..out_slice.len]);
}
fn initBlocks(
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index e6e0e1fc39..ff098c7804 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -27,6 +27,8 @@ const hashes = [_]Crypto{
Crypto{ .ty = crypto.hash.sha3.Sha3_512, .name = "sha3-512" },
Crypto{ .ty = crypto.hash.sha3.Shake128, .name = "shake-128" },
Crypto{ .ty = crypto.hash.sha3.Shake256, .name = "shake-256" },
+ Crypto{ .ty = crypto.hash.sha3.TurboShake128(null), .name = "turboshake-128" },
+ Crypto{ .ty = crypto.hash.sha3.TurboShake256(null), .name = "turboshake-256" },
Crypto{ .ty = crypto.hash.Gimli, .name = "gimli-hash" },
Crypto{ .ty = crypto.hash.blake2.Blake2s256, .name = "blake2s" },
Crypto{ .ty = crypto.hash.blake2.Blake2b512, .name = "blake2b" },
@@ -201,6 +203,72 @@ pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime
return throughput;
}
+const kems = [_]Crypto{
+ Crypto{ .ty = crypto.kem.kyber_d00.Kyber512, .name = "kyber512d00" },
+ Crypto{ .ty = crypto.kem.kyber_d00.Kyber768, .name = "kyber768d00" },
+ Crypto{ .ty = crypto.kem.kyber_d00.Kyber1024, .name = "kyber1024d00" },
+};
+
+pub fn benchmarkKem(comptime Kem: anytype, comptime kems_count: comptime_int) !u64 {
+ const key_pair = try Kem.KeyPair.create(null);
+
+ var timer = try Timer.start();
+ const start = timer.lap();
+ {
+ var i: usize = 0;
+ while (i < kems_count) : (i += 1) {
+ const e = key_pair.public_key.encaps(null);
+ mem.doNotOptimizeAway(&e);
+ }
+ }
+ const end = timer.read();
+
+ const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
+ const throughput = @floatToInt(u64, kems_count / elapsed_s);
+
+ return throughput;
+}
+
+pub fn benchmarkKemDecaps(comptime Kem: anytype, comptime kems_count: comptime_int) !u64 {
+ const key_pair = try Kem.KeyPair.create(null);
+
+ const e = key_pair.public_key.encaps(null);
+
+ var timer = try Timer.start();
+ const start = timer.lap();
+ {
+ var i: usize = 0;
+ while (i < kems_count) : (i += 1) {
+ const ss2 = try key_pair.secret_key.decaps(&e.ciphertext);
+ mem.doNotOptimizeAway(&ss2);
+ }
+ }
+ const end = timer.read();
+
+ const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
+ const throughput = @floatToInt(u64, kems_count / elapsed_s);
+
+ return throughput;
+}
+
+pub fn benchmarkKemKeyGen(comptime Kem: anytype, comptime kems_count: comptime_int) !u64 {
+ var timer = try Timer.start();
+ const start = timer.lap();
+ {
+ var i: usize = 0;
+ while (i < kems_count) : (i += 1) {
+ const key_pair = try Kem.KeyPair.create(null);
+ mem.doNotOptimizeAway(&key_pair);
+ }
+ }
+ const end = timer.read();
+
+ const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
+ const throughput = @floatToInt(u64, kems_count / elapsed_s);
+
+ return throughput;
+}
+
const aeads = [_]Crypto{
Crypto{ .ty = crypto.aead.chacha_poly.ChaCha20Poly1305, .name = "chacha20Poly1305" },
Crypto{ .ty = crypto.aead.chacha_poly.XChaCha20Poly1305, .name = "xchacha20Poly1305" },
@@ -483,4 +551,25 @@ pub fn main() !void {
try stdout.print("{s:>17}: {d:10.3} s/ops\n", .{ H.name, throughput });
}
}
+
+ inline for (kems) |E| {
+ if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
+ const throughput = try benchmarkKem(E.ty, mode(1000));
+ try stdout.print("{s:>17}: {:10} encaps/s\n", .{ E.name, throughput });
+ }
+ }
+
+ inline for (kems) |E| {
+ if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
+ const throughput = try benchmarkKemDecaps(E.ty, mode(25000));
+ try stdout.print("{s:>17}: {:10} decaps/s\n", .{ E.name, throughput });
+ }
+ }
+
+ inline for (kems) |E| {
+ if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
+ const throughput = try benchmarkKemKeyGen(E.ty, mode(25000));
+ try stdout.print("{s:>17}: {:10} keygen/s\n", .{ E.name, throughput });
+ }
+ }
}
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index 5b8e21d922..36d717387f 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -200,7 +200,7 @@ const CompressGeneric = struct {
}
};
-const compress = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_c)
+const compress = if (builtin.cpu.arch == .x86_64)
CompressVectorized.compress
else
CompressGeneric.compress;
diff --git a/lib/std/crypto/gimli.zig b/lib/std/crypto/gimli.zig
index 9443b97be7..0189f4c359 100644
--- a/lib/std/crypto/gimli.zig
+++ b/lib/std/crypto/gimli.zig
@@ -152,7 +152,7 @@ pub const State = struct {
self.endianSwap();
}
- pub const permute = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_c) impl: {
+ pub const permute = if (builtin.cpu.arch == .x86_64) impl: {
break :impl permute_vectorized;
} else if (builtin.mode == .ReleaseSmall) impl: {
break :impl permute_small;
diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig
index 10367caffd..af7c12c8c2 100644
--- a/lib/std/crypto/keccak_p.zig
+++ b/lib/std/crypto/keccak_p.zig
@@ -175,12 +175,12 @@ pub fn KeccakF(comptime f: u11) type {
/// Apply a (possibly) reduced-round permutation to the state.
pub fn permuteR(self: *Self, comptime rounds: u5) void {
var i = RC.len - rounds;
- while (i < rounds - rounds % 3) : (i += 3) {
+ while (i < RC.len - RC.len % 3) : (i += 3) {
self.round(RC[i]);
self.round(RC[i + 1]);
self.round(RC[i + 2]);
}
- while (i < rounds) : (i += 1) {
+ while (i < RC.len) : (i += 1) {
self.round(RC[i]);
}
}
@@ -231,7 +231,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti
bytes = bytes[rate..];
}
if (bytes.len > 0) {
- self.st.addBytes(bytes[0..]);
+ mem.copy(u8, &self.buf, bytes);
self.offset = bytes.len;
}
}
diff --git a/lib/std/crypto/kyber_d00.zig b/lib/std/crypto/kyber_d00.zig
new file mode 100644
index 0000000000..21fdb6ff17
--- /dev/null
+++ b/lib/std/crypto/kyber_d00.zig
@@ -0,0 +1,1780 @@
+//! Implementation of the IND-CCA2 post-quantum secure key encapsulation
+//! mechanism (KEM) CRYSTALS-Kyber, as submitted to the third round of the NIST
+//! Post-Quantum Cryptography standardisation project (v3.02/"draft00"), and
+//! selected for standardisation.
+//!
+//! Kyber will likely change before final standardisation.
+//!
+//! The namespace suffix (currently `_d00`) refers to the version currently
+//! implemented, in accordance with the draft. It may not be updated if new
+//! versions of the draft only include editorial changes.
+//!
+//! The suffix will eventually be removed once Kyber is finalized.
+//!
+//! Quoting from the CFRG I-D:
+//!
+//! Kyber is not a Diffie-Hellman (DH) style non-interactive key
+//! agreement, but instead, Kyber is a Key Encapsulation Method (KEM).
+//! In essence, a KEM is a Public-Key Encryption (PKE) scheme where the
+//! plaintext cannot be specified, but is generated as a random key as
+//! part of the encryption. A KEM can be transformed into an unrestricted
+//! PKE using HPKE (RFC9180). On its own, a KEM can be used as a key
+//! agreement method in TLS.
+//!
+//! Kyber is an IND-CCA2 secure KEM. It is constructed by applying a
+//! Fujisaki-Okamoto style transformation on InnerPKE, which is the
+//! underlying IND-CPA secure Public Key Encryption scheme. We cannot
+//! use InnerPKE directly, as its ciphertexts are malleable.
+//!
+//! ```
+//! F.O. transform
+//! InnerPKE ----------------------> Kyber
+//! IND-CPA IND-CCA2
+//! ```
+//!
+//! Kyber is a lattice-based scheme. More precisely, its security is
+//! based on the learning-with-errors-and-rounding problem in module
+//! lattices (MLWER). The underlying polynomial ring R (defined in
+//! Section 5) is chosen such that multiplication is very fast using the
+//! number theoretic transform (NTT, see Section 5.1.3).
+//!
+//! An InnerPKE private key is a vector _s_ over R of length k which is
+//! _small_ in a particular way. Here k is a security parameter akin to
+//! the size of a prime modulus. For Kyber512, which targets AES-128's
+//! security level, the value of k is 2.
+//!
+//! The public key consists of two values:
+//!
+//! * _A_ a uniformly sampled k by k matrix over R _and_
+//!
+//! * _t = A s + e_, where e is a suitably small masking vector.
+//!
+//! Distinguishing between such A s + e and a uniformly sampled t is the
+//! module learning-with-errors (MLWE) problem. If that is hard, then it
+//! is also hard to recover the private key from the public key as that
+//! would allow you to distinguish between those two.
+//!
+//! To save space in the public key, A is recomputed deterministically
+//! from a seed _rho_.
+//!
+//! A ciphertext for a message m under this public key is a pair (c_1,
+//! c_2) computed roughly as follows:
+//!
+//! c_1 = Compress(A^T r + e_1, d_u)
+//! c_2 = Compress(t^T r + e_2 + Decompress(m, 1), d_v)
+//!
+//! where
+//!
+//! * e_1, e_2 and r are small blinds;
+//!
+//! * Compress(-, d) removes some information, leaving d bits per
+//! coefficient and Decompress is such that Compress after Decompress
+//! does nothing and
+//!
+//! * d_u, d_v are scheme parameters.
+//!
+//! Distinguishing such a ciphertext and uniformly sampled (c_1, c_2) is
+//! an example of the full MLWER problem, see section 4.4 of [KyberV302].
+//!
+//! To decrypt the ciphertext, one computes
+//!
+//! m = Compress(Decompress(c_2, d_v) - s^T Decompress(c_1, d_u), 1).
+//!
+//! It is not straightforward to see that this formula is correct. In
+//! fact, there is a negligible but non-zero probability that a ciphertext
+//! does not decrypt correctly given by the DFP column in Table 4. This
+//! failure probability can be computed by a careful automated analysis
+//! of the probabilities involved, see kyber_failure.py of [SecEst].
+//!
+//! [KyberV302](https://pq-crystals.org/kyber/data/kyber-specification-round3-20210804.pdf)
+//! [I-D](https://github.com/bwesterb/draft-schwabe-cfrg-kyber)
+//! [SecEst](https://github.com/pq-crystals/security-estimates)
+
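// Round-trip sketch of the KEM flow described above, written against the public
// API declared later in this file and the `std.crypto.kem.kyber_d00` namespace
// added in crypto.zig by this change (illustrative, not part of the patch):
const example_std = @import("std");
const example_kyber = example_std.crypto.kem.kyber_d00;

fn kemRoundTrip() !void {
    const kp = try example_kyber.Kyber768.KeyPair.create(null); // fresh, random key pair
    const enc = kp.public_key.encaps(null); // shared secret + ciphertext for the recipient
    const ss = try kp.secret_key.decaps(&enc.ciphertext); // recipient recovers the secret
    example_std.debug.assert(example_std.mem.eql(u8, &ss, &enc.shared_secret));
}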
+// TODO
+//
+// - The bottleneck in Kyber are the various hash/xof calls:
+// - Optimize Zig's keccak implementation.
+// - Use SIMD to compute keccak in parallel.
+// - Can we track bounds of coefficients using comptime types without
+// duplicating code?
+// - Would be neater to have tests closer to the thing under test.
+// - When generating a keypair, we have a copy of the inner public key with
+// its large matrix A in both the public key and the private key. In Go we
+// can just have a pointer in the private key to the public key, but
+// how do we do this elegantly in Zig?
+
+const std = @import("std");
+const builtin = @import("builtin");
+
+const testing = std.testing;
+const assert = std.debug.assert;
+const crypto = std.crypto;
+const math = std.math;
+const mem = std.mem;
+const RndGen = std.rand.DefaultPrng;
+const sha3 = crypto.hash.sha3;
+
+// Q is the parameter q ≡ 3329 = 2¹¹ + 2¹⁰ + 2⁸ + 1.
+const Q: i16 = 3329;
+
+// Montgomery R
+const R: i32 = 1 << 16;
+
+// Parameter n, degree of polynomials.
+const N: usize = 256;
+
+// Size of "small" vectors used in encryption blinds.
+const eta2: u8 = 2;
+
+const Params = struct {
+ name: []const u8,
+
+ // Width and height of the matrix A.
+ k: u8,
+
+ // Size of "small" vectors used in private key and encryption blinds.
+ eta1: u8,
+
+ // How many bits to retain of u, the private-key independent part
+ // of the ciphertext.
+ du: u8,
+
+ // How many bits to retain of v, the private-key dependent part
+ // of the ciphertext.
+ dv: u8,
+};
+
+pub const Kyber512 = Kyber(.{
+ .name = "Kyber512",
+ .k = 2,
+ .eta1 = 3,
+ .du = 10,
+ .dv = 4,
+});
+
+pub const Kyber768 = Kyber(.{
+ .name = "Kyber768",
+ .k = 3,
+ .eta1 = 2,
+ .du = 10,
+ .dv = 4,
+});
+
+pub const Kyber1024 = Kyber(.{
+ .name = "Kyber1024",
+ .k = 4,
+ .eta1 = 2,
+ .du = 11,
+ .dv = 5,
+});
+
+const modes = [_]type{ Kyber512, Kyber768, Kyber1024 };
+const h_length: usize = 32;
+const inner_seed_length: usize = 32;
+const common_encaps_seed_length: usize = 32;
+const common_shared_key_size: usize = 32;
+
+fn Kyber(comptime p: Params) type {
+ return struct {
+ // Size of a ciphertext, in bytes.
+ pub const ciphertext_length = Poly.compressedSize(p.du) * p.k + Poly.compressedSize(p.dv);
+
+ const Self = @This();
+ const V = Vec(p.k);
+ const M = Mat(p.k);
+
+ /// Length (in bytes) of a shared secret.
+ pub const shared_length = common_shared_key_size;
+ /// Length (in bytes) of a seed for deterministic encapsulation.
+ pub const encaps_seed_length = common_encaps_seed_length;
+ /// Length (in bytes) of a seed for key generation.
+ pub const seed_length: usize = inner_seed_length + shared_length;
+ /// Algorithm name.
+ pub const name = p.name;
+
+ /// A shared secret, and an encapsulated (encrypted) representation of it.
+ pub const EncapsulatedSecret = struct {
+ shared_secret: [shared_length]u8,
+ ciphertext: [ciphertext_length]u8,
+ };
+
+ /// A Kyber public key.
+ pub const PublicKey = struct {
+ pk: InnerPk,
+
+ // Cached
+ hpk: [h_length]u8, // H(pk)
+
+ /// Size of a serialized representation of the key, in bytes.
+ pub const bytes_length = InnerPk.bytes_length;
+
+ /// Generates a shared secret, and encapsulates it for the public key.
+ /// If `seed` is `null`, a random seed is used. This is recommended.
+ /// If `seed` is set, encapsulation is deterministic.
+ pub fn encaps(pk: PublicKey, seed_: ?[encaps_seed_length]u8) EncapsulatedSecret {
+ const seed = seed_ orelse seed: {
+ var random_seed: [encaps_seed_length]u8 = undefined;
+ crypto.random.bytes(&random_seed);
+ break :seed random_seed;
+ };
+
+ var m: [inner_plaintext_length]u8 = undefined;
+
+ // m = H(seed)
+ var h = sha3.Sha3_256.init(.{});
+ h.update(&seed);
+ h.final(&m);
+
+ // (K', r) = G(m ‖ H(pk))
+ var kr: [inner_plaintext_length + h_length]u8 = undefined;
+ var g = sha3.Sha3_512.init(.{});
+ g.update(&m);
+ g.update(&pk.hpk);
+ g.final(&kr);
+
+ // c = innerEncrypt(pk, m, r)
+ const ct = pk.pk.encrypt(&m, kr[32..64]);
+
+ // Compute H(c) and put in second slot of kr, which will be (K', H(c)).
+ h = sha3.Sha3_256.init(.{});
+ h.update(&ct);
+ h.final(kr[32..64]);
+
+ // K = KDF(K' ‖ H(c))
+ var kdf = sha3.Shake256.init(.{});
+ kdf.update(&kr);
+ var ss: [shared_length]u8 = undefined;
+ kdf.squeeze(&ss);
+
+ return EncapsulatedSecret{
+ .shared_secret = ss,
+ .ciphertext = ct,
+ };
+ }
+
+ /// Serializes the key into a byte array.
+ pub fn toBytes(pk: PublicKey) [bytes_length]u8 {
+ return pk.pk.toBytes();
+ }
+
+ /// Deserializes the key from a byte array.
+ pub fn fromBytes(buf: *const [bytes_length]u8) !PublicKey {
+ var ret: PublicKey = undefined;
+ ret.pk = InnerPk.fromBytes(buf[0..InnerPk.bytes_length]);
+
+ var h = sha3.Sha3_256.init(.{});
+ h.update(buf);
+ h.final(&ret.hpk);
+ return ret;
+ }
+ };
+
+ /// A Kyber secret key.
+ pub const SecretKey = struct {
+ sk: InnerSk,
+ pk: InnerPk,
+ hpk: [h_length]u8, // H(pk)
+ z: [shared_length]u8,
+
+ /// Size of a serialized representation of the key, in bytes.
+ pub const bytes_length: usize =
+ InnerSk.bytes_length + InnerPk.bytes_length + h_length + shared_length;
+
+ /// Decapsulates the shared secret within ct using the private key.
+ pub fn decaps(sk: SecretKey, ct: *const [ciphertext_length]u8) ![shared_length]u8 {
+ // m' = innerDec(ct)
+ const m2 = sk.sk.decrypt(ct);
+
+ // (K'', r') = G(m' ‖ H(pk))
+ var kr2: [64]u8 = undefined;
+ var g = sha3.Sha3_512.init(.{});
+ g.update(&m2);
+ g.update(&sk.hpk);
+ g.final(&kr2);
+
+ // ct' = innerEnc(pk, m', r')
+ const ct2 = sk.pk.encrypt(&m2, kr2[32..64]);
+
+ // Compute H(ct) and put in the second slot of kr2 which will be (K'', H(ct)).
+ var h = sha3.Sha3_256.init(.{});
+ h.update(ct);
+ h.final(kr2[32..64]);
+
+ // Replace K'' by z in the first slot of kr2 if ct ≠ ct'.
+ cmov(32, kr2[0..32], sk.z, ctneq(ciphertext_length, ct.*, ct2));
+
+ // K = KDF(K''/z, H(c))
+ var kdf = sha3.Shake256.init(.{});
+ var ss: [shared_length]u8 = undefined;
+ kdf.update(&kr2);
+ kdf.squeeze(&ss);
+ return ss;
+ }
+
+ /// Serializes the key into a byte array.
+ pub fn toBytes(sk: SecretKey) [bytes_length]u8 {
+ return sk.sk.toBytes() ++ sk.pk.toBytes() ++ sk.hpk ++ sk.z;
+ }
+
+ /// Deserializes the key from a byte array.
+ pub fn fromBytes(buf: *const [bytes_length]u8) !SecretKey {
+ var ret: SecretKey = undefined;
+ comptime var s: usize = 0;
+ ret.sk = InnerSk.fromBytes(buf[s .. s + InnerSk.bytes_length]);
+ s += InnerSk.bytes_length;
+ ret.pk = InnerPk.fromBytes(buf[s .. s + InnerPk.bytes_length]);
+ s += InnerPk.bytes_length;
+ mem.copy(u8, &ret.hpk, buf[s .. s + h_length]);
+ s += h_length;
+ mem.copy(u8, &ret.z, buf[s .. s + shared_length]);
+ return ret;
+ }
+ };
+
+ /// A Kyber key pair.
+ pub const KeyPair = struct {
+ secret_key: SecretKey,
+ public_key: PublicKey,
+
+ /// Create a new key pair.
+ /// If seed is null, a random seed will be generated.
+        /// If a seed is provided, the key pair will be deterministic.
+ pub fn create(seed_: ?[seed_length]u8) !KeyPair {
+ const seed = seed_ orelse sk: {
+ var random_seed: [seed_length]u8 = undefined;
+ crypto.random.bytes(&random_seed);
+ break :sk random_seed;
+ };
+ var ret: KeyPair = undefined;
+
+ // Generate inner key
+ innerKeyFromSeed(
+ seed[0..inner_seed_length].*,
+ &ret.public_key.pk,
+ &ret.secret_key.sk,
+ );
+ ret.secret_key.pk = ret.public_key.pk;
+
+ // Copy over z from seed.
+ mem.copy(u8, &ret.secret_key.z, seed[inner_seed_length..seed_length]);
+
+ // Compute H(pk)
+ var h = sha3.Sha3_256.init(.{});
+ h.update(&ret.public_key.pk.toBytes());
+ h.final(&ret.secret_key.hpk);
+ ret.public_key.hpk = ret.secret_key.hpk;
+
+ return ret;
+ }
+ };
+
+    // Size of plaintexts of the inner PKE.
+ const inner_plaintext_length: usize = Poly.compressedSize(1);
+
+ const InnerPk = struct {
+ rho: [32]u8, // ρ, the seed for the matrix A
+ th: V, // NTT(t), normalized
+
+ // Cached values
+ aT: M,
+
+ const bytes_length = V.bytes_length + 32;
+
+ fn encrypt(
+ pk: InnerPk,
+ pt: *const [inner_plaintext_length]u8,
+ seed: *const [32]u8,
+ ) [ciphertext_length]u8 {
+ // Sample r, e₁ and e₂ appropriately
+ const rh = V.noise(p.eta1, 0, seed).ntt().barrettReduce();
+ const e1 = V.noise(eta2, p.k, seed);
+ const e2 = Poly.noise(eta2, 2 * p.k, seed);
+
+ // Next we compute u = Aᵀ r + e₁. First Aᵀ.
+ var u: V = undefined;
+ for (0..p.k) |i| {
+ // Note that coefficients of r are bounded by q and those of Aᵀ
+ // are bounded by 4.5q and so their product is bounded by 2¹⁵q
+ // as required for multiplication.
+ u.ps[i] = pk.aT.vs[i].dotHat(rh);
+ }
+
+ // Aᵀ and r were not in Montgomery form, so the Montgomery
+ // multiplications in the inner product added a factor R⁻¹ which
+ // the InvNTT cancels out.
+ u = u.barrettReduce().invNTT().add(e1).normalize();
+
+            // Next, compute v = ⟨t, r⟩ + e₂ + Decompress_q(m, 1)
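+            // Decompress_q(m, 1) maps each message bit to 0 or ⌈q/2⌉ = 1665, so
+            // the bit sits far from both 0 and q and survives the added noise.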
+ const v = pk.th.dotHat(rh).barrettReduce().invNTT()
+ .add(Poly.decompress(1, pt)).add(e2).normalize();
+
+ return u.compress(p.du) ++ v.compress(p.dv);
+ }
+
+ fn toBytes(pk: InnerPk) [bytes_length]u8 {
+ return pk.th.toBytes() ++ pk.rho;
+ }
+
+ fn fromBytes(buf: *const [bytes_length]u8) InnerPk {
+ var ret: InnerPk = undefined;
+ ret.th = V.fromBytes(buf[0..V.bytes_length]).normalize();
+ mem.copy(u8, &ret.rho, buf[V.bytes_length..bytes_length]);
+ ret.aT = M.uniform(ret.rho, true);
+ return ret;
+ }
+ };
+
+ // Private key of the inner PKE
+ const InnerSk = struct {
+ sh: V, // NTT(s), normalized
+ const bytes_length = V.bytes_length;
+
+ fn decrypt(sk: InnerSk, ct: *const [ciphertext_length]u8) [inner_plaintext_length]u8 {
+ const u = V.decompress(p.du, ct[0..comptime V.compressedSize(p.du)]);
+ const v = Poly.decompress(
+ p.dv,
+ ct[comptime V.compressedSize(p.du)..ciphertext_length],
+ );
+
+            // Compute m = Compress_q(v - ⟨s, u⟩, 1)
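+            // compress(1) rounds each coefficient back to a bit: coefficients
+            // closer to ⌈q/2⌉ than to 0 (mod q) become 1, the rest become 0.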
+ return v.sub(sk.sh.dotHat(u.ntt()).barrettReduce().invNTT())
+ .normalize().compress(1);
+ }
+
+ fn toBytes(sk: InnerSk) [bytes_length]u8 {
+ return sk.sh.toBytes();
+ }
+
+ fn fromBytes(buf: *const [bytes_length]u8) InnerSk {
+ var ret: InnerSk = undefined;
+ ret.sh = V.fromBytes(buf).normalize();
+ return ret;
+ }
+ };
+
+ // Derives inner PKE keypair from given seed.
+ fn innerKeyFromSeed(seed: [inner_seed_length]u8, pk: *InnerPk, sk: *InnerSk) void {
+ var expanded_seed: [64]u8 = undefined;
+
+ var h = sha3.Sha3_512.init(.{});
+ h.update(&seed);
+ h.final(&expanded_seed);
+ mem.copy(u8, &pk.rho, expanded_seed[0..32]);
+ const sigma = expanded_seed[32..64];
+ pk.aT = M.uniform(pk.rho, false); // Expand ρ to A; we'll transpose later on
+
+ // Sample secret vector s.
+ sk.sh = V.noise(p.eta1, 0, sigma).ntt().normalize();
+
+ const eh = Vec(p.k).noise(p.eta1, p.k, sigma).ntt(); // sample blind e.
+ var th: V = undefined;
+
+ // Next, we compute t = A s + e.
+ for (0..p.k) |i| {
+ // Note that coefficients of s are bounded by q and those of A
+ // are bounded by 4.5q and so their product is bounded by 2¹⁵q
+ // as required for multiplication.
+ // A and s were not in Montgomery form, so the Montgomery
+ // multiplications in the inner product added a factor R⁻¹ which
+ // we'll cancel out with toMont(). This will also ensure the
+ // coefficients of th are bounded in absolute value by q.
+ th.ps[i] = pk.aT.vs[i].dotHat(sk.sh).toMont();
+ }
+
+ pk.th = th.add(eh).normalize(); // bounded by 8q
+ pk.aT = pk.aT.transpose();
+ }
+ };
+}
+
+// R mod q
+const r_mod_q: i32 = @rem(@as(i32, R), Q);
+
+// R² mod q
+const r2_mod_q: i32 = @rem(r_mod_q * r_mod_q, Q);
+
+// ζ is a primitive 256ᵗʰ root of unity modulo q, used for the NTT.
+const zeta: i16 = 17;
+
+// (128)⁻¹ R². Used in inverse NTT.
+const r2_over_128: i32 = @mod(invertMod(128, Q) * r2_mod_q, Q);
+
+// zetas lists precomputed powers of the primitive root of unity in
+// Montgomery representation used for the NTT:
+//
+// zetas[i] = ζᵇʳᵛ⁽ⁱ⁾ R mod q
+//
+// where ζ = 17, brv(i) is the bitreversal of a 7-bit number and R=2¹⁶ mod q.
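+//
+// For example, zetas[0] = ζ⁰ R mod q = 2¹⁶ mod 3329 = 2285.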
+const zetas = computeZetas();
+
+// invNTTReductions keeps track of which coefficients to apply Barrett
+// reduction to in Poly.invNTT().
+//
+// Generated lazily: once a butterfly is computed which is about to
+// overflow the i16, the largest coefficient is reduced. If that is
+// not enough, the other coefficient is reduced as well.
+//
+// This is actually optimal, as proven in https://eprint.iacr.org/2020/1377.pdf
+// TODO generate comptime?
+const inv_ntt_reductions = [_]i16{
+ -1, // after layer 1
+ -1, // after layer 2
+ 16,
+ 17,
+ 48,
+ 49,
+ 80,
+ 81,
+ 112,
+ 113,
+ 144,
+ 145,
+ 176,
+ 177,
+ 208,
+ 209,
+ 240, 241, -1, // after layer 3
+ 0, 1, 32,
+ 33, 34, 35,
+ 64, 65, 96,
+ 97, 98, 99,
+ 128, 129,
+ 160, 161, 162, 163, 192, 193, 224, 225, 226, 227, -1, // after layer 4
+ 2, 3, 66, 67, 68, 69, 70, 71, 130, 131, 194,
+ 195, 196, 197,
+ 198, 199, -1, // after layer 5
+ 4, 5, 6,
+ 7, 132, 133,
+ 134, 135, 136,
+ 137, 138, 139,
+ 140, 141,
+ 142, 143, -1, // after layer 6
+ -1, // after layer 7
+};
+
+test "invNTTReductions bounds" {
+ // Checks whether the reductions proposed by invNTTReductions
+ // don't overflow during invNTT().
+ var xs = [_]i32{1} ** 256; // start at |x| ≤ q
+
+ var r: usize = 0;
+ var layer: math.Log2Int(usize) = 1;
+ while (layer < 8) : (layer += 1) {
+ const w = @as(usize, 1) << layer;
+ var i: usize = 0;
+
+ while (i + w < 256) {
+ xs[i] = xs[i] + xs[i + w];
+ try testing.expect(xs[i] <= 9); // we can't exceed 9q
+ xs[i + w] = 1;
+ i += 1;
+ if (@mod(i, w) == 0) {
+ i += w;
+ }
+ }
+
+ while (true) {
+ const j = inv_ntt_reductions[r];
+ r += 1;
+ if (j < 0) {
+ break;
+ }
+ xs[@intCast(usize, j)] = 1;
+ }
+ }
+}
+
+// Extended euclidean algorithm.
+//
+// For a, b finds x, y such that x a + y b = gcd(a, b). Used to compute
+// modular inverse.
+fn eea(a: anytype, b: @TypeOf(a)) EeaResult(@TypeOf(a)) {
+ if (a == 0) {
+ return .{ .gcd = b, .x = 0, .y = 1 };
+ }
+ const r = eea(@rem(b, a), a);
+ return .{ .gcd = r.gcd, .x = r.y - @divTrunc(b, a) * r.x, .y = r.x };
+}
+
+fn EeaResult(comptime T: type) type {
+ return struct { gcd: T, x: T, y: T };
+}
+
+// Returns least common multiple of a and b.
+fn lcm(a: anytype, b: @TypeOf(a)) @TypeOf(a) {
+ const r = eea(a, b);
+ return a * b / r.gcd;
+}
+
+// Invert modulo p.
+fn invertMod(a: anytype, p: @TypeOf(a)) @TypeOf(a) {
+ const r = eea(a, p);
+ assert(r.gcd == 1);
+ return r.x;
+}
+
+// Reduce mod q for testing.
+fn modQ32(x: i32) i16 {
+ var y = @intCast(i16, @rem(x, @as(i32, Q)));
+ if (y < 0) {
+ y += Q;
+ }
+ return y;
+}
+
+// Given -2¹⁵ q ≤ x < 2¹⁵ q, returns -q < y < q with x 2⁻¹⁶ = y (mod q).
+fn montReduce(x: i32) i16 {
+ const qInv = comptime invertMod(@as(i32, Q), R);
+ // This is Montgomery reduction with R=2¹⁶.
+ //
+ // Note gcd(2¹⁶, q) = 1 as q is prime. Write q' := 62209 = q⁻¹ mod R.
+ // First we compute
+ //
+ // m := ((x mod R) q') mod R
+ // = x q' mod R
+ // = int16(x q')
+ // = int16(int32(x) * int32(q'))
+ //
+ // Note that x q' might be as big as 2³² and could overflow the int32
+ // multiplication in the last line. However for any int32s a and b,
+ // we have int32(int64(a)*int64(b)) = int32(a*b) and so the result is ok.
+ const m = @truncate(i16, @truncate(i32, x *% qInv));
+
+    // Note that x - m q is divisible by R; indeed modulo R we have
+ //
+ // x - m q ≡ x - x q' q ≡ x - x q⁻¹ q ≡ x - x = 0.
+ //
+ // We return y := (x - m q) / R. Note that y is indeed correct as
+ // modulo q we have
+ //
+ // y ≡ x R⁻¹ - m q R⁻¹ = x R⁻¹
+ //
+    // and as |m q| ≤ 2¹⁵ q and -2¹⁵ q ≤ x < 2¹⁵ q, we have
+    // |x - m q| < 2¹⁶ q and so -q < (x - m q) / R < q as desired.
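+    //
+    // For example, montReduce(1): m = int16(1 · 62209) = -3327, so
+    // y = (1 - (-3327)·3329) / 2¹⁶ = 169, and indeed 169 · 2¹⁶ ≡ 1 (mod q).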
+ const yR = x - @as(i32, m) * @as(i32, Q);
+ return @bitCast(i16, @truncate(u16, @bitCast(u32, yR) >> 16));
+}
+
+test "Test montReduce" {
+ var rnd = RndGen.init(0);
+ for (0..1000) |_| {
+ const bound = comptime @as(i32, Q) * (1 << 15);
+ const x = rnd.random().intRangeLessThan(i32, -bound, bound);
+ const y = montReduce(x);
+ try testing.expect(-Q < y and y < Q);
+ try testing.expectEqual(modQ32(x), modQ32(@as(i32, y) * R));
+ }
+}
+
+// Given any x, return x R mod q where R=2¹⁶.
+fn feToMont(x: i16) i16 {
+ // Note |1353 x| ≤ 1353 2¹⁵ ≤ 13318 q ≤ 2¹⁵ q and so we're within
+ // the bounds of montReduce.
+ return montReduce(@as(i32, x) * r2_mod_q);
+}
+
+test "Test feToMont" {
+ var x: i32 = -(1 << 15);
+ while (x < 1 << 15) : (x += 1) {
+ const y = feToMont(@intCast(i16, x));
+ try testing.expectEqual(modQ32(@as(i32, y)), modQ32(x * r_mod_q));
+ }
+}
+
+// Given any x, compute 0 ≤ y ≤ q with x = y (mod q).
+//
+// Beware: we might have feBarrettReduce(x) = q ≠ 0 for some x. In fact,
+// this happens if and only if x = -nq for some positive integer n.
+fn feBarrettReduce(x: i16) i16 {
+ // This is standard Barrett reduction.
+ //
+ // For any x we have x mod q = x - ⌊x/q⌋ q. We will use 20159/2²⁶ as
+ // an approximation of 1/q. Note that 0 ≤ 20159/2²⁶ - 1/q ≤ 0.135/2²⁶
+    // and so | x 20159/2²⁶ - x/q | ≤ 2⁻¹⁰ for |x| ≤ 2¹⁶. For all x
+    // not a multiple of q, the number x/q is further than 1/q from any integer
+    // and so ⌊x 20159/2²⁶⌋ = ⌊x/q⌋. If x is a multiple of q and x is positive,
+    // then x 20159/2²⁶ is larger than x/q so ⌊x 20159/2²⁶⌋ = ⌊x/q⌋ as well.
+    // Finally, if x is a negative multiple of q, then ⌊x 20159/2²⁶⌋ = ⌊x/q⌋ - 1.
+    // Thus
+    //                        [ q        if x = -nq for a positive integer n
+    //  x - ⌊x 20159/2²⁶⌋ q = [
+    //                        [ x mod q  otherwise
+    //
+    // To actually compute this, note that
+    //
+    //  ⌊x 20159/2²⁶⌋ = (20159 x) >> 26.
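+    //
+    // For example, for x = 6663 = 2q + 5 we get (20159 · 6663) >> 26 = 2
+    // and 6663 - 2q = 5 = x mod q.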
+ return x -% @intCast(i16, (@as(i32, x) * 20159) >> 26) *% Q;
+}
+
+test "Test Barrett reduction" {
+ var x: i32 = -(1 << 15);
+ while (x < 1 << 15) : (x += 1) {
+ var y1 = feBarrettReduce(@intCast(i16, x));
+ const y2 = @mod(@intCast(i16, x), Q);
+ if (x < 0 and @rem(-x, Q) == 0) {
+ y1 -= Q;
+ }
+ try testing.expectEqual(y1, y2);
+ }
+}
+
+// Returns x if x < q and x - q otherwise. Assumes x ≥ -29439.
+fn csubq(x: i16) i16 {
+ var r = x;
+ r -= Q;
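+    // (r >> 15) is -1 (all one bits) exactly when the subtraction went
+    // negative, so the next line adds q back precisely when x < q.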
+ r += (r >> 15) & Q;
+ return r;
+}
+
+test "Test csubq" {
+ var x: i32 = -29439;
+ while (x < 1 << 15) : (x += 1) {
+ const y1 = csubq(@intCast(i16, x));
+ var y2 = @intCast(i16, x);
+ if (@intCast(i16, x) >= Q) {
+ y2 -= Q;
+ }
+ try testing.expectEqual(y1, y2);
+ }
+}
+
+// Compute a^s mod p.
+fn mpow(a: anytype, s: @TypeOf(a), p: @TypeOf(a)) @TypeOf(a) {
+ var ret: @TypeOf(a) = 1;
+ var s2 = s;
+ var a2 = a;
+
+ while (true) {
+ if (s2 & 1 == 1) {
+ ret = @mod(ret * a2, p);
+ }
+ s2 >>= 1;
+ if (s2 == 0) {
+ break;
+ }
+ a2 = @mod(a2 * a2, p);
+ }
+ return ret;
+}
+
+// Computes zetas table used by ntt and invNTT.
+fn computeZetas() [128]i16 {
+ @setEvalBranchQuota(10000);
+ var ret: [128]i16 = undefined;
+ for (&ret, 0..) |*r, i| {
+ const t = @intCast(i16, mpow(@as(i32, zeta), @bitReverse(@intCast(u7, i)), Q));
+ r.* = csubq(feBarrettReduce(feToMont(t)));
+ }
+ return ret;
+}
+
+// An element of our base ring R, the ring of polynomials over ℤ_q
+// modulo the equation Xᴺ = -1, where q = 3329 and N = 256.
+//
+// This type is also used to store NTT-transformed polynomials,
+// see Poly.ntt().
+//
+// Coefficients aren't always reduced. See normalize().
+const Poly = struct {
+ cs: [N]i16,
+
+ const bytes_length = N / 2 * 3;
+ const zero: Poly = .{ .cs = .{0} ** N };
+
+ fn add(a: Poly, b: Poly) Poly {
+ var ret: Poly = undefined;
+ for (0..N) |i| {
+ ret.cs[i] = a.cs[i] + b.cs[i];
+ }
+ return ret;
+ }
+
+ fn sub(a: Poly, b: Poly) Poly {
+ var ret: Poly = undefined;
+ for (0..N) |i| {
+ ret.cs[i] = a.cs[i] - b.cs[i];
+ }
+ return ret;
+ }
+
+ // For testing, generates a random polynomial with for each
+ // coefficient |x| ≤ q.
+ fn randAbsLeqQ(rnd: anytype) Poly {
+ var ret: Poly = undefined;
+ for (0..N) |i| {
+ ret.cs[i] = rnd.random().intRangeAtMost(i16, -Q, Q);
+ }
+ return ret;
+ }
+
+ // For testing, generates a random normalized polynomial.
+ fn randNormalized(rnd: anytype) Poly {
+ var ret: Poly = undefined;
+ for (0..N) |i| {
+ ret.cs[i] = rnd.random().intRangeLessThan(i16, 0, Q);
+ }
+ return ret;
+ }
+
+ // Executes a forward "NTT" on p.
+ //
+ // Assumes the coefficients are in absolute value ≤q. The resulting
+ // coefficients are in absolute value ≤7q. If the input is in Montgomery
+ // form, then the result is in Montgomery form and so (by linearity of the NTT)
+ // if the input is in regular form, then the result is also in regular form.
+ fn ntt(a: Poly) Poly {
+ // Note that ℤ_q does not have a primitive 512ᵗʰ root of unity (as 512
+ // does not divide into q-1) and so we cannot do a regular NTT. ℤ_q
+ // does have a primitive 256ᵗʰ root of unity, the smallest of which
+ // is ζ := 17.
+ //
+ // Recall that our base ring R := ℤ_q[x] / (x²⁵⁶ + 1). The polynomial
+ // x²⁵⁶+1 will not split completely (as its roots would be 512ᵗʰ roots
+ // of unity.) However, it does split almost (using ζ¹²⁸ = -1):
+ //
+ // x²⁵⁶ + 1 = (x²)¹²⁸ - ζ¹²⁸
+ // = ((x²)⁶⁴ - ζ⁶⁴)((x²)⁶⁴ + ζ⁶⁴)
+ // = ((x²)³² - ζ³²)((x²)³² + ζ³²)((x²)³² - ζ⁹⁶)((x²)³² + ζ⁹⁶)
+ // ⋮
+ // = (x² - ζ)(x² + ζ)(x² - ζ⁶⁵)(x² + ζ⁶⁵) … (x² + ζ¹²⁷)
+ //
+ // Note that the powers of ζ that appear (from the second line down) are
+ // in binary
+ //
+ // 0100000 1100000
+ // 0010000 1010000 0110000 1110000
+ // 0001000 1001000 0101000 1101000 0011000 1011000 0111000 1111000
+ // …
+ //
+ // That is: brv(2), brv(3), brv(4), …, where brv(x) denotes the 7-bit
+ // bitreversal of x. These powers of ζ are given by the Zetas array.
+ //
+ // The polynomials x² ± ζⁱ are irreducible and coprime, hence by
+ // the Chinese Remainder Theorem we know
+ //
+ // ℤ_q[x]/(x²⁵⁶+1) → ℤ_q[x]/(x²-ζ) x … x ℤ_q[x]/(x²+ζ¹²⁷)
+ //
+ // given by a ↦ ( a mod x²-ζ, …, a mod x²+ζ¹²⁷ )
+ // is an isomorphism, which is the "NTT". It can be efficiently computed by
+ //
+ //
+ // a ↦ ( a mod (x²)⁶⁴ - ζ⁶⁴, a mod (x²)⁶⁴ + ζ⁶⁴ )
+ // ↦ ( a mod (x²)³² - ζ³², a mod (x²)³² + ζ³²,
+ // a mod (x²)⁹⁶ - ζ⁹⁶, a mod (x²)⁹⁶ + ζ⁹⁶ )
+ //
+ // et cetera
+ // If N was 8 then this can be pictured in the following diagram:
+ //
+ // https://cnx.org/resources/17ee4dfe517a6adda05377b25a00bf6e6c93c334/File0026.png
+ //
+ // Each cross is a Cooley-Tukey butterfly: it's the map
+ //
+ // (a, b) ↦ (a + ζb, a - ζb)
+ //
+ // for the appropriate power ζ for that column and row group.
+ var p = a;
+ var k: usize = 0; // index into zetas
+
+ var l = N >> 1;
+ while (l > 1) : (l >>= 1) {
+ // On the nᵗʰ iteration of the l-loop, the absolute value of the
+ // coefficients are bounded by nq.
+
+ // offset effectively loops over the row groups in this column; it is
+ // the first row in the row group.
+ var offset: usize = 0;
+ while (offset < N - l) : (offset += 2 * l) {
+ k += 1;
+ const z = @as(i32, zetas[k]);
+
+ // j loops over each butterfly in the row group.
+ for (offset..offset + l) |j| {
+ const t = montReduce(z * @as(i32, p.cs[j + l]));
+ p.cs[j + l] = p.cs[j] - t;
+ p.cs[j] += t;
+ }
+ }
+ }
+
+ return p;
+ }
+
+    // Executes an inverse "NTT" on p and multiplies by the Montgomery factor R.
+ //
+ // Assumes the coefficients are in absolute value ≤q. The resulting
+ // coefficients are in absolute value ≤q. If the input is in Montgomery
+ // form, then the result is in Montgomery form and so (by linearity)
+ // if the input is in regular form, then the result is also in regular form.
+ fn invNTT(a: Poly) Poly {
+ var k: usize = 127; // index into zetas
+ var r: usize = 0; // index into invNTTReductions
+ var p = a;
+
+        // We basically do the opposite of NTT, but postpone dividing by 2 in the
+ // inverse of the Cooley-Tukey butterfly and accumulate that into a big
+ // division by 2⁷ at the end. See the comments in the ntt() function.
+
+ var l: usize = 2;
+ while (l < N) : (l <<= 1) {
+ var offset: usize = 0;
+ while (offset < N - l) : (offset += 2 * l) {
+ // As we're inverting, we need powers of ζ⁻¹ (instead of ζ).
+ // To be precise, we need ζᵇʳᵛ⁽ᵏ⁾⁻¹²⁸. However, as ζ⁻¹²⁸ = -1,
+ // we can use the existing zetas table instead of
+ // keeping a separate invZetas table as in Dilithium.
+
+ const minZeta = @as(i32, zetas[k]);
+ k -= 1;
+
+ for (offset..offset + l) |j| {
+ // Gentleman-Sande butterfly: (a, b) ↦ (a + b, ζ(a-b))
+ const t = p.cs[j + l] - p.cs[j];
+ p.cs[j] += p.cs[j + l];
+ p.cs[j + l] = montReduce(minZeta * @as(i32, t));
+
+ // Note that if we had |a| < αq and |b| < βq before the
+ // butterfly, then now we have |a| < (α+β)q and |b| < q.
+ }
+ }
+
+ // We let the invNTTReductions instruct us which coefficients to
+ // Barrett reduce.
+ while (true) {
+ const i = inv_ntt_reductions[r];
+ r += 1;
+ if (i < 0) {
+ break;
+ }
+ p.cs[@intCast(usize, i)] = feBarrettReduce(p.cs[@intCast(usize, i)]);
+ }
+ }
+
+ for (0..N) |j| {
+ // Note 1441 = (128)⁻¹ R². The coefficients are bounded by 9q, so
+ // as 1441 * 9 ≈ 2¹⁴ < 2¹⁵, we're within the required bounds
+ // for montReduce().
+ p.cs[j] = montReduce(r2_over_128 * @as(i32, p.cs[j]));
+ }
+
+ return p;
+ }
+
+ // Normalizes coefficients.
+ //
+ // Ensures each coefficient is in {0, …, q-1}.
+ fn normalize(a: Poly) Poly {
+ var ret: Poly = undefined;
+ for (0..N) |i| {
+ ret.cs[i] = csubq(feBarrettReduce(a.cs[i]));
+ }
+ return ret;
+ }
+
+ // Put p in Montgomery form.
+ fn toMont(a: Poly) Poly {
+ var ret: Poly = undefined;
+ for (0..N) |i| {
+ ret.cs[i] = feToMont(a.cs[i]);
+ }
+ return ret;
+ }
+
+ // Barret reduce coefficients.
+ //
+ // Beware, this does not fully normalize coefficients.
+ fn barrettReduce(a: Poly) Poly {
+ var ret: Poly = undefined;
+ for (0..N) |i| {
+ ret.cs[i] = feBarrettReduce(a.cs[i]);
+ }
+ return ret;
+ }
+
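+    // Size of a packed Compress_q(p, d): d bits per coefficient.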
+ fn compressedSize(comptime d: u8) usize {
+ return @divTrunc(N * d, 8);
+ }
+
+ // Returns packed Compress_q(p, d).
+ //
+ // Assumes p is normalized.
+ fn compress(p: Poly, comptime d: u8) [compressedSize(d)]u8 {
+ @setEvalBranchQuota(10000);
+ const q_over_2: u32 = comptime @divTrunc(Q, 2); // (q-1)/2
+ const two_d_min_1: u32 = comptime (1 << d) - 1; // 2ᵈ-1
+ var in_off: usize = 0;
+ var out_off: usize = 0;
+
+ const batch_size: usize = comptime lcm(@as(i16, d), 8);
+ const in_batch_size: usize = comptime batch_size / d;
+ const out_batch_size: usize = comptime batch_size / 8;
+
+ const out_length: usize = comptime @divTrunc(N * d, 8);
+ comptime assert(out_length * 8 == d * N);
+ var out = [_]u8{0} ** out_length;
+
+ while (in_off < N) {
+ // First we compress into in.
+ var in: [in_batch_size]u16 = undefined;
+ inline for (0..in_batch_size) |i| {
+ // Compress_q(x, d) = ⌈(2ᵈ/q)x⌋ mod⁺ 2ᵈ
+ // = ⌊(2ᵈ/q)x+½⌋ mod⁺ 2ᵈ
+ // = ⌊((x << d) + q/2) / q⌋ mod⁺ 2ᵈ
+                //                  = DIV((x << d) + q/2, q) & ((1<<d) - 1)
+                const t = (@intCast(u32, p.cs[in_off + i]) << d) + q_over_2;
+                in[i] = @intCast(u16, @divFloor(t, @as(u32, Q)) & two_d_min_1);
+            }
+
+            // Now pack the d-bit integers from `in` into the bytes of `out`.
+            comptime var in_shift: usize = 0;
+            comptime var j: usize = 0;
+            comptime var i: usize = 0;
+            inline while (j < out_batch_size) : (j += 1) {
+                comptime var todo: usize = 8;
+                inline while (todo > 0) {
+ const out_shift = comptime 8 - todo;
+ out[out_off + j] |= @truncate(u8, (in[i] >> in_shift) << out_shift);
+
+ const done = comptime @min(@min(d, todo), d - in_shift);
+ todo -= done;
+ in_shift += done;
+
+ if (in_shift == d) {
+ in_shift = 0;
+ i += 1;
+ }
+ }
+ }
+
+ in_off += in_batch_size;
+ out_off += out_batch_size;
+ }
+
+ return out;
+ }
+
+ // Set p to Decompress_q(m, d).
+ fn decompress(comptime d: u8, in: *const [compressedSize(d)]u8) Poly {
+ @setEvalBranchQuota(10000);
+ const inLen = comptime @divTrunc(N * d, 8);
+ comptime assert(inLen * 8 == d * N);
+ var ret: Poly = undefined;
+ var in_off: usize = 0;
+ var out_off: usize = 0;
+
+ const batch_size: usize = comptime lcm(@as(i16, d), 8);
+ const in_batch_size: usize = comptime batch_size / 8;
+ const out_batch_size: usize = comptime batch_size / d;
+
+ while (out_off < N) {
+ comptime var in_shift: usize = 0;
+ comptime var j: usize = 0;
+ comptime var i: usize = 0;
+ inline while (i < out_batch_size) : (i += 1) {
+ // First, unpack next coefficient.
+ comptime var todo = d;
+ var out: u16 = 0;
+
+ inline while (todo > 0) {
+ const out_shift = comptime d - todo;
+ const m = comptime (1 << d) - 1;
+ out |= (@as(u16, in[in_off + j] >> in_shift) << out_shift) & m;
+
+ const done = comptime @min(@min(8, todo), 8 - in_shift);
+ todo -= done;
+ in_shift += done;
+
+ if (in_shift == 8) {
+ in_shift = 0;
+ j += 1;
+ }
+ }
+
+ // Decompress_q(x, d) = ⌈(q/2ᵈ)x⌋
+ // = ⌊(q/2ᵈ)x+½⌋
+ // = ⌊(qx + 2ᵈ⁻¹)/2ᵈ⌋
+ // = (qx + (1<<(d-1))) >> d
+ const qx = @as(u32, out) * @as(u32, Q);
+ ret.cs[out_off + i] = @intCast(i16, (qx + (1 << (d - 1))) >> d);
+ }
+
+ in_off += in_batch_size;
+ out_off += out_batch_size;
+ }
+
+ return ret;
+ }
+
+ // Returns the "pointwise" multiplication a o b.
+ //
+ // That is: invNTT(a o b) = invNTT(a) * invNTT(b). Assumes a and b are in
+ // Montgomery form. Products between coefficients of a and b must be strictly
+ // bounded in absolute value by 2¹⁵q. a o b will be in Montgomery form and
+ // bounded in absolute value by 2q.
+ fn mulHat(a: Poly, b: Poly) Poly {
+ // Recall from the discussion in ntt(), that a transformed polynomial is
+ // an element of ℤ_q[x]/(x²-ζ) x … x ℤ_q[x]/(x²+ζ¹²⁷);
+ // that is: 128 degree-one polynomials instead of simply 256 elements
+ // from ℤ_q as in the regular NTT. So instead of pointwise multiplication,
+ // we multiply the 128 pairs of degree-one polynomials modulo the
+ // right equation:
+ //
+ // (a₁ + a₂x)(b₁ + b₂x) = a₁b₁ + a₂b₂ζ' + (a₁b₂ + a₂b₁)x,
+ //
+ // where ζ' is the appropriate power of ζ.
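+        //
+        // Within each group of four coefficients below, the first pair is
+        // reduced modulo x² - ζ' and the second pair modulo x² + ζ', which is
+        // why the ζ' term is added for p.cs[i] but subtracted for p.cs[i + 2].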
+
+ var p: Poly = undefined;
+ var k: usize = 64;
+ var i: usize = 0;
+ while (i < N) : (i += 4) {
+ const z = @as(i32, zetas[k]);
+ k += 1;
+
+ const a1b1 = montReduce(@as(i32, a.cs[i + 1]) * @as(i32, b.cs[i + 1]));
+ const a0b0 = montReduce(@as(i32, a.cs[i]) * @as(i32, b.cs[i]));
+ const a1b0 = montReduce(@as(i32, a.cs[i + 1]) * @as(i32, b.cs[i]));
+ const a0b1 = montReduce(@as(i32, a.cs[i]) * @as(i32, b.cs[i + 1]));
+
+ p.cs[i] = montReduce(a1b1 * z) + a0b0;
+ p.cs[i + 1] = a0b1 + a1b0;
+
+ const a3b3 = montReduce(@as(i32, a.cs[i + 3]) * @as(i32, b.cs[i + 3]));
+ const a2b2 = montReduce(@as(i32, a.cs[i + 2]) * @as(i32, b.cs[i + 2]));
+ const a3b2 = montReduce(@as(i32, a.cs[i + 3]) * @as(i32, b.cs[i + 2]));
+ const a2b3 = montReduce(@as(i32, a.cs[i + 2]) * @as(i32, b.cs[i + 3]));
+
+ p.cs[i + 2] = a2b2 - montReduce(a3b3 * z);
+ p.cs[i + 3] = a2b3 + a3b2;
+ }
+
+ return p;
+ }
+
+ // Sample p from a centered binomial distribution with n=2η and p=½ - viz:
+ // coefficients are in {-η, …, η} with probabilities
+ //
+ // {ncr(0, 2η)/2^2η, ncr(1, 2η)/2^2η, …, ncr(2η,2η)/2^2η}
+ fn noise(comptime eta: u8, nonce: u8, seed: *const [32]u8) Poly {
+ var h = sha3.Shake256.init(.{});
+ const suffix: [1]u8 = .{nonce};
+ h.update(seed);
+ h.update(&suffix);
+
+ // The distribution at hand is exactly the same as that
+ // of (a₁ + a₂ + … + a_η) - (b₁ + … + b_η) where a_i,b_i~U(1).
+ // Thus we need 2η bits per coefficient.
+ const buf_len = comptime 2 * eta * N / 8;
+ var buf: [buf_len]u8 = undefined;
+ h.squeeze(&buf);
+
+ // buf is interpreted as a₁…a_ηb₁…b_ηa₁…a_ηb₁…b_η…. We process
+ // multiple coefficients in one batch.
+
+ const T = switch (builtin.target.cpu.arch) {
+ .x86_64, .x86 => u32, // Generates better code on Intel CPUs
+ else => u64, // u128 might be faster on some other CPUs.
+ };
+
+ comptime var batch_count: usize = undefined;
+ comptime var batch_bytes: usize = undefined;
+ comptime var mask: T = 0;
+ comptime {
+ batch_count = @bitSizeOf(T) / @as(usize, 2 * eta);
+ while (@rem(N, batch_count) != 0 and batch_count > 0) : (batch_count -= 1) {}
+ assert(batch_count > 0);
+ assert(@rem(2 * eta * batch_count, 8) == 0);
+ batch_bytes = 2 * eta * batch_count / 8;
+
+ for (0..2 * eta * batch_count) |_| {
+ mask <<= eta;
+ mask |= 1;
+ }
+ }
+
+ var ret: Poly = undefined;
+ for (0..comptime N / batch_count) |i| {
+ // Read coefficients into t. In the case of η=3,
+ // we have t = a₁ + 2a₂ + 4a₃ + 8b₁ + 16b₂ + …
+ var t: T = 0;
+ inline for (0..batch_bytes) |j| {
+ t |= @as(T, buf[batch_bytes * i + j]) << (8 * j);
+ }
+
+            // Accumulate the a's and b's together by masking them out, shifting
+ // and adding. For η=3, we have d = a₁ + a₂ + a₃ + 8(b₁ + b₂ + b₃) + …
+ var d: T = 0;
+ inline for (0..eta) |j| {
+ d += (t >> j) & mask;
+ }
+
+ // Extract each a and b separately and set coefficient in polynomial.
+ inline for (0..batch_count) |j| {
+ const mask2 = comptime (1 << eta) - 1;
+ const a = @intCast(i16, (d >> (comptime (2 * j * eta))) & mask2);
+ const b = @intCast(i16, (d >> (comptime ((2 * j + 1) * eta))) & mask2);
+ ret.cs[batch_count * i + j] = a - b;
+ }
+ }
+
+ return ret;
+ }
+
+ // Sample p uniformly from the given seed and x and y coordinates.
+ fn uniform(seed: [32]u8, x: u8, y: u8) Poly {
+ var h = sha3.Shake128.init(.{});
+ const suffix: [2]u8 = .{ x, y };
+ h.update(&seed);
+ h.update(&suffix);
+
+ const buf_len = sha3.Shake128.block_length; // rate SHAKE-128
+ var buf: [buf_len]u8 = undefined;
+
+ var ret: Poly = undefined;
+ var i: usize = 0; // index into ret.cs
+ outer: while (true) {
+ h.squeeze(&buf);
+
+ var j: usize = 0; // index into buf
+ while (j < buf_len) : (j += 3) {
+ const b0 = @as(u16, buf[j]);
+ const b1 = @as(u16, buf[j + 1]);
+ const b2 = @as(u16, buf[j + 2]);
+
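+                // Each group of three bytes packs two 12-bit candidates; the
+                // rejection sampling below keeps only those smaller than q.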
+ const ts: [2]u16 = .{
+ b0 | ((b1 & 0xf) << 8),
+ (b1 >> 4) | (b2 << 4),
+ };
+
+ inline for (ts) |t| {
+ if (t < Q) {
+ ret.cs[i] = @intCast(i16, t);
+ i += 1;
+
+ if (i == N) {
+ break :outer;
+ }
+ }
+ }
+ }
+ }
+
+ return ret;
+ }
+
+ // Packs p.
+ //
+ // Assumes p is normalized (and not just Barrett reduced).
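+    //
+    // Each pair of (12-bit) coefficients is packed into three bytes: the first
+    // coefficient fills byte 0 and the low nibble of byte 1, the second fills
+    // the high nibble of byte 1 and byte 2.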
+ fn toBytes(p: Poly) [bytes_length]u8 {
+ var ret: [bytes_length]u8 = undefined;
+ for (0..comptime N / 2) |i| {
+ const t0 = @intCast(u16, p.cs[2 * i]);
+ const t1 = @intCast(u16, p.cs[2 * i + 1]);
+ ret[3 * i] = @truncate(u8, t0);
+ ret[3 * i + 1] = @truncate(u8, (t0 >> 8) | (t1 << 4));
+ ret[3 * i + 2] = @truncate(u8, t1 >> 4);
+ }
+ return ret;
+ }
+
+ // Unpacks a Poly from buf.
+ //
+ // p will not be normalized; instead 0 ≤ p[i] < 4096.
+ fn fromBytes(buf: *const [bytes_length]u8) Poly {
+ var ret: Poly = undefined;
+ for (0..comptime N / 2) |i| {
+ const b0 = @as(i16, buf[3 * i]);
+ const b1 = @as(i16, buf[3 * i + 1]);
+ const b2 = @as(i16, buf[3 * i + 2]);
+ ret.cs[2 * i] = b0 | ((b1 & 0xf) << 8);
+ ret.cs[2 * i + 1] = (b1 >> 4) | b2 << 4;
+ }
+ return ret;
+ }
+};
+
+// A vector of K polynomials.
+fn Vec(comptime K: u8) type {
+ return struct {
+ ps: [K]Poly,
+
+ const Self = @This();
+ const bytes_length = K * Poly.bytes_length;
+
+ fn compressedSize(comptime d: u8) usize {
+ return Poly.compressedSize(d) * K;
+ }
+
+ fn ntt(a: Self) Self {
+ var ret: Self = undefined;
+ for (0..K) |i| {
+ ret.ps[i] = a.ps[i].ntt();
+ }
+ return ret;
+ }
+
+ fn invNTT(a: Self) Self {
+ var ret: Self = undefined;
+ for (0..K) |i| {
+ ret.ps[i] = a.ps[i].invNTT();
+ }
+ return ret;
+ }
+
+ fn normalize(a: Self) Self {
+ var ret: Self = undefined;
+ for (0..K) |i| {
+ ret.ps[i] = a.ps[i].normalize();
+ }
+ return ret;
+ }
+
+ fn barrettReduce(a: Self) Self {
+ var ret: Self = undefined;
+ for (0..K) |i| {
+ ret.ps[i] = a.ps[i].barrettReduce();
+ }
+ return ret;
+ }
+
+ fn add(a: Self, b: Self) Self {
+ var ret: Self = undefined;
+ for (0..K) |i| {
+ ret.ps[i] = a.ps[i].add(b.ps[i]);
+ }
+ return ret;
+ }
+
+ fn sub(a: Self, b: Self) Self {
+ var ret: Self = undefined;
+ for (0..K) |i| {
+ ret.ps[i] = a.ps[i].sub(b.ps[i]);
+ }
+ return ret;
+ }
+
+ // Samples v[i] from centered binomial distribution with the given η,
+ // seed and nonce+i.
+ fn noise(comptime eta: u8, nonce: u8, seed: *const [32]u8) Self {
+ var ret: Self = undefined;
+ for (0..K) |i| {
+ ret.ps[i] = Poly.noise(eta, nonce + @intCast(u8, i), seed);
+ }
+ return ret;
+ }
+
+ // Sets p to the inner product of a and b using "pointwise" multiplication.
+ //
+ // See MulHat() and NTT() for a description of the multiplication.
+ // Assumes a and b are in Montgomery form. p will be in Montgomery form,
+ // and its coefficients will be bounded in absolute value by 2kq.
+ // If a and b are not in Montgomery form, then the action is the same
+ // as "pointwise" multiplication followed by multiplying by R⁻¹, the inverse
+ // of the Montgomery factor.
+ fn dotHat(a: Self, b: Self) Poly {
+ var ret: Poly = Poly.zero;
+ for (0..K) |i| {
+ ret = ret.add(a.ps[i].mulHat(b.ps[i]));
+ }
+ return ret;
+ }
+
+ fn compress(v: Self, comptime d: u8) [compressedSize(d)]u8 {
+ const cs = comptime Poly.compressedSize(d);
+ var ret: [compressedSize(d)]u8 = undefined;
+ inline for (0..K) |i| {
+ mem.copy(u8, ret[i * cs .. (i + 1) * cs], &v.ps[i].compress(d));
+ }
+ return ret;
+ }
+
+ fn decompress(comptime d: u8, buf: *const [compressedSize(d)]u8) Self {
+ const cs = comptime Poly.compressedSize(d);
+ var ret: Self = undefined;
+ inline for (0..K) |i| {
+ ret.ps[i] = Poly.decompress(d, buf[i * cs .. (i + 1) * cs]);
+ }
+ return ret;
+ }
+
+        /// Serializes the vector into a byte array.
+ fn toBytes(v: Self) [bytes_length]u8 {
+ var ret: [bytes_length]u8 = undefined;
+ inline for (0..K) |i| {
+ mem.copy(
+ u8,
+ ret[i * Poly.bytes_length .. (i + 1) * Poly.bytes_length],
+ &v.ps[i].toBytes(),
+ );
+ }
+ return ret;
+ }
+
+        /// Deserializes the vector from a byte array.
+ fn fromBytes(buf: *const [bytes_length]u8) Self {
+ var ret: Self = undefined;
+ inline for (0..K) |i| {
+ ret.ps[i] = Poly.fromBytes(
+ buf[i * Poly.bytes_length .. (i + 1) * Poly.bytes_length],
+ );
+ }
+ return ret;
+ }
+ };
+}
+
+// A matrix of K vectors
+fn Mat(comptime K: u8) type {
+ return struct {
+ const Self = @This();
+ vs: [K]Vec(K),
+
+ fn uniform(seed: [32]u8, comptime transposed: bool) Self {
+ var ret: Self = undefined;
+ var i: u8 = 0;
+ while (i < K) : (i += 1) {
+ var j: u8 = 0;
+ while (j < K) : (j += 1) {
+ ret.vs[i].ps[j] = Poly.uniform(
+ seed,
+ if (transposed) i else j,
+ if (transposed) j else i,
+ );
+ }
+ }
+ return ret;
+ }
+
+ // Returns transpose of A
+ fn transpose(m: Self) Self {
+ var ret: Self = undefined;
+ for (0..K) |i| {
+ for (0..K) |j| {
+ ret.vs[i].ps[j] = m.vs[j].ps[i];
+ }
+ }
+ return ret;
+ }
+ };
+}
+
+// Returns 1 if a ≠ b and 0 otherwise, in constant time.
+fn ctneq(comptime len: usize, a: [len]u8, b: [len]u8) u1 {
+ return 1 - @boolToInt(crypto.utils.timingSafeEql([len]u8, a, b));
+}
+
+// Copies src into dst if b = 1; leaves dst unchanged if b = 0. Constant time.
+fn cmov(comptime len: usize, dst: *[len]u8, src: [len]u8, b: u1) void {
+ const mask = @as(u8, 0) -% b;
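+    // mask is 0xff when b = 1 and 0x00 when b = 0, so the xor below either
+    // replaces dst[i] with src[i] or leaves it unchanged, without branching.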
+ for (0..len) |i| {
+ dst[i] ^= mask & (dst[i] ^ src[i]);
+ }
+}
+
+test "MulHat" {
+ var rnd = RndGen.init(0);
+
+ for (0..100) |_| {
+ const a = Poly.randAbsLeqQ(&rnd);
+ const b = Poly.randAbsLeqQ(&rnd);
+
+ const p2 = a.ntt().mulHat(b.ntt()).barrettReduce().invNTT().normalize();
+ var p: Poly = undefined;
+
+ mem.set(i16, &p.cs, 0);
+
+ for (0..N) |i| {
+ for (0..N) |j| {
+ var v = montReduce(@as(i32, a.cs[i]) * @as(i32, b.cs[j]));
+ var k = i + j;
+ if (k >= N) {
+ // Recall Xᴺ = -1.
+ k -= N;
+ v = -v;
+ }
+ p.cs[k] = feBarrettReduce(v + p.cs[k]);
+ }
+ }
+
+ p = p.toMont().normalize();
+
+ try testing.expectEqual(p, p2);
+ }
+}
+
+test "NTT" {
+ var rnd = RndGen.init(0);
+
+ for (0..1000) |_| {
+ var p = Poly.randAbsLeqQ(&rnd);
+ const q = p.toMont().normalize();
+ p = p.ntt();
+
+ for (0..N) |i| {
+ try testing.expect(p.cs[i] <= 7 * Q and -7 * Q <= p.cs[i]);
+ }
+
+ p = p.normalize().invNTT();
+ for (0..N) |i| {
+ try testing.expect(p.cs[i] <= Q and -Q <= p.cs[i]);
+ }
+
+ p = p.normalize();
+
+ try testing.expectEqual(p, q);
+ }
+}
+
+test "Compression" {
+ var rnd = RndGen.init(0);
+ inline for (.{ 1, 4, 5, 10, 11 }) |d| {
+ for (0..1000) |_| {
+ const p = Poly.randNormalized(&rnd);
+ const pp = p.compress(d);
+ const pq = Poly.decompress(d, &pp).compress(d);
+ try testing.expectEqual(pp, pq);
+ }
+ }
+}
+
+test "noise" {
+ var seed: [32]u8 = undefined;
+ for (&seed, 0..) |*s, i| {
+ s.* = @intCast(u8, i);
+ }
+ try testing.expectEqual(Poly.noise(3, 37, &seed).cs, .{
+ 0, 0, 1, -1, 0, 2, 0, -1, -1, 3, 0, 1, -2, -2, 0, 1, -2,
+ 1, 0, -2, 3, 0, 0, 0, 1, 3, 1, 1, 2, 1, -1, -1, -1, 0,
+ 1, 0, 1, 0, 2, 0, 1, -2, 0, -1, -1, -2, 1, -1, -1, 2, -1,
+ 1, 1, 2, -3, -1, -1, 0, 0, 0, 0, 1, -1, -2, -2, 0, -2, 0,
+ 0, 0, 1, 0, -1, -1, 1, -2, 2, 0, 0, 2, -2, 0, 1, 0, 1,
+ 1, 1, 0, 1, -2, -1, -2, -1, 1, 0, 0, 0, 0, 0, 1, 0, -1,
+ -1, 0, -1, 1, 0, 1, 0, -1, -1, 0, -2, 2, 0, -2, 1, -1, 0,
+ 1, -1, -1, 2, 1, 0, 0, -2, -1, 2, 0, 0, 0, -1, -1, 3, 1,
+ 0, 1, 0, 1, 0, 2, 1, 0, 0, 1, 0, 1, 0, 0, -1, -1, -1,
+ 0, 1, 3, 1, 0, 1, 0, 1, -1, -1, -1, -1, 0, 0, -2, -1, -1,
+ 2, 0, 1, 0, 1, 0, 2, -2, 0, 1, 1, -3, -1, -2, -1, 0, 1,
+ 0, 1, -2, 2, 2, 1, 1, 0, -1, 0, -1, -1, 1, 0, -1, 2, 1,
+ -1, 1, 2, -2, 1, 2, 0, 1, 2, 1, 0, 0, 2, 1, 2, 1, 0,
+ 2, 1, 0, 0, -1, -1, 1, -1, 0, 1, -1, 2, 2, 0, 0, -1, 1,
+ 1, 1, 1, 0, 0, -2, 0, -1, 1, 2, 0, 0, 1, 1, -1, 1, 0,
+ 1,
+ });
+ try testing.expectEqual(Poly.noise(2, 37, &seed).cs, .{
+ 1, 0, 1, -1, -1, -2, -1, -1, 2, 0, -1, 0, 0, -1,
+ 1, 1, -1, 1, 0, 2, -2, 0, 1, 2, 0, 0, -1, 1,
+ 0, -1, 1, -1, 1, 2, 1, 1, 0, -1, 1, -1, -2, -1,
+ 1, -1, -1, -1, 2, -1, -1, 0, 0, 1, 1, -1, 1, 1,
+ 1, 1, -1, -2, 0, 1, 0, 0, 2, 1, -1, 2, 0, 0,
+ 1, 1, 0, -1, 0, 0, -1, -1, 2, 0, 1, -1, 2, -1,
+ -1, -1, -1, 0, -2, 0, 2, 1, 0, 0, 0, -1, 0, 0,
+ 0, -1, -1, 0, -1, -1, 0, -1, 0, 0, -2, 1, 1, 0,
+ 1, 0, 1, 0, 1, 1, -1, 2, 0, 1, -1, 1, 2, 0,
+ 0, 0, 0, -1, -1, -1, 0, 1, 0, -1, 2, 0, 0, 1,
+ 1, 1, 0, 1, -1, 1, 2, 1, 0, 2, -1, 1, -1, -2,
+ -1, -2, -1, 1, 0, -2, -2, -1, 1, 0, 0, 0, 0, 1,
+ 0, 0, 0, 2, 2, 0, 1, 0, -1, -1, 0, 2, 0, 0,
+ -2, 1, 0, 2, 1, -1, -2, 0, 0, -1, 1, 1, 0, 0,
+ 2, 0, 1, 1, -2, 1, -2, 1, 1, 0, 2, 0, -1, 0,
+ -1, 0, 1, 2, 0, 1, 0, -2, 1, -2, -2, 1, -1, 0,
+ -1, 1, 1, 0, 0, 0, 1, 0, -1, 1, 1, 0, 0, 0,
+ 0, 1, 0, 1, -1, 0, 1, -1, -1, 2, 0, 0, 1, -1,
+ 0, 1, -1, 0,
+ });
+}
+
+test "uniform sampling" {
+ var seed: [32]u8 = undefined;
+ for (&seed, 0..) |*s, i| {
+ s.* = @intCast(u8, i);
+ }
+ try testing.expectEqual(Poly.uniform(seed, 1, 0).cs, .{
+ 797, 993, 161, 6, 2608, 2385, 2096, 2661, 1676, 247, 2440,
+ 342, 634, 194, 1570, 2848, 986, 684, 3148, 3208, 2018, 351,
+ 2288, 612, 1394, 170, 1521, 3119, 58, 596, 2093, 1549, 409,
+ 2156, 1934, 1730, 1324, 388, 446, 418, 1719, 2202, 1812, 98,
+ 1019, 2369, 214, 2699, 28, 1523, 2824, 273, 402, 2899, 246,
+ 210, 1288, 863, 2708, 177, 3076, 349, 44, 949, 854, 1371,
+ 957, 292, 2502, 1617, 1501, 254, 7, 1761, 2581, 2206, 2655,
+ 1211, 629, 1274, 2358, 816, 2766, 2115, 2985, 1006, 2433, 856,
+ 2596, 3192, 1, 1378, 2345, 707, 1891, 1669, 536, 1221, 710,
+ 2511, 120, 1176, 322, 1897, 2309, 595, 2950, 1171, 801, 1848,
+ 695, 2912, 1396, 1931, 1775, 2904, 893, 2507, 1810, 2873, 253,
+ 1529, 1047, 2615, 1687, 831, 1414, 965, 3169, 1887, 753, 3246,
+ 1937, 115, 2953, 586, 545, 1621, 1667, 3187, 1654, 1988, 1857,
+ 512, 1239, 1219, 898, 3106, 391, 1331, 2228, 3169, 586, 2412,
+ 845, 768, 156, 662, 478, 1693, 2632, 573, 2434, 1671, 173,
+ 969, 364, 1663, 2701, 2169, 813, 1000, 1471, 720, 2431, 2530,
+ 3161, 733, 1691, 527, 2634, 335, 26, 2377, 1707, 767, 3020,
+ 950, 502, 426, 1138, 3208, 2607, 2389, 44, 1358, 1392, 2334,
+ 875, 2097, 173, 1697, 2578, 942, 1817, 974, 1165, 2853, 1958,
+ 2973, 3282, 271, 1236, 1677, 2230, 673, 1554, 96, 242, 1729,
+ 2518, 1884, 2272, 71, 1382, 924, 1807, 1610, 456, 1148, 2479,
+ 2152, 238, 2208, 2329, 713, 1175, 1196, 757, 1078, 3190, 3169,
+ 708, 3117, 154, 1751, 3225, 1364, 154, 23, 2842, 1105, 1419,
+ 79, 5, 2013,
+ });
+}
+
+test "Polynomial packing" {
+ var rnd = RndGen.init(0);
+
+ for (0..1000) |_| {
+ const p = Poly.randNormalized(&rnd);
+ try testing.expectEqual(Poly.fromBytes(&p.toBytes()), p);
+ }
+}
+
+test "Test inner PKE" {
+ var seed: [32]u8 = undefined;
+ var pt: [32]u8 = undefined;
+ for (&seed, &pt, 0..) |*s, *p, i| {
+ s.* = @intCast(u8, i);
+ p.* = @intCast(u8, i + 32);
+ }
+ inline for (modes) |mode| {
+ for (0..100) |i| {
+ var pk: mode.InnerPk = undefined;
+ var sk: mode.InnerSk = undefined;
+ seed[0] = @intCast(u8, i);
+ mode.innerKeyFromSeed(seed, &pk, &sk);
+ for (0..10) |j| {
+ seed[1] = @intCast(u8, j);
+ try testing.expectEqual(sk.decrypt(&pk.encrypt(&pt, &seed)), pt);
+ }
+ }
+ }
+}
+
+test "Test happy flow" {
+ var seed: [64]u8 = undefined;
+ for (&seed, 0..) |*s, i| {
+ s.* = @intCast(u8, i);
+ }
+ inline for (modes) |mode| {
+ for (0..100) |i| {
+ seed[0] = @intCast(u8, i);
+ const kp = try mode.KeyPair.create(seed);
+ const sk = try mode.SecretKey.fromBytes(&kp.secret_key.toBytes());
+ try testing.expectEqual(sk, kp.secret_key);
+ const pk = try mode.PublicKey.fromBytes(&kp.public_key.toBytes());
+ try testing.expectEqual(pk, kp.public_key);
+ for (0..10) |j| {
+ seed[1] = @intCast(u8, j);
+ const e = pk.encaps(seed[0..32].*);
+ try testing.expectEqual(e.shared_secret, try sk.decaps(&e.ciphertext));
+ }
+ }
+ }
+}
+
+// Code to run the NIST Known Answer Tests (KAT); see PQCgenKAT.c.
+
+const sha2 = crypto.hash.sha2;
+
+test "NIST KAT test" {
+ inline for (.{
+ .{ Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547" },
+ .{ Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5" },
+ .{ Kyber768, "a1e122cad3c24bc51622e4c242d8b8acbcd3f618fee4220400605ca8f9ea02c2" },
+ }) |modeHash| {
+ const mode = modeHash[0];
+ var seed: [48]u8 = undefined;
+ for (&seed, 0..) |*s, i| {
+ s.* = @intCast(u8, i);
+ }
+ var f = sha2.Sha256.init(.{});
+ const fw = f.writer();
+ var g = NistDRBG.init(seed);
+ try std.fmt.format(fw, "# {s}\n\n", .{mode.name});
+ for (0..100) |i| {
+ g.fill(&seed);
+ try std.fmt.format(fw, "count = {}\n", .{i});
+ try std.fmt.format(fw, "seed = {s}\n", .{std.fmt.fmtSliceHexUpper(&seed)});
+ var g2 = NistDRBG.init(seed);
+
+ // This is not equivalent to g2.fill(kseed[:]). As the reference
+ // implementation calls randombytes twice generating the keypair,
+ // we have to do that as well.
+ var kseed: [64]u8 = undefined;
+ var eseed: [32]u8 = undefined;
+ g2.fill(kseed[0..32]);
+ g2.fill(kseed[32..64]);
+ g2.fill(&eseed);
+ const kp = try mode.KeyPair.create(kseed);
+ const e = kp.public_key.encaps(eseed);
+ const ss2 = try kp.secret_key.decaps(&e.ciphertext);
+ try testing.expectEqual(ss2, e.shared_secret);
+ try std.fmt.format(fw, "pk = {s}\n", .{std.fmt.fmtSliceHexUpper(&kp.public_key.toBytes())});
+ try std.fmt.format(fw, "sk = {s}\n", .{std.fmt.fmtSliceHexUpper(&kp.secret_key.toBytes())});
+ try std.fmt.format(fw, "ct = {s}\n", .{std.fmt.fmtSliceHexUpper(&e.ciphertext)});
+ try std.fmt.format(fw, "ss = {s}\n\n", .{std.fmt.fmtSliceHexUpper(&e.shared_secret)});
+ }
+
+ var out: [32]u8 = undefined;
+ f.final(&out);
+ var outHex: [64]u8 = undefined;
+ _ = try std.fmt.bufPrint(&outHex, "{s}", .{std.fmt.fmtSliceHexLower(&out)});
+ try testing.expectEqual(outHex, modeHash[1].*);
+ }
+}
+
+const NistDRBG = struct {
+ key: [32]u8,
+ v: [16]u8,
+
+ fn incV(g: *NistDRBG) void {
+ var j: usize = 15;
+ while (j >= 0) : (j -= 1) {
+ if (g.v[j] == 255) {
+ g.v[j] = 0;
+ } else {
+ g.v[j] += 1;
+ break;
+ }
+ }
+ }
+
+ // AES256_CTR_DRBG_Update(pd, &g.key, &g.v).
+ fn update(g: *NistDRBG, pd: ?[48]u8) void {
+ var buf: [48]u8 = undefined;
+ const ctx = crypto.core.aes.Aes256.initEnc(g.key);
+ var i: usize = 0;
+ while (i < 3) : (i += 1) {
+ g.incV();
+ var block: [16]u8 = undefined;
+ ctx.encrypt(&block, &g.v);
+ mem.copy(u8, buf[i * 16 .. (i + 1) * 16], &block);
+ }
+ if (pd) |p| {
+ for (&buf, p) |*b, x| {
+ b.* ^= x;
+ }
+ }
+ mem.copy(u8, &g.key, buf[0..32]);
+ mem.copy(u8, &g.v, buf[32..48]);
+ }
+
+ // randombytes.
+ fn fill(g: *NistDRBG, out: []u8) void {
+ var block: [16]u8 = undefined;
+ var dst = out;
+
+ const ctx = crypto.core.aes.Aes256.initEnc(g.key);
+ while (dst.len > 0) {
+ g.incV();
+ ctx.encrypt(&block, &g.v);
+ if (dst.len < 16) {
+ mem.copy(u8, dst, block[0..dst.len]);
+ break;
+ }
+ mem.copy(u8, dst, &block);
+ dst = dst[16..dst.len];
+ }
+ g.update(null);
+ }
+
+ fn init(seed: [48]u8) NistDRBG {
+ var ret: NistDRBG = .{ .key = .{0} ** 32, .v = .{0} ** 16 };
+ ret.update(seed);
+ return ret;
+ }
+};
diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig
index 985027f3ee..1f48f87c53 100644
--- a/lib/std/crypto/sha3.zig
+++ b/lib/std/crypto/sha3.zig
@@ -18,6 +18,20 @@ pub const Keccak_512 = @compileError("Deprecated: use `Keccak512` instead");
pub const Shake128 = Shake(128);
pub const Shake256 = Shake(256);
+/// TurboSHAKE128 is an XOF (a secure hash function with a variable output length), with a 128-bit security level.
+/// It is based on the same permutation as SHA3 and SHAKE128, but with much higher performance.
+/// The delimiter is 0x1f by default, but can be changed for context-separation.
+pub fn TurboShake128(comptime delim: ?u8) type {
+ return TurboShake(128, delim);
+}
+
+/// TurboSHAKE256 is an XOF (a secure hash function with a variable output length), with a 256-bit security level.
+/// It is based on the same permutation as SHA3 and SHAKE256, but with much higher performance.
+/// The delimiter is 0x1f by default, but can be changed for context-separation.
+pub fn TurboShake256(comptime delim: ?u8) type {
+ return TurboShake(256, delim);
+}
+
/// A generic Keccak hash function.
pub fn Keccak(comptime f: u11, comptime output_bits: u11, comptime delim: u8, comptime rounds: u5) type {
comptime assert(output_bits > 0 and output_bits * 2 < f and output_bits % 8 == 0); // invalid output length
@@ -76,9 +90,18 @@ pub fn Keccak(comptime f: u11, comptime output_bits: u11, comptime delim: u8, co
/// The SHAKE extendable output hash function.
pub fn Shake(comptime security_level: u11) type {
+ return ShakeLike(security_level, 0x1f, 24);
+}
+
+/// The TurboSHAKE extendable output hash function.
+/// https://datatracker.ietf.org/doc/draft-irtf-cfrg-kangarootwelve/
+pub fn TurboShake(comptime security_level: u11, comptime delim: ?u8) type {
+ return ShakeLike(security_level, delim orelse 0x1f, 12);
+}
+
+fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds: u5) type {
const f = 1600;
- const rounds = 24;
- const State = KeccakState(f, security_level * 2, 0x1f, rounds);
+ const State = KeccakState(f, security_level * 2, delim, rounds);
return struct {
const Self = @This();
@@ -348,3 +371,23 @@ test "SHAKE-256 single" {
Shake256.hash("hello123", &out, .{});
try htest.assertEqual("ade612ba265f92de4a37", &out);
}
+
+test "TurboSHAKE-128" {
+ var out: [32]u8 = undefined;
+ TurboShake(128, 0x06).hash("\xff", &out, .{});
+ try htest.assertEqual("8ec9c66465ed0d4a6c35d13506718d687a25cb05c74cca1e42501abd83874a67", &out);
+}
+
+test "SHA-3 with streaming" {
+ var msg: [613]u8 = [613]u8{ 0x97, 0xd1, 0x2d, 0x1a, 0x16, 0x2d, 0x36, 0x4d, 0x20, 0x62, 0x19, 0x0b, 0x14, 0x93, 0xbb, 0xf8, 0x5b, 0xea, 0x04, 0xc2, 0x61, 0x8e, 0xd6, 0x08, 0x81, 0xa1, 0x1d, 0x73, 0x27, 0x48, 0xbf, 0xa4, 0xba, 0xb1, 0x9a, 0x48, 0x9c, 0xf9, 0x9b, 0xff, 0x34, 0x48, 0xa9, 0x75, 0xea, 0xc8, 0xa3, 0x48, 0x24, 0x9d, 0x75, 0x27, 0x48, 0xec, 0x03, 0xb0, 0xbb, 0xdf, 0x33, 0x90, 0xe3, 0x93, 0xed, 0x68, 0x24, 0x39, 0x12, 0xdf, 0xea, 0xee, 0x8c, 0x9f, 0x96, 0xde, 0x42, 0x46, 0x8c, 0x2b, 0x17, 0x83, 0x36, 0xfb, 0xf4, 0xf7, 0xff, 0x79, 0xb9, 0x45, 0x41, 0xc9, 0x56, 0x1a, 0x6b, 0x0c, 0xa4, 0x1a, 0xdd, 0x6b, 0x95, 0xe8, 0x03, 0x0f, 0x09, 0x29, 0x40, 0x1b, 0xea, 0x87, 0xfa, 0xb9, 0x18, 0xa9, 0x95, 0x07, 0x7c, 0x2f, 0x7c, 0x33, 0xfb, 0xc5, 0x11, 0x5e, 0x81, 0x0e, 0xbc, 0xae, 0xec, 0xb3, 0xe1, 0x4a, 0x26, 0x56, 0xe8, 0x5b, 0x11, 0x9d, 0x37, 0x06, 0x9b, 0x34, 0x31, 0x6e, 0xa3, 0xba, 0x41, 0xbc, 0x11, 0xd8, 0xc5, 0x15, 0xc9, 0x30, 0x2c, 0x9b, 0xb6, 0x71, 0xd8, 0x7c, 0xbc, 0x38, 0x2f, 0xd5, 0xbd, 0x30, 0x96, 0xd4, 0xa3, 0x00, 0x77, 0x9d, 0x55, 0x4a, 0x33, 0x53, 0xb6, 0xb3, 0x35, 0x1b, 0xae, 0xe5, 0xdc, 0x22, 0x23, 0x85, 0x95, 0x88, 0xf9, 0x3b, 0xbf, 0x74, 0x13, 0xaa, 0xcb, 0x0a, 0x60, 0x79, 0x13, 0x79, 0xc0, 0x4a, 0x02, 0xdb, 0x1c, 0xc9, 0xff, 0x60, 0x57, 0x9a, 0x70, 0x28, 0x58, 0x60, 0xbc, 0x57, 0x07, 0xc7, 0x47, 0x1a, 0x45, 0x71, 0x76, 0x94, 0xfb, 0x05, 0xad, 0xec, 0x12, 0x29, 0x5a, 0x44, 0x6a, 0x81, 0xd9, 0xc6, 0xf0, 0xb6, 0x9b, 0x97, 0x83, 0x69, 0xfb, 0xdc, 0x0d, 0x4a, 0x67, 0xbc, 0x72, 0xf5, 0x43, 0x5e, 0x9b, 0x13, 0xf2, 0xe4, 0x6d, 0x49, 0xdb, 0x76, 0xcb, 0x42, 0x6a, 0x3c, 0x9f, 0xa1, 0xfe, 0x5e, 0xca, 0x0a, 0xfc, 0xfa, 0x39, 0x27, 0xd1, 0x3c, 0xcb, 0x9a, 0xde, 0x4c, 0x6b, 0x09, 0x8b, 0x49, 0xfd, 0x1e, 0x3d, 0x5e, 0x67, 0x7c, 0x57, 0xad, 0x90, 0xcc, 0x46, 0x5f, 0x5c, 0xae, 0x6a, 0x9c, 0xb2, 0xcd, 0x2c, 0x89, 0x78, 0xcf, 0xf1, 0x49, 0x96, 0x55, 0x1e, 0x04, 0xef, 0x0e, 0x1c, 0xde, 0x6c, 0x96, 0x51, 0x00, 0xee, 0x9a, 0x1f, 0x8d, 0x61, 0xbc, 0xeb, 0xb1, 0xa6, 0xa5, 0x21, 0x8b, 0xa7, 0xf8, 0x25, 0x41, 0x48, 0x62, 0x5b, 0x01, 0x6c, 0x7c, 0x2a, 0xe8, 0xff, 0xf9, 0xf9, 0x1f, 0xe2, 0x79, 0x2e, 0xd1, 0xff, 0xa3, 0x2e, 0x1c, 0x3a, 0x1a, 0x5d, 0x2b, 0x7b, 0x87, 0x25, 0x22, 0xa4, 0x90, 0xea, 0x26, 0x9d, 0xdd, 0x13, 0x60, 0x4c, 0x10, 0x03, 0xf6, 0x99, 0xd3, 0x21, 0x0c, 0x69, 0xc6, 0xd8, 0xc8, 0x9e, 0x94, 0x89, 0x51, 0x21, 0xe3, 0x9a, 0xcd, 0xda, 0x54, 0x72, 0x64, 0xae, 0x94, 0x79, 0x36, 0x81, 0x44, 0x14, 0x6d, 0x3a, 0x0e, 0xa6, 0x30, 0xbf, 0x95, 0x99, 0xa6, 0xf5, 0x7f, 0x4f, 0xef, 0xc6, 0x71, 0x2f, 0x36, 0x13, 0x14, 0xa2, 0x9d, 0xc2, 0x0c, 0x0d, 0x4e, 0xc0, 0x02, 0xd3, 0x6f, 0xee, 0x98, 0x5e, 0x24, 0x31, 0x74, 0x11, 0x96, 0x6e, 0x43, 0x57, 0xe8, 0x8e, 0xa0, 0x8d, 0x3d, 0x79, 0x38, 0x20, 0xc2, 0x0f, 0xb4, 0x75, 0x99, 0x3b, 0xb1, 0xf0, 0xe8, 0xe1, 0xda, 0xf9, 0xd4, 0xe6, 0xd6, 0xf4, 0x8a, 0x32, 0x4a, 0x4a, 0x25, 0xa8, 0xd9, 0x60, 0xd6, 0x33, 0x31, 0x97, 0xb9, 0xb6, 0xed, 0x5f, 0xfc, 0x15, 0xbd, 0x13, 0xc0, 0x3a, 0x3f, 0x1f, 0x2d, 0x09, 0x1d, 0xeb, 0x69, 0x6a, 0xfe, 0xd7, 0x95, 0x3e, 0x8a, 0x4e, 0xe1, 0x6e, 0x61, 0xb2, 0x6c, 0xe3, 0x2b, 0x70, 0x60, 0x7e, 0x8c, 0xe4, 0xdd, 0x27, 0x30, 0x7e, 0x0d, 0xc7, 0xb7, 0x9a, 0x1a, 0x3c, 0xcc, 0xa7, 0x22, 0x77, 0x14, 0x05, 0x50, 0x57, 0x31, 0x1b, 0xc8, 0xbf, 0xce, 0x52, 0xaf, 0x9c, 0x8e, 0x10, 0x2e, 0xd2, 0x16, 0xb6, 0x6e, 0x43, 0x10, 0xaf, 0x8b, 0xde, 0x1d, 0x60, 0xb2, 0x7d, 0xe6, 0x2f, 0x08, 0x10, 0x12, 0x7e, 0xb4, 0x76, 0x45, 0xb6, 0xd8, 0x9b, 0x26, 0x40, 0xa1, 0x63, 0x5c, 0x7a, 0x2a, 0xb1, 0x8c, 0xd6, 0xa4, 0x6f, 0x5a, 0xae, 0x33, 0x7e, 0x6d, 0x71, 0xf5, 0xc8, 
0x6d, 0x80, 0x1c, 0x35, 0xfc, 0x3f, 0xc1, 0xa6, 0xc6, 0x1a, 0x15, 0x04, 0x6d, 0x76, 0x38, 0x32, 0x95, 0xb2, 0x51, 0x1a, 0xe9, 0x3e, 0x89, 0x9f, 0x0c, 0x79 };
+ var out: [Sha3_256.digest_length]u8 = undefined;
+
+ Sha3_256.hash(&msg, &out, .{});
+ try htest.assertEqual("5780048dfa381a1d01c747906e4a08711dd34fd712ecd7c6801dd2b38fd81a89", &out);
+
+ var h = Sha3_256.init(.{});
+ h.update(msg[0..64]);
+ h.update(msg[64..613]);
+ h.final(&out);
+ try htest.assertEqual("5780048dfa381a1d01c747906e4a08711dd34fd712ecd7c6801dd2b38fd81a89", &out);
+}
diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig
index 627ad7ea59..bc59459ff9 100644
--- a/lib/std/crypto/tls/Client.zig
+++ b/lib/std/crypto/tls/Client.zig
@@ -88,11 +88,59 @@ pub const StreamInterface = struct {
}
};
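+/// The error set of `init`, merging in the stream type's own read and write
+/// error sets.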
+pub fn InitError(comptime Stream: type) type {
+ return std.mem.Allocator.Error || Stream.WriteError || Stream.ReadError || error{
+ InsufficientEntropy,
+ DiskQuota,
+ LockViolation,
+ NotOpenForWriting,
+ TlsAlert,
+ TlsUnexpectedMessage,
+ TlsIllegalParameter,
+ TlsDecryptFailure,
+ TlsRecordOverflow,
+ TlsBadRecordMac,
+ CertificateFieldHasInvalidLength,
+ CertificateHostMismatch,
+ CertificatePublicKeyInvalid,
+ CertificateExpired,
+ CertificateFieldHasWrongDataType,
+ CertificateIssuerMismatch,
+ CertificateNotYetValid,
+ CertificateSignatureAlgorithmMismatch,
+ CertificateSignatureAlgorithmUnsupported,
+ CertificateSignatureInvalid,
+ CertificateSignatureInvalidLength,
+ CertificateSignatureNamedCurveUnsupported,
+ CertificateSignatureUnsupportedBitCount,
+ TlsCertificateNotVerified,
+ TlsBadSignatureScheme,
+ TlsBadRsaSignatureBitCount,
+ InvalidEncoding,
+ IdentityElement,
+ SignatureVerificationFailed,
+ TlsDecryptError,
+ TlsConnectionTruncated,
+ TlsDecodeError,
+ UnsupportedCertificateVersion,
+ CertificateTimeInvalid,
+ CertificateHasUnrecognizedObjectId,
+ CertificateHasInvalidBitString,
+ MessageTooLong,
+ NegativeIntoUnsigned,
+ TargetTooSmall,
+ BufferTooSmall,
+ InvalidSignature,
+ NotSquare,
+ NonCanonical,
+ };
+}
+
/// Initiates a TLS handshake and establishes a TLSv1.3 session with `stream`, which
/// must conform to `StreamInterface`.
///
/// `host` is only borrowed during this function call.
-pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) !Client {
+pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) InitError(@TypeOf(stream))!Client {
const host_len = @intCast(u16, host.len);
var random_buffer: [128]u8 = undefined;
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 97acf81af6..6abceed9b8 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -635,6 +635,7 @@ pub const TTY = struct {
pub const Color = enum {
Red,
Green,
+ Yellow,
Cyan,
White,
Dim,
@@ -659,6 +660,7 @@ pub const TTY = struct {
const color_string = switch (color) {
.Red => "\x1b[31;1m",
.Green => "\x1b[32;1m",
+ .Yellow => "\x1b[33;1m",
.Cyan => "\x1b[36;1m",
.White => "\x1b[37;1m",
.Bold => "\x1b[1m",
@@ -671,6 +673,7 @@ pub const TTY = struct {
const attributes = switch (color) {
.Red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY,
.Green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
+ .Yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
.Cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
.White, .Bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
.Dim => windows.FOREGROUND_INTENSITY,
@@ -682,6 +685,36 @@ pub const TTY = struct {
},
};
}
+
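+        /// Writes the DEC Special Graphics character for `codepoint`: with escape
+        /// codes the terminal is switched into the DEC line-drawing charset for a
+        /// single character, otherwise an ASCII approximation is written instead.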
+ pub fn writeDEC(conf: Config, writer: anytype, codepoint: u8) !void {
+ const bytes = switch (conf) {
+ .no_color, .windows_api => switch (codepoint) {
+ 0x50...0x5e => @as(*const [1]u8, &codepoint),
+ 0x6a => "+", // ┘
+ 0x6b => "+", // ┐
+ 0x6c => "+", // ┌
+ 0x6d => "+", // └
+ 0x6e => "+", // ┼
+ 0x71 => "-", // ─
+ 0x74 => "+", // ├
+ 0x75 => "+", // ┤
+ 0x76 => "+", // ┴
+ 0x77 => "+", // ┬
+ 0x78 => "|", // │
+ else => " ", // TODO
+ },
+ .escape_codes => switch (codepoint) {
+ // Here we avoid writing the DEC beginning sequence and
+ // ending sequence in separate syscalls by putting the
+ // beginning and ending sequence into the same string
+ // literals, to prevent terminals ending up in bad states
+ // in case a crash happens between syscalls.
+ inline 0x50...0x7f => |x| "\x1B\x28\x30" ++ [1]u8{x} ++ "\x1B\x28\x42",
+ else => unreachable,
+ },
+ };
+ return writer.writeAll(bytes);
+ }
};
};
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index b7c8f761d3..eddbff5af0 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -164,6 +164,17 @@ pub fn LinearFifo(
return self.readableSliceMut(offset);
}
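+        /// Returns the first `len` readable items as one contiguous slice,
+        /// realigning the internal buffer if they wrap around.
+        /// Asserts that at least `len` items are readable.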
+ pub fn readableSliceOfLen(self: *Self, len: usize) []const T {
+ assert(len <= self.count);
+ const buf = self.readableSlice(0);
+ if (buf.len >= len) {
+ return buf[0..len];
+ } else {
+ self.realign();
+ return self.readableSlice(0)[0..len];
+ }
+ }
+
/// Discard first `count` items in the fifo
pub fn discard(self: *Self, count: usize) void {
assert(count <= self.count);
@@ -383,6 +394,22 @@ pub fn LinearFifo(
self.discard(try dest_writer.write(self.readableSlice(0)));
}
}
+
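+        /// Returns the readable items, oldest first, as a caller-owned slice and
+        /// resets the fifo to an empty state. Free the result with the same
+        /// allocator the fifo was initialized with.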
+ pub fn toOwnedSlice(self: *Self) Allocator.Error![]T {
+ if (self.head != 0) self.realign();
+ assert(self.head == 0);
+ assert(self.count <= self.buf.len);
+ const allocator = self.allocator;
+ if (allocator.resize(self.buf, self.count)) {
+ const result = self.buf[0..self.count];
+ self.* = Self.init(allocator);
+ return result;
+ }
+ const new_memory = try allocator.dupe(T, self.buf[0..self.count]);
+ allocator.free(self.buf);
+ self.* = Self.init(allocator);
+ return new_memory;
+ }
};
}
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 0da25fde78..8167a2b252 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -2555,6 +2555,21 @@ test "bytes.hex" {
try expectFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{fmtSliceHexLower(bytes_with_zeros)});
}
+/// Encodes a sequence of bytes as hexadecimal digits.
+/// Returns an array containing the encoded bytes.
+pub fn bytesToHex(input: anytype, case: Case) [input.len * 2]u8 {
+ if (input.len == 0) return [_]u8{};
+ comptime assert(@TypeOf(input[0]) == u8); // elements to encode must be unsigned bytes
+
+ const charset = "0123456789" ++ if (case == .upper) "ABCDEF" else "abcdef";
+ var result: [input.len * 2]u8 = undefined;
+ for (input, 0..) |b, i| {
+ result[i * 2 + 0] = charset[b >> 4];
+ result[i * 2 + 1] = charset[b & 15];
+ }
+ return result;
+}
+
/// Decodes the sequence of bytes represented by the specified string of
/// hexadecimal characters.
/// Returns a slice of the output buffer containing the decoded bytes.
@@ -2575,6 +2590,13 @@ pub fn hexToBytes(out: []u8, input: []const u8) ![]u8 {
return out[0 .. in_i / 2];
}
+test "bytesToHex" {
+ const input = "input slice";
+ const encoded = bytesToHex(input, .lower);
+ var decoded: [input.len]u8 = undefined;
+ try std.testing.expectEqualSlices(u8, input, try hexToBytes(&decoded, &encoded));
+}
+
test "hexToBytes" {
var buf: [32]u8 = undefined;
try expectFmt("90" ** 32, "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, "90" ** 32))});
diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig
index 427ac727c9..e92564ef01 100644
--- a/lib/std/fmt/parse_float.zig
+++ b/lib/std/fmt/parse_float.zig
@@ -119,6 +119,7 @@ test "fmt.parseFloat hex.f16" {
}
test "fmt.parseFloat hex.f32" {
+ try testing.expectError(error.InvalidCharacter, parseFloat(f32, "0x"));
try testing.expectEqual(try parseFloat(f32, "0x1p0"), 1.0);
try testing.expectEqual(try parseFloat(f32, "-0x1p-1"), -0.5);
try testing.expectEqual(try parseFloat(f32, "0x10p+10"), 16384.0);
diff --git a/lib/std/fmt/parse_float/parse.zig b/lib/std/fmt/parse_float/parse.zig
index 3b757c7c41..9f6e75b29a 100644
--- a/lib/std/fmt/parse_float/parse.zig
+++ b/lib/std/fmt/parse_float/parse.zig
@@ -107,6 +107,8 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
tryParseDigits(MantissaT, stream, &mantissa, info.base);
var int_end = stream.offsetTrue();
var n_digits = @intCast(isize, stream.offsetTrue());
+ // the base being 16 implies a 0x prefix, which shouldn't be included in the digit count
+ if (info.base == 16) n_digits -= 2;
// handle dot with the following digits
var exponent: i64 = 0;
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index bf93a61239..8235b8aecf 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -1048,12 +1048,27 @@ pub const File = struct {
/// Returns the number of bytes read. If the number read is smaller than the total bytes
/// from all the buffers, it means the file reached the end. Reaching the end of a file
/// is not an error condition.
- /// The `iovecs` parameter is mutable because this function needs to mutate the fields in
- /// order to handle partial reads from the underlying OS layer.
- /// See https://github.com/ziglang/zig/issues/7699
+ ///
+ /// The `iovecs` parameter is mutable because:
+ /// * This function needs to mutate the fields in order to handle partial
+ /// reads from the underlying OS layer.
+ /// * The OS layer expects pointer addresses to be inside the application's address space
+ /// even if the length is zero. Meanwhile, in Zig, slices may have undefined pointer
+ /// addresses when the length is zero. So this function modifies the iov_base fields
+ /// when the length is zero.
+ ///
+ /// Related open issue: https://github.com/ziglang/zig/issues/7699
pub fn readvAll(self: File, iovecs: []os.iovec) ReadError!usize {
if (iovecs.len == 0) return 0;
+ // We use the address of this local variable for all zero-length
+ // vectors so that the OS does not complain that we are giving it
+ // addresses outside the application's address space.
+ var garbage: [1]u8 = undefined;
+ for (iovecs) |*v| {
+ if (v.iov_len == 0) v.iov_base = &garbage;
+ }
+
var i: usize = 0;
var off: usize = 0;
while (true) {
@@ -1181,13 +1196,26 @@ pub const File = struct {
}
}
- /// The `iovecs` parameter is mutable because this function needs to mutate the fields in
- /// order to handle partial writes from the underlying OS layer.
+ /// The `iovecs` parameter is mutable because:
+ /// * This function needs to mutate the fields in order to handle partial
+ /// writes from the underlying OS layer.
+ /// * The OS layer expects pointer addresses to be inside the application's address space
+ /// even if the length is zero. Meanwhile, in Zig, slices may have undefined pointer
+ /// addresses when the length is zero. So this function modifies the iov_base fields
+ /// when the length is zero.
/// See https://github.com/ziglang/zig/issues/7699
/// See equivalent function: `std.net.Stream.writevAll`.
pub fn writevAll(self: File, iovecs: []os.iovec_const) WriteError!void {
if (iovecs.len == 0) return;
+ // We use the address of this local variable for all zero-length
+ // vectors so that the OS does not complain that we are giving it
+ // addresses outside the application's address space.
+ var garbage: [1]u8 = undefined;
+ for (iovecs) |*v| {
+ if (v.iov_len == 0) v.iov_base = &garbage;
+ }
+
var i: usize = 0;
while (true) {
var amt = try self.writev(iovecs[i..]);
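A sketch of the situation the iov_base fixup above tolerates: a zero-length vector whose base pointer is undefined, as Zig permits for empty slices (file name and contents are illustrative):

```zig
const std = @import("std");
const os = std.os;

pub fn main() !void {
    const file = try std.fs.cwd().createFile("writev-example.txt", .{});
    defer file.close();

    const head = "hello ".*;
    const tail = "world\n".*;

    var iovecs = [_]os.iovec_const{
        .{ .iov_base = &head, .iov_len = head.len },
        // Zero length, undefined pointer: writevAll now redirects iov_base to a
        // valid dummy address before handing the vectors to the OS.
        .{ .iov_base = undefined, .iov_len = 0 },
        .{ .iov_base = &tail, .iov_len = tail.len },
    };
    try file.writevAll(&iovecs);
}
```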
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 16458d7dc4..1fbd7dbd63 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -1124,17 +1124,31 @@ test "open file with exclusive lock twice, make sure second lock waits" {
test "open file with exclusive nonblocking lock twice (absolute paths)" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- const allocator = testing.allocator;
+ var random_bytes: [12]u8 = undefined;
+ std.crypto.random.bytes(&random_bytes);
- const cwd = try std.process.getCwdAlloc(allocator);
- defer allocator.free(cwd);
- const file_paths: [2][]const u8 = .{ cwd, "zig-test-absolute-paths.txt" };
- const filename = try fs.path.resolve(allocator, &file_paths);
- defer allocator.free(filename);
+ var random_b64: [fs.base64_encoder.calcSize(random_bytes.len)]u8 = undefined;
+ _ = fs.base64_encoder.encode(&random_b64, &random_bytes);
- const file1 = try fs.createFileAbsolute(filename, .{ .lock = .Exclusive, .lock_nonblocking = true });
+ const sub_path = random_b64 ++ "-zig-test-absolute-paths.txt";
- const file2 = fs.createFileAbsolute(filename, .{ .lock = .Exclusive, .lock_nonblocking = true });
+ const gpa = testing.allocator;
+
+ const cwd = try std.process.getCwdAlloc(gpa);
+ defer gpa.free(cwd);
+
+ const filename = try fs.path.resolve(gpa, &[_][]const u8{ cwd, sub_path });
+ defer gpa.free(filename);
+
+ const file1 = try fs.createFileAbsolute(filename, .{
+ .lock = .Exclusive,
+ .lock_nonblocking = true,
+ });
+
+ const file2 = fs.createFileAbsolute(filename, .{
+ .lock = .Exclusive,
+ .lock_nonblocking = true,
+ });
file1.close();
try testing.expectError(error.WouldBlock, file2);
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index c15e5d0ec2..e2d000f318 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -19,6 +19,7 @@ pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig"
pub const WasmAllocator = @import("heap/WasmAllocator.zig");
pub const WasmPageAllocator = @import("heap/WasmPageAllocator.zig");
pub const PageAllocator = @import("heap/PageAllocator.zig");
+pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
const memory_pool = @import("heap/memory_pool.zig");
pub const MemoryPool = memory_pool.MemoryPool;
diff --git a/lib/std/heap/ThreadSafeAllocator.zig b/lib/std/heap/ThreadSafeAllocator.zig
new file mode 100644
index 0000000000..fe10eb2fdb
--- /dev/null
+++ b/lib/std/heap/ThreadSafeAllocator.zig
@@ -0,0 +1,45 @@
+//! Wraps a non-thread-safe allocator and makes it thread-safe.
+
+child_allocator: Allocator,
+mutex: std.Thread.Mutex = .{},
+
+pub fn allocator(self: *ThreadSafeAllocator) Allocator {
+ return .{
+ .ptr = self,
+ .vtable = &.{
+ .alloc = alloc,
+ .resize = resize,
+ .free = free,
+ },
+ };
+}
+
+fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+ const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+ self.mutex.lock();
+ defer self.mutex.unlock();
+
+ return self.child_allocator.rawAlloc(n, log2_ptr_align, ra);
+}
+
+fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
+ const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+
+ self.mutex.lock();
+ defer self.mutex.unlock();
+
+ return self.child_allocator.rawResize(buf, log2_buf_align, new_len, ret_addr);
+}
+
+fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
+ const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+
+ self.mutex.lock();
+ defer self.mutex.unlock();
+
+ return self.child_allocator.rawFree(buf, log2_buf_align, ret_addr);
+}
+
+const std = @import("../std.zig");
+const ThreadSafeAllocator = @This();
+const Allocator = std.mem.Allocator;
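A minimal usage sketch for the wrapper above: serializing access to a FixedBufferAllocator, which is not thread-safe on its own (buffer and allocation sizes are arbitrary):

```zig
const std = @import("std");

test "ThreadSafeAllocator wraps a non-thread-safe allocator (sketch)" {
    var buffer: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);

    var tsa = std.heap.ThreadSafeAllocator{ .child_allocator = fba.allocator() };
    const allocator = tsa.allocator();

    // Every alloc/resize/free now goes through the wrapper's mutex, so this
    // allocator could be shared between threads.
    const slice = try allocator.alloc(u8, 64);
    defer allocator.free(slice);

    try std.testing.expectEqual(@as(usize, 64), slice.len);
}
```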
diff --git a/lib/std/http.zig b/lib/std/http.zig
index 7c2a2da605..ef89f09925 100644
--- a/lib/std/http.zig
+++ b/lib/std/http.zig
@@ -248,9 +248,24 @@ pub const Status = enum(u10) {
pub const TransferEncoding = enum {
chunked,
+ // compression is intentionally omitted here, as std.http.Client stores it as content-encoding
+};
+
+pub const ContentEncoding = enum {
compress,
deflate,
gzip,
+ zstd,
+};
+
+pub const Connection = enum {
+ keep_alive,
+ close,
+};
+
+pub const CustomHeader = struct {
+ name: []const u8,
+ value: []const u8,
};
const std = @import("std.zig");
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 7cf512d65f..76073c0ce3 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -13,6 +13,12 @@ const Uri = std.Uri;
const Allocator = std.mem.Allocator;
const testing = std.testing;
+pub const Request = @import("Client/Request.zig");
+pub const Response = @import("Client/Response.zig");
+
+pub const default_connection_pool_size = 32;
+const connection_pool_size = std.options.http_connection_pool_size;
+
/// Used for tcpConnectToHost and storing HTTP headers when an externally
/// managed buffer is not provided.
allocator: Allocator,
@@ -21,11 +27,130 @@ ca_bundle: std.crypto.Certificate.Bundle = .{},
/// it will first rescan the system for root certificates.
next_https_rescan_certs: bool = true,
+connection_pool: ConnectionPool = .{},
+
+pub const ConnectionPool = struct {
+ pub const Criteria = struct {
+ host: []const u8,
+ port: u16,
+ is_tls: bool,
+ };
+
+ const Queue = std.TailQueue(Connection);
+ pub const Node = Queue.Node;
+
+ mutex: std.Thread.Mutex = .{},
+ used: Queue = .{},
+ free: Queue = .{},
+ free_len: usize = 0,
+ free_size: usize = default_connection_pool_size,
+
+ /// Finds and acquires a connection from the connection pool matching the criteria. This function is threadsafe.
+ /// If no connection is found, null is returned.
+ pub fn findConnection(pool: *ConnectionPool, criteria: Criteria) ?*Node {
+ pool.mutex.lock();
+ defer pool.mutex.unlock();
+
+ var next = pool.free.last;
+ while (next) |node| : (next = node.prev) {
+ if ((node.data.protocol == .tls) != criteria.is_tls) continue;
+ if (node.data.port != criteria.port) continue;
+ if (!std.mem.eql(u8, node.data.host, criteria.host)) continue;
+
+ pool.acquireUnsafe(node);
+ return node;
+ }
+
+ return null;
+ }
+
+ /// Acquires an existing connection from the connection pool. This function is not threadsafe.
+ pub fn acquireUnsafe(pool: *ConnectionPool, node: *Node) void {
+ pool.free.remove(node);
+ pool.free_len -= 1;
+
+ pool.used.append(node);
+ }
+
+ /// Acquires an existing connection from the connection pool. This function is threadsafe.
+ pub fn acquire(pool: *ConnectionPool, node: *Node) void {
+ pool.mutex.lock();
+ defer pool.mutex.unlock();
+
+ return pool.acquireUnsafe(node);
+ }
+
+ /// Tries to release a connection back to the connection pool. This function is threadsafe.
+ /// If the connection is marked as closing, it will be closed instead.
+ pub fn release(pool: *ConnectionPool, client: *Client, node: *Node) void {
+ pool.mutex.lock();
+ defer pool.mutex.unlock();
+
+ pool.used.remove(node);
+
+ if (node.data.closing) {
+ node.data.close(client);
+
+ return client.allocator.destroy(node);
+ }
+
+ // The free list is full: evict the oldest free connection to make room.
+ if (pool.free_len >= pool.free_size) {
+ const popped = pool.free.popFirst() orelse unreachable;
+ pool.free_len -= 1;
+
+ popped.data.close(client);
+ client.allocator.destroy(popped);
+ }
+
+ pool.free.append(node);
+ pool.free_len += 1;
+ }
+
+ /// Adds a newly created node to the pool of used connections. This function is threadsafe.
+ pub fn addUsed(pool: *ConnectionPool, node: *Node) void {
+ pool.mutex.lock();
+ defer pool.mutex.unlock();
+
+ pool.used.append(node);
+ }
+
+ pub fn deinit(pool: *ConnectionPool, client: *Client) void {
+ pool.mutex.lock();
+
+ var next = pool.free.first;
+ while (next) |node| {
+ defer client.allocator.destroy(node);
+ next = node.next;
+
+ node.data.close(client);
+ }
+
+ next = pool.used.first;
+ while (next) |node| {
+ defer client.allocator.destroy(node);
+ next = node.next;
+
+ node.data.close(client);
+ }
+
+ pool.* = undefined;
+ }
+};
+
+pub const DeflateDecompressor = std.compress.zlib.ZlibStream(Request.ReaderRaw);
+pub const GzipDecompressor = std.compress.gzip.Decompress(Request.ReaderRaw);
+pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Request.ReaderRaw, .{});
+
pub const Connection = struct {
stream: net.Stream,
/// undefined unless protocol is tls.
- tls_client: std.crypto.tls.Client,
+ tls_client: *std.crypto.tls.Client, // heap-allocated because the TLS client is currently ~16 KB; TODO: shrink it so it can live inline.
protocol: Protocol,
+ host: []u8,
+ port: u16,
+
+ // This connection has been part of a non-keep-alive request and cannot be added to the pool.
+ closing: bool = false,
pub const Protocol = enum { plain, tls };
@@ -43,6 +168,24 @@ pub const Connection = struct {
}
}
+ pub const ReadError = net.Stream.ReadError || error{
+ TlsConnectionTruncated,
+ TlsRecordOverflow,
+ TlsDecodeError,
+ TlsAlert,
+ TlsBadRecordMac,
+ Overflow,
+ TlsBadLength,
+ TlsIllegalParameter,
+ TlsUnexpectedMessage,
+ };
+
+ pub const Reader = std.io.Reader(*Connection, ReadError, read);
+
+ pub fn reader(conn: *Connection) Reader {
+ return Reader{ .context = conn };
+ }
+
pub fn writeAll(conn: *Connection, buffer: []const u8) !void {
switch (conn.protocol) {
.plain => return conn.stream.writeAll(buffer),
@@ -56,818 +199,84 @@ pub const Connection = struct {
.tls => return conn.tls_client.write(conn.stream, buffer),
}
}
-};
-/// TODO: emit error.UnexpectedEndOfStream or something like that when the read
-/// data does not match the content length. This is necessary since HTTPS disables
-/// close_notify protection on underlying TLS streams.
-pub const Request = struct {
- client: *Client,
- connection: Connection,
- redirects_left: u32,
- response: Response,
- /// These are stored in Request so that they are available when following
- /// redirects.
- headers: Headers,
+ pub const WriteError = net.Stream.WriteError || error{};
+ pub const Writer = std.io.Writer(*Connection, WriteError, write);
- pub const Response = struct {
- headers: Response.Headers,
- state: State,
- header_bytes_owned: bool,
- /// This could either be a fixed buffer provided by the API user or it
- /// could be our own array list.
- header_bytes: std.ArrayListUnmanaged(u8),
- max_header_bytes: usize,
- next_chunk_length: u64,
-
- pub const Headers = struct {
- status: http.Status,
- version: http.Version,
- location: ?[]const u8 = null,
- content_length: ?u64 = null,
- transfer_encoding: ?http.TransferEncoding = null,
-
- pub fn parse(bytes: []const u8) !Response.Headers {
- var it = mem.split(u8, bytes[0 .. bytes.len - 4], "\r\n");
-
- const first_line = it.first();
- if (first_line.len < 12)
- return error.ShortHttpStatusLine;
-
- const version: http.Version = switch (int64(first_line[0..8])) {
- int64("HTTP/1.0") => .@"HTTP/1.0",
- int64("HTTP/1.1") => .@"HTTP/1.1",
- else => return error.BadHttpVersion,
- };
- if (first_line[8] != ' ') return error.HttpHeadersInvalid;
- const status = @intToEnum(http.Status, parseInt3(first_line[9..12].*));
-
- var headers: Response.Headers = .{
- .version = version,
- .status = status,
- };
-
- while (it.next()) |line| {
- if (line.len == 0) return error.HttpHeadersInvalid;
- switch (line[0]) {
- ' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
- else => {},
- }
- var line_it = mem.split(u8, line, ": ");
- const header_name = line_it.first();
- const header_value = line_it.rest();
- if (std.ascii.eqlIgnoreCase(header_name, "location")) {
- if (headers.location != null) return error.HttpHeadersInvalid;
- headers.location = header_value;
- } else if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
- if (headers.content_length != null) return error.HttpHeadersInvalid;
- headers.content_length = try std.fmt.parseInt(u64, header_value, 10);
- } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
- if (headers.transfer_encoding != null) return error.HttpHeadersInvalid;
- headers.transfer_encoding = std.meta.stringToEnum(http.TransferEncoding, header_value) orelse
- return error.HttpTransferEncodingUnsupported;
- }
- }
-
- return headers;
- }
-
- test "parse headers" {
- const example =
- "HTTP/1.1 301 Moved Permanently\r\n" ++
- "Location: https://www.example.com/\r\n" ++
- "Content-Type: text/html; charset=UTF-8\r\n" ++
- "Content-Length: 220\r\n\r\n";
- const parsed = try Response.Headers.parse(example);
- try testing.expectEqual(http.Version.@"HTTP/1.1", parsed.version);
- try testing.expectEqual(http.Status.moved_permanently, parsed.status);
- try testing.expectEqualStrings("https://www.example.com/", parsed.location orelse
- return error.TestFailed);
- try testing.expectEqual(@as(?u64, 220), parsed.content_length);
- }
-
- test "header continuation" {
- const example =
- "HTTP/1.0 200 OK\r\n" ++
- "Content-Type: text/html;\r\n charset=UTF-8\r\n" ++
- "Content-Length: 220\r\n\r\n";
- try testing.expectError(
- error.HttpHeaderContinuationsUnsupported,
- Response.Headers.parse(example),
- );
- }
-
- test "extra content length" {
- const example =
- "HTTP/1.0 200 OK\r\n" ++
- "Content-Length: 220\r\n" ++
- "Content-Type: text/html; charset=UTF-8\r\n" ++
- "content-length: 220\r\n\r\n";
- try testing.expectError(
- error.HttpHeadersInvalid,
- Response.Headers.parse(example),
- );
- }
- };
-
- pub const State = enum {
- /// Begin header parsing states.
- invalid,
- start,
- seen_r,
- seen_rn,
- seen_rnr,
- finished,
- /// Begin transfer-encoding: chunked parsing states.
- chunk_size_prefix_r,
- chunk_size_prefix_n,
- chunk_size,
- chunk_r,
- chunk_data,
-
- pub fn zeroMeansEnd(state: State) bool {
- return switch (state) {
- .finished, .chunk_data => true,
- else => false,
- };
- }
- };
-
- pub fn initDynamic(max: usize) Response {
- return .{
- .state = .start,
- .headers = undefined,
- .header_bytes = .{},
- .max_header_bytes = max,
- .header_bytes_owned = true,
- .next_chunk_length = undefined,
- };
- }
-
- pub fn initStatic(buf: []u8) Response {
- return .{
- .state = .start,
- .headers = undefined,
- .header_bytes = .{ .items = buf[0..0], .capacity = buf.len },
- .max_header_bytes = buf.len,
- .header_bytes_owned = false,
- .next_chunk_length = undefined,
- };
- }
-
- /// Returns how many bytes are part of HTTP headers. Always less than or
- /// equal to bytes.len. If the amount returned is less than bytes.len, it
- /// means the headers ended and the first byte after the double \r\n\r\n is
- /// located at `bytes[result]`.
- pub fn findHeadersEnd(r: *Response, bytes: []const u8) usize {
- var index: usize = 0;
-
- // TODO: https://github.com/ziglang/zig/issues/8220
- state: while (true) {
- switch (r.state) {
- .invalid => unreachable,
- .finished => unreachable,
- .start => while (true) {
- switch (bytes.len - index) {
- 0 => return index,
- 1 => {
- if (bytes[index] == '\r')
- r.state = .seen_r;
- return index + 1;
- },
- 2 => {
- if (int16(bytes[index..][0..2]) == int16("\r\n")) {
- r.state = .seen_rn;
- } else if (bytes[index + 1] == '\r') {
- r.state = .seen_r;
- }
- return index + 2;
- },
- 3 => {
- if (int16(bytes[index..][0..2]) == int16("\r\n") and
- bytes[index + 2] == '\r')
- {
- r.state = .seen_rnr;
- } else if (int16(bytes[index + 1 ..][0..2]) == int16("\r\n")) {
- r.state = .seen_rn;
- } else if (bytes[index + 2] == '\r') {
- r.state = .seen_r;
- }
- return index + 3;
- },
- 4...15 => {
- if (int32(bytes[index..][0..4]) == int32("\r\n\r\n")) {
- r.state = .finished;
- return index + 4;
- } else if (int16(bytes[index + 1 ..][0..2]) == int16("\r\n") and
- bytes[index + 3] == '\r')
- {
- r.state = .seen_rnr;
- index += 4;
- continue :state;
- } else if (int16(bytes[index + 2 ..][0..2]) == int16("\r\n")) {
- r.state = .seen_rn;
- index += 4;
- continue :state;
- } else if (bytes[index + 3] == '\r') {
- r.state = .seen_r;
- index += 4;
- continue :state;
- }
- index += 4;
- continue;
- },
- else => {
- const chunk = bytes[index..][0..16];
- const v: @Vector(16, u8) = chunk.*;
- const matches_r = v == @splat(16, @as(u8, '\r'));
- const iota = std.simd.iota(u8, 16);
- const default = @splat(16, @as(u8, 16));
- const sub_index = @reduce(.Min, @select(u8, matches_r, iota, default));
- switch (sub_index) {
- 0...12 => {
- index += sub_index + 4;
- if (int32(chunk[sub_index..][0..4]) == int32("\r\n\r\n")) {
- r.state = .finished;
- return index;
- }
- continue;
- },
- 13 => {
- index += 16;
- if (int16(chunk[14..][0..2]) == int16("\n\r")) {
- r.state = .seen_rnr;
- continue :state;
- }
- continue;
- },
- 14 => {
- index += 16;
- if (chunk[15] == '\n') {
- r.state = .seen_rn;
- continue :state;
- }
- continue;
- },
- 15 => {
- r.state = .seen_r;
- index += 16;
- continue :state;
- },
- 16 => {
- index += 16;
- continue;
- },
- else => unreachable,
- }
- },
- }
- },
-
- .seen_r => switch (bytes.len - index) {
- 0 => return index,
- 1 => {
- switch (bytes[index]) {
- '\n' => r.state = .seen_rn,
- '\r' => r.state = .seen_r,
- else => r.state = .start,
- }
- return index + 1;
- },
- 2 => {
- if (int16(bytes[index..][0..2]) == int16("\n\r")) {
- r.state = .seen_rnr;
- return index + 2;
- }
- r.state = .start;
- return index + 2;
- },
- else => {
- if (int16(bytes[index..][0..2]) == int16("\n\r") and
- bytes[index + 2] == '\n')
- {
- r.state = .finished;
- return index + 3;
- }
- index += 3;
- r.state = .start;
- continue :state;
- },
- },
- .seen_rn => switch (bytes.len - index) {
- 0 => return index,
- 1 => {
- switch (bytes[index]) {
- '\r' => r.state = .seen_rnr,
- else => r.state = .start,
- }
- return index + 1;
- },
- else => {
- if (int16(bytes[index..][0..2]) == int16("\r\n")) {
- r.state = .finished;
- return index + 2;
- }
- index += 2;
- r.state = .start;
- continue :state;
- },
- },
- .seen_rnr => switch (bytes.len - index) {
- 0 => return index,
- else => {
- if (bytes[index] == '\n') {
- r.state = .finished;
- return index + 1;
- }
- index += 1;
- r.state = .start;
- continue :state;
- },
- },
- .chunk_size_prefix_r => unreachable,
- .chunk_size_prefix_n => unreachable,
- .chunk_size => unreachable,
- .chunk_r => unreachable,
- .chunk_data => unreachable,
- }
-
- return index;
- }
- }
-
- pub fn findChunkedLen(r: *Response, bytes: []const u8) usize {
- var i: usize = 0;
- if (r.state == .chunk_size) {
- while (i < bytes.len) : (i += 1) {
- const digit = switch (bytes[i]) {
- '0'...'9' => |b| b - '0',
- 'A'...'Z' => |b| b - 'A' + 10,
- 'a'...'z' => |b| b - 'a' + 10,
- '\r' => {
- r.state = .chunk_r;
- i += 1;
- break;
- },
- else => {
- r.state = .invalid;
- return i;
- },
- };
- const mul = @mulWithOverflow(r.next_chunk_length, 16);
- if (mul[1] != 0) {
- r.state = .invalid;
- return i;
- }
- const add = @addWithOverflow(mul[0], digit);
- if (add[1] != 0) {
- r.state = .invalid;
- return i;
- }
- r.next_chunk_length = add[0];
- } else {
- return i;
- }
- }
- assert(r.state == .chunk_r);
- if (i == bytes.len) return i;
-
- if (bytes[i] == '\n') {
- r.state = .chunk_data;
- return i + 1;
- } else {
- r.state = .invalid;
- return i;
- }
- }
-
- fn parseInt3(nnn: @Vector(3, u8)) u10 {
- const zero: @Vector(3, u8) = .{ '0', '0', '0' };
- const mmm: @Vector(3, u10) = .{ 100, 10, 1 };
- return @reduce(.Add, @as(@Vector(3, u10), nnn -% zero) *% mmm);
- }
-
- test parseInt3 {
- const expectEqual = std.testing.expectEqual;
- try expectEqual(@as(u10, 0), parseInt3("000".*));
- try expectEqual(@as(u10, 418), parseInt3("418".*));
- try expectEqual(@as(u10, 999), parseInt3("999".*));
- }
-
- test "find headers end basic" {
- var buffer: [1]u8 = undefined;
- var r = Response.initStatic(&buffer);
- try testing.expectEqual(@as(usize, 10), r.findHeadersEnd("HTTP/1.1 4"));
- try testing.expectEqual(@as(usize, 2), r.findHeadersEnd("18"));
- try testing.expectEqual(@as(usize, 8), r.findHeadersEnd(" lol\r\n\r\nblah blah"));
- }
-
- test "find headers end vectorized" {
- var buffer: [1]u8 = undefined;
- var r = Response.initStatic(&buffer);
- const example =
- "HTTP/1.1 301 Moved Permanently\r\n" ++
- "Location: https://www.example.com/\r\n" ++
- "Content-Type: text/html; charset=UTF-8\r\n" ++
- "Content-Length: 220\r\n" ++
- "\r\ncontent";
- try testing.expectEqual(@as(usize, 131), r.findHeadersEnd(example));
- }
-
- test "find headers end bug" {
- var buffer: [1]u8 = undefined;
- var r = Response.initStatic(&buffer);
- const trail = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- const example =
- "HTTP/1.1 200 OK\r\n" ++
- "Access-Control-Allow-Origin: https://render.githubusercontent.com\r\n" ++
- "content-disposition: attachment; filename=zig-0.10.0.tar.gz\r\n" ++
- "Content-Security-Policy: default-src 'none'; style-src 'unsafe-inline'; sandbox\r\n" ++
- "Content-Type: application/x-gzip\r\n" ++
- "ETag: \"bfae0af6b01c7c0d89eb667cb5f0e65265968aeebda2689177e6b26acd3155ca\"\r\n" ++
- "Strict-Transport-Security: max-age=31536000\r\n" ++
- "Vary: Authorization,Accept-Encoding,Origin\r\n" ++
- "X-Content-Type-Options: nosniff\r\n" ++
- "X-Frame-Options: deny\r\n" ++
- "X-XSS-Protection: 1; mode=block\r\n" ++
- "Date: Fri, 06 Jan 2023 22:26:22 GMT\r\n" ++
- "Transfer-Encoding: chunked\r\n" ++
- "X-GitHub-Request-Id: 89C6:17E9:A7C9E:124B51:63B8A00E\r\n" ++
- "connection: close\r\n\r\n" ++ trail;
- try testing.expectEqual(@as(usize, example.len - trail.len), r.findHeadersEnd(example));
- }
- };
-
- pub const Headers = struct {
- version: http.Version = .@"HTTP/1.1",
- method: http.Method = .GET,
- };
-
- pub const Options = struct {
- max_redirects: u32 = 3,
- header_strategy: HeaderStrategy = .{ .dynamic = 16 * 1024 },
-
- pub const HeaderStrategy = union(enum) {
- /// In this case, the client's Allocator will be used to store the
- /// entire HTTP header. This value is the maximum total size of
- /// HTTP headers allowed, otherwise
- /// error.HttpHeadersExceededSizeLimit is returned from read().
- dynamic: usize,
- /// This is used to store the entire HTTP header. If the HTTP
- /// header is too big to fit, `error.HttpHeadersExceededSizeLimit`
- /// is returned from read(). When this is used, `error.OutOfMemory`
- /// cannot be returned from `read()`.
- static: []u8,
- };
- };
-
- /// May be skipped if header strategy is buffer.
- pub fn deinit(req: *Request) void {
- if (req.response.header_bytes_owned) {
- req.response.header_bytes.deinit(req.client.allocator);
- }
- req.* = undefined;
+ pub fn writer(conn: *Connection) Writer {
+ return Writer{ .context = conn };
}
- pub const Reader = std.io.Reader(*Request, ReadError, read);
-
- pub fn reader(req: *Request) Reader {
- return .{ .context = req };
- }
-
- pub fn readAll(req: *Request, buffer: []u8) !usize {
- return readAtLeast(req, buffer, buffer.len);
- }
-
- pub const ReadError = net.Stream.ReadError || error{
- // From HTTP protocol
- HttpHeadersInvalid,
- HttpHeadersExceededSizeLimit,
- HttpRedirectMissingLocation,
- HttpTransferEncodingUnsupported,
- HttpContentLengthUnknown,
- TooManyHttpRedirects,
- ShortHttpStatusLine,
- BadHttpVersion,
- HttpHeaderContinuationsUnsupported,
- UnsupportedUrlScheme,
- UriMissingHost,
- UnknownHostName,
-
- // Network problems
- NetworkUnreachable,
- HostLacksNetworkAddresses,
- TemporaryNameServerFailure,
- NameServerFailure,
- ProtocolFamilyNotAvailable,
- ProtocolNotSupported,
-
- // System resource problems
- ProcessFdQuotaExceeded,
- SystemFdQuotaExceeded,
- OutOfMemory,
-
- // TLS problems
- InsufficientEntropy,
- TlsConnectionTruncated,
- TlsRecordOverflow,
- TlsDecodeError,
- TlsAlert,
- TlsBadRecordMac,
- TlsBadLength,
- TlsIllegalParameter,
- TlsUnexpectedMessage,
- TlsDecryptFailure,
- CertificateFieldHasInvalidLength,
- CertificateHostMismatch,
- CertificatePublicKeyInvalid,
- CertificateExpired,
- CertificateFieldHasWrongDataType,
- CertificateIssuerMismatch,
- CertificateNotYetValid,
- CertificateSignatureAlgorithmMismatch,
- CertificateSignatureAlgorithmUnsupported,
- CertificateSignatureInvalid,
- CertificateSignatureInvalidLength,
- CertificateSignatureNamedCurveUnsupported,
- CertificateSignatureUnsupportedBitCount,
- TlsCertificateNotVerified,
- TlsBadSignatureScheme,
- TlsBadRsaSignatureBitCount,
- TlsDecryptError,
- UnsupportedCertificateVersion,
- CertificateTimeInvalid,
- CertificateHasUnrecognizedObjectId,
- CertificateHasInvalidBitString,
- CertificateAuthorityBundleTooBig,
-
- // TODO: convert to higher level errors
- InvalidFormat,
- InvalidPort,
- UnexpectedCharacter,
- Overflow,
- InvalidCharacter,
- AddressFamilyNotSupported,
- AddressInUse,
- AddressNotAvailable,
- ConnectionPending,
- ConnectionRefused,
- FileNotFound,
- PermissionDenied,
- ServiceUnavailable,
- SocketTypeNotSupported,
- FileTooBig,
- LockViolation,
- NoSpaceLeft,
- NotOpenForWriting,
- InvalidEncoding,
- IdentityElement,
- NonCanonical,
- SignatureVerificationFailed,
- MessageTooLong,
- NegativeIntoUnsigned,
- TargetTooSmall,
- BufferTooSmall,
- InvalidSignature,
- NotSquare,
- DiskQuota,
- InvalidEnd,
- Incomplete,
- InvalidIpv4Mapping,
- InvalidIPAddressFormat,
- BadPathName,
- DeviceBusy,
- FileBusy,
- FileLocksNotSupported,
- InvalidHandle,
- InvalidUtf8,
- NameTooLong,
- NoDevice,
- PathAlreadyExists,
- PipeBusy,
- SharingViolation,
- SymLinkLoop,
- FileSystem,
- InterfaceNotFound,
- AlreadyBound,
- FileDescriptorNotASocket,
- NetworkSubsystemFailed,
- NotDir,
- ReadOnlyFileSystem,
- Unseekable,
- MissingEndCertificateMarker,
- InvalidPadding,
- EndOfStream,
- };
-
- pub fn read(req: *Request, buffer: []u8) ReadError!usize {
- return readAtLeast(req, buffer, 1);
- }
-
- pub fn readAtLeast(req: *Request, buffer: []u8, len: usize) !usize {
- assert(len <= buffer.len);
- var index: usize = 0;
- while (index < len) {
- const zero_means_end = req.response.state.zeroMeansEnd();
- const amt = try readAdvanced(req, buffer[index..]);
- if (amt == 0 and zero_means_end) break;
- index += amt;
+ pub fn close(conn: *Connection, client: *const Client) void {
+ if (conn.protocol == .tls) {
+ // try to cleanly close the TLS connection, for any server that cares.
+ _ = conn.tls_client.writeEnd(conn.stream, "", true) catch {};
+ client.allocator.destroy(conn.tls_client);
}
- return index;
- }
- /// This one can return 0 without meaning EOF.
- /// TODO change to readvAdvanced
- pub fn readAdvanced(req: *Request, buffer: []u8) !usize {
- var in = buffer[0..try req.connection.read(buffer)];
- var out_index: usize = 0;
- while (true) {
- switch (req.response.state) {
- .invalid => unreachable,
- .start, .seen_r, .seen_rn, .seen_rnr => {
- const i = req.response.findHeadersEnd(in);
- if (req.response.state == .invalid) return error.HttpHeadersInvalid;
+ conn.stream.close();
- const headers_data = in[0..i];
- if (req.response.header_bytes.items.len + headers_data.len > req.response.max_header_bytes) {
- return error.HttpHeadersExceededSizeLimit;
- }
- try req.response.header_bytes.appendSlice(req.client.allocator, headers_data);
-
- if (req.response.state == .finished) {
- req.response.headers = try Response.Headers.parse(req.response.header_bytes.items);
-
- if (req.response.headers.status.class() == .redirect) {
- if (req.redirects_left == 0) return error.TooManyHttpRedirects;
- const location = req.response.headers.location orelse
- return error.HttpRedirectMissingLocation;
- const new_url = try std.Uri.parse(location);
- const new_req = try req.client.request(new_url, req.headers, .{
- .max_redirects = req.redirects_left - 1,
- .header_strategy = if (req.response.header_bytes_owned) .{
- .dynamic = req.response.max_header_bytes,
- } else .{
- .static = req.response.header_bytes.unusedCapacitySlice(),
- },
- });
- req.deinit();
- req.* = new_req;
- assert(out_index == 0);
- in = buffer[0..try req.connection.read(buffer)];
- continue;
- }
-
- if (req.response.headers.transfer_encoding) |transfer_encoding| {
- switch (transfer_encoding) {
- .chunked => {
- req.response.next_chunk_length = 0;
- req.response.state = .chunk_size;
- },
- .compress => return error.HttpTransferEncodingUnsupported,
- .deflate => return error.HttpTransferEncodingUnsupported,
- .gzip => return error.HttpTransferEncodingUnsupported,
- }
- } else if (req.response.headers.content_length) |content_length| {
- req.response.next_chunk_length = content_length;
- } else {
- return error.HttpContentLengthUnknown;
- }
-
- in = in[i..];
- continue;
- }
-
- assert(out_index == 0);
- return 0;
- },
- .finished => {
- if (in.ptr == buffer.ptr) {
- return in.len;
- } else {
- mem.copy(u8, buffer[out_index..], in);
- return out_index + in.len;
- }
- },
- .chunk_size_prefix_r => switch (in.len) {
- 0 => return out_index,
- 1 => switch (in[0]) {
- '\r' => {
- req.response.state = .chunk_size_prefix_n;
- return out_index;
- },
- else => {
- req.response.state = .invalid;
- return error.HttpHeadersInvalid;
- },
- },
- else => switch (int16(in[0..2])) {
- int16("\r\n") => {
- in = in[2..];
- req.response.state = .chunk_size;
- continue;
- },
- else => {
- req.response.state = .invalid;
- return error.HttpHeadersInvalid;
- },
- },
- },
- .chunk_size_prefix_n => switch (in.len) {
- 0 => return out_index,
- else => switch (in[0]) {
- '\n' => {
- in = in[1..];
- req.response.state = .chunk_size;
- continue;
- },
- else => {
- req.response.state = .invalid;
- return error.HttpHeadersInvalid;
- },
- },
- },
- .chunk_size, .chunk_r => {
- const i = req.response.findChunkedLen(in);
- switch (req.response.state) {
- .invalid => return error.HttpHeadersInvalid,
- .chunk_data => {
- if (req.response.next_chunk_length == 0) {
- req.response.state = .start;
- return out_index;
- }
- in = in[i..];
- continue;
- },
- .chunk_size => return out_index,
- else => unreachable,
- }
- },
- .chunk_data => {
- // TODO https://github.com/ziglang/zig/issues/14039
- const sub_amt = @intCast(usize, @min(req.response.next_chunk_length, in.len));
- req.response.next_chunk_length -= sub_amt;
- if (req.response.next_chunk_length > 0) {
- if (in.ptr == buffer.ptr) {
- return sub_amt;
- } else {
- mem.copy(u8, buffer[out_index..], in[0..sub_amt]);
- out_index += sub_amt;
- return out_index;
- }
- }
- mem.copy(u8, buffer[out_index..], in[0..sub_amt]);
- out_index += sub_amt;
- req.response.state = .chunk_size_prefix_r;
- in = in[sub_amt..];
- continue;
- },
- }
- }
- }
-
- inline fn int16(array: *const [2]u8) u16 {
- return @bitCast(u16, array.*);
- }
-
- inline fn int32(array: *const [4]u8) u32 {
- return @bitCast(u32, array.*);
- }
-
- inline fn int64(array: *const [8]u8) u64 {
- return @bitCast(u64, array.*);
- }
-
- test {
- _ = Response;
+ client.allocator.free(conn.host);
}
};
pub fn deinit(client: *Client) void {
+ client.connection_pool.deinit(client);
+
client.ca_bundle.deinit(client.allocator);
client.* = undefined;
}
-pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connection.Protocol) !Connection {
- var conn: Connection = .{
+pub const ConnectError = std.mem.Allocator.Error || net.TcpConnectToHostError || std.crypto.tls.Client.InitError(net.Stream);
+
+pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connection.Protocol) ConnectError!*ConnectionPool.Node {
+ if (client.connection_pool.findConnection(.{
+ .host = host,
+ .port = port,
+ .is_tls = protocol == .tls,
+ })) |node|
+ return node;
+
+ const conn = try client.allocator.create(ConnectionPool.Node);
+ errdefer client.allocator.destroy(conn);
+ conn.* = .{ .data = undefined };
+
+ conn.data = .{
.stream = try net.tcpConnectToHost(client.allocator, host, port),
.tls_client = undefined,
.protocol = protocol,
+ .host = try client.allocator.dupe(u8, host),
+ .port = port,
};
switch (protocol) {
.plain => {},
.tls => {
- conn.tls_client = try std.crypto.tls.Client.init(conn.stream, client.ca_bundle, host);
+ conn.data.tls_client = try client.allocator.create(std.crypto.tls.Client);
+ conn.data.tls_client.* = try std.crypto.tls.Client.init(conn.data.stream, client.ca_bundle, host);
// This is appropriate for HTTPS because the HTTP headers contain
// the content length which is used to detect truncation attacks.
- conn.tls_client.allow_truncation_attacks = true;
+ conn.data.tls_client.allow_truncation_attacks = true;
},
}
+ client.connection_pool.addUsed(conn);
+
return conn;
}
-pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Request.Options) !Request {
+pub const RequestError = ConnectError || Connection.WriteError || error{
+ UnsupportedUrlScheme,
+ UriMissingHost,
+
+ CertificateAuthorityBundleTooBig,
+ InvalidPadding,
+ MissingEndCertificateMarker,
+ Unseekable,
+ EndOfStream,
+};
+
+pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Request.Options) RequestError!Request {
const protocol: Connection.Protocol = if (mem.eql(u8, uri.scheme, "http"))
.plain
else if (mem.eql(u8, uri.scheme, "https"))
@@ -883,34 +292,89 @@ pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Req
const host = uri.host orelse return error.UriMissingHost;
if (client.next_https_rescan_certs and protocol == .tls) {
- try client.ca_bundle.rescan(client.allocator);
- client.next_https_rescan_certs = false;
+ client.connection_pool.mutex.lock(); // TODO: this could be so much better than reusing the connection pool mutex.
+ defer client.connection_pool.mutex.unlock();
+
+ if (client.next_https_rescan_certs) {
+ try client.ca_bundle.rescan(client.allocator);
+ client.next_https_rescan_certs = false;
+ }
}
var req: Request = .{
+ .uri = uri,
.client = client,
.headers = headers,
.connection = try client.connect(host, port, protocol),
.redirects_left = options.max_redirects,
+ .handle_redirects = options.handle_redirects,
+ .compression_init = false,
.response = switch (options.header_strategy) {
- .dynamic => |max| Request.Response.initDynamic(max),
- .static => |buf| Request.Response.initStatic(buf),
+ .dynamic => |max| Response.initDynamic(max),
+ .static => |buf| Response.initStatic(buf),
},
+ .arena = undefined,
};
- {
- var h = try std.BoundedArray(u8, 1000).init(0);
- try h.appendSlice(@tagName(headers.method));
- try h.appendSlice(" ");
- try h.appendSlice(uri.path);
- try h.appendSlice(" ");
- try h.appendSlice(@tagName(headers.version));
- try h.appendSlice("\r\nHost: ");
- try h.appendSlice(host);
- try h.appendSlice("\r\nConnection: close\r\n\r\n");
+ req.arena = std.heap.ArenaAllocator.init(client.allocator);
- const header_bytes = h.slice();
- try req.connection.writeAll(header_bytes);
+ {
+ var buffered = std.io.bufferedWriter(req.connection.data.writer());
+ const writer = buffered.writer();
+
+ const escaped_path = try Uri.escapePath(client.allocator, uri.path);
+ defer client.allocator.free(escaped_path);
+
+ const escaped_query = if (uri.query) |q| try Uri.escapeQuery(client.allocator, q) else null;
+ defer if (escaped_query) |q| client.allocator.free(q);
+
+ const escaped_fragment = if (uri.fragment) |f| try Uri.escapeQuery(client.allocator, f) else null;
+ defer if (escaped_fragment) |f| client.allocator.free(f);
+
+ try writer.writeAll(@tagName(headers.method));
+ try writer.writeByte(' ');
+ if (escaped_path.len == 0) {
+ try writer.writeByte('/');
+ } else {
+ try writer.writeAll(escaped_path);
+ }
+ if (escaped_query) |q| {
+ try writer.writeByte('?');
+ try writer.writeAll(q);
+ }
+ if (escaped_fragment) |f| {
+ try writer.writeByte('#');
+ try writer.writeAll(f);
+ }
+ try writer.writeByte(' ');
+ try writer.writeAll(@tagName(headers.version));
+ try writer.writeAll("\r\nHost: ");
+ try writer.writeAll(host);
+ try writer.writeAll("\r\nUser-Agent: ");
+ try writer.writeAll(headers.user_agent);
+ if (headers.connection == .close) {
+ try writer.writeAll("\r\nConnection: close");
+ } else {
+ try writer.writeAll("\r\nConnection: keep-alive");
+ }
+ try writer.writeAll("\r\nAccept-Encoding: gzip, deflate, zstd");
+
+ switch (headers.transfer_encoding) {
+ .chunked => try writer.writeAll("\r\nTransfer-Encoding: chunked"),
+ .content_length => |content_length| try writer.print("\r\nContent-Length: {d}", .{content_length}),
+ .none => {},
+ }
+
+ for (headers.custom) |header| {
+ try writer.writeAll("\r\n");
+ try writer.writeAll(header.name);
+ try writer.writeAll(": ");
+ try writer.writeAll(header.value);
+ }
+
+ try writer.writeAll("\r\n\r\n");
+
+ try buffered.flush();
}
return req;
@@ -924,5 +388,7 @@ test {
return error.SkipZigTest;
}
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
_ = Request;
}
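Tying the reworked client together, a sketch of a plain GET using the pooled connections and the `Request` reader introduced in the next file (URL and buffer size are illustrative):

```zig
const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var client = std.http.Client{ .allocator = gpa };
    defer client.deinit(); // closes pooled connections as well

    const uri = try std.Uri.parse("http://example.com/");

    // Default headers: GET, HTTP/1.1, keep-alive, gzip/deflate/zstd accepted.
    var req = try client.request(uri, .{}, .{});
    defer req.deinit();

    // read() parses the response head, follows redirects, and decompresses.
    var buf: [4096]u8 = undefined;
    const n = try req.reader().readAll(&buf);
    std.debug.print("read {d} bytes of body\n", .{n});
}
```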
diff --git a/lib/std/http/Client/Request.zig b/lib/std/http/Client/Request.zig
new file mode 100644
index 0000000000..9e2ebd2d6c
--- /dev/null
+++ b/lib/std/http/Client/Request.zig
@@ -0,0 +1,482 @@
+const std = @import("std");
+const http = std.http;
+const Uri = std.Uri;
+const mem = std.mem;
+const assert = std.debug.assert;
+
+const Client = @import("../Client.zig");
+const Connection = Client.Connection;
+const ConnectionNode = Client.ConnectionPool.Node;
+const Response = @import("Response.zig");
+
+const Request = @This();
+
+const read_buffer_size = 8192;
+const ReadBufferIndex = std.math.IntFittingRange(0, read_buffer_size);
+
+uri: Uri,
+client: *Client,
+connection: *ConnectionNode,
+response: Response,
+/// These are stored in Request so that they are available when following
+/// redirects.
+headers: Headers,
+
+redirects_left: u32,
+handle_redirects: bool,
+compression_init: bool,
+
+/// Used as an allocator for resolving redirect locations.
+arena: std.heap.ArenaAllocator,
+
+/// Read buffer for the connection. This is used to pull in large amounts of
+/// data from the connection even if the user asks for a small amount. This
+/// can probably be removed with careful planning.
+read_buffer: [read_buffer_size]u8 = undefined,
+read_buffer_start: ReadBufferIndex = 0,
+read_buffer_len: ReadBufferIndex = 0,
+
+pub const RequestTransfer = union(enum) {
+ content_length: u64,
+ chunked: void,
+ none: void,
+};
+
+pub const Headers = struct {
+ version: http.Version = .@"HTTP/1.1",
+ method: http.Method = .GET,
+ user_agent: []const u8 = "zig (std.http)",
+ connection: http.Connection = .keep_alive,
+ transfer_encoding: RequestTransfer = .none,
+
+ custom: []const http.CustomHeader = &[_]http.CustomHeader{},
+};
+
+pub const Options = struct {
+ handle_redirects: bool = true,
+ max_redirects: u32 = 3,
+ header_strategy: HeaderStrategy = .{ .dynamic = 16 * 1024 },
+
+ pub const HeaderStrategy = union(enum) {
+ /// In this case, the client's Allocator will be used to store the
+ /// entire HTTP header. This value is the maximum total size of
+ /// HTTP headers allowed, otherwise
+ /// error.HttpHeadersExceededSizeLimit is returned from read().
+ dynamic: usize,
+ /// This is used to store the entire HTTP header. If the HTTP
+ /// header is too big to fit, `error.HttpHeadersExceededSizeLimit`
+ /// is returned from read(). When this is used, `error.OutOfMemory`
+ /// cannot be returned from `read()`.
+ static: []u8,
+ };
+};
+
+/// Frees all resources associated with the request.
+pub fn deinit(req: *Request) void {
+ switch (req.response.compression) {
+ .none => {},
+ .deflate => |*deflate| deflate.deinit(),
+ .gzip => |*gzip| gzip.deinit(),
+ .zstd => |*zstd| zstd.deinit(),
+ }
+
+ if (req.response.header_bytes_owned) {
+ req.response.header_bytes.deinit(req.client.allocator);
+ }
+
+ if (!req.response.done) {
+ // If the response wasn't fully read, then we need to close the connection.
+ req.connection.data.closing = true;
+ req.client.connection_pool.release(req.client, req.connection);
+ }
+
+ req.arena.deinit();
+ req.* = undefined;
+}
+
+pub const ReadRawError = Connection.ReadError || Uri.ParseError || Client.RequestError || error{
+ UnexpectedEndOfStream,
+ TooManyHttpRedirects,
+ HttpRedirectMissingLocation,
+ HttpHeadersInvalid,
+};
+
+pub const ReaderRaw = std.io.Reader(*Request, ReadRawError, readRaw);
+
+/// Read from the underlying stream, without decompressing or parsing the headers. Must be called
+/// after waitForCompleteHead() has returned successfully.
+pub fn readRaw(req: *Request, buffer: []u8) ReadRawError!usize {
+ assert(req.response.state.isContent());
+
+ var index: usize = 0;
+ while (index == 0) {
+ const amt = try req.readRawAdvanced(buffer[index..]);
+ if (amt == 0 and req.response.done) break;
+ index += amt;
+ }
+
+ return index;
+}
+
+fn checkForCompleteHead(req: *Request, buffer: []u8) !usize {
+ switch (req.response.state) {
+ .invalid => unreachable,
+ .start, .seen_r, .seen_rn, .seen_rnr => {},
+ else => return 0, // No more headers to read.
+ }
+
+ const i = req.response.findHeadersEnd(buffer[0..]);
+ if (req.response.state == .invalid) return error.HttpHeadersInvalid;
+
+ const headers_data = buffer[0..i];
+ if (req.response.header_bytes.items.len + headers_data.len > req.response.max_header_bytes) {
+ return error.HttpHeadersExceededSizeLimit;
+ }
+ try req.response.header_bytes.appendSlice(req.client.allocator, headers_data);
+
+ if (req.response.state == .finished) {
+ req.response.headers = try Response.Headers.parse(req.response.header_bytes.items);
+
+ if (req.response.headers.upgrade) |_| {
+ req.connection.data.closing = false;
+ req.response.done = true;
+ return i;
+ }
+
+ if (req.response.headers.connection == .keep_alive) {
+ req.connection.data.closing = false;
+ } else {
+ req.connection.data.closing = true;
+ }
+
+ if (req.response.headers.transfer_encoding) |transfer_encoding| {
+ switch (transfer_encoding) {
+ .chunked => {
+ req.response.next_chunk_length = 0;
+ req.response.state = .chunk_size;
+ },
+ }
+ } else if (req.response.headers.content_length) |content_length| {
+ req.response.next_chunk_length = content_length;
+
+ if (content_length == 0) req.response.done = true;
+ } else {
+ req.response.done = true;
+ }
+
+ return i;
+ }
+
+ return 0;
+}
+
+pub const WaitForCompleteHeadError = ReadRawError || error{
+ UnexpectedEndOfStream,
+
+ HttpHeadersExceededSizeLimit,
+ ShortHttpStatusLine,
+ BadHttpVersion,
+ HttpHeaderContinuationsUnsupported,
+ HttpTransferEncodingUnsupported,
+ HttpConnectionHeaderUnsupported,
+};
+
+/// Reads a complete response head. Any leftover data is stored in the request. This function is idempotent.
+pub fn waitForCompleteHead(req: *Request) WaitForCompleteHeadError!void {
+ if (req.response.state.isContent()) return;
+
+ while (true) {
+ const nread = try req.connection.data.read(req.read_buffer[0..]);
+ const amt = try checkForCompleteHead(req, req.read_buffer[0..nread]);
+
+ if (amt != 0) {
+ req.read_buffer_start = @intCast(ReadBufferIndex, amt);
+ req.read_buffer_len = @intCast(ReadBufferIndex, nread);
+ return;
+ } else if (nread == 0) {
+ return error.UnexpectedEndOfStream;
+ }
+ }
+}
+
+/// This one can return 0 without meaning EOF.
+fn readRawAdvanced(req: *Request, buffer: []u8) !usize {
+ assert(req.response.state.isContent());
+ if (req.response.done) return 0;
+
+ // var in: []const u8 = undefined;
+ if (req.read_buffer_start == req.read_buffer_len) {
+ const nread = try req.connection.data.read(req.read_buffer[0..]);
+ if (nread == 0) return error.UnexpectedEndOfStream;
+
+ req.read_buffer_start = 0;
+ req.read_buffer_len = @intCast(ReadBufferIndex, nread);
+ }
+
+ var out_index: usize = 0;
+ while (true) {
+ switch (req.response.state) {
+ .invalid, .start, .seen_r, .seen_rn, .seen_rnr => unreachable,
+ .finished => {
+ // TODO https://github.com/ziglang/zig/issues/14039
+ const buf_avail = req.read_buffer_len - req.read_buffer_start;
+ const data_avail = req.response.next_chunk_length;
+ const out_avail = buffer.len;
+
+ if (req.handle_redirects and req.response.headers.status.class() == .redirect) {
+ const can_read = @intCast(usize, @min(buf_avail, data_avail));
+ req.response.next_chunk_length -= can_read;
+
+ if (req.response.next_chunk_length == 0) {
+ req.client.connection_pool.release(req.client, req.connection);
+ req.connection = undefined;
+ req.response.done = true;
+ }
+
+ return 0; // skip over as much data as possible
+ }
+
+ const can_read = @intCast(usize, @min(@min(buf_avail, data_avail), out_avail));
+ req.response.next_chunk_length -= can_read;
+
+ mem.copy(u8, buffer[0..], req.read_buffer[req.read_buffer_start..][0..can_read]);
+ req.read_buffer_start += @intCast(ReadBufferIndex, can_read);
+
+ if (req.response.next_chunk_length == 0) {
+ req.client.connection_pool.release(req.client, req.connection);
+ req.connection = undefined;
+ req.response.done = true;
+ }
+
+ return can_read;
+ },
+ .chunk_size_prefix_r => switch (req.read_buffer_len - req.read_buffer_start) {
+ 0 => return out_index,
+ 1 => switch (req.read_buffer[req.read_buffer_start]) {
+ '\r' => {
+ req.response.state = .chunk_size_prefix_n;
+ return out_index;
+ },
+ else => {
+ req.response.state = .invalid;
+ return error.HttpHeadersInvalid;
+ },
+ },
+ else => switch (int16(req.read_buffer[req.read_buffer_start..][0..2])) {
+ int16("\r\n") => {
+ req.read_buffer_start += 2;
+ req.response.state = .chunk_size;
+ continue;
+ },
+ else => {
+ req.response.state = .invalid;
+ return error.HttpHeadersInvalid;
+ },
+ },
+ },
+ .chunk_size_prefix_n => switch (req.read_buffer_len - req.read_buffer_start) {
+ 0 => return out_index,
+ else => switch (req.read_buffer[req.read_buffer_start]) {
+ '\n' => {
+ req.read_buffer_start += 1;
+ req.response.state = .chunk_size;
+ continue;
+ },
+ else => {
+ req.response.state = .invalid;
+ return error.HttpHeadersInvalid;
+ },
+ },
+ },
+ .chunk_size, .chunk_r => {
+ const i = req.response.findChunkedLen(req.read_buffer[req.read_buffer_start..req.read_buffer_len]);
+ switch (req.response.state) {
+ .invalid => return error.HttpHeadersInvalid,
+ .chunk_data => {
+ if (req.response.next_chunk_length == 0) {
+ req.response.done = true;
+ req.client.connection_pool.release(req.client, req.connection);
+ req.connection = undefined;
+
+ return out_index;
+ }
+
+ req.read_buffer_start += @intCast(ReadBufferIndex, i);
+ continue;
+ },
+ .chunk_size => return out_index,
+ else => unreachable,
+ }
+ },
+ .chunk_data => {
+ // TODO https://github.com/ziglang/zig/issues/14039
+ const buf_avail = req.read_buffer_len - req.read_buffer_start;
+ const data_avail = req.response.next_chunk_length;
+ const out_avail = buffer.len - out_index;
+
+ if (req.handle_redirects and req.response.headers.status.class() == .redirect) {
+ const can_read = @intCast(usize, @min(buf_avail, data_avail));
+ req.response.next_chunk_length -= can_read;
+
+ if (req.response.next_chunk_length == 0) {
+ req.client.connection_pool.release(req.client, req.connection);
+ req.connection = undefined;
+ req.response.done = true;
+ continue;
+ }
+
+ return 0; // skip over as much data as possible
+ }
+
+ const can_read = @intCast(usize, @min(@min(buf_avail, data_avail), out_avail));
+ req.response.next_chunk_length -= can_read;
+
+ mem.copy(u8, buffer[out_index..], req.read_buffer[req.read_buffer_start..][0..can_read]);
+ req.read_buffer_start += @intCast(ReadBufferIndex, can_read);
+ out_index += can_read;
+
+ if (req.response.next_chunk_length == 0) {
+ req.response.state = .chunk_size_prefix_r;
+
+ continue;
+ }
+
+ return out_index;
+ },
+ }
+ }
+}
+
+pub const ReadError = Client.DeflateDecompressor.Error || Client.GzipDecompressor.Error || Client.ZstdDecompressor.Error || WaitForCompleteHeadError || error{ BadHeader, InvalidCompression, StreamTooLong, InvalidWindowSize, CompressionNotSupported };
+
+pub const Reader = std.io.Reader(*Request, ReadError, read);
+
+pub fn reader(req: *Request) Reader {
+ return .{ .context = req };
+}
+
+pub fn read(req: *Request, buffer: []u8) ReadError!usize {
+ while (true) {
+ if (!req.response.state.isContent()) try req.waitForCompleteHead();
+
+ if (req.handle_redirects and req.response.headers.status.class() == .redirect) {
+ assert(try req.readRaw(buffer) == 0);
+
+ if (req.redirects_left == 0) return error.TooManyHttpRedirects;
+
+ const location = req.response.headers.location orelse
+ return error.HttpRedirectMissingLocation;
+ const new_url = Uri.parse(location) catch try Uri.parseWithoutScheme(location);
+
+ var new_arena = std.heap.ArenaAllocator.init(req.client.allocator);
+ const resolved_url = try req.uri.resolve(new_url, false, new_arena.allocator());
+ errdefer new_arena.deinit();
+
+ req.arena.deinit();
+ req.arena = new_arena;
+
+ const new_req = try req.client.request(resolved_url, req.headers, .{
+ .max_redirects = req.redirects_left - 1,
+ .header_strategy = if (req.response.header_bytes_owned) .{
+ .dynamic = req.response.max_header_bytes,
+ } else .{
+ .static = req.response.header_bytes.unusedCapacitySlice(),
+ },
+ });
+ req.deinit();
+ req.* = new_req;
+ } else {
+ break;
+ }
+ }
+
+ if (req.response.compression == .none) {
+ if (req.response.headers.transfer_compression) |compression| {
+ switch (compression) {
+ .compress => return error.CompressionNotSupported,
+ .deflate => req.response.compression = .{
+ .deflate = try std.compress.zlib.zlibStream(req.client.allocator, ReaderRaw{ .context = req }),
+ },
+ .gzip => req.response.compression = .{
+ .gzip = try std.compress.gzip.decompress(req.client.allocator, ReaderRaw{ .context = req }),
+ },
+ .zstd => req.response.compression = .{
+ .zstd = std.compress.zstd.decompressStream(req.client.allocator, ReaderRaw{ .context = req }),
+ },
+ }
+ }
+ }
+
+ return switch (req.response.compression) {
+ .deflate => |*deflate| try deflate.read(buffer),
+ .gzip => |*gzip| try gzip.read(buffer),
+ .zstd => |*zstd| try zstd.read(buffer),
+ else => try req.readRaw(buffer),
+ };
+}
+
+pub fn readAll(req: *Request, buffer: []u8) !usize {
+ var index: usize = 0;
+ while (index < buffer.len) {
+ const amt = try read(req, buffer[index..]);
+ if (amt == 0) break;
+ index += amt;
+ }
+ return index;
+}
+
+pub const WriteError = Connection.WriteError || error{ NotWriteable, MessageTooLong };
+
+pub const Writer = std.io.Writer(*Request, WriteError, write);
+
+pub fn writer(req: *Request) Writer {
+ return .{ .context = req };
+}
+
+/// Write `bytes` to the server. The `transfer_encoding` request header determines how data will be sent.
+pub fn write(req: *Request, bytes: []const u8) !usize {
+ switch (req.headers.transfer_encoding) {
+ .chunked => {
+ try req.connection.data.writer().print("{x}\r\n", .{bytes.len});
+ try req.connection.data.writeAll(bytes);
+ try req.connection.data.writeAll("\r\n");
+
+ return bytes.len;
+ },
+ .content_length => |*len| {
+ if (len.* < bytes.len) return error.MessageTooLong;
+
+ const amt = try req.connection.data.write(bytes);
+ len.* -= amt;
+ return amt;
+ },
+ .none => return error.NotWriteable,
+ }
+}
+
+/// Finish the body of a request. This notifies the server that you have no more data to send.
+pub fn finish(req: *Request) !void {
+ switch (req.headers.transfer_encoding) {
+ .chunked => try req.connection.data.writeAll("0\r\n\r\n"),
+ .content_length => |len| if (len != 0) return error.MessageNotCompleted,
+ .none => {},
+ }
+}
+
+inline fn int16(array: *const [2]u8) u16 {
+ return @bitCast(u16, array.*);
+}
+
+inline fn int32(array: *const [4]u8) u32 {
+ return @bitCast(u32, array.*);
+}
+
+inline fn int64(array: *const [8]u8) u64 {
+ return @bitCast(u64, array.*);
+}
+
+test {
+ const builtin = @import("builtin");
+
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
+ _ = Response;
+}
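And a sketch of the write side defined above: a chunked POST that uses `Headers.custom`, `write()`, and `finish()`, then checks the response status (endpoint and header values are made up):

```zig
const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var client = std.http.Client{ .allocator = gpa };
    defer client.deinit();

    const uri = try std.Uri.parse("http://example.com/upload");

    var req = try client.request(uri, .{
        .method = .POST,
        .transfer_encoding = .chunked, // body length not known up front
        .custom = &[_]std.http.CustomHeader{
            .{ .name = "X-Example", .value = "demo" },
        },
    }, .{});
    defer req.deinit();

    // Each write becomes one chunk; finish() sends the zero-length terminator.
    try req.writer().writeAll("hello, ");
    try req.writer().writeAll("world");
    try req.finish();

    try req.waitForCompleteHead();
    std.debug.print("status: {d}\n", .{@enumToInt(req.response.headers.status)});
}
```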
diff --git a/lib/std/http/Client/Response.zig b/lib/std/http/Client/Response.zig
new file mode 100644
index 0000000000..8b2a9a4918
--- /dev/null
+++ b/lib/std/http/Client/Response.zig
@@ -0,0 +1,509 @@
+const std = @import("std");
+const http = std.http;
+const mem = std.mem;
+const testing = std.testing;
+const assert = std.debug.assert;
+
+const Client = @import("../Client.zig");
+const Response = @This();
+
+headers: Headers,
+state: State,
+header_bytes_owned: bool,
+/// This could either be a fixed buffer provided by the API user or it
+/// could be our own array list.
+header_bytes: std.ArrayListUnmanaged(u8),
+max_header_bytes: usize,
+next_chunk_length: u64,
+done: bool = false,
+
+compression: union(enum) {
+ deflate: Client.DeflateDecompressor,
+ gzip: Client.GzipDecompressor,
+ zstd: Client.ZstdDecompressor,
+ none: void,
+} = .none,
+
+pub const Headers = struct {
+ status: http.Status,
+ version: http.Version,
+ location: ?[]const u8 = null,
+ content_length: ?u64 = null,
+ transfer_encoding: ?http.TransferEncoding = null,
+ transfer_compression: ?http.ContentEncoding = null,
+ connection: http.Connection = .close,
+ upgrade: ?[]const u8 = null,
+
+ number_of_headers: usize = 0,
+
+ pub fn parse(bytes: []const u8) !Headers {
+ var it = mem.split(u8, bytes[0 .. bytes.len - 4], "\r\n");
+
+ const first_line = it.first();
+ if (first_line.len < 12)
+ return error.ShortHttpStatusLine;
+
+ const version: http.Version = switch (int64(first_line[0..8])) {
+ int64("HTTP/1.0") => .@"HTTP/1.0",
+ int64("HTTP/1.1") => .@"HTTP/1.1",
+ else => return error.BadHttpVersion,
+ };
+ if (first_line[8] != ' ') return error.HttpHeadersInvalid;
+ const status = @intToEnum(http.Status, parseInt3(first_line[9..12].*));
+
+ var headers: Headers = .{
+ .version = version,
+ .status = status,
+ };
+
+ while (it.next()) |line| {
+ headers.number_of_headers += 1;
+
+ if (line.len == 0) return error.HttpHeadersInvalid;
+ switch (line[0]) {
+ ' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
+ else => {},
+ }
+ var line_it = mem.split(u8, line, ": ");
+ const header_name = line_it.first();
+ const header_value = line_it.rest();
+ if (std.ascii.eqlIgnoreCase(header_name, "location")) {
+ if (headers.location != null) return error.HttpHeadersInvalid;
+ headers.location = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
+ if (headers.content_length != null) return error.HttpHeadersInvalid;
+ headers.content_length = try std.fmt.parseInt(u64, header_value, 10);
+ } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
+ if (headers.transfer_encoding != null or headers.transfer_compression != null) return error.HttpHeadersInvalid;
+
+ // Transfer-Encoding: second, first
+ // Transfer-Encoding: deflate, chunked
+ var iter = std.mem.splitBackwards(u8, header_value, ",");
+
+ if (iter.next()) |first| {
+ const trimmed = std.mem.trim(u8, first, " ");
+
+ if (std.meta.stringToEnum(http.TransferEncoding, trimmed)) |te| {
+ headers.transfer_encoding = te;
+ } else if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+ headers.transfer_compression = ce;
+ } else {
+ return error.HttpTransferEncodingUnsupported;
+ }
+ }
+
+ if (iter.next()) |second| {
+ if (headers.transfer_compression != null) return error.HttpTransferEncodingUnsupported;
+
+ const trimmed = std.mem.trim(u8, second, " ");
+
+ if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+ headers.transfer_compression = ce;
+ } else {
+ return error.HttpTransferEncodingUnsupported;
+ }
+ }
+
+ if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
+ if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
+
+ const trimmed = std.mem.trim(u8, header_value, " ");
+
+ if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+ headers.transfer_compression = ce;
+ } else {
+ return error.HttpTransferEncodingUnsupported;
+ }
+ } else if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
+ if (std.ascii.eqlIgnoreCase(header_value, "keep-alive")) {
+ headers.connection = .keep_alive;
+ } else if (std.ascii.eqlIgnoreCase(header_value, "close")) {
+ headers.connection = .close;
+ } else {
+ return error.HttpConnectionHeaderUnsupported;
+ }
+ } else if (std.ascii.eqlIgnoreCase(header_name, "upgrade")) {
+ headers.upgrade = header_value;
+ }
+ }
+
+ return headers;
+ }
+
+ test "parse headers" {
+ const example =
+ "HTTP/1.1 301 Moved Permanently\r\n" ++
+ "Location: https://www.example.com/\r\n" ++
+ "Content-Type: text/html; charset=UTF-8\r\n" ++
+ "Content-Length: 220\r\n\r\n";
+ const parsed = try Headers.parse(example);
+ try testing.expectEqual(http.Version.@"HTTP/1.1", parsed.version);
+ try testing.expectEqual(http.Status.moved_permanently, parsed.status);
+ try testing.expectEqualStrings("https://www.example.com/", parsed.location orelse
+ return error.TestFailed);
+ try testing.expectEqual(@as(?u64, 220), parsed.content_length);
+ }
+
+ test "header continuation" {
+ const example =
+ "HTTP/1.0 200 OK\r\n" ++
+ "Content-Type: text/html;\r\n charset=UTF-8\r\n" ++
+ "Content-Length: 220\r\n\r\n";
+ try testing.expectError(
+ error.HttpHeaderContinuationsUnsupported,
+ Headers.parse(example),
+ );
+ }
+
+ test "extra content length" {
+ const example =
+ "HTTP/1.0 200 OK\r\n" ++
+ "Content-Length: 220\r\n" ++
+ "Content-Type: text/html; charset=UTF-8\r\n" ++
+ "content-length: 220\r\n\r\n";
+ try testing.expectError(
+ error.HttpHeadersInvalid,
+ Headers.parse(example),
+ );
+ }
+};
+
+inline fn int16(array: *const [2]u8) u16 {
+ return @bitCast(u16, array.*);
+}
+
+inline fn int32(array: *const [4]u8) u32 {
+ return @bitCast(u32, array.*);
+}
+
+inline fn int64(array: *const [8]u8) u64 {
+ return @bitCast(u64, array.*);
+}
+
+pub const State = enum {
+ /// Begin header parsing states.
+ invalid,
+ start,
+ seen_r,
+ seen_rn,
+ seen_rnr,
+ finished,
+ /// Begin transfer-encoding: chunked parsing states.
+ chunk_size_prefix_r,
+ chunk_size_prefix_n,
+ chunk_size,
+ chunk_r,
+ chunk_data,
+
+ pub fn isContent(self: State) bool {
+ return switch (self) {
+ .invalid, .start, .seen_r, .seen_rn, .seen_rnr => false,
+ .finished, .chunk_size_prefix_r, .chunk_size_prefix_n, .chunk_size, .chunk_r, .chunk_data => true,
+ };
+ }
+};
+
+pub fn initDynamic(max: usize) Response {
+ return .{
+ .state = .start,
+ .headers = undefined,
+ .header_bytes = .{},
+ .max_header_bytes = max,
+ .header_bytes_owned = true,
+ .next_chunk_length = undefined,
+ };
+}
+
+pub fn initStatic(buf: []u8) Response {
+ return .{
+ .state = .start,
+ .headers = undefined,
+ .header_bytes = .{ .items = buf[0..0], .capacity = buf.len },
+ .max_header_bytes = buf.len,
+ .header_bytes_owned = false,
+ .next_chunk_length = undefined,
+ };
+}
+
+/// Returns how many bytes are part of HTTP headers. Always less than or
+/// equal to bytes.len. If the amount returned is less than bytes.len, it
+/// means the headers ended and the first byte past the terminating
+/// \r\n\r\n is located at `bytes[result]`.
+pub fn findHeadersEnd(r: *Response, bytes: []const u8) usize {
+ var index: usize = 0;
+
+ // TODO: https://github.com/ziglang/zig/issues/8220
+ state: while (true) {
+ switch (r.state) {
+ .invalid => unreachable,
+ .finished => unreachable,
+ .start => while (true) {
+ switch (bytes.len - index) {
+ 0 => return index,
+ 1 => {
+ if (bytes[index] == '\r')
+ r.state = .seen_r;
+ return index + 1;
+ },
+ 2 => {
+ if (int16(bytes[index..][0..2]) == int16("\r\n")) {
+ r.state = .seen_rn;
+ } else if (bytes[index + 1] == '\r') {
+ r.state = .seen_r;
+ }
+ return index + 2;
+ },
+ 3 => {
+ if (int16(bytes[index..][0..2]) == int16("\r\n") and
+ bytes[index + 2] == '\r')
+ {
+ r.state = .seen_rnr;
+ } else if (int16(bytes[index + 1 ..][0..2]) == int16("\r\n")) {
+ r.state = .seen_rn;
+ } else if (bytes[index + 2] == '\r') {
+ r.state = .seen_r;
+ }
+ return index + 3;
+ },
+ 4...15 => {
+ if (int32(bytes[index..][0..4]) == int32("\r\n\r\n")) {
+ r.state = .finished;
+ return index + 4;
+ } else if (int16(bytes[index + 1 ..][0..2]) == int16("\r\n") and
+ bytes[index + 3] == '\r')
+ {
+ r.state = .seen_rnr;
+ index += 4;
+ continue :state;
+ } else if (int16(bytes[index + 2 ..][0..2]) == int16("\r\n")) {
+ r.state = .seen_rn;
+ index += 4;
+ continue :state;
+ } else if (bytes[index + 3] == '\r') {
+ r.state = .seen_r;
+ index += 4;
+ continue :state;
+ }
+ index += 4;
+ continue;
+ },
+ else => {
+ const chunk = bytes[index..][0..16];
+ const v: @Vector(16, u8) = chunk.*;
+ const matches_r = v == @splat(16, @as(u8, '\r'));
+ const iota = std.simd.iota(u8, 16);
+ const default = @splat(16, @as(u8, 16));
+ const sub_index = @reduce(.Min, @select(u8, matches_r, iota, default));
+ switch (sub_index) {
+ 0...12 => {
+ index += sub_index + 4;
+ if (int32(chunk[sub_index..][0..4]) == int32("\r\n\r\n")) {
+ r.state = .finished;
+ return index;
+ }
+ continue;
+ },
+ 13 => {
+ index += 16;
+ if (int16(chunk[14..][0..2]) == int16("\n\r")) {
+ r.state = .seen_rnr;
+ continue :state;
+ }
+ continue;
+ },
+ 14 => {
+ index += 16;
+ if (chunk[15] == '\n') {
+ r.state = .seen_rn;
+ continue :state;
+ }
+ continue;
+ },
+ 15 => {
+ r.state = .seen_r;
+ index += 16;
+ continue :state;
+ },
+ 16 => {
+ index += 16;
+ continue;
+ },
+ else => unreachable,
+ }
+ },
+ }
+ },
+
+ .seen_r => switch (bytes.len - index) {
+ 0 => return index,
+ 1 => {
+ switch (bytes[index]) {
+ '\n' => r.state = .seen_rn,
+ '\r' => r.state = .seen_r,
+ else => r.state = .start,
+ }
+ return index + 1;
+ },
+ 2 => {
+ if (int16(bytes[index..][0..2]) == int16("\n\r")) {
+ r.state = .seen_rnr;
+ return index + 2;
+ }
+ r.state = .start;
+ return index + 2;
+ },
+ else => {
+ if (int16(bytes[index..][0..2]) == int16("\n\r") and
+ bytes[index + 2] == '\n')
+ {
+ r.state = .finished;
+ return index + 3;
+ }
+ index += 3;
+ r.state = .start;
+ continue :state;
+ },
+ },
+ .seen_rn => switch (bytes.len - index) {
+ 0 => return index,
+ 1 => {
+ switch (bytes[index]) {
+ '\r' => r.state = .seen_rnr,
+ else => r.state = .start,
+ }
+ return index + 1;
+ },
+ else => {
+ if (int16(bytes[index..][0..2]) == int16("\r\n")) {
+ r.state = .finished;
+ return index + 2;
+ }
+ index += 2;
+ r.state = .start;
+ continue :state;
+ },
+ },
+ .seen_rnr => switch (bytes.len - index) {
+ 0 => return index,
+ else => {
+ if (bytes[index] == '\n') {
+ r.state = .finished;
+ return index + 1;
+ }
+ index += 1;
+ r.state = .start;
+ continue :state;
+ },
+ },
+ .chunk_size_prefix_r => unreachable,
+ .chunk_size_prefix_n => unreachable,
+ .chunk_size => unreachable,
+ .chunk_r => unreachable,
+ .chunk_data => unreachable,
+ }
+
+ return index;
+ }
+}
+
+pub fn findChunkedLen(r: *Response, bytes: []const u8) usize {
+ var i: usize = 0;
+ if (r.state == .chunk_size) {
+ while (i < bytes.len) : (i += 1) {
+ const digit = switch (bytes[i]) {
+ '0'...'9' => |b| b - '0',
+ 'A'...'F' => |b| b - 'A' + 10,
+ 'a'...'f' => |b| b - 'a' + 10,
+ '\r' => {
+ r.state = .chunk_r;
+ i += 1;
+ break;
+ },
+ else => {
+ r.state = .invalid;
+ return i;
+ },
+ };
+ const mul = @mulWithOverflow(r.next_chunk_length, 16);
+ if (mul[1] != 0) {
+ r.state = .invalid;
+ return i;
+ }
+ const add = @addWithOverflow(mul[0], digit);
+ if (add[1] != 0) {
+ r.state = .invalid;
+ return i;
+ }
+ r.next_chunk_length = add[0];
+ } else {
+ return i;
+ }
+ }
+ assert(r.state == .chunk_r);
+ if (i == bytes.len) return i;
+
+ if (bytes[i] == '\n') {
+ r.state = .chunk_data;
+ return i + 1;
+ } else {
+ r.state = .invalid;
+ return i;
+ }
+}
+
+fn parseInt3(nnn: @Vector(3, u8)) u10 {
+ const zero: @Vector(3, u8) = .{ '0', '0', '0' };
+ const mmm: @Vector(3, u10) = .{ 100, 10, 1 };
+ return @reduce(.Add, @as(@Vector(3, u10), nnn -% zero) *% mmm);
+}
+
+test parseInt3 {
+ const expectEqual = std.testing.expectEqual;
+ try expectEqual(@as(u10, 0), parseInt3("000".*));
+ try expectEqual(@as(u10, 418), parseInt3("418".*));
+ try expectEqual(@as(u10, 999), parseInt3("999".*));
+}
+
+test "find headers end basic" {
+ var buffer: [1]u8 = undefined;
+ var r = Response.initStatic(&buffer);
+ try testing.expectEqual(@as(usize, 10), r.findHeadersEnd("HTTP/1.1 4"));
+ try testing.expectEqual(@as(usize, 2), r.findHeadersEnd("18"));
+ try testing.expectEqual(@as(usize, 8), r.findHeadersEnd(" lol\r\n\r\nblah blah"));
+}
+
+test "find headers end vectorized" {
+ var buffer: [1]u8 = undefined;
+ var r = Response.initStatic(&buffer);
+ const example =
+ "HTTP/1.1 301 Moved Permanently\r\n" ++
+ "Location: https://www.example.com/\r\n" ++
+ "Content-Type: text/html; charset=UTF-8\r\n" ++
+ "Content-Length: 220\r\n" ++
+ "\r\ncontent";
+ try testing.expectEqual(@as(usize, 131), r.findHeadersEnd(example));
+}
+
+test "find headers end bug" {
+ var buffer: [1]u8 = undefined;
+ var r = Response.initStatic(&buffer);
+ const trail = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+ const example =
+ "HTTP/1.1 200 OK\r\n" ++
+ "Access-Control-Allow-Origin: https://render.githubusercontent.com\r\n" ++
+ "content-disposition: attachment; filename=zig-0.10.0.tar.gz\r\n" ++
+ "Content-Security-Policy: default-src 'none'; style-src 'unsafe-inline'; sandbox\r\n" ++
+ "Content-Type: application/x-gzip\r\n" ++
+ "ETag: \"bfae0af6b01c7c0d89eb667cb5f0e65265968aeebda2689177e6b26acd3155ca\"\r\n" ++
+ "Strict-Transport-Security: max-age=31536000\r\n" ++
+ "Vary: Authorization,Accept-Encoding,Origin\r\n" ++
+ "X-Content-Type-Options: nosniff\r\n" ++
+ "X-Frame-Options: deny\r\n" ++
+ "X-XSS-Protection: 1; mode=block\r\n" ++
+ "Date: Fri, 06 Jan 2023 22:26:22 GMT\r\n" ++
+ "Transfer-Encoding: chunked\r\n" ++
+ "X-GitHub-Request-Id: 89C6:17E9:A7C9E:124B51:63B8A00E\r\n" ++
+ "connection: close\r\n\r\n" ++ trail;
+ try testing.expectEqual(@as(usize, example.len - trail.len), r.findHeadersEnd(example));
+}
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index b7725b9ae9..4e4e7c489e 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -1674,6 +1674,7 @@ pub const Mutable = struct {
/// If a is positive, this passes through to truncate.
/// If a is negative, then r is set to positive with the bit pattern ~(a - 1).
+ /// r may alias a.
///
/// Asserts `r` has enough storage to store the result.
/// The upper bound is `calcTwosCompLimbCount(a.len)`.
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index b9b5fb1004..a486713e1c 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -196,13 +196,8 @@ test "Allocator.resize" {
/// dest.len must be >= source.len.
/// If the slices overlap, dest.ptr must be <= src.ptr.
pub fn copy(comptime T: type, dest: []T, source: []const T) void {
- // TODO instead of manually doing this check for the whole array
- // and turning off runtime safety, the compiler should detect loops like
- // this and automatically omit safety checks for loops
- @setRuntimeSafety(false);
- assert(dest.len >= source.len);
- for (source, 0..) |s, i|
- dest[i] = s;
+ for (dest[0..source.len], source) |*d, s|
+ d.* = s;
}
/// Copy all of source into dest at position 0.
@@ -611,8 +606,8 @@ test "lessThan" {
pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
if (a.len != b.len) return false;
if (a.ptr == b.ptr) return true;
- for (a, 0..) |item, index| {
- if (b[index] != item) return false;
+ for (a, b) |a_elem, b_elem| {
+ if (a_elem != b_elem) return false;
}
return true;
}
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 6965205b1e..f97e42cc89 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -68,6 +68,15 @@ pub fn MultiArrayList(comptime S: type) type {
other.deinit(gpa);
self.* = undefined;
}
+
+ /// This function is used in the debugger pretty formatters in tools/ to fetch the
+ /// child field order and entry type to facilitate fancy debug printing for this type.
+ fn dbHelper(self: *Slice, child: *S, field: *Field, entry: *Entry) void {
+ _ = self;
+ _ = child;
+ _ = field;
+ _ = entry;
+ }
};
const Self = @This();
@@ -463,16 +472,18 @@ pub fn MultiArrayList(comptime S: type) type {
} });
};
/// This function is used in the debugger pretty formatters in tools/ to fetch the
- /// child type to facilitate fancy debug printing for this type.
- fn dbHelper(self: *Self, child: *S, entry: *Entry) void {
+ /// child field order and entry type to facilitate fancy debug printing for this type.
+ fn dbHelper(self: *Self, child: *S, field: *Field, entry: *Entry) void {
_ = self;
_ = child;
+ _ = field;
_ = entry;
}
comptime {
if (builtin.mode == .Debug) {
_ = dbHelper;
+ _ = Slice.dbHelper;
}
}
};
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 50a0f8b9d7..7222433fd5 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -702,8 +702,10 @@ pub const AddressList = struct {
}
};
+pub const TcpConnectToHostError = GetAddressListError || TcpConnectToAddressError;
+
/// All memory allocated with `allocator` will be freed before this function returns.
-pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) !Stream {
+pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) TcpConnectToHostError!Stream {
const list = try getAddressList(allocator, name, port);
defer list.deinit();
@@ -720,7 +722,9 @@ pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) !
return std.os.ConnectError.ConnectionRefused;
}
-pub fn tcpConnectToAddress(address: Address) !Stream {
+pub const TcpConnectToAddressError = std.os.SocketError || std.os.ConnectError;
+
+pub fn tcpConnectToAddress(address: Address) TcpConnectToAddressError!Stream {
const nonblock = if (std.io.is_async) os.SOCK.NONBLOCK else 0;
const sock_flags = os.SOCK.STREAM | nonblock |
(if (builtin.target.os.tag == .windows) 0 else os.SOCK.CLOEXEC);
@@ -737,8 +741,32 @@ pub fn tcpConnectToAddress(address: Address) !Stream {
return Stream{ .handle = sockfd };
}
+const GetAddressListError = std.mem.Allocator.Error || std.fs.File.OpenError || std.fs.File.ReadError || std.os.SocketError || std.os.BindError || error{
+ // TODO: break this up into error sets from the various underlying functions
+
+ TemporaryNameServerFailure,
+ NameServerFailure,
+ AddressFamilyNotSupported,
+ UnknownHostName,
+ ServiceUnavailable,
+ Unexpected,
+
+ HostLacksNetworkAddresses,
+
+ InvalidCharacter,
+ InvalidEnd,
+ NonCanonical,
+ Overflow,
+ Incomplete,
+ InvalidIpv4Mapping,
+ InvalidIPAddressFormat,
+
+ InterfaceNotFound,
+ FileSystem,
+};
+
/// Call `AddressList.deinit` on the result.
-pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*AddressList {
+pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) GetAddressListError!*AddressList {
const result = blk: {
var arena = std.heap.ArenaAllocator.init(allocator);
errdefer arena.deinit();
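With the explicit error sets above, callers of `tcpConnectToHost` can switch on the failure cases instead of capturing `anyerror`. A hedged usage sketch — the host, port, and helper name are placeholders, not taken from the patch:

```zig
const std = @import("std");
const net = std.net;

fn connectOrNull(gpa: std.mem.Allocator) !?net.Stream {
    const stream = net.tcpConnectToHost(gpa, "example.com", 80) catch |err| switch (err) {
        // Both errors are members of net.TcpConnectToHostError.
        error.UnknownHostName, error.ConnectionRefused => return null,
        else => |e| return e,
    };
    return stream;
}
```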
diff --git a/lib/std/os.zig b/lib/std/os.zig
index fe664302a7..25cc4e34c4 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -29,7 +29,7 @@ const Allocator = std.mem.Allocator;
const Preopen = std.fs.wasi.Preopen;
const PreopenList = std.fs.wasi.PreopenList;
-pub const darwin = @import("os/darwin.zig");
+pub const darwin = std.c;
pub const dragonfly = std.c;
pub const freebsd = std.c;
pub const haiku = std.c;
@@ -41,8 +41,6 @@ pub const plan9 = @import("os/plan9.zig");
pub const uefi = @import("os/uefi.zig");
pub const wasi = @import("os/wasi.zig");
pub const windows = @import("os/windows.zig");
-pub const posix_spawn = @import("os/posix_spawn.zig");
-pub const ptrace = @import("os/ptrace.zig");
comptime {
assert(@import("std") == std); // std lib tests require --zig-lib-dir
@@ -56,7 +54,6 @@ test {
}
_ = wasi;
_ = windows;
- _ = posix_spawn;
_ = @import("os/test.zig");
}
@@ -253,6 +250,25 @@ pub var argv: [][*:0]u8 = if (builtin.link_libc) undefined else switch (builtin.
else => undefined,
};
+pub const have_sigpipe_support = @hasDecl(@This(), "SIG") and @hasDecl(SIG, "PIPE");
+
+fn noopSigHandler(_: c_int) callconv(.C) void {}
+
+/// Executed by default by the POSIX startup code before main(), if SIGPIPE is supported.
+pub fn maybeIgnoreSigpipe() void {
+ if (have_sigpipe_support and !std.options.keep_sigpipe) {
+ const act = Sigaction{
+ // We set handler to a noop function instead of SIG.IGN so we don't leak our
+ // signal disposition to a child process
+ .handler = .{ .handler = noopSigHandler },
+ .mask = empty_sigset,
+ .flags = 0,
+ };
+ sigaction(SIG.PIPE, &act, null) catch |err|
+ std.debug.panic("failed to install noop SIGPIPE handler with '{s}'", .{@errorName(err)});
+ }
+}
+
/// To obtain errno, call this function with the return value of the
/// system function call. For some systems this will obtain the value directly
/// from the return code; for others it will use a thread-local errno variable.
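An application that wants the default SIGPIPE disposition back can opt out of this handler through the root-level `std_options` override; a minimal sketch, assuming the usual override mechanism (nothing below is introduced by this patch):

```zig
// In the application's root source file:
pub const std_options = struct {
    // Read by std.options.keep_sigpipe; skips maybeIgnoreSigpipe()'s handler.
    pub const keep_sigpipe = true;
};
```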
@@ -575,22 +591,12 @@ pub fn abort() noreturn {
raise(SIG.KILL) catch {};
exit(127); // Pid 1 might not be signalled in some containers.
}
- if (builtin.os.tag == .uefi) {
- exit(0); // TODO choose appropriate exit code
+ switch (builtin.os.tag) {
+ .uefi, .wasi, .cuda => @trap(),
+ else => system.abort(),
}
- if (builtin.os.tag == .wasi) {
- exit(1);
- }
- if (builtin.os.tag == .cuda) {
- // TODO: introduce `@trap` instead of abusing https://github.com/ziglang/zig/issues/2291
- @"llvm.trap"();
- }
-
- system.abort();
}
-extern fn @"llvm.trap"() noreturn;
-
pub const RaiseError = UnexpectedError;
pub fn raise(sig: u8) RaiseError!void {
@@ -759,6 +765,9 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
/// This operation is non-atomic on the following systems:
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
+///
+/// This function assumes that all vectors, including zero-length vectors, have
+/// a pointer within the address space of the application.
pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
if (builtin.os.tag == .windows) {
// TODO improve this to use ReadFileScatter
@@ -1036,6 +1045,8 @@ pub const WriteError = error{
FileTooBig,
InputOutput,
NoSpaceLeft,
+ DeviceBusy,
+ InvalidArgument,
/// In WASI, this error may occur when the file descriptor does
/// not hold the required rights to write to it.
@@ -1122,7 +1133,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
switch (errno(rc)) {
.SUCCESS => return @intCast(usize, rc),
.INTR => continue,
- .INVAL => unreachable,
+ .INVAL => return error.InvalidArgument,
.FAULT => unreachable,
.AGAIN => return error.WouldBlock,
.BADF => return error.NotOpenForWriting, // can be a race condition.
@@ -1134,6 +1145,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
.PERM => return error.AccessDenied,
.PIPE => return error.BrokenPipe,
.CONNRESET => return error.ConnectionResetByPeer,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1157,6 +1169,9 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
+///
+/// This function assumes that all vectors, including zero-length vectors, have
+/// a pointer within the address space of the application.
pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
if (builtin.os.tag == .windows) {
// TODO improve this to use WriteFileScatter
@@ -1191,7 +1206,7 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
switch (errno(rc)) {
.SUCCESS => return @intCast(usize, rc),
.INTR => continue,
- .INVAL => unreachable,
+ .INVAL => return error.InvalidArgument,
.FAULT => unreachable,
.AGAIN => return error.WouldBlock,
.BADF => return error.NotOpenForWriting, // Can be a race condition.
@@ -1203,6 +1218,7 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
.PERM => return error.AccessDenied,
.PIPE => return error.BrokenPipe,
.CONNRESET => return error.ConnectionResetByPeer,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1285,7 +1301,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
switch (errno(rc)) {
.SUCCESS => return @intCast(usize, rc),
.INTR => continue,
- .INVAL => unreachable,
+ .INVAL => return error.InvalidArgument,
.FAULT => unreachable,
.AGAIN => return error.WouldBlock,
.BADF => return error.NotOpenForWriting, // Can be a race condition.
@@ -1299,6 +1315,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
.NXIO => return error.Unseekable,
.SPIPE => return error.Unseekable,
.OVERFLOW => return error.Unseekable,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -1374,7 +1391,7 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
switch (errno(rc)) {
.SUCCESS => return @intCast(usize, rc),
.INTR => continue,
- .INVAL => unreachable,
+ .INVAL => return error.InvalidArgument,
.FAULT => unreachable,
.AGAIN => return error.WouldBlock,
.BADF => return error.NotOpenForWriting, // Can be a race condition.
@@ -1388,6 +1405,7 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
.NXIO => return error.Unseekable,
.SPIPE => return error.Unseekable,
.OVERFLOW => return error.Unseekable,
+ .BUSY => return error.DeviceBusy,
else => |err| return unexpectedErrno(err),
}
}
@@ -3456,7 +3474,7 @@ pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) BindError!voi
const rc = system.bind(sock, addr, len);
switch (errno(rc)) {
.SUCCESS => return,
- .ACCES => return error.AccessDenied,
+ .ACCES, .PERM => return error.AccessDenied,
.ADDRINUSE => return error.AddressInUse,
.BADF => unreachable, // always a race condition if this error is returned
.INVAL => unreachable, // invalid parameters
@@ -3983,13 +4001,32 @@ pub const WaitPidResult = struct {
};
/// Use this version of the `waitpid` wrapper if you spawned your child process using explicit
-/// `fork` and `execve` method. If you spawned your child process using `posix_spawn` method,
-/// use `std.os.posix_spawn.waitpid` instead.
+/// `fork` and `execve` method.
pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult {
const Status = if (builtin.link_libc) c_int else u32;
var status: Status = undefined;
+ const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags;
while (true) {
- const rc = system.waitpid(pid, &status, if (builtin.link_libc) @intCast(c_int, flags) else flags);
+ const rc = system.waitpid(pid, &status, coerced_flags);
+ switch (errno(rc)) {
+ .SUCCESS => return .{
+ .pid = @intCast(pid_t, rc),
+ .status = @bitCast(u32, status),
+ },
+ .INTR => continue,
+ .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error.
+ .INVAL => unreachable, // Invalid flags.
+ else => unreachable,
+ }
+ }
+}
+
+pub fn wait4(pid: pid_t, flags: u32, ru: ?*rusage) WaitPidResult {
+ const Status = if (builtin.link_libc) c_int else u32;
+ var status: Status = undefined;
+ const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags;
+ while (true) {
+ const rc = system.wait4(pid, &status, coerced_flags, ru);
switch (errno(rc)) {
.SUCCESS => return .{
.pid = @intCast(pid_t, rc),
@@ -4310,6 +4347,8 @@ pub const MMapError = error{
/// a filesystem that was mounted no-exec.
PermissionDenied,
LockedMemoryLimitExceeded,
+ ProcessFdQuotaExceeded,
+ SystemFdQuotaExceeded,
OutOfMemory,
} || UnexpectedError;
@@ -4351,6 +4390,8 @@ pub fn mmap(
.OVERFLOW => unreachable, // The number of pages used for length + offset would overflow.
.NODEV => return error.MemoryMappingNotSupported,
.INVAL => unreachable, // Invalid parameters to mmap()
+ .MFILE => return error.ProcessFdQuotaExceeded,
+ .NFILE => return error.SystemFdQuotaExceeded,
.NOMEM => return error.OutOfMemory,
else => return unexpectedErrno(err),
}
@@ -7086,20 +7127,24 @@ pub fn timerfd_gettime(fd: i32) TimerFdGetError!linux.itimerspec {
};
}
-pub const have_sigpipe_support = @hasDecl(@This(), "SIG") and @hasDecl(SIG, "PIPE");
+pub const PtraceError = error{
+ DeviceBusy,
+ ProcessNotFound,
+ PermissionDenied,
+} || UnexpectedError;
-fn noopSigHandler(_: c_int) callconv(.C) void {}
-
-pub fn maybeIgnoreSigpipe() void {
- if (have_sigpipe_support and !std.options.keep_sigpipe) {
- const act = Sigaction{
- // We set handler to a noop function instead of SIG.IGN so we don't leak our
- // signal disposition to a child process
- .handler = .{ .handler = noopSigHandler },
- .mask = empty_sigset,
- .flags = 0,
- };
- sigaction(SIG.PIPE, &act, null) catch |err|
- std.debug.panic("failed to install noop SIGPIPE handler with '{s}'", .{@errorName(err)});
+/// TODO on other OSes
+pub fn ptrace(request: i32, pid: pid_t, addr: ?[*]u8, signal: i32) PtraceError!void {
+ switch (builtin.os.tag) {
+ .macos, .ios, .tvos, .watchos => {},
+ else => @compileError("TODO implement ptrace"),
}
+ return switch (errno(system.ptrace(request, pid, addr, signal))) {
+ .SUCCESS => {},
+ .SRCH => error.ProcessNotFound,
+ .INVAL => unreachable,
+ .PERM => error.PermissionDenied,
+ .BUSY => error.DeviceBusy,
+ else => |err| return unexpectedErrno(err),
+ };
}
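A hedged sketch of the new `std.os.ptrace` wrapper, which is currently implemented for Darwin targets only: a child process marks itself traceable before exec. The raw request value 0 stands in for PT_TRACE_ME; that named constant is an assumption and not part of this change.

```zig
const std = @import("std");

fn traceMe() std.os.PtraceError!void {
    // 0 == PT_TRACE_ME on Darwin; pid, addr, and signal are unused for this request.
    try std.os.ptrace(0, 0, null, 0);
}
```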
diff --git a/lib/std/os/darwin.zig b/lib/std/os/darwin.zig
deleted file mode 100644
index 164a0e06c2..0000000000
--- a/lib/std/os/darwin.zig
+++ /dev/null
@@ -1,540 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const log = std.log;
-const mem = std.mem;
-
-pub const cssm = @import("darwin/cssm.zig");
-
-pub usingnamespace std.c;
-pub usingnamespace mach_task;
-
-const mach_task = if (builtin.target.isDarwin()) struct {
- pub const MachError = error{
- /// Not enough permissions held to perform the requested kernel
- /// call.
- PermissionDenied,
- /// Kernel returned an unhandled and unexpected error code.
- /// This is a catch-all for any yet unobserved kernel response
- /// to some Mach message.
- Unexpected,
- };
-
- pub const MachTask = extern struct {
- port: std.c.mach_port_name_t,
-
- pub fn isValid(self: MachTask) bool {
- return self.port != std.c.TASK_NULL;
- }
-
- pub fn pidForTask(self: MachTask) MachError!std.os.pid_t {
- var pid: std.os.pid_t = undefined;
- switch (std.c.getKernError(std.c.pid_for_task(self.port, &pid))) {
- .SUCCESS => return pid,
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("pid_for_task kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub fn allocatePort(self: MachTask, right: std.c.MACH_PORT_RIGHT) MachError!MachTask {
- var out_port: std.c.mach_port_name_t = undefined;
- switch (std.c.getKernError(std.c.mach_port_allocate(
- self.port,
- @enumToInt(right),
- &out_port,
- ))) {
- .SUCCESS => return .{ .port = out_port },
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("mach_task_allocate kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub fn deallocatePort(self: MachTask, port: MachTask) void {
- _ = std.c.getKernError(std.c.mach_port_deallocate(self.port, port.port));
- }
-
- pub fn insertRight(self: MachTask, port: MachTask, msg: std.c.MACH_MSG_TYPE) !void {
- switch (std.c.getKernError(std.c.mach_port_insert_right(
- self.port,
- port.port,
- port.port,
- @enumToInt(msg),
- ))) {
- .SUCCESS => return,
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("mach_port_insert_right kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub const PortInfo = struct {
- mask: std.c.exception_mask_t,
- masks: [std.c.EXC_TYPES_COUNT]std.c.exception_mask_t,
- ports: [std.c.EXC_TYPES_COUNT]std.c.mach_port_t,
- behaviors: [std.c.EXC_TYPES_COUNT]std.c.exception_behavior_t,
- flavors: [std.c.EXC_TYPES_COUNT]std.c.thread_state_flavor_t,
- count: std.c.mach_msg_type_number_t,
- };
-
- pub fn getExceptionPorts(self: MachTask, mask: std.c.exception_mask_t) !PortInfo {
- var info = PortInfo{
- .mask = mask,
- .masks = undefined,
- .ports = undefined,
- .behaviors = undefined,
- .flavors = undefined,
- .count = 0,
- };
- info.count = info.ports.len / @sizeOf(std.c.mach_port_t);
-
- switch (std.c.getKernError(std.c.task_get_exception_ports(
- self.port,
- info.mask,
- &info.masks,
- &info.count,
- &info.ports,
- &info.behaviors,
- &info.flavors,
- ))) {
- .SUCCESS => return info,
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("task_get_exception_ports kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub fn setExceptionPorts(
- self: MachTask,
- mask: std.c.exception_mask_t,
- new_port: MachTask,
- behavior: std.c.exception_behavior_t,
- new_flavor: std.c.thread_state_flavor_t,
- ) !void {
- switch (std.c.getKernError(std.c.task_set_exception_ports(
- self.port,
- mask,
- new_port.port,
- behavior,
- new_flavor,
- ))) {
- .SUCCESS => return,
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("task_set_exception_ports kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub const RegionInfo = struct {
- pub const Tag = enum {
- basic,
- extended,
- top,
- };
-
- base_addr: u64,
- tag: Tag,
- info: union {
- basic: std.c.vm_region_basic_info_64,
- extended: std.c.vm_region_extended_info,
- top: std.c.vm_region_top_info,
- },
- };
-
- pub fn getRegionInfo(
- task: MachTask,
- address: u64,
- len: usize,
- tag: RegionInfo.Tag,
- ) MachError!RegionInfo {
- var info: RegionInfo = .{
- .base_addr = address,
- .tag = tag,
- .info = undefined,
- };
- switch (tag) {
- .basic => info.info = .{ .basic = undefined },
- .extended => info.info = .{ .extended = undefined },
- .top => info.info = .{ .top = undefined },
- }
- var base_len: std.c.mach_vm_size_t = if (len == 1) 2 else len;
- var objname: std.c.mach_port_t = undefined;
- var count: std.c.mach_msg_type_number_t = switch (tag) {
- .basic => std.c.VM_REGION_BASIC_INFO_COUNT,
- .extended => std.c.VM_REGION_EXTENDED_INFO_COUNT,
- .top => std.c.VM_REGION_TOP_INFO_COUNT,
- };
- switch (std.c.getKernError(std.c.mach_vm_region(
- task.port,
- &info.base_addr,
- &base_len,
- switch (tag) {
- .basic => std.c.VM_REGION_BASIC_INFO_64,
- .extended => std.c.VM_REGION_EXTENDED_INFO,
- .top => std.c.VM_REGION_TOP_INFO,
- },
- switch (tag) {
- .basic => @ptrCast(std.c.vm_region_info_t, &info.info.basic),
- .extended => @ptrCast(std.c.vm_region_info_t, &info.info.extended),
- .top => @ptrCast(std.c.vm_region_info_t, &info.info.top),
- },
- &count,
- &objname,
- ))) {
- .SUCCESS => return info,
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("mach_vm_region kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub const RegionSubmapInfo = struct {
- pub const Tag = enum {
- short,
- full,
- };
-
- tag: Tag,
- base_addr: u64,
- info: union {
- short: std.c.vm_region_submap_short_info_64,
- full: std.c.vm_region_submap_info_64,
- },
- };
-
- pub fn getRegionSubmapInfo(
- task: MachTask,
- address: u64,
- len: usize,
- nesting_depth: u32,
- tag: RegionSubmapInfo.Tag,
- ) MachError!RegionSubmapInfo {
- var info: RegionSubmapInfo = .{
- .base_addr = address,
- .tag = tag,
- .info = undefined,
- };
- switch (tag) {
- .short => info.info = .{ .short = undefined },
- .full => info.info = .{ .full = undefined },
- }
- var nesting = nesting_depth;
- var base_len: std.c.mach_vm_size_t = if (len == 1) 2 else len;
- var count: std.c.mach_msg_type_number_t = switch (tag) {
- .short => std.c.VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
- .full => std.c.VM_REGION_SUBMAP_INFO_COUNT_64,
- };
- switch (std.c.getKernError(std.c.mach_vm_region_recurse(
- task.port,
- &info.base_addr,
- &base_len,
- &nesting,
- switch (tag) {
- .short => @ptrCast(std.c.vm_region_recurse_info_t, &info.info.short),
- .full => @ptrCast(std.c.vm_region_recurse_info_t, &info.info.full),
- },
- &count,
- ))) {
- .SUCCESS => return info,
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("mach_vm_region kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub fn getCurrProtection(task: MachTask, address: u64, len: usize) MachError!std.c.vm_prot_t {
- const info = try task.getRegionSubmapInfo(address, len, 0, .short);
- return info.info.short.protection;
- }
-
- pub fn setMaxProtection(task: MachTask, address: u64, len: usize, prot: std.c.vm_prot_t) MachError!void {
- return task.setProtectionImpl(address, len, true, prot);
- }
-
- pub fn setCurrProtection(task: MachTask, address: u64, len: usize, prot: std.c.vm_prot_t) MachError!void {
- return task.setProtectionImpl(address, len, false, prot);
- }
-
- fn setProtectionImpl(task: MachTask, address: u64, len: usize, set_max: bool, prot: std.c.vm_prot_t) MachError!void {
- switch (std.c.getKernError(std.c.mach_vm_protect(task.port, address, len, @boolToInt(set_max), prot))) {
- .SUCCESS => return,
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("mach_vm_protect kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- /// Will write to VM even if current protection attributes specifically prohibit
- /// us from doing so, by temporarily setting protection level to a level with VM_PROT_COPY
- /// variant, and resetting after a successful or unsuccessful write.
- pub fn writeMemProtected(task: MachTask, address: u64, buf: []const u8, arch: std.Target.Cpu.Arch) MachError!usize {
- const curr_prot = try task.getCurrProtection(address, buf.len);
- try task.setCurrProtection(
- address,
- buf.len,
- std.c.PROT.READ | std.c.PROT.WRITE | std.c.PROT.COPY,
- );
- defer {
- task.setCurrProtection(address, buf.len, curr_prot) catch {};
- }
- return task.writeMem(address, buf, arch);
- }
-
- pub fn writeMem(task: MachTask, address: u64, buf: []const u8, arch: std.Target.Cpu.Arch) MachError!usize {
- const count = buf.len;
- var total_written: usize = 0;
- var curr_addr = address;
- const page_size = try getPageSize(task); // TODO we probably can assume value here
- var out_buf = buf[0..];
-
- while (total_written < count) {
- const curr_size = maxBytesLeftInPage(page_size, curr_addr, count - total_written);
- switch (std.c.getKernError(std.c.mach_vm_write(
- task.port,
- curr_addr,
- @ptrToInt(out_buf.ptr),
- @intCast(std.c.mach_msg_type_number_t, curr_size),
- ))) {
- .SUCCESS => {},
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("mach_vm_write kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
-
- switch (arch) {
- .aarch64 => {
- var mattr_value: std.c.vm_machine_attribute_val_t = std.c.MATTR_VAL_CACHE_FLUSH;
- switch (std.c.getKernError(std.c.vm_machine_attribute(
- task.port,
- curr_addr,
- curr_size,
- std.c.MATTR_CACHE,
- &mattr_value,
- ))) {
- .SUCCESS => {},
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("vm_machine_attribute kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- },
- .x86_64 => {},
- else => unreachable,
- }
-
- out_buf = out_buf[curr_size..];
- total_written += curr_size;
- curr_addr += curr_size;
- }
-
- return total_written;
- }
-
- pub fn readMem(task: MachTask, address: u64, buf: []u8) MachError!usize {
- const count = buf.len;
- var total_read: usize = 0;
- var curr_addr = address;
- const page_size = try getPageSize(task); // TODO we probably can assume value here
- var out_buf = buf[0..];
-
- while (total_read < count) {
- const curr_size = maxBytesLeftInPage(page_size, curr_addr, count - total_read);
- var curr_bytes_read: std.c.mach_msg_type_number_t = 0;
- var vm_memory: std.c.vm_offset_t = undefined;
- switch (std.c.getKernError(std.c.mach_vm_read(task.port, curr_addr, curr_size, &vm_memory, &curr_bytes_read))) {
- .SUCCESS => {},
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("mach_vm_read kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
-
- @memcpy(out_buf[0..].ptr, @intToPtr([*]const u8, vm_memory), curr_bytes_read);
- _ = std.c.vm_deallocate(std.c.mach_task_self(), vm_memory, curr_bytes_read);
-
- out_buf = out_buf[curr_bytes_read..];
- curr_addr += curr_bytes_read;
- total_read += curr_bytes_read;
- }
-
- return total_read;
- }
-
- fn maxBytesLeftInPage(page_size: usize, address: u64, count: usize) usize {
- var left = count;
- if (page_size > 0) {
- const page_offset = address % page_size;
- const bytes_left_in_page = page_size - page_offset;
- if (count > bytes_left_in_page) {
- left = bytes_left_in_page;
- }
- }
- return left;
- }
-
- fn getPageSize(task: MachTask) MachError!usize {
- if (task.isValid()) {
- var info_count = std.c.TASK_VM_INFO_COUNT;
- var vm_info: std.c.task_vm_info_data_t = undefined;
- switch (std.c.getKernError(std.c.task_info(
- task.port,
- std.c.TASK_VM_INFO,
- @ptrCast(std.c.task_info_t, &vm_info),
- &info_count,
- ))) {
- .SUCCESS => return @intCast(usize, vm_info.page_size),
- else => {},
- }
- }
- var page_size: std.c.vm_size_t = undefined;
- switch (std.c.getKernError(std.c._host_page_size(std.c.mach_host_self(), &page_size))) {
- .SUCCESS => return page_size,
- else => |err| {
- log.err("_host_page_size kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub fn basicTaskInfo(task: MachTask) MachError!std.c.mach_task_basic_info {
- var info: std.c.mach_task_basic_info = undefined;
- var count = std.c.MACH_TASK_BASIC_INFO_COUNT;
- switch (std.c.getKernError(std.c.task_info(
- task.port,
- std.c.MACH_TASK_BASIC_INFO,
- @ptrCast(std.c.task_info_t, &info),
- &count,
- ))) {
- .SUCCESS => return info,
- else => |err| {
- log.err("task_info kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub fn @"resume"(task: MachTask) MachError!void {
- switch (std.c.getKernError(std.c.task_resume(task.port))) {
- .SUCCESS => {},
- else => |err| {
- log.err("task_resume kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub fn @"suspend"(task: MachTask) MachError!void {
- switch (std.c.getKernError(std.c.task_suspend(task.port))) {
- .SUCCESS => {},
- else => |err| {
- log.err("task_suspend kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- const ThreadList = struct {
- buf: []MachThread,
-
- pub fn deinit(list: ThreadList) void {
- const self_task = machTaskForSelf();
- _ = std.c.vm_deallocate(
- self_task.port,
- @ptrToInt(list.buf.ptr),
- @intCast(std.c.vm_size_t, list.buf.len * @sizeOf(std.c.mach_port_t)),
- );
- }
- };
-
- pub fn getThreads(task: MachTask) MachError!ThreadList {
- var thread_list: std.c.mach_port_array_t = undefined;
- var thread_count: std.c.mach_msg_type_number_t = undefined;
- switch (std.c.getKernError(std.c.task_threads(task.port, &thread_list, &thread_count))) {
- .SUCCESS => return ThreadList{ .buf = @ptrCast([*]MachThread, thread_list)[0..thread_count] },
- else => |err| {
- log.err("task_threads kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
- };
-
- pub const MachThread = extern struct {
- port: std.c.mach_port_t,
-
- pub fn isValid(thread: MachThread) bool {
- return thread.port != std.c.THREAD_NULL;
- }
-
- pub fn getBasicInfo(thread: MachThread) MachError!std.c.thread_basic_info {
- var info: std.c.thread_basic_info = undefined;
- var count = std.c.THREAD_BASIC_INFO_COUNT;
- switch (std.c.getKernError(std.c.thread_info(
- thread.port,
- std.c.THREAD_BASIC_INFO,
- @ptrCast(std.c.thread_info_t, &info),
- &count,
- ))) {
- .SUCCESS => return info,
- else => |err| {
- log.err("thread_info kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
-
- pub fn getIdentifierInfo(thread: MachThread) MachError!std.c.thread_identifier_info {
- var info: std.c.thread_identifier_info = undefined;
- var count = std.c.THREAD_IDENTIFIER_INFO_COUNT;
- switch (std.c.getKernError(std.c.thread_info(
- thread.port,
- std.c.THREAD_IDENTIFIER_INFO,
- @ptrCast(std.c.thread_info_t, &info),
- &count,
- ))) {
- .SUCCESS => return info,
- else => |err| {
- log.err("thread_info kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- }
- };
-
- pub fn machTaskForPid(pid: std.os.pid_t) MachError!MachTask {
- var port: std.c.mach_port_name_t = undefined;
- switch (std.c.getKernError(std.c.task_for_pid(std.c.mach_task_self(), pid, &port))) {
- .SUCCESS => {},
- .FAILURE => return error.PermissionDenied,
- else => |err| {
- log.err("task_for_pid kernel call failed with error code: {s}", .{@tagName(err)});
- return error.Unexpected;
- },
- }
- return MachTask{ .port = port };
- }
-
- pub fn machTaskForSelf() MachTask {
- return .{ .port = std.c.mach_task_self() };
- }
-} else struct {};
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index b151e5f235..53f6030b5f 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -944,6 +944,16 @@ pub fn waitpid(pid: pid_t, status: *u32, flags: u32) usize {
return syscall4(.wait4, @bitCast(usize, @as(isize, pid)), @ptrToInt(status), flags, 0);
}
+pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize {
+ return syscall4(
+ .wait4,
+ @bitCast(usize, @as(isize, pid)),
+ @ptrToInt(status),
+ flags,
+ @ptrToInt(usage),
+ );
+}
+
pub fn waitid(id_type: P, id: i32, infop: *siginfo_t, flags: u32) usize {
return syscall5(.waitid, @enumToInt(id_type), @bitCast(usize, @as(isize, id)), @ptrToInt(infop), flags, 0);
}
@@ -1716,26 +1726,26 @@ pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) u
);
}
-pub fn process_vm_readv(pid: pid_t, local: [*]const iovec, local_count: usize, remote: [*]const iovec, remote_count: usize, flags: usize) usize {
+pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, flags: usize) usize {
return syscall6(
.process_vm_readv,
@bitCast(usize, @as(isize, pid)),
- @ptrToInt(local),
- local_count,
- @ptrToInt(remote),
- remote_count,
+ @ptrToInt(local.ptr),
+ local.len,
+ @ptrToInt(remote.ptr),
+ remote.len,
flags,
);
}
-pub fn process_vm_writev(pid: pid_t, local: [*]const iovec, local_count: usize, remote: [*]const iovec, remote_count: usize, flags: usize) usize {
+pub fn process_vm_writev(pid: pid_t, local: []const iovec_const, remote: []const iovec_const, flags: usize) usize {
return syscall6(
.process_vm_writev,
@bitCast(usize, @as(isize, pid)),
- @ptrToInt(local),
- local_count,
- @ptrToInt(remote),
- remote_count,
+ @ptrToInt(local.ptr),
+ local.len,
+ @ptrToInt(remote.ptr),
+ remote.len,
flags,
);
}
@@ -1820,6 +1830,23 @@ pub fn seccomp(operation: u32, flags: u32, args: ?*const anyopaque) usize {
return syscall3(.seccomp, operation, flags, @ptrToInt(args));
}
+pub fn ptrace(
+ req: u32,
+ pid: pid_t,
+ addr: usize,
+ data: usize,
+ addr2: usize,
+) usize {
+ return syscall5(
+ .ptrace,
+ req,
+ @bitCast(usize, @as(isize, pid)),
+ addr,
+ data,
+ addr2,
+ );
+}
+
pub const E = switch (native_arch) {
.mips, .mipsel => @import("linux/errno/mips.zig").E,
.sparc, .sparcel, .sparc64 => @import("linux/errno/sparc.zig").E,
@@ -5721,3 +5748,40 @@ pub const AUDIT = struct {
}
};
};
+
+pub const PTRACE = struct {
+ pub const TRACEME = 0;
+ pub const PEEKTEXT = 1;
+ pub const PEEKDATA = 2;
+ pub const PEEKUSER = 3;
+ pub const POKETEXT = 4;
+ pub const POKEDATA = 5;
+ pub const POKEUSER = 6;
+ pub const CONT = 7;
+ pub const KILL = 8;
+ pub const SINGLESTEP = 9;
+ pub const GETREGS = 12;
+ pub const SETREGS = 13;
+ pub const GETFPREGS = 14;
+ pub const SETFPREGS = 15;
+ pub const ATTACH = 16;
+ pub const DETACH = 17;
+ pub const GETFPXREGS = 18;
+ pub const SETFPXREGS = 19;
+ pub const SYSCALL = 24;
+ pub const SETOPTIONS = 0x4200;
+ pub const GETEVENTMSG = 0x4201;
+ pub const GETSIGINFO = 0x4202;
+ pub const SETSIGINFO = 0x4203;
+ pub const GETREGSET = 0x4204;
+ pub const SETREGSET = 0x4205;
+ pub const SEIZE = 0x4206;
+ pub const INTERRUPT = 0x4207;
+ pub const LISTEN = 0x4208;
+ pub const PEEKSIGINFO = 0x4209;
+ pub const GETSIGMASK = 0x420a;
+ pub const SETSIGMASK = 0x420b;
+ pub const SECCOMP_GET_FILTER = 0x420c;
+ pub const SECCOMP_GET_METADATA = 0x420d;
+ pub const GET_SYSCALL_INFO = 0x420e;
+};
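For illustration, a minimal sketch (not part of the patch) combining the raw `linux.ptrace` syscall wrapper with the `PTRACE` constants above; the return value is a raw syscall result, so real callers would check it with `linux.getErrno`.

```zig
const std = @import("std");
const linux = std.os.linux;

fn attachAndDetach(pid: linux.pid_t) void {
    // addr, data, and addr2 are unused for ATTACH/DETACH.
    _ = linux.ptrace(linux.PTRACE.ATTACH, pid, 0, 0, 0);
    // ... wait for the tracee to stop, inspect it, then release it ...
    _ = linux.ptrace(linux.PTRACE.DETACH, pid, 0, 0, 0);
}
```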
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index 61bf39105f..4dbf87c501 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -1728,10 +1728,12 @@ test "writev/fsync/readv" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const path = "test_io_uring_writev_fsync_readv";
- const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
+ const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
defer file.close();
- defer std.fs.cwd().deleteFile(path) catch {};
const fd = file.handle;
const buffer_write = [_]u8{42} ** 128;
@@ -1796,10 +1798,11 @@ test "write/read" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
const path = "test_io_uring_write_read";
- const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
+ const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
defer file.close();
- defer std.fs.cwd().deleteFile(path) catch {};
const fd = file.handle;
const buffer_write = [_]u8{97} ** 20;
@@ -1842,10 +1845,12 @@ test "write_fixed/read_fixed" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const path = "test_io_uring_write_read_fixed";
- const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
+ const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
defer file.close();
- defer std.fs.cwd().deleteFile(path) catch {};
const fd = file.handle;
var raw_buffers: [2][11]u8 = undefined;
@@ -1899,8 +1904,10 @@ test "openat" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const path = "test_io_uring_openat";
- defer std.fs.cwd().deleteFile(path) catch {};
// Workaround for LLVM bug: https://github.com/ziglang/zig/issues/12014
const path_addr = if (builtin.zig_backend == .stage2_llvm) p: {
@@ -1910,12 +1917,12 @@ test "openat" {
const flags: u32 = os.O.CLOEXEC | os.O.RDWR | os.O.CREAT;
const mode: os.mode_t = 0o666;
- const sqe_openat = try ring.openat(0x33333333, linux.AT.FDCWD, path, flags, mode);
+ const sqe_openat = try ring.openat(0x33333333, tmp.dir.fd, path, flags, mode);
try testing.expectEqual(linux.io_uring_sqe{
.opcode = .OPENAT,
.flags = 0,
.ioprio = 0,
- .fd = linux.AT.FDCWD,
+ .fd = tmp.dir.fd,
.off = 0,
.addr = path_addr,
.len = mode,
@@ -1931,12 +1938,6 @@ test "openat" {
const cqe_openat = try ring.copy_cqe();
try testing.expectEqual(@as(u64, 0x33333333), cqe_openat.user_data);
if (cqe_openat.err() == .INVAL) return error.SkipZigTest;
- // AT.FDCWD is not fully supported before kernel 5.6:
- // See https://lore.kernel.org/io-uring/20200207155039.12819-1-axboe@kernel.dk/T/
- // We use IORING_FEAT_RW_CUR_POS to know if we are pre-5.6 since that feature was added in 5.6.
- if (cqe_openat.err() == .BADF and (ring.features & linux.IORING_FEAT_RW_CUR_POS) == 0) {
- return error.SkipZigTest;
- }
if (cqe_openat.res <= 0) std.debug.print("\ncqe_openat.res={}\n", .{cqe_openat.res});
try testing.expect(cqe_openat.res > 0);
try testing.expectEqual(@as(u32, 0), cqe_openat.flags);
@@ -1954,10 +1955,12 @@ test "close" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const path = "test_io_uring_close";
- const file = try std.fs.cwd().createFile(path, .{});
+ const file = try tmp.dir.createFile(path, .{});
errdefer file.close();
- defer std.fs.cwd().deleteFile(path) catch {};
const sqe_close = try ring.close(0x44444444, file.handle);
try testing.expectEqual(linux.IORING_OP.CLOSE, sqe_close.opcode);
@@ -1976,6 +1979,11 @@ test "close" {
test "accept/connect/send/recv" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
+ if (true) {
+ // https://github.com/ziglang/zig/issues/14907
+ return error.SkipZigTest;
+ }
+
var ring = IO_Uring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2017,6 +2025,11 @@ test "accept/connect/send/recv" {
test "sendmsg/recvmsg" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
+ if (true) {
+ // https://github.com/ziglang/zig/issues/14907
+ return error.SkipZigTest;
+ }
+
var ring = IO_Uring.init(2, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2024,6 +2037,7 @@ test "sendmsg/recvmsg" {
};
defer ring.deinit();
+ if (true) @compileError("don't hard code port numbers in unit tests"); // https://github.com/ziglang/zig/issues/14907
const address_server = try net.Address.parseIp4("127.0.0.1", 3131);
const server = try os.socket(address_server.any.family, os.SOCK.DGRAM, 0);
@@ -2223,6 +2237,11 @@ test "timeout_remove" {
test "accept/connect/recv/link_timeout" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
+ if (true) {
+ // https://github.com/ziglang/zig/issues/14907
+ return error.SkipZigTest;
+ }
+
var ring = IO_Uring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2279,10 +2298,12 @@ test "fallocate" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const path = "test_io_uring_fallocate";
- const file = try std.fs.cwd().createFile(path, .{ .truncate = true, .mode = 0o666 });
+ const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
defer file.close();
- defer std.fs.cwd().deleteFile(path) catch {};
try testing.expectEqual(@as(u64, 0), (try file.stat()).size);
@@ -2323,10 +2344,11 @@ test "statx" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
const path = "test_io_uring_statx";
- const file = try std.fs.cwd().createFile(path, .{ .truncate = true, .mode = 0o666 });
+ const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
defer file.close();
- defer std.fs.cwd().deleteFile(path) catch {};
try testing.expectEqual(@as(u64, 0), (try file.stat()).size);
@@ -2335,14 +2357,14 @@ test "statx" {
var buf: linux.Statx = undefined;
const sqe = try ring.statx(
0xaaaaaaaa,
- linux.AT.FDCWD,
+ tmp.dir.fd,
path,
0,
linux.STATX_SIZE,
&buf,
);
try testing.expectEqual(linux.IORING_OP.STATX, sqe.opcode);
- try testing.expectEqual(@as(i32, linux.AT.FDCWD), sqe.fd);
+ try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2355,8 +2377,6 @@ test "statx" {
// The filesystem containing the file referred to by fd does not support this operation;
// or the mode is not supported by the filesystem containing the file referred to by fd:
.OPNOTSUPP => return error.SkipZigTest,
- // The kernel is too old to support FDCWD for dir_fd
- .BADF => return error.SkipZigTest,
else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
}
try testing.expectEqual(linux.io_uring_cqe{
@@ -2372,6 +2392,11 @@ test "statx" {
test "accept/connect/recv/cancel" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
+ if (true) {
+ // https://github.com/ziglang/zig/issues/14907
+ return error.SkipZigTest;
+ }
+
var ring = IO_Uring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2509,6 +2534,11 @@ test "register_files_update" {
test "shutdown" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
+ if (true) {
+ // https://github.com/ziglang/zig/issues/14907
+ return error.SkipZigTest;
+ }
+
var ring = IO_Uring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2516,6 +2546,7 @@ test "shutdown" {
};
defer ring.deinit();
+ if (true) @compileError("don't hard code port numbers in unit tests"); // https://github.com/ziglang/zig/issues/14907
const address = try net.Address.parseIp4("127.0.0.1", 3131);
// Socket bound, expect shutdown to work
@@ -2579,28 +2610,28 @@ test "renameat" {
const old_path = "test_io_uring_renameat_old";
const new_path = "test_io_uring_renameat_new";
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
// Write old file with data
- const old_file = try std.fs.cwd().createFile(old_path, .{ .truncate = true, .mode = 0o666 });
- defer {
- old_file.close();
- std.fs.cwd().deleteFile(new_path) catch {};
- }
+ const old_file = try tmp.dir.createFile(old_path, .{ .truncate = true, .mode = 0o666 });
+ defer old_file.close();
try old_file.writeAll("hello");
// Submit renameat
var sqe = try ring.renameat(
0x12121212,
- linux.AT.FDCWD,
+ tmp.dir.fd,
old_path,
- linux.AT.FDCWD,
+ tmp.dir.fd,
new_path,
0,
);
try testing.expectEqual(linux.IORING_OP.RENAMEAT, sqe.opcode);
- try testing.expectEqual(@as(i32, linux.AT.FDCWD), sqe.fd);
- try testing.expectEqual(@as(i32, linux.AT.FDCWD), @bitCast(i32, sqe.len));
+ try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
+ try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len));
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2618,7 +2649,7 @@ test "renameat" {
// Validate that the old file doesn't exist anymore
{
- _ = std.fs.cwd().openFile(old_path, .{}) catch |err| switch (err) {
+ _ = tmp.dir.openFile(old_path, .{}) catch |err| switch (err) {
error.FileNotFound => {},
else => std.debug.panic("unexpected error: {}", .{err}),
};
@@ -2626,7 +2657,7 @@ test "renameat" {
// Validate that the new file exists with the proper content
{
- const new_file = try std.fs.cwd().openFile(new_path, .{});
+ const new_file = try tmp.dir.openFile(new_path, .{});
defer new_file.close();
var new_file_data: [16]u8 = undefined;
@@ -2647,22 +2678,24 @@ test "unlinkat" {
const path = "test_io_uring_unlinkat";
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
// Write old file with data
- const file = try std.fs.cwd().createFile(path, .{ .truncate = true, .mode = 0o666 });
+ const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
defer file.close();
- defer std.fs.cwd().deleteFile(path) catch {};
// Submit unlinkat
var sqe = try ring.unlinkat(
0x12121212,
- linux.AT.FDCWD,
+ tmp.dir.fd,
path,
0,
);
try testing.expectEqual(linux.IORING_OP.UNLINKAT, sqe.opcode);
- try testing.expectEqual(@as(i32, linux.AT.FDCWD), sqe.fd);
+ try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2679,7 +2712,7 @@ test "unlinkat" {
}, cqe);
// Validate that the file doesn't exist anymore
- _ = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
+ _ = tmp.dir.openFile(path, .{}) catch |err| switch (err) {
error.FileNotFound => {},
else => std.debug.panic("unexpected error: {}", .{err}),
};
@@ -2695,20 +2728,21 @@ test "mkdirat" {
};
defer ring.deinit();
- const path = "test_io_uring_mkdirat";
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
- defer std.fs.cwd().deleteDir(path) catch {};
+ const path = "test_io_uring_mkdirat";
// Submit mkdirat
var sqe = try ring.mkdirat(
0x12121212,
- linux.AT.FDCWD,
+ tmp.dir.fd,
path,
0o0755,
);
try testing.expectEqual(linux.IORING_OP.MKDIRAT, sqe.opcode);
- try testing.expectEqual(@as(i32, linux.AT.FDCWD), sqe.fd);
+ try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2725,7 +2759,7 @@ test "mkdirat" {
}, cqe);
// Validate that the directory exists
- _ = try std.fs.cwd().openDir(path, .{});
+ _ = try tmp.dir.openDir(path, .{});
}
test "symlinkat" {
@@ -2738,26 +2772,25 @@ test "symlinkat" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const path = "test_io_uring_symlinkat";
const link_path = "test_io_uring_symlinkat_link";
- const file = try std.fs.cwd().createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer {
- file.close();
- std.fs.cwd().deleteFile(path) catch {};
- std.fs.cwd().deleteFile(link_path) catch {};
- }
+ const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
+ defer file.close();
// Submit symlinkat
var sqe = try ring.symlinkat(
0x12121212,
path,
- linux.AT.FDCWD,
+ tmp.dir.fd,
link_path,
);
try testing.expectEqual(linux.IORING_OP.SYMLINKAT, sqe.opcode);
- try testing.expectEqual(@as(i32, linux.AT.FDCWD), sqe.fd);
+ try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2774,7 +2807,7 @@ test "symlinkat" {
}, cqe);
// Validate that the symlink exist
- _ = try std.fs.cwd().openFile(link_path, .{});
+ _ = try tmp.dir.openFile(link_path, .{});
}
test "linkat" {
@@ -2787,32 +2820,31 @@ test "linkat" {
};
defer ring.deinit();
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const first_path = "test_io_uring_linkat_first";
const second_path = "test_io_uring_linkat_second";
// Write file with data
- const first_file = try std.fs.cwd().createFile(first_path, .{ .truncate = true, .mode = 0o666 });
- defer {
- first_file.close();
- std.fs.cwd().deleteFile(first_path) catch {};
- std.fs.cwd().deleteFile(second_path) catch {};
- }
+ const first_file = try tmp.dir.createFile(first_path, .{ .truncate = true, .mode = 0o666 });
+ defer first_file.close();
try first_file.writeAll("hello");
// Submit linkat
var sqe = try ring.linkat(
0x12121212,
- linux.AT.FDCWD,
+ tmp.dir.fd,
first_path,
- linux.AT.FDCWD,
+ tmp.dir.fd,
second_path,
0,
);
try testing.expectEqual(linux.IORING_OP.LINKAT, sqe.opcode);
- try testing.expectEqual(@as(i32, linux.AT.FDCWD), sqe.fd);
- try testing.expectEqual(@as(i32, linux.AT.FDCWD), @bitCast(i32, sqe.len));
+ try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
+ try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len));
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2829,7 +2861,7 @@ test "linkat" {
}, cqe);
// Validate the second file
- const second_file = try std.fs.cwd().openFile(second_path, .{});
+ const second_file = try tmp.dir.openFile(second_path, .{});
defer second_file.close();
var second_file_data: [16]u8 = undefined;
@@ -3060,6 +3092,11 @@ test "remove_buffers" {
test "provide_buffers: accept/connect/send/recv" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
+ if (true) {
+ // https://github.com/ziglang/zig/issues/14907
+ return error.SkipZigTest;
+ }
+
var ring = IO_Uring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -3236,6 +3273,7 @@ const SocketTestHarness = struct {
fn createSocketTestHarness(ring: *IO_Uring) !SocketTestHarness {
// Create a TCP server socket
+ if (true) @compileError("don't hard code port numbers in unit tests"); // https://github.com/ziglang/zig/issues/14907
const address = try net.Address.parseIp4("127.0.0.1", 3131);
const kernel_backlog = 1;
const listener_socket = try os.socket(address.any.family, os.SOCK.STREAM | os.SOCK.CLOEXEC, 0);
diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig
index 63deab3edc..e1ad36b2e5 100644
--- a/lib/std/os/linux/test.zig
+++ b/lib/std/os/linux/test.zig
@@ -8,10 +8,12 @@ const expectEqual = std.testing.expectEqual;
const fs = std.fs;
test "fallocate" {
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const path = "test_fallocate";
- const file = try fs.cwd().createFile(path, .{ .truncate = true, .mode = 0o666 });
+ const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
defer file.close();
- defer fs.cwd().deleteFile(path) catch {};
try expect((try file.stat()).size == 0);
@@ -67,12 +69,12 @@ test "timer" {
}
test "statx" {
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const tmp_file_name = "just_a_temporary_file.txt";
- var file = try fs.cwd().createFile(tmp_file_name, .{});
- defer {
- file.close();
- fs.cwd().deleteFile(tmp_file_name) catch {};
- }
+ var file = try tmp.dir.createFile(tmp_file_name, .{});
+ defer file.close();
var statx_buf: linux.Statx = undefined;
switch (linux.getErrno(linux.statx(file.handle, "", linux.AT.EMPTY_PATH, linux.STATX_BASIC_STATS, &statx_buf))) {
@@ -105,21 +107,16 @@ test "user and group ids" {
}
test "fadvise" {
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const tmp_file_name = "temp_posix_fadvise.txt";
- var file = try fs.cwd().createFile(tmp_file_name, .{});
- defer {
- file.close();
- fs.cwd().deleteFile(tmp_file_name) catch {};
- }
+ var file = try tmp.dir.createFile(tmp_file_name, .{});
+ defer file.close();
var buf: [2048]u8 = undefined;
try file.writeAll(&buf);
- const ret = linux.fadvise(
- file.handle,
- 0,
- 0,
- linux.POSIX_FADV.SEQUENTIAL,
- );
+ const ret = linux.fadvise(file.handle, 0, 0, linux.POSIX_FADV.SEQUENTIAL);
try expectEqual(@as(usize, 0), ret);
}
diff --git a/lib/std/os/posix_spawn.zig b/lib/std/os/posix_spawn.zig
deleted file mode 100644
index 32904a9423..0000000000
--- a/lib/std/os/posix_spawn.zig
+++ /dev/null
@@ -1,290 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-
-const os = @import("../os.zig");
-const system = os.system;
-const errno = system.getErrno;
-const fd_t = system.fd_t;
-const mode_t = system.mode_t;
-const pid_t = system.pid_t;
-const unexpectedErrno = os.unexpectedErrno;
-const UnexpectedError = os.UnexpectedError;
-const toPosixPath = os.toPosixPath;
-const WaitPidResult = os.WaitPidResult;
-
-pub usingnamespace posix_spawn;
-
-pub const Error = error{
- SystemResources,
- InvalidFileDescriptor,
- NameTooLong,
- TooBig,
- PermissionDenied,
- InputOutput,
- FileSystem,
- FileNotFound,
- InvalidExe,
- NotDir,
- FileBusy,
-
- /// Returned when the child fails to execute either in the pre-exec() initialization step, or
- /// when exec(3) is invoked.
- ChildExecFailed,
-} || UnexpectedError;
-
-const posix_spawn = if (builtin.target.isDarwin()) struct {
- pub const Attr = struct {
- attr: system.posix_spawnattr_t,
-
- pub fn init() Error!Attr {
- var attr: system.posix_spawnattr_t = undefined;
- switch (errno(system.posix_spawnattr_init(&attr))) {
- .SUCCESS => return Attr{ .attr = attr },
- .NOMEM => return error.SystemResources,
- .INVAL => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn deinit(self: *Attr) void {
- defer self.* = undefined;
- switch (errno(system.posix_spawnattr_destroy(&self.attr))) {
- .SUCCESS => return,
- .INVAL => unreachable, // Invalid parameters.
- else => unreachable,
- }
- }
-
- pub fn get(self: Attr) Error!u16 {
- var flags: c_short = undefined;
- switch (errno(system.posix_spawnattr_getflags(&self.attr, &flags))) {
- .SUCCESS => return @bitCast(u16, flags),
- .INVAL => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn set(self: *Attr, flags: u16) Error!void {
- switch (errno(system.posix_spawnattr_setflags(&self.attr, @bitCast(c_short, flags)))) {
- .SUCCESS => return,
- .INVAL => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
- };
-
- pub const Actions = struct {
- actions: system.posix_spawn_file_actions_t,
-
- pub fn init() Error!Actions {
- var actions: system.posix_spawn_file_actions_t = undefined;
- switch (errno(system.posix_spawn_file_actions_init(&actions))) {
- .SUCCESS => return Actions{ .actions = actions },
- .NOMEM => return error.SystemResources,
- .INVAL => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn deinit(self: *Actions) void {
- defer self.* = undefined;
- switch (errno(system.posix_spawn_file_actions_destroy(&self.actions))) {
- .SUCCESS => return,
- .INVAL => unreachable, // Invalid parameters.
- else => unreachable,
- }
- }
-
- pub fn open(self: *Actions, fd: fd_t, path: []const u8, flags: u32, mode: mode_t) Error!void {
- const posix_path = try toPosixPath(path);
- return self.openZ(fd, &posix_path, flags, mode);
- }
-
- pub fn openZ(self: *Actions, fd: fd_t, path: [*:0]const u8, flags: u32, mode: mode_t) Error!void {
- switch (errno(system.posix_spawn_file_actions_addopen(&self.actions, fd, path, @bitCast(c_int, flags), mode))) {
- .SUCCESS => return,
- .BADF => return error.InvalidFileDescriptor,
- .NOMEM => return error.SystemResources,
- .NAMETOOLONG => return error.NameTooLong,
- .INVAL => unreachable, // the value of file actions is invalid
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn close(self: *Actions, fd: fd_t) Error!void {
- switch (errno(system.posix_spawn_file_actions_addclose(&self.actions, fd))) {
- .SUCCESS => return,
- .BADF => return error.InvalidFileDescriptor,
- .NOMEM => return error.SystemResources,
- .INVAL => unreachable, // the value of file actions is invalid
- .NAMETOOLONG => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn dup2(self: *Actions, fd: fd_t, newfd: fd_t) Error!void {
- switch (errno(system.posix_spawn_file_actions_adddup2(&self.actions, fd, newfd))) {
- .SUCCESS => return,
- .BADF => return error.InvalidFileDescriptor,
- .NOMEM => return error.SystemResources,
- .INVAL => unreachable, // the value of file actions is invalid
- .NAMETOOLONG => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn inherit(self: *Actions, fd: fd_t) Error!void {
- switch (errno(system.posix_spawn_file_actions_addinherit_np(&self.actions, fd))) {
- .SUCCESS => return,
- .BADF => return error.InvalidFileDescriptor,
- .NOMEM => return error.SystemResources,
- .INVAL => unreachable, // the value of file actions is invalid
- .NAMETOOLONG => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn chdir(self: *Actions, path: []const u8) Error!void {
- const posix_path = try toPosixPath(path);
- return self.chdirZ(&posix_path);
- }
-
- pub fn chdirZ(self: *Actions, path: [*:0]const u8) Error!void {
- switch (errno(system.posix_spawn_file_actions_addchdir_np(&self.actions, path))) {
- .SUCCESS => return,
- .NOMEM => return error.SystemResources,
- .NAMETOOLONG => return error.NameTooLong,
- .BADF => unreachable,
- .INVAL => unreachable, // the value of file actions is invalid
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn fchdir(self: *Actions, fd: fd_t) Error!void {
- switch (errno(system.posix_spawn_file_actions_addfchdir_np(&self.actions, fd))) {
- .SUCCESS => return,
- .BADF => return error.InvalidFileDescriptor,
- .NOMEM => return error.SystemResources,
- .INVAL => unreachable, // the value of file actions is invalid
- .NAMETOOLONG => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
- };
-
- pub fn spawn(
- path: []const u8,
- actions: ?Actions,
- attr: ?Attr,
- argv: [*:null]?[*:0]const u8,
- envp: [*:null]?[*:0]const u8,
- ) Error!pid_t {
- const posix_path = try toPosixPath(path);
- return spawnZ(&posix_path, actions, attr, argv, envp);
- }
-
- pub fn spawnZ(
- path: [*:0]const u8,
- actions: ?Actions,
- attr: ?Attr,
- argv: [*:null]?[*:0]const u8,
- envp: [*:null]?[*:0]const u8,
- ) Error!pid_t {
- var pid: pid_t = undefined;
- switch (errno(system.posix_spawn(
- &pid,
- path,
- if (actions) |a| &a.actions else null,
- if (attr) |a| &a.attr else null,
- argv,
- envp,
- ))) {
- .SUCCESS => return pid,
- .@"2BIG" => return error.TooBig,
- .NOMEM => return error.SystemResources,
- .BADF => return error.InvalidFileDescriptor,
- .ACCES => return error.PermissionDenied,
- .IO => return error.InputOutput,
- .LOOP => return error.FileSystem,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOEXEC => return error.InvalidExe,
- .NOTDIR => return error.NotDir,
- .TXTBSY => return error.FileBusy,
- .BADARCH => return error.InvalidExe,
- .BADEXEC => return error.InvalidExe,
- .FAULT => unreachable,
- .INVAL => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub fn spawnp(
- file: []const u8,
- actions: ?Actions,
- attr: ?Attr,
- argv: [*:null]?[*:0]const u8,
- envp: [*:null]?[*:0]const u8,
- ) Error!pid_t {
- const posix_file = try toPosixPath(file);
- return spawnpZ(&posix_file, actions, attr, argv, envp);
- }
-
- pub fn spawnpZ(
- file: [*:0]const u8,
- actions: ?Actions,
- attr: ?Attr,
- argv: [*:null]?[*:0]const u8,
- envp: [*:null]?[*:0]const u8,
- ) Error!pid_t {
- var pid: pid_t = undefined;
- switch (errno(system.posix_spawnp(
- &pid,
- file,
- if (actions) |a| &a.actions else null,
- if (attr) |a| &a.attr else null,
- argv,
- envp,
- ))) {
- .SUCCESS => return pid,
- .@"2BIG" => return error.TooBig,
- .NOMEM => return error.SystemResources,
- .BADF => return error.InvalidFileDescriptor,
- .ACCES => return error.PermissionDenied,
- .IO => return error.InputOutput,
- .LOOP => return error.FileSystem,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOEXEC => return error.InvalidExe,
- .NOTDIR => return error.NotDir,
- .TXTBSY => return error.FileBusy,
- .BADARCH => return error.InvalidExe,
- .BADEXEC => return error.InvalidExe,
- .FAULT => unreachable,
- .INVAL => unreachable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- /// Use this version of the `waitpid` wrapper if you spawned your child process using `posix_spawn`
- /// or `posix_spawnp` syscalls.
- /// See also `std.os.waitpid` for an alternative if your child process was spawned via `fork` and
- /// `execve` method.
- pub fn waitpid(pid: pid_t, flags: u32) Error!WaitPidResult {
- const Status = if (builtin.link_libc) c_int else u32;
- var status: Status = undefined;
- while (true) {
- const rc = system.waitpid(pid, &status, if (builtin.link_libc) @intCast(c_int, flags) else flags);
- switch (errno(rc)) {
- .SUCCESS => return WaitPidResult{
- .pid = @intCast(pid_t, rc),
- .status = @bitCast(u32, status),
- },
- .INTR => continue,
- .CHILD => return error.ChildExecFailed,
- .INVAL => unreachable, // Invalid flags.
- else => unreachable,
- }
- }
- }
-} else struct {};
diff --git a/lib/std/os/ptrace.zig b/lib/std/os/ptrace.zig
deleted file mode 100644
index afe0b51e2e..0000000000
--- a/lib/std/os/ptrace.zig
+++ /dev/null
@@ -1,28 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-
-const os = @import("../os.zig");
-const system = os.system;
-const errno = system.getErrno;
-const pid_t = system.pid_t;
-const unexpectedErrno = os.unexpectedErrno;
-const UnexpectedError = os.UnexpectedError;
-
-pub usingnamespace ptrace;
-
-const ptrace = if (builtin.target.isDarwin()) struct {
- pub const PtraceError = error{
- ProcessNotFound,
- PermissionDenied,
- } || UnexpectedError;
-
- pub fn ptrace(request: i32, pid: pid_t, addr: ?[*]u8, signal: i32) PtraceError!void {
- switch (errno(system.ptrace(request, pid, addr, signal))) {
- .SUCCESS => return,
- .SRCH => return error.ProcessNotFound,
- .INVAL => unreachable,
- .BUSY, .PERM => return error.PermissionDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-} else struct {};
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index b63fdb9f92..5576200ea5 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -105,41 +105,53 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
// If we're not following symlinks, we need to ensure we don't pass in any synchronization flags such as FILE_SYNCHRONOUS_IO_NONALERT.
const flags: ULONG = if (options.follow_symlinks) file_or_dir_flag | blocking_flag else file_or_dir_flag | FILE_OPEN_REPARSE_POINT;
- const rc = ntdll.NtCreateFile(
- &result,
- options.access_mask,
- &attr,
- &io,
- null,
- FILE_ATTRIBUTE_NORMAL,
- options.share_access,
- options.creation,
- flags,
- null,
- 0,
- );
- switch (rc) {
- .SUCCESS => {
- if (std.io.is_async and options.io_mode == .evented) {
- _ = CreateIoCompletionPort(result, std.event.Loop.instance.?.os_data.io_port, undefined, undefined) catch undefined;
- }
- return result;
- },
- .OBJECT_NAME_INVALID => unreachable,
- .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
- .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
- .NO_MEDIA_IN_DEVICE => return error.NoDevice,
- .INVALID_PARAMETER => unreachable,
- .SHARING_VIOLATION => return error.AccessDenied,
- .ACCESS_DENIED => return error.AccessDenied,
- .PIPE_BUSY => return error.PipeBusy,
- .OBJECT_PATH_SYNTAX_BAD => unreachable,
- .OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
- .FILE_IS_A_DIRECTORY => return error.IsDir,
- .NOT_A_DIRECTORY => return error.NotDir,
- .USER_MAPPED_FILE => return error.AccessDenied,
- .INVALID_HANDLE => unreachable,
- else => return unexpectedStatus(rc),
+ while (true) {
+ const rc = ntdll.NtCreateFile(
+ &result,
+ options.access_mask,
+ &attr,
+ &io,
+ null,
+ FILE_ATTRIBUTE_NORMAL,
+ options.share_access,
+ options.creation,
+ flags,
+ null,
+ 0,
+ );
+ switch (rc) {
+ .SUCCESS => {
+ if (std.io.is_async and options.io_mode == .evented) {
+ _ = CreateIoCompletionPort(result, std.event.Loop.instance.?.os_data.io_port, undefined, undefined) catch undefined;
+ }
+ return result;
+ },
+ .OBJECT_NAME_INVALID => unreachable,
+ .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
+ .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
+ .NO_MEDIA_IN_DEVICE => return error.NoDevice,
+ .INVALID_PARAMETER => unreachable,
+ .SHARING_VIOLATION => return error.AccessDenied,
+ .ACCESS_DENIED => return error.AccessDenied,
+ .PIPE_BUSY => return error.PipeBusy,
+ .OBJECT_PATH_SYNTAX_BAD => unreachable,
+ .OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
+ .FILE_IS_A_DIRECTORY => return error.IsDir,
+ .NOT_A_DIRECTORY => return error.NotDir,
+ .USER_MAPPED_FILE => return error.AccessDenied,
+ .INVALID_HANDLE => unreachable,
+ .DELETE_PENDING => {
+ // This error means that there *was* a file in this location on
+ // the file system, but it was deleted. However, the OS is not
+ // finished with the deletion operation, and so this CreateFile
+ // call has failed. There is not really a sane way to handle
+ // this other than retrying the creation after the OS finishes
+ // the deletion.
+ std.time.sleep(std.time.ns_per_ms);
+ continue;
+ },
+ else => return unexpectedStatus(rc),
+ }
}
}
diff --git a/lib/std/os/windows/advapi32.zig b/lib/std/os/windows/advapi32.zig
index 67234a26e0..bace7ce850 100644
--- a/lib/std/os/windows/advapi32.zig
+++ b/lib/std/os/windows/advapi32.zig
@@ -27,6 +27,8 @@ pub extern "advapi32" fn RegQueryValueExW(
lpcbData: ?*DWORD,
) callconv(WINAPI) LSTATUS;
+pub extern "advapi32" fn RegCloseKey(hKey: HKEY) callconv(WINAPI) LSTATUS;
+
// RtlGenRandom is known as SystemFunction036 under advapi32
// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx */
pub extern "advapi32" fn SystemFunction036(output: [*]u8, length: ULONG) callconv(WINAPI) BOOL;
diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig
index d3bfeaaf2c..1fd8a406d5 100644
--- a/lib/std/os/windows/kernel32.zig
+++ b/lib/std/os/windows/kernel32.zig
@@ -67,6 +67,7 @@ const RUNTIME_FUNCTION = windows.RUNTIME_FUNCTION;
const KNONVOLATILE_CONTEXT_POINTERS = windows.KNONVOLATILE_CONTEXT_POINTERS;
const EXCEPTION_ROUTINE = windows.EXCEPTION_ROUTINE;
const MODULEENTRY32 = windows.MODULEENTRY32;
+const ULONGLONG = windows.ULONGLONG;
pub extern "kernel32" fn AddVectoredExceptionHandler(First: c_ulong, Handler: ?VECTORED_EXCEPTION_HANDLER) callconv(WINAPI) ?*anyopaque;
pub extern "kernel32" fn RemoveVectoredExceptionHandler(Handle: HANDLE) callconv(WINAPI) c_ulong;
@@ -457,3 +458,5 @@ pub extern "kernel32" fn RegOpenKeyExW(
samDesired: REGSAM,
phkResult: *HKEY,
) callconv(WINAPI) LSTATUS;
+
+pub extern "kernel32" fn GetPhysicallyInstalledSystemMemory(TotalMemoryInKilobytes: *ULONGLONG) BOOL;
diff --git a/lib/std/process.zig b/lib/std/process.zig
index eff29e86fa..d06a012af2 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -828,24 +828,6 @@ pub fn argsWithAllocator(allocator: Allocator) ArgIterator.InitError!ArgIterator
return ArgIterator.initWithAllocator(allocator);
}
-test "args iterator" {
- var ga = std.testing.allocator;
- var it = try argsWithAllocator(ga);
- defer it.deinit(); // no-op unless WASI or Windows
-
- const prog_name = it.next() orelse unreachable;
- const expected_suffix = switch (builtin.os.tag) {
- .wasi => "test.wasm",
- .windows => "test.exe",
- else => "test",
- };
- const given_suffix = std.fs.path.basename(prog_name);
-
- try testing.expect(mem.eql(u8, expected_suffix, given_suffix));
- try testing.expect(it.next() == null);
- try testing.expect(!it.skip());
-}
-
/// Caller must call argsFree on result.
pub fn argsAlloc(allocator: Allocator) ![][:0]u8 {
// TODO refactor to only make 1 allocation.
@@ -1169,3 +1151,51 @@ pub fn execve(
return os.execvpeZ_expandArg0(.no_expand, argv_buf.ptr[0].?, argv_buf.ptr, envp);
}
+
+pub const TotalSystemMemoryError = error{
+ UnknownTotalSystemMemory,
+};
+
+/// Returns the total system memory, in bytes.
+pub fn totalSystemMemory() TotalSystemMemoryError!usize {
+ switch (builtin.os.tag) {
+ .linux => {
+ return totalSystemMemoryLinux() catch return error.UnknownTotalSystemMemory;
+ },
+ .windows => {
+ var kilobytes: std.os.windows.ULONGLONG = undefined;
+ assert(std.os.windows.kernel32.GetPhysicallyInstalledSystemMemory(&kilobytes) == std.os.windows.TRUE);
+ return kilobytes * 1024;
+ },
+ else => return error.UnknownTotalSystemMemory,
+ }
+}
+
+fn totalSystemMemoryLinux() !usize {
+ var file = try std.fs.openFileAbsoluteZ("/proc/meminfo", .{});
+ defer file.close();
+ var buf: [50]u8 = undefined;
+ const amt = try file.read(&buf);
+ if (amt != 50) return error.Unexpected;
+ var it = std.mem.tokenize(u8, buf[0..amt], " \n");
+ const label = it.next().?;
+ if (!std.mem.eql(u8, label, "MemTotal:")) return error.Unexpected;
+ const int_text = it.next() orelse return error.Unexpected;
+ const units = it.next() orelse return error.Unexpected;
+ if (!std.mem.eql(u8, units, "kB")) return error.Unexpected;
+ const kilobytes = try std.fmt.parseInt(usize, int_text, 10);
+ return kilobytes * 1024;
+}
+
+/// Indicate that we are now terminating with a successful exit code.
+/// In debug builds, this is a no-op, so that the calling code's
+/// cleanup mechanisms are tested and so that external tools that
+/// check for resource leaks can be accurate. In release builds, this
+/// calls exit(0), and does not return.
+pub fn cleanExit() void {
+ if (builtin.mode == .Debug) {
+ return;
+ } else {
+ exit(0);
+ }
+}
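
A minimal sketch of how the `std.process` additions above might be used together; the program shape and the MiB formatting are illustrative, not part of the patch:

const std = @import("std");

pub fn main() void {
    // totalSystemMemory() reports installed memory in bytes, or
    // error.UnknownTotalSystemMemory on platforms it does not know about.
    const total = std.process.totalSystemMemory() catch {
        std.debug.print("total system memory: unknown on this OS\n", .{});
        return;
    };
    std.debug.print("total system memory: {d} MiB\n", .{total / (1024 * 1024)});

    // In Debug builds this returns so deferred cleanup still runs and leak
    // checkers stay accurate; in release builds it calls exit(0).
    std.process.cleanExit();
}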
diff --git a/lib/std/std.zig b/lib/std/std.zig
index c1c682e224..92ebdf595b 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -185,6 +185,16 @@ pub const options = struct {
options_override.keep_sigpipe
else
false;
+
+ pub const http_connection_pool_size = if (@hasDecl(options_override, "http_connection_pool_size"))
+ options_override.http_connection_pool_size
+ else
+ http.Client.default_connection_pool_size;
+
+ pub const side_channels_mitigations: crypto.SideChannelsMitigations = if (@hasDecl(options_override, "side_channels_mitigations"))
+ options_override.side_channels_mitigations
+ else
+ crypto.default_side_channels_mitigations;
};
// This forces the start.zig file to be imported, and the comptime logic inside that
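
The two new defaults above are read from the root file's `std_options` override. A hypothetical root source file could pin them like this; the concrete values, and the assumption that `SideChannelsMitigations` exposes a `.full` level, are illustrative:

const std = @import("std");

pub const std_options = struct {
    // Cap the std.http.Client connection pool for this application.
    pub const http_connection_pool_size = 4;
    // Ask std.crypto for its strongest side-channel mitigations.
    pub const side_channels_mitigations: std.crypto.SideChannelsMitigations = .full;
};

pub fn main() void {}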
diff --git a/lib/std/target.zig b/lib/std/target.zig
index f93105d9d6..b04c5d11cf 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -724,11 +724,7 @@ pub const Target = struct {
/// Adds the specified feature set but not its dependencies.
pub fn addFeatureSet(set: *Set, other_set: Set) void {
- if (builtin.zig_backend == .stage2_c) {
- for (&set.ints, 0..) |*int, i| int.* |= other_set.ints[i];
- } else {
- set.ints = @as(@Vector(usize_count, usize), set.ints) | @as(@Vector(usize_count, usize), other_set.ints);
- }
+ set.ints = @as(@Vector(usize_count, usize), set.ints) | @as(@Vector(usize_count, usize), other_set.ints);
}
/// Removes the specified feature but not its dependents.
@@ -740,11 +736,7 @@ pub const Target = struct {
/// Removes the specified feature but not its dependents.
pub fn removeFeatureSet(set: *Set, other_set: Set) void {
- if (builtin.zig_backend == .stage2_c) {
- for (&set.ints, 0..) |*int, i| int.* &= ~other_set.ints[i];
- } else {
- set.ints = @as(@Vector(usize_count, usize), set.ints) & ~@as(@Vector(usize_count, usize), other_set.ints);
- }
+ set.ints = @as(@Vector(usize_count, usize), set.ints) & ~@as(@Vector(usize_count, usize), other_set.ints);
}
pub fn populateDependencies(set: *Set, all_features_list: []const Cpu.Feature) void {
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index f85cf75e60..98edeabd10 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -3,6 +3,9 @@ const tokenizer = @import("zig/tokenizer.zig");
const fmt = @import("zig/fmt.zig");
const assert = std.debug.assert;
+pub const ErrorBundle = @import("zig/ErrorBundle.zig");
+pub const Server = @import("zig/Server.zig");
+pub const Client = @import("zig/Client.zig");
pub const Token = tokenizer.Token;
pub const Tokenizer = tokenizer.Tokenizer;
pub const fmtId = fmt.fmtId;
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index f99d58aafa..cb86696e13 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -1407,7 +1407,8 @@ pub fn containerField(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = extra.value_expr,
.align_expr = extra.align_expr,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
@@ -1420,7 +1421,8 @@ pub fn containerFieldInit(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = data.rhs,
.align_expr = 0,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
@@ -1433,7 +1435,8 @@ pub fn containerFieldAlign(tree: Ast, node: Node.Index) full.ContainerField {
.type_expr = data.lhs,
.value_expr = 0,
.align_expr = data.rhs,
- .tuple_like = tree.tokens.items(.tag)[main_token + 1] != .colon,
+ .tuple_like = tree.tokens.items(.tag)[main_token] != .identifier or
+ tree.tokens.items(.tag)[main_token + 1] != .colon,
});
}
diff --git a/lib/std/zig/Client.zig b/lib/std/zig/Client.zig
new file mode 100644
index 0000000000..af4c29d37d
--- /dev/null
+++ b/lib/std/zig/Client.zig
@@ -0,0 +1,39 @@
+pub const Message = struct {
+ pub const Header = extern struct {
+ tag: Tag,
+ /// Size of the body only; does not include this Header.
+ bytes_len: u32,
+ };
+
+ pub const Tag = enum(u32) {
+ /// Tells the compiler to shut down cleanly.
+ /// No body.
+ exit,
+ /// Tells the compiler to detect changes in source files and update the
+ /// affected output compilation artifacts.
+ /// If one of the compilation artifacts is an executable that is
+ /// running as a child process, the compiler will wait for it to exit
+ /// before performing the update.
+ /// No body.
+ update,
+ /// Tells the compiler to execute the executable as a child process.
+ /// No body.
+ run,
+ /// Tells the compiler to detect changes in source files and update the
+ /// affected output compilation artifacts.
+ /// If one of the compilation artifacts is an executable that is
+ /// running as a child process, the compiler will perform a hot code
+ /// swap.
+ /// No body.
+ hot_update,
+ /// Ask the test runner for metadata about all the unit tests that can
+ /// be run. Server will respond with a `test_metadata` message.
+ /// No body.
+ query_test_metadata,
+ /// Ask the test runner to run a particular test.
+ /// The message body is a u32 test index.
+ run_test,
+
+ _,
+ };
+};
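
As a rough sketch of how a front end could frame one of these requests over the compiler's stdin (assuming a little-endian host, since the wire format is little-endian; `compiler_stdin` is a placeholder for the spawned child's stdin handle):

const std = @import("std");

fn requestUpdate(compiler_stdin: std.fs.File) !void {
    // `update` carries no body, so bytes_len is zero and only the
    // 8-byte header goes over the pipe.
    const header: std.zig.Client.Message.Header = .{
        .tag = .update,
        .bytes_len = 0,
    };
    try compiler_stdin.writeAll(std.mem.asBytes(&header));
}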
diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig
new file mode 100644
index 0000000000..ffe748203e
--- /dev/null
+++ b/lib/std/zig/ErrorBundle.zig
@@ -0,0 +1,515 @@
+//! To support incremental compilation, errors are stored in various places
+//! so that they can be created and destroyed appropriately. This structure
+//! is used to collect all the errors from the various places into one
+//! convenient place for API users to consume.
+//!
+//! There is one special encoding for this data structure. If both arrays are
+//! empty, it means there are no errors. This special encoding exists so that
+//! heap allocation is not needed in the common case of no errors.
+
+string_bytes: []const u8,
+/// The first thing in this array is an `ErrorMessageList`.
+extra: []const u32,
+
+/// Special encoding when there are no errors.
+pub const empty: ErrorBundle = .{
+ .string_bytes = &.{},
+ .extra = &.{},
+};
+
+// An index into `extra` pointing at an `ErrorMessage`.
+pub const MessageIndex = enum(u32) {
+ _,
+};
+
+// An index into `extra` pointing at a `SourceLocation`.
+pub const SourceLocationIndex = enum(u32) {
+ none = 0,
+ _,
+};
+
+/// There will be a MessageIndex for each len at start.
+pub const ErrorMessageList = struct {
+ len: u32,
+ start: u32,
+ /// null-terminated string index. 0 means no compile log text.
+ compile_log_text: u32,
+};
+
+/// Trailing:
+/// * ReferenceTrace for each reference_trace_len
+pub const SourceLocation = struct {
+ /// null terminated string index
+ src_path: u32,
+ line: u32,
+ column: u32,
+ /// byte offset of starting token
+ span_start: u32,
+ /// byte offset of main error location
+ span_main: u32,
+ /// byte offset of end of last token
+ span_end: u32,
+ /// null terminated string index, possibly null.
+ /// Does not include the trailing newline.
+ source_line: u32 = 0,
+ reference_trace_len: u32 = 0,
+};
+
+/// Trailing:
+/// * MessageIndex for each notes_len.
+pub const ErrorMessage = struct {
+ /// null terminated string index
+ msg: u32,
+ /// Usually one, but incremented for redundant messages.
+ count: u32 = 1,
+ src_loc: SourceLocationIndex = .none,
+ notes_len: u32 = 0,
+};
+
+pub const ReferenceTrace = struct {
+ /// null terminated string index
+ /// Except for the sentinel ReferenceTrace element, in which case:
+ /// * 0 means remaining references hidden
+ /// * >0 means N references hidden
+ decl_name: u32,
+ /// Index into extra of a SourceLocation
+ /// If this is 0, this is the sentinel ReferenceTrace element.
+ src_loc: SourceLocationIndex,
+};
+
+pub fn deinit(eb: *ErrorBundle, gpa: Allocator) void {
+ gpa.free(eb.string_bytes);
+ gpa.free(eb.extra);
+ eb.* = undefined;
+}
+
+pub fn errorMessageCount(eb: ErrorBundle) u32 {
+ if (eb.extra.len == 0) return 0;
+ return eb.getErrorMessageList().len;
+}
+
+pub fn getErrorMessageList(eb: ErrorBundle) ErrorMessageList {
+ return eb.extraData(ErrorMessageList, 0).data;
+}
+
+pub fn getMessages(eb: ErrorBundle) []const MessageIndex {
+ const list = eb.getErrorMessageList();
+ return @ptrCast([]const MessageIndex, eb.extra[list.start..][0..list.len]);
+}
+
+pub fn getErrorMessage(eb: ErrorBundle, index: MessageIndex) ErrorMessage {
+ return eb.extraData(ErrorMessage, @enumToInt(index)).data;
+}
+
+pub fn getSourceLocation(eb: ErrorBundle, index: SourceLocationIndex) SourceLocation {
+ assert(index != .none);
+ return eb.extraData(SourceLocation, @enumToInt(index)).data;
+}
+
+pub fn getNotes(eb: ErrorBundle, index: MessageIndex) []const MessageIndex {
+ const notes_len = eb.getErrorMessage(index).notes_len;
+ const start = @enumToInt(index) + @typeInfo(ErrorMessage).Struct.fields.len;
+ return @ptrCast([]const MessageIndex, eb.extra[start..][0..notes_len]);
+}
+
+pub fn getCompileLogOutput(eb: ErrorBundle) [:0]const u8 {
+ return nullTerminatedString(eb, getErrorMessageList(eb).compile_log_text);
+}
+
+/// Returns the requested data, as well as the new index which is at the start of the
+/// trailers for the object.
+fn extraData(eb: ErrorBundle, comptime T: type, index: usize) struct { data: T, end: usize } {
+ const fields = @typeInfo(T).Struct.fields;
+ var i: usize = index;
+ var result: T = undefined;
+ inline for (fields) |field| {
+ @field(result, field.name) = switch (field.type) {
+ u32 => eb.extra[i],
+ MessageIndex => @intToEnum(MessageIndex, eb.extra[i]),
+ SourceLocationIndex => @intToEnum(SourceLocationIndex, eb.extra[i]),
+ else => @compileError("bad field type"),
+ };
+ i += 1;
+ }
+ return .{
+ .data = result,
+ .end = i,
+ };
+}
+
+/// Given an index into `string_bytes` returns the null-terminated string found there.
+pub fn nullTerminatedString(eb: ErrorBundle, index: usize) [:0]const u8 {
+ const string_bytes = eb.string_bytes;
+ var end: usize = index;
+ while (string_bytes[end] != 0) {
+ end += 1;
+ }
+ return string_bytes[index..end :0];
+}
+
+pub const RenderOptions = struct {
+ ttyconf: std.debug.TTY.Config,
+ include_reference_trace: bool = true,
+ include_source_line: bool = true,
+ include_log_text: bool = true,
+};
+
+pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions) void {
+ std.debug.getStderrMutex().lock();
+ defer std.debug.getStderrMutex().unlock();
+ const stderr = std.io.getStdErr();
+ return renderToWriter(eb, options, stderr.writer()) catch return;
+}
+
+pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, writer: anytype) anyerror!void {
+ for (eb.getMessages()) |err_msg| {
+ try renderErrorMessageToWriter(eb, options, err_msg, writer, "error", .Red, 0);
+ }
+
+ if (options.include_log_text) {
+ const log_text = eb.getCompileLogOutput();
+ if (log_text.len != 0) {
+ try writer.writeAll("\nCompile Log Output:\n");
+ try writer.writeAll(log_text);
+ }
+ }
+}
+
+fn renderErrorMessageToWriter(
+ eb: ErrorBundle,
+ options: RenderOptions,
+ err_msg_index: MessageIndex,
+ stderr: anytype,
+ kind: []const u8,
+ color: std.debug.TTY.Color,
+ indent: usize,
+) anyerror!void {
+ const ttyconf = options.ttyconf;
+ var counting_writer = std.io.countingWriter(stderr);
+ const counting_stderr = counting_writer.writer();
+ const err_msg = eb.getErrorMessage(err_msg_index);
+ if (err_msg.src_loc != .none) {
+ const src = eb.extraData(SourceLocation, @enumToInt(err_msg.src_loc));
+ try counting_stderr.writeByteNTimes(' ', indent);
+ try ttyconf.setColor(stderr, .Bold);
+ try counting_stderr.print("{s}:{d}:{d}: ", .{
+ eb.nullTerminatedString(src.data.src_path),
+ src.data.line + 1,
+ src.data.column + 1,
+ });
+ try ttyconf.setColor(stderr, color);
+ try counting_stderr.writeAll(kind);
+ try counting_stderr.writeAll(": ");
+ // This is the length of the part before the error message:
+ // e.g. "file.zig:4:5: error: "
+ const prefix_len = @intCast(usize, counting_stderr.context.bytes_written);
+ try ttyconf.setColor(stderr, .Reset);
+ try ttyconf.setColor(stderr, .Bold);
+ if (err_msg.count == 1) {
+ try writeMsg(eb, err_msg, stderr, prefix_len);
+ try stderr.writeByte('\n');
+ } else {
+ try writeMsg(eb, err_msg, stderr, prefix_len);
+ try ttyconf.setColor(stderr, .Dim);
+ try stderr.print(" ({d} times)\n", .{err_msg.count});
+ }
+ try ttyconf.setColor(stderr, .Reset);
+ if (src.data.source_line != 0 and options.include_source_line) {
+ const line = eb.nullTerminatedString(src.data.source_line);
+ for (line) |b| switch (b) {
+ '\t' => try stderr.writeByte(' '),
+ else => try stderr.writeByte(b),
+ };
+ try stderr.writeByte('\n');
+ // TODO basic unicode code point monospace width
+ const before_caret = src.data.span_main - src.data.span_start;
+ // -1 since span.main includes the caret
+ const after_caret = src.data.span_end - src.data.span_main -| 1;
+ try stderr.writeByteNTimes(' ', src.data.column - before_caret);
+ try ttyconf.setColor(stderr, .Green);
+ try stderr.writeByteNTimes('~', before_caret);
+ try stderr.writeByte('^');
+ try stderr.writeByteNTimes('~', after_caret);
+ try stderr.writeByte('\n');
+ try ttyconf.setColor(stderr, .Reset);
+ }
+ for (eb.getNotes(err_msg_index)) |note| {
+ try renderErrorMessageToWriter(eb, options, note, stderr, "note", .Cyan, indent);
+ }
+ if (src.data.reference_trace_len > 0 and options.include_reference_trace) {
+ try ttyconf.setColor(stderr, .Reset);
+ try ttyconf.setColor(stderr, .Dim);
+ try stderr.print("referenced by:\n", .{});
+ var ref_index = src.end;
+ for (0..src.data.reference_trace_len) |_| {
+ const ref_trace = eb.extraData(ReferenceTrace, ref_index);
+ ref_index = ref_trace.end;
+ if (ref_trace.data.src_loc != .none) {
+ const ref_src = eb.getSourceLocation(ref_trace.data.src_loc);
+ try stderr.print(" {s}: {s}:{d}:{d}\n", .{
+ eb.nullTerminatedString(ref_trace.data.decl_name),
+ eb.nullTerminatedString(ref_src.src_path),
+ ref_src.line + 1,
+ ref_src.column + 1,
+ });
+ } else if (ref_trace.data.decl_name != 0) {
+ const count = ref_trace.data.decl_name;
+ try stderr.print(
+ " {d} reference(s) hidden; use '-freference-trace={d}' to see all references\n",
+ .{ count, count + src.data.reference_trace_len - 1 },
+ );
+ } else {
+ try stderr.print(
+ " remaining reference traces hidden; use '-freference-trace' to see all reference traces\n",
+ .{},
+ );
+ }
+ }
+ try stderr.writeByte('\n');
+ try ttyconf.setColor(stderr, .Reset);
+ }
+ } else {
+ try ttyconf.setColor(stderr, color);
+ try stderr.writeByteNTimes(' ', indent);
+ try stderr.writeAll(kind);
+ try stderr.writeAll(": ");
+ try ttyconf.setColor(stderr, .Reset);
+ const msg = eb.nullTerminatedString(err_msg.msg);
+ if (err_msg.count == 1) {
+ try stderr.print("{s}\n", .{msg});
+ } else {
+ try stderr.print("{s}", .{msg});
+ try ttyconf.setColor(stderr, .Dim);
+ try stderr.print(" ({d} times)\n", .{err_msg.count});
+ }
+ try ttyconf.setColor(stderr, .Reset);
+ for (eb.getNotes(err_msg_index)) |note| {
+ try renderErrorMessageToWriter(eb, options, note, stderr, "note", .Cyan, indent + 4);
+ }
+ }
+}
+
+/// Splits the error message up into lines to properly indent them
+/// to allow for long, good-looking error messages.
+///
+/// This is used to split the message in `@compileError("hello\nworld")` for example.
+fn writeMsg(eb: ErrorBundle, err_msg: ErrorMessage, stderr: anytype, indent: usize) !void {
+ var lines = std.mem.split(u8, eb.nullTerminatedString(err_msg.msg), "\n");
+ while (lines.next()) |line| {
+ try stderr.writeAll(line);
+ if (lines.index == null) break;
+ try stderr.writeByte('\n');
+ try stderr.writeByteNTimes(' ', indent);
+ }
+}
+
+const std = @import("std");
+const ErrorBundle = @This();
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
+pub const Wip = struct {
+ gpa: Allocator,
+ string_bytes: std.ArrayListUnmanaged(u8),
+ /// The first thing in this array is an ErrorMessageList.
+ extra: std.ArrayListUnmanaged(u32),
+ root_list: std.ArrayListUnmanaged(MessageIndex),
+
+ pub fn init(wip: *Wip, gpa: Allocator) !void {
+ wip.* = .{
+ .gpa = gpa,
+ .string_bytes = .{},
+ .extra = .{},
+ .root_list = .{},
+ };
+
+ // So that 0 can be used to indicate a null string.
+ try wip.string_bytes.append(gpa, 0);
+
+ assert(0 == try addExtra(wip, ErrorMessageList{
+ .len = 0,
+ .start = 0,
+ .compile_log_text = 0,
+ }));
+ }
+
+ pub fn deinit(wip: *Wip) void {
+ const gpa = wip.gpa;
+ wip.root_list.deinit(gpa);
+ wip.string_bytes.deinit(gpa);
+ wip.extra.deinit(gpa);
+ wip.* = undefined;
+ }
+
+ pub fn toOwnedBundle(wip: *Wip, compile_log_text: []const u8) !ErrorBundle {
+ const gpa = wip.gpa;
+ if (wip.root_list.items.len == 0) {
+ assert(compile_log_text.len == 0);
+ // Special encoding when there are no errors.
+ wip.deinit();
+ wip.* = .{
+ .gpa = gpa,
+ .string_bytes = .{},
+ .extra = .{},
+ .root_list = .{},
+ };
+ return empty;
+ }
+
+ const compile_log_str_index = if (compile_log_text.len == 0) 0 else str: {
+ const str = @intCast(u32, wip.string_bytes.items.len);
+ try wip.string_bytes.ensureUnusedCapacity(gpa, compile_log_text.len + 1);
+ wip.string_bytes.appendSliceAssumeCapacity(compile_log_text);
+ wip.string_bytes.appendAssumeCapacity(0);
+ break :str str;
+ };
+
+ wip.setExtra(0, ErrorMessageList{
+ .len = @intCast(u32, wip.root_list.items.len),
+ .start = @intCast(u32, wip.extra.items.len),
+ .compile_log_text = compile_log_str_index,
+ });
+ try wip.extra.appendSlice(gpa, @ptrCast([]const u32, wip.root_list.items));
+ wip.root_list.clearAndFree(gpa);
+ return .{
+ .string_bytes = try wip.string_bytes.toOwnedSlice(gpa),
+ .extra = try wip.extra.toOwnedSlice(gpa),
+ };
+ }
+
+ pub fn tmpBundle(wip: Wip) ErrorBundle {
+ return .{
+ .string_bytes = wip.string_bytes.items,
+ .extra = wip.extra.items,
+ };
+ }
+
+ pub fn addString(wip: *Wip, s: []const u8) !u32 {
+ const gpa = wip.gpa;
+ const index = @intCast(u32, wip.string_bytes.items.len);
+ try wip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
+ wip.string_bytes.appendSliceAssumeCapacity(s);
+ wip.string_bytes.appendAssumeCapacity(0);
+ return index;
+ }
+
+ pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) !u32 {
+ const gpa = wip.gpa;
+ const index = @intCast(u32, wip.string_bytes.items.len);
+ try wip.string_bytes.writer(gpa).print(fmt, args);
+ try wip.string_bytes.append(gpa, 0);
+ return index;
+ }
+
+ pub fn addRootErrorMessage(wip: *Wip, em: ErrorMessage) !void {
+ try wip.root_list.ensureUnusedCapacity(wip.gpa, 1);
+ wip.root_list.appendAssumeCapacity(try addErrorMessage(wip, em));
+ }
+
+ pub fn addErrorMessage(wip: *Wip, em: ErrorMessage) !MessageIndex {
+ return @intToEnum(MessageIndex, try addExtra(wip, em));
+ }
+
+ pub fn addErrorMessageAssumeCapacity(wip: *Wip, em: ErrorMessage) MessageIndex {
+ return @intToEnum(MessageIndex, addExtraAssumeCapacity(wip, em));
+ }
+
+ pub fn addSourceLocation(wip: *Wip, sl: SourceLocation) !SourceLocationIndex {
+ return @intToEnum(SourceLocationIndex, try addExtra(wip, sl));
+ }
+
+ pub fn addReferenceTrace(wip: *Wip, rt: ReferenceTrace) !void {
+ _ = try addExtra(wip, rt);
+ }
+
+ pub fn addBundle(wip: *Wip, other: ErrorBundle) !void {
+ const gpa = wip.gpa;
+
+ try wip.string_bytes.ensureUnusedCapacity(gpa, other.string_bytes.len);
+ try wip.extra.ensureUnusedCapacity(gpa, other.extra.len);
+
+ const other_list = other.getMessages();
+
+ // The ensureUnusedCapacity call above guarantees this.
+ const notes_start = wip.reserveNotes(@intCast(u32, other_list.len)) catch unreachable;
+ for (notes_start.., other_list) |note, message| {
+ wip.extra.items[note] = @enumToInt(wip.addOtherMessage(other, message) catch unreachable);
+ }
+ }
+
+ pub fn reserveNotes(wip: *Wip, notes_len: u32) !u32 {
+ try wip.extra.ensureUnusedCapacity(wip.gpa, notes_len +
+ notes_len * @typeInfo(ErrorBundle.ErrorMessage).Struct.fields.len);
+ wip.extra.items.len += notes_len;
+ return @intCast(u32, wip.extra.items.len - notes_len);
+ }
+
+ fn addOtherMessage(wip: *Wip, other: ErrorBundle, msg_index: MessageIndex) !MessageIndex {
+ const other_msg = other.getErrorMessage(msg_index);
+ const src_loc = try wip.addOtherSourceLocation(other, other_msg.src_loc);
+ const msg = try wip.addErrorMessage(.{
+ .msg = try wip.addString(other.nullTerminatedString(other_msg.msg)),
+ .count = other_msg.count,
+ .src_loc = src_loc,
+ .notes_len = other_msg.notes_len,
+ });
+ const notes_start = try wip.reserveNotes(other_msg.notes_len);
+ for (notes_start.., other.getNotes(msg_index)) |note, other_note| {
+ wip.extra.items[note] = @enumToInt(try wip.addOtherMessage(other, other_note));
+ }
+ return msg;
+ }
+
+ fn addOtherSourceLocation(
+ wip: *Wip,
+ other: ErrorBundle,
+ index: SourceLocationIndex,
+ ) !SourceLocationIndex {
+ if (index == .none) return .none;
+ const other_sl = other.getSourceLocation(index);
+
+ const src_loc = try wip.addSourceLocation(.{
+ .src_path = try wip.addString(other.nullTerminatedString(other_sl.src_path)),
+ .line = other_sl.line,
+ .column = other_sl.column,
+ .span_start = other_sl.span_start,
+ .span_main = other_sl.span_main,
+ .span_end = other_sl.span_end,
+ .source_line = try wip.addString(other.nullTerminatedString(other_sl.source_line)),
+ .reference_trace_len = other_sl.reference_trace_len,
+ });
+
+ // TODO: also add the reference trace
+
+ return src_loc;
+ }
+
+ fn addExtra(wip: *Wip, extra: anytype) Allocator.Error!u32 {
+ const gpa = wip.gpa;
+ const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
+ try wip.extra.ensureUnusedCapacity(gpa, fields.len);
+ return addExtraAssumeCapacity(wip, extra);
+ }
+
+ fn addExtraAssumeCapacity(wip: *Wip, extra: anytype) u32 {
+ const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
+ const result = @intCast(u32, wip.extra.items.len);
+ wip.extra.items.len += fields.len;
+ setExtra(wip, result, extra);
+ return result;
+ }
+
+ fn setExtra(wip: *Wip, index: usize, extra: anytype) void {
+ const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
+ var i = index;
+ inline for (fields) |field| {
+ wip.extra.items[i] = switch (field.type) {
+ u32 => @field(extra, field.name),
+ MessageIndex => @enumToInt(@field(extra, field.name)),
+ SourceLocationIndex => @enumToInt(@field(extra, field.name)),
+ else => @compileError("bad field type"),
+ };
+ i += 1;
+ }
+ }
+};
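
A short sketch of the intended builder workflow using only the APIs defined above; the error text, allocator, and `.no_color` tty setting are illustrative:

const std = @import("std");

fn reportMissingFile(gpa: std.mem.Allocator, path: []const u8) !void {
    var wip: std.zig.ErrorBundle.Wip = undefined;
    try wip.init(gpa);
    defer wip.deinit();

    // count, src_loc, and notes_len all have defaults, so a bare
    // message only needs its string index.
    try wip.addRootErrorMessage(.{
        .msg = try wip.printString("unable to open '{s}'", .{path}),
    });

    // No compile log output in this sketch.
    var bundle = try wip.toOwnedBundle("");
    defer bundle.deinit(gpa);

    bundle.renderToStdErr(.{ .ttyconf = .no_color });
}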
diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig
new file mode 100644
index 0000000000..788e361782
--- /dev/null
+++ b/lib/std/zig/Server.zig
@@ -0,0 +1,305 @@
+in: std.fs.File,
+out: std.fs.File,
+receive_fifo: std.fifo.LinearFifo(u8, .Dynamic),
+
+pub const Message = struct {
+ pub const Header = extern struct {
+ tag: Tag,
+ /// Size of the body only; does not include this Header.
+ bytes_len: u32,
+ };
+
+ pub const Tag = enum(u32) {
+ /// Body is a UTF-8 string.
+ zig_version,
+ /// Body is an ErrorBundle.
+ error_bundle,
+ /// Body is a UTF-8 string.
+ progress,
+ /// Body is an EmitBinPath.
+ emit_bin_path,
+ /// Body is a TestMetadata
+ test_metadata,
+ /// Body is a TestResults
+ test_results,
+
+ _,
+ };
+
+ /// Trailing:
+ /// * extra: [extra_len]u32,
+ /// * string_bytes: [string_bytes_len]u8,
+ /// See `std.zig.ErrorBundle`.
+ pub const ErrorBundle = extern struct {
+ extra_len: u32,
+ string_bytes_len: u32,
+ };
+
+ /// Trailing:
+ /// * name: [tests_len]u32
+ /// - null-terminated string_bytes index
+ /// * async_frame_len: [tests_len]u32,
+ /// - 0 means not async
+ /// * expected_panic_msg: [tests_len]u32,
+ /// - null-terminated string_bytes index
+ /// - 0 means does not expect panic
+ /// * string_bytes: [string_bytes_len]u8,
+ pub const TestMetadata = extern struct {
+ string_bytes_len: u32,
+ tests_len: u32,
+ };
+
+ pub const TestResults = extern struct {
+ index: u32,
+ flags: Flags,
+
+ pub const Flags = packed struct(u8) {
+ fail: bool,
+ skip: bool,
+ leak: bool,
+
+ reserved: u5 = 0,
+ };
+ };
+
+ /// Trailing:
+ /// * the file system path where the emitted binary can be found
+ pub const EmitBinPath = extern struct {
+ flags: Flags,
+
+ pub const Flags = packed struct(u8) {
+ cache_hit: bool,
+ reserved: u7 = 0,
+ };
+ };
+};
+
+pub const Options = struct {
+ gpa: Allocator,
+ in: std.fs.File,
+ out: std.fs.File,
+ zig_version: []const u8,
+};
+
+pub fn init(options: Options) !Server {
+ var s: Server = .{
+ .in = options.in,
+ .out = options.out,
+ .receive_fifo = std.fifo.LinearFifo(u8, .Dynamic).init(options.gpa),
+ };
+ try s.serveStringMessage(.zig_version, options.zig_version);
+ return s;
+}
+
+pub fn deinit(s: *Server) void {
+ s.receive_fifo.deinit();
+ s.* = undefined;
+}
+
+pub fn receiveMessage(s: *Server) !InMessage.Header {
+ const Header = InMessage.Header;
+ const fifo = &s.receive_fifo;
+
+ while (true) {
+ const buf = fifo.readableSlice(0);
+ assert(fifo.readableLength() == buf.len);
+ if (buf.len >= @sizeOf(Header)) {
+ const header = @ptrCast(*align(1) const Header, buf[0..@sizeOf(Header)]);
+ // workaround for https://github.com/ziglang/zig/issues/14904
+ const bytes_len = bswap_and_workaround_u32(&header.bytes_len);
+ // workaround for https://github.com/ziglang/zig/issues/14904
+ const tag = bswap_and_workaround_tag(&header.tag);
+
+ if (buf.len - @sizeOf(Header) >= bytes_len) {
+ fifo.discard(@sizeOf(Header));
+ return .{
+ .tag = tag,
+ .bytes_len = bytes_len,
+ };
+ } else {
+ const needed = bytes_len - (buf.len - @sizeOf(Header));
+ const write_buffer = try fifo.writableWithSize(needed);
+ const amt = try s.in.read(write_buffer);
+ fifo.update(amt);
+ continue;
+ }
+ }
+
+ const write_buffer = try fifo.writableWithSize(256);
+ const amt = try s.in.read(write_buffer);
+ fifo.update(amt);
+ }
+}
+
+pub fn receiveBody_u32(s: *Server) !u32 {
+ const fifo = &s.receive_fifo;
+ const buf = fifo.readableSlice(0);
+ const result = @ptrCast(*align(1) const u32, buf[0..4]).*;
+ fifo.discard(4);
+ return bswap(result);
+}
+
+pub fn serveStringMessage(s: *Server, tag: OutMessage.Tag, msg: []const u8) !void {
+ return s.serveMessage(.{
+ .tag = tag,
+ .bytes_len = @intCast(u32, msg.len),
+ }, &.{msg});
+}
+
+pub fn serveMessage(
+ s: *const Server,
+ header: OutMessage.Header,
+ bufs: []const []const u8,
+) !void {
+ var iovecs: [10]std.os.iovec_const = undefined;
+ const header_le = bswap(header);
+ iovecs[0] = .{
+ .iov_base = @ptrCast([*]const u8, &header_le),
+ .iov_len = @sizeOf(OutMessage.Header),
+ };
+ for (bufs, iovecs[1 .. bufs.len + 1]) |buf, *iovec| {
+ iovec.* = .{
+ .iov_base = buf.ptr,
+ .iov_len = buf.len,
+ };
+ }
+ try s.out.writevAll(iovecs[0 .. bufs.len + 1]);
+}
+
+pub fn serveEmitBinPath(
+ s: *Server,
+ fs_path: []const u8,
+ header: OutMessage.EmitBinPath,
+) !void {
+ try s.serveMessage(.{
+ .tag = .emit_bin_path,
+ .bytes_len = @intCast(u32, fs_path.len + @sizeOf(OutMessage.EmitBinPath)),
+ }, &.{
+ std.mem.asBytes(&header),
+ fs_path,
+ });
+}
+
+pub fn serveTestResults(
+ s: *Server,
+ msg: OutMessage.TestResults,
+) !void {
+ const msg_le = bswap(msg);
+ try s.serveMessage(.{
+ .tag = .test_results,
+ .bytes_len = @intCast(u32, @sizeOf(OutMessage.TestResults)),
+ }, &.{
+ std.mem.asBytes(&msg_le),
+ });
+}
+
+pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void {
+ const eb_hdr: OutMessage.ErrorBundle = .{
+ .extra_len = @intCast(u32, error_bundle.extra.len),
+ .string_bytes_len = @intCast(u32, error_bundle.string_bytes.len),
+ };
+ const bytes_len = @sizeOf(OutMessage.ErrorBundle) +
+ 4 * error_bundle.extra.len + error_bundle.string_bytes.len;
+ try s.serveMessage(.{
+ .tag = .error_bundle,
+ .bytes_len = @intCast(u32, bytes_len),
+ }, &.{
+ std.mem.asBytes(&eb_hdr),
+ // TODO: implement @ptrCast between slices changing the length
+ std.mem.sliceAsBytes(error_bundle.extra),
+ error_bundle.string_bytes,
+ });
+}
+
+pub const TestMetadata = struct {
+ names: []u32,
+ async_frame_sizes: []u32,
+ expected_panic_msgs: []u32,
+ string_bytes: []const u8,
+};
+
+pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
+ const header: OutMessage.TestMetadata = .{
+ .tests_len = bswap(@intCast(u32, test_metadata.names.len)),
+ .string_bytes_len = bswap(@intCast(u32, test_metadata.string_bytes.len)),
+ };
+ const bytes_len = @sizeOf(OutMessage.TestMetadata) +
+ 3 * 4 * test_metadata.names.len + test_metadata.string_bytes.len;
+
+ if (need_bswap) {
+ bswap_u32_array(test_metadata.names);
+ bswap_u32_array(test_metadata.async_frame_sizes);
+ bswap_u32_array(test_metadata.expected_panic_msgs);
+ }
+ defer if (need_bswap) {
+ bswap_u32_array(test_metadata.names);
+ bswap_u32_array(test_metadata.async_frame_sizes);
+ bswap_u32_array(test_metadata.expected_panic_msgs);
+ };
+
+ return s.serveMessage(.{
+ .tag = .test_metadata,
+ .bytes_len = @intCast(u32, bytes_len),
+ }, &.{
+ std.mem.asBytes(&header),
+ // TODO: implement @ptrCast between slices changing the length
+ std.mem.sliceAsBytes(test_metadata.names),
+ std.mem.sliceAsBytes(test_metadata.async_frame_sizes),
+ std.mem.sliceAsBytes(test_metadata.expected_panic_msgs),
+ test_metadata.string_bytes,
+ });
+}
+
+fn bswap(x: anytype) @TypeOf(x) {
+ if (!need_bswap) return x;
+
+ const T = @TypeOf(x);
+ switch (@typeInfo(T)) {
+ .Enum => return @intToEnum(T, @byteSwap(@enumToInt(x))),
+ .Int => return @byteSwap(x),
+ .Struct => |info| switch (info.layout) {
+ .Extern => {
+ var result: T = undefined;
+ inline for (info.fields) |field| {
+ @field(result, field.name) = bswap(@field(x, field.name));
+ }
+ return result;
+ },
+ .Packed => {
+ const I = info.backing_integer.?;
+ return @bitCast(T, @byteSwap(@bitCast(I, x)));
+ },
+ .Auto => @compileError("auto layout struct"),
+ },
+ else => @compileError("bswap on type " ++ @typeName(T)),
+ }
+}
+
+fn bswap_u32_array(slice: []u32) void {
+ comptime assert(need_bswap);
+ for (slice) |*elem| elem.* = @byteSwap(elem.*);
+}
+
+/// workaround for https://github.com/ziglang/zig/issues/14904
+fn bswap_and_workaround_u32(x: *align(1) const u32) u32 {
+ const bytes_ptr = @ptrCast(*const [4]u8, x);
+ return std.mem.readIntLittle(u32, bytes_ptr);
+}
+
+/// workaround for https://github.com/ziglang/zig/issues/14904
+fn bswap_and_workaround_tag(x: *align(1) const InMessage.Tag) InMessage.Tag {
+ const bytes_ptr = @ptrCast(*const [4]u8, x);
+ const int = std.mem.readIntLittle(u32, bytes_ptr);
+ return @intToEnum(InMessage.Tag, int);
+}
+
+const OutMessage = std.zig.Server.Message;
+const InMessage = std.zig.Client.Message;
+
+const Server = @This();
+const builtin = @import("builtin");
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const native_endian = builtin.target.cpu.arch.endian();
+const need_bswap = native_endian != .Little;
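
On the other side of the pipe, a client (for example the build runner) reads these messages back. A hedged sketch of such a read loop, again assuming a little-endian host and with `compiler_stdout` as a placeholder for the child's stdout handle:

const std = @import("std");

fn readCompilerMessages(gpa: std.mem.Allocator, compiler_stdout: std.fs.File) !void {
    const Header = std.zig.Server.Message.Header;
    var body: std.ArrayListUnmanaged(u8) = .{};
    defer body.deinit(gpa);

    while (true) {
        // Fixed-size header first, then bytes_len bytes of body.
        var header: Header = undefined;
        try compiler_stdout.reader().readNoEof(std.mem.asBytes(&header));
        try body.resize(gpa, header.bytes_len);
        try compiler_stdout.reader().readNoEof(body.items);

        switch (header.tag) {
            .zig_version => std.debug.print("compiler version: {s}\n", .{body.items}),
            .progress => std.debug.print("progress: {s}\n", .{body.items}),
            // Body is an EmitBinPath header followed by the path bytes.
            .emit_bin_path => break,
            // error_bundle, test_metadata, test_results need real decoding.
            else => {},
        }
    }
}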
diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig
index dbbebb43c9..987358ed5a 100644
--- a/lib/std/zig/system/NativeTargetInfo.zig
+++ b/lib/std/zig/system/NativeTargetInfo.zig
@@ -1090,6 +1090,11 @@ pub fn getExternalExecutor(
switch (candidate.target.os.tag) {
.windows => {
if (options.allow_wine) {
+ // x86_64 wine does not support emulating aarch64-windows and
+ // vice versa.
+ if (candidate.target.cpu.arch != builtin.cpu.arch) {
+ return bad_result;
+ }
switch (candidate.target.cpu.arch.ptrBitWidth()) {
32 => return Executor{ .wine = "wine" },
64 => return Executor{ .wine = "wine64" },
diff --git a/lib/test_runner.zig b/lib/test_runner.zig
index 5968fdaa54..3d87264851 100644
--- a/lib/test_runner.zig
+++ b/lib/test_runner.zig
@@ -8,14 +8,126 @@ pub const std_options = struct {
};
var log_err_count: usize = 0;
+var cmdline_buffer: [4096]u8 = undefined;
+var fba = std.heap.FixedBufferAllocator.init(&cmdline_buffer);
pub fn main() void {
- if (builtin.zig_backend != .stage1 and
- builtin.zig_backend != .stage2_llvm and
- builtin.zig_backend != .stage2_c)
+ if (builtin.zig_backend == .stage2_wasm or
+ builtin.zig_backend == .stage2_x86_64 or
+ builtin.zig_backend == .stage2_aarch64)
{
- return main2() catch @panic("test failure");
+ return mainSimple() catch @panic("test failure");
}
+
+ const args = std.process.argsAlloc(fba.allocator()) catch
+ @panic("unable to parse command line args");
+
+ var listen = false;
+
+ for (args[1..]) |arg| {
+ if (std.mem.eql(u8, arg, "--listen=-")) {
+ listen = true;
+ } else {
+ @panic("unrecognized command line argument");
+ }
+ }
+
+ if (listen) {
+ return mainServer() catch @panic("internal test runner failure");
+ } else {
+ return mainTerminal();
+ }
+}
+
+fn mainServer() !void {
+ var server = try std.zig.Server.init(.{
+ .gpa = fba.allocator(),
+ .in = std.io.getStdIn(),
+ .out = std.io.getStdOut(),
+ .zig_version = builtin.zig_version_string,
+ });
+ defer server.deinit();
+
+ while (true) {
+ const hdr = try server.receiveMessage();
+ switch (hdr.tag) {
+ .exit => {
+ return std.process.exit(0);
+ },
+ .query_test_metadata => {
+ std.testing.allocator_instance = .{};
+ defer if (std.testing.allocator_instance.deinit()) {
+ @panic("internal test runner memory leak");
+ };
+
+ var string_bytes: std.ArrayListUnmanaged(u8) = .{};
+ defer string_bytes.deinit(std.testing.allocator);
+ try string_bytes.append(std.testing.allocator, 0); // Reserve 0 for null.
+
+ const test_fns = builtin.test_functions;
+ const names = try std.testing.allocator.alloc(u32, test_fns.len);
+ defer std.testing.allocator.free(names);
+ const async_frame_sizes = try std.testing.allocator.alloc(u32, test_fns.len);
+ defer std.testing.allocator.free(async_frame_sizes);
+ const expected_panic_msgs = try std.testing.allocator.alloc(u32, test_fns.len);
+ defer std.testing.allocator.free(expected_panic_msgs);
+
+ for (test_fns, names, async_frame_sizes, expected_panic_msgs) |test_fn, *name, *async_frame_size, *expected_panic_msg| {
+ name.* = @intCast(u32, string_bytes.items.len);
+ try string_bytes.ensureUnusedCapacity(std.testing.allocator, test_fn.name.len + 1);
+ string_bytes.appendSliceAssumeCapacity(test_fn.name);
+ string_bytes.appendAssumeCapacity(0);
+
+ async_frame_size.* = @intCast(u32, test_fn.async_frame_size orelse 0);
+ expected_panic_msg.* = 0;
+ }
+
+ try server.serveTestMetadata(.{
+ .names = names,
+ .async_frame_sizes = async_frame_sizes,
+ .expected_panic_msgs = expected_panic_msgs,
+ .string_bytes = string_bytes.items,
+ });
+ },
+
+ .run_test => {
+ std.testing.allocator_instance = .{};
+ const index = try server.receiveBody_u32();
+ const test_fn = builtin.test_functions[index];
+ if (test_fn.async_frame_size != null)
+ @panic("TODO test runner implement async tests");
+ var fail = false;
+ var skip = false;
+ var leak = false;
+ test_fn.func() catch |err| switch (err) {
+ error.SkipZigTest => skip = true,
+ else => {
+ fail = true;
+ if (@errorReturnTrace()) |trace| {
+ std.debug.dumpStackTrace(trace.*);
+ }
+ },
+ };
+ leak = std.testing.allocator_instance.deinit();
+ try server.serveTestResults(.{
+ .index = index,
+ .flags = .{
+ .fail = fail,
+ .skip = skip,
+ .leak = leak,
+ },
+ });
+ },
+
+ else => {
+ std.debug.print("unsupported message: {x}", .{@enumToInt(hdr.tag)});
+ std.process.exit(1);
+ },
+ }
+ }
+}
+
+fn mainTerminal() void {
const test_fn_list = builtin.test_functions;
var ok_count: usize = 0;
var skip_count: usize = 0;
@@ -118,51 +230,17 @@ pub fn log(
}
}
-pub fn main2() anyerror!void {
- var skipped: usize = 0;
- var failed: usize = 0;
- // Simpler main(), exercising fewer language features, so that stage2 can handle it.
+/// Simpler main(), exercising fewer language features, so that
+/// work-in-progress backends can handle it.
+pub fn mainSimple() anyerror!void {
+ //const stderr = std.io.getStdErr();
for (builtin.test_functions) |test_fn| {
test_fn.func() catch |err| {
if (err != error.SkipZigTest) {
- failed += 1;
- } else {
- skipped += 1;
+ //stderr.writeAll(test_fn.name) catch {};
+ //stderr.writeAll("\n") catch {};
+ return err;
}
};
}
- if (builtin.zig_backend == .stage2_wasm or
- builtin.zig_backend == .stage2_x86_64 or
- builtin.zig_backend == .stage2_aarch64 or
- builtin.zig_backend == .stage2_llvm or
- builtin.zig_backend == .stage2_c)
- {
- const passed = builtin.test_functions.len - skipped - failed;
- const stderr = std.io.getStdErr();
- writeInt(stderr, passed) catch {};
- stderr.writeAll(" passed; ") catch {};
- writeInt(stderr, skipped) catch {};
- stderr.writeAll(" skipped; ") catch {};
- writeInt(stderr, failed) catch {};
- stderr.writeAll(" failed.\n") catch {};
- }
- if (failed != 0) {
- return error.TestsFailed;
- }
-}
-
-fn writeInt(stderr: std.fs.File, int: usize) anyerror!void {
- const base = 10;
- var buf: [100]u8 = undefined;
- var a: usize = int;
- var index: usize = buf.len;
- while (true) {
- const digit = a % base;
- index -= 1;
- buf[index] = std.fmt.digitToChar(@intCast(u8, digit), .lower);
- a /= base;
- if (a == 0) break;
- }
- const slice = buf[index..];
- try stderr.writeAll(slice);
}
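
The query_test_metadata reply above packs every test name into a single string_bytes buffer: offset 0 is reserved for null, each entry of names is a u32 byte offset into that buffer, and each name is NUL-terminated. A minimal sketch of how a receiving side could decode one entry; testName is a hypothetical helper for illustration, not part of std.zig.Server:

const std = @import("std");

// Hypothetical helper: recover one test name from the metadata reply.
// string_bytes is the buffer built by the runner; name_off is one entry
// of the names array, where 0 means "no name".
fn testName(string_bytes: []const u8, name_off: u32) ?[]const u8 {
    if (name_off == 0) return null; // offset 0 is reserved for null
    // Each name was appended NUL-terminated, so slice up to the terminator.
    return std.mem.sliceTo(string_bytes[name_off..], 0);
}

test "decode a name from the string table" {
    // Layout mirrors the runner: a leading 0 byte, then "foo" plus its terminator.
    const bytes = "\x00foo\x00";
    try std.testing.expectEqualStrings("foo", testName(bytes, 1).?);
    try std.testing.expect(testName(bytes, 0) == null);
}
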
diff --git a/lib/zig.h b/lib/zig.h
index c10720d1bd..59c3ddd695 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -37,6 +37,14 @@ typedef char bool;
#define zig_has_attribute(attribute) 0
#endif
+#if __LITTLE_ENDIAN__ || _MSC_VER
+#define zig_little_endian 1
+#define zig_big_endian 0
+#else
+#define zig_little_endian 0
+#define zig_big_endian 1
+#endif
+
#if __STDC_VERSION__ >= 201112L
#define zig_threadlocal _Thread_local
#elif defined(__GNUC__)
@@ -180,16 +188,38 @@ typedef char bool;
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
+#if zig_has_builtin(trap)
+#define zig_trap() __builtin_trap()
+#elif _MSC_VER && (_M_IX86 || _M_X64)
+#define zig_trap() __ud2()
+#elif _MSC_VER
+#define zig_trap() __fastfail(0)
+#elif defined(__i386__) || defined(__x86_64__)
+#define zig_trap() __asm__ volatile("ud2");
+#elif defined(__arm__) || defined(__aarch64__)
+#define zig_trap() __asm__ volatile("udf #0");
+#else
+#include <stdlib.h>
+#define zig_trap() abort()
+#endif
+
#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif zig_has_builtin(trap) || defined(zig_gnuc)
-#define zig_breakpoint() __builtin_trap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
#define zig_breakpoint() __asm__ volatile("int $0x03");
+#elif defined(__arm__)
+#define zig_breakpoint() __asm__ volatile("bkpt #0");
+#elif defined(__aarch64__)
+#define zig_breakpoint() __asm__ volatile("brk #0");
#else
+#include <signal.h>
+#if defined(SIGTRAP)
#define zig_breakpoint() raise(SIGTRAP)
+#else
+#define zig_breakpoint() zig_breakpoint_unavailable
+#endif
#endif
#if zig_has_builtin(return_address) || defined(zig_gnuc)
@@ -598,12 +628,6 @@ static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
@@ -618,12 +642,6 @@ static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vaddo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -636,12 +654,6 @@ static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
@@ -656,12 +668,6 @@ static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vaddo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -676,12 +682,6 @@ static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vaddo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -696,12 +696,6 @@ static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vaddo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -716,12 +710,6 @@ static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vaddo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -736,12 +724,6 @@ static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vaddo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint32_t full_res;
@@ -754,12 +736,6 @@ static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
@@ -774,12 +750,6 @@ static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vsubo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -792,12 +762,6 @@ static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
@@ -812,12 +776,6 @@ static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vsubo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -832,12 +790,6 @@ static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vsubo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -852,13 +804,6 @@ static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vsubo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-
static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -873,13 +818,6 @@ static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vsubo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-
static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -894,12 +832,6 @@ static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vsubo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint32_t full_res;
@@ -912,12 +844,6 @@ static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u32(uint8_t *ov, uint32_t *res, int n,
- const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
@@ -932,12 +858,6 @@ static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vmulo_i32(uint8_t *ov, int32_t *res, int n,
- const int32_t *lhs, const int32_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint64_t full_res;
@@ -950,12 +870,6 @@ static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u64(uint8_t *ov, uint64_t *res, int n,
- const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
@@ -970,12 +884,6 @@ static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vmulo_i64(uint8_t *ov, int64_t *res, int n,
- const int64_t *lhs, const int64_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint8_t full_res;
@@ -990,12 +898,6 @@ static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
-static inline void zig_vmulo_u8(uint8_t *ov, uint8_t *res, int n,
- const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
int8_t full_res;
@@ -1010,12 +912,6 @@ static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
-static inline void zig_vmulo_i8(uint8_t *ov, int8_t *res, int n,
- const int8_t *lhs, const int8_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint16_t full_res;
@@ -1030,12 +926,6 @@ static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
-static inline void zig_vmulo_u16(uint8_t *ov, uint16_t *res, int n,
- const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
int16_t full_res;
@@ -1050,12 +940,6 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
-static inline void zig_vmulo_i16(uint8_t *ov, int16_t *res, int n,
- const int16_t *lhs, const int16_t *rhs, uint8_t bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
#define zig_int_builtins(w) \
static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
@@ -1354,8 +1238,8 @@ typedef signed __int128 zig_i128;
#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo))
-#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo)
-#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#define zig_hi_u128(val) ((uint64_t)((val) >> 64))
#define zig_lo_u128(val) ((uint64_t)((val) >> 0))
#define zig_hi_i128(val) (( int64_t)((val) >> 64))
@@ -1373,7 +1257,7 @@ typedef signed __int128 zig_i128;
#else /* zig_has_int128 */
-#if __LITTLE_ENDIAN__ || _MSC_VER
+#if zig_little_endian
typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128;
typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128;
#else
@@ -1385,11 +1269,11 @@ typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128;
#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
#if _MSC_VER /* MSVC doesn't allow struct literals in constant expressions */
-#define zig_make_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#define zig_make_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
#else /* But non-MSVC doesn't like the unprotected commas */
-#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo)
-#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#endif
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
@@ -1632,7 +1516,9 @@ static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) {
}
static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) {
- return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ if (bits > UINT8_C(64)) return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ int64_t lo = zig_wrap_i64((int64_t)zig_lo_i128(val), bits);
+ return zig_make_i128(zig_shr_i64(lo, 63), (uint64_t)lo);
}
static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) {
@@ -1903,6 +1789,1010 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
+/* ========================== Big Integer Support =========================== */
+
+static inline uint16_t zig_int_bytes(uint16_t bits) {
+ uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
+ uint16_t alignment = ZIG_TARGET_MAX_INT_ALIGNMENT;
+ while (alignment / 2 >= bytes) alignment /= 2;
+ return (bytes + alignment - 1) / alignment * alignment;
+}
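
zig_int_bytes computes how many bytes the C backend reserves for an arbitrary-width integer: round the bit count up to whole bytes, then up to the largest power-of-two stride not exceeding ZIG_TARGET_MAX_INT_ALIGNMENT, so the limb loops below always see whole 8/16/32/64/128-bit limbs. A small Zig sketch of the same rounding, assuming a 16-byte maximum alignment (the real value is target-dependent):

const std = @import("std");

// Zig mirror of zig_int_bytes, assuming ZIG_TARGET_MAX_INT_ALIGNMENT == 16.
fn intBytes(bits: u16) u16 {
    const bytes = (bits + 7) / 8;
    var alignment: u16 = 16;
    while (alignment / 2 >= bytes) alignment /= 2;
    return (bytes + alignment - 1) / alignment * alignment;
}

comptime {
    // A u72 needs 9 payload bytes but is stored in a 16-byte slot,
    std.debug.assert(intBytes(72) == 16);
    // while a u24 rounds up to a 4-byte slot.
    std.debug.assert(intBytes(24) == 4);
}
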
+
+static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ bool do_signed = is_signed;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ int32_t limb_cmp;
+
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_i128(lhs_limb, rhs_limb);
+ do_signed = false;
+ } else {
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_u128(lhs_limb, rhs_limb);
+ }
+
+ if (limb_cmp != 0) return limb_cmp;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return 0;
+}
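
zig_cmp_big walks from the most-significant limb downward and returns at the first limb that differs; only that topmost limb is compared as signed (do_signed is cleared after the first iteration), because the lower limbs carry magnitude only. The same idea over 64-bit limbs, sketched in Zig with the limbs stored least-significant first:

const std = @import("std");

// Illustrative sketch, not the C backend's actual code path.
fn cmpBig(lhs: []const u64, rhs: []const u64, is_signed: bool) std.math.Order {
    var i = lhs.len;
    var do_signed = is_signed;
    while (i > 0) {
        i -= 1; // walk from the most-significant limb down
        if (do_signed) {
            // Only the top limb carries the sign.
            const l = @bitCast(i64, lhs[i]);
            const r = @bitCast(i64, rhs[i]);
            if (l != r) return std.math.order(l, r);
            do_signed = false;
        } else if (lhs[i] != rhs[i]) {
            return std.math.order(lhs[i], rhs[i]);
        }
    }
    return .eq;
}
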
+
+static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
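
zig_addo_big walks from the least-significant limb upward, threading the carry out of each fixed-width zig_addo_* call into the next limb; only the final, most-significant limb is treated as signed and trimmed to the declared bit width, and zig_subo_big below mirrors the same structure with a borrow. A minimal Zig sketch of the ripple-carry idea for two unsigned 64-bit limbs, using wrapping adds and comparisons in place of the overflow helpers:

const std = @import("std");

// Ripple-carry addition of two 128-bit values stored as [2]u64 limbs,
// least-significant limb first (mirrors the little-endian path above).
fn add128(lhs: [2]u64, rhs: [2]u64) struct { limbs: [2]u64, overflow: bool } {
    var res: [2]u64 = undefined;
    var carry: u64 = 0;
    for (lhs, rhs, &res) |l, r, *out| {
        const partial = l +% r; // wrapping add of the two limbs
        const c1 = partial < l; // did the first add wrap?
        out.* = partial +% carry;
        const c2 = out.* < partial; // did adding the carry in wrap?
        carry = @boolToInt(c1) | @boolToInt(c2);
    }
    return .{ .limbs = res, .overflow = carry != 0 };
}

test "carry propagates into the next limb" {
    const r = add128(.{ std.math.maxInt(u64), 0 }, .{ 1, 0 });
    try std.testing.expectEqual(@as(u64, 0), r.limbs[0]);
    try std.testing.expectEqual(@as(u64, 1), r.limbs[1]);
    try std.testing.expect(!r.overflow);
}
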
+
+static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ (void)zig_addo_big(res, lhs, rhs, is_signed, bits);
+}
+
+static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ (void)zig_subo_big(res, lhs, rhs, is_signed, bits);
+}
+
+zig_extern void __udivei4(uint32_t *res, const uint32_t *lhs, const uint32_t *rhs, uintptr_t bits);
+static inline void zig_div_trunc_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ if (!is_signed) {
+ __udivei4(res, lhs, rhs, bits);
+ return;
+ }
+
+ zig_trap();
+}
+
+static inline void zig_div_floor_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ if (!is_signed) {
+ zig_div_trunc_big(res, lhs, rhs, is_signed, bits);
+ return;
+ }
+
+ zig_trap();
+}
+
+zig_extern void __umodei4(uint32_t *res, const uint32_t *lhs, const uint32_t *rhs, uintptr_t bits);
+static inline void zig_rem_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ if (!is_signed) {
+ __umodei4(res, lhs, rhs, bits);
+ return;
+ }
+
+ zig_trap();
+}
+
+static inline void zig_mod_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ if (!is_signed) {
+ zig_rem_big(res, lhs, rhs, is_signed, bits);
+ return;
+ }
+
+ zig_trap();
+}
+
+static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t skip_bits = remaining_bytes * 8 - bits;
+ uint16_t total_lz = 0;
+ uint16_t limb_lz;
+ (void)is_signed;
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u128(val_limb, 128 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 128 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u64(val_limb, 64 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 64 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u32(val_limb, 32 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 32 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u16(val_limb, 16 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 16 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u8(val_limb, 8 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 8 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_lz;
+}
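
zig_clz_big starts at the most-significant limb, discounts the skip_bits of storage padding above the declared width, and stops at the first limb containing a set bit; zig_ctz_big below does the mirror-image scan from the least-significant limb, and zig_popcount_big simply sums over all limbs. A small Zig sketch of the leading-zero scan over 64-bit limbs stored least-significant first (padding handling omitted for brevity):

// Illustrative sketch: count leading zeros of a multi-limb integer whose
// total width is a whole number of 64-bit limbs.
fn clzBig(limbs: []const u64) u16 {
    var total: u16 = 0;
    var i = limbs.len;
    while (i > 0) {
        i -= 1; // the most-significant limb is last in memory here
        const lz = @clz(limbs[i]);
        total += lz;
        if (lz < 64) return total; // found a set bit, stop the scan
    }
    return total; // the whole value was zero
}
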
+
+static inline uint16_t zig_ctz_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_tz = 0;
+ uint16_t limb_tz;
+ (void)is_signed;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u128(val_limb, 128);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 128) return total_tz;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u64(val_limb, 64);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 64) return total_tz;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u32(val_limb, 32);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 32) return total_tz;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u16(val_limb, 16);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 16) return total_tz;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u8(val_limb, 8);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 8) return total_tz;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_tz;
+}
+
+static inline uint16_t zig_popcount_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_pc = 0;
+ (void)is_signed;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u128(val_limb, 128);
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u64(val_limb, 64);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u32(val_limb, 32);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u16(val_limb, 16);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u8(val_limb, 8);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_pc;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -1927,7 +2817,6 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg)
#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg)
#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg)
-#define zig_make_special_c_longdouble(sign, name, arg, repr) sign zig_make_c_longdouble(__builtin_##name, )(arg)
#else
#define zig_has_float_builtins 0
#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
@@ -1935,13 +2824,13 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
-#define zig_make_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
#endif
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
+typedef int16_t zig_repr_f16;
#define zig_libc_name_f16(name) __##name##h
-#define zig_make_special_constant_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
+#define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
#define zig_make_f16(fp, repr) fp##f
@@ -1950,7 +2839,9 @@ typedef double zig_f16;
#define zig_make_f16(fp, repr) fp
#elif LDBL_MANT_DIG == 11
#define zig_bitSizeOf_c_longdouble 16
-typedef uint16_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f16 zig_repr_c_longdouble;
+#endif
typedef long double zig_f16;
#define zig_make_f16(fp, repr) fp##l
#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc))
@@ -1967,17 +2858,18 @@ typedef int16_t zig_f16;
#define zig_make_f16(fp, repr) repr
#undef zig_make_special_f16
#define zig_make_special_f16(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f16
-#define zig_make_special_constant_f16(sign, name, arg, repr) repr
+#undef zig_init_special_f16
+#define zig_init_special_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
+typedef int32_t zig_repr_f32;
#define zig_libc_name_f32(name) name##f
#if _MSC_VER
-#define zig_make_special_constant_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
+#define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
#else
-#define zig_make_special_constant_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
+#define zig_init_special_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
@@ -1987,7 +2879,9 @@ typedef double zig_f32;
#define zig_make_f32(fp, repr) fp
#elif LDBL_MANT_DIG == 24
#define zig_bitSizeOf_c_longdouble 32
-typedef uint32_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f32 zig_repr_c_longdouble;
+#endif
typedef long double zig_f32;
#define zig_make_f32(fp, repr) fp##l
#elif FLT32_MANT_DIG == 24
@@ -2001,21 +2895,24 @@ typedef int32_t zig_f32;
#define zig_make_f32(fp, repr) repr
#undef zig_make_special_f32
#define zig_make_special_f32(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f32
-#define zig_make_special_constant_f32(sign, name, arg, repr) repr
+#undef zig_init_special_f32
+#define zig_init_special_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
+typedef int64_t zig_repr_f64;
#define zig_libc_name_f64(name) name
#if _MSC_VER
#ifdef ZIG_TARGET_ABI_MSVC
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
#endif
-#define zig_make_special_constant_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
+#endif
+#define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
#else /* _MSC_VER */
-#define zig_make_special_constant_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
+#define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
#endif /* _MSC_VER */
#if FLT_MANT_DIG == 53
typedef float zig_f64;
@@ -2025,7 +2922,9 @@ typedef double zig_f64;
#define zig_make_f64(fp, repr) fp
#elif LDBL_MANT_DIG == 53
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
+#endif
typedef long double zig_f64;
#define zig_make_f64(fp, repr) fp##l
#elif FLT64_MANT_DIG == 53
@@ -2042,14 +2941,15 @@ typedef int64_t zig_f64;
#define zig_make_f64(fp, repr) repr
#undef zig_make_special_f64
#define zig_make_special_f64(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f64
-#define zig_make_special_constant_f64(sign, name, arg, repr) repr
+#undef zig_init_special_f64
+#define zig_init_special_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
+typedef zig_i128 zig_repr_f80;
#define zig_libc_name_f80(name) __##name##x
-#define zig_make_special_constant_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
+#define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
#define zig_make_f80(fp, repr) fp##f
@@ -2058,7 +2958,9 @@ typedef double zig_f80;
#define zig_make_f80(fp, repr) fp
#elif LDBL_MANT_DIG == 64
#define zig_bitSizeOf_c_longdouble 80
-typedef zig_u128 zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f80 zig_repr_c_longdouble;
+#endif
typedef long double zig_f80;
#define zig_make_f80(fp, repr) fp##l
#elif FLT80_MANT_DIG == 64
@@ -2078,14 +2980,15 @@ typedef zig_i128 zig_f80;
#define zig_make_f80(fp, repr) repr
#undef zig_make_special_f80
#define zig_make_special_f80(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f80
-#define zig_make_special_constant_f80(sign, name, arg, repr) repr
+#undef zig_init_special_f80
+#define zig_init_special_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
+typedef zig_i128 zig_repr_f128;
#define zig_libc_name_f128(name) name##q
-#define zig_make_special_constant_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
+#define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
#define zig_make_f128(fp, repr) fp##f
@@ -2094,7 +2997,9 @@ typedef double zig_f128;
#define zig_make_f128(fp, repr) fp
#elif LDBL_MANT_DIG == 113
#define zig_bitSizeOf_c_longdouble 128
-typedef zig_u128 zig_repr_c_longdouble;
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f128 zig_repr_c_longdouble;
+#endif
typedef long double zig_f128;
#define zig_make_f128(fp, repr) fp##l
#elif FLT128_MANT_DIG == 113
@@ -2116,63 +3021,44 @@ typedef zig_i128 zig_f128;
#define zig_make_f128(fp, repr) repr
#undef zig_make_special_f128
#define zig_make_special_f128(sign, name, arg, repr) repr
-#undef zig_make_special_constant_f128
-#define zig_make_special_constant_f128(sign, name, arg, repr) repr
+#undef zig_init_special_f128
+#define zig_init_special_f128(sign, name, arg, repr) repr
#endif
-#define zig_has_c_longdouble 1
-
-#ifdef ZIG_TARGET_ABI_MSVC
-#define zig_libc_name_c_longdouble(name) name
-#else
-#define zig_libc_name_c_longdouble(name) name##l
-#endif
-
-#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) zig_make_special_c_longdouble(sign, name, arg, repr)
#ifdef zig_bitSizeOf_c_longdouble
+#define zig_has_c_longdouble 1
#ifdef ZIG_TARGET_ABI_MSVC
#undef zig_bitSizeOf_c_longdouble
#define zig_bitSizeOf_c_longdouble 64
-typedef uint64_t zig_repr_c_longdouble;
typedef zig_f64 zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) fp
+typedef zig_repr_f64 zig_repr_c_longdouble;
#else
typedef long double zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) fp##l
#endif
#else /* zig_bitSizeOf_c_longdouble */
-#undef zig_has_c_longdouble
#define zig_has_c_longdouble 0
-#define zig_bitSizeOf_c_longdouble 80
-typedef zig_u128 zig_repr_c_longdouble;
-#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
#define zig_bitSizeOf_repr_c_longdouble 128
-typedef zig_i128 zig_c_longdouble;
-#define zig_make_c_longdouble(fp, repr) repr
-#undef zig_make_special_c_longdouble
-#define zig_make_special_c_longdouble(sign, name, arg, repr) repr
-#undef zig_make_special_constant_c_longdouble
-#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) repr
+typedef zig_f128 zig_c_longdouble;
+typedef zig_repr_f128 zig_repr_c_longdouble;
#endif /* zig_bitSizeOf_c_longdouble */
#if !zig_has_float_builtins
-#define zig_float_from_repr(Type, ReprType) \
- static inline zig_##Type zig_float_from_repr_##Type(ReprType repr) { \
+#define zig_float_from_repr(Type) \
+ static inline zig_##Type zig_float_from_repr_##Type(zig_repr_##Type repr) { \
zig_##Type result; \
memcpy(&result, &repr, sizeof(result)); \
return result; \
}
-zig_float_from_repr(f16, uint16_t)
-zig_float_from_repr(f32, uint32_t)
-zig_float_from_repr(f64, uint64_t)
-zig_float_from_repr(f80, zig_u128)
-zig_float_from_repr(f128, zig_u128)
-zig_float_from_repr(c_longdouble, zig_repr_c_longdouble)
+zig_float_from_repr(f16)
+zig_float_from_repr(f32)
+zig_float_from_repr(f64)
+zig_float_from_repr(f80)
+zig_float_from_repr(f128)
#endif
#define zig_cast_f16 (zig_f16)
@@ -2181,11 +3067,9 @@ zig_float_from_repr(c_longdouble, zig_repr_c_longdouble)
#if _MSC_VER && !zig_has_f128
#define zig_cast_f80
-#define zig_cast_c_longdouble
#define zig_cast_f128
#else
#define zig_cast_f80 (zig_f80)
-#define zig_cast_c_longdouble (zig_c_longdouble)
#define zig_cast_f128 (zig_f128)
#endif
@@ -2314,7 +3198,6 @@ zig_float_builtins(f32)
zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
-zig_float_builtins(c_longdouble)
#if _MSC_VER && (_M_IX86 || _M_X64)
diff --git a/src/Air.zig b/src/Air.zig
index 3ebdd319de..4646dcc89e 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -232,7 +232,14 @@ pub const Inst = struct {
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `br` field.
br,
- /// Lowers to a hardware trap instruction, or the next best thing.
+ /// Lowers to a trap/jam instruction causing program abortion.
+ /// This may lower to an instruction known to be invalid.
+ /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
+ /// Result type is always noreturn; no instructions in a block follow this one.
+ trap,
+    /// Lowers to a trap instruction that causes debuggers to break here, or the next best thing.
+    /// The debugger or something else may allow the program to resume after this point.
+    /// Sometimes, for lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
/// Result type is always void.
breakpoint,
/// Yields the return address of the current function.
@@ -1186,6 +1193,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ret,
.ret_load,
.unreach,
+ .trap,
=> return Type.initTag(.noreturn),
.breakpoint,
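To make the documented distinction concrete, a small user-level sketch (illustrative only, not part of this patch): `@trap` is a noreturn abort, while `@breakpoint` is a void instruction a debugger can step past.

fn mustNotBeReached() noreturn {
    // Aborts the process, typically via an invalid or trap instruction; control never returns.
    @trap();
}

fn inspectHere(x: u32) u32 {
    // A debugger may stop here; execution can continue afterwards.
    @breakpoint();
    return x + 1;
}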
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 41a8ccadb2..182a28084f 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -148,18 +148,24 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
};
defer gz_instructions.deinit(gpa);
- if (AstGen.structDeclInner(
- &gen_scope,
- &gen_scope.base,
- 0,
- tree.containerDeclRoot(),
- .Auto,
- 0,
- )) |struct_decl_ref| {
- assert(refToIndex(struct_decl_ref).? == 0);
- } else |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => {}, // Handled via compile_errors below.
+ // The AST -> ZIR lowering process assumes an AST that does not have any
+ // parse errors.
+ if (tree.errors.len == 0) {
+ if (AstGen.structDeclInner(
+ &gen_scope,
+ &gen_scope.base,
+ 0,
+ tree.containerDeclRoot(),
+ .Auto,
+ 0,
+ )) |struct_decl_ref| {
+ assert(refToIndex(struct_decl_ref).? == 0);
+ } else |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => {}, // Handled via compile_errors below.
+ }
+ } else {
+ try lowerAstErrors(&astgen);
}
const err_index = @enumToInt(Zir.ExtraIndex.compile_errors);
@@ -205,7 +211,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
};
}
-pub fn deinit(astgen: *AstGen, gpa: Allocator) void {
+fn deinit(astgen: *AstGen, gpa: Allocator) void {
astgen.instructions.deinit(gpa);
astgen.extra.deinit(gpa);
astgen.string_table.deinit(gpa);
@@ -216,7 +222,7 @@ pub fn deinit(astgen: *AstGen, gpa: Allocator) void {
astgen.ref_table.deinit(gpa);
}
-pub const ResultInfo = struct {
+const ResultInfo = struct {
/// The semantics requested for the result location
rl: Loc,
@@ -245,7 +251,7 @@ pub const ResultInfo = struct {
}
}
- pub const Loc = union(enum) {
+ const Loc = union(enum) {
/// The expression is the right-hand side of assignment to `_`. Only the side-effects of the
/// expression should be generated. The result instruction from the expression must
/// be ignored.
@@ -277,11 +283,11 @@ pub const ResultInfo = struct {
src_node: ?Ast.Node.Index = null,
};
- pub const Strategy = struct {
+ const Strategy = struct {
elide_store_to_block_ptr_instructions: bool,
tag: Tag,
- pub const Tag = enum {
+ const Tag = enum {
/// Both branches will use break_void; result location is used to communicate the
/// result instruction.
break_void,
@@ -331,7 +337,7 @@ pub const ResultInfo = struct {
}
};
- pub const Context = enum {
+ const Context = enum {
/// The expression is the operand to a return expression.
@"return",
/// The expression is the input to an error-handling operator (if-else, try, or catch).
@@ -349,11 +355,11 @@ pub const ResultInfo = struct {
};
};
-pub const align_ri: ResultInfo = .{ .rl = .{ .ty = .u29_type } };
-pub const coerced_align_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .u29_type } };
-pub const bool_ri: ResultInfo = .{ .rl = .{ .ty = .bool_type } };
-pub const type_ri: ResultInfo = .{ .rl = .{ .ty = .type_type } };
-pub const coerced_type_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .type_type } };
+const align_ri: ResultInfo = .{ .rl = .{ .ty = .u29_type } };
+const coerced_align_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .u29_type } };
+const bool_ri: ResultInfo = .{ .rl = .{ .ty = .bool_type } };
+const type_ri: ResultInfo = .{ .rl = .{ .ty = .type_type } };
+const coerced_type_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .type_type } };
fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const prev_force_comptime = gz.force_comptime;
@@ -1960,7 +1966,10 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
else
.@"break";
+ block_gz.break_count += 1;
if (rhs == 0) {
+ _ = try rvalue(parent_gz, block_gz.break_result_info, .void_value, node);
+
try genDefers(parent_gz, scope, parent_scope, .normal_only);
// As our last action before the break, "pop" the error trace if needed
@@ -1970,7 +1979,6 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
_ = try parent_gz.addBreak(break_tag, block_inst, .void_value);
return Zir.Inst.Ref.unreachable_value;
}
- block_gz.break_count += 1;
const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node);
const search_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
@@ -2609,8 +2617,9 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.extended => switch (gz.astgen.instructions.items(.data)[inst].extended.opcode) {
.breakpoint,
.fence,
- .set_align_stack,
.set_float_mode,
+ .set_align_stack,
+ .set_cold,
=> break :b true,
else => break :b false,
},
@@ -2630,6 +2639,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.check_comptime_control_flow,
=> {
noreturn_src_node = statement;
@@ -2658,7 +2668,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.validate_struct_init_comptime,
.validate_array_init,
.validate_array_init_comptime,
- .set_cold,
.set_runtime_safety,
.closure_capture,
.memcpy,
@@ -3506,7 +3515,7 @@ const WipMembers = struct {
/// (4 for src_hash + line + name + value + doc_comment + align + link_section + address_space )
const max_decl_size = 11;
- pub fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
+ fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
const payload_top = @intCast(u32, payload.items.len);
const decls_start = payload_top + (decl_count + decls_per_u32 - 1) / decls_per_u32;
const field_bits_start = decls_start + decl_count * max_decl_size;
@@ -3527,7 +3536,7 @@ const WipMembers = struct {
};
}
- pub fn nextDecl(self: *Self, is_pub: bool, is_export: bool, has_align: bool, has_section_or_addrspace: bool) void {
+ fn nextDecl(self: *Self, is_pub: bool, is_export: bool, has_align: bool, has_section_or_addrspace: bool) void {
const index = self.payload_top + self.decl_index / decls_per_u32;
assert(index < self.decls_start);
const bit_bag: u32 = if (self.decl_index % decls_per_u32 == 0) 0 else self.payload.items[index];
@@ -3539,7 +3548,7 @@ const WipMembers = struct {
self.decl_index += 1;
}
- pub fn nextField(self: *Self, comptime bits_per_field: u32, bits: [bits_per_field]bool) void {
+ fn nextField(self: *Self, comptime bits_per_field: u32, bits: [bits_per_field]bool) void {
const fields_per_u32 = 32 / bits_per_field;
const index = self.field_bits_start + self.field_index / fields_per_u32;
assert(index < self.fields_start);
@@ -3553,25 +3562,25 @@ const WipMembers = struct {
self.field_index += 1;
}
- pub fn appendToDecl(self: *Self, data: u32) void {
+ fn appendToDecl(self: *Self, data: u32) void {
assert(self.decls_end < self.field_bits_start);
self.payload.items[self.decls_end] = data;
self.decls_end += 1;
}
- pub fn appendToDeclSlice(self: *Self, data: []const u32) void {
+ fn appendToDeclSlice(self: *Self, data: []const u32) void {
assert(self.decls_end + data.len <= self.field_bits_start);
mem.copy(u32, self.payload.items[self.decls_end..], data);
self.decls_end += @intCast(u32, data.len);
}
- pub fn appendToField(self: *Self, data: u32) void {
+ fn appendToField(self: *Self, data: u32) void {
assert(self.fields_end < self.payload.items.len);
self.payload.items[self.fields_end] = data;
self.fields_end += 1;
}
- pub fn finishBits(self: *Self, comptime bits_per_field: u32) void {
+ fn finishBits(self: *Self, comptime bits_per_field: u32) void {
const empty_decl_slots = decls_per_u32 - (self.decl_index % decls_per_u32);
if (self.decl_index > 0 and empty_decl_slots < decls_per_u32) {
const index = self.payload_top + self.decl_index / decls_per_u32;
@@ -3587,15 +3596,15 @@ const WipMembers = struct {
}
}
- pub fn declsSlice(self: *Self) []u32 {
+ fn declsSlice(self: *Self) []u32 {
return self.payload.items[self.payload_top..self.decls_end];
}
- pub fn fieldsSlice(self: *Self) []u32 {
+ fn fieldsSlice(self: *Self) []u32 {
return self.payload.items[self.field_bits_start..self.fields_end];
}
- pub fn deinit(self: *Self) void {
+ fn deinit(self: *Self) void {
self.payload.items.len = self.payload_top;
}
};
@@ -6583,6 +6592,9 @@ fn forExpr(
cond_block,
break_tag,
);
+ if (ri.rl.strategy(&loop_scope).tag == .break_void and loop_scope.break_count == 0) {
+ _ = try rvalue(parent_gz, ri, .void_value, node);
+ }
if (is_statement) {
_ = try parent_gz.addUnNode(.ensure_result_used, result, node);
}
@@ -7976,6 +7988,9 @@ fn builtinCall(
switch (node_tags[params[0]]) {
.identifier => {
const ident_token = main_tokens[params[0]];
+ if (isPrimitive(tree.tokenSlice(ident_token))) {
+ return astgen.failTok(ident_token, "unable to export primitive value", .{});
+ }
decl_name = try astgen.identAsString(ident_token);
var s = scope;
@@ -8056,27 +8071,35 @@ fn builtinCall(
},
.fence => {
const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[0]);
- const result = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.set_float_mode => {
const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .float_mode_type } }, params[0]);
- const result = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.set_align_stack => {
const order = try expr(gz, scope, align_ri, params[0]);
- const result = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{
+ _ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = order,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
+ },
+ .set_cold => {
+ const order = try expr(gz, scope, ri, params[0]);
+ _ = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = order,
+ });
+ return rvalue(gz, ri, .void_value, node);
},
.src => {
@@ -8097,7 +8120,7 @@ fn builtinCall(
.error_return_trace => return rvalue(gz, ri, try gz.addNodeExtended(.error_return_trace, node), node),
.frame => return rvalue(gz, ri, try gz.addNodeExtended(.frame, node), node),
.frame_address => return rvalue(gz, ri, try gz.addNodeExtended(.frame_address, node), node),
- .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node),
+ .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node),
.type_info => return simpleUnOpType(gz, scope, ri, node, params[0], .type_info),
.size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .size_of),
@@ -8111,7 +8134,6 @@ fn builtinCall(
.bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int),
.embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file),
.error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name),
- .set_cold => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_cold),
.set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety),
.sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt),
.sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin),
@@ -8171,6 +8193,11 @@ fn builtinCall(
try emitDbgNode(gz, node);
return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], if (gz.force_comptime) .panic_comptime else .panic);
},
+ .trap => {
+ try emitDbgNode(gz, node);
+ _ = try gz.addNode(.trap, node);
+ return rvalue(gz, ri, .void_value, node);
+ },
.error_to_int => {
const operand = try expr(gz, scope, .{ .rl = .none }, params[0]);
const result = try gz.addExtendedPayload(.error_to_int, Zir.Inst.UnNode{
@@ -8357,14 +8384,14 @@ fn builtinCall(
},
.atomic_store => {
const int_type = try typeExpr(gz, scope, params[0]);
- const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
+ _ = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
// zig fmt: off
.ptr = try expr(gz, scope, .{ .rl = .none }, params[1]),
.operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]),
.ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[3]),
// zig fmt: on
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.mul_add => {
const float_type = try typeExpr(gz, scope, params[0]);
@@ -8405,20 +8432,20 @@ fn builtinCall(
return rvalue(gz, ri, result, node);
},
.memcpy => {
- const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
+ _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
.dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
.source = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_const_u8_type } }, params[1]),
.byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.memset => {
- const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
+ _ = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
.dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
.byte = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u8_type } }, params[1]),
.byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.shuffle => {
const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{
@@ -8459,12 +8486,12 @@ fn builtinCall(
.prefetch => {
const ptr = try expr(gz, scope, .{ .rl = .none }, params[0]);
const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .prefetch_options_type } }, params[1]);
- const result = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{
+ _ = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(node),
.lhs = ptr,
.rhs = options,
});
- return rvalue(gz, ri, result, node);
+ return rvalue(gz, ri, .void_value, node);
},
.c_va_arg => {
if (astgen.fn_block == null) {
@@ -8509,16 +8536,6 @@ fn builtinCall(
}
}
-fn simpleNoOpVoid(
- gz: *GenZir,
- ri: ResultInfo,
- node: Ast.Node.Index,
- tag: Zir.Inst.Tag,
-) InnerError!Zir.Inst.Ref {
- _ = try gz.addNode(tag, node);
- return rvalue(gz, ri, .void_value, node);
-}
-
fn hasDeclOrField(
gz: *GenZir,
scope: *Scope,
@@ -8988,7 +9005,7 @@ const primitive_instrs = std.ComptimeStringMap(Zir.Inst.Ref, .{
});
comptime {
- // These checks ensure that std.zig.primitives stays in synce with the primitive->Zir map.
+ // These checks ensure that std.zig.primitives stays in sync with the primitive->Zir map.
const primitives = std.zig.primitives;
for (primitive_instrs.kvs) |kv| {
if (!primitives.isPrimitive(kv.key)) {
@@ -10369,7 +10386,7 @@ fn appendErrorTok(
comptime format: []const u8,
args: anytype,
) !void {
- try astgen.appendErrorTokNotes(token, format, args, &[0]u32{});
+ try astgen.appendErrorTokNotesOff(token, 0, format, args, &[0]u32{});
}
fn failTokNotes(
@@ -10379,7 +10396,7 @@ fn failTokNotes(
args: anytype,
notes: []const u32,
) InnerError {
- try appendErrorTokNotes(astgen, token, format, args, notes);
+ try appendErrorTokNotesOff(astgen, token, 0, format, args, notes);
return error.AnalysisFail;
}
@@ -10390,27 +10407,11 @@ fn appendErrorTokNotes(
args: anytype,
notes: []const u32,
) !void {
- @setCold(true);
- const string_bytes = &astgen.string_bytes;
- const msg = @intCast(u32, string_bytes.items.len);
- try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
- const notes_index: u32 = if (notes.len != 0) blk: {
- const notes_start = astgen.extra.items.len;
- try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len);
- astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len));
- astgen.extra.appendSliceAssumeCapacity(notes);
- break :blk @intCast(u32, notes_start);
- } else 0;
- try astgen.compile_errors.append(astgen.gpa, .{
- .msg = msg,
- .node = 0,
- .token = token,
- .byte_offset = 0,
- .notes = notes_index,
- });
+ return appendErrorTokNotesOff(astgen, token, 0, format, args, notes);
}
-/// Same as `fail`, except given an absolute byte offset.
+/// Same as `fail`, except given a token plus a byte offset from that
+/// token's start.
fn failOff(
astgen: *AstGen,
token: Ast.TokenIndex,
@@ -10418,27 +10419,36 @@ fn failOff(
comptime format: []const u8,
args: anytype,
) InnerError {
- try appendErrorOff(astgen, token, byte_offset, format, args);
+ try appendErrorTokNotesOff(astgen, token, byte_offset, format, args, &.{});
return error.AnalysisFail;
}
-fn appendErrorOff(
+fn appendErrorTokNotesOff(
astgen: *AstGen,
token: Ast.TokenIndex,
byte_offset: u32,
comptime format: []const u8,
args: anytype,
-) Allocator.Error!void {
+ notes: []const u32,
+) !void {
@setCold(true);
+ const gpa = astgen.gpa;
const string_bytes = &astgen.string_bytes;
const msg = @intCast(u32, string_bytes.items.len);
- try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
- try astgen.compile_errors.append(astgen.gpa, .{
+ try string_bytes.writer(gpa).print(format ++ "\x00", args);
+ const notes_index: u32 = if (notes.len != 0) blk: {
+ const notes_start = astgen.extra.items.len;
+ try astgen.extra.ensureTotalCapacity(gpa, notes_start + 1 + notes.len);
+ astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len));
+ astgen.extra.appendSliceAssumeCapacity(notes);
+ break :blk @intCast(u32, notes_start);
+ } else 0;
+ try astgen.compile_errors.append(gpa, .{
.msg = msg,
.node = 0,
.token = token,
.byte_offset = byte_offset,
- .notes = 0,
+ .notes = notes_index,
});
}
@@ -10447,6 +10457,16 @@ fn errNoteTok(
token: Ast.TokenIndex,
comptime format: []const u8,
args: anytype,
+) Allocator.Error!u32 {
+ return errNoteTokOff(astgen, token, 0, format, args);
+}
+
+fn errNoteTokOff(
+ astgen: *AstGen,
+ token: Ast.TokenIndex,
+ byte_offset: u32,
+ comptime format: []const u8,
+ args: anytype,
) Allocator.Error!u32 {
@setCold(true);
const string_bytes = &astgen.string_bytes;
@@ -10456,7 +10476,7 @@ fn errNoteTok(
.msg = msg,
.node = 0,
.token = token,
- .byte_offset = 0,
+ .byte_offset = byte_offset,
.notes = 0,
});
}
@@ -10787,7 +10807,7 @@ const Scope = struct {
/// ref of the capture for decls in this namespace
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
- pub fn deinit(self: *Namespace, gpa: Allocator) void {
+ fn deinit(self: *Namespace, gpa: Allocator) void {
self.decls.deinit(gpa);
self.captures.deinit(gpa);
self.* = undefined;
@@ -12623,3 +12643,42 @@ fn emitDbgStmt(gz: *GenZir, line: u32, column: u32) !void {
},
} });
}
+
+fn lowerAstErrors(astgen: *AstGen) !void {
+ const tree = astgen.tree;
+ assert(tree.errors.len > 0);
+
+ const gpa = astgen.gpa;
+ const parse_err = tree.errors[0];
+
+ var msg: std.ArrayListUnmanaged(u8) = .{};
+ defer msg.deinit(gpa);
+
+ const token_starts = tree.tokens.items(.start);
+ const token_tags = tree.tokens.items(.tag);
+
+ var notes: std.ArrayListUnmanaged(u32) = .{};
+ defer notes.deinit(gpa);
+
+ if (token_tags[parse_err.token + @boolToInt(parse_err.token_is_prev)] == .invalid) {
+ const tok = parse_err.token + @boolToInt(parse_err.token_is_prev);
+        const bad_off = @intCast(u32, tree.tokenSlice(tok).len);
+        const byte_abs = token_starts[tok] + bad_off;
+ try notes.append(gpa, try astgen.errNoteTokOff(tok, bad_off, "invalid byte: '{'}'", .{
+ std.zig.fmtEscapes(tree.source[byte_abs..][0..1]),
+ }));
+ }
+
+ for (tree.errors[1..]) |note| {
+ if (!note.is_note) break;
+
+ msg.clearRetainingCapacity();
+ try tree.renderError(note, msg.writer(gpa));
+ try notes.append(gpa, try astgen.errNoteTok(note.token, "{s}", .{msg.items}));
+ }
+
+ const extra_offset = tree.errorOffset(parse_err);
+ msg.clearRetainingCapacity();
+ try tree.renderError(parse_err, msg.writer(gpa));
+ try astgen.appendErrorTokNotesOff(parse_err.token, extra_offset, "{s}", .{msg.items}, notes.items);
+}
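With the `isPrimitive` check added to `builtinCall` above, exporting a primitive by name is now rejected during AstGen. An illustrative file that triggers the new error (the export name here is arbitrary):

comptime {
    // error: unable to export primitive value
    @export(u32, .{ .name = "some_symbol" });
}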
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 3cf3fff4c0..15d90b104b 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1338,7 +1338,6 @@ fn walkInstruction(
.embed_file,
.error_name,
.panic,
- .set_cold, // @check
.set_runtime_safety, // @check
.sqrt,
.sin,
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index 20edbabe47..79c6617483 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -109,6 +109,7 @@ pub const Tag = enum {
sub_with_overflow,
tag_name,
This,
+ trap,
truncate,
Type,
type_info,
@@ -915,6 +916,13 @@ pub const list = list: {
.param_count = 0,
},
},
+ .{
+ "@trap",
+ .{
+ .tag = .trap,
+ .param_count = 0,
+ },
+ },
.{
"@truncate",
.{
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 3348b7de11..24fed7f909 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -7,6 +7,9 @@ const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.compilation);
const Target = std.Target;
+const ThreadPool = std.Thread.Pool;
+const WaitGroup = std.Thread.WaitGroup;
+const ErrorBundle = std.zig.ErrorBundle;
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
@@ -30,8 +33,6 @@ const Cache = std.Build.Cache;
const translate_c = @import("translate_c.zig");
const clang = @import("clang.zig");
const c_codegen = @import("codegen/c.zig");
-const ThreadPool = @import("ThreadPool.zig");
-const WaitGroup = @import("WaitGroup.zig");
const libtsan = @import("libtsan.zig");
const Zir = @import("Zir.zig");
const Autodoc = @import("Autodoc.zig");
@@ -99,6 +100,7 @@ job_queued_compiler_rt_lib: bool = false,
job_queued_compiler_rt_obj: bool = false,
alloc_failure_occurred: bool = false,
formatted_panics: bool = false,
+last_update_was_cache_hit: bool = false,
c_source_files: []const CSourceFile,
clang_argv: []const []const u8,
@@ -334,12 +336,41 @@ pub const MiscTask = enum {
libssp,
zig_libc,
analyze_pkg,
+
+ @"musl crti.o",
+ @"musl crtn.o",
+ @"musl crt1.o",
+ @"musl rcrt1.o",
+ @"musl Scrt1.o",
+ @"musl libc.a",
+ @"musl libc.so",
+
+ @"wasi crt1-reactor.o",
+ @"wasi crt1-command.o",
+ @"wasi libc.a",
+ @"libwasi-emulated-process-clocks.a",
+ @"libwasi-emulated-getpid.a",
+ @"libwasi-emulated-mman.a",
+ @"libwasi-emulated-signal.a",
+
+ @"glibc crti.o",
+ @"glibc crtn.o",
+ @"glibc Scrt1.o",
+ @"glibc libc_nonshared.a",
+ @"glibc shared object",
+
+ @"mingw-w64 crt2.o",
+ @"mingw-w64 dllcrt2.o",
+ @"mingw-w64 mingw32.lib",
+ @"mingw-w64 msvcrt-os.lib",
+ @"mingw-w64 mingwex.lib",
+ @"mingw-w64 uuid.lib",
};
pub const MiscError = struct {
/// Allocated with gpa.
msg: []u8,
- children: ?AllErrors = null,
+ children: ?ErrorBundle = null,
pub fn deinit(misc_err: *MiscError, gpa: Allocator) void {
gpa.free(misc_err.msg);
@@ -365,448 +396,6 @@ pub const LldError = struct {
}
};
-/// To support incremental compilation, errors are stored in various places
-/// so that they can be created and destroyed appropriately. This structure
-/// is used to collect all the errors from the various places into one
-/// convenient place for API users to consume. It is allocated into 1 arena
-/// and freed all at once.
-pub const AllErrors = struct {
- arena: std.heap.ArenaAllocator.State,
- list: []const Message,
-
- pub const Message = union(enum) {
- src: struct {
- msg: []const u8,
- src_path: []const u8,
- line: u32,
- column: u32,
- span: Module.SrcLoc.Span,
- /// Usually one, but incremented for redundant messages.
- count: u32 = 1,
- /// Does not include the trailing newline.
- source_line: ?[]const u8,
- notes: []const Message = &.{},
- reference_trace: []Message = &.{},
-
- /// Splits the error message up into lines to properly indent them
- /// to allow for long, good-looking error messages.
- ///
- /// This is used to split the message in `@compileError("hello\nworld")` for example.
- fn writeMsg(src: @This(), stderr: anytype, indent: usize) !void {
- var lines = mem.split(u8, src.msg, "\n");
- while (lines.next()) |line| {
- try stderr.writeAll(line);
- if (lines.index == null) break;
- try stderr.writeByte('\n');
- try stderr.writeByteNTimes(' ', indent);
- }
- }
- },
- plain: struct {
- msg: []const u8,
- notes: []Message = &.{},
- /// Usually one, but incremented for redundant messages.
- count: u32 = 1,
- },
-
- pub fn incrementCount(msg: *Message) void {
- switch (msg.*) {
- .src => |*src| {
- src.count += 1;
- },
- .plain => |*plain| {
- plain.count += 1;
- },
- }
- }
-
- pub fn renderToStdErr(msg: Message, ttyconf: std.debug.TTY.Config) void {
- std.debug.getStderrMutex().lock();
- defer std.debug.getStderrMutex().unlock();
- const stderr = std.io.getStdErr();
- return msg.renderToWriter(ttyconf, stderr.writer(), "error", .Red, 0) catch return;
- }
-
- pub fn renderToWriter(
- msg: Message,
- ttyconf: std.debug.TTY.Config,
- stderr: anytype,
- kind: []const u8,
- color: std.debug.TTY.Color,
- indent: usize,
- ) anyerror!void {
- var counting_writer = std.io.countingWriter(stderr);
- const counting_stderr = counting_writer.writer();
- switch (msg) {
- .src => |src| {
- try counting_stderr.writeByteNTimes(' ', indent);
- try ttyconf.setColor(stderr, .Bold);
- try counting_stderr.print("{s}:{d}:{d}: ", .{
- src.src_path,
- src.line + 1,
- src.column + 1,
- });
- try ttyconf.setColor(stderr, color);
- try counting_stderr.writeAll(kind);
- try counting_stderr.writeAll(": ");
- // This is the length of the part before the error message:
- // e.g. "file.zig:4:5: error: "
- const prefix_len = @intCast(usize, counting_stderr.context.bytes_written);
- try ttyconf.setColor(stderr, .Reset);
- try ttyconf.setColor(stderr, .Bold);
- if (src.count == 1) {
- try src.writeMsg(stderr, prefix_len);
- try stderr.writeByte('\n');
- } else {
- try src.writeMsg(stderr, prefix_len);
- try ttyconf.setColor(stderr, .Dim);
- try stderr.print(" ({d} times)\n", .{src.count});
- }
- try ttyconf.setColor(stderr, .Reset);
- if (src.source_line) |line| {
- for (line) |b| switch (b) {
- '\t' => try stderr.writeByte(' '),
- else => try stderr.writeByte(b),
- };
- try stderr.writeByte('\n');
- // TODO basic unicode code point monospace width
- const before_caret = src.span.main - src.span.start;
- // -1 since span.main includes the caret
- const after_caret = src.span.end - src.span.main -| 1;
- try stderr.writeByteNTimes(' ', src.column - before_caret);
- try ttyconf.setColor(stderr, .Green);
- try stderr.writeByteNTimes('~', before_caret);
- try stderr.writeByte('^');
- try stderr.writeByteNTimes('~', after_caret);
- try stderr.writeByte('\n');
- try ttyconf.setColor(stderr, .Reset);
- }
- for (src.notes) |note| {
- try note.renderToWriter(ttyconf, stderr, "note", .Cyan, indent);
- }
- if (src.reference_trace.len != 0) {
- try ttyconf.setColor(stderr, .Reset);
- try ttyconf.setColor(stderr, .Dim);
- try stderr.print("referenced by:\n", .{});
- for (src.reference_trace) |reference| {
- switch (reference) {
- .src => |ref_src| try stderr.print(" {s}: {s}:{d}:{d}\n", .{
- ref_src.msg,
- ref_src.src_path,
- ref_src.line + 1,
- ref_src.column + 1,
- }),
- .plain => |plain| if (plain.count != 0) {
- try stderr.print(
- " {d} reference(s) hidden; use '-freference-trace={d}' to see all references\n",
- .{ plain.count, plain.count + src.reference_trace.len - 1 },
- );
- } else {
- try stderr.print(
- " remaining reference traces hidden; use '-freference-trace' to see all reference traces\n",
- .{},
- );
- },
- }
- }
- try stderr.writeByte('\n');
- try ttyconf.setColor(stderr, .Reset);
- }
- },
- .plain => |plain| {
- try ttyconf.setColor(stderr, color);
- try stderr.writeByteNTimes(' ', indent);
- try stderr.writeAll(kind);
- try stderr.writeAll(": ");
- try ttyconf.setColor(stderr, .Reset);
- if (plain.count == 1) {
- try stderr.print("{s}\n", .{plain.msg});
- } else {
- try stderr.print("{s}", .{plain.msg});
- try ttyconf.setColor(stderr, .Dim);
- try stderr.print(" ({d} times)\n", .{plain.count});
- }
- try ttyconf.setColor(stderr, .Reset);
- for (plain.notes) |note| {
- try note.renderToWriter(ttyconf, stderr, "note", .Cyan, indent + 4);
- }
- },
- }
- }
-
- pub const HashContext = struct {
- pub fn hash(ctx: HashContext, key: *Message) u64 {
- _ = ctx;
- var hasher = std.hash.Wyhash.init(0);
-
- switch (key.*) {
- .src => |src| {
- hasher.update(src.msg);
- hasher.update(src.src_path);
- std.hash.autoHash(&hasher, src.line);
- std.hash.autoHash(&hasher, src.column);
- std.hash.autoHash(&hasher, src.span.main);
- },
- .plain => |plain| {
- hasher.update(plain.msg);
- },
- }
-
- return hasher.final();
- }
-
- pub fn eql(ctx: HashContext, a: *Message, b: *Message) bool {
- _ = ctx;
- switch (a.*) {
- .src => |a_src| switch (b.*) {
- .src => |b_src| {
- return mem.eql(u8, a_src.msg, b_src.msg) and
- mem.eql(u8, a_src.src_path, b_src.src_path) and
- a_src.line == b_src.line and
- a_src.column == b_src.column and
- a_src.span.main == b_src.span.main;
- },
- .plain => return false,
- },
- .plain => |a_plain| switch (b.*) {
- .src => return false,
- .plain => |b_plain| {
- return mem.eql(u8, a_plain.msg, b_plain.msg);
- },
- },
- }
- }
- };
- };
-
- pub fn deinit(self: *AllErrors, gpa: Allocator) void {
- self.arena.promote(gpa).deinit();
- }
-
- pub fn add(
- module: *Module,
- arena: *std.heap.ArenaAllocator,
- errors: *std.ArrayList(Message),
- module_err_msg: Module.ErrorMsg,
- ) !void {
- const allocator = arena.allocator();
-
- const notes_buf = try allocator.alloc(Message, module_err_msg.notes.len);
- var note_i: usize = 0;
-
- // De-duplicate error notes. The main use case in mind for this is
- // too many "note: called from here" notes when eval branch quota is reached.
- var seen_notes = std.HashMap(
- *Message,
- void,
- Message.HashContext,
- std.hash_map.default_max_load_percentage,
- ).init(allocator);
- const err_source = module_err_msg.src_loc.file_scope.getSource(module.gpa) catch |err| {
- const file_path = try module_err_msg.src_loc.file_scope.fullPath(allocator);
- try errors.append(.{
- .plain = .{
- .msg = try std.fmt.allocPrint(allocator, "unable to load '{s}': {s}", .{
- file_path, @errorName(err),
- }),
- },
- });
- return;
- };
- const err_span = try module_err_msg.src_loc.span(module.gpa);
- const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main);
-
- for (module_err_msg.notes) |module_note| {
- const source = try module_note.src_loc.file_scope.getSource(module.gpa);
- const span = try module_note.src_loc.span(module.gpa);
- const loc = std.zig.findLineColumn(source.bytes, span.main);
- const file_path = try module_note.src_loc.file_scope.fullPath(allocator);
- const note = ¬es_buf[note_i];
- note.* = .{
- .src = .{
- .src_path = file_path,
- .msg = try allocator.dupe(u8, module_note.msg),
- .span = span,
- .line = @intCast(u32, loc.line),
- .column = @intCast(u32, loc.column),
- .source_line = if (err_loc.eql(loc)) null else try allocator.dupe(u8, loc.source_line),
- },
- };
- const gop = try seen_notes.getOrPut(note);
- if (gop.found_existing) {
- gop.key_ptr.*.incrementCount();
- } else {
- note_i += 1;
- }
- }
-
- const reference_trace = try allocator.alloc(Message, module_err_msg.reference_trace.len);
- for (reference_trace, 0..) |*reference, i| {
- const module_reference = module_err_msg.reference_trace[i];
- if (module_reference.hidden != 0) {
- reference.* = .{ .plain = .{ .msg = undefined, .count = module_reference.hidden } };
- break;
- } else if (module_reference.decl == null) {
- reference.* = .{ .plain = .{ .msg = undefined, .count = 0 } };
- break;
- }
- const source = try module_reference.src_loc.file_scope.getSource(module.gpa);
- const span = try module_reference.src_loc.span(module.gpa);
- const loc = std.zig.findLineColumn(source.bytes, span.main);
- const file_path = try module_reference.src_loc.file_scope.fullPath(allocator);
- reference.* = .{
- .src = .{
- .src_path = file_path,
- .msg = try allocator.dupe(u8, std.mem.sliceTo(module_reference.decl.?, 0)),
- .span = span,
- .line = @intCast(u32, loc.line),
- .column = @intCast(u32, loc.column),
- .source_line = null,
- },
- };
- }
- const file_path = try module_err_msg.src_loc.file_scope.fullPath(allocator);
- try errors.append(.{
- .src = .{
- .src_path = file_path,
- .msg = try allocator.dupe(u8, module_err_msg.msg),
- .span = err_span,
- .line = @intCast(u32, err_loc.line),
- .column = @intCast(u32, err_loc.column),
- .notes = notes_buf[0..note_i],
- .reference_trace = reference_trace,
- .source_line = if (module_err_msg.src_loc.lazy == .entire_file) null else try allocator.dupe(u8, err_loc.source_line),
- },
- });
- }
-
- pub fn addZir(
- arena: Allocator,
- errors: *std.ArrayList(Message),
- file: *Module.File,
- ) !void {
- assert(file.zir_loaded);
- assert(file.tree_loaded);
- assert(file.source_loaded);
- const payload_index = file.zir.extra[@enumToInt(Zir.ExtraIndex.compile_errors)];
- assert(payload_index != 0);
-
- const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index);
- const items_len = header.data.items_len;
- var extra_index = header.end;
- var item_i: usize = 0;
- while (item_i < items_len) : (item_i += 1) {
- const item = file.zir.extraData(Zir.Inst.CompileErrors.Item, extra_index);
- extra_index = item.end;
- const err_span = blk: {
- if (item.data.node != 0) {
- break :blk Module.SrcLoc.nodeToSpan(&file.tree, item.data.node);
- }
- const token_starts = file.tree.tokens.items(.start);
- const start = token_starts[item.data.token] + item.data.byte_offset;
- const end = start + @intCast(u32, file.tree.tokenSlice(item.data.token).len) - item.data.byte_offset;
- break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start };
- };
- const err_loc = std.zig.findLineColumn(file.source, err_span.main);
-
- var notes: []Message = &[0]Message{};
- if (item.data.notes != 0) {
- const block = file.zir.extraData(Zir.Inst.Block, item.data.notes);
- const body = file.zir.extra[block.end..][0..block.data.body_len];
- notes = try arena.alloc(Message, body.len);
- for (notes, 0..) |*note, i| {
- const note_item = file.zir.extraData(Zir.Inst.CompileErrors.Item, body[i]);
- const msg = file.zir.nullTerminatedString(note_item.data.msg);
- const span = blk: {
- if (note_item.data.node != 0) {
- break :blk Module.SrcLoc.nodeToSpan(&file.tree, note_item.data.node);
- }
- const token_starts = file.tree.tokens.items(.start);
- const start = token_starts[note_item.data.token] + note_item.data.byte_offset;
- const end = start + @intCast(u32, file.tree.tokenSlice(note_item.data.token).len) - item.data.byte_offset;
- break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start };
- };
- const loc = std.zig.findLineColumn(file.source, span.main);
-
- note.* = .{
- .src = .{
- .src_path = try file.fullPath(arena),
- .msg = try arena.dupe(u8, msg),
- .span = span,
- .line = @intCast(u32, loc.line),
- .column = @intCast(u32, loc.column),
- .notes = &.{}, // TODO rework this function to be recursive
- .source_line = if (loc.eql(err_loc)) null else try arena.dupe(u8, loc.source_line),
- },
- };
- }
- }
-
- const msg = file.zir.nullTerminatedString(item.data.msg);
- try errors.append(.{
- .src = .{
- .src_path = try file.fullPath(arena),
- .msg = try arena.dupe(u8, msg),
- .span = err_span,
- .line = @intCast(u32, err_loc.line),
- .column = @intCast(u32, err_loc.column),
- .notes = notes,
- .source_line = try arena.dupe(u8, err_loc.source_line),
- },
- });
- }
- }
-
- fn addPlain(
- arena: *std.heap.ArenaAllocator,
- errors: *std.ArrayList(Message),
- msg: []const u8,
- ) !void {
- _ = arena;
- try errors.append(.{ .plain = .{ .msg = msg } });
- }
-
- fn addPlainWithChildren(
- arena: *std.heap.ArenaAllocator,
- errors: *std.ArrayList(Message),
- msg: []const u8,
- optional_children: ?AllErrors,
- ) !void {
- const allocator = arena.allocator();
- const duped_msg = try allocator.dupe(u8, msg);
- if (optional_children) |*children| {
- try errors.append(.{ .plain = .{
- .msg = duped_msg,
- .notes = try dupeList(children.list, allocator),
- } });
- } else {
- try errors.append(.{ .plain = .{ .msg = duped_msg } });
- }
- }
-
- fn dupeList(list: []const Message, arena: Allocator) Allocator.Error![]Message {
- const duped_list = try arena.alloc(Message, list.len);
- for (list, 0..) |item, i| {
- duped_list[i] = switch (item) {
- .src => |src| .{ .src = .{
- .msg = try arena.dupe(u8, src.msg),
- .src_path = try arena.dupe(u8, src.src_path),
- .line = src.line,
- .column = src.column,
- .span = src.span,
- .source_line = if (src.source_line) |s| try arena.dupe(u8, s) else null,
- .notes = try dupeList(src.notes, arena),
- } },
- .plain => |plain| .{ .plain = .{
- .msg = try arena.dupe(u8, plain.msg),
- .notes = try dupeList(plain.notes, arena),
- } },
- };
- }
- return duped_list;
- }
-};
-
pub const Directory = Cache.Directory;
pub const EmitLoc = struct {
@@ -2259,12 +1848,20 @@ fn cleanupTmpArtifactDirectory(
}
}
+pub fn hotCodeSwap(comp: *Compilation, prog_node: *std.Progress.Node, pid: std.ChildProcess.Id) !void {
+ comp.bin_file.child_pid = pid;
+ try comp.makeBinFileWritable();
+ try comp.update(prog_node);
+ try comp.makeBinFileExecutable();
+}
+
/// Detect changes to source files, perform semantic analysis, and update the output files.
-pub fn update(comp: *Compilation) !void {
+pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void {
const tracy_trace = trace(@src());
defer tracy_trace.end();
comp.clearMiscFailures();
+ comp.last_update_was_cache_hit = false;
var man: Cache.Manifest = undefined;
defer if (comp.whole_cache_manifest != null) man.deinit();
@@ -2292,6 +1889,7 @@ pub fn update(comp: *Compilation) !void {
return err;
};
if (is_hit) {
+ comp.last_update_was_cache_hit = true;
log.debug("CacheMode.whole cache hit for {s}", .{comp.bin_file.options.root_name});
const digest = man.final();
@@ -2407,21 +2005,6 @@ pub fn update(comp: *Compilation) !void {
}
}
- // If the terminal is dumb, we dont want to show the user all the output.
- var progress: std.Progress = .{ .dont_print_on_dumb = true };
- const main_progress_node = progress.start("", 0);
- defer main_progress_node.end();
- switch (comp.color) {
- .off => {
- progress.terminal = null;
- },
- .on => {
- progress.terminal = std.io.getStdErr();
- progress.supports_ansi_escape_codes = true;
- },
- .auto => {},
- }
-
try comp.performAllTheWork(main_progress_node);
if (comp.bin_file.options.module) |module| {
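Because `update` no longer sets up its own `std.Progress`, callers are now expected to do what the removed block above did and pass the root node in. A minimal sketch of a call site, assuming `comp: *Compilation` is already in scope:

var progress: std.Progress = .{ .dont_print_on_dumb = true };
const main_progress_node = progress.start("", 0);
defer main_progress_node.end();

try comp.update(main_progress_node);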
@@ -2891,7 +2474,7 @@ pub fn makeBinFileWritable(self: *Compilation) !void {
}
/// This function is temporally single-threaded.
-pub fn totalErrorCount(self: *Compilation) usize {
+pub fn totalErrorCount(self: *Compilation) u32 {
var total: usize = self.failed_c_objects.count() + self.misc_failures.count() +
@boolToInt(self.alloc_failure_occurred) + self.lld_errors.items.len;
@@ -2951,17 +2534,16 @@ pub fn totalErrorCount(self: *Compilation) usize {
}
}
- return total;
+ return @intCast(u32, total);
}
/// This function is temporally single-threaded.
-pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
- var arena = std.heap.ArenaAllocator.init(self.gpa);
- errdefer arena.deinit();
- const arena_allocator = arena.allocator();
+pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
+ const gpa = self.gpa;
- var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
- defer errors.deinit();
+ var bundle: ErrorBundle.Wip = undefined;
+ try bundle.init(gpa);
+ defer bundle.deinit();
{
var it = self.failed_c_objects.iterator();
@@ -2970,53 +2552,58 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
const err_msg = entry.value_ptr.*;
// TODO these fields will need to be adjusted when we have proper
// C error reporting bubbling up.
- try errors.append(.{
- .src = .{
- .src_path = try arena_allocator.dupe(u8, c_object.src.src_path),
- .msg = try std.fmt.allocPrint(arena_allocator, "unable to build C object: {s}", .{
- err_msg.msg,
- }),
- .span = .{ .start = 0, .end = 1, .main = 0 },
+ try bundle.addRootErrorMessage(.{
+ .msg = try bundle.printString("unable to build C object: {s}", .{err_msg.msg}),
+ .src_loc = try bundle.addSourceLocation(.{
+ .src_path = try bundle.addString(c_object.src.src_path),
+ .span_start = 0,
+ .span_main = 0,
+ .span_end = 1,
.line = err_msg.line,
.column = err_msg.column,
- .source_line = null, // TODO
- },
+ .source_line = 0, // TODO
+ }),
});
}
}
- for (self.lld_errors.items) |lld_error| {
- const notes = try arena_allocator.alloc(AllErrors.Message, lld_error.context_lines.len);
- for (lld_error.context_lines, 0..) |context_line, i| {
- notes[i] = .{ .plain = .{
- .msg = try arena_allocator.dupe(u8, context_line),
- } };
- }
- try errors.append(.{
- .plain = .{
- .msg = try arena_allocator.dupe(u8, lld_error.msg),
- .notes = notes,
- },
+ for (self.lld_errors.items) |lld_error| {
+ const notes_len = @intCast(u32, lld_error.context_lines.len);
+
+ try bundle.addRootErrorMessage(.{
+ .msg = try bundle.addString(lld_error.msg),
+ .notes_len = notes_len,
});
+ const notes_start = try bundle.reserveNotes(notes_len);
+ for (notes_start.., lld_error.context_lines) |note, context_line| {
+ bundle.extra.items[note] = @enumToInt(bundle.addErrorMessageAssumeCapacity(.{
+ .msg = try bundle.addString(context_line),
+ }));
+ }
}
for (self.misc_failures.values()) |*value| {
- try AllErrors.addPlainWithChildren(&arena, &errors, value.msg, value.children);
+ try bundle.addRootErrorMessage(.{
+ .msg = try bundle.addString(value.msg),
+ .notes_len = if (value.children) |b| b.errorMessageCount() else 0,
+ });
+ if (value.children) |b| try bundle.addBundle(b);
}
if (self.alloc_failure_occurred) {
- try AllErrors.addPlain(&arena, &errors, "memory allocation failure");
+ try bundle.addRootErrorMessage(.{
+ .msg = try bundle.addString("memory allocation failure"),
+ });
}
if (self.bin_file.options.module) |module| {
{
var it = module.failed_files.iterator();
while (it.next()) |entry| {
if (entry.value_ptr.*) |msg| {
- try AllErrors.add(module, &arena, &errors, msg.*);
+ try addModuleErrorMsg(&bundle, msg.*);
} else {
- // Must be ZIR errors. In order for ZIR errors to exist, the parsing
- // must have completed successfully.
- const tree = try entry.key_ptr.*.getTree(module.gpa);
- assert(tree.errors.len == 0);
- try AllErrors.addZir(arena_allocator, &errors, entry.key_ptr.*);
+ // Must be ZIR errors. Note that this may include AST errors.
+ // addZirErrorMessages asserts that the tree is loaded.
+ _ = try entry.key_ptr.*.getTree(gpa);
+ try addZirErrorMessages(&bundle, entry.key_ptr.*);
}
}
}
@@ -3024,7 +2611,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
var it = module.failed_embed_files.iterator();
while (it.next()) |entry| {
const msg = entry.value_ptr.*;
- try AllErrors.add(module, &arena, &errors, msg.*);
+ try addModuleErrorMsg(&bundle, msg.*);
}
}
{
@@ -3034,23 +2621,20 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (decl.getFileScope().okToReportErrors()) {
- try AllErrors.add(module, &arena, &errors, entry.value_ptr.*.*);
+ try addModuleErrorMsg(&bundle, entry.value_ptr.*.*);
if (module.cimport_errors.get(entry.key_ptr.*)) |cimport_errors| for (cimport_errors) |c_error| {
- if (c_error.path) |some|
- try errors.append(.{
- .src = .{
- .src_path = try arena_allocator.dupe(u8, std.mem.span(some)),
- .span = .{ .start = c_error.offset, .end = c_error.offset + 1, .main = c_error.offset },
- .msg = try arena_allocator.dupe(u8, std.mem.span(c_error.msg)),
- .line = c_error.line,
- .column = c_error.column,
- .source_line = if (c_error.source_line) |line| try arena_allocator.dupe(u8, std.mem.span(line)) else null,
- },
- })
- else
- try errors.append(.{
- .plain = .{ .msg = try arena_allocator.dupe(u8, std.mem.span(c_error.msg)) },
- });
+ try bundle.addRootErrorMessage(.{
+ .msg = try bundle.addString(std.mem.span(c_error.msg)),
+ .src_loc = if (c_error.path) |some| try bundle.addSourceLocation(.{
+ .src_path = try bundle.addString(std.mem.span(some)),
+ .span_start = c_error.offset,
+ .span_main = c_error.offset,
+ .span_end = c_error.offset + 1,
+ .line = c_error.line,
+ .column = c_error.column,
+ .source_line = if (c_error.source_line) |line| try bundle.addString(std.mem.span(line)) else 0,
+ }) else .none,
+ });
};
}
}
@@ -3062,45 +2646,39 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (decl.getFileScope().okToReportErrors()) {
- try AllErrors.add(module, &arena, &errors, entry.value_ptr.*.*);
+ try addModuleErrorMsg(&bundle, entry.value_ptr.*.*);
}
}
}
for (module.failed_exports.values()) |value| {
- try AllErrors.add(module, &arena, &errors, value.*);
+ try addModuleErrorMsg(&bundle, value.*);
}
}
- if (errors.items.len == 0) {
+ if (bundle.root_list.items.len == 0) {
if (self.link_error_flags.no_entry_point_found) {
- try errors.append(.{
- .plain = .{
- .msg = try std.fmt.allocPrint(arena_allocator, "no entry point found", .{}),
- },
+ try bundle.addRootErrorMessage(.{
+ .msg = try bundle.addString("no entry point found"),
});
}
}
if (self.link_error_flags.missing_libc) {
- const notes = try arena_allocator.create([2]AllErrors.Message);
- notes.* = .{
- .{ .plain = .{
- .msg = try arena_allocator.dupe(u8, "run 'zig libc -h' to learn about libc installations"),
- } },
- .{ .plain = .{
- .msg = try arena_allocator.dupe(u8, "run 'zig targets' to see the targets for which zig can always provide libc"),
- } },
- };
- try errors.append(.{
- .plain = .{
- .msg = try std.fmt.allocPrint(arena_allocator, "libc not available", .{}),
- .notes = notes,
- },
+ try bundle.addRootErrorMessage(.{
+ .msg = try bundle.addString("libc not available"),
+ .notes_len = 2,
});
+ const notes_start = try bundle.reserveNotes(2);
+ bundle.extra.items[notes_start + 0] = @enumToInt(try bundle.addErrorMessage(.{
+ .msg = try bundle.addString("run 'zig libc -h' to learn about libc installations"),
+ }));
+ bundle.extra.items[notes_start + 1] = @enumToInt(try bundle.addErrorMessage(.{
+ .msg = try bundle.addString("run 'zig targets' to see the targets for which zig can always provide libc"),
+ }));
}
if (self.bin_file.options.module) |module| {
- if (errors.items.len == 0 and module.compile_log_decls.count() != 0) {
+ if (bundle.root_list.items.len == 0 and module.compile_log_decls.count() != 0) {
const keys = module.compile_log_decls.keys();
const values = module.compile_log_decls.values();
// First one will be the error; subsequent ones will be notes.
@@ -3109,9 +2687,9 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
const err_msg = Module.ErrorMsg{
.src_loc = src_loc,
.msg = "found compile log statement",
- .notes = try self.gpa.alloc(Module.ErrorMsg, module.compile_log_decls.count() - 1),
+ .notes = try gpa.alloc(Module.ErrorMsg, module.compile_log_decls.count() - 1),
};
- defer self.gpa.free(err_msg.notes);
+ defer gpa.free(err_msg.notes);
for (keys[1..], 0..) |key, i| {
const note_decl = module.declPtr(key);
@@ -3121,21 +2699,260 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
};
}
- try AllErrors.add(module, &arena, &errors, err_msg);
+ try addModuleErrorMsg(&bundle, err_msg);
}
}
- assert(errors.items.len == self.totalErrorCount());
+ assert(self.totalErrorCount() == bundle.root_list.items.len);
- return AllErrors{
- .list = try arena_allocator.dupe(AllErrors.Message, errors.items),
- .arena = arena.state,
- };
+ const compile_log_text = if (self.bin_file.options.module) |m| m.compile_log_text.items else "";
+ return bundle.toOwnedBundle(compile_log_text);
}
-pub fn getCompileLogOutput(self: *Compilation) []const u8 {
- const module = self.bin_file.options.module orelse return &[0]u8{};
- return module.compile_log_text.items;
+pub const ErrorNoteHashContext = struct {
+ eb: *const ErrorBundle.Wip,
+
+ pub fn hash(ctx: ErrorNoteHashContext, key: ErrorBundle.ErrorMessage) u32 {
+ var hasher = std.hash.Wyhash.init(0);
+ const eb = ctx.eb.tmpBundle();
+
+ hasher.update(eb.nullTerminatedString(key.msg));
+ if (key.src_loc != .none) {
+ const src = eb.getSourceLocation(key.src_loc);
+ hasher.update(eb.nullTerminatedString(src.src_path));
+ std.hash.autoHash(&hasher, src.line);
+ std.hash.autoHash(&hasher, src.column);
+ std.hash.autoHash(&hasher, src.span_main);
+ }
+
+ return @truncate(u32, hasher.final());
+ }
+
+ pub fn eql(
+ ctx: ErrorNoteHashContext,
+ a: ErrorBundle.ErrorMessage,
+ b: ErrorBundle.ErrorMessage,
+ b_index: usize,
+ ) bool {
+ _ = b_index;
+ const eb = ctx.eb.tmpBundle();
+ const msg_a = eb.nullTerminatedString(a.msg);
+ const msg_b = eb.nullTerminatedString(b.msg);
+ if (!std.mem.eql(u8, msg_a, msg_b)) return false;
+
+ if (a.src_loc == .none and b.src_loc == .none) return true;
+ if (a.src_loc == .none or b.src_loc == .none) return false;
+ const src_a = eb.getSourceLocation(a.src_loc);
+ const src_b = eb.getSourceLocation(b.src_loc);
+
+ const src_path_a = eb.nullTerminatedString(src_a.src_path);
+ const src_path_b = eb.nullTerminatedString(src_b.src_path);
+
+ return std.mem.eql(u8, src_path_a, src_path_b) and
+ src_a.line == src_b.line and
+ src_a.column == src_b.column and
+ src_a.span_main == src_b.span_main;
+ }
+};
+
+pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void {
+ const gpa = eb.gpa;
+ const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| {
+ const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa);
+ defer gpa.free(file_path);
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.printString("unable to load '{s}': {s}", .{
+ file_path, @errorName(err),
+ }),
+ });
+ return;
+ };
+ const err_span = try module_err_msg.src_loc.span(gpa);
+ const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main);
+ const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa);
+ defer gpa.free(file_path);
+
+ var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{};
+ defer ref_traces.deinit(gpa);
+
+ for (module_err_msg.reference_trace) |module_reference| {
+ if (module_reference.hidden != 0) {
+ try ref_traces.append(gpa, .{
+ .decl_name = module_reference.hidden,
+ .src_loc = .none,
+ });
+ break;
+ } else if (module_reference.decl == null) {
+ try ref_traces.append(gpa, .{
+ .decl_name = 0,
+ .src_loc = .none,
+ });
+ break;
+ }
+ const source = try module_reference.src_loc.file_scope.getSource(gpa);
+ const span = try module_reference.src_loc.span(gpa);
+ const loc = std.zig.findLineColumn(source.bytes, span.main);
+ const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa);
+ defer gpa.free(rt_file_path);
+ try ref_traces.append(gpa, .{
+ .decl_name = try eb.addString(std.mem.sliceTo(module_reference.decl.?, 0)),
+ .src_loc = try eb.addSourceLocation(.{
+ .src_path = try eb.addString(rt_file_path),
+ .span_start = span.start,
+ .span_main = span.main,
+ .span_end = span.end,
+ .line = @intCast(u32, loc.line),
+ .column = @intCast(u32, loc.column),
+ .source_line = 0,
+ }),
+ });
+ }
+
+ const src_loc = try eb.addSourceLocation(.{
+ .src_path = try eb.addString(file_path),
+ .span_start = err_span.start,
+ .span_main = err_span.main,
+ .span_end = err_span.end,
+ .line = @intCast(u32, err_loc.line),
+ .column = @intCast(u32, err_loc.column),
+ .source_line = if (module_err_msg.src_loc.lazy == .entire_file)
+ 0
+ else
+ try eb.addString(err_loc.source_line),
+ .reference_trace_len = @intCast(u32, ref_traces.items.len),
+ });
+
+ for (ref_traces.items) |rt| {
+ try eb.addReferenceTrace(rt);
+ }
+
+ // De-duplicate error notes. The main use case in mind for this is
+ // too many "note: called from here" notes when eval branch quota is reached.
+ var notes: std.ArrayHashMapUnmanaged(ErrorBundle.ErrorMessage, void, ErrorNoteHashContext, true) = .{};
+ defer notes.deinit(gpa);
+
+ for (module_err_msg.notes) |module_note| {
+ const source = try module_note.src_loc.file_scope.getSource(gpa);
+ const span = try module_note.src_loc.span(gpa);
+ const loc = std.zig.findLineColumn(source.bytes, span.main);
+ const note_file_path = try module_note.src_loc.file_scope.fullPath(gpa);
+ defer gpa.free(note_file_path);
+
+ const gop = try notes.getOrPutContext(gpa, .{
+ .msg = try eb.addString(module_note.msg),
+ .src_loc = try eb.addSourceLocation(.{
+ .src_path = try eb.addString(note_file_path),
+ .span_start = span.start,
+ .span_main = span.main,
+ .span_end = span.end,
+ .line = @intCast(u32, loc.line),
+ .column = @intCast(u32, loc.column),
+ .source_line = if (err_loc.eql(loc)) 0 else try eb.addString(loc.source_line),
+ }),
+ }, .{ .eb = eb });
+ if (gop.found_existing) {
+ gop.key_ptr.count += 1;
+ }
+ }
+
+ const notes_len = @intCast(u32, notes.entries.len);
+
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString(module_err_msg.msg),
+ .src_loc = src_loc,
+ .notes_len = notes_len,
+ });
+
+ const notes_start = try eb.reserveNotes(notes_len);
+
+ for (notes_start.., notes.keys()) |i, note| {
+ eb.extra.items[i] = @enumToInt(try eb.addErrorMessage(note));
+ }
+}
+
+pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void {
+ assert(file.zir_loaded);
+ assert(file.tree_loaded);
+ assert(file.source_loaded);
+ const payload_index = file.zir.extra[@enumToInt(Zir.ExtraIndex.compile_errors)];
+ assert(payload_index != 0);
+ const gpa = eb.gpa;
+
+ const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index);
+ const items_len = header.data.items_len;
+ var extra_index = header.end;
+ for (0..items_len) |_| {
+ const item = file.zir.extraData(Zir.Inst.CompileErrors.Item, extra_index);
+ extra_index = item.end;
+ const err_span = blk: {
+ if (item.data.node != 0) {
+ break :blk Module.SrcLoc.nodeToSpan(&file.tree, item.data.node);
+ }
+ const token_starts = file.tree.tokens.items(.start);
+ const start = token_starts[item.data.token] + item.data.byte_offset;
+ const end = start + @intCast(u32, file.tree.tokenSlice(item.data.token).len) - item.data.byte_offset;
+ break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start };
+ };
+ const err_loc = std.zig.findLineColumn(file.source, err_span.main);
+
+ {
+ const msg = file.zir.nullTerminatedString(item.data.msg);
+ const src_path = try file.fullPath(gpa);
+ defer gpa.free(src_path);
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString(msg),
+ .src_loc = try eb.addSourceLocation(.{
+ .src_path = try eb.addString(src_path),
+ .span_start = err_span.start,
+ .span_main = err_span.main,
+ .span_end = err_span.end,
+ .line = @intCast(u32, err_loc.line),
+ .column = @intCast(u32, err_loc.column),
+ .source_line = try eb.addString(err_loc.source_line),
+ }),
+ .notes_len = item.data.notesLen(file.zir),
+ });
+ }
+
+ if (item.data.notes != 0) {
+ const notes_start = try eb.reserveNotes(item.data.notes);
+ const block = file.zir.extraData(Zir.Inst.Block, item.data.notes);
+ const body = file.zir.extra[block.end..][0..block.data.body_len];
+ for (notes_start.., body) |note_i, body_elem| {
+ const note_item = file.zir.extraData(Zir.Inst.CompileErrors.Item, body_elem);
+ const msg = file.zir.nullTerminatedString(note_item.data.msg);
+ const span = blk: {
+ if (note_item.data.node != 0) {
+ break :blk Module.SrcLoc.nodeToSpan(&file.tree, note_item.data.node);
+ }
+ const token_starts = file.tree.tokens.items(.start);
+ const start = token_starts[note_item.data.token] + note_item.data.byte_offset;
+ const end = start + @intCast(u32, file.tree.tokenSlice(note_item.data.token).len) - item.data.byte_offset;
+ break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start };
+ };
+ const loc = std.zig.findLineColumn(file.source, span.main);
+ const src_path = try file.fullPath(gpa);
+ defer gpa.free(src_path);
+
+ eb.extra.items[note_i] = @enumToInt(try eb.addErrorMessage(.{
+ .msg = try eb.addString(msg),
+ .src_loc = try eb.addSourceLocation(.{
+ .src_path = try eb.addString(src_path),
+ .span_start = span.start,
+ .span_main = span.main,
+ .span_end = span.end,
+ .line = @intCast(u32, loc.line),
+ .column = @intCast(u32, loc.column),
+ .source_line = if (loc.eql(err_loc))
+ 0
+ else
+ try eb.addString(loc.source_line),
+ }),
+ .notes_len = 0, // TODO rework this function to be recursive
+ }));
+ }
+ }
+ }
}
pub fn performAllTheWork(
@@ -3231,11 +3048,11 @@ pub fn performAllTheWork(
// backend, preventing anonymous Decls from being prematurely destroyed.
while (true) {
if (comp.work_queue.readItem()) |work_item| {
- try processOneJob(comp, work_item);
+ try processOneJob(comp, work_item, main_progress_node);
continue;
}
if (comp.anon_work_queue.readItem()) |work_item| {
- try processOneJob(comp, work_item);
+ try processOneJob(comp, work_item, main_progress_node);
continue;
}
break;
@@ -3243,16 +3060,16 @@ pub fn performAllTheWork(
if (comp.job_queued_compiler_rt_lib) {
comp.job_queued_compiler_rt_lib = false;
- buildCompilerRtOneShot(comp, .Lib, &comp.compiler_rt_lib);
+ buildCompilerRtOneShot(comp, .Lib, &comp.compiler_rt_lib, main_progress_node);
}
if (comp.job_queued_compiler_rt_obj) {
comp.job_queued_compiler_rt_obj = false;
- buildCompilerRtOneShot(comp, .Obj, &comp.compiler_rt_obj);
+ buildCompilerRtOneShot(comp, .Obj, &comp.compiler_rt_obj, main_progress_node);
}
}
-fn processOneJob(comp: *Compilation, job: Job) !void {
+fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !void {
switch (job) {
.codegen_decl => |decl_index| {
const module = comp.bin_file.options.module.?;
@@ -3404,7 +3221,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("glibc_crt_file");
defer named_frame.end();
- glibc.buildCRTFile(comp, crt_file) catch |err| {
+ glibc.buildCRTFile(comp, crt_file, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(.glibc_crt_file, "unable to build glibc CRT file: {s}", .{
@errorName(err),
@@ -3415,7 +3232,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("glibc_shared_objects");
defer named_frame.end();
- glibc.buildSharedObjects(comp) catch |err| {
+ glibc.buildSharedObjects(comp, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
.glibc_shared_objects,
@@ -3428,7 +3245,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("musl_crt_file");
defer named_frame.end();
- musl.buildCRTFile(comp, crt_file) catch |err| {
+ musl.buildCRTFile(comp, crt_file, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
.musl_crt_file,
@@ -3441,7 +3258,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("mingw_crt_file");
defer named_frame.end();
- mingw.buildCRTFile(comp, crt_file) catch |err| {
+ mingw.buildCRTFile(comp, crt_file, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
.mingw_crt_file,
@@ -3468,7 +3285,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("libunwind");
defer named_frame.end();
- libunwind.buildStaticLib(comp) catch |err| {
+ libunwind.buildStaticLib(comp, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
.libunwind,
@@ -3481,7 +3298,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("libcxx");
defer named_frame.end();
- libcxx.buildLibCXX(comp) catch |err| {
+ libcxx.buildLibCXX(comp, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
.libcxx,
@@ -3494,7 +3311,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("libcxxabi");
defer named_frame.end();
- libcxx.buildLibCXXABI(comp) catch |err| {
+ libcxx.buildLibCXXABI(comp, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
.libcxxabi,
@@ -3507,7 +3324,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("libtsan");
defer named_frame.end();
- libtsan.buildTsan(comp) catch |err| {
+ libtsan.buildTsan(comp, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
.libtsan,
@@ -3520,7 +3337,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const named_frame = tracy.namedFrame("wasi_libc_crt_file");
defer named_frame.end();
- wasi_libc.buildCRTFile(comp, crt_file) catch |err| {
+ wasi_libc.buildCRTFile(comp, crt_file, prog_node) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
.wasi_libc_crt_file,
@@ -3538,6 +3355,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
.Lib,
&comp.libssp_static_lib,
.libssp,
+ prog_node,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SubCompilationFailed => return, // error reported already
@@ -3557,6 +3375,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
.Lib,
&comp.libc_static_lib,
.zig_libc,
+ prog_node,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SubCompilationFailed => return, // error reported already
@@ -3897,8 +3716,15 @@ fn buildCompilerRtOneShot(
comp: *Compilation,
output_mode: std.builtin.OutputMode,
out: *?CRTFile,
+ prog_node: *std.Progress.Node,
) void {
- comp.buildOutputFromZig("compiler_rt.zig", output_mode, out, .compiler_rt) catch |err| switch (err) {
+ comp.buildOutputFromZig(
+ "compiler_rt.zig",
+ output_mode,
+ out,
+ .compiler_rt,
+ prog_node,
+ ) catch |err| switch (err) {
error.SubCompilationFailed => return, // error reported already
else => comp.lockAndSetMiscFailure(
.compiler_rt,
@@ -5237,7 +5063,8 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
\\const std = @import("std");
\\/// Zig version. When writing code that supports multiple versions of Zig, prefer
\\/// feature detection (i.e. with `@hasDecl` or `@hasField`) over version checks.
- \\pub const zig_version = std.SemanticVersion.parse("{s}") catch unreachable;
+ \\pub const zig_version = std.SemanticVersion.parse(zig_version_string) catch unreachable;
+ \\pub const zig_version_string = "{s}";
\\pub const zig_backend = std.builtin.CompilerBackend.{};
\\
\\pub const output_mode = std.builtin.OutputMode.{};
@@ -5424,34 +5251,36 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
return buffer.toOwnedSliceSentinel(0);
}
-pub fn updateSubCompilation(sub_compilation: *Compilation) !void {
- try sub_compilation.update();
+pub fn updateSubCompilation(
+ parent_comp: *Compilation,
+ sub_comp: *Compilation,
+ misc_task: MiscTask,
+ prog_node: *std.Progress.Node,
+) !void {
+ {
+ var sub_node = prog_node.start(@tagName(misc_task), 0);
+ sub_node.activate();
+ defer sub_node.end();
- // Look for compilation errors in this sub_compilation
- // TODO instead of logging these errors, handle them in the callsites
- // of updateSubCompilation and attach them as sub-errors, properly
- // surfacing the errors. You can see an example of this already
- // done inside buildOutputFromZig.
- var errors = try sub_compilation.getAllErrorsAlloc();
- defer errors.deinit(sub_compilation.gpa);
+ try sub_comp.update(prog_node);
+ }
- if (errors.list.len != 0) {
- for (errors.list) |full_err_msg| {
- switch (full_err_msg) {
- .src => |src| {
- log.err("{s}:{d}:{d}: {s}", .{
- src.src_path,
- src.line + 1,
- src.column + 1,
- src.msg,
- });
- },
- .plain => |plain| {
- log.err("{s}", .{plain.msg});
- },
- }
- }
- return error.BuildingLibCObjectFailed;
+ // Look for compilation errors in this sub compilation
+ const gpa = parent_comp.gpa;
+ var keep_errors = false;
+ var errors = try sub_comp.getAllErrorsAlloc();
+ defer if (!keep_errors) errors.deinit(gpa);
+
+ if (errors.errorMessageCount() > 0) {
+ try parent_comp.misc_failures.ensureUnusedCapacity(gpa, 1);
+ parent_comp.misc_failures.putAssumeCapacityNoClobber(misc_task, .{
+ .msg = try std.fmt.allocPrint(gpa, "sub-compilation of {s} failed", .{
+ @tagName(misc_task),
+ }),
+ .children = errors,
+ });
+ keep_errors = true;
+ return error.SubCompilationFailed;
}
}
@@ -5461,6 +5290,7 @@ fn buildOutputFromZig(
output_mode: std.builtin.OutputMode,
out: *?CRTFile,
misc_task_tag: MiscTask,
+ prog_node: *std.Progress.Node,
) !void {
const tracy_trace = trace(@src());
defer tracy_trace.end();
@@ -5527,23 +5357,7 @@ fn buildOutputFromZig(
});
defer sub_compilation.destroy();
- try sub_compilation.update();
- // Look for compilation errors in this sub_compilation.
- var keep_errors = false;
- var errors = try sub_compilation.getAllErrorsAlloc();
- defer if (!keep_errors) errors.deinit(sub_compilation.gpa);
-
- if (errors.list.len != 0) {
- try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
- comp.misc_failures.putAssumeCapacityNoClobber(misc_task_tag, .{
- .msg = try std.fmt.allocPrint(comp.gpa, "sub-compilation of {s} failed", .{
- @tagName(misc_task_tag),
- }),
- .children = errors,
- });
- keep_errors = true;
- return error.SubCompilationFailed;
- }
+ try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
assert(out.* == null);
out.* = Compilation.CRTFile{
@@ -5558,6 +5372,8 @@ pub fn build_crt_file(
comp: *Compilation,
root_name: []const u8,
output_mode: std.builtin.OutputMode,
+ misc_task_tag: MiscTask,
+ prog_node: *std.Progress.Node,
c_source_files: []const Compilation.CSourceFile,
) !void {
const tracy_trace = trace(@src());
@@ -5618,7 +5434,7 @@ pub fn build_crt_file(
});
defer sub_compilation.destroy();
- try sub_compilation.updateSubCompilation();
+ try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
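For context, a minimal standalone sketch of the std.Progress pattern that the new prog_node parameters thread through these jobs; the node names ("compile", "compiler_rt") and the bare main() are illustrative only, not part of this patch.

const std = @import("std");

pub fn main() void {
    var progress: std.Progress = .{};
    const root_node = progress.start("compile", 0);
    defer root_node.end();

    // Mirrors updateSubCompilation above: each misc task opens, activates,
    // and ends its own child node under the node it was handed.
    var sub_node = root_node.start("compiler_rt", 0);
    sub_node.activate();
    defer sub_node.end();

    // ... the sub-task's work would happen here ...
}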
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 481cf25d04..8dc81aa165 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -226,6 +226,7 @@ pub fn categorizeOperand(
.ret_ptr,
.constant,
.const_ty,
+ .trap,
.breakpoint,
.dbg_stmt,
.dbg_inline_begin,
@@ -848,6 +849,7 @@ fn analyzeInst(
.ret_ptr,
.constant,
.const_ty,
+ .trap,
.breakpoint,
.dbg_stmt,
.dbg_inline_begin,
diff --git a/src/Module.zig b/src/Module.zig
index a2502d36d3..c47e4fc234 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -3528,6 +3528,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
const digest = hash: {
var path_hash: Cache.HashHelper = .{};
path_hash.addBytes(build_options.version);
+ path_hash.add(builtin.zig_backend);
if (!want_local_cache) {
path_hash.addOptionalBytes(file.pkg.root_src_directory.path);
}
@@ -3537,44 +3538,66 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
const cache_directory = if (want_local_cache) mod.local_zir_cache else mod.global_zir_cache;
const zir_dir = cache_directory.handle;
- var cache_file: ?std.fs.File = null;
- defer if (cache_file) |f| f.close();
-
// Determine whether we need to reload the file from disk and redo parsing and AstGen.
- switch (file.status) {
- .never_loaded, .retryable_failure => cached: {
+ var lock: std.fs.File.Lock = switch (file.status) {
+ .never_loaded, .retryable_failure => lock: {
// First, load the cached ZIR code, if any.
log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{
file.sub_file_path, want_local_cache, &digest,
});
- // We ask for a lock in order to coordinate with other zig processes.
- // If another process is already working on this file, we will get the cached
- // version. Likewise if we're working on AstGen and another process asks for
- // the cached file, they'll get it.
- cache_file = zir_dir.openFile(&digest, .{ .lock = .Shared }) catch |err| switch (err) {
- error.PathAlreadyExists => unreachable, // opening for reading
- error.NoSpaceLeft => unreachable, // opening for reading
- error.NotDir => unreachable, // no dir components
- error.InvalidUtf8 => unreachable, // it's a hex encoded name
- error.BadPathName => unreachable, // it's a hex encoded name
- error.NameTooLong => unreachable, // it's a fixed size name
- error.PipeBusy => unreachable, // it's not a pipe
- error.WouldBlock => unreachable, // not asking for non-blocking I/O
+ break :lock .Shared;
+ },
+ .parse_failure, .astgen_failure, .success_zir => lock: {
+ const unchanged_metadata =
+ stat.size == file.stat.size and
+ stat.mtime == file.stat.mtime and
+ stat.inode == file.stat.inode;
- error.SymLinkLoop,
- error.FileNotFound,
- error.Unexpected,
- => break :cached,
+ if (unchanged_metadata) {
+ log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
+ return;
+ }
- else => |e| return e, // Retryable errors are handled at callsite.
- };
+ log.debug("metadata changed: {s}", .{file.sub_file_path});
+ break :lock .Exclusive;
+ },
+ };
+
+ // We ask for a lock in order to coordinate with other zig processes.
+ // If another process is already working on this file, we will get the cached
+ // version. Likewise if we're working on AstGen and another process asks for
+ // the cached file, they'll get it.
+ const cache_file = while (true) {
+ break zir_dir.createFile(&digest, .{
+ .read = true,
+ .truncate = false,
+ .lock = lock,
+ }) catch |err| switch (err) {
+ error.NotDir => unreachable, // no dir components
+ error.InvalidUtf8 => unreachable, // it's a hex encoded name
+ error.BadPathName => unreachable, // it's a hex encoded name
+ error.NameTooLong => unreachable, // it's a fixed size name
+ error.PipeBusy => unreachable, // it's not a pipe
+ error.WouldBlock => unreachable, // not asking for non-blocking I/O
+ // There are no dir components, so you would think that this was
+ // unreachable, however we have observed on macOS two processes racing
+ // to do openat() with O_CREAT manifest in ENOENT.
+ error.FileNotFound => continue,
+
+ else => |e| return e, // Retryable errors are handled at callsite.
+ };
+ };
+ defer cache_file.close();
+
+ while (true) {
+ update: {
// First we read the header to determine the lengths of arrays.
- const header = cache_file.?.reader().readStruct(Zir.Header) catch |err| switch (err) {
+ const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) {
// This can happen if Zig bails out of this function between creating
// the cached file and writing it.
- error.EndOfStream => break :cached,
+ error.EndOfStream => break :update,
else => |e| return e,
};
const unchanged_metadata =
@@ -3584,7 +3607,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
if (!unchanged_metadata) {
log.debug("AstGen cache stale: {s}", .{file.sub_file_path});
- break :cached;
+ break :update;
}
log.debug("AstGen cache hit: {s} instructions_len={d}", .{
file.sub_file_path, header.instructions_len,
@@ -3636,13 +3659,13 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
.iov_len = header.extra_len * 4,
},
};
- const amt_read = try cache_file.?.readvAll(&iovecs);
+ const amt_read = try cache_file.readvAll(&iovecs);
const amt_expected = zir.instructions.len * 9 +
zir.string_bytes.len +
zir.extra.len * 4;
if (amt_read != amt_expected) {
log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path});
- break :cached;
+ break :update;
}
if (data_has_safety_tag) {
const tags = zir.instructions.items(.tag);
@@ -3678,42 +3701,22 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
return error.AnalysisFail;
}
return;
- },
- .parse_failure, .astgen_failure, .success_zir => {
- const unchanged_metadata =
- stat.size == file.stat.size and
- stat.mtime == file.stat.mtime and
- stat.inode == file.stat.inode;
+ }
- if (unchanged_metadata) {
- log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
- return;
- }
-
- log.debug("metadata changed: {s}", .{file.sub_file_path});
- },
+ // If we already have the exclusive lock then it is our job to update.
+ if (builtin.os.tag == .wasi or lock == .Exclusive) break;
+ // Otherwise, unlock to give someone a chance to get the exclusive lock
+ // and then upgrade to an exclusive lock.
+ cache_file.unlock();
+ lock = .Exclusive;
+ try cache_file.lock(lock);
}
- if (cache_file) |f| {
- f.close();
- cache_file = null;
- }
- cache_file = zir_dir.createFile(&digest, .{ .lock = .Exclusive }) catch |err| switch (err) {
- error.NotDir => unreachable, // no dir components
- error.InvalidUtf8 => unreachable, // it's a hex encoded name
- error.BadPathName => unreachable, // it's a hex encoded name
- error.NameTooLong => unreachable, // it's a fixed size name
- error.PipeBusy => unreachable, // it's not a pipe
- error.WouldBlock => unreachable, // not asking for non-blocking I/O
- error.FileNotFound => unreachable, // no dir components
- else => |e| {
- const pkg_path = file.pkg.root_src_directory.path orelse ".";
- const cache_path = cache_directory.path orelse ".";
- log.warn("unable to save cached ZIR code for {s}/{s} to {s}/{s}: {s}", .{
- pkg_path, file.sub_file_path, cache_path, &digest, @errorName(e),
- });
- return;
- },
+ // The cache is definitely stale so delete the contents to avoid an underwrite later.
+ cache_file.setEndPos(0) catch |err| switch (err) {
+ error.FileTooBig => unreachable, // 0 is not too big
+
+ else => |e| return e,
};
mod.lockAndClearFileCompileError(file);
@@ -3753,67 +3756,9 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
file.source_loaded = true;
file.tree = try Ast.parse(gpa, source, .zig);
- defer if (!file.tree_loaded) file.tree.deinit(gpa);
-
- if (file.tree.errors.len != 0) {
- const parse_err = file.tree.errors[0];
-
- var msg = std.ArrayList(u8).init(gpa);
- defer msg.deinit();
-
- const token_starts = file.tree.tokens.items(.start);
- const token_tags = file.tree.tokens.items(.tag);
-
- const extra_offset = file.tree.errorOffset(parse_err);
- try file.tree.renderError(parse_err, msg.writer());
- const err_msg = try gpa.create(ErrorMsg);
- err_msg.* = .{
- .src_loc = .{
- .file_scope = file,
- .parent_decl_node = 0,
- .lazy = if (extra_offset == 0) .{
- .token_abs = parse_err.token,
- } else .{
- .byte_abs = token_starts[parse_err.token] + extra_offset,
- },
- },
- .msg = try msg.toOwnedSlice(),
- };
- if (token_tags[parse_err.token + @boolToInt(parse_err.token_is_prev)] == .invalid) {
- const bad_off = @intCast(u32, file.tree.tokenSlice(parse_err.token + @boolToInt(parse_err.token_is_prev)).len);
- const byte_abs = token_starts[parse_err.token + @boolToInt(parse_err.token_is_prev)] + bad_off;
- try mod.errNoteNonLazy(.{
- .file_scope = file,
- .parent_decl_node = 0,
- .lazy = .{ .byte_abs = byte_abs },
- }, err_msg, "invalid byte: '{'}'", .{std.zig.fmtEscapes(source[byte_abs..][0..1])});
- }
-
- for (file.tree.errors[1..]) |note| {
- if (!note.is_note) break;
-
- try file.tree.renderError(note, msg.writer());
- err_msg.notes = try mod.gpa.realloc(err_msg.notes, err_msg.notes.len + 1);
- err_msg.notes[err_msg.notes.len - 1] = .{
- .src_loc = .{
- .file_scope = file,
- .parent_decl_node = 0,
- .lazy = .{ .token_abs = note.token },
- },
- .msg = try msg.toOwnedSlice(),
- };
- }
-
- {
- comp.mutex.lock();
- defer comp.mutex.unlock();
- try mod.failed_files.putNoClobber(gpa, file, err_msg);
- }
- file.status = .parse_failure;
- return error.AnalysisFail;
- }
file.tree_loaded = true;
+ // Any potential AST errors are converted to ZIR errors here.
file.zir = try AstGen.generate(gpa, file.tree);
file.zir_loaded = true;
file.status = .success_zir;
@@ -3870,7 +3815,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
.iov_len = file.zir.extra.len * 4,
},
};
- cache_file.?.writevAll(&iovecs) catch |err| {
+ cache_file.writevAll(&iovecs) catch |err| {
const pkg_path = file.pkg.root_src_directory.path orelse ".";
const cache_path = cache_directory.path orelse ".";
log.warn("unable to write cached ZIR code for {s}/{s} to {s}/{s}: {s}", .{
@@ -3922,6 +3867,9 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
const gpa = mod.gpa;
const new_zir = file.zir;
+ // The root decl will be null if the previous ZIR had AST errors.
+ const root_decl = file.root_decl.unwrap() orelse return;
+
// Maps from old ZIR to new ZIR, struct_decl, enum_decl, etc. Any instruction which
// creates a namespace, gets mapped from old to new here.
var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{};
@@ -3939,7 +3887,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
var decl_stack: ArrayListUnmanaged(Decl.Index) = .{};
defer decl_stack.deinit(gpa);
- const root_decl = file.root_decl.unwrap().?;
try decl_stack.append(gpa, root_decl);
file.deleted_decls.clearRetainingCapacity();
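A minimal sketch of the shared-to-exclusive lock upgrade used in astGenFile above, with the staleness decision reduced to a boolean parameter; the real code re-reads the cached ZIR header after taking the lock, and the function name here is hypothetical.

const std = @import("std");

fn openCacheEntry(dir: std.fs.Dir, name: []const u8, known_stale: bool) !std.fs.File {
    var lock: std.fs.File.Lock = .Shared;
    const file = try dir.createFile(name, .{
        .read = true,
        .truncate = false, // never clobber an entry another process may be reading
        .lock = lock,
    });
    errdefer file.close();

    if (known_stale) {
        // Release the shared lock so another process can win the exclusive
        // lock first, then upgrade and wipe the entry before rewriting it.
        file.unlock();
        lock = .Exclusive;
        try file.lock(lock);
        try file.setEndPos(0);
    }
    return file;
}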
diff --git a/src/Package.zig b/src/Package.zig
index 2aa5e85294..d26daf5a0c 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -8,11 +8,11 @@ const Allocator = mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.package);
const main = @import("main.zig");
+const ThreadPool = std.Thread.Pool;
+const WaitGroup = std.Thread.WaitGroup;
const Compilation = @import("Compilation.zig");
const Module = @import("Module.zig");
-const ThreadPool = @import("ThreadPool.zig");
-const WaitGroup = @import("WaitGroup.zig");
const Cache = std.Build.Cache;
const build_options = @import("build_options");
const Manifest = @import("Manifest.zig");
@@ -215,6 +215,7 @@ pub const build_zig_basename = "build.zig";
pub fn fetchAndAddDependencies(
pkg: *Package,
+ root_pkg: *Package,
arena: Allocator,
thread_pool: *ThreadPool,
http_client: *std.http.Client,
@@ -224,7 +225,7 @@ pub fn fetchAndAddDependencies(
dependencies_source: *std.ArrayList(u8),
build_roots_source: *std.ArrayList(u8),
name_prefix: []const u8,
- color: main.Color,
+ error_bundle: *std.zig.ErrorBundle.Wip,
all_modules: *AllModules,
) !void {
const max_bytes = 10 * 1024 * 1024;
@@ -249,7 +250,7 @@ pub fn fetchAndAddDependencies(
if (ast.errors.len > 0) {
const file_path = try directory.join(arena, &.{Manifest.basename});
- try main.printErrsMsgToStdErr(gpa, arena, ast, file_path, color);
+ try main.putAstErrorsIntoBundle(gpa, ast, file_path, error_bundle);
return error.PackageFetchFailed;
}
@@ -257,14 +258,9 @@ pub fn fetchAndAddDependencies(
defer manifest.deinit(gpa);
if (manifest.errors.len > 0) {
- const ttyconf: std.debug.TTY.Config = switch (color) {
- .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
- .on => .escape_codes,
- .off => .no_color,
- };
const file_path = try directory.join(arena, &.{Manifest.basename});
for (manifest.errors) |msg| {
- Report.renderErrorMessage(ast, file_path, ttyconf, msg, &.{});
+ try Report.addErrorMessage(ast, file_path, error_bundle, 0, msg);
}
return error.PackageFetchFailed;
}
@@ -272,8 +268,7 @@ pub fn fetchAndAddDependencies(
const report: Report = .{
.ast = &ast,
.directory = directory,
- .color = color,
- .arena = arena,
+ .error_bundle = error_bundle,
};
var any_error = false;
@@ -295,7 +290,8 @@ pub fn fetchAndAddDependencies(
all_modules,
);
- try pkg.fetchAndAddDependencies(
+ try sub_pkg.fetchAndAddDependencies(
+ root_pkg,
arena,
thread_pool,
http_client,
@@ -305,11 +301,12 @@ pub fn fetchAndAddDependencies(
dependencies_source,
build_roots_source,
sub_prefix,
- color,
+ error_bundle,
all_modules,
);
- try add(pkg, gpa, fqn, sub_pkg);
+ try pkg.add(gpa, name, sub_pkg);
+ try root_pkg.add(gpa, fqn, sub_pkg);
try dependencies_source.writer().print(" pub const {s} = @import(\"{}\");\n", .{
std.zig.fmtId(fqn), std.zig.fmtEscapes(fqn),
@@ -347,8 +344,7 @@ pub fn createFilePkg(
const Report = struct {
ast: *const std.zig.Ast,
directory: Compilation.Directory,
- color: main.Color,
- arena: Allocator,
+ error_bundle: *std.zig.ErrorBundle.Wip,
fn fail(
report: Report,
@@ -356,52 +352,46 @@ const Report = struct {
comptime fmt_string: []const u8,
fmt_args: anytype,
) error{ PackageFetchFailed, OutOfMemory } {
- return failWithNotes(report, &.{}, tok, fmt_string, fmt_args);
- }
+ const gpa = report.error_bundle.gpa;
- fn failWithNotes(
- report: Report,
- notes: []const Compilation.AllErrors.Message,
- tok: std.zig.Ast.TokenIndex,
- comptime fmt_string: []const u8,
- fmt_args: anytype,
- ) error{ PackageFetchFailed, OutOfMemory } {
- const ttyconf: std.debug.TTY.Config = switch (report.color) {
- .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
- .on => .escape_codes,
- .off => .no_color,
- };
- const file_path = try report.directory.join(report.arena, &.{Manifest.basename});
- renderErrorMessage(report.ast.*, file_path, ttyconf, .{
+ const file_path = try report.directory.join(gpa, &.{Manifest.basename});
+ defer gpa.free(file_path);
+
+ const msg = try std.fmt.allocPrint(gpa, fmt_string, fmt_args);
+ defer gpa.free(msg);
+
+ try addErrorMessage(report.ast.*, file_path, report.error_bundle, 0, .{
.tok = tok,
.off = 0,
- .msg = try std.fmt.allocPrint(report.arena, fmt_string, fmt_args),
- }, notes);
+ .msg = msg,
+ });
+
return error.PackageFetchFailed;
}
- fn renderErrorMessage(
+ fn addErrorMessage(
ast: std.zig.Ast,
file_path: []const u8,
- ttyconf: std.debug.TTY.Config,
+ eb: *std.zig.ErrorBundle.Wip,
+ notes_len: u32,
msg: Manifest.ErrorMessage,
- notes: []const Compilation.AllErrors.Message,
- ) void {
+ ) error{OutOfMemory}!void {
const token_starts = ast.tokens.items(.start);
const start_loc = ast.tokenLocation(0, msg.tok);
- Compilation.AllErrors.Message.renderToStdErr(.{ .src = .{
- .msg = msg.msg,
- .src_path = file_path,
- .line = @intCast(u32, start_loc.line),
- .column = @intCast(u32, start_loc.column),
- .span = .{
- .start = token_starts[msg.tok],
- .end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
- .main = token_starts[msg.tok] + msg.off,
- },
- .source_line = ast.source[start_loc.line_start..start_loc.line_end],
- .notes = notes,
- } }, ttyconf);
+
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString(msg.msg),
+ .src_loc = try eb.addSourceLocation(.{
+ .src_path = try eb.addString(file_path),
+ .span_start = token_starts[msg.tok],
+ .span_end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
+ .span_main = token_starts[msg.tok] + msg.off,
+ .line = @intCast(u32, start_loc.line),
+ .column = @intCast(u32, start_loc.column),
+ .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]),
+ }),
+ .notes_len = notes_len,
+ });
}
};
@@ -432,6 +422,12 @@ fn fetchAndUnpack(
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
errdefer gpa.free(build_root);
+ var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => break :cached,
+ else => |e| return e,
+ };
+ errdefer pkg_dir.close();
+
try build_roots_source.writer().print(" pub const {s} = \"{}\";\n", .{
std.zig.fmtId(fqn), std.zig.fmtEscapes(build_root),
});
@@ -444,12 +440,6 @@ fn fetchAndUnpack(
return gop.value_ptr.*;
}
- var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
- error.FileNotFound => break :cached,
- else => |e| return e,
- };
- errdefer pkg_dir.close();
-
const ptr = try gpa.create(Package);
errdefer gpa.destroy(ptr);
@@ -501,9 +491,7 @@ fn fetchAndUnpack(
// by default, so the same logic applies for buffering the reader as for gzip.
try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz);
} else {
- return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{
- uri.path,
- });
+ return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{uri.path});
}
// TODO: delete files not included in the package prior to computing the package hash.
@@ -530,10 +518,21 @@ fn fetchAndUnpack(
});
}
} else {
- const notes: [1]Compilation.AllErrors.Message = .{.{ .plain = .{
- .msg = try std.fmt.allocPrint(report.arena, "expected .hash = \"{s}\",", .{&actual_hex}),
- } }};
- return report.failWithNotes(¬es, dep.url_tok, "url field is missing corresponding hash field", .{});
+ const file_path = try report.directory.join(gpa, &.{Manifest.basename});
+ defer gpa.free(file_path);
+
+ const eb = report.error_bundle;
+ const notes_len = 1;
+ try Report.addErrorMessage(report.ast.*, file_path, eb, notes_len, .{
+ .tok = dep.url_tok,
+ .off = 0,
+ .msg = "url field is missing corresponding hash field",
+ });
+ const notes_start = try eb.reserveNotes(notes_len);
+ eb.extra.items[notes_start] = @enumToInt(try eb.addErrorMessage(.{
+ .msg = try eb.printString("expected .hash = \"{s}\",", .{&actual_hex}),
+ }));
+ return error.PackageFetchFailed;
}
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
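For reference, a minimal sketch of the std.zig.ErrorBundle.Wip flow that Report now feeds into; the function name and message text are illustrative, and a message added this way keeps the default .none source location.

const std = @import("std");

fn reportPlainError(gpa: std.mem.Allocator, msg_text: []const u8) !void {
    var wip: std.zig.ErrorBundle.Wip = undefined;
    try wip.init(gpa);
    errdefer wip.deinit();

    // A root message with no source location and no notes.
    try wip.addRootErrorMessage(.{
        .msg = try wip.addString(msg_text),
        .notes_len = 0,
    });

    var bundle = try wip.toOwnedBundle("");
    defer bundle.deinit(gpa);
    bundle.renderToStdErr(.{ .ttyconf = .no_color });
}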
diff --git a/src/Sema.zig b/src/Sema.zig
index f9a6f39867..8b476d4542 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -574,11 +574,13 @@ pub const Block = struct {
});
}
- fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator, vector_ty: Air.Inst.Ref) !Air.Inst.Ref {
+ fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
return block.addInst(.{
.tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
.data = .{ .ty_pl = .{
- .ty = vector_ty,
+ .ty = try block.sema.addType(
+ try Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool),
+ ),
.payload = try block.sema.addExtra(Air.VectorCmp{
.lhs = lhs,
.rhs = rhs,
@@ -1101,6 +1103,7 @@ fn analyzeBodyInner(
.@"unreachable" => break sema.zirUnreachable(block, inst),
.panic => break sema.zirPanic(block, inst, false),
.panic_comptime => break sema.zirPanic(block, inst, true),
+ .trap => break sema.zirTrap(block, inst),
// zig fmt: on
.extended => ext: {
@@ -1167,6 +1170,11 @@ fn analyzeBodyInner(
i += 1;
continue;
},
+ .set_cold => {
+ try sema.zirSetCold(block, extended);
+ i += 1;
+ continue;
+ },
.breakpoint => {
if (!block.is_comptime) {
_ = try block.addNoOp(.breakpoint);
@@ -1304,11 +1312,6 @@ fn analyzeBodyInner(
i += 1;
continue;
},
- .set_cold => {
- try sema.zirSetCold(block, inst);
- i += 1;
- continue;
- },
.set_runtime_safety => {
try sema.zirSetRuntimeSafety(block, inst);
i += 1;
@@ -1609,6 +1612,12 @@ fn analyzeBodyInner(
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const err_union = try sema.resolveInst(extra.data.operand);
+ const err_union_ty = sema.typeOf(err_union);
+ if (err_union_ty.zigTypeTag() != .ErrorUnion) {
+ return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
+ err_union_ty.fmt(sema.mod),
+ });
+ }
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
@@ -1616,7 +1625,6 @@ fn analyzeBodyInner(
return err;
};
if (is_non_err_tv.val.toBool()) {
- const err_union_ty = sema.typeOf(err_union);
break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
}
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
@@ -2203,29 +2211,27 @@ pub fn fail(
fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
@setCold(true);
+ const gpa = sema.gpa;
if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) {
if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation;
- var arena = std.heap.ArenaAllocator.init(sema.gpa);
- errdefer arena.deinit();
- var errors = std.ArrayList(Compilation.AllErrors.Message).init(sema.gpa);
- defer errors.deinit();
-
- Compilation.AllErrors.add(sema.mod, &arena, &errors, err_msg.*) catch unreachable;
-
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ wip_errors.init(gpa) catch unreachable;
+ Compilation.addModuleErrorMsg(&wip_errors, err_msg.*) catch unreachable;
std.debug.print("compile error during Sema:\n", .{});
- Compilation.AllErrors.Message.renderToStdErr(errors.items[0], .no_color);
+ var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
+ error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
crash_report.compilerPanic("unexpected compile error occurred", null, null);
}
const mod = sema.mod;
ref: {
- errdefer err_msg.destroy(mod.gpa);
+ errdefer err_msg.destroy(gpa);
if (err_msg.src_loc.lazy == .unneeded) {
return error.NeededSourceLocation;
}
- try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1);
- try mod.failed_files.ensureUnusedCapacity(mod.gpa, 1);
+ try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
+ try mod.failed_files.ensureUnusedCapacity(gpa, 1);
const max_references = blk: {
if (sema.mod.comp.reference_trace) |num| break :blk num;
@@ -2235,11 +2241,11 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
};
var referenced_by = if (sema.func) |some| some.owner_decl else sema.owner_decl_index;
- var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(sema.gpa);
+ var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa);
defer reference_stack.deinit();
// Avoid infinite loops.
- var seen = std.AutoHashMap(Module.Decl.Index, void).init(sema.gpa);
+ var seen = std.AutoHashMap(Module.Decl.Index, void).init(gpa);
defer seen.deinit();
var cur_reference_trace: u32 = 0;
@@ -2280,7 +2286,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
if (gop.found_existing) {
// If there are multiple errors for the same Decl, prefer the first one added.
sema.err = null;
- err_msg.destroy(mod.gpa);
+ err_msg.destroy(gpa);
} else {
sema.err = err_msg;
gop.value_ptr.* = err_msg;
@@ -5144,6 +5150,14 @@ fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index, force_comptime: bo
return always_noreturn;
}
+fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
+ const src_node = sema.code.instructions.items(.data)[inst].node;
+ const src = LazySrcLoc.nodeOffset(src_node);
+ sema.src = src;
+ _ = try block.addNoOp(.trap);
+ return always_noreturn;
+}
+
fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -5721,10 +5735,10 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
gop.value_ptr.* = .{ .alignment = alignment, .src = src };
}
-fn zirSetCold(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand, "operand to @setCold must be comptime-known");
+fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+ const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, "operand to @setCold must be comptime-known");
const func = sema.func orelse return; // does nothing outside a function
func.is_cold = is_cold;
}
@@ -8766,7 +8780,7 @@ fn funcCommon(
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (!Type.fnCallingConventionAllowsZigTypes(cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) {
+ if (!ret_poison and !Type.fnCallingConventionAllowsZigTypes(cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) {
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
return_type.fmt(sema.mod), @tagName(cc_resolved),
@@ -9403,7 +9417,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros);
- const is_in_range = try block.addCmpVector(operand, zero_inst, .eq, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(operand, zero_inst, .eq);
const all_in_range = try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{ .operand = is_in_range, .operation = .And } },
@@ -9457,7 +9471,7 @@ fn intCast(
const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val);
const ok = if (is_vector) ok: {
- const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -9474,7 +9488,7 @@ fn intCast(
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
} else {
const ok = if (is_vector) ok: {
- const is_in_range = try block.addCmpVector(diff, dest_max, .lte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -9495,7 +9509,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero_inst = try sema.addConstant(operand_ty, zero_val);
- const is_in_range = try block.addCmpVector(operand, zero_inst, .gte, try sema.addType(operand_ty));
+ const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -12007,7 +12021,7 @@ fn zirShl(
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
- const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
+ const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -12163,7 +12177,7 @@ fn zirShr(
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
- const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
+ const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -12182,7 +12196,7 @@ fn zirShr(
const back = try block.addBinOp(.shl, result, rhs);
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
- const eql = try block.addCmpVector(lhs, back, .eq, try sema.addType(rhs_ty));
+ const eql = try block.addCmpVector(lhs, back, .eq);
break :ok try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -13183,7 +13197,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const floored = try block.addUnOp(.floor, result);
if (resolved_type.zigTypeTag() == .Vector) {
- const eql = try block.addCmpVector(result, floored, .eq, try sema.addType(resolved_type));
+ const eql = try block.addCmpVector(result, floored, .eq);
break :ok try block.addInst(.{
.tag = switch (block.float_mode) {
.Strict => .reduce,
@@ -13207,7 +13221,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (resolved_type.zigTypeTag() == .Vector) {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero = try sema.addConstant(resolved_type, zero_val);
- const eql = try block.addCmpVector(remainder, zero, .eq, try sema.addType(resolved_type));
+ const eql = try block.addCmpVector(remainder, zero, .eq);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -13505,14 +13519,13 @@ fn addDivIntOverflowSafety(
var ok: Air.Inst.Ref = .none;
if (resolved_type.zigTypeTag() == .Vector) {
- const vector_ty_ref = try sema.addType(resolved_type);
if (maybe_lhs_val == null) {
const min_int_ref = try sema.addConstant(resolved_type, min_int);
- ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq, vector_ty_ref);
+ ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
}
if (maybe_rhs_val == null) {
const neg_one_ref = try sema.addConstant(resolved_type, neg_one);
- const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq, vector_ty_ref);
+ const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq);
if (ok == .none) {
ok = rhs_ok;
} else {
@@ -13564,7 +13577,7 @@ fn addDivByZeroSafety(
const ok = if (resolved_type.zigTypeTag() == .Vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero = try sema.addConstant(resolved_type, zero_val);
- const ok = try block.addCmpVector(casted_rhs, zero, .neq, try sema.addType(resolved_type));
+ const ok = try block.addCmpVector(casted_rhs, zero, .neq);
break :ok try block.addInst(.{
.tag = if (is_int) .reduce else .reduce_optimized,
.data = .{ .reduce = .{
@@ -15193,9 +15206,7 @@ fn cmpSelf(
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (resolved_type.zigTypeTag() == .Vector) {
- const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool);
- const result_ty_ref = try sema.addType(result_ty);
- return block.addCmpVector(casted_lhs, casted_rhs, op, result_ty_ref);
+ return block.addCmpVector(casted_lhs, casted_rhs, op);
}
const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
return block.addBinOp(tag, casted_lhs, casted_rhs);
@@ -18360,7 +18371,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const union_val = val.cast(Value.Payload.Union).?.data;
const target = mod.getTarget();
const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?;
- if (union_val.val.anyUndef()) return sema.failWithUseOfUndef(block, src);
+ if (union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
switch (@intToEnum(std.builtin.TypeId, tag_index)) {
.Type => return Air.Inst.Ref.type_type,
.Void => return Air.Inst.Ref.void_type,
@@ -22274,7 +22285,6 @@ fn zirBuiltinExtern(
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
- const src = LazySrcLoc.nodeOffset(extra.node);
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
@@ -22302,39 +22312,41 @@ fn zirBuiltinExtern(
const new_decl = sema.mod.declPtr(new_decl_index);
new_decl.name = try sema.gpa.dupeZ(u8, options.name);
- var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
- errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.allocator();
+ {
+ var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
+ errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.allocator();
- const new_var = try new_decl_arena_allocator.create(Module.Var);
- errdefer new_decl_arena_allocator.destroy(new_var);
+ const new_var = try new_decl_arena_allocator.create(Module.Var);
+ new_var.* = .{
+ .owner_decl = sema.owner_decl_index,
+ .init = Value.initTag(.unreachable_value),
+ .is_extern = true,
+ .is_mutable = false,
+ .is_threadlocal = options.is_thread_local,
+ .is_weak_linkage = options.linkage == .Weak,
+ .lib_name = null,
+ };
- new_var.* = .{
- .owner_decl = sema.owner_decl_index,
- .init = Value.initTag(.unreachable_value),
- .is_extern = true,
- .is_mutable = false,
- .is_threadlocal = options.is_thread_local,
- .is_weak_linkage = options.linkage == .Weak,
- .lib_name = null,
- };
+ new_decl.src_line = sema.owner_decl.src_line;
+ // We only access this decl through the decl_ref with the correct type created
+ // below, so this type doesn't matter
+ new_decl.ty = Type.Tag.init(.anyopaque);
+ new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var);
+ new_decl.@"align" = 0;
+ new_decl.@"linksection" = null;
+ new_decl.has_tv = true;
+ new_decl.analysis = .complete;
+ new_decl.generation = sema.mod.generation;
- new_decl.src_line = sema.owner_decl.src_line;
- new_decl.ty = try ty.copy(new_decl_arena_allocator);
- new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var);
- new_decl.@"align" = 0;
- new_decl.@"linksection" = null;
- new_decl.has_tv = true;
- new_decl.analysis = .complete;
- new_decl.generation = sema.mod.generation;
+ try new_decl.finalizeNewArena(&new_decl_arena);
+ }
- const arena_state = try new_decl_arena_allocator.create(std.heap.ArenaAllocator.State);
- arena_state.* = new_decl_arena.state;
- new_decl.value_arena = arena_state;
+ try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index);
+ try sema.ensureDeclAnalyzed(new_decl_index);
- const ref = try sema.analyzeDeclRef(new_decl_index);
- try sema.requireRuntimeBlock(block, src, null);
- return block.addBitCast(ty, ref);
+ const ref = try Value.Tag.decl_ref.create(sema.arena, new_decl_index);
+ return sema.addConstant(ty, ref);
}
fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src: ?LazySrcLoc) !void {
@@ -23026,7 +23038,7 @@ fn panicSentinelMismatch(
const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: {
const eql =
- try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq, try sema.addType(sentinel_ty));
+ try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
break :ok try parent_block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
@@ -23592,10 +23604,13 @@ fn fieldCallBind(
}
// If we get here, we need to look for a decl in the struct type instead.
- switch (concrete_ty.zigTypeTag()) {
- .Struct, .Opaque, .Union, .Enum => {
+ const found_decl = switch (concrete_ty.zigTypeTag()) {
+ .Struct, .Opaque, .Union, .Enum => found_decl: {
if (concrete_ty.getNamespace()) |namespace| {
- if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
+ if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| {
+ try sema.addReferencedBy(block, src, decl_idx);
+ const inst = try sema.analyzeDeclRef(decl_idx);
+
const decl_val = try sema.analyzeLoad(block, src, inst, src);
const decl_type = sema.typeOf(decl_val);
if (decl_type.zigTypeTag() == .Fn and
@@ -23612,7 +23627,7 @@ fn fieldCallBind(
first_param_type.ptrSize() == .C) and
first_param_type.childType().eql(concrete_ty, sema.mod)))
{
- // zig fmt: on
+ // zig fmt: on
// TODO: bound fn calls on rvalues should probably
// generate a by-value argument somehow.
const ty = Type.Tag.bound_fn.init();
@@ -23651,16 +23666,22 @@ fn fieldCallBind(
return sema.addConstant(ty, value);
}
}
+ break :found_decl decl_idx;
}
}
+ break :found_decl null;
},
- else => {},
- }
+ else => null,
+ };
const msg = msg: {
const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(sema.mod) });
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, concrete_ty);
+ if (found_decl) |decl_idx| {
+ const decl = sema.mod.declPtr(decl_idx);
+ try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "'{s}' is not a member function", .{field_name});
+ }
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -24832,6 +24853,7 @@ fn coerceExtra(
const array_ty = dest_info.pointee_type;
if (array_ty.zigTypeTag() != .Array) break :single_item;
const array_elem_ty = array_ty.childType();
+ if (array_ty.arrayLen() != 1) break :single_item;
const dest_is_mut = dest_info.mutable;
switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
.ok => {},
@@ -26625,6 +26647,23 @@ fn beginComptimePtrMutation(
});
}
const elem_ty = parent.ty.childType();
+
+ // We might have a pointer to multiple elements of the array (e.g. a pointer
+ // to a sub-array). In this case, we just have to reinterpret the relevant
+ // bytes of the whole array rather than any single element.
+ const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty);
+ if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) {
+ const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
+ return .{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .pointee = .{ .reinterpret = .{
+ .val_ptr = val_ptr,
+ .byte_offset = elem_abi_size * elem_ptr.index,
+ } },
+ .ty = parent.ty,
+ };
+ }
+
switch (val_ptr.tag()) {
.undef => {
// An array has been initialized to undefined at comptime and now we
@@ -29359,8 +29398,7 @@ fn cmpVector(
};
try sema.requireRuntimeBlock(block, src, runtime_src);
- const result_ty_inst = try sema.addType(result_ty);
- return block.addCmpVector(lhs, rhs, op, result_ty_inst);
+ return block.addCmpVector(lhs, rhs, op);
}
fn wrapOptional(
diff --git a/src/Zir.zig b/src/Zir.zig
index 4dd2386c51..001c4e8101 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -617,7 +617,7 @@ pub const Inst = struct {
/// Uses the `un_node` field.
typeof_log2_int_type,
/// Asserts control-flow will not reach this instruction (`unreachable`).
- /// Uses the `unreachable` union field.
+ /// Uses the `@"unreachable"` union field.
@"unreachable",
/// Bitwise XOR. `^`
/// Uses the `pl_node` union field. Payload is `Bin`.
@@ -808,8 +808,9 @@ pub const Inst = struct {
panic,
/// Same as `panic` but forces comptime.
panic_comptime,
- /// Implement builtin `@setCold`. Uses `un_node`.
- set_cold,
+ /// Implements `@trap`.
+ /// Uses the `node` field.
+ trap,
/// Implement builtin `@setRuntimeSafety`. Uses `un_node`.
set_runtime_safety,
/// Implement builtin `@sqrt`. Uses `un_node`.
@@ -1187,7 +1188,6 @@ pub const Inst = struct {
.bool_to_int,
.embed_file,
.error_name,
- .set_cold,
.set_runtime_safety,
.sqrt,
.sin,
@@ -1277,6 +1277,7 @@ pub const Inst = struct {
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.check_comptime_control_flow,
=> true,
};
@@ -1323,7 +1324,6 @@ pub const Inst = struct {
.validate_deref,
.@"export",
.export_value,
- .set_cold,
.set_runtime_safety,
.memcpy,
.memset,
@@ -1553,6 +1553,7 @@ pub const Inst = struct {
.repeat_inline,
.panic,
.panic_comptime,
+ .trap,
.for_len,
.@"try",
.try_ptr,
@@ -1561,7 +1562,7 @@ pub const Inst = struct {
=> false,
.extended => switch (data.extended.opcode) {
- .breakpoint, .fence => true,
+ .fence, .set_cold, .breakpoint => true,
else => false,
},
};
@@ -1750,7 +1751,7 @@ pub const Inst = struct {
.error_name = .un_node,
.panic = .un_node,
.panic_comptime = .un_node,
- .set_cold = .un_node,
+ .trap = .node,
.set_runtime_safety = .un_node,
.sqrt = .un_node,
.sin = .un_node,
@@ -1979,11 +1980,15 @@ pub const Inst = struct {
/// Implement builtin `@setAlignStack`.
/// `operand` is payload index to `UnNode`.
set_align_stack,
+ /// Implements `@setCold`.
+ /// `operand` is payload index to `UnNode`.
+ set_cold,
/// Implements the `@errSetCast` builtin.
/// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
err_set_cast,
/// `operand` is payload index to `UnNode`.
await_nosuspend,
+ /// Implements `@breakpoint`.
/// `operand` is `src_node: i32`.
breakpoint,
/// Implements the `@select` builtin.
@@ -1997,7 +2002,7 @@ pub const Inst = struct {
int_to_error,
/// Implement builtin `@Type`.
/// `operand` is payload index to `UnNode`.
- /// `small` contains `NameStrategy
+ /// `small` contains `NameStrategy`.
reify,
/// Implements the `@asyncCall` builtin.
/// `operand` is payload index to `AsyncCall`.
@@ -2040,8 +2045,7 @@ pub const Inst = struct {
/// A reference to a TypedValue or ZIR instruction.
///
- /// If the Ref has a tag in this enum, it refers to a TypedValue which may be
- /// retrieved with Ref.toTypedValue().
+ /// If the Ref has a tag in this enum, it refers to a TypedValue.
///
/// If the value of a Ref does not have a tag, it refers to a ZIR instruction.
///
@@ -3590,6 +3594,12 @@ pub const Inst = struct {
/// 0 or a payload index of a `Block`, each is a payload
/// index of another `Item`.
notes: u32,
+
+ pub fn notesLen(item: Item, zir: Zir) u32 {
+ if (item.notes == 0) return 0;
+ const block = zir.extraData(Block, item.notes);
+ return block.data.body_len;
+ }
};
};
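At the source level, the two builtins whose ZIR encoding changes above are @setCold (now an extended instruction) and the new @trap; a small user-level example, not taken from this patch:

fn hitUnrecoverableState() noreturn {
    @setCold(true); // encoded as the extended `set_cold` instruction
    @trap(); // new `trap` ZIR tag; lowers to a target trap (brk #0x1 on aarch64 below)
}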
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 818b04f890..e20cf900af 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -23,7 +23,7 @@ const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
-const GenerateSymbolError = codegen.GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const Result = codegen.Result;
const DebugInfoOutput = codegen.DebugInfoOutput;
@@ -41,11 +41,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -337,7 +333,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -737,6 +733,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -4198,10 +4195,18 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ _ = try self.addInst(.{
+ .tag = .brk,
+ .data = .{ .imm16 = 0x0001 },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .brk,
- .data = .{ .imm16 = 1 },
+ .data = .{ .imm16 = 0xf000 },
});
return self.finishAirBookkeeping();
}
@@ -6137,201 +6142,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |_| {
- return self.fail("TODO lower unnamed const in Plan9", .{});
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => |ll| .{ .linker_load = ll },
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= 64) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = typed_value.val.toSignedInt(target);
- break :blk @bitCast(u64, signed);
- },
- .unsigned => typed_value.val.toUnsignedInt(target),
- };
-
- return MCValue{ .immediate = unsigned };
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
-
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
-
- return self.lowerUnnamedConst(typed_value);
- },
-
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index ceabe70438..8de5ae006a 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -24,7 +24,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const Result = codegen.Result;
-const GenerateSymbolError = codegen.GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig");
@@ -42,11 +42,7 @@ const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -343,7 +339,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -721,6 +717,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -4146,6 +4143,14 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
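+ // A permanently undefined ARM encoding (0xe7ffdefe, see bits.zig); executing it raises an undefined-instruction exception, which serves as the trap.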
+ _ = try self.addInst(.{
+ .tag = .undefined_instruction,
+ .data = .{ .nop = {} },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .bkpt,
@@ -6087,178 +6092,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable; // unsupported architecture for MachO
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO codegen COFF const Decl pointer", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-
- _ = tv;
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable;
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO lower unnamed const in COFF", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |_| {
- return self.fail("TODO lower unnamed const in Plan9", .{});
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt(target)) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = @truncate(u32, imm) },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= ptr_bits) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = @intCast(i32, typed_value.val.toSignedInt(target));
- break :blk @bitCast(u32, signed);
- },
- .unsigned => @intCast(u32, typed_value.val.toUnsignedInt(target)),
- };
-
- return MCValue{ .immediate = unsigned };
- } else {
- return self.lowerUnnamedConst(typed_value);
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
- },
-
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index 17540f0968..17415318de 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -1,4 +1,4 @@
-//! This file contains the functionality for lowering AArch64 MIR into
+//! This file contains the functionality for lowering AArch32 MIR into
//! machine code
const Emit = @This();
@@ -15,7 +15,7 @@ const Target = std.Target;
const assert = std.debug.assert;
const Instruction = bits.Instruction;
const Register = bits.Register;
-const log = std.log.scoped(.aarch64_emit);
+const log = std.log.scoped(.aarch32_emit);
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const CodeGen = @import("CodeGen.zig");
@@ -100,6 +100,7 @@ pub fn emitMir(
.b => try emit.mirBranch(inst),
+ .undefined_instruction => try emit.mirUndefinedInstruction(),
.bkpt => try emit.mirExceptionGeneration(inst),
.blx => try emit.mirBranchExchange(inst),
@@ -494,6 +495,10 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
+fn mirUndefinedInstruction(emit: *Emit) !void {
+ try emit.writeInstruction(Instruction.undefinedInstruction());
+}
+
fn mirExceptionGeneration(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const imm16 = emit.mir.instructions.items(.data)[inst].imm16;
diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig
index 07a8384c2c..736d0574bb 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -35,6 +35,8 @@ pub const Inst = struct {
asr,
/// Branch
b,
+ /// Undefined instruction
+ undefined_instruction,
/// Breakpoint
bkpt,
/// Branch with Link and Exchange
diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig
index 8e76ae9409..185c4ed921 100644
--- a/src/arch/arm/bits.zig
+++ b/src/arch/arm/bits.zig
@@ -307,6 +307,9 @@ pub const Instruction = union(enum) {
fixed: u4 = 0b1111,
cond: u4,
},
+ undefined_instruction: packed struct {
+ imm32: u32 = 0xe7ffdefe,
+ },
breakpoint: packed struct {
imm4: u4,
fixed_1: u4 = 0b0111,
@@ -613,6 +616,7 @@ pub const Instruction = union(enum) {
.branch => |v| @bitCast(u32, v),
.branch_exchange => |v| @bitCast(u32, v),
.supervisor_call => |v| @bitCast(u32, v),
+ .undefined_instruction => |v| v.imm32,
.breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, v.fixed_2_and_cond) << 20),
};
}
@@ -890,6 +894,13 @@ pub const Instruction = union(enum) {
};
}
+ // This instruction has no official mnemonic equivalent, so its constructor is public as-is.
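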
+ pub fn undefinedInstruction() Instruction {
+ return Instruction{
+ .undefined_instruction = .{},
+ };
+ }
+
fn breakpoint(imm: u16) Instruction {
return Instruction{
.breakpoint = .{
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index afcf4b0bb7..68df794bf7 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -21,10 +21,11 @@ const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
+const codegen = @import("../../codegen.zig");
-const Result = @import("../../codegen.zig").Result;
-const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
-const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
+const CodeGenError = codegen.CodeGenError;
+const Result = codegen.Result;
+const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
@@ -35,11 +36,7 @@ const Instruction = abi.Instruction;
const callee_preserved_regs = abi.callee_preserved_regs;
const gp = abi.RegisterClass.gp;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -225,7 +222,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -550,6 +547,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -1652,6 +1650,14 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, mcv, .{ .none, .none, .none });
}
+fn airTrap(self: *Self) !void {
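+ // `unimp`, encoded as an all-zero word (see bits.zig); the RISC-V spec defines the all-zero word as an illegal instruction, so execution traps here.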
+ _ = try self.addInst(.{
+ .tag = .unimp,
+ .data = .{ .nop = {} },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
_ = try self.addInst(.{
.tag = .ebreak,
@@ -2552,145 +2558,26 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
- mod.markDeclAlive(decl);
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- unreachable;
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return self.fail("TODO codegen COFF const Decl pointer", .{});
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
- _ = tv;
-}
-
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
- const target = self.target.*;
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- switch (typed_value.ty.zigTypeTag()) {
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
- const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
- const mod = self.bin_file.options.module.?;
- const slice_len = typed_value.val.sliceLen(mod);
- // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
- // the Sema code needs to use anonymous Decls or alloca instructions to store data.
- const ptr_imm = ptr_mcv.memory;
- _ = slice_len;
- _ = ptr_imm;
- // We need more general support for const data being stored in memory to make this work.
- return self.fail("TODO codegen for const slices", .{});
- },
- else => {
- if (typed_value.val.tag() == .int_u64) {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- }
- return self.fail("TODO codegen more kinds of const pointers", .{});
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ typed_value,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits > ptr_bits or info.signedness == .signed) {
- return self.fail("TODO const int bigger than ptr and signed int", .{});
- }
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
- }
- return self.fail("TODO non pointer optionals", .{});
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const sub_val = typed_value.val.castTag(.eu_payload).?.data;
-
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
- }
-
- return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty.fmtDebug()});
- },
- else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}),
- }
+ };
+ return mcv;
}
const CallMCValues = struct {
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 387c735896..3b330cbd3f 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -51,6 +51,7 @@ pub fn emitMir(
.ebreak => try emit.mirSystem(inst),
.ecall => try emit.mirSystem(inst),
+ .unimp => try emit.mirSystem(inst),
.dbg_line => try emit.mirDbgLine(inst),
@@ -153,6 +154,7 @@ fn mirSystem(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.ebreak => try emit.writeInstruction(Instruction.ebreak),
.ecall => try emit.writeInstruction(Instruction.ecall),
+ .unimp => try emit.writeInstruction(Instruction.unimp),
else => unreachable,
}
}
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 97accb7642..8905b24c3c 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -32,6 +32,7 @@ pub const Inst = struct {
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
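+ /// Unimplemented/illegal instruction; lowers @trap() and is emitted as an all-zero word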
+ unimp,
ebreak,
ecall,
jalr,
diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig
index 6b94927df8..7b3ff0bfe9 100644
--- a/src/arch/riscv64/bits.zig
+++ b/src/arch/riscv64/bits.zig
@@ -380,6 +380,7 @@ pub const Instruction = union(enum) {
pub const ecall = iType(0b1110011, 0b000, .zero, .zero, 0x000);
pub const ebreak = iType(0b1110011, 0b000, .zero, .zero, 0x001);
+ pub const unimp = iType(0, 0, .zero, .zero, 0);
};
pub const Register = enum(u6) {
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index c8f77fe702..30df999267 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -19,7 +19,7 @@ const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type;
-const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
+const CodeGenError = codegen.CodeGenError;
const Result = @import("../../codegen.zig").Result;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
@@ -38,11 +38,7 @@ const gp = abi.RegisterClass.gp;
const Self = @This();
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = CodeGenError || error{OutOfRegisters};
const RegisterView = enum(u1) {
caller,
@@ -265,7 +261,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -566,6 +562,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => @panic("TODO try self.airRetAddr(inst)"),
.frame_addr => @panic("TODO try self.airFrameAddress(inst)"),
@@ -1160,6 +1157,21 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
}
+fn airTrap(self: *Self) !void {
+ // ta 0x05
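+ // tcc with .cond = .al is an unconditional software trap; @breakpoint() below uses ta 0x01 so the two remain distinguishable.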
+ _ = try self.addInst(.{
+ .tag = .tcc,
+ .data = .{
+ .trap = .{
+ .is_imm = true,
+ .cond = .al,
+ .rs2_or_imm = .{ .imm = 0x05 },
+ },
+ },
+ });
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
// ta 0x01
_ = try self.addInst(.{
@@ -3898,133 +3910,25 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
- var tv = typed_value;
- log.debug("genTypedValue: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
-
- if (tv.val.castTag(.runtime_value)) |rt| {
- tv.val = rt.data;
- }
-
- if (tv.val.isUndef())
- return MCValue{ .undef = {} };
-
- if (tv.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(tv, payload.data);
- }
- if (tv.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(tv, payload.data.decl_index);
- }
- const target = self.target.*;
-
- switch (tv.ty.zigTypeTag()) {
- .Pointer => switch (tv.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (tv.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = tv.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ typed_value,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => unreachable, // TODO
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(tv.val.toBool()) };
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Int => {
- const info = tv.ty.intInfo(self.target.*);
- if (info.bits <= 64) {
- const unsigned = switch (info.signedness) {
- .signed => blk: {
- const signed = tv.val.toSignedInt(target);
- break :blk @bitCast(u64, signed);
- },
- .unsigned => tv.val.toUnsignedInt(target),
- };
-
- return MCValue{ .immediate = unsigned };
- } else {
- return self.fail("TODO implement int genTypedValue of > 64 bits", .{});
- }
- },
- .Optional => {
- if (tv.ty.isPtrLikeOptional()) {
- if (tv.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = tv.ty.optionalChild(&buf),
- .val = tv.val,
- });
- } else if (tv.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(tv.val.isNull()) };
- }
- },
- .Enum => {
- if (tv.val.castTag(.enum_field_index)) |field_index| {
- switch (tv.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = tv.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = tv.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = tv.val });
- }
- },
- .ErrorSet => {
- const err_name = tv.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- .ErrorUnion => {
- const error_type = tv.ty.errorUnionSet();
- const payload_type = tv.ty.errorUnionPayload();
-
- if (tv.val.castTag(.eu_payload)) |pl| {
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return MCValue{ .immediate = 0 };
- }
-
- _ = pl;
- return self.fail("TODO implement error union const of type '{}' (non-error)", .{tv.ty.fmtDebug()});
- } else {
- if (!payload_type.hasRuntimeBits()) {
- // We use the error type directly as the type.
- return self.genTypedValue(.{ .ty = error_type, .val = tv.val });
- }
-
- return self.fail("TODO implement error union const of type '{}' (error)", .{tv.ty.fmtDebug()});
- }
- },
- .ComptimeInt => unreachable, // semantic analysis prevents this
- .ComptimeFloat => unreachable, // semantic analysis prevents this
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
- else => {},
- }
-
- return self.fail("TODO implement const of type '{}'", .{tv.ty.fmtDebug()});
+ };
+ return mcv;
}
fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
@@ -4200,28 +4104,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const mod = self.bin_file.options.module.?;
- const decl = mod.declPtr(decl_index);
-
- mod.markDeclAlive(decl);
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
fn minMax(
self: *Self,
tag: Air.Inst.Tag,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 2f191fd834..e79129ddb8 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -733,8 +733,6 @@ const InnerError = error{
OutOfMemory,
/// An error occurred when trying to lower AIR to MIR.
CodegenFail,
- /// Can occur when dereferencing a pointer that points to a `Decl` of which the analysis has failed
- AnalysisFail,
/// Compiler implementation could not handle a large integer.
Overflow,
};
@@ -1164,7 +1162,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: codegen.DebugInfoOutput,
-) codegen.GenerateSymbolError!codegen.Result {
+) codegen.CodeGenError!codegen.Result {
_ = src_loc;
var code_gen: CodeGen = .{
.gpa = bin_file.allocator,
@@ -1829,6 +1827,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.arg => func.airArg(inst),
.bitcast => func.airBitcast(inst),
.block => func.airBlock(inst),
+ .trap => func.airTrap(inst),
.breakpoint => func.airBreakpoint(inst),
.br => func.airBr(inst),
.bool_to_int => func.airBoolToInt(inst),
@@ -3289,9 +3288,15 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
func.finishAir(inst, result, &.{ty_op.operand});
}
+fn airTrap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
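+ // wasm's `unreachable` opcode traps unconditionally when executed.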
+ try func.addTag(.@"unreachable");
+ func.finishAir(inst, .none, &.{});
+}
+
fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// unsupported by wasm itself. Can be implemented once we support DWARF
// for wasm
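+ // For now, lower @breakpoint() to a trap as well so it at least halts execution.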
+ try func.addTag(.@"unreachable");
func.finishAir(inst, .none, &.{});
}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 53d38f520a..5dfce901f7 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -12,12 +12,12 @@ const trace = @import("../../tracy.zig").trace;
const Air = @import("../../Air.zig");
const Allocator = mem.Allocator;
+const CodeGenError = codegen.CodeGenError;
const Compilation = @import("../../Compilation.zig");
const DebugInfoOutput = codegen.DebugInfoOutput;
const DW = std.dwarf;
const ErrorMsg = Module.ErrorMsg;
const Result = codegen.Result;
-const GenerateSymbolError = codegen.GenerateSymbolError;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Mir = @import("Mir.zig");
@@ -33,18 +33,16 @@ const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const errUnionErrorOffset = codegen.errUnionErrorOffset;
const Condition = bits.Condition;
+const Immediate = bits.Immediate;
+const Memory = bits.Memory;
+const Register = bits.Register;
const RegisterManager = abi.RegisterManager;
const RegisterLock = RegisterManager.RegisterLock;
-const Register = bits.Register;
const gp = abi.RegisterClass.gp;
const sse = abi.RegisterClass.sse;
-const InnerError = error{
- OutOfMemory,
- CodegenFail,
- OutOfRegisters,
-};
+const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
air: Air,
@@ -186,8 +184,7 @@ const Branch = struct {
_ = options;
comptime assert(unused_format_string.len == 0);
try writer.writeAll("Branch {\n");
- for (ctx.insts, 0..) |inst, i| {
- const mcv = ctx.mcvs[i];
+ for (ctx.insts, ctx.mcvs) |inst, mcv| {
try writer.print(" %{d} => {}\n", .{ inst, mcv });
}
try writer.writeAll("}");
@@ -210,7 +207,7 @@ const Branch = struct {
};
const StackAllocation = struct {
- inst: Air.Inst.Index,
+ inst: ?Air.Inst.Index,
/// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*)
size: u32,
};
@@ -257,7 +254,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -307,7 +304,12 @@ pub fn generate(
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
+ .fail = try ErrorMsg.create(
+ bin_file.allocator,
+ src_loc,
+ "CodeGen ran out of registers. This is a bug in the Zig compiler.",
+ .{},
+ ),
},
else => |e| return e,
};
@@ -346,6 +348,20 @@ pub fn generate(
defer emit.deinit();
emit.lowerMir() catch |err| switch (err) {
error.EmitFail => return Result{ .fail = emit.err_msg.? },
+ error.InvalidInstruction, error.CannotEncode => |e| {
+ const msg = switch (e) {
+ error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
+ error.CannotEncode => "CodeGen failed to encode the instruction.",
+ };
+ return Result{
+ .fail = try ErrorMsg.create(
+ bin_file.allocator,
+ src_loc,
+ "{s} This is a bug in the Zig compiler.",
+ .{msg},
+ ),
+ };
+ },
else => |e| return e,
};
@@ -359,52 +375,263 @@ pub fn generate(
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
const gpa = self.gpa;
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
- const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+ const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
self.mir_instructions.appendAssumeCapacity(inst);
return result_index;
}
-pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
+fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
const fields = std.meta.fields(@TypeOf(extra));
try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len);
return self.addExtraAssumeCapacity(extra);
}
-pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
+fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, self.mir_extra.items.len);
inline for (fields) |field| {
self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
i32 => @bitCast(u32, @field(extra, field.name)),
- else => @compileError("bad field type"),
+ else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
});
}
return result;
}
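+// The asm* helpers below each append exactly one MIR instruction, selecting the
+// Mir.Inst.Ops encoding from the operand kinds so call sites read like the assembly they emit.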
+fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void {
+ _ = try self.addInst(.{
+ .tag = .setcc,
+ .ops = .r_c,
+ .data = .{ .r_c = .{
+ .r1 = reg,
+ .cc = cc,
+ } },
+ });
+}
+
+fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bits.Condition) !void {
+ _ = try self.addInst(.{
+ .tag = .cmovcc,
+ .ops = .rr_c,
+ .data = .{ .rr_c = .{
+ .r1 = reg1,
+ .r2 = reg2,
+ .cc = cc,
+ } },
+ });
+}
+
+fn asmJmpReloc(self: *Self, target: Mir.Inst.Index) !Mir.Inst.Index {
+ return self.addInst(.{
+ .tag = .jmp_reloc,
+ .ops = undefined,
+ .data = .{ .inst = target },
+ });
+}
+
+fn asmJccReloc(self: *Self, target: Mir.Inst.Index, cc: bits.Condition) !Mir.Inst.Index {
+ return self.addInst(.{
+ .tag = .jcc,
+ .ops = .inst_cc,
+ .data = .{ .inst_cc = .{
+ .inst = target,
+ .cc = cc,
+ } },
+ });
+}
+
+fn asmOpOnly(self: *Self, tag: Mir.Inst.Tag) !void {
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = .none,
+ .data = undefined,
+ });
+}
+
+fn asmRegister(self: *Self, tag: Mir.Inst.Tag, reg: Register) !void {
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = .r,
+ .data = .{ .r = reg },
+ });
+}
+
+fn asmImmediate(self: *Self, tag: Mir.Inst.Tag, imm: Immediate) !void {
+ const ops: Mir.Inst.Ops = if (imm == .signed) .imm_s else .imm_u;
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = ops,
+ .data = .{ .imm = switch (imm) {
+ .signed => |x| @bitCast(u32, x),
+ .unsigned => |x| @intCast(u32, x),
+ } },
+ });
+}
+
+fn asmRegisterRegister(self: *Self, tag: Mir.Inst.Tag, reg1: Register, reg2: Register) !void {
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = .rr,
+ .data = .{ .rr = .{
+ .r1 = reg1,
+ .r2 = reg2,
+ } },
+ });
+}
+
+fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Immediate) !void {
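+ // Unsigned immediates wider than 32 bits cannot be stored inline, so they use the ri64 form and an Imm64 extra payload.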
+ const ops: Mir.Inst.Ops = switch (imm) {
+ .signed => .ri_s,
+ .unsigned => |x| if (x <= math.maxInt(u32)) .ri_u else .ri64,
+ };
+ const data: Mir.Inst.Data = switch (ops) {
+ .ri_s => .{ .ri = .{
+ .r1 = reg,
+ .imm = @bitCast(u32, imm.signed),
+ } },
+ .ri_u => .{ .ri = .{
+ .r1 = reg,
+ .imm = @intCast(u32, imm.unsigned),
+ } },
+ .ri64 => .{ .rx = .{
+ .r1 = reg,
+ .payload = try self.addExtra(Mir.Imm64.encode(imm.unsigned)),
+ } },
+ else => unreachable,
+ };
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = ops,
+ .data = data,
+ });
+}
+
+fn asmRegisterRegisterImmediate(
+ self: *Self,
+ tag: Mir.Inst.Tag,
+ reg1: Register,
+ reg2: Register,
+ imm: Immediate,
+) !void {
+ const ops: Mir.Inst.Ops = switch (imm) {
+ .signed => .rri_s,
+ .unsigned => .rri_u,
+ };
+ const data: Mir.Inst.Data = switch (ops) {
+ .rri_s => .{ .rri = .{
+ .r1 = reg1,
+ .r2 = reg2,
+ .imm = @bitCast(u32, imm.signed),
+ } },
+ .rri_u => .{ .rri = .{
+ .r1 = reg1,
+ .r2 = reg2,
+ .imm = @intCast(u32, imm.unsigned),
+ } },
+ else => unreachable,
+ };
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = ops,
+ .data = data,
+ });
+}
+
+fn asmMemory(self: *Self, tag: Mir.Inst.Tag, m: Memory) !void {
+ const ops: Mir.Inst.Ops = switch (m) {
+ .sib => .m_sib,
+ .rip => .m_rip,
+ else => unreachable,
+ };
+ const data: Mir.Inst.Data = .{ .payload = switch (ops) {
+ .m_sib => try self.addExtra(Mir.MemorySib.encode(m)),
+ .m_rip => try self.addExtra(Mir.MemoryRip.encode(m)),
+ else => unreachable,
+ } };
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = ops,
+ .data = data,
+ });
+}
+
+fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.Tag, m: Memory, imm: Immediate) !void {
+ const ops: Mir.Inst.Ops = switch (m) {
+ .sib => if (imm == .signed) .mi_s_sib else .mi_u_sib,
+ .rip => if (imm == .signed) .mi_s_rip else .mi_u_rip,
+ else => unreachable,
+ };
+ const payload: u32 = switch (ops) {
+ .mi_s_sib, .mi_u_sib => try self.addExtra(Mir.MemorySib.encode(m)),
+ .mi_s_rip, .mi_u_rip => try self.addExtra(Mir.MemoryRip.encode(m)),
+ else => unreachable,
+ };
+ const data: Mir.Inst.Data = .{
+ .xi = .{ .payload = payload, .imm = switch (imm) {
+ .signed => |x| @bitCast(u32, x),
+ .unsigned => |x| @intCast(u32, x),
+ } },
+ };
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = ops,
+ .data = data,
+ });
+}
+
+fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) !void {
+ const ops: Mir.Inst.Ops = switch (m) {
+ .sib => .rm_sib,
+ .rip => .rm_rip,
+ else => unreachable,
+ };
+ const data: Mir.Inst.Data = .{
+ .rx = .{ .r1 = reg, .payload = switch (ops) {
+ .rm_sib => try self.addExtra(Mir.MemorySib.encode(m)),
+ .rm_rip => try self.addExtra(Mir.MemoryRip.encode(m)),
+ else => unreachable,
+ } },
+ };
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = ops,
+ .data = data,
+ });
+}
+
+fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) !void {
+ const ops: Mir.Inst.Ops = switch (m) {
+ .sib => .mr_sib,
+ .rip => .mr_rip,
+ else => unreachable,
+ };
+ const data: Mir.Inst.Data = .{
+ .rx = .{ .r1 = reg, .payload = switch (ops) {
+ .mr_sib => try self.addExtra(Mir.MemorySib.encode(m)),
+ .mr_rip => try self.addExtra(Mir.MemoryRip.encode(m)),
+ else => unreachable,
+ } },
+ };
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = ops,
+ .data = data,
+ });
+}
+
fn gen(self: *Self) InnerError!void {
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
- _ = try self.addInst(.{
- .tag = .push,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
- .data = undefined, // unused for push reg,
- });
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .reg2 = .rsp,
- }),
- .data = undefined,
- });
+ try self.asmRegister(.push, .rbp);
+ try self.asmRegisterRegister(.mov, .rbp, .rsp);
+
// We want to subtract the aligned stack frame size from rsp here, but we don't
// yet know how big it will be, so we leave room for a 4-byte stack size.
// TODO During semantic analysis, check if there are no function calls. If there
// are none, here we can omit the part where we subtract and then add rsp.
const backpatch_stack_sub = try self.addInst(.{
- .tag = .nop,
+ .tag = .dead,
.ops = undefined,
.data = undefined,
});
@@ -431,7 +658,7 @@ fn gen(self: *Self) InnerError!void {
// Push callee-preserved regs that were actually in use.
const backpatch_push_callee_preserved_regs = try self.addInst(.{
- .tag = .nop,
+ .tag = .dead,
.ops = undefined,
.data = undefined,
});
@@ -462,7 +689,7 @@ fn gen(self: *Self) InnerError!void {
// Pop saved callee-preserved regs.
const backpatch_pop_callee_preserved_regs = try self.addInst(.{
- .tag = .nop,
+ .tag = .dead,
.ops = undefined,
.data = undefined,
});
@@ -475,22 +702,13 @@ fn gen(self: *Self) InnerError!void {
// Maybe add rsp, x if required. This is backpatched later.
const backpatch_stack_add = try self.addInst(.{
- .tag = .nop,
+ .tag = .dead,
.ops = undefined,
.data = undefined,
});
- _ = try self.addInst(.{
- .tag = .pop,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
- .data = undefined,
- });
-
- _ = try self.addInst(.{
- .tag = .ret,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
- .data = undefined,
- });
+ try self.asmRegister(.pop, .rbp);
+ try self.asmOpOnly(.ret);
// Adjust the stack
if (self.max_end_stack > math.maxInt(i32)) {
@@ -504,27 +722,34 @@ fn gen(self: *Self) InnerError!void {
if (aligned_stack_end > 0) {
self.mir_instructions.set(backpatch_stack_sub, .{
.tag = .sub,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
- .data = .{ .imm = aligned_stack_end },
+ .ops = .ri_u,
+ .data = .{ .ri = .{
+ .r1 = .rsp,
+ .imm = aligned_stack_end,
+ } },
});
self.mir_instructions.set(backpatch_stack_add, .{
.tag = .add,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
- .data = .{ .imm = aligned_stack_end },
+ .ops = .ri_u,
+ .data = .{ .ri = .{
+ .r1 = .rsp,
+ .imm = aligned_stack_end,
+ } },
});
const save_reg_list = try self.addExtra(Mir.SaveRegisterList{
+ .base_reg = @enumToInt(Register.rbp),
.register_list = reg_list.asInt(),
.stack_end = aligned_stack_end,
});
self.mir_instructions.set(backpatch_push_callee_preserved_regs, .{
.tag = .push_regs,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
+ .ops = undefined,
.data = .{ .payload = save_reg_list },
});
self.mir_instructions.set(backpatch_pop_callee_preserved_regs, .{
.tag = .pop_regs,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
+ .ops = undefined,
.data = .{ .payload = save_reg_list },
});
}
@@ -638,6 +863,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bitcast => try self.airBitCast(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
+ .trap => try self.airTrap(),
.breakpoint => try self.airBreakpoint(),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -811,17 +1037,17 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
const air_tags = self.air.instructions.items(.tag);
if (air_tags[inst] == .constant) return; // Constants are immortal.
- log.debug("%{d} => {}", .{ inst, MCValue{ .dead = {} } });
+ const prev_value = self.getResolvedInstValue(inst) orelse return;
+ log.debug("%{d} => {}", .{ inst, MCValue.dead });
// When editing this function, note that the logic must synchronize with `reuseOperand`.
- const prev_value = self.getResolvedInstValue(inst);
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
branch.inst_table.putAssumeCapacity(inst, .dead);
switch (prev_value) {
.register => |reg| {
- self.register_manager.freeReg(reg.to64());
+ self.register_manager.freeReg(reg);
},
.register_overflow => |ro| {
- self.register_manager.freeReg(ro.reg.to64());
+ self.register_manager.freeReg(ro.reg);
self.eflags_inst = null;
},
.eflags => {
@@ -882,7 +1108,7 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
try table.ensureUnusedCapacity(self.gpa, additional_count);
}
-fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
+fn allocMem(self: *Self, inst: ?Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
if (abi_align > self.stack_align)
self.stack_align = abi_align;
// TODO find a free slot instead of always appending
@@ -915,7 +1141,14 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
}
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
- const elem_ty = self.air.typeOfIndex(inst);
+ return self.allocRegOrMemAdvanced(self.air.typeOfIndex(inst), inst, reg_ok);
+}
+
+fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue {
+ return self.allocRegOrMemAdvanced(elem_ty, null, reg_ok);
+}
+
+fn allocRegOrMemAdvanced(self: *Self, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
@@ -992,7 +1225,7 @@ fn revertState(self: *Self, state: State) void {
pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
const stack_mcv = try self.allocRegOrMem(inst, false);
log.debug("spilling %{d} to stack mcv {any}", .{ inst, stack_mcv });
- const reg_mcv = self.getResolvedInstValue(inst);
+ const reg_mcv = self.getResolvedInstValue(inst).?;
switch (reg_mcv) {
.register => |other| {
assert(reg.to64() == other.to64());
@@ -1009,7 +1242,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
pub fn spillEflagsIfOccupied(self: *Self) !void {
if (self.eflags_inst) |inst_to_save| {
- const mcv = self.getResolvedInstValue(inst_to_save);
+ const mcv = self.getResolvedInstValue(inst_to_save).?;
const new_mcv = switch (mcv) {
.register_overflow => try self.allocRegOrMem(inst_to_save, false),
.eflags => try self.allocRegOrMem(inst_to_save, true),
@@ -1219,7 +1452,13 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock);
- const mask = ~@as(u64, 0);
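+ // Invert only as many bits as the operand actually has, instead of a blanket 64-bit mask.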
+ const mask = switch (operand_ty.abiSize(self.target.*)) {
+ 1 => ~@as(u8, 0),
+ 2 => ~@as(u16, 0),
+ 4 => ~@as(u32, 0),
+ 8 => ~@as(u64, 0),
+ else => unreachable,
+ };
try self.genBinOpMir(.xor, operand_ty, dst_mcv, .{ .immediate = mask });
break :result dst_mcv;
@@ -1266,14 +1505,7 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void {
.unsigned => .b,
.signed => .l,
};
- _ = try self.addInst(.{
- .tag = .cond_mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_mcv.register,
- .reg2 = lhs_reg,
- }),
- .data = .{ .cc = cc },
- });
+ try self.asmCmovccRegisterRegister(dst_mcv.register, lhs_reg, cc);
break :result dst_mcv;
};
@@ -1473,13 +1705,7 @@ fn genSetStackTruncatedOverflowCompare(
.signed => .o,
.unsigned => .c,
};
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = overflow_reg.to8(),
- }),
- .data = .{ .cc = cc },
- });
+ try self.asmSetccRegister(overflow_reg.to8(), cc);
const scratch_reg = temp_regs[1];
try self.genSetReg(extended_ty, scratch_reg, .{ .register = reg });
@@ -1492,12 +1718,7 @@ fn genSetStackTruncatedOverflowCompare(
);
const eq_reg = temp_regs[2];
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = eq_reg.to8() }),
- .data = .{ .cc = .ne },
- });
-
+ try self.asmSetccRegister(eq_reg.to8(), .ne);
try self.genBinOpMir(
.@"or",
Type.u8,
@@ -1641,23 +1862,8 @@ fn genIntMulDivOpMir(
}
switch (signedness) {
- .signed => {
- _ = try self.addInst(.{
- .tag = .cwd,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
- .data = undefined,
- });
- },
- .unsigned => {
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rdx,
- .reg2 = .rdx,
- }),
- .data = undefined,
- });
- },
+ .signed => try self.asmOpOnly(.cqo),
+ .unsigned => try self.asmRegisterRegister(.xor, .rdx, .rdx),
}
const factor = switch (rhs) {
@@ -1670,29 +1876,11 @@ fn genIntMulDivOpMir(
};
switch (factor) {
- .register => |reg| {
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
- .data = undefined,
- });
- },
- .stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg2 = .rbp,
- .flags = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- 8 => 0b11,
- else => unreachable,
- },
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
- },
+ .register => |reg| try self.asmRegister(tag, reg),
+ .stack_offset => |off| try self.asmMemory(tag, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = .rbp,
+ .disp = -off,
+ })),
else => unreachable,
}
}
@@ -1720,38 +1908,10 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
.unsigned => .div,
}, Type.isize, signedness, .{ .register = dividend }, .{ .register = divisor });
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = divisor.to64(),
- .reg2 = dividend.to64(),
- }),
- .data = undefined,
- });
- _ = try self.addInst(.{
- .tag = .sar,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = divisor.to64(),
- .flags = 0b10,
- }),
- .data = .{ .imm = 63 },
- });
- _ = try self.addInst(.{
- .tag = .@"test",
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rdx,
- .reg2 = .rdx,
- }),
- .data = undefined,
- });
- _ = try self.addInst(.{
- .tag = .cond_mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = divisor.to64(),
- .reg2 = .rdx,
- }),
- .data = .{ .cc = .e },
- });
+ try self.asmRegisterRegister(.xor, divisor.to64(), dividend.to64());
+ try self.asmRegisterImmediate(.sar, divisor.to64(), Immediate.u(63));
+ try self.asmRegisterRegister(.@"test", .rdx, .rdx);
+ try self.asmCmovccRegisterRegister(divisor.to64(), .rdx, .e);
try self.genBinOpMir(.add, Type.isize, .{ .register = divisor }, .{ .register = .rax });
return MCValue{ .register = divisor };
}
@@ -2185,18 +2345,10 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
const addr_reg = try self.register_manager.allocReg(null, gp);
switch (slice_mcv) {
- .stack_offset => |off| {
- // mov reg, [rbp - 8]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = .rbp,
- .flags = 0b01,
- }),
- .data = .{ .imm = @bitCast(u32, -@intCast(i32, off)) },
- });
- },
+ .stack_offset => |off| try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = -off,
+ })),
else => return self.fail("TODO implement slice_elem_ptr when slice is {}", .{slice_mcv}),
}
// TODO we could allocate register here, but need to expect addr register and potentially
@@ -2271,26 +2423,16 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
array_ty.abiAlignment(self.target.*),
));
try self.genSetStack(array_ty, off, array, .{});
- // lea reg, [rbp]
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = .rbp,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(.lea, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = -off,
+ }));
},
.stack_offset => |off| {
- // lea reg, [rbp]
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = .rbp,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(.lea, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = -off,
+ }));
},
.memory, .linker_load => {
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array);
@@ -2327,7 +2469,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
const elem_ty = ptr_ty.elemType2();
- const elem_abi_size = elem_ty.abiSize(self.target.*);
+ const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
const index_ty = self.air.typeOf(bin_op.rhs);
const index = try self.resolveInst(bin_op.rhs);
const index_lock: ?RegisterLock = switch (index) {
@@ -2347,16 +2489,14 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
if (elem_abi_size > 8) {
return self.fail("TODO copy value with size {} from pointer", .{elem_abi_size});
} else {
- // mov dst_mcv, [dst_mcv]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)),
- .reg2 = dst_mcv.register,
- .flags = 0b01,
+ try self.asmRegisterMemory(
+ .mov,
+ registerAlias(dst_mcv.register, elem_abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(elem_abi_size), .{
+ .base = dst_mcv.register,
+ .disp = 0,
}),
- .data = .{ .imm = 0 },
- });
+ );
break :result .{ .register = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)) };
}
};
@@ -2583,7 +2723,7 @@ fn reuseOperand(
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const elem_ty = ptr_ty.elemType();
- const abi_size = elem_ty.abiSize(self.target.*);
+ const abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@@ -2610,16 +2750,11 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.undef => unreachable,
.eflags => unreachable,
.register => |dst_reg| {
- // mov dst_reg, [reg]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, @intCast(u32, abi_size)),
- .reg2 = reg,
- .flags = 0b01,
- }),
- .data = .{ .imm = 0 },
- });
+ try self.asmRegisterMemory(
+ .mov,
+ registerAlias(dst_reg, abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg, .disp = 0 }),
+ );
},
.stack_offset => |off| {
if (abi_size <= 8) {
@@ -2679,23 +2814,19 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
} else unreachable;
- const flags: u2 = switch (load_struct.type) {
- .got => 0b00,
- .direct => 0b01,
- .import => 0b10,
+ const ops: Mir.Inst.Ops = switch (load_struct.type) {
+ .got => .got_reloc,
+ .direct => .direct_reloc,
+ .import => .import_reloc,
};
_ = try self.addInst(.{
- .tag = .lea_pic,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .flags = flags,
- }),
- .data = .{
- .relocation = .{
- .atom_index = atom_index,
- .sym_index = load_struct.sym_index,
- },
- },
+ .tag = .lea_linker,
+ .ops = ops,
+ .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{
+ .reg = @enumToInt(registerAlias(reg, abi_size)),
+ .atom_index = atom_index,
+ .sym_index = load_struct.sym_index,
+ }) },
});
},
.memory => |addr| {
@@ -2708,7 +2839,7 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
- const abi_size = value_ty.abiSize(self.target.*);
+ const abi_size = @intCast(u32, value_ty.abiSize(self.target.*));
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@@ -2736,30 +2867,25 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.unreach => unreachable,
.eflags => unreachable,
.undef => {
- try self.genSetReg(value_ty, reg, value);
+ switch (abi_size) {
+ 1 => try self.store(ptr, .{ .immediate = 0xaa }, ptr_ty, value_ty),
+ 2 => try self.store(ptr, .{ .immediate = 0xaaaa }, ptr_ty, value_ty),
+ 4 => try self.store(ptr, .{ .immediate = 0xaaaaaaaa }, ptr_ty, value_ty),
+ 8 => try self.store(ptr, .{ .immediate = 0xaaaaaaaaaaaaaaaa }, ptr_ty, value_ty),
+ else => try self.genInlineMemset(ptr, .{ .immediate = 0xaa }, .{ .immediate = abi_size }, .{}),
+ }
},
.immediate => |imm| {
switch (abi_size) {
1, 2, 4 => {
- // TODO this is wasteful!
- // introduce new MIR tag specifically for mov [reg + 0], imm
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = 0,
- .operand = @truncate(u32, imm),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .flags = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- else => unreachable,
- },
- }),
- .data = .{ .payload = payload },
- });
+ const immediate = if (value_ty.isSignedInt())
+ Immediate.s(@intCast(i32, @bitCast(i64, imm)))
+ else
+ Immediate.u(@truncate(u32, imm));
+ try self.asmMemoryImmediate(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = reg.to64(),
+ .disp = 0,
+ }), immediate);
},
8 => {
// TODO: optimization: if the imm is only using the lower
@@ -2789,13 +2915,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
const overflow_bit_ty = value_ty.structFieldType(1);
const overflow_bit_offset = value_ty.structFieldOffset(1, self.target.*);
const tmp_reg = try self.register_manager.allocReg(null, gp);
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = tmp_reg.to8(),
- }),
- .data = .{ .cc = ro.eflags },
- });
+ try self.asmSetccRegister(tmp_reg.to8(), ro.eflags);
try self.genInlineMemcpyRegisterRegister(
overflow_bit_ty,
reg,
@@ -2836,17 +2956,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
try self.loadMemPtrIntoRegister(addr_reg, ptr_ty, ptr);
- // to get the actual address of the value we want to modify we have to go through the GOT
- // mov reg, [reg]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = addr_reg.to64(),
- .flags = 0b01,
- }),
- .data = .{ .imm = 0 },
- });
+ // To get the actual address of the value we want to modify we have to go through the GOT
+ try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = addr_reg.to64(),
+ .disp = 0,
+ }));
const new_ptr = MCValue{ .register = addr_reg.to64() };
@@ -2856,19 +2970,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
return self.fail("TODO saving imm to memory for abi_size {}", .{abi_size});
}
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = 0,
- // TODO check if this logic is correct
- .operand = @truncate(u32, imm),
- });
- const flags: u2 = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- 8 => 0b11,
- else => unreachable,
- };
- if (flags == 0b11) {
+ if (abi_size == 8) {
+ // TODO
const top_bits: u32 = @intCast(u32, imm >> 32);
const can_extend = if (value_ty.isUnsignedInt())
(top_bits == 0) and (imm & 0x8000_0000) == 0
@@ -2879,14 +2982,10 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
return self.fail("TODO imm64 would get incorrectly sign extended", .{});
}
}
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .flags = flags,
- }),
- .data = .{ .payload = payload },
- });
+ try self.asmMemoryImmediate(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = addr_reg.to64(),
+ .disp = 0,
+ }), Immediate.u(@intCast(u32, imm)));
},
.register => {
return self.store(new_ptr, value, ptr_ty, value_ty);
@@ -2898,16 +2997,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
defer self.register_manager.unlockReg(tmp_reg_lock);
try self.loadMemPtrIntoRegister(tmp_reg, value_ty, value);
+ try self.asmRegisterMemory(.mov, tmp_reg, Memory.sib(.qword, .{
+ .base = tmp_reg,
+ .disp = 0,
+ }));
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = tmp_reg,
- .reg2 = tmp_reg,
- .flags = 0b01,
- }),
- .data = .{ .imm = 0 },
- });
return self.store(new_ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
}
@@ -2959,6 +3053,9 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const mcv = try self.resolveInst(operand);
const ptr_ty = self.air.typeOf(operand);
const struct_ty = ptr_ty.childType();
+ if (struct_ty.zigTypeTag() == .Struct and struct_ty.containerLayout() == .Packed) {
+ return self.fail("TODO structFieldPtr implement packed structs", .{});
+ }
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
const dst_mcv: MCValue = result: {
@@ -3022,6 +3119,9 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand);
+ if (struct_ty.zigTypeTag() == .Struct and struct_ty.containerLayout() == .Packed) {
+ return self.fail("TODO airStructFieldVal implement packed structs", .{});
+ }
const struct_field_offset = struct_ty.structFieldOffset(index, self.target.*);
const struct_field_ty = struct_ty.structFieldType(index);
@@ -3055,8 +3155,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const shift = @intCast(u8, struct_field_offset * @sizeOf(usize));
try self.genShiftBinOpMir(.shr, Type.usize, dst_mcv.register, .{ .immediate = shift });
- // Mask with reg.size() - struct_field_size
- const max_reg_bit_width = Register.rax.size();
+ // Mask with reg.bitSize() - struct_field_size
+ const max_reg_bit_width = Register.rax.bitSize();
const mask_shift = @intCast(u6, (max_reg_bit_width - struct_field_ty.bitSize(self.target.*)));
const mask = (~@as(u64, 0)) >> mask_shift;
@@ -3069,14 +3169,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
};
const field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
if (signedness == .signed and field_size < 8) {
- _ = try self.addInst(.{
- .tag = .mov_sign_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_mcv.register,
- .reg2 = registerAlias(dst_mcv.register, field_size),
- }),
- .data = undefined,
- });
+ try self.asmRegisterRegister(
+ .movsx,
+ dst_mcv.register,
+ registerAlias(dst_mcv.register, field_size),
+ );
}
break :result dst_mcv;
@@ -3093,13 +3190,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(reg_lock);
const dst_reg = try self.register_manager.allocReg(inst, gp);
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg.to8(),
- }),
- .data = .{ .cc = ro.eflags },
- });
+ try self.asmSetccRegister(dst_reg.to8(), ro.eflags);
break :result MCValue{ .register = dst_reg.to8() };
},
else => unreachable,
@@ -3135,25 +3226,7 @@ fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shi
switch (shift) {
.immediate => |imm| switch (imm) {
0 => return,
- 1 => {
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }),
- .data = undefined,
- });
- return;
- },
- else => {
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .flags = 0b10,
- }),
- .data = .{ .imm = @intCast(u8, imm) },
- });
- return;
- },
+ else => return self.asmRegisterImmediate(tag, registerAlias(reg, abi_size), Immediate.u(imm)),
},
.register => |shift_reg| {
if (shift_reg == .rcx) break :blk;
@@ -3165,14 +3238,7 @@ fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shi
try self.genSetReg(Type.u8, .rcx, shift);
}
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ try self.asmRegisterRegister(tag, registerAlias(reg, abi_size), .cl);
}
/// Result is always a register.
@@ -3545,59 +3611,67 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
if (intrinsicsAllowed(self.target.*, dst_ty)) {
const actual_tag: Mir.Inst.Tag = switch (dst_ty.tag()) {
.f32 => switch (mir_tag) {
- .add => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.add_f32_avx
- else
- Mir.Inst.Tag.add_f32_sse,
- .cmp => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.cmp_f32_avx
- else
- Mir.Inst.Tag.cmp_f32_sse,
- else => return self.fail("TODO genBinOpMir for f32 register-register with MIR tag {}", .{mir_tag}),
+ .add => .addss,
+ .cmp => .ucomiss,
+ else => return self.fail(
+ "TODO genBinOpMir for f32 register-register with MIR tag {}",
+ .{mir_tag},
+ ),
},
.f64 => switch (mir_tag) {
- .add => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.add_f64_avx
- else
- Mir.Inst.Tag.add_f64_sse,
- .cmp => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.cmp_f64_avx
- else
- Mir.Inst.Tag.cmp_f64_sse,
- else => return self.fail("TODO genBinOpMir for f64 register-register with MIR tag {}", .{mir_tag}),
+ .add => .addsd,
+ .cmp => .ucomisd,
+ else => return self.fail(
+ "TODO genBinOpMir for f64 register-register with MIR tag {}",
+ .{mir_tag},
+ ),
},
- else => return self.fail("TODO genBinOpMir for float register-register and type {}", .{dst_ty.fmtDebug()}),
+ else => return self.fail(
+ "TODO genBinOpMir for float register-register and type {}",
+ .{dst_ty.fmtDebug()},
+ ),
};
- _ = try self.addInst(.{
- .tag = actual_tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg.to128(),
- .reg2 = src_reg.to128(),
- }),
- .data = undefined,
- });
- return;
+ return self.asmRegisterRegister(actual_tag, dst_reg.to128(), src_reg.to128());
}
return self.fail("TODO genBinOpMir for float register-register and no intrinsics", .{});
},
- else => {
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
- },
+ else => try self.asmRegisterRegister(
+ mir_tag,
+ registerAlias(dst_reg, abi_size),
+ registerAlias(src_reg, abi_size),
+ ),
},
.immediate => |imm| {
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size) }),
- .data = .{ .imm = @truncate(u32, imm) },
- });
+ switch (abi_size) {
+ 0 => unreachable,
+ 1...4 => {
+ try self.asmRegisterImmediate(
+ mir_tag,
+ registerAlias(dst_reg, abi_size),
+ Immediate.u(@intCast(u32, imm)),
+ );
+ },
+ 5...8 => {
+ if (math.cast(i32, @bitCast(i64, imm))) |small| {
+ try self.asmRegisterImmediate(
+ mir_tag,
+ registerAlias(dst_reg, abi_size),
+ Immediate.s(small),
+ );
+ } else {
+ const tmp_reg = try self.register_manager.allocReg(null, gp);
+ const tmp_alias = registerAlias(tmp_reg, abi_size);
+ try self.asmRegisterImmediate(.mov, tmp_alias, Immediate.u(imm));
+ try self.asmRegisterRegister(
+ mir_tag,
+ registerAlias(dst_reg, abi_size),
+ tmp_alias,
+ );
+ }
+ },
+ else => return self.fail("TODO getBinOpMir implement large immediate ABI", .{}),
+ }
},
.memory,
.linker_load,
@@ -3614,15 +3688,11 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = .rbp,
- .flags = 0b01,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(
+ mir_tag,
+ registerAlias(dst_reg, abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
+ );
},
}
},
@@ -3640,45 +3710,17 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.dead, .unreach => unreachable,
.register_overflow => unreachable,
.register => |src_reg| {
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .reg2 = registerAlias(src_reg, abi_size),
- .flags = 0b10,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmMemoryRegister(mir_tag, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = .rbp,
+ .disp = -off,
+ }), registerAlias(src_reg, abi_size));
},
.immediate => |imm| {
- const tag: Mir.Inst.Tag = switch (mir_tag) {
- .add => .add_mem_imm,
- .@"or" => .or_mem_imm,
- .@"and" => .and_mem_imm,
- .sub => .sub_mem_imm,
- .xor => .xor_mem_imm,
- .cmp => .cmp_mem_imm,
- else => unreachable,
- };
- const flags: u2 = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- 8 => 0b11,
- else => unreachable,
- };
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = @bitCast(u32, -off),
- .operand = @truncate(u32, imm),
- });
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .flags = flags,
- }),
- .data = .{ .payload = payload },
- });
+ // TODO
+ try self.asmMemoryImmediate(mir_tag, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = .rbp,
+ .disp = -off,
+ }), Immediate.u(@intCast(u32, imm)));
},
.memory,
.stack_offset,
@@ -3721,30 +3763,21 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.dead, .unreach => unreachable,
.ptr_stack_offset => unreachable,
.register_overflow => unreachable,
- .register => |src_reg| {
- // register, register
- _ = try self.addInst(.{
- .tag = .imul_complex,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
- },
+ .register => |src_reg| try self.asmRegisterRegister(
+ .imul,
+ registerAlias(dst_reg, abi_size),
+ registerAlias(src_reg, abi_size),
+ ),
.immediate => |imm| {
- // TODO take into account the type's ABI size when selecting the register alias
- // register, immediate
if (math.minInt(i32) <= imm and imm <= math.maxInt(i32)) {
- _ = try self.addInst(.{
- .tag = .imul_complex,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg.to32(),
- .reg2 = dst_reg.to32(),
- .flags = 0b10,
- }),
- .data = .{ .imm = @truncate(u32, imm) },
- });
+ // TODO take into account the type's ABI size when selecting the register alias
+ // register, immediate
+ try self.asmRegisterRegisterImmediate(
+ .imul,
+ dst_reg.to32(),
+ dst_reg.to32(),
+ Immediate.u(@intCast(u32, imm)),
+ );
} else {
// TODO verify we don't spill and assign to the same register as dst_mcv
const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv);
@@ -3752,15 +3785,11 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
}
},
.stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = .imul_complex,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = .rbp,
- .flags = 0b01,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(
+ .imul,
+ registerAlias(dst_reg, abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
+ );
},
.memory => {
return self.fail("TODO implement x86 multiply source memory", .{});
@@ -3783,16 +3812,11 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.register => |src_reg| {
// copy dst to a register
const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
- // multiply into dst_reg
- // register, register
- _ = try self.addInst(.{
- .tag = .imul_complex,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
+ try self.asmRegisterRegister(
+ .imul,
+ registerAlias(dst_reg, abi_size),
+ registerAlias(src_reg, abi_size),
+ );
// copy dst_reg back out
return self.genSetStack(dst_ty, off, .{ .register = dst_reg }, .{});
},
@@ -3917,12 +3941,13 @@ fn genVarDbgInfo(
}
}
+fn airTrap(self: *Self) !void {
+ try self.asmOpOnly(.ud2);
+ return self.finishAirBookkeeping();
+}
+
fn airBreakpoint(self: *Self) !void {
- _ = try self.addInst(.{
- .tag = .interrupt,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = undefined,
- });
+ try self.asmOpOnly(.int3);
return self.finishAirBookkeeping();
}
@@ -3985,10 +4010,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
};
defer if (ret_reg_lock) |lock| self.register_manager.unlockReg(lock);
- for (args, 0..) |arg, arg_i| {
- const mc_arg = info.args[arg_i];
+ for (args, info.args) |arg, info_arg| {
+ const mc_arg = info_arg;
const arg_ty = self.air.typeOf(arg);
- const arg_mcv = try self.resolveInst(args[arg_i]);
+ const arg_mcv = try self.resolveInst(arg);
// Here we do not use setRegOrMem even though the logic is similar, because
// the function call will move the stack pointer, so the offsets are different.
switch (mc_arg) {
@@ -4017,11 +4042,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (info.stack_byte_count > 0) {
// Adjust the stack
- _ = try self.addInst(.{
- .tag = .sub,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
- .data = .{ .imm = info.stack_byte_count },
- });
+ try self.asmRegisterImmediate(.sub, .rsp, Immediate.u(info.stack_byte_count));
}
// Due to incremental compilation, how function calls are generated depends
@@ -4034,12 +4055,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
- const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
- .data = .{ .imm = got_addr },
- });
+ const got_addr = atom.getOffsetTableAddress(elf_file);
+ try self.asmMemory(.call, Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = @intCast(i32, got_addr),
+ }));
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
@@ -4049,14 +4069,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index,
},
});
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ try self.asmRegister(.call, .rax);
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
@@ -4066,14 +4079,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index,
},
});
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ try self.asmRegister(.call, .rax);
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
const decl_block_index = try p9.seeDecl(func.owner_decl);
const decl_block = p9.getDeclBlock(decl_block_index);
@@ -4082,11 +4088,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const got_addr = p9.bases.data;
const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
- .data = .{ .imm = @intCast(u32, fn_got_addr) },
- });
+ try self.asmMemory(.call, Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = @intCast(i32, fn_got_addr),
+ }));
} else unreachable;
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
@@ -4106,14 +4111,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index,
},
});
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ try self.asmRegister(.call, .rax);
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
@@ -4136,23 +4134,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ try self.asmRegister(.call, .rax);
}
if (info.stack_byte_count > 0) {
// Readjust the stack
- _ = try self.addInst(.{
- .tag = .add,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
- .data = .{ .imm = info.stack_byte_count },
- });
+ try self.asmRegisterImmediate(.add, .rsp, Immediate.u(info.stack_byte_count));
}
const result: MCValue = result: {
@@ -4209,11 +4196,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
// TODO when implementing defer, this will need to jump to the appropriate defer expression.
// TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
// which is available if the jump is 127 bytes or less forward.
- const jmp_reloc = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = undefined },
- });
+ const jmp_reloc = try self.asmJmpReloc(undefined);
try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
@@ -4245,11 +4228,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// TODO when implementing defer, this will need to jump to the appropriate defer expression.
// TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
// which is available if the jump is 127 bytes or less forward.
- const jmp_reloc = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = undefined },
- });
+ const jmp_reloc = try self.asmJmpReloc(undefined);
try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
@@ -4424,33 +4403,13 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
const abi_size = ty.abiSize(self.target.*);
switch (mcv) {
.eflags => |cc| {
- return self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{
- .inst_cc = .{
- .inst = undefined,
- // Here we map the opposites since the jump is to the false branch.
- .cc = cc.negate(),
- },
- },
- });
+ // Here we map the opposites since the jump is to the false branch.
+ return self.asmJccReloc(undefined, cc.negate());
},
.register => |reg| {
try self.spillEflagsIfOccupied();
- _ = try self.addInst(.{
- .tag = .@"test",
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
- .data = .{ .imm = 1 },
- });
- return self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst_cc = .{
- .inst = undefined,
- .cc = .e,
- } },
- });
+ try self.asmRegisterImmediate(.@"test", reg, Immediate.u(1));
+ return self.asmJccReloc(undefined, .e);
},
.immediate,
.stack_offset,
@@ -4464,6 +4423,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
},
else => return self.fail("TODO implement condbr when condition is {s}", .{@tagName(mcv)}),
}
+ return 0; // TODO
}
fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
@@ -4491,16 +4451,16 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// Capture the state of register and stack allocation state so that we can revert to it.
const saved_state = try self.captureState();
- try self.branch_stack.append(.{});
- errdefer {
- _ = self.branch_stack.pop();
- }
+ {
+ try self.branch_stack.append(.{});
+ errdefer _ = self.branch_stack.pop();
- try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
- for (liveness_condbr.then_deaths) |operand| {
- self.processDeath(operand);
+ try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
+ for (liveness_condbr.then_deaths) |operand| {
+ self.processDeath(operand);
+ }
+ try self.genBody(then_body);
}
- try self.genBody(then_body);
// Revert to the previous register and stack allocation state.
@@ -4511,16 +4471,16 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
try self.performReloc(reloc);
- try self.branch_stack.append(.{});
- errdefer {
- _ = self.branch_stack.pop();
- }
+ {
+ try self.branch_stack.append(.{});
+ errdefer _ = self.branch_stack.pop();
- try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
- for (liveness_condbr.else_deaths) |operand| {
- self.processDeath(operand);
+ try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
+ for (liveness_condbr.else_deaths) |operand| {
+ self.processDeath(operand);
+ }
+ try self.genBody(else_body);
}
- try self.genBody(else_body);
var else_branch = self.branch_stack.pop();
defer else_branch.deinit(self.gpa);
@@ -4646,18 +4606,16 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const operand: MCValue = blk: {
- if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
- // The MCValue that holds the pointer can be re-used as the value.
- break :blk operand_ptr;
- } else {
- break :blk try self.allocRegOrMem(inst, true);
- }
- };
const ptr_ty = self.air.typeOf(un_op);
+ const elem_ty = ptr_ty.childType();
+ const operand = if (elem_ty.isPtrLikeOptional() and self.reuseOperand(inst, un_op, 0, operand_ptr))
+ // The MCValue that holds the pointer can be re-used as the value.
+ operand_ptr
+ else
+ try self.allocTempRegOrMem(elem_ty, true);
try self.load(operand, operand_ptr, ptr_ty);
- const result = try self.isNull(inst, ptr_ty.elemType(), operand);
+ const result = try self.isNull(inst, elem_ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -4686,15 +4644,13 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const operand: MCValue = blk: {
- if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
- // The MCValue that holds the pointer can be re-used as the value.
- break :blk operand_ptr;
- } else {
- break :blk try self.allocRegOrMem(inst, true);
- }
- };
const ptr_ty = self.air.typeOf(un_op);
+ const elem_ty = ptr_ty.childType();
+ const operand = if (elem_ty.isPtrLikeOptional() and self.reuseOperand(inst, un_op, 0, operand_ptr))
+ // The MCValue that holds the pointer can be re-used as the value.
+ operand_ptr
+ else
+ try self.allocTempRegOrMem(elem_ty, true);
try self.load(operand, operand_ptr, ptr_ty);
const result = try self.isNonNull(inst, ptr_ty.elemType(), operand);
@@ -4789,11 +4745,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const body = self.air.extra[loop.end..][0..loop.data.body_len];
const jmp_target = @intCast(u32, self.mir_instructions.len);
try self.genBody(body);
- _ = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = jmp_target },
- });
+ _ = try self.asmJmpReloc(jmp_target);
return self.finishAirBookkeeping();
}
@@ -4806,7 +4758,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
// break instruction will choose a MCValue for the block result and overwrite
// this field. Following break instructions will use that MCValue to put their
// block results.
- .mcv = MCValue{ .none = {} },
+ .mcv = .none,
});
defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa);
@@ -4838,23 +4790,16 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
.none => unreachable,
.undef => unreachable,
.dead, .unreach => unreachable,
- .immediate => |imm| {
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(cond_reg, abi_size) }),
- .data = .{ .imm = @intCast(u32, imm) },
- });
- },
- .register => |reg| {
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(cond_reg, abi_size),
- .reg2 = registerAlias(reg, abi_size),
- }),
- .data = undefined,
- });
- },
+ .immediate => |imm| try self.asmRegisterImmediate(
+ .xor,
+ registerAlias(cond_reg, abi_size),
+ Immediate.u(imm),
+ ),
+ .register => |reg| try self.asmRegisterRegister(
+ .xor,
+ registerAlias(cond_reg, abi_size),
+ registerAlias(reg, abi_size),
+ ),
.stack_offset => {
if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, case);
@@ -4868,22 +4813,9 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
},
}
- _ = try self.addInst(.{
- .tag = .@"test",
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(cond_reg, abi_size),
- .reg2 = registerAlias(cond_reg, abi_size),
- }),
- .data = undefined,
- });
- return self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst_cc = .{
- .inst = undefined,
- .cc = .ne,
- } },
- });
+ const aliased_reg = registerAlias(cond_reg, abi_size);
+ try self.asmRegisterRegister(.@"test", aliased_reg, aliased_reg);
+ return self.asmJccReloc(undefined, .ne);
},
.stack_offset => {
try self.spillEflagsIfOccupied();
@@ -4901,6 +4833,7 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
return self.fail("TODO implemenent switch mir when condition is {}", .{condition});
},
}
+ return 0; // TODO
}
fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
@@ -4946,25 +4879,25 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
var relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(relocs);
- for (items, 0..) |item, item_i| {
+ for (items, relocs) |item, *reloc| {
const item_mcv = try self.resolveInst(item);
- relocs[item_i] = try self.genCondSwitchMir(condition_ty, condition, item_mcv);
+ reloc.* = try self.genCondSwitchMir(condition_ty, condition, item_mcv);
}
// Capture the state of register and stack allocation state so that we can revert to it.
const saved_state = try self.captureState();
- try self.branch_stack.append(.{});
- errdefer {
- _ = self.branch_stack.pop();
- }
+ {
+ try self.branch_stack.append(.{});
+ errdefer _ = self.branch_stack.pop();
- try self.ensureProcessDeathCapacity(liveness.deaths[case_i].len);
- for (liveness.deaths[case_i]) |operand| {
- self.processDeath(operand);
- }
+ try self.ensureProcessDeathCapacity(liveness.deaths[case_i].len);
+ for (liveness.deaths[case_i]) |operand| {
+ self.processDeath(operand);
+ }
- try self.genBody(case_body);
+ try self.genBody(case_body);
+ }
branch_stack.appendAssumeCapacity(self.branch_stack.pop());
@@ -4982,18 +4915,18 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
// Capture the state of register and stack allocation state so that we can revert to it.
const saved_state = try self.captureState();
- try self.branch_stack.append(.{});
- errdefer {
- _ = self.branch_stack.pop();
- }
+ {
+ try self.branch_stack.append(.{});
+ errdefer _ = self.branch_stack.pop();
- const else_deaths = liveness.deaths.len - 1;
- try self.ensureProcessDeathCapacity(liveness.deaths[else_deaths].len);
- for (liveness.deaths[else_deaths]) |operand| {
- self.processDeath(operand);
- }
+ const else_deaths = liveness.deaths.len - 1;
+ try self.ensureProcessDeathCapacity(liveness.deaths[else_deaths].len);
+ for (liveness.deaths[else_deaths]) |operand| {
+ self.processDeath(operand);
+ }
- try self.genBody(else_body);
+ try self.genBody(else_body);
+ }
branch_stack.appendAssumeCapacity(self.branch_stack.pop());
@@ -5030,11 +4963,7 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, target_branch.inst_table.count());
const target_slice = target_branch.inst_table.entries.slice();
- const target_keys = target_slice.items(.key);
- const target_values = target_slice.items(.value);
-
- for (target_keys, 0..) |target_key, target_idx| {
- const target_value = target_values[target_idx];
+ for (target_slice.items(.key), target_slice.items(.value)) |target_key, target_value| {
const canon_mcv = if (canon_branch.inst_table.fetchSwapRemove(target_key)) |canon_entry| blk: {
// The instruction's MCValue is overridden in both branches.
parent_branch.inst_table.putAssumeCapacity(target_key, canon_entry.value);
@@ -5064,10 +4993,7 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
}
try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, canon_branch.inst_table.count());
const canon_slice = canon_branch.inst_table.entries.slice();
- const canon_keys = canon_slice.items(.key);
- const canon_values = canon_slice.items(.value);
- for (canon_keys, 0..) |canon_key, canon_idx| {
- const canon_value = canon_values[canon_idx];
+ for (canon_slice.items(.key), canon_slice.items(.value)) |canon_key, canon_value| {
// We already deleted the items from this table that matched the target_branch.
// So these are all instructions that are only overridden in the canon branch.
parent_branch.inst_table.putAssumeCapacity(canon_key, canon_value);
@@ -5095,10 +5021,10 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
const next_inst = @intCast(u32, self.mir_instructions.len);
switch (self.mir_instructions.items(.tag)[reloc]) {
- .cond_jmp => {
+ .jcc => {
self.mir_instructions.items(.data)[reloc].inst_cc.inst = next_inst;
},
- .jmp => {
+ .jmp_reloc => {
self.mir_instructions.items(.data)[reloc].inst = next_inst;
},
else => unreachable,
@@ -5140,11 +5066,7 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
// Emit a jump with a relocation. It will be patched up after the block ends.
try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
// Leave the jump offset undefined
- const jmp_reloc = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = undefined },
- });
+ const jmp_reloc = try self.asmJmpReloc(undefined);
block_data.relocs.appendAssumeCapacity(jmp_reloc);
}
@@ -5217,31 +5139,19 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
var iter = std.mem.tokenize(u8, asm_source, "\n\r");
while (iter.next()) |ins| {
if (mem.eql(u8, ins, "syscall")) {
- _ = try self.addInst(.{
- .tag = .syscall,
- .ops = undefined,
- .data = undefined,
- });
+ try self.asmOpOnly(.syscall);
} else if (mem.indexOf(u8, ins, "push")) |_| {
const arg = ins[4..];
if (mem.indexOf(u8, arg, "$")) |l| {
const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch {
return self.fail("TODO implement more inline asm int parsing", .{});
};
- _ = try self.addInst(.{
- .tag = .push,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b10 }),
- .data = .{ .imm = n },
- });
+ try self.asmImmediate(.push, Immediate.u(n));
} else if (mem.indexOf(u8, arg, "%%")) |l| {
const reg_name = ins[4 + l + 2 ..];
const reg = parseRegName(reg_name) orelse
return self.fail("unrecognized register: '{s}'", .{reg_name});
- _ = try self.addInst(.{
- .tag = .push,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
- .data = undefined,
- });
+ try self.asmRegister(.push, reg);
} else return self.fail("TODO more push operands", .{});
} else if (mem.indexOf(u8, ins, "pop")) |_| {
const arg = ins[3..];
@@ -5249,11 +5159,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const reg_name = ins[3 + l + 2 ..];
const reg = parseRegName(reg_name) orelse
return self.fail("unrecognized register: '{s}'", .{reg_name});
- _ = try self.addInst(.{
- .tag = .pop,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
- .data = undefined,
- });
+ try self.asmRegister(.pop, reg);
} else return self.fail("TODO more pop operands", .{});
} else {
return self.fail("TODO implement support for more x86 assembly instructions", .{});
@@ -5268,9 +5174,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const reg_name = output[2 .. output.len - 1];
const reg = parseRegName(reg_name) orelse
return self.fail("unrecognized register: '{s}'", .{reg_name});
- break :result MCValue{ .register = reg };
+ break :result .{ .register = reg };
} else {
- break :result MCValue{ .none = {} };
+ break :result .none;
}
};
@@ -5326,7 +5232,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerError!void {
- const abi_size = ty.abiSize(self.target.*);
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return,
@@ -5350,26 +5256,17 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
.immediate => |imm| {
switch (abi_size) {
1, 2, 4 => {
+ // TODO
// We have a positive stack offset value but we want a two's complement negative
// offset from rbp, which is at the top of the stack frame.
- // mov [rbp+offset], immediate
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = @bitCast(u32, -stack_offset),
- .operand = @truncate(u32, imm),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rsp,
- .flags = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- else => unreachable,
- },
- }),
- .data = .{ .payload = payload },
- });
+ const immediate = if (ty.isSignedInt())
+ Immediate.s(@intCast(i32, @bitCast(i64, imm)))
+ else
+ Immediate.u(@intCast(u32, imm));
+ try self.asmMemoryImmediate(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = .rsp,
+ .disp = -stack_offset,
+ }), immediate);
},
8 => {
const reg = try self.copyToTmpRegister(ty, mcv);
@@ -5394,44 +5291,32 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f32_avx
- else
- Mir.Inst.Tag.mov_f32_sse,
- .f64 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f64_avx
- else
- Mir.Inst.Tag.mov_f64_sse,
- else => return self.fail("TODO genSetStackArg for register for type {}", .{ty.fmtDebug()}),
+ .f32 => .movss,
+ .f64 => .movsd,
+ else => return self.fail(
+ "TODO genSetStackArg for register for type {}",
+ .{ty.fmtDebug()},
+ ),
};
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = switch (ty.tag()) {
- .f32 => .esp,
- .f64 => .rsp,
- else => unreachable,
- },
- .reg2 = reg.to128(),
- .flags = 0b01,
- }),
- .data = .{ .imm = @bitCast(u32, -stack_offset) },
- });
- return;
+ // TODO verify this
+ const ptr_size: Memory.PtrSize = switch (ty.tag()) {
+ .f32 => .dword,
+ .f64 => .qword,
+ else => unreachable,
+ };
+ return self.asmMemoryRegister(tag, Memory.sib(ptr_size, .{
+ .base = .rsp,
+ .disp = -stack_offset,
+ }), reg.to128());
}
return self.fail("TODO genSetStackArg for register with no intrinsics", .{});
},
else => {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rsp,
- .reg2 = registerAlias(reg, @intCast(u32, abi_size)),
- .flags = 0b10,
- }),
- .data = .{ .imm = @bitCast(u32, -stack_offset) },
- });
+ try self.asmMemoryRegister(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = .rsp,
+ .disp = -stack_offset,
+ }), registerAlias(reg, abi_size));
},
}
},
@@ -5454,7 +5339,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
}
fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: InlineMemcpyOpts) InnerError!void {
- const abi_size = ty.abiSize(self.target.*);
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5462,10 +5347,19 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(self.target.*)) {
- 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }, opts),
- 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }, opts),
- 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }, opts),
+ switch (abi_size) {
+ 1, 2, 4 => {
+ const value: u64 = switch (abi_size) {
+ 1 => 0xaa,
+ 2 => 0xaaaa,
+ 4 => 0xaaaaaaaa,
+ else => unreachable,
+ };
+ return self.asmMemoryImmediate(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = opts.dest_stack_base orelse .rbp,
+ .disp = -stack_offset,
+ }), Immediate.u(value));
+ },
8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }, opts),
else => |x| return self.genInlineMemset(
.{ .stack_offset = stack_offset },
@@ -5485,13 +5379,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
const overflow_bit_ty = ty.structFieldType(1);
const overflow_bit_offset = ty.structFieldOffset(1, self.target.*);
const tmp_reg = try self.register_manager.allocReg(null, gp);
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = tmp_reg.to8(),
- }),
- .data = .{ .cc = ro.eflags },
- });
+ try self.asmSetccRegister(tmp_reg.to8(), ro.eflags);
return self.genSetStack(
overflow_bit_ty,
@@ -5506,72 +5394,36 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
},
.immediate => |x_big| {
const base_reg = opts.dest_stack_base orelse .rbp;
+ // TODO
switch (abi_size) {
0 => {
assert(ty.isError());
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = @bitCast(u32, -stack_offset),
- .operand = @truncate(u32, x_big),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = base_reg,
- .flags = 0b00,
- }),
- .data = .{ .payload = payload },
- });
+ try self.asmMemoryImmediate(.mov, Memory.sib(.byte, .{
+ .base = base_reg,
+ .disp = -stack_offset,
+ }), Immediate.u(@truncate(u8, x_big)));
},
1, 2, 4 => {
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = @bitCast(u32, -stack_offset),
- .operand = @truncate(u32, x_big),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = base_reg,
- .flags = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- else => unreachable,
- },
- }),
- .data = .{ .payload = payload },
- });
+ const immediate = if (ty.isSignedInt())
+ Immediate.s(@truncate(i32, @bitCast(i64, x_big)))
+ else
+ Immediate.u(@intCast(u32, x_big));
+ try self.asmMemoryImmediate(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = base_reg,
+ .disp = -stack_offset,
+ }), immediate);
},
8 => {
// A 64-bit write to memory would take two movs anyway, so we
// instead just use two 32-bit writes to avoid register allocation
- {
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = @bitCast(u32, -stack_offset + 4),
- .operand = @truncate(u32, x_big >> 32),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = base_reg,
- .flags = 0b10,
- }),
- .data = .{ .payload = payload },
- });
- }
- {
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = @bitCast(u32, -stack_offset),
- .operand = @truncate(u32, x_big),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = base_reg,
- .flags = 0b10,
- }),
- .data = .{ .payload = payload },
- });
- }
+ try self.asmMemoryImmediate(.mov, Memory.sib(.dword, .{
+ .base = base_reg,
+ .disp = -stack_offset + 4,
+ }), Immediate.u(@truncate(u32, x_big >> 32)));
+ try self.asmMemoryImmediate(.mov, Memory.sib(.dword, .{
+ .base = base_reg,
+ .disp = -stack_offset,
+ }), Immediate.u(@truncate(u32, x_big)));
},
else => {
return self.fail("TODO implement set abi_size=large stack variable with immediate", .{});
@@ -5589,30 +5441,22 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f32_avx
- else
- Mir.Inst.Tag.mov_f32_sse,
- .f64 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f64_avx
- else
- Mir.Inst.Tag.mov_f64_sse,
- else => return self.fail("TODO genSetStack for register for type {}", .{ty.fmtDebug()}),
+ .f32 => .movss,
+ .f64 => .movsd,
+ else => return self.fail(
+ "TODO genSetStack for register for type {}",
+ .{ty.fmtDebug()},
+ ),
};
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = switch (ty.tag()) {
- .f32 => base_reg.to32(),
- .f64 => base_reg.to64(),
- else => unreachable,
- },
- .reg2 = reg.to128(),
- .flags = 0b01,
- }),
- .data = .{ .imm = @bitCast(u32, -stack_offset) },
- });
- return;
+ const ptr_size: Memory.PtrSize = switch (ty.tag()) {
+ .f32 => .dword,
+ .f64 => .qword,
+ else => unreachable,
+ };
+ return self.asmMemoryRegister(tag, Memory.sib(ptr_size, .{
+ .base = base_reg.to64(),
+ .disp = -stack_offset,
+ }), reg.to128());
}
return self.fail("TODO genSetStack for register for type float with no intrinsics", .{});
@@ -5660,7 +5504,7 @@ fn genInlineMemcpyRegisterRegister(
src_reg: Register,
offset: i32,
) InnerError!void {
- assert(dst_reg.size() == 64);
+ assert(dst_reg.bitSize() == 64);
const dst_reg_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -5677,16 +5521,10 @@ fn genInlineMemcpyRegisterRegister(
var remainder = abi_size;
while (remainder > 0) {
const nearest_power_of_two = @as(u6, 1) << math.log2_int(u3, @intCast(u3, remainder));
-
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg,
- .reg2 = registerAlias(tmp_reg, nearest_power_of_two),
- .flags = 0b10,
- }),
- .data = .{ .imm = @bitCast(u32, -next_offset) },
- });
+ try self.asmMemoryRegister(.mov, Memory.sib(Memory.PtrSize.fromSize(nearest_power_of_two), .{
+ .base = dst_reg,
+ .disp = -next_offset,
+ }), registerAlias(tmp_reg, nearest_power_of_two));
if (nearest_power_of_two > 1) {
try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{
@@ -5698,15 +5536,10 @@ fn genInlineMemcpyRegisterRegister(
next_offset -= nearest_power_of_two;
}
} else {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg,
- .reg2 = registerAlias(src_reg, @intCast(u32, abi_size)),
- .flags = 0b10,
- }),
- .data = .{ .imm = @bitCast(u32, -offset) },
- });
+ try self.asmMemoryRegister(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = dst_reg,
+ .disp = -offset,
+ }), registerAlias(src_reg, abi_size));
}
}
@@ -5746,24 +5579,17 @@ fn genInlineMemcpy(
try self.loadMemPtrIntoRegister(dst_addr_reg, Type.usize, dst_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_addr_reg.to64(),
- .reg2 = opts.dest_stack_base orelse .rbp,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(.lea, dst_addr_reg.to64(), Memory.sib(.qword, .{
+ .base = opts.dest_stack_base orelse .rbp,
+ .disp = -off,
+ }));
},
.register => |reg| {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_addr_reg, @divExact(reg.size(), 8)),
- .reg2 = reg,
- }),
- .data = undefined,
- });
+ try self.asmRegisterRegister(
+ .mov,
+ registerAlias(dst_addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
+ reg,
+ );
},
else => {
return self.fail("TODO implement memcpy for setting stack when dest is {}", .{dst_ptr});
@@ -5775,24 +5601,17 @@ fn genInlineMemcpy(
try self.loadMemPtrIntoRegister(src_addr_reg, Type.usize, src_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = src_addr_reg.to64(),
- .reg2 = opts.source_stack_base orelse .rbp,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(.lea, src_addr_reg.to64(), Memory.sib(.qword, .{
+ .base = opts.source_stack_base orelse .rbp,
+ .disp = -off,
+ }));
},
.register => |reg| {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(src_addr_reg, @divExact(reg.size(), 8)),
- .reg2 = reg,
- }),
- .data = undefined,
- });
+ try self.asmRegisterRegister(
+ .mov,
+ registerAlias(src_addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
+ reg,
+ );
},
else => {
return self.fail("TODO implement memcpy for setting stack when src is {}", .{src_ptr});
@@ -5800,74 +5619,35 @@ fn genInlineMemcpy(
}
try self.genSetReg(Type.usize, count_reg, len);
-
- // mov index_reg, 0
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
- .data = .{ .imm = 0 },
- });
-
- // loop:
- // cmp count, 0
+ try self.asmRegisterImmediate(.mov, index_reg, Immediate.u(0));
const loop_start = try self.addInst(.{
.tag = .cmp,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
- .data = .{ .imm = 0 },
- });
-
- // je end
- const loop_reloc = try self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst_cc = .{
- .inst = undefined,
- .cc = .e,
+ .ops = .ri_u,
+ .data = .{ .ri = .{
+ .r1 = count_reg,
+ .imm = 0,
} },
});
-
- // mov tmp, [addr + index_reg]
- _ = try self.addInst(.{
- .tag = .mov_scale_src,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = tmp_reg.to8(),
- .reg2 = src_addr_reg,
- }),
- .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDisp.encode(index_reg, 0)) },
- });
-
- // mov [stack_offset + index_reg], tmp
- _ = try self.addInst(.{
- .tag = .mov_scale_dst,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_addr_reg,
- .reg2 = tmp_reg.to8(),
- }),
- .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDisp.encode(index_reg, 0)) },
- });
-
- // add index_reg, 1
- _ = try self.addInst(.{
- .tag = .add,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
- .data = .{ .imm = 1 },
- });
-
- // sub count, 1
- _ = try self.addInst(.{
- .tag = .sub,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
- .data = .{ .imm = 1 },
- });
-
- // jmp loop
- _ = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = loop_start },
- });
-
- // end:
+ const loop_reloc = try self.asmJccReloc(undefined, .e);
+ try self.asmRegisterMemory(.mov, tmp_reg.to8(), Memory.sib(.byte, .{
+ .base = src_addr_reg,
+ .scale_index = .{
+ .scale = 1,
+ .index = index_reg,
+ },
+ .disp = 0,
+ }));
+ try self.asmMemoryRegister(.mov, Memory.sib(.byte, .{
+ .base = dst_addr_reg,
+ .scale_index = .{
+ .scale = 1,
+ .index = index_reg,
+ },
+ .disp = 0,
+ }), tmp_reg.to8());
+ try self.asmRegisterImmediate(.add, index_reg, Immediate.u(1));
+ try self.asmRegisterImmediate(.sub, count_reg, Immediate.u(1));
+ _ = try self.asmJmpReloc(loop_start);
try self.performReloc(loop_reloc);
}
@@ -5899,24 +5679,17 @@ fn genInlineMemset(
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, dst_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = opts.dest_stack_base orelse .rbp,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(.lea, addr_reg.to64(), Memory.sib(.qword, .{
+ .base = opts.dest_stack_base orelse .rbp,
+ .disp = -off,
+ }));
},
.register => |reg| {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(addr_reg, @divExact(reg.size(), 8)),
- .reg2 = reg,
- }),
- .data = undefined,
- });
+ try self.asmRegisterRegister(
+ .mov,
+ registerAlias(addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
+ reg,
+ );
},
else => {
return self.fail("TODO implement memcpy for setting stack when dest is {}", .{dst_ptr});
@@ -5926,54 +5699,35 @@ fn genInlineMemset(
try self.genSetReg(Type.usize, index_reg, len);
try self.genBinOpMir(.sub, Type.usize, .{ .register = index_reg }, .{ .immediate = 1 });
- // loop:
- // cmp index_reg, -1
const loop_start = try self.addInst(.{
.tag = .cmp,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
- .data = .{ .imm = @bitCast(u32, @as(i32, -1)) },
- });
-
- // je end
- const loop_reloc = try self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst_cc = .{
- .inst = undefined,
- .cc = .e,
+ .ops = .ri_s,
+ .data = .{ .ri = .{
+ .r1 = index_reg,
+ .imm = @bitCast(u32, @as(i32, -1)),
} },
});
+ const loop_reloc = try self.asmJccReloc(undefined, .e);
switch (value) {
.immediate => |x| {
if (x > math.maxInt(i32)) {
return self.fail("TODO inline memset for value immediate larger than 32bits", .{});
}
- // mov byte ptr [rbp + index_reg + stack_offset], imm
- _ = try self.addInst(.{
- .tag = .mov_mem_index_imm,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = addr_reg }),
- .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDispImm.encode(index_reg, 0, @truncate(u32, x))) },
- });
+ try self.asmMemoryImmediate(.mov, Memory.sib(.byte, .{
+ .base = addr_reg,
+ .scale_index = .{
+ .scale = 1,
+ .index = index_reg,
+ },
+ .disp = 0,
+ }), Immediate.u(@intCast(u8, x)));
},
else => return self.fail("TODO inline memset for value of type {}", .{value}),
}
- // sub index_reg, 1
- _ = try self.addInst(.{
- .tag = .sub,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
- .data = .{ .imm = 1 },
- });
-
- // jmp loop
- _ = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = loop_start },
- });
-
- // end:
+ try self.asmRegisterImmediate(.sub, index_reg, Immediate.u(1));
+ _ = try self.asmJmpReloc(loop_start);
try self.performReloc(loop_reloc);
}
@@ -5986,21 +5740,18 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = .rbp,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(
+ .lea,
+ registerAlias(reg, abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
+ );
},
.unreach, .none => return, // Nothing to do.
.undef => {
if (!self.wantSafety())
return; // The already existing value will do just fine.
// Write the debug undefined value.
- switch (registerAlias(reg, abi_size).size()) {
+ switch (registerAlias(reg, abi_size).bitSize()) {
8 => return self.genSetReg(ty, reg, .{ .immediate = 0xaa }),
16 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaa }),
32 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }),
@@ -6009,50 +5760,29 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
},
.eflags => |cc| {
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to8(),
- }),
- .data = .{ .cc = cc },
- });
+ return self.asmSetccRegister(reg.to8(), cc);
},
.immediate => |x| {
- // 32-bit moves zero-extend to 64-bit, so xoring the 32-bit
- // register is the fastest way to zero a register.
if (x == 0) {
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to32(),
- .reg2 = reg.to32(),
- }),
- .data = undefined,
- });
- return;
+ // 32-bit moves zero-extend to 64-bit, so xoring the 32-bit
+ // register is the fastest way to zero a register.
+ return self.asmRegisterRegister(.xor, reg.to32(), reg.to32());
}
- if (x <= math.maxInt(i32)) {
- // Next best case: if we set the lower four bytes, the upper four will be zeroed.
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }),
- .data = .{ .imm = @truncate(u32, x) },
- });
- return;
+ if (ty.isSignedInt()) {
+ const signed_x = @bitCast(i64, x);
+ if (math.minInt(i32) <= signed_x and signed_x <= math.maxInt(i32)) {
+ return self.asmRegisterImmediate(
+ .mov,
+ registerAlias(reg, abi_size),
+ Immediate.s(@intCast(i32, signed_x)),
+ );
+ }
}
- // Worst case: we need to load the 64-bit register with the IMM. GNU's assemblers calls
- // this `movabs`, though this is officially just a different variant of the plain `mov`
- // instruction.
- //
- // This encoding is, in fact, the *same* as the one used for 32-bit loads. The only
- // difference is that we set REX.W before the instruction, which extends the load to
- // 64-bit and uses the full bit-width of the register.
- const payload = try self.addExtra(Mir.Imm64.encode(x));
- _ = try self.addInst(.{
- .tag = .movabs,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64() }),
- .data = .{ .payload = payload },
- });
+ return self.asmRegisterImmediate(
+ .mov,
+ registerAlias(reg, abi_size),
+ Immediate.u(x),
+ );
},
.register => |src_reg| {
// If the registers are the same, nothing to do.
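
The `.immediate` arm of genSetReg above picks between three strategies: an xor for zero, a sign-extended 32-bit mov for small signed values, and otherwise handing the full unsigned immediate to the encoder. A standalone sketch of that decision (illustrative names, not backend code):

    const std = @import("std");

    const ImmStrategy = enum { xor_zero, mov_imm32_sign_extended, mov_imm_unsigned };

    fn pickImmStrategy(x: u64, is_signed_int: bool) ImmStrategy {
        if (x == 0) return .xor_zero; // xor r32, r32 also clears the upper half
        if (is_signed_int) {
            const signed_x = @bitCast(i64, x);
            if (std.math.minInt(i32) <= signed_x and signed_x <= std.math.maxInt(i32))
                return .mov_imm32_sign_extended;
        }
        // The encoder receives the full unsigned immediate and is free to pick
        // the shortest encoding that fits.
        return .mov_imm_unsigned;
    }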
@@ -6063,69 +5793,38 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.Int => switch (ty.intInfo(self.target.*).signedness) {
.signed => {
if (abi_size <= 4) {
- _ = try self.addInst(.{
- .tag = .mov_sign_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
- return;
+ return self.asmRegisterRegister(
+ .movsx,
+ reg.to64(),
+ registerAlias(src_reg, abi_size),
+ );
}
},
.unsigned => {
if (abi_size <= 2) {
- _ = try self.addInst(.{
- .tag = .mov_zero_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
- return;
+ return self.asmRegisterRegister(
+ .movzx,
+ reg.to64(),
+ registerAlias(src_reg, abi_size),
+ );
}
},
},
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f32_avx
- else
- Mir.Inst.Tag.mov_f32_sse,
- .f64 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f64_avx
- else
- Mir.Inst.Tag.mov_f64_sse,
+ .f32 => .movss,
+ .f64 => .movsd,
else => return self.fail("TODO genSetReg from register for {}", .{ty.fmtDebug()}),
};
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to128(),
- .reg2 = src_reg.to128(),
- .flags = 0b10,
- }),
- .data = undefined,
- });
- return;
+ return self.asmRegisterRegister(tag, reg.to128(), src_reg.to128());
}
-
return self.fail("TODO genSetReg from register for float with no intrinsics", .{});
},
else => {},
}
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
+ try self.asmRegisterRegister(.mov, registerAlias(reg, abi_size), registerAlias(src_reg, abi_size));
},
.linker_load => {
switch (ty.zigTypeTag()) {
@@ -6135,45 +5834,30 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (intrinsicsAllowed(self.target.*, ty)) {
const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f32_avx
- else
- Mir.Inst.Tag.mov_f32_sse,
- .f64 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f64_avx
- else
- Mir.Inst.Tag.mov_f64_sse,
+ .f32 => .movss,
+ .f64 => .movsd,
else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}),
};
-
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to128(),
- .reg2 = switch (ty.tag()) {
- .f32 => base_reg.to32(),
- .f64 => base_reg.to64(),
- else => unreachable,
- },
- }),
- .data = .{ .imm = 0 },
- });
- return;
+ const ptr_size: Memory.PtrSize = switch (ty.tag()) {
+ .f32 => .dword,
+ .f64 => .qword,
+ else => unreachable,
+ };
+ return self.asmRegisterMemory(tag, reg.to128(), Memory.sib(ptr_size, .{
+ .base = base_reg.to64(),
+ .disp = 0,
+ }));
}
return self.fail("TODO genSetReg from memory for float with no intrinsics", .{});
},
else => {
try self.loadMemPtrIntoRegister(reg, Type.usize, mcv);
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = reg.to64(),
- .flags = 0b01,
- }),
- .data = .{ .imm = 0 },
- });
+ try self.asmRegisterMemory(
+ .mov,
+ registerAlias(reg, abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg.to64(), .disp = 0 }),
+ );
},
}
},
@@ -6184,73 +5868,56 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (intrinsicsAllowed(self.target.*, ty)) {
const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f32_avx
- else
- Mir.Inst.Tag.mov_f32_sse,
- .f64 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f64_avx
- else
- Mir.Inst.Tag.mov_f64_sse,
+ .f32 => .movss,
+ .f64 => .movsd,
else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}),
};
-
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to128(),
- .reg2 = switch (ty.tag()) {
- .f32 => base_reg.to32(),
- .f64 => base_reg.to64(),
- else => unreachable,
- },
- }),
- .data = .{ .imm = 0 },
- });
- return;
+ const ptr_size: Memory.PtrSize = switch (ty.tag()) {
+ .f32 => .dword,
+ .f64 => .qword,
+ else => unreachable,
+ };
+ return self.asmRegisterMemory(tag, reg.to128(), Memory.sib(ptr_size, .{
+ .base = base_reg.to64(),
+ .disp = 0,
+ }));
}
return self.fail("TODO genSetReg from memory for float with no intrinsics", .{});
},
else => {
if (x <= math.maxInt(i32)) {
- // mov reg, [ds:imm32]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .flags = 0b01,
+ try self.asmRegisterMemory(
+ .mov,
+ registerAlias(reg, abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = .ds,
+ .disp = @intCast(i32, x),
}),
- .data = .{ .imm = @truncate(u32, x) },
- });
+ );
} else {
- // If this is RAX, we can use a direct load.
- // Otherwise, we need to load the address, then indirectly load the value.
- if (reg.id() == 0) {
- // movabs rax, ds:moffs64
- const payload = try self.addExtra(Mir.Imm64.encode(x));
+            // If this is RAX, we can use a direct load.
+            // Otherwise, we need to load the address, then indirectly load the value.
+            if (reg.to64() == .rax) {
+ var moffs: Mir.MemoryMoffs = .{
+ .seg = @enumToInt(Register.ds),
+ .msb = undefined,
+ .lsb = undefined,
+ };
+ moffs.encodeOffset(x);
_ = try self.addInst(.{
- .tag = .movabs,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01, // imm64 will become moffs64
- }),
- .data = .{ .payload = payload },
+ .tag = .mov_moffs,
+ .ops = .rax_moffs,
+ .data = .{ .payload = try self.addExtra(moffs) },
});
} else {
// Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue.
try self.genSetReg(ty, reg, MCValue{ .immediate = x });
-
- // mov reg, [reg + 0x0]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = reg.to64(),
- .flags = 0b01,
- }),
- .data = .{ .imm = 0 },
- });
+ try self.asmRegisterMemory(
+ .mov,
+ registerAlias(reg, abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg.to64(), .disp = 0 }),
+ );
}
}
},
@@ -6264,85 +5931,59 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.Int => switch (ty.intInfo(self.target.*).signedness) {
.signed => {
if (abi_size <= 4) {
- const flags: u2 = switch (abi_size) {
- 1 => 0b01,
- 2 => 0b10,
- 4 => 0b11,
- else => unreachable,
- };
- _ = try self.addInst(.{
- .tag = .mov_sign_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .reg2 = .rbp,
- .flags = flags,
+ return self.asmRegisterMemory(
+ .movsx,
+ reg.to64(),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = .rbp,
+ .disp = -off,
}),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
- return;
+ );
}
},
.unsigned => {
if (abi_size <= 2) {
- const flags: u2 = switch (abi_size) {
- 1 => 0b01,
- 2 => 0b10,
- else => unreachable,
- };
- _ = try self.addInst(.{
- .tag = .mov_zero_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .reg2 = .rbp,
- .flags = flags,
+ return self.asmRegisterMemory(
+ .movzx,
+ reg.to64(),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .base = .rbp,
+ .disp = -off,
}),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
- return;
+ );
}
},
},
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f32_avx
- else
- Mir.Inst.Tag.mov_f32_sse,
- .f64 => if (hasAvxSupport(self.target.*))
- Mir.Inst.Tag.mov_f64_avx
- else
- Mir.Inst.Tag.mov_f64_sse,
- else => return self.fail("TODO genSetReg from stack offset for {}", .{ty.fmtDebug()}),
+ .f32 => .movss,
+ .f64 => .movsd,
+ else => return self.fail(
+ "TODO genSetReg from stack offset for {}",
+ .{ty.fmtDebug()},
+ ),
};
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to128(),
- .reg2 = switch (ty.tag()) {
- .f32 => .ebp,
- .f64 => .rbp,
- else => unreachable,
- },
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
- return;
+ const ptr_size: Memory.PtrSize = switch (ty.tag()) {
+ .f32 => .dword,
+ .f64 => .qword,
+ else => unreachable,
+ };
+ return self.asmRegisterMemory(tag, reg.to128(), Memory.sib(ptr_size, .{
+ .base = .rbp,
+ .disp = -off,
+ }));
}
return self.fail("TODO genSetReg from stack offset for float with no intrinsics", .{});
},
else => {},
}
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = .rbp,
- .flags = 0b01,
- }),
- .data = .{ .imm = @bitCast(u32, -off) },
- });
+ try self.asmRegisterMemory(
+ .mov,
+ registerAlias(reg, abi_size),
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
+ );
},
}
}
@@ -6406,6 +6047,16 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
const src_ty = self.air.typeOf(ty_op.operand);
const dst_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
+ const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+
+ switch (src_abi_size) {
+ 4, 8 => {},
+ else => |size| return self.fail("TODO load ST(0) with abiSize={}", .{size}),
+ }
+ if (dst_abi_size > 8) {
+ return self.fail("TODO convert float with abiSize={}", .{dst_abi_size});
+ }
// move float src to ST(0)
const stack_offset = switch (operand) {
@@ -6413,41 +6064,24 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
else => blk: {
const offset = @intCast(i32, try self.allocMem(
inst,
- @intCast(u32, src_ty.abiSize(self.target.*)),
+ src_abi_size,
src_ty.abiAlignment(self.target.*),
));
try self.genSetStack(src_ty, offset, operand, .{});
break :blk offset;
},
};
- _ = try self.addInst(.{
- .tag = .fld,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .flags = switch (src_ty.abiSize(self.target.*)) {
- 4 => 0b01,
- 8 => 0b10,
- else => |size| return self.fail("TODO load ST(0) with abiSize={}", .{size}),
- },
- }),
- .data = .{ .imm = @bitCast(u32, -stack_offset) },
- });
+ try self.asmMemory(.fld, Memory.sib(Memory.PtrSize.fromSize(src_abi_size), .{
+ .base = .rbp,
+ .disp = -stack_offset,
+ }));
// convert
const stack_dst = try self.allocRegOrMem(inst, false);
- _ = try self.addInst(.{
- .tag = .fisttp,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .flags = switch (dst_ty.abiSize(self.target.*)) {
- 1...2 => 0b00,
- 3...4 => 0b01,
- 5...8 => 0b10,
- else => |size| return self.fail("TODO convert float with abiSize={}", .{size}),
- },
- }),
- .data = .{ .imm = @bitCast(u32, -stack_dst.stack_offset) },
- });
+ try self.asmMemory(.fisttp, Memory.sib(Memory.PtrSize.fromSize(dst_abi_size), .{
+ .base = .rbp,
+ .disp = -stack_dst.stack_offset,
+ }));
return self.finishAir(inst, stack_dst, .{ ty_op.operand, .none, .none });
}
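
Behaviorally, the fld/fisttp pair emitted above loads the float onto the x87 stack and stores it truncated toward zero, independent of the current rounding mode. A minimal model (not backend code):

    fn floatToIntModel(comptime Int: type, x: f64) Int {
        // FISTTP truncates toward zero, matching @floatToInt semantics.
        return @floatToInt(Int, x);
    }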
@@ -6538,15 +6172,10 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
.linker_load, .memory => {
const reg = try self.register_manager.allocReg(null, gp);
try self.loadMemPtrIntoRegister(reg, src_ty, src_ptr);
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg,
- .reg2 = reg,
- .flags = 0b01,
- }),
- .data = .{ .imm = 0 },
- });
+ try self.asmRegisterMemory(.mov, reg, Memory.sib(.qword, .{
+ .base = reg,
+ .disp = 0,
+ }));
break :blk MCValue{ .register = reg };
},
else => break :blk src_ptr,
@@ -6619,6 +6248,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
switch (result_ty.zigTypeTag()) {
.Struct => {
+ if (result_ty.containerLayout() == .Packed) {
+ return self.fail("TODO airAggregateInit implement packed structs", .{});
+ }
const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
for (elements, 0..) |elem, elem_i| {
if (result_ty.structFieldValueComptime(elem_i) != null) continue; // comptime elem
@@ -6683,13 +6315,13 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
}
-pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
+fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
- return MCValue{ .none = {} };
+ return .none;
}
return self.genTypedValue(tv);
}
@@ -6697,7 +6329,7 @@ pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
- return MCValue{ .none = {} };
+ return .none;
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
switch (self.air.instructions.items(.tag)[inst_index]) {
@@ -6715,18 +6347,17 @@ pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
return gop.value_ptr.*;
},
.const_ty => unreachable,
- else => return self.getResolvedInstValue(inst_index),
+ else => return self.getResolvedInstValue(inst_index).?,
}
}
-fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
+fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) ?MCValue {
// Treat each stack item as a "layer" on top of the previous one.
var i: usize = self.branch_stack.items.len;
while (true) {
i -= 1;
if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| {
- assert(mcv != .dead);
- return mcv;
+ return if (mcv != .dead) mcv else null;
}
}
}
@@ -6752,200 +6383,26 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
-fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- log.debug("lowerDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
- // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.zigTypeTag() == .Pointer) blk: {
- if (tv.ty.castPtrToFn()) |_| break :blk;
- if (!tv.ty.elemType2().hasRuntimeBits()) {
- return MCValue.none;
- }
- }
-
- const module = self.bin_file.options.module.?;
- const decl = module.declPtr(decl_index);
- module.markDeclAlive(decl);
-
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
- const atom = elf_file.getAtom(atom_index);
- return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
- const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
- return MCValue{ .linker_load = .{
- .type = .got,
- .sym_index = sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const decl_block_index = try p9.seeDecl(decl_index);
- const decl_block = p9.getDeclBlock(decl_block_index);
- const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO codegen non-ELF const Decl pointer", .{});
- }
-}
-
-fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
- log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
- const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
- return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
- };
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- return MCValue{ .linker_load = .{
- .type = .direct,
- .sym_index = local_sym_index,
- } };
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_index = local_sym_index; // the plan9 backend returns the got_index
- const got_addr = p9.bases.data + got_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else {
- return self.fail("TODO lower unnamed const", .{});
- }
-}
-
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
- var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
- }
- log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
- if (typed_value.val.isUndef())
- return MCValue{ .undef = {} };
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return self.lowerDeclRef(typed_value, payload.data.decl_index);
- }
-
- const target = self.target.*;
-
- switch (typed_value.ty.zigTypeTag()) {
- .Void => return MCValue{ .none = {} },
- .Pointer => switch (typed_value.ty.ptrSize()) {
- .Slice => {},
- else => {
- switch (typed_value.val.tag()) {
- .int_u64 => {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- },
- else => {},
- }
- },
+ const mcv: MCValue = switch (try codegen.genTypedValue(
+ self.bin_file,
+ self.src_loc,
+ arg_tv,
+ self.mod_fn.owner_decl,
+ )) {
+ .mcv => |mcv| switch (mcv) {
+ .none => .none,
+ .undef => .undef,
+ .linker_load => |ll| .{ .linker_load = ll },
+ .immediate => |imm| .{ .immediate = imm },
+ .memory => |addr| .{ .memory = addr },
},
- .Int => {
- const info = typed_value.ty.intInfo(self.target.*);
- if (info.bits <= ptr_bits and info.signedness == .signed) {
- return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) };
- }
- if (!(info.bits > ptr_bits or info.signedness == .signed)) {
- return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
- }
+ .fail => |msg| {
+ self.err_msg = msg;
+ return error.CodegenFail;
},
- .Bool => {
- return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
- },
- .Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
- if (typed_value.val.isNull())
- return MCValue{ .immediate = 0 };
-
- var buf: Type.Payload.ElemType = undefined;
- return self.genTypedValue(.{
- .ty = typed_value.ty.optionalChild(&buf),
- .val = typed_value.val,
- });
- } else if (typed_value.ty.abiSize(self.target.*) == 1) {
- return MCValue{ .immediate = @boolToInt(!typed_value.val.isNull()) };
- }
- },
- .Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- switch (typed_value.ty.tag()) {
- .enum_simple => {
- return MCValue{ .immediate = field_index.data };
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
- } else {
- return MCValue{ .immediate = field_index.data };
- }
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
- return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
- }
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = self.bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return MCValue{ .immediate = error_index };
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return MCValue{ .immediate = 0 };
- },
- }
- },
- .ErrorUnion => {
- const error_type = typed_value.ty.errorUnionSet();
- const payload_type = typed_value.ty.errorUnionPayload();
- const is_pl = typed_value.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
- return self.genTypedValue(.{ .ty = error_type, .val = err_val });
- }
- },
-
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- else => {},
- }
-
- return self.lowerUnnamedConst(typed_value);
+ };
+ return mcv;
}
const CallMCValues = struct {
@@ -6980,7 +6437,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
switch (cc) {
.Naked => {
assert(result.args.len == 0);
- result.return_value = .{ .unreach = {} };
+ result.return_value = .unreach;
result.stack_byte_count = 0;
result.stack_align = 1;
return result;
@@ -6988,10 +6445,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
.C => {
// Return values
if (ret_ty.zigTypeTag() == .NoReturn) {
- result.return_value = .{ .unreach = {} };
+ result.return_value = .unreach;
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
// TODO: is this even possible for C calling convention?
- result.return_value = .{ .none = {} };
+ result.return_value = .none;
} else {
const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
if (ret_ty_size == 0) {
@@ -7012,7 +6469,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => 0,
};
- for (param_types, 0..) |ty, i| {
+ for (param_types, result.args, 0..) |ty, *arg, i| {
assert(ty.hasRuntimeBits());
const classes: []const abi.Class = switch (self.target.os.tag) {
@@ -7025,7 +6482,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
switch (classes[0]) {
.integer => blk: {
if (i >= abi.getCAbiIntParamRegs(self.target.*).len) break :blk; // fallthrough
- result.args[i] = .{ .register = abi.getCAbiIntParamRegs(self.target.*)[i] };
+ arg.* = .{ .register = abi.getCAbiIntParamRegs(self.target.*)[i] };
continue;
},
.memory => {}, // fallthrough
@@ -7037,7 +6494,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const param_align = @intCast(u32, ty.abiAlignment(self.target.*));
const offset = mem.alignForwardGeneric(u32, next_stack_offset + param_size, param_align);
- result.args[i] = .{ .stack_offset = @intCast(i32, offset) };
+ arg.* = .{ .stack_offset = @intCast(i32, offset) };
next_stack_offset = offset;
}
@@ -7063,15 +6520,15 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
.Unspecified => {
// Return values
if (ret_ty.zigTypeTag() == .NoReturn) {
- result.return_value = .{ .unreach = {} };
+ result.return_value = .unreach;
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
- result.return_value = .{ .none = {} };
+ result.return_value = .none;
} else {
const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
if (ret_ty_size == 0) {
assert(ret_ty.isError());
result.return_value = .{ .immediate = 0 };
- } else if (ret_ty_size <= 8) {
+ } else if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) {
const aliased_reg = registerAlias(abi.getCAbiIntReturnRegs(self.target.*)[0], ret_ty_size);
result.return_value = .{ .register = aliased_reg };
} else {
@@ -7088,15 +6545,15 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => 0,
};
- for (param_types, 0..) |ty, i| {
+ for (param_types, result.args) |ty, *arg| {
if (!ty.hasRuntimeBits()) {
- result.args[i] = .{ .none = {} };
+ arg.* = .none;
continue;
}
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const param_align = @intCast(u32, ty.abiAlignment(self.target.*));
const offset = mem.alignForwardGeneric(u32, next_stack_offset + param_size, param_align);
- result.args[i] = .{ .stack_offset = @intCast(i32, offset) };
+ arg.* = .{ .stack_offset = @intCast(i32, offset) };
next_stack_offset = offset;
}
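
The argument-assignment loops above use Zig's multi-object `for` syntax: several slices (plus an index range) are iterated in lockstep, and `*arg` yields a mutable pointer into `result.args`. A standalone illustration:

    const std = @import("std");

    test "multi-object for iterates slices in lockstep" {
        const src = [_]u32{ 1, 2, 3 };
        var dst = [_]u32{ 0, 0, 0 };
        for (src, &dst, 0..) |value, *out, i| {
            out.* = value + @intCast(u32, i);
        }
        try std.testing.expectEqualSlices(u32, &[_]u32{ 1, 3, 5 }, &dst);
    }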
@@ -7146,28 +6603,34 @@ fn parseRegName(name: []const u8) ?Register {
/// Returns register wide enough to hold at least `size_bytes`.
fn registerAlias(reg: Register, size_bytes: u32) Register {
- if (size_bytes == 0) {
- unreachable; // should be comptime-known
- } else if (size_bytes <= 1) {
- return reg.to8();
- } else if (size_bytes <= 2) {
- return reg.to16();
- } else if (size_bytes <= 4) {
- return reg.to32();
- } else if (size_bytes <= 8) {
- return reg.to64();
- } else if (size_bytes <= 16) {
- return reg.to128();
- } else if (size_bytes <= 32) {
- return reg.to256();
- } else unreachable;
+ return switch (reg.class()) {
+ .general_purpose => if (size_bytes == 0)
+ unreachable // should be comptime-known
+ else if (size_bytes <= 1)
+ reg.to8()
+ else if (size_bytes <= 2)
+ reg.to16()
+ else if (size_bytes <= 4)
+ reg.to32()
+ else if (size_bytes <= 8)
+ reg.to64()
+ else
+ unreachable,
+ .floating_point => if (size_bytes <= 16)
+ reg.to128()
+ else if (size_bytes <= 32)
+ reg.to256()
+ else
+ unreachable,
+ .segment => unreachable,
+ };
}
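
A sketch of a test that could sit next to registerAlias to pin down its intent (assuming the file's existing `std` import); the register names follow the aliases already used in this file, al/eax/rax for the general-purpose class and xmm0 for the SSE class:

    test "registerAlias picks the narrowest alias that fits" {
        try std.testing.expectEqual(Register.al, registerAlias(.rax, 1));
        try std.testing.expectEqual(Register.eax, registerAlias(.rax, 4));
        try std.testing.expectEqual(Register.rax, registerAlias(.rax, 8));
        try std.testing.expectEqual(Register.xmm0, registerAlias(.xmm0, 16));
    }
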
/// Truncates the value in the register in place.
/// Clobbers any remaining bits.
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
const int_info = ty.intInfo(self.target.*);
- const max_reg_bit_width = Register.rax.size();
+ const max_reg_bit_width = Register.rax.bitSize();
switch (int_info.signedness) {
.signed => {
const shift = @intCast(u6, max_reg_bit_width - int_info.bits);
@@ -7177,7 +6640,7 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
.unsigned => {
const shift = @intCast(u6, max_reg_bit_width - int_info.bits);
const mask = (~@as(u64, 0)) >> shift;
- if (int_info.bits <= 32) {
+ if (int_info.bits < 32) {
try self.genBinOpMir(.@"and", Type.usize, .{ .register = reg }, .{ .immediate = mask });
} else {
const tmp_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = mask });
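
Worked example of the unsigned mask computed in truncateRegister above: for an n-bit integer, the all-ones value shifted right by (64 - n) has exactly the low n bits set, so and-ing with it clears everything above the integer's width. A test that could sit next to the helper (assuming the file's existing `std` import):

    test "truncation mask covers exactly the low bits" {
        const int_bits: u64 = 20;
        const shift = @intCast(u6, 64 - int_bits);
        const mask = (~@as(u64, 0)) >> shift;
        try std.testing.expectEqual(@as(u64, 0xFFFFF), mask);
    }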
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 12c19915c6..32699d35cb 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -7,6 +7,7 @@ const std = @import("std");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
+const encoder = @import("encoder.zig");
const link = @import("../../link.zig");
const log = std.log.scoped(.codegen);
const math = std.math;
@@ -19,12 +20,14 @@ const CodeGen = @import("CodeGen.zig");
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const Encoder = bits.Encoder;
const ErrorMsg = Module.ErrorMsg;
+const Immediate = bits.Immediate;
+const Instruction = encoder.Instruction;
const MCValue = @import("CodeGen.zig").MCValue;
+const Memory = bits.Memory;
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
-const Instruction = bits.Instruction;
-const Type = @import("../../type.zig").Type;
const Register = bits.Register;
+const Type = @import("../../type.zig").Type;
mir: Mir,
bin_file: *link.File,
@@ -45,6 +48,8 @@ relocs: std.ArrayListUnmanaged(Reloc) = .{},
const InnerError = error{
OutOfMemory,
EmitFail,
+ InvalidInstruction,
+ CannotEncode,
};
const Reloc = struct {
@@ -65,133 +70,67 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
const inst = @intCast(u32, index);
try emit.code_offset_mapping.putNoClobber(emit.bin_file.allocator, inst, emit.code.items.len);
switch (tag) {
- // GPR instructions
- .adc => try emit.mirArith(.adc, inst),
- .add => try emit.mirArith(.add, inst),
- .sub => try emit.mirArith(.sub, inst),
- .xor => try emit.mirArith(.xor, inst),
- .@"and" => try emit.mirArith(.@"and", inst),
- .@"or" => try emit.mirArith(.@"or", inst),
- .sbb => try emit.mirArith(.sbb, inst),
- .cmp => try emit.mirArith(.cmp, inst),
- .mov => try emit.mirArith(.mov, inst),
+ .adc,
+ .add,
+ .@"and",
+ .call,
+ .cbw,
+ .cwde,
+ .cdqe,
+ .cwd,
+ .cdq,
+ .cqo,
+ .cmp,
+ .div,
+ .fisttp,
+ .fld,
+ .idiv,
+ .imul,
+ .int3,
+ .jmp,
+ .lea,
+ .mov,
+ .movzx,
+ .mul,
+ .nop,
+ .@"or",
+ .pop,
+ .push,
+ .ret,
+ .sal,
+ .sar,
+ .sbb,
+ .shl,
+ .shr,
+ .sub,
+ .syscall,
+ .@"test",
+ .ud2,
+ .xor,
- .adc_mem_imm => try emit.mirArithMemImm(.adc, inst),
- .add_mem_imm => try emit.mirArithMemImm(.add, inst),
- .sub_mem_imm => try emit.mirArithMemImm(.sub, inst),
- .xor_mem_imm => try emit.mirArithMemImm(.xor, inst),
- .and_mem_imm => try emit.mirArithMemImm(.@"and", inst),
- .or_mem_imm => try emit.mirArithMemImm(.@"or", inst),
- .sbb_mem_imm => try emit.mirArithMemImm(.sbb, inst),
- .cmp_mem_imm => try emit.mirArithMemImm(.cmp, inst),
- .mov_mem_imm => try emit.mirArithMemImm(.mov, inst),
+ .addss,
+ .cmpss,
+ .movss,
+ .ucomiss,
+ .addsd,
+ .cmpsd,
+ .movsd,
+ .ucomisd,
+ => try emit.mirEncodeGeneric(tag, inst),
- .adc_scale_src => try emit.mirArithScaleSrc(.adc, inst),
- .add_scale_src => try emit.mirArithScaleSrc(.add, inst),
- .sub_scale_src => try emit.mirArithScaleSrc(.sub, inst),
- .xor_scale_src => try emit.mirArithScaleSrc(.xor, inst),
- .and_scale_src => try emit.mirArithScaleSrc(.@"and", inst),
- .or_scale_src => try emit.mirArithScaleSrc(.@"or", inst),
- .sbb_scale_src => try emit.mirArithScaleSrc(.sbb, inst),
- .cmp_scale_src => try emit.mirArithScaleSrc(.cmp, inst),
- .mov_scale_src => try emit.mirArithScaleSrc(.mov, inst),
+ .jmp_reloc => try emit.mirJmpReloc(inst),
- .adc_scale_dst => try emit.mirArithScaleDst(.adc, inst),
- .add_scale_dst => try emit.mirArithScaleDst(.add, inst),
- .sub_scale_dst => try emit.mirArithScaleDst(.sub, inst),
- .xor_scale_dst => try emit.mirArithScaleDst(.xor, inst),
- .and_scale_dst => try emit.mirArithScaleDst(.@"and", inst),
- .or_scale_dst => try emit.mirArithScaleDst(.@"or", inst),
- .sbb_scale_dst => try emit.mirArithScaleDst(.sbb, inst),
- .cmp_scale_dst => try emit.mirArithScaleDst(.cmp, inst),
- .mov_scale_dst => try emit.mirArithScaleDst(.mov, inst),
-
- .adc_scale_imm => try emit.mirArithScaleImm(.adc, inst),
- .add_scale_imm => try emit.mirArithScaleImm(.add, inst),
- .sub_scale_imm => try emit.mirArithScaleImm(.sub, inst),
- .xor_scale_imm => try emit.mirArithScaleImm(.xor, inst),
- .and_scale_imm => try emit.mirArithScaleImm(.@"and", inst),
- .or_scale_imm => try emit.mirArithScaleImm(.@"or", inst),
- .sbb_scale_imm => try emit.mirArithScaleImm(.sbb, inst),
- .cmp_scale_imm => try emit.mirArithScaleImm(.cmp, inst),
- .mov_scale_imm => try emit.mirArithScaleImm(.mov, inst),
-
- .adc_mem_index_imm => try emit.mirArithMemIndexImm(.adc, inst),
- .add_mem_index_imm => try emit.mirArithMemIndexImm(.add, inst),
- .sub_mem_index_imm => try emit.mirArithMemIndexImm(.sub, inst),
- .xor_mem_index_imm => try emit.mirArithMemIndexImm(.xor, inst),
- .and_mem_index_imm => try emit.mirArithMemIndexImm(.@"and", inst),
- .or_mem_index_imm => try emit.mirArithMemIndexImm(.@"or", inst),
- .sbb_mem_index_imm => try emit.mirArithMemIndexImm(.sbb, inst),
- .cmp_mem_index_imm => try emit.mirArithMemIndexImm(.cmp, inst),
- .mov_mem_index_imm => try emit.mirArithMemIndexImm(.mov, inst),
-
- .mov_sign_extend => try emit.mirMovSignExtend(inst),
- .mov_zero_extend => try emit.mirMovZeroExtend(inst),
-
- .movabs => try emit.mirMovabs(inst),
-
- .fisttp => try emit.mirFisttp(inst),
- .fld => try emit.mirFld(inst),
-
- .lea => try emit.mirLea(inst),
- .lea_pic => try emit.mirLeaPic(inst),
-
- .shl => try emit.mirShift(.shl, inst),
- .sal => try emit.mirShift(.sal, inst),
- .shr => try emit.mirShift(.shr, inst),
- .sar => try emit.mirShift(.sar, inst),
-
- .imul => try emit.mirMulDiv(.imul, inst),
- .mul => try emit.mirMulDiv(.mul, inst),
- .idiv => try emit.mirMulDiv(.idiv, inst),
- .div => try emit.mirMulDiv(.div, inst),
- .imul_complex => try emit.mirIMulComplex(inst),
-
- .cwd => try emit.mirCwd(inst),
-
- .push => try emit.mirPushPop(.push, inst),
- .pop => try emit.mirPushPop(.pop, inst),
-
- .jmp => try emit.mirJmpCall(.jmp_near, inst),
- .call => try emit.mirJmpCall(.call_near, inst),
-
- .cond_jmp => try emit.mirCondJmp(inst),
- .cond_set_byte => try emit.mirCondSetByte(inst),
- .cond_mov => try emit.mirCondMov(inst),
-
- .ret => try emit.mirRet(inst),
-
- .syscall => try emit.mirSyscall(),
-
- .@"test" => try emit.mirTest(inst),
-
- .interrupt => try emit.mirInterrupt(inst),
- .nop => {}, // just skip it
-
- // SSE instructions
- .mov_f64_sse => try emit.mirMovFloatSse(.movsd, inst),
- .mov_f32_sse => try emit.mirMovFloatSse(.movss, inst),
-
- .add_f64_sse => try emit.mirAddFloatSse(.addsd, inst),
- .add_f32_sse => try emit.mirAddFloatSse(.addss, inst),
-
- .cmp_f64_sse => try emit.mirCmpFloatSse(.ucomisd, inst),
- .cmp_f32_sse => try emit.mirCmpFloatSse(.ucomiss, inst),
-
- // AVX instructions
- .mov_f64_avx => try emit.mirMovFloatAvx(.vmovsd, inst),
- .mov_f32_avx => try emit.mirMovFloatAvx(.vmovss, inst),
-
- .add_f64_avx => try emit.mirAddFloatAvx(.vaddsd, inst),
- .add_f32_avx => try emit.mirAddFloatAvx(.vaddss, inst),
-
- .cmp_f64_avx => try emit.mirCmpFloatAvx(.vucomisd, inst),
- .cmp_f32_avx => try emit.mirCmpFloatAvx(.vucomiss, inst),
-
- // Pseudo-instructions
.call_extern => try emit.mirCallExtern(inst),
+ .lea_linker => try emit.mirLeaLinker(inst),
+
+ .mov_moffs => try emit.mirMovMoffs(inst),
+
+ .movsx => try emit.mirMovsx(inst),
+ .cmovcc => try emit.mirCmovcc(inst),
+ .setcc => try emit.mirSetcc(inst),
+ .jcc => try emit.mirJcc(inst),
+
.dbg_line => try emit.mirDbgLine(inst),
.dbg_prologue_end => try emit.mirDbgPrologueEnd(inst),
.dbg_epilogue_begin => try emit.mirDbgEpilogueBegin(inst),
@@ -199,9 +138,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.push_regs => try emit.mirPushPopRegisterList(.push, inst),
.pop_regs => try emit.mirPushPopRegisterList(.pop, inst),
- else => {
- return emit.fail("Implement MIR->Emit lowering for x86_64 for pseudo-inst: {}", .{tag});
- },
+ .dead => {},
}
}
@@ -234,907 +171,277 @@ fn fixupRelocs(emit: *Emit) InnerError!void {
}
}
-fn mirInterrupt(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .interrupt);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => return lowerToZoEnc(.int3, emit.code),
- else => return emit.fail("TODO handle variant 0b{b} of interrupt instruction", .{ops.flags}),
- }
+fn encode(emit: *Emit, mnemonic: Instruction.Mnemonic, ops: struct {
+ op1: Instruction.Operand = .none,
+ op2: Instruction.Operand = .none,
+ op3: Instruction.Operand = .none,
+ op4: Instruction.Operand = .none,
+}) InnerError!void {
+ const inst = try Instruction.new(mnemonic, .{
+ .op1 = ops.op1,
+ .op2 = ops.op2,
+ .op3 = ops.op3,
+ .op4 = ops.op4,
+ });
+ return inst.encode(emit.code.writer());
}
-fn mirSyscall(emit: *Emit) InnerError!void {
- return lowerToZoEnc(.syscall, emit.code);
-}
+fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
+ const mnemonic = inline for (@typeInfo(Instruction.Mnemonic).Enum.fields) |field| {
+ if (mem.eql(u8, field.name, @tagName(tag))) break @field(Instruction.Mnemonic, field.name);
+ } else unreachable;
-fn mirPushPop(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- // PUSH/POP reg
- return lowerToOEnc(tag, ops.reg1, emit.code);
+ const ops = emit.mir.instructions.items(.ops)[inst];
+ const data = emit.mir.instructions.items(.data)[inst];
+
+ var op1: Instruction.Operand = .none;
+ var op2: Instruction.Operand = .none;
+ var op3: Instruction.Operand = .none;
+ var op4: Instruction.Operand = .none;
+
+ switch (ops) {
+ .none => {},
+ .imm_s => op1 = .{ .imm = Immediate.s(@bitCast(i32, data.imm)) },
+ .imm_u => op1 = .{ .imm = Immediate.u(data.imm) },
+ .r => op1 = .{ .reg = data.r },
+ .rr => {
+ op1 = .{ .reg = data.rr.r1 };
+ op2 = .{ .reg = data.rr.r2 };
},
- 0b01 => {
- // PUSH/POP r/m64
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- const ptr_size: Memory.PtrSize = switch (immOpSize(imm)) {
- 16 => .word_ptr,
- else => .qword_ptr,
+ .ri_s, .ri_u => {
+ const imm = switch (ops) {
+ .ri_s => Immediate.s(@bitCast(i32, data.ri.imm)),
+ .ri_u => Immediate.u(data.ri.imm),
+ else => unreachable,
};
- return lowerToMEnc(tag, RegisterOrMemory.mem(ptr_size, .{
- .disp = imm,
- .base = ops.reg1,
- }), emit.code);
+ op1 = .{ .reg = data.ri.r1 };
+ op2 = .{ .imm = imm };
},
- 0b10 => {
- // PUSH imm32
- assert(tag == .push);
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToIEnc(.push, imm, emit.code);
+ .ri64 => {
+ const imm64 = emit.mir.extraData(Mir.Imm64, data.rx.payload).data;
+ op1 = .{ .reg = data.rx.r1 };
+ op2 = .{ .imm = Immediate.u(Mir.Imm64.decode(imm64)) };
},
- 0b11 => unreachable,
- }
-}
-
-fn mirPushPopRegisterList(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const save_reg_list = emit.mir.extraData(Mir.SaveRegisterList, payload).data;
- var disp: i32 = -@intCast(i32, save_reg_list.stack_end);
- const reg_list = Mir.RegisterList.fromInt(save_reg_list.register_list);
- const callee_preserved_regs = abi.getCalleePreservedRegs(emit.target.*);
- for (callee_preserved_regs) |reg| {
- if (reg_list.isSet(callee_preserved_regs, reg)) {
- switch (tag) {
- .push => try lowerToMrEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, disp),
- .base = ops.reg1,
- }), reg, emit.code),
- .pop => try lowerToRmEnc(.mov, reg, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, disp),
- .base = ops.reg1,
- }), emit.code),
+ .rri_s, .rri_u => {
+ const imm = switch (ops) {
+ .rri_s => Immediate.s(@bitCast(i32, data.rri.imm)),
+ .rri_u => Immediate.u(data.rri.imm),
+ else => unreachable,
+ };
+ op1 = .{ .reg = data.rri.r1 };
+ op2 = .{ .reg = data.rri.r2 };
+ op3 = .{ .imm = imm };
+ },
+ .m_sib => {
+ const msib = emit.mir.extraData(Mir.MemorySib, data.payload).data;
+ op1 = .{ .mem = Mir.MemorySib.decode(msib) };
+ },
+ .m_rip => {
+ const mrip = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
+ op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
+ },
+ .mi_s_sib, .mi_u_sib => {
+ const msib = emit.mir.extraData(Mir.MemorySib, data.xi.payload).data;
+ const imm = switch (ops) {
+ .mi_s_sib => Immediate.s(@bitCast(i32, data.xi.imm)),
+ .mi_u_sib => Immediate.u(data.xi.imm),
+ else => unreachable,
+ };
+ op1 = .{ .mem = Mir.MemorySib.decode(msib) };
+ op2 = .{ .imm = imm };
+ },
+ .mi_u_rip, .mi_s_rip => {
+ const mrip = emit.mir.extraData(Mir.MemoryRip, data.xi.payload).data;
+ const imm = switch (ops) {
+ .mi_s_rip => Immediate.s(@bitCast(i32, data.xi.imm)),
+ .mi_u_rip => Immediate.u(data.xi.imm),
+ else => unreachable,
+ };
+ op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
+ op2 = .{ .imm = imm };
+ },
+ .rm_sib, .mr_sib => {
+ const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data;
+ const op_r = .{ .reg = data.rx.r1 };
+ const op_m = .{ .mem = Mir.MemorySib.decode(msib) };
+ switch (ops) {
+ .rm_sib => {
+ op1 = op_r;
+ op2 = op_m;
+ },
+ .mr_sib => {
+ op1 = op_m;
+ op2 = op_r;
+ },
else => unreachable,
}
- disp += 8;
- }
- }
-}
-
-fn mirJmpCall(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- const target = emit.mir.instructions.items(.data)[inst].inst;
- const source = emit.code.items.len;
- try lowerToDEnc(tag, 0, emit.code);
- try emit.relocs.append(emit.bin_file.allocator, .{
- .source = source,
- .target = target,
- .offset = emit.code.items.len - 4,
- .length = 5,
- });
},
- 0b01 => {
- if (ops.reg1 == .none) {
- // JMP/CALL [imm]
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- const ptr_size: Memory.PtrSize = switch (immOpSize(imm)) {
- 16 => .word_ptr,
- else => .qword_ptr,
- };
- return lowerToMEnc(tag, RegisterOrMemory.mem(ptr_size, .{ .disp = imm }), emit.code);
+ .rm_rip, .mr_rip => {
+ const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data;
+ const op_r = .{ .reg = data.rx.r1 };
+ const op_m = .{ .mem = Mir.MemoryRip.decode(mrip) };
+ switch (ops) {
+                .rm_rip => {
+                    op1 = op_r;
+                    op2 = op_m;
+                },
+                .mr_rip => {
+                    op1 = op_m;
+                    op2 = op_r;
+                },
+ else => unreachable,
}
- // JMP/CALL reg
- return lowerToMEnc(tag, RegisterOrMemory.reg(ops.reg1), emit.code);
},
- 0b10 => {
- // JMP/CALL r/m64
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToMEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
- .disp = imm,
- .base = ops.reg1,
- }), emit.code);
- },
- 0b11 => return emit.fail("TODO unused variant jmp/call 0b11", .{}),
+ else => return emit.fail("TODO handle generic encoding: {s}, {s}", .{
+ @tagName(mnemonic),
+ @tagName(ops),
+ }),
}
-}
-fn mirCondJmp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const mir_tag = emit.mir.instructions.items(.tag)[inst];
- assert(mir_tag == .cond_jmp);
- const inst_cc = emit.mir.instructions.items(.data)[inst].inst_cc;
- const tag: Tag = switch (inst_cc.cc) {
- .a => .ja,
- .ae => .jae,
- .b => .jb,
- .be => .jbe,
- .c => .jc,
- .e => .je,
- .g => .jg,
- .ge => .jge,
- .l => .jl,
- .le => .jle,
- .na => .jna,
- .nae => .jnae,
- .nb => .jnb,
- .nbe => .jnbe,
- .nc => .jnc,
- .ne => .jne,
- .ng => .jng,
- .nge => .jnge,
- .nl => .jnl,
- .nle => .jnle,
- .no => .jno,
- .np => .jnp,
- .ns => .jns,
- .nz => .jnz,
- .o => .jo,
- .p => .jp,
- .pe => .jpe,
- .po => .jpo,
- .s => .js,
- .z => .jz,
- };
- const source = emit.code.items.len;
- try lowerToDEnc(tag, 0, emit.code);
- try emit.relocs.append(emit.bin_file.allocator, .{
- .source = source,
- .target = inst_cc.inst,
- .offset = emit.code.items.len - 4,
- .length = 6,
+ return emit.encode(mnemonic, .{
+ .op1 = op1,
+ .op2 = op2,
+ .op3 = op3,
+ .op4 = op4,
});
}
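
A minimal usage sketch of the `encode` helper above, mirroring the operand shapes this function builds and assuming only the Immediate constructor and operand union fields already used in this diff; it would emit `mov rax, 42`:

    try emit.encode(.mov, .{
        .op1 = .{ .reg = .rax },
        .op2 = .{ .imm = Immediate.u(42) },
    });
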
-fn mirCondSetByte(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const mir_tag = emit.mir.instructions.items(.tag)[inst];
- assert(mir_tag == .cond_set_byte);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const cc = emit.mir.instructions.items(.data)[inst].cc;
- const tag: Tag = switch (cc) {
- .a => .seta,
- .ae => .setae,
- .b => .setb,
- .be => .setbe,
- .c => .setc,
- .e => .sete,
- .g => .setg,
- .ge => .setge,
- .l => .setl,
- .le => .setle,
- .na => .setna,
- .nae => .setnae,
- .nb => .setnb,
- .nbe => .setnbe,
- .nc => .setnc,
- .ne => .setne,
- .ng => .setng,
- .nge => .setnge,
- .nl => .setnl,
- .nle => .setnle,
- .no => .setno,
- .np => .setnp,
- .ns => .setns,
- .nz => .setnz,
- .o => .seto,
- .p => .setp,
- .pe => .setpe,
- .po => .setpo,
- .s => .sets,
- .z => .setz,
- };
- return lowerToMEnc(tag, RegisterOrMemory.reg(ops.reg1.to8()), emit.code);
-}
-
-fn mirCondMov(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const mir_tag = emit.mir.instructions.items(.tag)[inst];
- assert(mir_tag == .cond_mov);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const cc = emit.mir.instructions.items(.data)[inst].cc;
- const tag: Tag = switch (cc) {
- .a => .cmova,
- .ae => .cmovae,
- .b => .cmovb,
- .be => .cmovbe,
- .c => .cmovc,
- .e => .cmove,
- .g => .cmovg,
- .ge => .cmovge,
- .l => .cmovl,
- .le => .cmovle,
- .na => .cmovna,
- .nae => .cmovnae,
- .nb => .cmovnb,
- .nbe => .cmovnbe,
- .nc => .cmovnc,
- .ne => .cmovne,
- .ng => .cmovng,
- .nge => .cmovnge,
- .nl => .cmovnl,
- .nle => .cmovnle,
- .no => .cmovno,
- .np => .cmovnp,
- .ns => .cmovns,
- .nz => .cmovnz,
- .o => .cmovo,
- .p => .cmovp,
- .pe => .cmovpe,
- .po => .cmovpo,
- .s => .cmovs,
- .z => .cmovz,
- };
-
- if (ops.flags == 0b00) {
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- }
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => unreachable,
- 0b01 => .word_ptr,
- 0b10 => .dword_ptr,
- 0b11 => .qword_ptr,
- };
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(ptr_size, .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
-}
-
-fn mirTest(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .@"test");
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- if (ops.reg2 == .none) {
- // TEST r/m64, imm32
- // MI
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- if (ops.reg1.to64() == .rax) {
- // TEST rax, imm32
- // I
- return lowerToIEnc(.@"test", imm, emit.code);
- }
- return lowerToMiEnc(.@"test", RegisterOrMemory.reg(ops.reg1), imm, emit.code);
- }
- // TEST r/m64, r64
- return lowerToMrEnc(.@"test", RegisterOrMemory.reg(ops.reg1), ops.reg2, emit.code);
- },
- else => return emit.fail("TODO more TEST alternatives", .{}),
- }
-}
-
-fn mirRet(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .ret);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- // RETF imm16
- // I
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToIEnc(.ret_far, imm, emit.code);
- },
- 0b01 => {
- return lowerToZoEnc(.ret_far, emit.code);
- },
- 0b10 => {
- // RET imm16
- // I
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToIEnc(.ret_near, imm, emit.code);
- },
- 0b11 => {
- return lowerToZoEnc(.ret_near, emit.code);
- },
- }
-}
-
-fn mirArith(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- if (ops.reg2 == .none) {
- // mov reg1, imm32
- // MI
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToMiEnc(tag, RegisterOrMemory.reg(ops.reg1), imm, emit.code);
- }
- // mov reg1, reg2
- // RM
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- 0b01 => {
- // mov reg1, [reg2 + imm32]
- // RM
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
- .disp = imm,
- .base = src_reg,
- }), emit.code);
- },
- 0b10 => {
- if (ops.reg2 == .none) {
- return emit.fail("TODO unused variant: mov reg1, none, 0b10", .{});
- }
- // mov [reg1 + imm32], reg2
- // MR
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg2.size()), .{
- .disp = imm,
- .base = ops.reg1,
- }), ops.reg2, emit.code);
- },
- 0b11 => {
- return emit.fail("TODO unused variant: mov reg1, reg2, 0b11", .{});
- },
- }
-}
-
-fn mirArithMemImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- assert(ops.reg2 == .none);
+fn mirMovMoffs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+ const ops = emit.mir.instructions.items(.ops)[inst];
const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => .byte_ptr,
- 0b01 => .word_ptr,
- 0b10 => .dword_ptr,
- 0b11 => .qword_ptr,
- };
- return lowerToMiEnc(tag, RegisterOrMemory.mem(ptr_size, .{
- .disp = imm_pair.dest_off,
- .base = ops.reg1,
- }), imm_pair.operand, emit.code);
-}
-
-inline fn setRexWRegister(reg: Register) bool {
- if (reg.size() > 64) return false;
- if (reg.size() == 64) return true;
- return switch (reg) {
- .ah, .ch, .dh, .bh => true,
- else => false,
- };
-}
-
-inline fn immOpSize(u_imm: u32) u6 {
- const imm = @bitCast(i32, u_imm);
- if (math.minInt(i8) <= imm and imm <= math.maxInt(i8)) {
- return 8;
- }
- if (math.minInt(i16) <= imm and imm <= math.maxInt(i16)) {
- return 16;
- }
- return 32;
-}
-
-fn mirArithScaleSrc(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const scale = ops.flags;
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
- // OP reg1, [reg2 + scale*index + imm32]
- const scale_index = ScaleIndex{
- .scale = scale,
- .index = index_reg_disp.index,
- };
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
- .disp = index_reg_disp.disp,
- .base = ops.reg2,
- .scale_index = scale_index,
- }), emit.code);
-}
-
-fn mirArithScaleDst(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const scale = ops.flags;
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
- const scale_index = ScaleIndex{
- .scale = scale,
- .index = index_reg_disp.index,
- };
- assert(ops.reg2 != .none);
- // OP [reg1 + scale*index + imm32], reg2
- return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg2.size()), .{
- .disp = index_reg_disp.disp,
- .base = ops.reg1,
- .scale_index = scale_index,
- }), ops.reg2, emit.code);
-}
-
-fn mirArithScaleImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const scale = ops.flags;
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode();
- const scale_index = ScaleIndex{
- .scale = scale,
- .index = index_reg_disp_imm.index,
- };
- // OP qword ptr [reg1 + scale*index + imm32], imm32
- return lowerToMiEnc(tag, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = index_reg_disp_imm.disp,
- .base = ops.reg1,
- .scale_index = scale_index,
- }), index_reg_disp_imm.imm, emit.code);
-}
-
-fn mirArithMemIndexImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- assert(ops.reg2 == .none);
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode();
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => .byte_ptr,
- 0b01 => .word_ptr,
- 0b10 => .dword_ptr,
- 0b11 => .qword_ptr,
- };
- const scale_index = ScaleIndex{
- .scale = 0,
- .index = index_reg_disp_imm.index,
- };
- // OP ptr [reg1 + index + imm32], imm32
- return lowerToMiEnc(tag, RegisterOrMemory.mem(ptr_size, .{
- .disp = index_reg_disp_imm.disp,
- .base = ops.reg1,
- .scale_index = scale_index,
- }), index_reg_disp_imm.imm, emit.code);
-}
-
-fn mirMovSignExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const mir_tag = emit.mir.instructions.items(.tag)[inst];
- assert(mir_tag == .mov_sign_extend);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const imm = if (ops.flags != 0b00) emit.mir.instructions.items(.data)[inst].imm else undefined;
- switch (ops.flags) {
- 0b00 => {
- const tag: Tag = if (ops.reg2.size() == 32) .movsxd else .movsx;
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
+ const moffs = emit.mir.extraData(Mir.MemoryMoffs, payload).data;
+ const seg = @intToEnum(Register, moffs.seg);
+ const offset = moffs.decodeOffset();
+ switch (ops) {
+ .rax_moffs => {
+ try emit.encode(.mov, .{
+ .op1 = .{ .reg = .rax },
+ .op2 = .{ .mem = Memory.moffs(seg, offset) },
+ });
},
- 0b01 => {
- return lowerToRmEnc(.movsx, ops.reg1, RegisterOrMemory.mem(.byte_ptr, .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
+ .moffs_rax => {
+ try emit.encode(.mov, .{
+ .op1 = .{ .mem = Memory.moffs(seg, offset) },
+ .op2 = .{ .reg = .rax },
+ });
},
- 0b10 => {
- return lowerToRmEnc(.movsx, ops.reg1, RegisterOrMemory.mem(.word_ptr, .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
- },
- 0b11 => {
- return lowerToRmEnc(.movsxd, ops.reg1, RegisterOrMemory.mem(.dword_ptr, .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
- },
- }
-}
-
-fn mirMovZeroExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const mir_tag = emit.mir.instructions.items(.tag)[inst];
- assert(mir_tag == .mov_zero_extend);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const imm = if (ops.flags != 0b00) emit.mir.instructions.items(.data)[inst].imm else undefined;
- switch (ops.flags) {
- 0b00 => {
- return lowerToRmEnc(.movzx, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- 0b01 => {
- return lowerToRmEnc(.movzx, ops.reg1, RegisterOrMemory.mem(.byte_ptr, .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
- },
- 0b10 => {
- return lowerToRmEnc(.movzx, ops.reg1, RegisterOrMemory.mem(.word_ptr, .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
- },
- 0b11 => {
- return emit.fail("TODO unused variant: movzx 0b11", .{});
- },
- }
-}
-
-fn mirMovabs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .movabs);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- const imm: u64 = if (ops.reg1.size() == 64) blk: {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data;
- break :blk imm.decode();
- } else emit.mir.instructions.items(.data)[inst].imm;
- // movabs reg, imm64
- // OI
- return lowerToOiEnc(.mov, ops.reg1, imm, emit.code);
- },
- 0b01 => {
- if (ops.reg1 == .none) {
- const imm: u64 = if (ops.reg2.size() == 64) blk: {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data;
- break :blk imm.decode();
- } else emit.mir.instructions.items(.data)[inst].imm;
- // movabs moffs64, rax
- // TD
- return lowerToTdEnc(.mov, imm, ops.reg2, emit.code);
- }
- const imm: u64 = if (ops.reg1.size() == 64) blk: {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data;
- break :blk imm.decode();
- } else emit.mir.instructions.items(.data)[inst].imm;
- // movabs rax, moffs64
- // FD
- return lowerToFdEnc(.mov, ops.reg1, imm, emit.code);
- },
- else => return emit.fail("TODO unused movabs variant", .{}),
- }
-}
-
-fn mirFisttp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .fisttp);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
-
-    // Selecting the operand size for this particular `fisttp` instruction
-    // is done via the opcode rather than the usual prefixes.
-
- const opcode: Tag = switch (ops.flags) {
- 0b00 => .fisttp16,
- 0b01 => .fisttp32,
- 0b10 => .fisttp64,
else => unreachable,
+ }
+}
+
+fn mirMovsx(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+ const ops = emit.mir.instructions.items(.ops)[inst];
+ const data = emit.mir.instructions.items(.data)[inst];
+
+ var op1: Instruction.Operand = .none;
+ var op2: Instruction.Operand = .none;
+ switch (ops) {
+ .rr => {
+ op1 = .{ .reg = data.rr.r1 };
+ op2 = .{ .reg = data.rr.r2 };
+ },
+ .rm_sib => {
+ const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data;
+ op1 = .{ .reg = data.rx.r1 };
+ op2 = .{ .mem = Mir.MemorySib.decode(msib) };
+ },
+ .rm_rip => {
+ const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data;
+ op1 = .{ .reg = data.rx.r1 };
+ op2 = .{ .mem = Mir.MemoryRip.decode(mrip) };
+ },
+ else => unreachable, // TODO
+ }
+
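+    // A 32-bit source sign-extended into a 32- or 64-bit destination uses movsxd;
+    // 8- and 16-bit sources use movsx.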
+ const mnemonic: Instruction.Mnemonic = switch (op1.bitSize()) {
+ 32, 64 => if (op2.bitSize() == 32) .movsxd else .movsx,
+ else => .movsx,
};
- const mem_or_reg = Memory{
- .base = ops.reg1,
- .disp = emit.mir.instructions.items(.data)[inst].imm,
- .ptr_size = Memory.PtrSize.dword_ptr, // to prevent any prefix from being used
- };
- return lowerToMEnc(opcode, .{ .memory = mem_or_reg }, emit.code);
+
+ return emit.encode(mnemonic, .{
+ .op1 = op1,
+ .op2 = op2,
+ });
}
-fn mirFld(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .fld);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
-
-    // Selecting the operand size for this particular `fld` instruction
-    // is done via the opcode rather than the usual prefixes.
-
- const opcode: Tag = switch (ops.flags) {
- 0b01 => .fld32,
- 0b10 => .fld64,
- else => unreachable,
- };
- const mem_or_reg = Memory{
- .base = ops.reg1,
- .disp = emit.mir.instructions.items(.data)[inst].imm,
- .ptr_size = Memory.PtrSize.dword_ptr, // to prevent any prefix from being used
- };
- return lowerToMEnc(opcode, .{ .memory = mem_or_reg }, emit.code);
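+/// Builds the condition-suffixed mnemonic at comptime, e.g. basename "cmov" with cc `.e` yields `.cmove`.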
+fn mnemonicFromConditionCode(comptime basename: []const u8, cc: bits.Condition) Instruction.Mnemonic {
+ inline for (@typeInfo(bits.Condition).Enum.fields) |field| {
+ if (mem.eql(u8, field.name, @tagName(cc)))
+ return @field(Instruction.Mnemonic, basename ++ field.name);
+ } else unreachable;
}
-fn mirShift(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- // sal reg1, 1
- // M1
- return lowerToM1Enc(tag, RegisterOrMemory.reg(ops.reg1), emit.code);
- },
- 0b01 => {
- // sal reg1, .cl
- // MC
- return lowerToMcEnc(tag, RegisterOrMemory.reg(ops.reg1), emit.code);
- },
- 0b10 => {
- // sal reg1, imm8
- // MI
- const imm = @truncate(u8, emit.mir.instructions.items(.data)[inst].imm);
- return lowerToMiImm8Enc(tag, RegisterOrMemory.reg(ops.reg1), imm, emit.code);
- },
- 0b11 => {
- return emit.fail("TODO unused variant: SHIFT reg1, 0b11", .{});
+fn mirCmovcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+ const ops = emit.mir.instructions.items(.ops)[inst];
+ switch (ops) {
+ .rr_c => {
+ const data = emit.mir.instructions.items(.data)[inst].rr_c;
+ const mnemonic = mnemonicFromConditionCode("cmov", data.cc);
+ return emit.encode(mnemonic, .{
+ .op1 = .{ .reg = data.r1 },
+ .op2 = .{ .reg = data.r2 },
+ });
},
+ else => unreachable, // TODO
}
}
-fn mirMulDiv(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- if (ops.reg1 != .none) {
- assert(ops.reg2 == .none);
- return lowerToMEnc(tag, RegisterOrMemory.reg(ops.reg1), emit.code);
- }
- assert(ops.reg2 != .none);
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => .byte_ptr,
- 0b01 => .word_ptr,
- 0b10 => .dword_ptr,
- 0b11 => .qword_ptr,
- };
- return lowerToMEnc(tag, RegisterOrMemory.mem(ptr_size, .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
-}
-
-fn mirIMulComplex(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .imul_complex);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return lowerToRmEnc(.imul, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- 0b01 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
- return lowerToRmEnc(.imul, ops.reg1, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = imm,
- .base = src_reg,
- }), emit.code);
- },
- 0b10 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToRmiEnc(.imul, ops.reg1, RegisterOrMemory.reg(ops.reg2), imm, emit.code);
- },
- 0b11 => {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
- return lowerToRmiEnc(.imul, ops.reg1, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = imm_pair.dest_off,
- .base = ops.reg2,
- }), imm_pair.operand, emit.code);
+fn mirSetcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+ const ops = emit.mir.instructions.items(.ops)[inst];
+ switch (ops) {
+ .r_c => {
+ const data = emit.mir.instructions.items(.data)[inst].r_c;
+ const mnemonic = mnemonicFromConditionCode("set", data.cc);
+ return emit.encode(mnemonic, .{
+ .op1 = .{ .reg = data.r1 },
+ });
},
+ else => unreachable, // TODO
}
}
-fn mirCwd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const tag: Tag = switch (ops.flags) {
- 0b00 => .cbw,
- 0b01 => .cwd,
- 0b10 => .cdq,
- 0b11 => .cqo,
- };
- return lowerToZoEnc(tag, emit.code);
-}
-
-fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .lea);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- // lea reg1, [reg2 + imm32]
- // RM
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
- return lowerToRmEnc(
- .lea,
- ops.reg1,
- RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
- .disp = imm,
- .base = src_reg,
- }),
- emit.code,
- );
+fn mirJcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+ const ops = emit.mir.instructions.items(.ops)[inst];
+ switch (ops) {
+ .inst_cc => {
+ const data = emit.mir.instructions.items(.data)[inst].inst_cc;
+ const mnemonic = mnemonicFromConditionCode("j", data.cc);
+ const source = emit.code.items.len;
+ try emit.encode(mnemonic, .{
+ .op1 = .{ .imm = Immediate.s(0) },
+ });
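+            // Jcc rel32 is 6 bytes (two opcode bytes plus imm32); the zero immediate emitted
+            // above occupies the trailing 4 bytes and is patched later via this relocation.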
+ try emit.relocs.append(emit.bin_file.allocator, .{
+ .source = source,
+ .target = data.inst,
+ .offset = emit.code.items.len - 4,
+ .length = 6,
+ });
},
- 0b01 => {
- // lea reg1, [rip + imm32]
- // RM
- const start_offset = emit.code.items.len;
- try lowerToRmEnc(
- .lea,
- ops.reg1,
- RegisterOrMemory.rip(Memory.PtrSize.new(ops.reg1.size()), 0),
- emit.code,
- );
- const end_offset = emit.code.items.len;
- // Backpatch the displacement
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data.decode();
- const disp = @intCast(i32, @intCast(i64, imm) - @intCast(i64, end_offset - start_offset));
- mem.writeIntLittle(i32, emit.code.items[end_offset - 4 ..][0..4], disp);
- },
- 0b10 => {
- // lea reg, [rbp + index + imm32]
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
- const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
- const scale_index = ScaleIndex{
- .scale = 0,
- .index = index_reg_disp.index,
- };
- return lowerToRmEnc(
- .lea,
- ops.reg1,
- RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
- .disp = index_reg_disp.disp,
- .base = src_reg,
- .scale_index = scale_index,
- }),
- emit.code,
- );
- },
- 0b11 => return emit.fail("TODO unused LEA variant 0b11", .{}),
+ else => unreachable, // TODO
}
}
-fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .lea_pic);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const relocation = emit.mir.instructions.items(.data)[inst].relocation;
-
- switch (ops.flags) {
- 0b00, 0b01, 0b10 => {},
- else => return emit.fail("TODO unused LEA PIC variant 0b11", .{}),
- }
-
- // lea reg1, [rip + reloc]
- // RM
- try lowerToRmEnc(
- .lea,
- ops.reg1,
- RegisterOrMemory.rip(Memory.PtrSize.new(ops.reg1.size()), 0),
- emit.code,
- );
-
- const end_offset = emit.code.items.len;
-
- if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- const reloc_type = switch (ops.flags) {
- 0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
- 0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
- else => unreachable,
- };
- const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
- .type = reloc_type,
- .target = .{ .sym_index = relocation.sym_index, .file = null },
- .offset = @intCast(u32, end_offset - 4),
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
- .type = switch (ops.flags) {
- 0b00 => .got,
- 0b01 => .direct,
- 0b10 => .import,
- else => unreachable,
- },
- .target = switch (ops.flags) {
- 0b00, 0b01 => .{ .sym_index = relocation.sym_index, .file = null },
- 0b10 => coff_file.getGlobalByIndex(relocation.sym_index),
- else => unreachable,
- },
- .offset = @intCast(u32, end_offset - 4),
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- } else {
- return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends different than MachO", .{});
- }
+fn mirJmpReloc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+ const target = emit.mir.instructions.items(.data)[inst].inst;
+ const source = emit.code.items.len;
+ try emit.encode(.jmp, .{
+ .op1 = .{ .imm = Immediate.s(0) },
+ });
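+    // jmp rel32 (0xE9 plus imm32) is 5 bytes; the zero immediate in the trailing 4 bytes
+    // is patched later via this relocation.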
+ try emit.relocs.append(emit.bin_file.allocator, .{
+ .source = source,
+ .target = target,
+ .offset = emit.code.items.len - 4,
+ .length = 5,
+ });
}
-// SSE instructions
-
-fn mirMovFloatSse(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg2.size()), .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
- },
- 0b01 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
- .disp = imm,
- .base = ops.reg1,
- }), ops.reg2, emit.code);
- },
- 0b10 => {
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- else => return emit.fail("TODO unused variant 0b{b} for {}", .{ ops.flags, tag }),
- }
-}
-
-fn mirAddFloatSse(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- else => return emit.fail("TODO unused variant 0b{b} for {}", .{ ops.flags, tag }),
- }
-}
-
-fn mirCmpFloatSse(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- else => return emit.fail("TODO unused variant 0b{b} for {}", .{ ops.flags, tag }),
- }
-}
-// AVX instructions
-
-fn mirMovFloatAvx(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToVmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg2.size()), .{
- .disp = imm,
- .base = ops.reg2,
- }), emit.code);
- },
- 0b01 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return lowerToMvEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
- .disp = imm,
- .base = ops.reg1,
- }), ops.reg2, emit.code);
- },
- 0b10 => {
- return lowerToRvmEnc(tag, ops.reg1, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- else => return emit.fail("TODO unused variant 0b{b} for {}", .{ ops.flags, tag }),
- }
-}
-
-fn mirAddFloatAvx(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return lowerToRvmEnc(tag, ops.reg1, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- else => return emit.fail("TODO unused variant 0b{b} for {}", .{ ops.flags, tag }),
- }
-}
-
-fn mirCmpFloatAvx(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return lowerToVmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
- },
- else => return emit.fail("TODO unused variant 0b{b} for mov_f64", .{ops.flags}),
- }
-}
-
-// Pseudo-instructions
-
fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .call_extern);
const relocation = emit.mir.instructions.items(.data)[inst].relocation;
const offset = blk: {
- // callq
- try lowerToDEnc(.call_near, 0, emit.code);
+ try emit.encode(.call, .{
+ .op1 = .{ .imm = Immediate.s(0) },
+ });
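+        // call rel32 with a zero placeholder immediate; `offset` records where the trailing
+        // 4 bytes live so they can be relocated against the extern symbol.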
break :blk @intCast(u32, emit.code.items.len) - 4;
};
@@ -1167,9 +474,95 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
}
+fn mirPushPopRegisterList(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
+ const payload = emit.mir.instructions.items(.data)[inst].payload;
+ const save_reg_list = emit.mir.extraData(Mir.SaveRegisterList, payload).data;
+ const base = @intToEnum(Register, save_reg_list.base_reg);
+ var disp: i32 = -@intCast(i32, save_reg_list.stack_end);
+ const reg_list = Mir.RegisterList.fromInt(save_reg_list.register_list);
+ const callee_preserved_regs = abi.getCalleePreservedRegs(emit.target.*);
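+    // Lower the pseudo push/pop of callee-preserved registers into movs to/from
+    // consecutive 8-byte stack slots starting at `base - stack_end`.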
+ for (callee_preserved_regs) |reg| {
+ if (reg_list.isSet(callee_preserved_regs, reg)) {
+ const op1: Instruction.Operand = .{ .mem = Memory.sib(.qword, .{
+ .base = base,
+ .disp = disp,
+ }) };
+ const op2: Instruction.Operand = .{ .reg = reg };
+ switch (tag) {
+ .push => try emit.encode(.mov, .{
+ .op1 = op1,
+ .op2 = op2,
+ }),
+ .pop => try emit.encode(.mov, .{
+ .op1 = op2,
+ .op2 = op1,
+ }),
+ else => unreachable,
+ }
+ disp += 8;
+ }
+ }
+}
+
+fn mirLeaLinker(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+ const ops = emit.mir.instructions.items(.ops)[inst];
+ const payload = emit.mir.instructions.items(.data)[inst].payload;
+ const metadata = emit.mir.extraData(Mir.LeaRegisterReloc, payload).data;
+ const reg = @intToEnum(Register, metadata.reg);
+
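+    // lea reg, [rip + 0]: the zero displacement is a placeholder patched via the
+    // relocation recorded below.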
+ try emit.encode(.lea, .{
+ .op1 = .{ .reg = reg },
+ .op2 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) },
+ });
+
+ const end_offset = emit.code.items.len;
+
+ if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
+ const reloc_type = switch (ops) {
+ .got_reloc => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
+ .direct_reloc => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
+ else => unreachable,
+ };
+ const atom_index = macho_file.getAtomIndexForSymbol(.{
+ .sym_index = metadata.atom_index,
+ .file = null,
+ }).?;
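+        // PC-relative relocation over the 4-byte displacement at the end of the lea
+        // (length = 2 means 2^2 bytes).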
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
+ .type = reloc_type,
+ .target = .{ .sym_index = metadata.sym_index, .file = null },
+ .offset = @intCast(u32, end_offset - 4),
+ .addend = 0,
+ .pcrel = true,
+ .length = 2,
+ });
+ } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = coff_file.getAtomIndexForSymbol(.{
+ .sym_index = metadata.atom_index,
+ .file = null,
+ }).?;
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
+ .type = switch (ops) {
+ .got_reloc => .got,
+ .direct_reloc => .direct,
+ .import_reloc => .import,
+ else => unreachable,
+ },
+ .target = switch (ops) {
+ .got_reloc, .direct_reloc => .{ .sym_index = metadata.sym_index, .file = null },
+ .import_reloc => coff_file.getGlobalByIndex(metadata.sym_index),
+ else => unreachable,
+ },
+ .offset = @intCast(u32, end_offset - 4),
+ .addend = 0,
+ .pcrel = true,
+ .length = 2,
+ });
+ } else {
+        return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends other than MachO and COFF", .{});
+ }
+}
+
fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .dbg_line);
const payload = emit.mir.instructions.items(.data)[inst].payload;
const dbg_line_column = emit.mir.extraData(Mir.DbgLineColumn, payload).data;
log.debug("mirDbgLine", .{});
@@ -1227,8 +620,7 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) InnerError!void {
}
fn mirDbgPrologueEnd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .dbg_prologue_end);
+ _ = inst;
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
@@ -1244,8 +636,7 @@ fn mirDbgPrologueEnd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
fn mirDbgEpilogueBegin(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .dbg_epilogue_begin);
+ _ = inst;
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
@@ -1259,1839 +650,3 @@ fn mirDbgEpilogueBegin(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.none => {},
}
}
-
-const Tag = enum {
- adc,
- add,
- sub,
- xor,
- @"and",
- @"or",
- sbb,
- cmp,
- mov,
- movsx,
- movsxd,
- movzx,
- lea,
- jmp_near,
- call_near,
- push,
- pop,
- @"test",
- int3,
- nop,
- imul,
- mul,
- idiv,
- div,
- syscall,
- ret_near,
- ret_far,
- fisttp16,
- fisttp32,
- fisttp64,
- fld32,
- fld64,
- jo,
- jno,
- jb,
- jbe,
- jc,
- jnae,
- jnc,
- jae,
- je,
- jz,
- jne,
- jnz,
- jna,
- jnb,
- jnbe,
- ja,
- js,
- jns,
- jpe,
- jp,
- jpo,
- jnp,
- jnge,
- jl,
- jge,
- jnl,
- jle,
- jng,
- jg,
- jnle,
- seto,
- setno,
- setb,
- setc,
- setnae,
- setnb,
- setnc,
- setae,
- sete,
- setz,
- setne,
- setnz,
- setbe,
- setna,
- seta,
- setnbe,
- sets,
- setns,
- setp,
- setpe,
- setnp,
- setpo,
- setl,
- setnge,
- setnl,
- setge,
- setle,
- setng,
- setnle,
- setg,
- cmovo,
- cmovno,
- cmovb,
- cmovc,
- cmovnae,
- cmovnb,
- cmovnc,
- cmovae,
- cmove,
- cmovz,
- cmovne,
- cmovnz,
- cmovbe,
- cmovna,
- cmova,
- cmovnbe,
- cmovs,
- cmovns,
- cmovp,
- cmovpe,
- cmovnp,
- cmovpo,
- cmovl,
- cmovnge,
- cmovnl,
- cmovge,
- cmovle,
- cmovng,
- cmovnle,
- cmovg,
- shl,
- sal,
- shr,
- sar,
- cbw,
- cwd,
- cdq,
- cqo,
- movsd,
- movss,
- addsd,
- addss,
- cmpsd,
- cmpss,
- ucomisd,
- ucomiss,
- vmovsd,
- vmovss,
- vaddsd,
- vaddss,
- vcmpsd,
- vcmpss,
- vucomisd,
- vucomiss,
-
- fn isSse(tag: Tag) bool {
- return switch (tag) {
- .movsd,
- .movss,
- .addsd,
- .addss,
- .cmpsd,
- .cmpss,
- .ucomisd,
- .ucomiss,
- => true,
-
- else => false,
- };
- }
-
- fn isAvx(tag: Tag) bool {
- return switch (tag) {
- .vmovsd,
- .vmovss,
- .vaddsd,
- .vaddss,
- .vcmpsd,
- .vcmpss,
- .vucomisd,
- .vucomiss,
- => true,
-
- else => false,
- };
- }
-
- fn isSetCC(tag: Tag) bool {
- return switch (tag) {
- .seto,
- .setno,
- .setb,
- .setc,
- .setnae,
- .setnb,
- .setnc,
- .setae,
- .sete,
- .setz,
- .setne,
- .setnz,
- .setbe,
- .setna,
- .seta,
- .setnbe,
- .sets,
- .setns,
- .setp,
- .setpe,
- .setnp,
- .setpo,
- .setl,
- .setnge,
- .setnl,
- .setge,
- .setle,
- .setng,
- .setnle,
- .setg,
- => true,
- else => false,
- };
- }
-};
-
-const Encoding = enum {
- /// OP
- zo,
-
- /// OP rel32
- d,
-
- /// OP r/m64
- m,
-
- /// OP r64
- o,
-
- /// OP imm32
- i,
-
- /// OP r/m64, 1
- m1,
-
- /// OP r/m64, .cl
- mc,
-
- /// OP r/m64, imm32
- mi,
-
- /// OP r/m64, imm8
- mi8,
-
- /// OP r/m64, r64
- mr,
-
- /// OP r64, r/m64
- rm,
-
- /// OP r64, imm64
- oi,
-
- /// OP al/ax/eax/rax, moffs
- fd,
-
- /// OP moffs, al/ax/eax/rax
- td,
-
- /// OP r64, r/m64, imm32
- rmi,
-
- /// OP xmm1, xmm2/m64
- vm,
-
- /// OP m64, xmm1
- mv,
-
- /// OP xmm1, xmm2, xmm3/m64
- rvm,
-
- /// OP xmm1, xmm2, xmm3/m64, imm8
- rvmi,
-};
-
-const OpCode = struct {
- bytes: [3]u8,
- count: usize,
-
- fn init(comptime in_bytes: []const u8) OpCode {
- comptime assert(in_bytes.len <= 3);
- comptime var bytes: [3]u8 = undefined;
- inline for (in_bytes, 0..) |x, i| {
- bytes[i] = x;
- }
- return .{ .bytes = bytes, .count = in_bytes.len };
- }
-
- fn encode(opc: OpCode, encoder: Encoder) void {
- switch (opc.count) {
- 1 => encoder.opcode_1byte(opc.bytes[0]),
- 2 => encoder.opcode_2byte(opc.bytes[0], opc.bytes[1]),
- 3 => encoder.opcode_3byte(opc.bytes[0], opc.bytes[1], opc.bytes[2]),
- else => unreachable,
- }
- }
-
- fn encodeWithReg(opc: OpCode, encoder: Encoder, reg: Register) void {
- assert(opc.count == 1);
- encoder.opcode_withReg(opc.bytes[0], reg.lowEnc());
- }
-};
-
-inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
- // zig fmt: off
- switch (enc) {
- .zo => return switch (tag) {
- .ret_near => OpCode.init(&.{0xc3}),
- .ret_far => OpCode.init(&.{0xcb}),
- .int3 => OpCode.init(&.{0xcc}),
- .nop => OpCode.init(&.{0x90}),
- .syscall => OpCode.init(&.{ 0x0f, 0x05 }),
- .cbw => OpCode.init(&.{0x98}),
- .cwd,
- .cdq,
- .cqo => OpCode.init(&.{0x99}),
- else => unreachable,
- },
- .d => return switch (tag) {
- .jmp_near => OpCode.init(&.{0xe9}),
- .call_near => OpCode.init(&.{0xe8}),
-
- .jo => if (is_one_byte) OpCode.init(&.{0x70}) else OpCode.init(&.{0x0f,0x80}),
-
- .jno => if (is_one_byte) OpCode.init(&.{0x71}) else OpCode.init(&.{0x0f,0x81}),
-
- .jb,
- .jc,
- .jnae => if (is_one_byte) OpCode.init(&.{0x72}) else OpCode.init(&.{0x0f,0x82}),
-
- .jnb,
- .jnc,
- .jae => if (is_one_byte) OpCode.init(&.{0x73}) else OpCode.init(&.{0x0f,0x83}),
-
- .je,
- .jz => if (is_one_byte) OpCode.init(&.{0x74}) else OpCode.init(&.{0x0f,0x84}),
-
- .jne,
- .jnz => if (is_one_byte) OpCode.init(&.{0x75}) else OpCode.init(&.{0x0f,0x85}),
-
- .jna,
- .jbe => if (is_one_byte) OpCode.init(&.{0x76}) else OpCode.init(&.{0x0f,0x86}),
-
- .jnbe,
- .ja => if (is_one_byte) OpCode.init(&.{0x77}) else OpCode.init(&.{0x0f,0x87}),
-
- .js => if (is_one_byte) OpCode.init(&.{0x78}) else OpCode.init(&.{0x0f,0x88}),
-
- .jns => if (is_one_byte) OpCode.init(&.{0x79}) else OpCode.init(&.{0x0f,0x89}),
-
- .jpe,
- .jp => if (is_one_byte) OpCode.init(&.{0x7a}) else OpCode.init(&.{0x0f,0x8a}),
-
- .jpo,
- .jnp => if (is_one_byte) OpCode.init(&.{0x7b}) else OpCode.init(&.{0x0f,0x8b}),
-
- .jnge,
- .jl => if (is_one_byte) OpCode.init(&.{0x7c}) else OpCode.init(&.{0x0f,0x8c}),
-
- .jge,
- .jnl => if (is_one_byte) OpCode.init(&.{0x7d}) else OpCode.init(&.{0x0f,0x8d}),
-
- .jle,
- .jng => if (is_one_byte) OpCode.init(&.{0x7e}) else OpCode.init(&.{0x0f,0x8e}),
-
- .jg,
- .jnle => if (is_one_byte) OpCode.init(&.{0x7f}) else OpCode.init(&.{0x0f,0x8f}),
-
- else => unreachable,
- },
- .m => return switch (tag) {
- .jmp_near,
- .call_near,
- .push => OpCode.init(&.{0xff}),
-
- .pop => OpCode.init(&.{0x8f}),
- .seto => OpCode.init(&.{0x0f,0x90}),
- .setno => OpCode.init(&.{0x0f,0x91}),
-
- .setb,
- .setc,
- .setnae => OpCode.init(&.{0x0f,0x92}),
-
- .setnb,
- .setnc,
- .setae => OpCode.init(&.{0x0f,0x93}),
-
- .sete,
- .setz => OpCode.init(&.{0x0f,0x94}),
-
- .setne,
- .setnz => OpCode.init(&.{0x0f,0x95}),
-
- .setbe,
- .setna => OpCode.init(&.{0x0f,0x96}),
-
- .seta,
- .setnbe => OpCode.init(&.{0x0f,0x97}),
-
- .sets => OpCode.init(&.{0x0f,0x98}),
- .setns => OpCode.init(&.{0x0f,0x99}),
-
- .setp,
- .setpe => OpCode.init(&.{0x0f,0x9a}),
-
- .setnp,
- .setpo => OpCode.init(&.{0x0f,0x9b}),
-
- .setl,
- .setnge => OpCode.init(&.{0x0f,0x9c}),
-
- .setnl,
- .setge => OpCode.init(&.{0x0f,0x9d}),
-
- .setle,
- .setng => OpCode.init(&.{0x0f,0x9e}),
-
- .setnle,
- .setg => OpCode.init(&.{0x0f,0x9f}),
-
- .idiv,
- .div,
- .imul,
- .mul => if (is_one_byte) OpCode.init(&.{0xf6}) else OpCode.init(&.{0xf7}),
-
- .fisttp16 => OpCode.init(&.{0xdf}),
- .fisttp32 => OpCode.init(&.{0xdb}),
- .fisttp64 => OpCode.init(&.{0xdd}),
- .fld32 => OpCode.init(&.{0xd9}),
- .fld64 => OpCode.init(&.{0xdd}),
- else => unreachable,
- },
- .o => return switch (tag) {
- .push => OpCode.init(&.{0x50}),
- .pop => OpCode.init(&.{0x58}),
- else => unreachable,
- },
- .i => return switch (tag) {
- .push => if (is_one_byte) OpCode.init(&.{0x6a}) else OpCode.init(&.{0x68}),
- .@"test" => if (is_one_byte) OpCode.init(&.{0xa8}) else OpCode.init(&.{0xa9}),
- .ret_near => OpCode.init(&.{0xc2}),
- .ret_far => OpCode.init(&.{0xca}),
- else => unreachable,
- },
- .m1 => return switch (tag) {
- .shl, .sal,
- .shr, .sar => if (is_one_byte) OpCode.init(&.{0xd0}) else OpCode.init(&.{0xd1}),
- else => unreachable,
- },
- .mc => return switch (tag) {
- .shl, .sal,
- .shr, .sar => if (is_one_byte) OpCode.init(&.{0xd2}) else OpCode.init(&.{0xd3}),
- else => unreachable,
- },
- .mi => return switch (tag) {
- .adc, .add,
- .sub, .xor,
- .@"and", .@"or",
- .sbb, .cmp => if (is_one_byte) OpCode.init(&.{0x80}) else OpCode.init(&.{0x81}),
- .mov => if (is_one_byte) OpCode.init(&.{0xc6}) else OpCode.init(&.{0xc7}),
- .@"test" => if (is_one_byte) OpCode.init(&.{0xf6}) else OpCode.init(&.{0xf7}),
- else => unreachable,
- },
- .mi8 => return switch (tag) {
- .adc, .add,
- .sub, .xor,
- .@"and", .@"or",
- .sbb, .cmp => OpCode.init(&.{0x83}),
- .shl, .sal,
- .shr, .sar => if (is_one_byte) OpCode.init(&.{0xc0}) else OpCode.init(&.{0xc1}),
- else => unreachable,
- },
- .mr => return switch (tag) {
- .adc => if (is_one_byte) OpCode.init(&.{0x10}) else OpCode.init(&.{0x11}),
- .add => if (is_one_byte) OpCode.init(&.{0x00}) else OpCode.init(&.{0x01}),
- .sub => if (is_one_byte) OpCode.init(&.{0x28}) else OpCode.init(&.{0x29}),
- .xor => if (is_one_byte) OpCode.init(&.{0x30}) else OpCode.init(&.{0x31}),
- .@"and" => if (is_one_byte) OpCode.init(&.{0x20}) else OpCode.init(&.{0x21}),
- .@"or" => if (is_one_byte) OpCode.init(&.{0x08}) else OpCode.init(&.{0x09}),
- .sbb => if (is_one_byte) OpCode.init(&.{0x18}) else OpCode.init(&.{0x19}),
- .cmp => if (is_one_byte) OpCode.init(&.{0x38}) else OpCode.init(&.{0x39}),
- .mov => if (is_one_byte) OpCode.init(&.{0x88}) else OpCode.init(&.{0x89}),
- .@"test" => if (is_one_byte) OpCode.init(&.{0x84}) else OpCode.init(&.{0x85}),
- .movsd => OpCode.init(&.{0xf2,0x0f,0x11}),
- .movss => OpCode.init(&.{0xf3,0x0f,0x11}),
- else => unreachable,
- },
- .rm => return switch (tag) {
- .adc => if (is_one_byte) OpCode.init(&.{0x12}) else OpCode.init(&.{0x13}),
- .add => if (is_one_byte) OpCode.init(&.{0x02}) else OpCode.init(&.{0x03}),
- .sub => if (is_one_byte) OpCode.init(&.{0x2a}) else OpCode.init(&.{0x2b}),
- .xor => if (is_one_byte) OpCode.init(&.{0x32}) else OpCode.init(&.{0x33}),
- .@"and" => if (is_one_byte) OpCode.init(&.{0x22}) else OpCode.init(&.{0x23}),
- .@"or" => if (is_one_byte) OpCode.init(&.{0x0a}) else OpCode.init(&.{0x0b}),
- .sbb => if (is_one_byte) OpCode.init(&.{0x1a}) else OpCode.init(&.{0x1b}),
- .cmp => if (is_one_byte) OpCode.init(&.{0x3a}) else OpCode.init(&.{0x3b}),
- .mov => if (is_one_byte) OpCode.init(&.{0x8a}) else OpCode.init(&.{0x8b}),
- .movsx => if (is_one_byte) OpCode.init(&.{0x0f,0xbe}) else OpCode.init(&.{0x0f,0xbf}),
- .movsxd => OpCode.init(&.{0x63}),
- .movzx => if (is_one_byte) OpCode.init(&.{0x0f,0xb6}) else OpCode.init(&.{0x0f,0xb7}),
- .lea => if (is_one_byte) OpCode.init(&.{0x8c}) else OpCode.init(&.{0x8d}),
- .imul => OpCode.init(&.{0x0f,0xaf}),
-
- .cmova,
- .cmovnbe, => OpCode.init(&.{0x0f,0x47}),
-
- .cmovae,
- .cmovnb, => OpCode.init(&.{0x0f,0x43}),
-
- .cmovb,
- .cmovc,
- .cmovnae => OpCode.init(&.{0x0f,0x42}),
-
- .cmovbe,
- .cmovna, => OpCode.init(&.{0x0f,0x46}),
-
- .cmove,
- .cmovz, => OpCode.init(&.{0x0f,0x44}),
-
- .cmovg,
- .cmovnle, => OpCode.init(&.{0x0f,0x4f}),
-
- .cmovge,
- .cmovnl, => OpCode.init(&.{0x0f,0x4d}),
-
- .cmovl,
- .cmovnge, => OpCode.init(&.{0x0f,0x4c}),
-
- .cmovle,
- .cmovng, => OpCode.init(&.{0x0f,0x4e}),
-
- .cmovne,
- .cmovnz, => OpCode.init(&.{0x0f,0x45}),
-
- .cmovno => OpCode.init(&.{0x0f,0x41}),
-
- .cmovnp,
- .cmovpo, => OpCode.init(&.{0x0f,0x4b}),
-
- .cmovns => OpCode.init(&.{0x0f,0x49}),
-
- .cmovo => OpCode.init(&.{0x0f,0x40}),
-
- .cmovp,
- .cmovpe, => OpCode.init(&.{0x0f,0x4a}),
-
- .cmovs => OpCode.init(&.{0x0f,0x48}),
-
- .movsd => OpCode.init(&.{0xf2,0x0f,0x10}),
- .movss => OpCode.init(&.{0xf3,0x0f,0x10}),
- .addsd => OpCode.init(&.{0xf2,0x0f,0x58}),
- .addss => OpCode.init(&.{0xf3,0x0f,0x58}),
- .ucomisd => OpCode.init(&.{0x66,0x0f,0x2e}),
- .ucomiss => OpCode.init(&.{0x0f,0x2e}),
- else => unreachable,
- },
- .oi => return switch (tag) {
- .mov => if (is_one_byte) OpCode.init(&.{0xb0}) else OpCode.init(&.{0xb8}),
- else => unreachable,
- },
- .fd => return switch (tag) {
- .mov => if (is_one_byte) OpCode.init(&.{0xa0}) else OpCode.init(&.{0xa1}),
- else => unreachable,
- },
- .td => return switch (tag) {
- .mov => if (is_one_byte) OpCode.init(&.{0xa2}) else OpCode.init(&.{0xa3}),
- else => unreachable,
- },
- .rmi => return switch (tag) {
- .imul => if (is_one_byte) OpCode.init(&.{0x6b}) else OpCode.init(&.{0x69}),
- else => unreachable,
- },
- .mv => return switch (tag) {
- .vmovsd,
- .vmovss => OpCode.init(&.{0x11}),
- else => unreachable,
- },
- .vm => return switch (tag) {
- .vmovsd,
- .vmovss => OpCode.init(&.{0x10}),
- .vucomisd,
- .vucomiss => OpCode.init(&.{0x2e}),
- else => unreachable,
- },
- .rvm => return switch (tag) {
- .vaddsd,
- .vaddss => OpCode.init(&.{0x58}),
- .vmovsd,
- .vmovss => OpCode.init(&.{0x10}),
- else => unreachable,
- },
- .rvmi => return switch (tag) {
- .vcmpsd,
- .vcmpss => OpCode.init(&.{0xc2}),
- else => unreachable,
- },
- }
- // zig fmt: on
-}
-
-inline fn getModRmExt(tag: Tag) u3 {
- return switch (tag) {
- .adc => 0x2,
- .add => 0x0,
- .sub => 0x5,
- .xor => 0x6,
- .@"and" => 0x4,
- .@"or" => 0x1,
- .sbb => 0x3,
- .cmp => 0x7,
- .mov => 0x0,
- .jmp_near => 0x4,
- .call_near => 0x2,
- .push => 0x6,
- .pop => 0x0,
- .@"test" => 0x0,
- .seto,
- .setno,
- .setb,
- .setc,
- .setnae,
- .setnb,
- .setnc,
- .setae,
- .sete,
- .setz,
- .setne,
- .setnz,
- .setbe,
- .setna,
- .seta,
- .setnbe,
- .sets,
- .setns,
- .setp,
- .setpe,
- .setnp,
- .setpo,
- .setl,
- .setnge,
- .setnl,
- .setge,
- .setle,
- .setng,
- .setnle,
- .setg,
- => 0x0,
- .shl,
- .sal,
- => 0x4,
- .shr => 0x5,
- .sar => 0x7,
- .mul => 0x4,
- .imul => 0x5,
- .div => 0x6,
- .idiv => 0x7,
- .fisttp16 => 0x1,
- .fisttp32 => 0x1,
- .fisttp64 => 0x1,
- .fld32 => 0x0,
- .fld64 => 0x0,
- else => unreachable,
- };
-}
-
-const VexEncoding = struct {
- prefix: Encoder.Vex,
- reg: ?enum {
- ndd,
- nds,
- dds,
- },
-};
-
-inline fn getVexEncoding(tag: Tag, enc: Encoding) VexEncoding {
- const desc: struct {
- reg: enum {
- none,
- ndd,
- nds,
- dds,
- } = .none,
- len_256: bool = false,
- wig: bool = false,
- lig: bool = false,
- lz: bool = false,
- lead_opc: enum {
- l_0f,
- l_0f_3a,
- l_0f_38,
- } = .l_0f,
- simd_prefix: enum {
- none,
- p_66,
- p_f2,
- p_f3,
- } = .none,
- } = blk: {
- switch (enc) {
- .mv => switch (tag) {
- .vmovsd => break :blk .{ .lig = true, .simd_prefix = .p_f2, .wig = true },
- .vmovss => break :blk .{ .lig = true, .simd_prefix = .p_f3, .wig = true },
- else => unreachable,
- },
- .vm => switch (tag) {
- .vmovsd => break :blk .{ .lig = true, .simd_prefix = .p_f2, .wig = true },
- .vmovss => break :blk .{ .lig = true, .simd_prefix = .p_f3, .wig = true },
- .vucomisd => break :blk .{ .lig = true, .simd_prefix = .p_66, .wig = true },
- .vucomiss => break :blk .{ .lig = true, .wig = true },
- else => unreachable,
- },
- .rvm => switch (tag) {
- .vaddsd => break :blk .{ .reg = .nds, .lig = true, .simd_prefix = .p_f2, .wig = true },
- .vaddss => break :blk .{ .reg = .nds, .lig = true, .simd_prefix = .p_f3, .wig = true },
- .vmovsd => break :blk .{ .reg = .nds, .lig = true, .simd_prefix = .p_f2, .wig = true },
- .vmovss => break :blk .{ .reg = .nds, .lig = true, .simd_prefix = .p_f3, .wig = true },
- else => unreachable,
- },
- .rvmi => switch (tag) {
- .vcmpsd => break :blk .{ .reg = .nds, .lig = true, .simd_prefix = .p_f2, .wig = true },
- .vcmpss => break :blk .{ .reg = .nds, .lig = true, .simd_prefix = .p_f3, .wig = true },
- else => unreachable,
- },
- else => unreachable,
- }
- };
-
- var vex: Encoder.Vex = .{};
-
- if (desc.len_256) vex.len_256();
- if (desc.wig) vex.wig();
- if (desc.lig) vex.lig();
- if (desc.lz) vex.lz();
-
- switch (desc.lead_opc) {
- .l_0f => {},
- .l_0f_3a => vex.lead_opc_0f_3a(),
- .l_0f_38 => vex.lead_opc_0f_38(),
- }
-
- switch (desc.simd_prefix) {
- .none => {},
- .p_66 => vex.simd_prefix_66(),
- .p_f2 => vex.simd_prefix_f2(),
- .p_f3 => vex.simd_prefix_f3(),
- }
-
- return VexEncoding{ .prefix = vex, .reg = switch (desc.reg) {
- .none => null,
- .nds => .nds,
- .dds => .dds,
- .ndd => .ndd,
- } };
-}
-
-const ScaleIndex = packed struct {
- scale: u2,
- index: Register,
-};
-
-const Memory = struct {
- base: ?Register,
- rip: bool = false,
- disp: u32,
- ptr_size: PtrSize,
- scale_index: ?ScaleIndex = null,
-
- const PtrSize = enum(u2) {
- byte_ptr = 0b00,
- word_ptr = 0b01,
- dword_ptr = 0b10,
- qword_ptr = 0b11,
-
- fn new(bit_size: u64) PtrSize {
- return @intToEnum(PtrSize, math.log2_int(u4, @intCast(u4, @divExact(bit_size, 8))));
- }
-
- /// Returns size in bits.
- fn size(ptr_size: PtrSize) u64 {
- return 8 * (math.powi(u8, 2, @enumToInt(ptr_size)) catch unreachable);
- }
- };
-
- fn encode(mem_op: Memory, encoder: Encoder, operand: u3) void {
- if (mem_op.base) |base| {
- const dst = base.lowEnc();
- const src = operand;
- if (dst == 4 or mem_op.scale_index != null) {
- if (mem_op.disp == 0 and dst != 5) {
- encoder.modRm_SIBDisp0(src);
- if (mem_op.scale_index) |si| {
- encoder.sib_scaleIndexBase(si.scale, si.index.lowEnc(), dst);
- } else {
- encoder.sib_base(dst);
- }
- } else if (immOpSize(mem_op.disp) == 8) {
- encoder.modRm_SIBDisp8(src);
- if (mem_op.scale_index) |si| {
- encoder.sib_scaleIndexBaseDisp8(si.scale, si.index.lowEnc(), dst);
- } else {
- encoder.sib_baseDisp8(dst);
- }
- encoder.disp8(@bitCast(i8, @truncate(u8, mem_op.disp)));
- } else {
- encoder.modRm_SIBDisp32(src);
- if (mem_op.scale_index) |si| {
- encoder.sib_scaleIndexBaseDisp32(si.scale, si.index.lowEnc(), dst);
- } else {
- encoder.sib_baseDisp32(dst);
- }
- encoder.disp32(@bitCast(i32, mem_op.disp));
- }
- } else {
- if (mem_op.disp == 0 and dst != 5) {
- encoder.modRm_indirectDisp0(src, dst);
- } else if (immOpSize(mem_op.disp) == 8) {
- encoder.modRm_indirectDisp8(src, dst);
- encoder.disp8(@bitCast(i8, @truncate(u8, mem_op.disp)));
- } else {
- encoder.modRm_indirectDisp32(src, dst);
- encoder.disp32(@bitCast(i32, mem_op.disp));
- }
- }
- } else {
- if (mem_op.rip) {
- encoder.modRm_RIPDisp32(operand);
- } else {
- encoder.modRm_SIBDisp0(operand);
- if (mem_op.scale_index) |si| {
- encoder.sib_scaleIndexDisp32(si.scale, si.index.lowEnc());
- } else {
- encoder.sib_disp32();
- }
- }
- encoder.disp32(@bitCast(i32, mem_op.disp));
- }
- }
-
- /// Returns size in bits.
- fn size(memory: Memory) u64 {
- return memory.ptr_size.size();
- }
-};
-
-fn encodeImm(encoder: Encoder, imm: u32, size: u64) void {
- switch (size) {
- 8 => encoder.imm8(@bitCast(i8, @truncate(u8, imm))),
- 16 => encoder.imm16(@bitCast(i16, @truncate(u16, imm))),
- 32, 64 => encoder.imm32(@bitCast(i32, imm)),
- else => unreachable,
- }
-}
-
-const RegisterOrMemory = union(enum) {
- register: Register,
- memory: Memory,
-
- fn reg(register: Register) RegisterOrMemory {
- return .{ .register = register };
- }
-
- fn mem(ptr_size: Memory.PtrSize, args: struct {
- disp: u32,
- base: ?Register = null,
- scale_index: ?ScaleIndex = null,
- }) RegisterOrMemory {
- return .{
- .memory = .{
- .base = args.base,
- .disp = args.disp,
- .ptr_size = ptr_size,
- .scale_index = args.scale_index,
- },
- };
- }
-
- fn rip(ptr_size: Memory.PtrSize, disp: u32) RegisterOrMemory {
- return .{
- .memory = .{
- .base = null,
- .rip = true,
- .disp = disp,
- .ptr_size = ptr_size,
- },
- };
- }
-
- /// Returns size in bits.
- fn size(reg_or_mem: RegisterOrMemory) u64 {
- return switch (reg_or_mem) {
- .register => |register| register.size(),
- .memory => |memory| memory.size(),
- };
- }
-};
-
-fn lowerToZoEnc(tag: Tag, code: *std.ArrayList(u8)) InnerError!void {
- assert(!tag.isAvx());
- const opc = getOpCode(tag, .zo, false);
- const encoder = try Encoder.init(code, 2);
- switch (tag) {
- .cqo => {
- encoder.rex(.{
- .w = true,
- });
- },
- else => {},
- }
- opc.encode(encoder);
-}
-
-fn lowerToIEnc(tag: Tag, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
- assert(!tag.isAvx());
- if (tag == .ret_far or tag == .ret_near) {
- const encoder = try Encoder.init(code, 3);
- const opc = getOpCode(tag, .i, false);
- opc.encode(encoder);
- encoder.imm16(@bitCast(i16, @truncate(u16, imm)));
- return;
- }
- const opc = getOpCode(tag, .i, immOpSize(imm) == 8);
- const encoder = try Encoder.init(code, 5);
- if (immOpSize(imm) == 16) {
- encoder.prefix16BitMode();
- }
- opc.encode(encoder);
- encodeImm(encoder, imm, immOpSize(imm));
-}
-
-fn lowerToOEnc(tag: Tag, reg: Register, code: *std.ArrayList(u8)) InnerError!void {
- assert(!tag.isAvx());
- const opc = getOpCode(tag, .o, false);
- const encoder = try Encoder.init(code, 3);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- encoder.rex(.{
- .w = false,
- .b = reg.isExtended(),
- });
- opc.encodeWithReg(encoder, reg);
-}
-
-fn lowerToDEnc(tag: Tag, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
- assert(!tag.isAvx());
- const opc = getOpCode(tag, .d, false);
- const encoder = try Encoder.init(code, 6);
- opc.encode(encoder);
- encoder.imm32(@bitCast(i32, imm));
-}
-
-fn lowerToMxEnc(tag: Tag, reg_or_mem: RegisterOrMemory, enc: Encoding, code: *std.ArrayList(u8)) InnerError!void {
- assert(!tag.isAvx());
- const opc = getOpCode(tag, enc, reg_or_mem.size() == 8);
- const modrm_ext = getModRmExt(tag);
- switch (reg_or_mem) {
- .register => |reg| {
- const encoder = try Encoder.init(code, 4);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- const wide = if (tag == .jmp_near) false else setRexWRegister(reg);
- encoder.rex(.{
- .w = wide,
- .b = reg.isExtended(),
- });
- opc.encode(encoder);
- encoder.modRm_direct(modrm_ext, reg.lowEnc());
- },
- .memory => |mem_op| {
- const encoder = try Encoder.init(code, 8);
- if (mem_op.ptr_size == .word_ptr) {
- encoder.prefix16BitMode();
- }
- if (mem_op.base) |base| {
- const wide = if (tag == .jmp_near) false else mem_op.ptr_size == .qword_ptr;
- encoder.rex(.{
- .w = wide,
- .b = base.isExtended(),
- .x = if (mem_op.scale_index) |si| si.index.isExtended() else false,
- });
- }
- opc.encode(encoder);
- mem_op.encode(encoder, modrm_ext);
- },
- }
-}
-
-fn lowerToMEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8)) InnerError!void {
- return lowerToMxEnc(tag, reg_or_mem, .m, code);
-}
-
-fn lowerToM1Enc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8)) InnerError!void {
- return lowerToMxEnc(tag, reg_or_mem, .m1, code);
-}
-
-fn lowerToMcEnc(tag: Tag, reg_or_mem: RegisterOrMemory, code: *std.ArrayList(u8)) InnerError!void {
- return lowerToMxEnc(tag, reg_or_mem, .mc, code);
-}
-
-fn lowerToTdEnc(tag: Tag, moffs: u64, reg: Register, code: *std.ArrayList(u8)) InnerError!void {
- return lowerToTdFdEnc(tag, reg, moffs, code, true);
-}
-
-fn lowerToFdEnc(tag: Tag, reg: Register, moffs: u64, code: *std.ArrayList(u8)) InnerError!void {
- return lowerToTdFdEnc(tag, reg, moffs, code, false);
-}
-
-fn lowerToTdFdEnc(tag: Tag, reg: Register, moffs: u64, code: *std.ArrayList(u8), td: bool) InnerError!void {
- assert(!tag.isAvx());
- const opc = if (td) getOpCode(tag, .td, reg.size() == 8) else getOpCode(tag, .fd, reg.size() == 8);
- const encoder = try Encoder.init(code, 10);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- encoder.rex(.{
- .w = setRexWRegister(reg),
- });
- opc.encode(encoder);
- switch (reg.size()) {
- 8 => encoder.imm8(@bitCast(i8, @truncate(u8, moffs))),
- 16 => encoder.imm16(@bitCast(i16, @truncate(u16, moffs))),
- 32 => encoder.imm32(@bitCast(i32, @truncate(u32, moffs))),
- 64 => encoder.imm64(moffs),
- else => unreachable,
- }
-}
-
-fn lowerToOiEnc(tag: Tag, reg: Register, imm: u64, code: *std.ArrayList(u8)) InnerError!void {
- assert(!tag.isAvx());
- const opc = getOpCode(tag, .oi, reg.size() == 8);
- const encoder = try Encoder.init(code, 10);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- encoder.rex(.{
- .w = setRexWRegister(reg),
- .b = reg.isExtended(),
- });
- opc.encodeWithReg(encoder, reg);
- switch (reg.size()) {
- 8 => encoder.imm8(@bitCast(i8, @truncate(u8, imm))),
- 16 => encoder.imm16(@bitCast(i16, @truncate(u16, imm))),
- 32 => encoder.imm32(@bitCast(i32, @truncate(u32, imm))),
- 64 => encoder.imm64(imm),
- else => unreachable,
- }
-}
-
-fn lowerToMiXEnc(
- tag: Tag,
- reg_or_mem: RegisterOrMemory,
- imm: u32,
- enc: Encoding,
- code: *std.ArrayList(u8),
-) InnerError!void {
- assert(!tag.isAvx());
- const modrm_ext = getModRmExt(tag);
- const opc = getOpCode(tag, enc, reg_or_mem.size() == 8);
- switch (reg_or_mem) {
- .register => |dst_reg| {
- const encoder = try Encoder.init(code, 7);
- if (dst_reg.size() == 16) {
- // 0x66 prefix switches to the non-default size; here we assume a switch from
- // the default 32bits to 16bits operand-size.
- // More info: https://www.cs.uni-potsdam.de/desn/lehre/ss15/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf#page=32&zoom=auto,-159,773
- encoder.prefix16BitMode();
- }
- encoder.rex(.{
- .w = setRexWRegister(dst_reg),
- .b = dst_reg.isExtended(),
- });
- opc.encode(encoder);
- encoder.modRm_direct(modrm_ext, dst_reg.lowEnc());
- encodeImm(encoder, imm, if (enc == .mi8) 8 else dst_reg.size());
- },
- .memory => |dst_mem| {
- const encoder = try Encoder.init(code, 12);
- if (dst_mem.ptr_size == .word_ptr) {
- encoder.prefix16BitMode();
- }
- if (dst_mem.base) |base| {
- encoder.rex(.{
- .w = dst_mem.ptr_size == .qword_ptr,
- .b = base.isExtended(),
- .x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
- });
- } else {
- encoder.rex(.{
- .w = dst_mem.ptr_size == .qword_ptr,
- .x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
- });
- }
- opc.encode(encoder);
- dst_mem.encode(encoder, modrm_ext);
- encodeImm(encoder, imm, if (enc == .mi8) 8 else dst_mem.ptr_size.size());
- },
- }
-}
-
-fn lowerToMiImm8Enc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: u8, code: *std.ArrayList(u8)) InnerError!void {
- return lowerToMiXEnc(tag, reg_or_mem, imm, .mi8, code);
-}
-
-fn lowerToMiEnc(tag: Tag, reg_or_mem: RegisterOrMemory, imm: u32, code: *std.ArrayList(u8)) InnerError!void {
- return lowerToMiXEnc(tag, reg_or_mem, imm, .mi, code);
-}
-
-fn lowerToRmEnc(
- tag: Tag,
- reg: Register,
- reg_or_mem: RegisterOrMemory,
- code: *std.ArrayList(u8),
-) InnerError!void {
- assert(!tag.isAvx());
- const opc = getOpCode(tag, .rm, reg.size() == 8 or reg_or_mem.size() == 8);
- switch (reg_or_mem) {
- .register => |src_reg| {
- const encoder = try Encoder.init(code, 5);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- encoder.rex(.{
- .w = setRexWRegister(reg) or setRexWRegister(src_reg),
- .r = reg.isExtended(),
- .b = src_reg.isExtended(),
- });
- opc.encode(encoder);
- encoder.modRm_direct(reg.lowEnc(), src_reg.lowEnc());
- },
- .memory => |src_mem| {
- const encoder = try Encoder.init(code, 9);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- if (src_mem.base) |base| {
- // TODO handle 32-bit base register - requires prefix 0x67
- // Intel Manual, Vol 1, chapter 3.6 and 3.6.1
- encoder.rex(.{
- .w = setRexWRegister(reg),
- .r = reg.isExtended(),
- .b = base.isExtended(),
- .x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
- });
- } else {
- encoder.rex(.{
- .w = setRexWRegister(reg),
- .r = reg.isExtended(),
- .x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
- });
- }
- opc.encode(encoder);
- src_mem.encode(encoder, reg.lowEnc());
- },
- }
-}
-
-fn lowerToMrEnc(
- tag: Tag,
- reg_or_mem: RegisterOrMemory,
- reg: Register,
- code: *std.ArrayList(u8),
-) InnerError!void {
- assert(!tag.isAvx());
- const opc = getOpCode(tag, .mr, reg.size() == 8 or reg_or_mem.size() == 8);
- switch (reg_or_mem) {
- .register => |dst_reg| {
- const encoder = try Encoder.init(code, 4);
- if (dst_reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- encoder.rex(.{
- .w = setRexWRegister(dst_reg) or setRexWRegister(reg),
- .r = reg.isExtended(),
- .b = dst_reg.isExtended(),
- });
- opc.encode(encoder);
- encoder.modRm_direct(reg.lowEnc(), dst_reg.lowEnc());
- },
- .memory => |dst_mem| {
- const encoder = try Encoder.init(code, 9);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- if (dst_mem.base) |base| {
- encoder.rex(.{
- .w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
- .r = reg.isExtended(),
- .b = base.isExtended(),
- .x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
- });
- } else {
- encoder.rex(.{
- .w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
- .r = reg.isExtended(),
- .x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
- });
- }
- opc.encode(encoder);
- dst_mem.encode(encoder, reg.lowEnc());
- },
- }
-}
-
-fn lowerToRmiEnc(
- tag: Tag,
- reg: Register,
- reg_or_mem: RegisterOrMemory,
- imm: u32,
- code: *std.ArrayList(u8),
-) InnerError!void {
- assert(!tag.isAvx());
- const opc = getOpCode(tag, .rmi, false);
- const encoder = try Encoder.init(code, 13);
- if (reg.size() == 16) {
- encoder.prefix16BitMode();
- }
- switch (reg_or_mem) {
- .register => |src_reg| {
- encoder.rex(.{
- .w = setRexWRegister(reg) or setRexWRegister(src_reg),
- .r = reg.isExtended(),
- .b = src_reg.isExtended(),
- });
- opc.encode(encoder);
- encoder.modRm_direct(reg.lowEnc(), src_reg.lowEnc());
- },
- .memory => |src_mem| {
- if (src_mem.base) |base| {
- // TODO handle 32-bit base register - requires prefix 0x67
- // Intel Manual, Vol 1, chapter 3.6 and 3.6.1
- encoder.rex(.{
- .w = setRexWRegister(reg),
- .r = reg.isExtended(),
- .b = base.isExtended(),
- .x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
- });
- } else {
- encoder.rex(.{
- .w = setRexWRegister(reg),
- .r = reg.isExtended(),
- .x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
- });
- }
- opc.encode(encoder);
- src_mem.encode(encoder, reg.lowEnc());
- },
- }
- encodeImm(encoder, imm, reg.size());
-}
-
-/// Also referred to as XM encoding in Intel manual.
-fn lowerToVmEnc(
- tag: Tag,
- reg: Register,
- reg_or_mem: RegisterOrMemory,
- code: *std.ArrayList(u8),
-) InnerError!void {
- const opc = getOpCode(tag, .vm, false);
- var enc = getVexEncoding(tag, .vm);
- const vex = &enc.prefix;
- switch (reg_or_mem) {
- .register => |src_reg| {
- const encoder = try Encoder.init(code, 5);
- vex.rex(.{
- .r = reg.isExtended(),
- .b = src_reg.isExtended(),
- });
- encoder.vex(enc.prefix);
- opc.encode(encoder);
- encoder.modRm_direct(reg.lowEnc(), src_reg.lowEnc());
- },
- .memory => |src_mem| {
- const encoder = try Encoder.init(code, 10);
- if (src_mem.base) |base| {
- vex.rex(.{
- .r = reg.isExtended(),
- .b = base.isExtended(),
- .x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
- });
- } else {
- vex.rex(.{
- .r = reg.isExtended(),
- .x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
- });
- }
- encoder.vex(enc.prefix);
- opc.encode(encoder);
- src_mem.encode(encoder, reg.lowEnc());
- },
- }
-}
-
-/// Usually referred to as MR encoding with V/V in Intel manual.
-fn lowerToMvEnc(
- tag: Tag,
- reg_or_mem: RegisterOrMemory,
- reg: Register,
- code: *std.ArrayList(u8),
-) InnerError!void {
- const opc = getOpCode(tag, .mv, false);
- var enc = getVexEncoding(tag, .mv);
- const vex = &enc.prefix;
- switch (reg_or_mem) {
- .register => |dst_reg| {
- const encoder = try Encoder.init(code, 4);
- vex.rex(.{
- .r = reg.isExtended(),
- .b = dst_reg.isExtended(),
- });
- encoder.vex(enc.prefix);
- opc.encode(encoder);
- encoder.modRm_direct(reg.lowEnc(), dst_reg.lowEnc());
- },
- .memory => |dst_mem| {
- const encoder = try Encoder.init(code, 10);
- if (dst_mem.base) |base| {
- vex.rex(.{
- .r = reg.isExtended(),
- .b = base.isExtended(),
- .x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
- });
- } else {
- vex.rex(.{
- .r = reg.isExtended(),
- .x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
- });
- }
- encoder.vex(enc.prefix);
- opc.encode(encoder);
- dst_mem.encode(encoder, reg.lowEnc());
- },
- }
-}
-
-fn lowerToRvmEnc(
- tag: Tag,
- reg1: Register,
- reg2: Register,
- reg_or_mem: RegisterOrMemory,
- code: *std.ArrayList(u8),
-) InnerError!void {
- const opc = getOpCode(tag, .rvm, false);
- var enc = getVexEncoding(tag, .rvm);
- const vex = &enc.prefix;
- switch (reg_or_mem) {
- .register => |reg3| {
- if (enc.reg) |vvvv| {
- switch (vvvv) {
- .nds => vex.reg(reg2.enc()),
- else => unreachable, // TODO
- }
- }
- const encoder = try Encoder.init(code, 5);
- vex.rex(.{
- .r = reg1.isExtended(),
- .b = reg3.isExtended(),
- });
- encoder.vex(enc.prefix);
- opc.encode(encoder);
- encoder.modRm_direct(reg1.lowEnc(), reg3.lowEnc());
- },
- .memory => |dst_mem| {
- _ = dst_mem;
- unreachable; // TODO
- },
- }
-}
-
-fn lowerToRvmiEnc(
- tag: Tag,
- reg1: Register,
- reg2: Register,
- reg_or_mem: RegisterOrMemory,
- imm: u32,
- code: *std.ArrayList(u8),
-) InnerError!void {
- const opc = getOpCode(tag, .rvmi, false);
- var enc = getVexEncoding(tag, .rvmi);
- const vex = &enc.prefix;
- const encoder: Encoder = blk: {
- switch (reg_or_mem) {
- .register => |reg3| {
- if (enc.reg) |vvvv| {
- switch (vvvv) {
- .nds => vex.reg(reg2.enc()),
- else => unreachable, // TODO
- }
- }
- const encoder = try Encoder.init(code, 5);
- vex.rex(.{
- .r = reg1.isExtended(),
- .b = reg3.isExtended(),
- });
- encoder.vex(enc.prefix);
- opc.encode(encoder);
- encoder.modRm_direct(reg1.lowEnc(), reg3.lowEnc());
- break :blk encoder;
- },
- .memory => |dst_mem| {
- _ = dst_mem;
- unreachable; // TODO
- },
- }
- };
- encodeImm(encoder, imm, 8); // TODO
-}
-
-fn expectEqualHexStrings(expected: []const u8, given: []const u8, assembly: []const u8) !void {
- assert(expected.len > 0);
- if (mem.eql(u8, expected, given)) return;
- const expected_fmt = try std.fmt.allocPrint(testing.allocator, "{x}", .{std.fmt.fmtSliceHexLower(expected)});
- defer testing.allocator.free(expected_fmt);
- const given_fmt = try std.fmt.allocPrint(testing.allocator, "{x}", .{std.fmt.fmtSliceHexLower(given)});
- defer testing.allocator.free(given_fmt);
- const idx = mem.indexOfDiff(u8, expected_fmt, given_fmt).?;
- var padding = try testing.allocator.alloc(u8, idx + 5);
- defer testing.allocator.free(padding);
- mem.set(u8, padding, ' ');
- std.debug.print("\nASM: {s}\nEXP: {s}\nGIV: {s}\n{s}^ -- first differing byte\n", .{
- assembly,
- expected_fmt,
- given_fmt,
- padding,
- });
- return error.TestFailed;
-}
-
-const TestEmit = struct {
- code_buffer: std.ArrayList(u8),
- next: usize = 0,
-
- fn init() TestEmit {
- return .{
- .code_buffer = std.ArrayList(u8).init(testing.allocator),
- };
- }
-
- fn deinit(emit: *TestEmit) void {
- emit.code_buffer.deinit();
- emit.next = undefined;
- }
-
- fn code(emit: *TestEmit) *std.ArrayList(u8) {
- emit.next = emit.code_buffer.items.len;
- return &emit.code_buffer;
- }
-
- fn lowered(emit: TestEmit) []const u8 {
- return emit.code_buffer.items[emit.next..];
- }
-};
-
-test "lower MI encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToMiEnc(.mov, RegisterOrMemory.reg(.rax), 0x10, emit.code());
- try expectEqualHexStrings("\x48\xc7\xc0\x10\x00\x00\x00", emit.lowered(), "mov rax, 0x10");
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.dword_ptr, .{ .disp = 0, .base = .r11 }), 0x10, emit.code());
- try expectEqualHexStrings("\x41\xc7\x03\x10\x00\x00\x00", emit.lowered(), "mov dword ptr [r11 + 0], 0x10");
- try lowerToMiEnc(.add, RegisterOrMemory.mem(.dword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -8)),
- .base = .rdx,
- }), 0x10, emit.code());
- try expectEqualHexStrings("\x81\x42\xF8\x10\x00\x00\x00", emit.lowered(), "add dword ptr [rdx - 8], 0x10");
- try lowerToMiEnc(.sub, RegisterOrMemory.mem(.dword_ptr, .{
- .disp = 0x10000000,
- .base = .r11,
- }), 0x10, emit.code());
- try expectEqualHexStrings(
- "\x41\x81\xab\x00\x00\x00\x10\x10\x00\x00\x00",
- emit.lowered(),
- "sub dword ptr [r11 + 0x10000000], 0x10",
- );
- try lowerToMiEnc(.@"and", RegisterOrMemory.mem(.dword_ptr, .{ .disp = 0x10000000 }), 0x10, emit.code());
- try expectEqualHexStrings(
- "\x81\x24\x25\x00\x00\x00\x10\x10\x00\x00\x00",
- emit.lowered(),
- "and dword ptr [ds:0x10000000], 0x10",
- );
- try lowerToMiEnc(.@"and", RegisterOrMemory.mem(.dword_ptr, .{
- .disp = 0x10000000,
- .base = .r12,
- }), 0x10, emit.code());
- try expectEqualHexStrings(
- "\x41\x81\xA4\x24\x00\x00\x00\x10\x10\x00\x00\x00",
- emit.lowered(),
- "and dword ptr [r12 + 0x10000000], 0x10",
- );
- try lowerToMiEnc(.mov, RegisterOrMemory.rip(.qword_ptr, 0x10), 0x10, emit.code());
- try expectEqualHexStrings(
- "\x48\xC7\x05\x10\x00\x00\x00\x10\x00\x00\x00",
- emit.lowered(),
- "mov qword ptr [rip + 0x10], 0x10",
- );
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -8)),
- .base = .rbp,
- }), 0x10, emit.code());
- try expectEqualHexStrings(
- "\x48\xc7\x45\xf8\x10\x00\x00\x00",
- emit.lowered(),
- "mov qword ptr [rbp - 8], 0x10",
- );
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.word_ptr, .{
- .disp = @bitCast(u32, @as(i32, -2)),
- .base = .rbp,
- }), 0x10, emit.code());
- try expectEqualHexStrings("\x66\xC7\x45\xFE\x10\x00", emit.lowered(), "mov word ptr [rbp - 2], 0x10");
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.byte_ptr, .{
- .disp = @bitCast(u32, @as(i32, -1)),
- .base = .rbp,
- }), 0x10, emit.code());
- try expectEqualHexStrings("\xC6\x45\xFF\x10", emit.lowered(), "mov byte ptr [rbp - 1], 0x10");
- try lowerToMiEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = 0x10000000,
- .scale_index = .{
- .scale = 1,
- .index = .rcx,
- },
- }), 0x10, emit.code());
- try expectEqualHexStrings(
- "\x48\xC7\x04\x4D\x00\x00\x00\x10\x10\x00\x00\x00",
- emit.lowered(),
- "mov qword ptr [rcx*2 + 0x10000000], 0x10",
- );
-
- try lowerToMiImm8Enc(.add, RegisterOrMemory.reg(.rax), 0x10, emit.code());
- try expectEqualHexStrings("\x48\x83\xC0\x10", emit.lowered(), "add rax, 0x10");
-}
-
-test "lower RM encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToRmEnc(.mov, .rax, RegisterOrMemory.reg(.rbx), emit.code());
- try expectEqualHexStrings("\x48\x8b\xc3", emit.lowered(), "mov rax, rbx");
- try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0, .base = .r11 }), emit.code());
- try expectEqualHexStrings("\x49\x8b\x03", emit.lowered(), "mov rax, qword ptr [r11 + 0]");
- try lowerToRmEnc(.add, .r11, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0x10000000 }), emit.code());
- try expectEqualHexStrings(
- "\x4C\x03\x1C\x25\x00\x00\x00\x10",
- emit.lowered(),
- "add r11, qword ptr [ds:0x10000000]",
- );
- try lowerToRmEnc(.add, .r12b, RegisterOrMemory.mem(.byte_ptr, .{ .disp = 0x10000000 }), emit.code());
- try expectEqualHexStrings(
- "\x44\x02\x24\x25\x00\x00\x00\x10",
- emit.lowered(),
- "add r11b, byte ptr [ds:0x10000000]",
- );
- try lowerToRmEnc(.sub, .r11, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = 0x10000000,
- .base = .r13,
- }), emit.code());
- try expectEqualHexStrings(
- "\x4D\x2B\x9D\x00\x00\x00\x10",
- emit.lowered(),
- "sub r11, qword ptr [r13 + 0x10000000]",
- );
- try lowerToRmEnc(.sub, .r11, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = 0x10000000,
- .base = .r12,
- }), emit.code());
- try expectEqualHexStrings(
- "\x4D\x2B\x9C\x24\x00\x00\x00\x10",
- emit.lowered(),
- "sub r11, qword ptr [r12 + 0x10000000]",
- );
- try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -4)),
- .base = .rbp,
- }), emit.code());
- try expectEqualHexStrings("\x48\x8B\x45\xFC", emit.lowered(), "mov rax, qword ptr [rbp - 4]");
- try lowerToRmEnc(.lea, .rax, RegisterOrMemory.rip(.qword_ptr, 0x10), emit.code());
- try expectEqualHexStrings("\x48\x8D\x05\x10\x00\x00\x00", emit.lowered(), "lea rax, [rip + 0x10]");
- try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -8)),
- .base = .rbp,
- .scale_index = .{
- .scale = 0,
- .index = .rcx,
- },
- }), emit.code());
- try expectEqualHexStrings("\x48\x8B\x44\x0D\xF8", emit.lowered(), "mov rax, qword ptr [rbp + rcx*1 - 8]");
- try lowerToRmEnc(.mov, .eax, RegisterOrMemory.mem(.dword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -4)),
- .base = .rbp,
- .scale_index = .{
- .scale = 2,
- .index = .rdx,
- },
- }), emit.code());
- try expectEqualHexStrings("\x8B\x44\x95\xFC", emit.lowered(), "mov eax, dword ptr [rbp + rdx*4 - 4]");
- try lowerToRmEnc(.mov, .rax, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -8)),
- .base = .rbp,
- .scale_index = .{
- .scale = 3,
- .index = .rcx,
- },
- }), emit.code());
- try expectEqualHexStrings("\x48\x8B\x44\xCD\xF8", emit.lowered(), "mov rax, qword ptr [rbp + rcx*8 - 8]");
- try lowerToRmEnc(.mov, .r8b, RegisterOrMemory.mem(.byte_ptr, .{
- .disp = @bitCast(u32, @as(i32, -24)),
- .base = .rsi,
- .scale_index = .{
- .scale = 0,
- .index = .rcx,
- },
- }), emit.code());
- try expectEqualHexStrings("\x44\x8A\x44\x0E\xE8", emit.lowered(), "mov r8b, byte ptr [rsi + rcx*1 - 24]");
- try lowerToRmEnc(.lea, .rsi, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = 0,
- .base = .rbp,
- .scale_index = .{
- .scale = 0,
- .index = .rcx,
- },
- }), emit.code());
- try expectEqualHexStrings("\x48\x8D\x74\x0D\x00", emit.lowered(), "lea rsi, qword ptr [rbp + rcx*1 + 0]");
-}
-
-test "lower MR encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToMrEnc(.mov, RegisterOrMemory.reg(.rax), .rbx, emit.code());
- try expectEqualHexStrings("\x48\x89\xd8", emit.lowered(), "mov rax, rbx");
- try lowerToMrEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -4)),
- .base = .rbp,
- }), .r11, emit.code());
- try expectEqualHexStrings("\x4c\x89\x5d\xfc", emit.lowered(), "mov qword ptr [rbp - 4], r11");
- try lowerToMrEnc(.add, RegisterOrMemory.mem(.byte_ptr, .{ .disp = 0x10000000 }), .r12b, emit.code());
- try expectEqualHexStrings(
- "\x44\x00\x24\x25\x00\x00\x00\x10",
- emit.lowered(),
- "add byte ptr [ds:0x10000000], r12b",
- );
- try lowerToMrEnc(.add, RegisterOrMemory.mem(.dword_ptr, .{ .disp = 0x10000000 }), .r12d, emit.code());
- try expectEqualHexStrings(
- "\x44\x01\x24\x25\x00\x00\x00\x10",
- emit.lowered(),
- "add dword ptr [ds:0x10000000], r12d",
- );
- try lowerToMrEnc(.sub, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = 0x10000000,
- .base = .r11,
- }), .r12, emit.code());
- try expectEqualHexStrings(
- "\x4D\x29\xA3\x00\x00\x00\x10",
- emit.lowered(),
- "sub qword ptr [r11 + 0x10000000], r12",
- );
- try lowerToMrEnc(.mov, RegisterOrMemory.rip(.qword_ptr, 0x10), .r12, emit.code());
- try expectEqualHexStrings("\x4C\x89\x25\x10\x00\x00\x00", emit.lowered(), "mov qword ptr [rip + 0x10], r12");
-}
-
-test "lower OI encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToOiEnc(.mov, .rax, 0x1000000000000000, emit.code());
- try expectEqualHexStrings(
- "\x48\xB8\x00\x00\x00\x00\x00\x00\x00\x10",
- emit.lowered(),
- "movabs rax, 0x1000000000000000",
- );
- try lowerToOiEnc(.mov, .r11, 0x1000000000000000, emit.code());
- try expectEqualHexStrings(
- "\x49\xBB\x00\x00\x00\x00\x00\x00\x00\x10",
- emit.lowered(),
- "movabs r11, 0x1000000000000000",
- );
- try lowerToOiEnc(.mov, .r11d, 0x10000000, emit.code());
- try expectEqualHexStrings("\x41\xBB\x00\x00\x00\x10", emit.lowered(), "mov r11d, 0x10000000");
- try lowerToOiEnc(.mov, .r11w, 0x1000, emit.code());
- try expectEqualHexStrings("\x66\x41\xBB\x00\x10", emit.lowered(), "mov r11w, 0x1000");
- try lowerToOiEnc(.mov, .r11b, 0x10, emit.code());
- try expectEqualHexStrings("\x41\xB3\x10", emit.lowered(), "mov r11b, 0x10");
-}
-
-test "lower FD/TD encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToFdEnc(.mov, .rax, 0x1000000000000000, emit.code());
- try expectEqualHexStrings(
- "\x48\xa1\x00\x00\x00\x00\x00\x00\x00\x10",
- emit.lowered(),
- "mov rax, ds:0x1000000000000000",
- );
- try lowerToFdEnc(.mov, .eax, 0x10000000, emit.code());
- try expectEqualHexStrings("\xa1\x00\x00\x00\x10", emit.lowered(), "mov eax, ds:0x10000000");
- try lowerToFdEnc(.mov, .ax, 0x1000, emit.code());
- try expectEqualHexStrings("\x66\xa1\x00\x10", emit.lowered(), "mov ax, ds:0x1000");
- try lowerToFdEnc(.mov, .al, 0x10, emit.code());
- try expectEqualHexStrings("\xa0\x10", emit.lowered(), "mov al, ds:0x10");
-}
-
-test "lower M encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToMEnc(.jmp_near, RegisterOrMemory.reg(.r12), emit.code());
- try expectEqualHexStrings("\x41\xFF\xE4", emit.lowered(), "jmp r12");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.reg(.r12w), emit.code());
- try expectEqualHexStrings("\x66\x41\xFF\xE4", emit.lowered(), "jmp r12w");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0, .base = .r12 }), emit.code());
- try expectEqualHexStrings("\x41\xFF\x24\x24", emit.lowered(), "jmp qword ptr [r12]");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.word_ptr, .{ .disp = 0, .base = .r12 }), emit.code());
- try expectEqualHexStrings("\x66\x41\xFF\x24\x24", emit.lowered(), "jmp word ptr [r12]");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0x10, .base = .r12 }), emit.code());
- try expectEqualHexStrings("\x41\xFF\x64\x24\x10", emit.lowered(), "jmp qword ptr [r12 + 0x10]");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = 0x1000,
- .base = .r12,
- }), emit.code());
- try expectEqualHexStrings(
- "\x41\xFF\xA4\x24\x00\x10\x00\x00",
- emit.lowered(),
- "jmp qword ptr [r12 + 0x1000]",
- );
- try lowerToMEnc(.jmp_near, RegisterOrMemory.rip(.qword_ptr, 0x10), emit.code());
- try expectEqualHexStrings("\xFF\x25\x10\x00\x00\x00", emit.lowered(), "jmp qword ptr [rip + 0x10]");
- try lowerToMEnc(.jmp_near, RegisterOrMemory.mem(.qword_ptr, .{ .disp = 0x10 }), emit.code());
- try expectEqualHexStrings("\xFF\x24\x25\x10\x00\x00\x00", emit.lowered(), "jmp qword ptr [ds:0x10]");
- try lowerToMEnc(.seta, RegisterOrMemory.reg(.r11b), emit.code());
- try expectEqualHexStrings("\x41\x0F\x97\xC3", emit.lowered(), "seta r11b");
- try lowerToMEnc(.idiv, RegisterOrMemory.reg(.rax), emit.code());
- try expectEqualHexStrings("\x48\xF7\xF8", emit.lowered(), "idiv rax");
- try lowerToMEnc(.imul, RegisterOrMemory.reg(.al), emit.code());
- try expectEqualHexStrings("\xF6\xE8", emit.lowered(), "imul al");
-}
-
-test "lower M1 and MC encodings" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToM1Enc(.sal, RegisterOrMemory.reg(.r12), emit.code());
- try expectEqualHexStrings("\x49\xD1\xE4", emit.lowered(), "sal r12, 1");
- try lowerToM1Enc(.sal, RegisterOrMemory.reg(.r12d), emit.code());
- try expectEqualHexStrings("\x41\xD1\xE4", emit.lowered(), "sal r12d, 1");
- try lowerToM1Enc(.sal, RegisterOrMemory.reg(.r12w), emit.code());
- try expectEqualHexStrings("\x66\x41\xD1\xE4", emit.lowered(), "sal r12w, 1");
- try lowerToM1Enc(.sal, RegisterOrMemory.reg(.r12b), emit.code());
- try expectEqualHexStrings("\x41\xD0\xE4", emit.lowered(), "sal r12b, 1");
- try lowerToM1Enc(.sal, RegisterOrMemory.reg(.rax), emit.code());
- try expectEqualHexStrings("\x48\xD1\xE0", emit.lowered(), "sal rax, 1");
- try lowerToM1Enc(.sal, RegisterOrMemory.reg(.eax), emit.code());
- try expectEqualHexStrings("\xD1\xE0", emit.lowered(), "sal eax, 1");
- try lowerToM1Enc(.sal, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -0x10)),
- .base = .rbp,
- }), emit.code());
- try expectEqualHexStrings("\x48\xD1\x65\xF0", emit.lowered(), "sal qword ptr [rbp - 0x10], 1");
- try lowerToM1Enc(.sal, RegisterOrMemory.mem(.dword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -0x10)),
- .base = .rbp,
- }), emit.code());
- try expectEqualHexStrings("\xD1\x65\xF0", emit.lowered(), "sal dword ptr [rbp - 0x10], 1");
-
- try lowerToMcEnc(.shr, RegisterOrMemory.reg(.r12), emit.code());
- try expectEqualHexStrings("\x49\xD3\xEC", emit.lowered(), "shr r12, cl");
- try lowerToMcEnc(.shr, RegisterOrMemory.reg(.rax), emit.code());
- try expectEqualHexStrings("\x48\xD3\xE8", emit.lowered(), "shr rax, cl");
-
- try lowerToMcEnc(.sar, RegisterOrMemory.reg(.rsi), emit.code());
- try expectEqualHexStrings("\x48\xD3\xFE", emit.lowered(), "sar rsi, cl");
-}
-
-test "lower O encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToOEnc(.pop, .r12, emit.code());
- try expectEqualHexStrings("\x41\x5c", emit.lowered(), "pop r12");
- try lowerToOEnc(.push, .r12w, emit.code());
- try expectEqualHexStrings("\x66\x41\x54", emit.lowered(), "push r12w");
-}
-
-test "lower RMI encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToRmiEnc(.imul, .rax, RegisterOrMemory.mem(.qword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -8)),
- .base = .rbp,
- }), 0x10, emit.code());
- try expectEqualHexStrings(
- "\x48\x69\x45\xF8\x10\x00\x00\x00",
- emit.lowered(),
- "imul rax, qword ptr [rbp - 8], 0x10",
- );
- try lowerToRmiEnc(.imul, .eax, RegisterOrMemory.mem(.dword_ptr, .{
- .disp = @bitCast(u32, @as(i32, -4)),
- .base = .rbp,
- }), 0x10, emit.code());
- try expectEqualHexStrings("\x69\x45\xFC\x10\x00\x00\x00", emit.lowered(), "imul eax, dword ptr [rbp - 4], 0x10");
- try lowerToRmiEnc(.imul, .ax, RegisterOrMemory.mem(.word_ptr, .{
- .disp = @bitCast(u32, @as(i32, -2)),
- .base = .rbp,
- }), 0x10, emit.code());
- try expectEqualHexStrings("\x66\x69\x45\xFE\x10\x00", emit.lowered(), "imul ax, word ptr [rbp - 2], 0x10");
- try lowerToRmiEnc(.imul, .r12, RegisterOrMemory.reg(.r12), 0x10, emit.code());
- try expectEqualHexStrings("\x4D\x69\xE4\x10\x00\x00\x00", emit.lowered(), "imul r12, r12, 0x10");
- try lowerToRmiEnc(.imul, .r12w, RegisterOrMemory.reg(.r12w), 0x10, emit.code());
- try expectEqualHexStrings("\x66\x45\x69\xE4\x10\x00", emit.lowered(), "imul r12w, r12w, 0x10");
-}
-
-test "lower MV encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToMvEnc(.vmovsd, RegisterOrMemory.rip(.qword_ptr, 0x10), .xmm1, emit.code());
- try expectEqualHexStrings(
- "\xC5\xFB\x11\x0D\x10\x00\x00\x00",
- emit.lowered(),
- "vmovsd qword ptr [rip + 0x10], xmm1",
- );
-}
-
-test "lower VM encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToVmEnc(.vmovsd, .xmm1, RegisterOrMemory.rip(.qword_ptr, 0x10), emit.code());
- try expectEqualHexStrings(
- "\xC5\xFB\x10\x0D\x10\x00\x00\x00",
- emit.lowered(),
- "vmovsd xmm1, qword ptr [rip + 0x10]",
- );
-}
-
-test "lower to RVM encoding" {
- var emit = TestEmit.init();
- defer emit.deinit();
- try lowerToRvmEnc(.vaddsd, .xmm0, .xmm1, RegisterOrMemory.reg(.xmm2), emit.code());
- try expectEqualHexStrings("\xC5\xF3\x58\xC2", emit.lowered(), "vaddsd xmm0, xmm1, xmm2");
- try lowerToRvmEnc(.vaddsd, .xmm0, .xmm0, RegisterOrMemory.reg(.xmm1), emit.code());
- try expectEqualHexStrings("\xC5\xFB\x58\xC1", emit.lowered(), "vaddsd xmm0, xmm0, xmm1");
-}
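The removed tests above exercised the hand-written per-encoding lowering helpers (MI, RM, MR, OI, FD/TD, M, M1/MC, O, RMI, MV, VM, RVM); the table-driven Encoding module added below replaces them. As a minimal standalone sketch (not part of the diff, using only std.testing), the first MI case above reads byte by byte as a REX.W prefix, the opcode with a /0 ModRM extension, a register-direct ModRM byte selecting rax, and a little-endian imm32:

const std = @import("std");

test "byte layout of MI-encoded mov rax, 0x10" {
    // 0x48: REX.W prefix selecting a 64-bit operand size
    // 0xC7: opcode for MOV r/m64, imm32
    // 0xC0: ModRM byte: mod=0b11 (register direct), reg=/0 (opcode extension), rm=0b000 (rax)
    // 0x10 0x00 0x00 0x00: the immediate 0x10 as little-endian imm32
    const expected = [_]u8{ 0x48, 0xC7, 0xC0, 0x10, 0x00, 0x00, 0x00 };
    try std.testing.expectEqualSlices(u8, "\x48\xc7\xc0\x10\x00\x00\x00", &expected);
}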
diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig
new file mode 100644
index 0000000000..a51f954aed
--- /dev/null
+++ b/src/arch/x86_64/Encoding.zig
@@ -0,0 +1,587 @@
+const Encoding = @This();
+
+const std = @import("std");
+const assert = std.debug.assert;
+const math = std.math;
+
+const bits = @import("bits.zig");
+const encoder = @import("encoder.zig");
+const Instruction = encoder.Instruction;
+const Register = bits.Register;
+const Rex = encoder.Rex;
+const LegacyPrefixes = encoder.LegacyPrefixes;
+
+const table = @import("encodings.zig").table;
+
+mnemonic: Mnemonic,
+op_en: OpEn,
+op1: Op,
+op2: Op,
+op3: Op,
+op4: Op,
+opc_len: u2,
+opc: [3]u8,
+modrm_ext: u3,
+mode: Mode,
+
+pub fn findByMnemonic(mnemonic: Mnemonic, args: struct {
+ op1: Instruction.Operand,
+ op2: Instruction.Operand,
+ op3: Instruction.Operand,
+ op4: Instruction.Operand,
+}) !?Encoding {
+ const input_op1 = Op.fromOperand(args.op1);
+ const input_op2 = Op.fromOperand(args.op2);
+ const input_op3 = Op.fromOperand(args.op3);
+ const input_op4 = Op.fromOperand(args.op4);
+
+ const ops = &[_]Instruction.Operand{ args.op1, args.op2, args.op3, args.op4 };
+ const rex_required = for (ops) |op| switch (op) {
+ .reg => |r| switch (r) {
+ .spl, .bpl, .sil, .dil => break true,
+ else => {},
+ },
+ else => {},
+ } else false;
+ const rex_invalid = for (ops) |op| switch (op) {
+ .reg => |r| switch (r) {
+ .ah, .bh, .ch, .dh => break true,
+ else => {},
+ },
+ else => {},
+ } else false;
+ const rex_extended = for (ops) |op| switch (op) {
+ .reg => |r| if (r.isExtended()) break true,
+ .mem => |m| {
+ if (m.base()) |base| {
+ if (base.isExtended()) break true;
+ }
+ if (m.scaleIndex()) |si| {
+ if (si.index.isExtended()) break true;
+ }
+ },
+ else => {},
+ } else false;
+
+ if ((rex_required or rex_extended) and rex_invalid) return error.CannotEncode;
+
+ // TODO work out the maximum number of variants we can actually find in one pass.
+ var candidates: [10]Encoding = undefined;
+ var count: usize = 0;
+ for (table) |entry| {
+ const enc = Encoding{
+ .mnemonic = entry[0],
+ .op_en = entry[1],
+ .op1 = entry[2],
+ .op2 = entry[3],
+ .op3 = entry[4],
+ .op4 = entry[5],
+ .opc_len = entry[6],
+ .opc = .{ entry[7], entry[8], entry[9] },
+ .modrm_ext = entry[10],
+ .mode = entry[11],
+ };
+ if (enc.mnemonic == mnemonic and
+ input_op1.isSubset(enc.op1, enc.mode) and
+ input_op2.isSubset(enc.op2, enc.mode) and
+ input_op3.isSubset(enc.op3, enc.mode) and
+ input_op4.isSubset(enc.op4, enc.mode))
+ {
+ if (rex_required) {
+ switch (enc.mode) {
+ .rex, .long => {
+ candidates[count] = enc;
+ count += 1;
+ },
+ else => {},
+ }
+ } else {
+ if (enc.mode != .rex) {
+ candidates[count] = enc;
+ count += 1;
+ }
+ }
+ }
+ }
+
+ if (count == 0) return null;
+ if (count == 1) return candidates[0];
+
+ const EncodingLength = struct {
+ fn estimate(encoding: Encoding, params: struct {
+ op1: Instruction.Operand,
+ op2: Instruction.Operand,
+ op3: Instruction.Operand,
+ op4: Instruction.Operand,
+ }) usize {
+ var inst = Instruction{
+ .op1 = params.op1,
+ .op2 = params.op2,
+ .op3 = params.op3,
+ .op4 = params.op4,
+ .encoding = encoding,
+ };
+ var cwriter = std.io.countingWriter(std.io.null_writer);
+ inst.encode(cwriter.writer()) catch unreachable; // Not allowed to fail here unless OOM.
+ return @intCast(usize, cwriter.bytes_written);
+ }
+ };
+
+ var shortest_encoding: ?struct {
+ index: usize,
+ len: usize,
+ } = null;
+ var i: usize = 0;
+ while (i < count) : (i += 1) {
+ const candidate = candidates[i];
+ switch (candidate.mode) {
+ .long, .rex => if (rex_invalid) return error.CannotEncode,
+ else => {},
+ }
+
+ const len = EncodingLength.estimate(candidate, .{
+ .op1 = args.op1,
+ .op2 = args.op2,
+ .op3 = args.op3,
+ .op4 = args.op4,
+ });
+ const current = shortest_encoding orelse {
+ shortest_encoding = .{ .index = i, .len = len };
+ continue;
+ };
+ if (len < current.len) {
+ shortest_encoding = .{ .index = i, .len = len };
+ }
+ }
+
+ return candidates[shortest_encoding.?.index];
+}
+
+/// Returns first matching encoding by opcode.
+pub fn findByOpcode(opc: []const u8, prefixes: struct {
+ legacy: LegacyPrefixes,
+ rex: Rex,
+}, modrm_ext: ?u3) ?Encoding {
+ for (table) |entry| {
+ const enc = Encoding{
+ .mnemonic = entry[0],
+ .op_en = entry[1],
+ .op1 = entry[2],
+ .op2 = entry[3],
+ .op3 = entry[4],
+ .op4 = entry[5],
+ .opc_len = entry[6],
+ .opc = .{ entry[7], entry[8], entry[9] },
+ .modrm_ext = entry[10],
+ .mode = entry[11],
+ };
+ const match = match: {
+ if (modrm_ext) |ext| {
+ break :match ext == enc.modrm_ext and std.mem.eql(u8, enc.opcode(), opc);
+ }
+ break :match std.mem.eql(u8, enc.opcode(), opc);
+ };
+ if (match) {
+ if (prefixes.rex.w) {
+ switch (enc.mode) {
+ .fpu, .sse, .sse2, .none => {},
+ .long, .rex => return enc,
+ }
+ } else if (prefixes.rex.present and !prefixes.rex.isSet()) {
+ if (enc.mode == .rex) return enc;
+ } else if (prefixes.legacy.prefix_66) {
+ switch (enc.operandBitSize()) {
+ 16 => return enc,
+ else => {},
+ }
+ } else {
+ if (enc.mode == .none) {
+ switch (enc.operandBitSize()) {
+ 16 => {},
+ else => return enc,
+ }
+ }
+ }
+ }
+ }
+ return null;
+}
+
+pub fn opcode(encoding: *const Encoding) []const u8 {
+ return encoding.opc[0..encoding.opc_len];
+}
+
+pub fn mandatoryPrefix(encoding: *const Encoding) ?u8 {
+ const prefix = encoding.opc[0];
+ return switch (prefix) {
+ 0x66, 0xf2, 0xf3 => prefix,
+ else => null,
+ };
+}
+
+pub fn modRmExt(encoding: Encoding) u3 {
+ return switch (encoding.op_en) {
+ .m, .mi, .m1, .mc => encoding.modrm_ext,
+ else => unreachable,
+ };
+}
+
+pub fn operandBitSize(encoding: Encoding) u64 {
+ if (encoding.mode == .long) return 64;
+ const bit_size: u64 = switch (encoding.op_en) {
+ .np => switch (encoding.op1) {
+ .o16 => 16,
+ .o32 => 32,
+ .o64 => 64,
+ else => 32,
+ },
+ .td => encoding.op2.bitSize(),
+ else => encoding.op1.bitSize(),
+ };
+ return bit_size;
+}
+
+pub fn format(
+ encoding: Encoding,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+) !void {
+ _ = options;
+ _ = fmt;
+ switch (encoding.mode) {
+ .long => try writer.writeAll("REX.W + "),
+ else => {},
+ }
+
+ for (encoding.opcode()) |byte| {
+ try writer.print("{x:0>2} ", .{byte});
+ }
+
+ switch (encoding.op_en) {
+ .np, .fd, .td, .i, .zi, .d => {},
+ .o, .oi => {
+ const tag = switch (encoding.op1) {
+ .r8 => "rb",
+ .r16 => "rw",
+ .r32 => "rd",
+ .r64 => "rd",
+ else => unreachable,
+ };
+ try writer.print("+{s} ", .{tag});
+ },
+ .m, .mi, .m1, .mc => try writer.print("/{d} ", .{encoding.modRmExt()}),
+ .mr, .rm, .rmi => try writer.writeAll("/r "),
+ }
+
+ switch (encoding.op_en) {
+ .i, .d, .zi, .oi, .mi, .rmi => {
+ const op = switch (encoding.op_en) {
+ .i, .d => encoding.op1,
+ .zi, .oi, .mi => encoding.op2,
+ .rmi => encoding.op3,
+ else => unreachable,
+ };
+ const tag = switch (op) {
+ .imm8, .imm8s => "ib",
+ .imm16, .imm16s => "iw",
+ .imm32, .imm32s => "id",
+ .imm64 => "io",
+ .rel8 => "cb",
+ .rel16 => "cw",
+ .rel32 => "cd",
+ else => unreachable,
+ };
+ try writer.print("{s} ", .{tag});
+ },
+ .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm => {},
+ }
+
+ try writer.print("{s} ", .{@tagName(encoding.mnemonic)});
+
+ const ops = &[_]Op{ encoding.op1, encoding.op2, encoding.op3, encoding.op4 };
+ for (ops) |op| switch (op) {
+ .none, .o16, .o32, .o64 => break,
+ else => try writer.print("{s} ", .{@tagName(op)}),
+ };
+
+ const op_en = switch (encoding.op_en) {
+ .zi => .i,
+ else => |op_en| op_en,
+ };
+ try writer.print("{s}", .{@tagName(op_en)});
+}
+
+pub const Mnemonic = enum {
+ // zig fmt: off
+ // General-purpose
+ adc, add, @"and",
+ call, cbw, cwde, cdqe, cwd, cdq, cqo, cmp,
+ cmova, cmovae, cmovb, cmovbe, cmovc, cmove, cmovg, cmovge, cmovl, cmovle, cmovna,
+ cmovnae, cmovnb, cmovnbe, cmovnc, cmovne, cmovng, cmovnge, cmovnl, cmovnle, cmovno,
+ cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovpe, cmovpo, cmovs, cmovz,
+ div,
+ fisttp, fld,
+ idiv, imul, int3,
+ ja, jae, jb, jbe, jc, jrcxz, je, jg, jge, jl, jle, jna, jnae, jnb, jnbe,
+ jnc, jne, jng, jnge, jnl, jnle, jno, jnp, jns, jnz, jo, jp, jpe, jpo, js, jz,
+ jmp,
+ lea,
+ mov, movsx, movsxd, movzx, mul,
+ nop,
+ @"or",
+ pop, push,
+ ret,
+ sal, sar, sbb, shl, shr, sub, syscall,
+ seta, setae, setb, setbe, setc, sete, setg, setge, setl, setle, setna, setnae,
+ setnb, setnbe, setnc, setne, setng, setnge, setnl, setnle, setno, setnp, setns,
+ setnz, seto, setp, setpe, setpo, sets, setz,
+ @"test",
+ ud2,
+ xor,
+ // SSE
+ addss,
+ cmpss,
+ movss,
+ ucomiss,
+ // SSE2
+ addsd,
+ cmpsd,
+ movq, movsd,
+ ucomisd,
+ // zig fmt: on
+};
+
+pub const OpEn = enum {
+ // zig fmt: off
+ np,
+ o, oi,
+ i, zi,
+ d, m,
+ fd, td,
+ m1, mc, mi, mr, rm, rmi,
+ // zig fmt: on
+};
+
+pub const Op = enum {
+ // zig fmt: off
+ none,
+ o16, o32, o64,
+ unity,
+ imm8, imm16, imm32, imm64,
+ imm8s, imm16s, imm32s,
+ al, ax, eax, rax,
+ cl,
+ r8, r16, r32, r64,
+ rm8, rm16, rm32, rm64,
+ m8, m16, m32, m64, m80,
+ rel8, rel16, rel32,
+ m,
+ moffs,
+ sreg,
+ xmm, xmm_m32, xmm_m64,
+ // zig fmt: on
+
+ pub fn fromOperand(operand: Instruction.Operand) Op {
+ switch (operand) {
+ .none => return .none,
+
+ .reg => |reg| {
+ switch (reg.class()) {
+ .segment => return .sreg,
+ .floating_point => return switch (reg.bitSize()) {
+ 128 => .xmm,
+ else => unreachable,
+ },
+ .general_purpose => {
+ if (reg.to64() == .rax) return switch (reg) {
+ .al => .al,
+ .ax => .ax,
+ .eax => .eax,
+ .rax => .rax,
+ else => unreachable,
+ };
+ if (reg == .cl) return .cl;
+ return switch (reg.bitSize()) {
+ 8 => .r8,
+ 16 => .r16,
+ 32 => .r32,
+ 64 => .r64,
+ else => unreachable,
+ };
+ },
+ }
+ },
+
+ .mem => |mem| switch (mem) {
+ .moffs => return .moffs,
+ .sib, .rip => {
+ const bit_size = mem.bitSize();
+ return switch (bit_size) {
+ 8 => .m8,
+ 16 => .m16,
+ 32 => .m32,
+ 64 => .m64,
+ 80 => .m80,
+ else => unreachable,
+ };
+ },
+ },
+
+ .imm => |imm| {
+ switch (imm) {
+ .signed => |x| {
+ if (x == 1) return .unity;
+ if (math.cast(i8, x)) |_| return .imm8s;
+ if (math.cast(i16, x)) |_| return .imm16s;
+ return .imm32s;
+ },
+ .unsigned => |x| {
+ if (x == 1) return .unity;
+ if (math.cast(i8, x)) |_| return .imm8s;
+ if (math.cast(u8, x)) |_| return .imm8;
+ if (math.cast(i16, x)) |_| return .imm16s;
+ if (math.cast(u16, x)) |_| return .imm16;
+ if (math.cast(i32, x)) |_| return .imm32s;
+ if (math.cast(u32, x)) |_| return .imm32;
+ return .imm64;
+ },
+ }
+ },
+ }
+ }
+
+ pub fn bitSize(op: Op) u64 {
+ return switch (op) {
+ .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable,
+ .unity => 1,
+ .imm8, .imm8s, .al, .cl, .r8, .m8, .rm8, .rel8 => 8,
+ .imm16, .imm16s, .ax, .r16, .m16, .rm16, .rel16 => 16,
+ .imm32, .imm32s, .eax, .r32, .m32, .rm32, .rel32, .xmm_m32 => 32,
+ .imm64, .rax, .r64, .m64, .rm64, .xmm_m64 => 64,
+ .m80 => 80,
+ .xmm => 128,
+ };
+ }
+
+ pub fn isSigned(op: Op) bool {
+ return switch (op) {
+ .unity, .imm8, .imm16, .imm32, .imm64 => false,
+ .imm8s, .imm16s, .imm32s => true,
+ else => unreachable,
+ };
+ }
+
+ pub fn isUnsigned(op: Op) bool {
+ return !op.isSigned();
+ }
+
+ pub fn isRegister(op: Op) bool {
+ // zig fmt: off
+ return switch (op) {
+ .cl,
+ .al, .ax, .eax, .rax,
+ .r8, .r16, .r32, .r64,
+ .rm8, .rm16, .rm32, .rm64,
+ .xmm, .xmm_m32, .xmm_m64,
+ => true,
+ else => false,
+ };
+ // zig fmt: on
+ }
+
+ pub fn isImmediate(op: Op) bool {
+ // zig fmt: off
+ return switch (op) {
+ .imm8, .imm16, .imm32, .imm64,
+ .imm8s, .imm16s, .imm32s,
+ .rel8, .rel16, .rel32,
+ .unity,
+ => true,
+ else => false,
+ };
+ // zig fmt: on
+ }
+
+ pub fn isMemory(op: Op) bool {
+ // zig fmt: off
+ return switch (op) {
+ .rm8, .rm16, .rm32, .rm64,
+ .m8, .m16, .m32, .m64, .m80,
+ .m,
+ .xmm_m32, .xmm_m64,
+ => true,
+ else => false,
+ };
+ // zig fmt: on
+ }
+
+ pub fn isSegmentRegister(op: Op) bool {
+ return switch (op) {
+ .moffs, .sreg => true,
+ else => false,
+ };
+ }
+
+ pub fn isFloatingPointRegister(op: Op) bool {
+ return switch (op) {
+ .xmm, .xmm_m32, .xmm_m64 => true,
+ else => false,
+ };
+ }
+
+ /// Given an operand `op`, checks whether `op` is a subset of (i.e. fits within) `target` for the purposes of the encoding.
+ pub fn isSubset(op: Op, target: Op, mode: Mode) bool {
+ switch (op) {
+ .m, .o16, .o32, .o64 => unreachable,
+ .moffs, .sreg => return op == target,
+ .none => switch (target) {
+ .o16, .o32, .o64, .none => return true,
+ else => return false,
+ },
+ else => {
+ if (op.isRegister() and target.isRegister()) {
+ switch (mode) {
+ .sse, .sse2 => return op.isFloatingPointRegister() and target.isFloatingPointRegister(),
+ else => switch (target) {
+ .cl, .al, .ax, .eax, .rax => return op == target,
+ else => return op.bitSize() == target.bitSize(),
+ },
+ }
+ }
+ if (op.isMemory() and target.isMemory()) {
+ switch (target) {
+ .m => return true,
+ else => return op.bitSize() == target.bitSize(),
+ }
+ }
+ if (op.isImmediate() and target.isImmediate()) {
+ switch (target) {
+ .imm64 => if (op.bitSize() <= 64) return true,
+ .imm32s, .rel32 => if (op.bitSize() < 32 or (op.bitSize() == 32 and op.isSigned()))
+ return true,
+ .imm32 => if (op.bitSize() <= 32) return true,
+ .imm16s, .rel16 => if (op.bitSize() < 16 or (op.bitSize() == 16 and op.isSigned()))
+ return true,
+ .imm16 => if (op.bitSize() <= 16) return true,
+ .imm8s, .rel8 => if (op.bitSize() < 8 or (op.bitSize() == 8 and op.isSigned()))
+ return true,
+ .imm8 => if (op.bitSize() <= 8) return true,
+ else => {},
+ }
+ return op == target;
+ }
+ return false;
+ },
+ }
+ }
+};
+
+pub const Mode = enum {
+ none,
+ fpu,
+ rex,
+ long,
+ sse,
+ sse2,
+};
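Encoding selection above is driven by Op.fromOperand (classify each concrete operand) and Op.isSubset (check that the classified operand fits a table entry's operand class), after which findByMnemonic keeps the shortest candidate. A standalone sketch of the unsigned-immediate cascade, mirroring Op.fromOperand without importing the new module (the classify function and its string labels are illustrative only):

const std = @import("std");
const math = std.math;

// Mirrors the unsigned branch of Op.fromOperand: prefer the narrowest signed
// immediate class, then the narrowest unsigned one, falling back to imm64.
fn classifyUnsignedImm(x: u64) []const u8 {
    if (x == 1) return "unity";
    if (math.cast(i8, x)) |_| return "imm8s";
    if (math.cast(u8, x)) |_| return "imm8";
    if (math.cast(i16, x)) |_| return "imm16s";
    if (math.cast(u16, x)) |_| return "imm16";
    if (math.cast(i32, x)) |_| return "imm32s";
    if (math.cast(u32, x)) |_| return "imm32";
    return "imm64";
}

test "unsigned immediate classification cascade" {
    try std.testing.expectEqualStrings("imm8s", classifyUnsignedImm(0x10));
    try std.testing.expectEqualStrings("imm8", classifyUnsignedImm(0xf0));
    try std.testing.expectEqualStrings("imm32", classifyUnsignedImm(0x8000_0000));
    try std.testing.expectEqualStrings("imm64", classifyUnsignedImm(0x1_0000_0000));
}

An imm8s value such as 0x10 is then accepted by any wider immediate target in Op.isSubset, which is how findByMnemonic can still pick, say, an imm32 form when no imm8 form exists for a mnemonic.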
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 112d9a5982..3951108e3a 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -12,9 +12,12 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const bits = @import("bits.zig");
+const encoder = @import("encoder.zig");
+
const Air = @import("../../Air.zig");
const CodeGen = @import("CodeGen.zig");
const IntegerBitSet = std.bit_set.IntegerBitSet;
+const Memory = bits.Memory;
const Register = bits.Register;
instructions: std.MultiArrayList(Inst).Slice,
@@ -24,428 +27,300 @@ extra: []const u32,
pub const Inst = struct {
tag: Tag,
ops: Ops,
- /// The meaning of this depends on `tag` and `ops`.
data: Data,
- pub const Tag = enum(u16) {
- /// ops flags: form:
- /// 0b00 reg1, reg2
- /// 0b00 reg1, imm32
- /// 0b01 reg1, [reg2 + imm32]
- /// 0b01 reg1, [ds:imm32]
- /// 0b10 [reg1 + imm32], reg2
- /// Notes:
- /// * If reg2 is `none` then it means Data field `imm` is used as the immediate.
- /// * When two imm32 values are required, Data field `payload` points at `ImmPair`.
- adc,
-
- /// ops flags: form:
- /// 0b00 byte ptr [reg1 + imm32], imm8
- /// 0b01 word ptr [reg1 + imm32], imm16
- /// 0b10 dword ptr [reg1 + imm32], imm32
- /// 0b11 qword ptr [reg1 + imm32], imm32 (sign-extended to imm64)
- /// Notes:
- /// * Uses `ImmPair` as payload
- adc_mem_imm,
-
- /// form: reg1, [reg2 + scale*index + imm32]
- /// ops flags scale
- /// 0b00 1
- /// 0b01 2
- /// 0b10 4
- /// 0b11 8
- /// Notes:
- /// * Uses `IndexRegisterDisp` as payload
- adc_scale_src,
-
- /// form: [reg1 + scale*index + imm32], reg2
- /// ops flags scale
- /// 0b00 1
- /// 0b01 2
- /// 0b10 4
- /// 0b11 8
- /// Notes:
- /// * Uses `IndexRegisterDisp` payload.
- adc_scale_dst,
-
- /// form: [reg1 + scale*rax + imm32], imm32
- /// ops flags scale
- /// 0b00 1
- /// 0b01 2
- /// 0b10 4
- /// 0b11 8
- /// Notes:
- /// * Uses `IndexRegisterDispImm` payload.
- adc_scale_imm,
-
- /// ops flags: form:
- /// 0b00 byte ptr [reg1 + index + imm32], imm8
- /// 0b01 word ptr [reg1 + index + imm32], imm16
- /// 0b10 dword ptr [reg1 + index + imm32], imm32
- /// 0b11 qword ptr [reg1 + index + imm32], imm32 (sign-extended to imm64)
- /// Notes:
- /// * Uses `IndexRegisterDispImm` payload.
- adc_mem_index_imm,
-
- // The following instructions all have the same encoding as `adc`.
-
- add,
- add_mem_imm,
- add_scale_src,
- add_scale_dst,
- add_scale_imm,
- add_mem_index_imm,
- sub,
- sub_mem_imm,
- sub_scale_src,
- sub_scale_dst,
- sub_scale_imm,
- sub_mem_index_imm,
- xor,
- xor_mem_imm,
- xor_scale_src,
- xor_scale_dst,
- xor_scale_imm,
- xor_mem_index_imm,
- @"and",
- and_mem_imm,
- and_scale_src,
- and_scale_dst,
- and_scale_imm,
- and_mem_index_imm,
- @"or",
- or_mem_imm,
- or_scale_src,
- or_scale_dst,
- or_scale_imm,
- or_mem_index_imm,
- rol,
- rol_mem_imm,
- rol_scale_src,
- rol_scale_dst,
- rol_scale_imm,
- rol_mem_index_imm,
- ror,
- ror_mem_imm,
- ror_scale_src,
- ror_scale_dst,
- ror_scale_imm,
- ror_mem_index_imm,
- rcl,
- rcl_mem_imm,
- rcl_scale_src,
- rcl_scale_dst,
- rcl_scale_imm,
- rcl_mem_index_imm,
- rcr,
- rcr_mem_imm,
- rcr_scale_src,
- rcr_scale_dst,
- rcr_scale_imm,
- rcr_mem_index_imm,
- sbb,
- sbb_mem_imm,
- sbb_scale_src,
- sbb_scale_dst,
- sbb_scale_imm,
- sbb_mem_index_imm,
- cmp,
- cmp_mem_imm,
- cmp_scale_src,
- cmp_scale_dst,
- cmp_scale_imm,
- cmp_mem_index_imm,
- mov,
- mov_mem_imm,
- mov_scale_src,
- mov_scale_dst,
- mov_scale_imm,
- mov_mem_index_imm,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2,
- /// 0b01 reg1, byte ptr [reg2 + imm32]
- /// 0b10 reg1, word ptr [reg2 + imm32]
- /// 0b11 reg1, dword ptr [reg2 + imm32]
- mov_sign_extend,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2
- /// 0b01 reg1, byte ptr [reg2 + imm32]
- /// 0b10 reg1, word ptr [reg2 + imm32]
- mov_zero_extend,
-
- /// ops flags: form:
- /// 0b00 reg1, [reg2 + imm32]
- /// 0b00 reg1, [ds:imm32]
- /// 0b01 reg1, [rip + imm32]
- /// 0b10 reg1, [reg2 + index + imm32]
- /// Notes:
- /// * 0b10 uses `IndexRegisterDisp` payload
- lea,
-
- /// ops flags: form:
- /// 0b00 reg1, [rip + reloc] // via GOT PIC
- /// 0b01 reg1, [rip + reloc] // direct load PIC
- /// 0b10 reg1, [rip + reloc] // via imports table PIC
- /// Notes:
- /// * `Data` contains `relocation`
- lea_pic,
-
- /// ops flags: form:
- /// 0b00 reg1, 1
- /// 0b01 reg1, .cl
- /// 0b10 reg1, imm8
- /// Notes:
- /// * If flags == 0b10, uses `imm`.
- shl,
- shl_mem_imm,
- shl_scale_src,
- shl_scale_dst,
- shl_scale_imm,
- shl_mem_index_imm,
- sal,
- sal_mem_imm,
- sal_scale_src,
- sal_scale_dst,
- sal_scale_imm,
- sal_mem_index_imm,
- shr,
- shr_mem_imm,
- shr_scale_src,
- shr_scale_dst,
- shr_scale_imm,
- shr_mem_index_imm,
- sar,
- sar_mem_imm,
- sar_scale_src,
- sar_scale_dst,
- sar_scale_imm,
- sar_mem_index_imm,
-
- /// ops flags: form:
- /// 0b00 reg1
- /// 0b00 byte ptr [reg2 + imm32]
- /// 0b01 word ptr [reg2 + imm32]
- /// 0b10 dword ptr [reg2 + imm32]
- /// 0b11 qword ptr [reg2 + imm32]
- imul,
- idiv,
- mul,
- div,
-
- /// ops flags: form:
- /// 0b00 AX <- AL
- /// 0b01 DX:AX <- AX
- /// 0b10 EDX:EAX <- EAX
- /// 0b11 RDX:RAX <- RAX
- cwd,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2
- /// 0b01 reg1, [reg2 + imm32]
- /// 0b01 reg1, [imm32] if reg2 is none
- /// 0b10 reg1, reg2, imm32
- /// 0b11 reg1, [reg2 + imm32], imm32
- imul_complex,
-
- /// ops flags: form:
- /// 0b00 reg1, imm64
- /// 0b01 rax, moffs64
- /// Notes:
- /// * If reg1 is 64-bit, the immediate is 64-bit and stored
- /// within extra data `Imm64`.
- /// * For 0b01, reg1 (or reg2) need to be
- /// a version of rax. If reg1 == .none, then reg2 == .rax,
- /// or vice versa.
- movabs,
-
- /// ops flags: form:
- /// 0b00 word ptr [reg1 + imm32]
- /// 0b01 dword ptr [reg1 + imm32]
- /// 0b10 qword ptr [reg1 + imm32]
- /// Notes:
- /// * source is always ST(0)
- /// * only supports memory operands as destination
- fisttp,
-
- /// ops flags: form:
- /// 0b01 dword ptr [reg1 + imm32]
- /// 0b10 qword ptr [reg1 + imm32]
- fld,
-
- /// ops flags: form:
- /// 0b00 inst
- /// 0b01 reg1
- /// 0b01 [imm32] if reg1 is none
- /// 0b10 [reg1 + imm32]
- jmp,
- call,
-
- /// ops flags:
- /// unused
- /// Notes:
- /// * uses `inst_cc` in Data.
- cond_jmp,
-
- /// ops flags:
- /// 0b00 reg1
- /// Notes:
- /// * uses condition code (CC) stored as part of data
- cond_set_byte,
-
- /// ops flags:
- /// 0b00 reg1, reg2,
- /// 0b01 reg1, word ptr [reg2 + imm]
- /// 0b10 reg1, dword ptr [reg2 + imm]
- /// 0b11 reg1, qword ptr [reg2 + imm]
- /// Notes:
- /// * uses condition code (CC) stored as part of data
- cond_mov,
-
- /// ops flags: form:
- /// 0b00 reg1
- /// 0b01 [reg1 + imm32]
- /// 0b10 imm32
- /// Notes:
- /// * If 0b10 is specified and the tag is push, pushes immediate onto the stack
- /// using the mnemonic PUSH imm32.
- push,
- pop,
-
- /// ops flags: form:
- /// 0b00 retf imm16
- /// 0b01 retf
- /// 0b10 retn imm16
- /// 0b11 retn
- ret,
-
- /// Fast system call
- syscall,
-
- /// ops flags: form:
- /// 0b00 reg1, imm32 if reg2 == .none
- /// 0b00 reg1, reg2
- /// TODO handle more cases
- @"test",
-
- /// Breakpoint form:
- /// 0b00 int3
- interrupt,
-
- /// Nop
- nop,
-
- /// SSE instructions
- /// ops flags: form:
- /// 0b00 reg1, qword ptr [reg2 + imm32]
- /// 0b01 qword ptr [reg1 + imm32], reg2
- /// 0b10 reg1, reg2
- mov_f64_sse,
- mov_f32_sse,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2
- add_f64_sse,
- add_f32_sse,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2
- cmp_f64_sse,
- cmp_f32_sse,
-
- /// AVX instructions
- /// ops flags: form:
- /// 0b00 reg1, qword ptr [reg2 + imm32]
- /// 0b01 qword ptr [reg1 + imm32], reg2
- /// 0b10 reg1, reg1, reg2
- mov_f64_avx,
- mov_f32_avx,
-
- /// ops flags: form:
- /// 0b00 reg1, reg1, reg2
- add_f64_avx,
- add_f32_avx,
-
- /// ops flags: form:
- /// 0b00 reg1, reg1, reg2
- cmp_f64_avx,
- cmp_f32_avx,
-
- /// Pseudo-instructions
- /// call extern function
- /// Notes:
- /// * target of the call is stored as `relocation` in `Data` union.
- call_extern,
-
- /// end of prologue
- dbg_prologue_end,
-
- /// start of epilogue
- dbg_epilogue_begin,
-
- /// update debug line
- dbg_line,
-
- /// push registers
- /// Uses `payload` field with `SaveRegisterList` as payload.
- push_regs,
-
- /// pop registers
- /// Uses `payload` field with `SaveRegisterList` as payload.
- pop_regs,
- };
- /// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
- pub const Ops = packed struct {
- reg1: u7,
- reg2: u7,
- flags: u2,
+ pub const Tag = enum(u8) {
+ /// Add with carry
+ adc,
+ /// Add
+ add,
+ /// Logical and
+ @"and",
+ /// Call
+ call,
+ /// Convert byte to word
+ cbw,
+ /// Convert word to doubleword
+ cwde,
+ /// Convert doubleword to quadword
+ cdqe,
+ /// Convert word to doubleword
+ cwd,
+ /// Convert doubleword to quadword
+ cdq,
+ /// Convert quadword to double quadword (sign-extend rax into rdx:rax)
+ cqo,
+ /// Compare two operands
+ cmp,
+ /// Unsigned division
+ div,
+ /// Store integer with truncation
+ fisttp,
+ /// Load floating-point value
+ fld,
+ /// Signed division
+ idiv,
+ /// Signed multiplication
+ imul,
+ /// Breakpoint (software interrupt 3)
+ int3,
+ /// Jump
+ jmp,
+ /// Load effective address
+ lea,
+ /// Move
+ mov,
+ /// Move with sign extension
+ movsx,
+ /// Move with zero extension
+ movzx,
+ /// Multiply
+ mul,
+ /// No-op
+ nop,
+ /// Logical or
+ @"or",
+ /// Pop
+ pop,
+ /// Push
+ push,
+ /// Return
+ ret,
+ /// Arithmetic shift left
+ sal,
+ /// Arithmetic shift right
+ sar,
+ /// Integer subtraction with borrow
+ sbb,
+ /// Logical shift left
+ shl,
+ /// Logical shift right
+ shr,
+ /// Subtract
+ sub,
+ /// Syscall
+ syscall,
+ /// Logical compare (bitwise AND that only sets flags)
+ @"test",
+ /// Undefined instruction
+ ud2,
+ /// Logical exclusive-or
+ xor,
- pub fn encode(vals: struct {
- reg1: Register = .none,
- reg2: Register = .none,
- flags: u2 = 0b00,
- }) Ops {
- return .{
- .reg1 = @enumToInt(vals.reg1),
- .reg2 = @enumToInt(vals.reg2),
- .flags = vals.flags,
- };
- }
+ /// Add scalar single-precision floating-point values
+ addss,
+ /// Compare scalar single-precision floating-point values
+ cmpss,
+ /// Move scalar single-precision floating-point value
+ movss,
+ /// Unordered compare scalar single-precision floating-point values
+ ucomiss,
+ /// Add scalar double-precision floating-point values
+ addsd,
+ /// Compare scalar double-precision floating-point values
+ cmpsd,
+ /// Move scalar double-precision floating-point value
+ movsd,
+ /// Unordered compare scalar double-precision floating-point values
+ ucomisd,
- pub fn decode(ops: Ops) struct {
- reg1: Register,
- reg2: Register,
- flags: u2,
- } {
- return .{
- .reg1 = @intToEnum(Register, ops.reg1),
- .reg2 = @intToEnum(Register, ops.reg2),
- .flags = ops.flags,
- };
- }
+ /// Conditional move
+ cmovcc,
+ /// Conditional jump
+ jcc,
+ /// Set byte on condition
+ setcc,
+
+ /// Move to/from rax via an absolute memory offset (moffs) relative to a segment register
+ mov_moffs,
+
+ /// Jump with relocation to another local MIR instruction
+ /// Uses `inst` payload.
+ jmp_reloc,
+
+ /// Call to an extern symbol via linker relocation.
+ /// Uses `relocation` payload.
+ call_extern,
+
+ /// Load effective address of a symbol not yet allocated in VM.
+ lea_linker,
+
+ /// End of prologue
+ dbg_prologue_end,
+ /// Start of epilogue
+ dbg_epilogue_begin,
+ /// Update debug line
+ /// Uses `payload` payload with data of type `DbgLineColumn`.
+ dbg_line,
+ /// Push registers
+ /// Uses `payload` payload with data of type `SaveRegisterList`.
+ push_regs,
+ /// Pop registers
+ /// Uses `payload` payload with data of type `SaveRegisterList`.
+ pop_regs,
+
+ /// Tombstone
+ /// Emitter should skip this instruction.
+ dead,
+ };
+
+ pub const Ops = enum(u8) {
+ /// No data associated with this instruction (only mnemonic is used).
+ none,
+ /// Single register operand.
+ /// Uses `r` payload.
+ r,
+ /// Register, register operands.
+ /// Uses `rr` payload.
+ rr,
+ /// Register, register, register operands.
+ /// Uses `rrr` payload.
+ rrr,
+ /// Register, register, immediate (sign-extended) operands.
+ /// Uses `rri` payload.
+ rri_s,
+ /// Register, register, immediate (unsigned) operands.
+ /// Uses `rri` payload.
+ rri_u,
+ /// Register with condition code (CC).
+ /// Uses `r_c` payload.
+ r_c,
+ /// Register, register with condition code (CC).
+ /// Uses `rr_c` payload.
+ rr_c,
+ /// Register, immediate (sign-extended) operands.
+ /// Uses `ri` payload.
+ ri_s,
+ /// Register, immediate (unsigned) operands.
+ /// Uses `ri` payload.
+ ri_u,
+ /// Register, 64-bit unsigned immediate operands.
+ /// Uses `rx` payload with payload type `Imm64`.
+ ri64,
+ /// Immediate (sign-extended) operand.
+ /// Uses `imm` payload.
+ imm_s,
+ /// Immediate (unsigned) operand.
+ /// Uses `imm` payload.
+ imm_u,
+ /// Relative displacement operand.
+ /// Uses `imm` payload.
+ rel,
+ /// Register, memory (SIB) operands.
+ /// Uses `rx` payload.
+ rm_sib,
+ /// Register, memory (RIP) operands.
+ /// Uses `rx` payload.
+ rm_rip,
+ /// Single memory (SIB) operand.
+ /// Uses `payload` with extra data of type `MemorySib`.
+ m_sib,
+ /// Single memory (RIP) operand.
+ /// Uses `payload` with extra data of type `MemoryRip`.
+ m_rip,
+ /// Memory (SIB), immediate (unsigned) operands.
+ /// Uses `xi` payload with extra data of type `MemorySib`.
+ mi_u_sib,
+ /// Memory (RIP), immediate (unsigned) operands.
+ /// Uses `xi` payload with extra data of type `MemoryRip`.
+ mi_u_rip,
+ /// Memory (SIB), immediate (sign-extended) operands.
+ /// Uses `xi` payload with extra data of type `MemorySib`.
+ mi_s_sib,
+ /// Memory (RIP), immediate (sign-extended) operands.
+ /// Uses `xi` payload with extra data of type `MemoryRip`.
+ mi_s_rip,
+ /// Memory (SIB), register operands.
+ /// Uses `rx` payload with extra data of type `MemorySib`.
+ mr_sib,
+ /// Memory (RIP), register operands.
+ /// Uses `rx` payload with extra data of type `MemoryRip`.
+ mr_rip,
+ /// rax, memory (moffs) operands.
+ /// Uses `payload` with extra data of type `MemoryMoffs`.
+ rax_moffs,
+ /// Memory (moffs), rax operands.
+ /// Uses `payload` with extra data of type `MemoryMoffs`.
+ moffs_rax,
+ /// References another Mir instruction directly.
+ /// Uses `inst` payload.
+ inst,
+ /// References another Mir instruction directly with condition code (CC).
+ /// Uses `inst_cc` payload.
+ inst_cc,
+ /// Uses `payload` payload with data of type `MemoryConditionCode`.
+ m_cc,
+ /// Uses `rx` payload with extra data of type `MemoryConditionCode`.
+ rm_cc,
+ /// Uses `reloc` payload.
+ reloc,
+ /// Linker relocation - GOT indirection.
+ /// Uses `payload` payload with extra data of type `LeaRegisterReloc`.
+ got_reloc,
+ /// Linker relocation - direct reference.
+ /// Uses `payload` payload with extra data of type `LeaRegisterReloc`.
+ direct_reloc,
+ /// Linker relocation - imports table indirection (binding).
+ /// Uses `payload` payload with extra data of type `LeaRegisterReloc`.
+ import_reloc,
};
- /// All instructions have a 4-byte payload, which is contained within
- /// this union. `Tag` determines which union field is active, as well as
- /// how to interpret the data within.
pub const Data = union {
- /// Another instruction.
+ /// References another Mir instruction.
inst: Index,
- /// A 32-bit immediate value.
- imm: u32,
- /// A condition code for use with EFLAGS register.
- cc: bits.Condition,
- /// Another instruction with condition code.
- /// Used by `cond_jmp`.
+ /// Another instruction with condition code (CC).
+ /// Used by `jcc`.
inst_cc: struct {
/// Another instruction.
inst: Index,
/// A condition code for use with EFLAGS register.
cc: bits.Condition,
},
+ /// A 32-bit immediate value.
+ imm: u32,
+ r: Register,
+ rr: struct {
+ r1: Register,
+ r2: Register,
+ },
+ rrr: struct {
+ r1: Register,
+ r2: Register,
+ r3: Register,
+ },
+ rri: struct {
+ r1: Register,
+ r2: Register,
+ imm: u32,
+ },
+ /// Register with condition code (CC).
+ r_c: struct {
+ r1: Register,
+ cc: bits.Condition,
+ },
+ /// Register, register with condition code (CC).
+ rr_c: struct {
+ r1: Register,
+ r2: Register,
+ cc: bits.Condition,
+ },
+ /// Register, immediate.
+ ri: struct {
+ r1: Register,
+ imm: u32,
+ },
+ /// Register, followed by custom payload found in extra.
+ rx: struct {
+ r1: Register,
+ payload: u32,
+ },
+ /// Custom payload followed by an immediate.
+ xi: struct {
+ payload: u32,
+ imm: u32,
+ },
/// Relocation for the linker where:
/// * `atom_index` is the index of the source
/// * `sym_index` is the index of the target
@@ -468,62 +343,13 @@ pub const Inst = struct {
}
};
-pub const IndexRegisterDisp = struct {
- /// Index register to use with SIB-based encoding
- index: u32,
-
- /// Displacement value
- disp: u32,
-
- pub fn encode(index: Register, disp: u32) IndexRegisterDisp {
- return .{
- .index = @enumToInt(index),
- .disp = disp,
- };
- }
-
- pub fn decode(this: IndexRegisterDisp) struct {
- index: Register,
- disp: u32,
- } {
- return .{
- .index = @intToEnum(Register, this.index),
- .disp = this.disp,
- };
- }
-};
-
-/// TODO: would it be worth making `IndexRegisterDisp` and `IndexRegisterDispImm` a variable length list
-/// instead of having two structs, one a superset of the other one?
-pub const IndexRegisterDispImm = struct {
- /// Index register to use with SIB-based encoding
- index: u32,
-
- /// Displacement value
- disp: u32,
-
- /// Immediate
- imm: u32,
-
- pub fn encode(index: Register, disp: u32, imm: u32) IndexRegisterDispImm {
- return .{
- .index = @enumToInt(index),
- .disp = disp,
- .imm = imm,
- };
- }
-
- pub fn decode(this: IndexRegisterDispImm) struct {
- index: Register,
- disp: u32,
- imm: u32,
- } {
- return .{
- .index = @intToEnum(Register, this.index),
- .disp = this.disp,
- .imm = this.imm,
- };
- }
+pub const LeaRegisterReloc = struct {
+ /// Destination register.
+ reg: u32,
+ /// Index of the containing atom.
+ atom_index: u32,
+ /// Index into the linker's symbol table.
+ sym_index: u32,
};
/// Used in conjunction with `SaveRegisterList` payload to transfer a list of used registers
@@ -567,16 +393,13 @@ pub const RegisterList = struct {
};
pub const SaveRegisterList = struct {
+ /// Base register
+ base_reg: u32,
/// Use `RegisterList` to populate.
register_list: u32,
stack_end: u32,
};
-pub const ImmPair = struct {
- dest_off: u32,
- operand: u32,
-};
-
pub const Imm64 = struct {
msb: u32,
lsb: u32,
@@ -596,6 +419,90 @@ pub const Imm64 = struct {
}
};
+// TODO this can be further compacted using a packed struct
+pub const MemorySib = struct {
+ /// Size of the pointer.
+ ptr_size: u32,
+ /// Base register. -1 means null, or no base register.
+ base: i32,
+ /// Scale for index register. -1 means null, or no scale.
+ /// This has to be in sync with `index` field.
+ scale: i32,
+ /// Index register. -1 means null, or no index register.
+ /// This has to be in sync with `scale` field.
+ index: i32,
+ /// Displacement value.
+ disp: i32,
+
+ pub fn encode(mem: Memory) MemorySib {
+ const sib = mem.sib;
+ return .{
+ .ptr_size = @enumToInt(sib.ptr_size),
+ .base = if (sib.base) |r| @enumToInt(r) else -1,
+ .scale = if (sib.scale_index) |si| si.scale else -1,
+ .index = if (sib.scale_index) |si| @enumToInt(si.index) else -1,
+ .disp = sib.disp,
+ };
+ }
+
+ pub fn decode(msib: MemorySib) Memory {
+ const base: ?Register = if (msib.base == -1) null else @intToEnum(Register, msib.base);
+ const scale_index: ?Memory.ScaleIndex = if (msib.index == -1) null else .{
+ .scale = @intCast(u4, msib.scale),
+ .index = @intToEnum(Register, msib.index),
+ };
+ const mem: Memory = .{ .sib = .{
+ .ptr_size = @intToEnum(Memory.PtrSize, msib.ptr_size),
+ .base = base,
+ .scale_index = scale_index,
+ .disp = msib.disp,
+ } };
+ return mem;
+ }
+};
+
+pub const MemoryRip = struct {
+ /// Size of the pointer.
+ ptr_size: u32,
+ /// Displacement value.
+ disp: i32,
+
+ pub fn encode(mem: Memory) MemoryRip {
+ return .{
+ .ptr_size = @enumToInt(mem.rip.ptr_size),
+ .disp = mem.rip.disp,
+ };
+ }
+
+ pub fn decode(mrip: MemoryRip) Memory {
+ return .{ .rip = .{
+ .ptr_size = @intToEnum(Memory.PtrSize, mrip.ptr_size),
+ .disp = mrip.disp,
+ } };
+ }
+};
+
+pub const MemoryMoffs = struct {
+ /// Segment register.
+ seg: u32,
+ /// Absolute offset with respect to the segment register, split between MSB and LSB parts much like
+ /// `Imm64` payload.
+ msb: u32,
+ lsb: u32,
+
+ pub fn encodeOffset(moffs: *MemoryMoffs, v: u64) void {
+ moffs.msb = @truncate(u32, v >> 32);
+ moffs.lsb = @truncate(u32, v);
+ }
+
+ pub fn decodeOffset(moffs: *const MemoryMoffs) u64 {
+ var res: u64 = 0;
+ res |= (@intCast(u64, moffs.msb) << 32);
+ res |= @intCast(u64, moffs.lsb);
+ return res;
+ }
+};
+
pub const DbgLineColumn = struct {
line: u32,
column: u32,
@@ -607,9 +514,9 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.* = undefined;
}
-pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
+pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end: u32 } {
const fields = std.meta.fields(T);
- var i: usize = index;
+ var i: u32 = index;
var result: T = undefined;
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
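The new MemorySib, MemoryRip and MemoryMoffs records flatten bits.Memory into Mir extra data; MemoryMoffs in particular splits its 64-bit absolute offset into two u32 words, like Imm64. A standalone sketch (plain std.testing, reusing the same truncation pattern as encodeOffset/decodeOffset above) of that round trip:

const std = @import("std");

test "moffs offset round-trips through msb/lsb halves" {
    const offset: u64 = 0x1122_3344_5566_7788;
    // Split as in MemoryMoffs.encodeOffset ...
    const msb = @truncate(u32, offset >> 32);
    const lsb = @truncate(u32, offset);
    // ... and reassemble as in MemoryMoffs.decodeOffset.
    const decoded = (@as(u64, msb) << 32) | @as(u64, lsb);
    try std.testing.expectEqual(offset, decoded);
}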
diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig
index cc123b96b6..043e589af4 100644
--- a/src/arch/x86_64/bits.zig
+++ b/src/arch/x86_64/bits.zig
@@ -1,9 +1,9 @@
const std = @import("std");
-const testing = std.testing;
-const mem = std.mem;
const assert = std.debug.assert;
-const ArrayList = std.ArrayList;
+const expect = std.testing.expect;
+
const Allocator = std.mem.Allocator;
+const ArrayList = std.ArrayList;
const DW = std.dwarf;
/// EFLAGS condition codes
@@ -135,131 +135,199 @@ pub const Condition = enum(u5) {
}
};
-/// Definitions of all of the general purpose x64 registers. The order is semantically meaningful.
-/// The registers are defined such that IDs go in descending order of 64-bit,
-/// 32-bit, 16-bit, and then 8-bit, and each set contains exactly sixteen
-/// registers. This results in some useful properties:
-///
-/// Any 64-bit register can be turned into its 32-bit form by adding 16, and
-/// vice versa. This also works between 32-bit and 16-bit forms. With 8-bit, it
-/// works for all except for sp, bp, si, and di, which do *not* have an 8-bit
-/// form.
-///
-/// If (register & 8) is set, the register is extended.
-///
-/// The ID can be easily determined by figuring out what range the register is
-/// in, and then subtracting the base.
pub const Register = enum(u7) {
// zig fmt: off
- // 0 through 15, 64-bit registers. 8-15 are extended.
- // id is just the int value.
rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
r8, r9, r10, r11, r12, r13, r14, r15,
- // 16 through 31, 32-bit registers. 24-31 are extended.
- // id is int value - 16.
eax, ecx, edx, ebx, esp, ebp, esi, edi,
r8d, r9d, r10d, r11d, r12d, r13d, r14d, r15d,
- // 32-47, 16-bit registers. 40-47 are extended.
- // id is int value - 32.
ax, cx, dx, bx, sp, bp, si, di,
r8w, r9w, r10w, r11w, r12w, r13w, r14w, r15w,
- // 48-63, 8-bit registers. 56-63 are extended.
- // id is int value - 48.
- al, cl, dl, bl, ah, ch, dh, bh,
+ al, cl, dl, bl, spl, bpl, sil, dil,
r8b, r9b, r10b, r11b, r12b, r13b, r14b, r15b,
- // 64-79, 256-bit registers.
- // id is int value - 64.
+ ah, ch, dh, bh,
+
ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7,
ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15,
- // 80-95, 128-bit registers.
- // id is int value - 80.
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
- // Pseudo-value for MIR instructions.
+ es, cs, ss, ds, fs, gs,
+
none,
// zig fmt: on
- pub fn id(self: Register) u7 {
- return switch (@enumToInt(self)) {
- 0...63 => @as(u7, @truncate(u4, @enumToInt(self))),
- 64...79 => @enumToInt(self),
+ pub const Class = enum(u2) {
+ general_purpose,
+ floating_point,
+ segment,
+ };
+
+ pub fn class(reg: Register) Class {
+ return switch (@enumToInt(reg)) {
+ // zig fmt: off
+ @enumToInt(Register.rax) ... @enumToInt(Register.r15) => .general_purpose,
+ @enumToInt(Register.eax) ... @enumToInt(Register.r15d) => .general_purpose,
+ @enumToInt(Register.ax) ... @enumToInt(Register.r15w) => .general_purpose,
+ @enumToInt(Register.al) ... @enumToInt(Register.r15b) => .general_purpose,
+ @enumToInt(Register.ah) ... @enumToInt(Register.bh) => .general_purpose,
+
+ @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => .floating_point,
+ @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => .floating_point,
+
+ @enumToInt(Register.es) ... @enumToInt(Register.gs) => .segment,
+
+ else => unreachable,
+ // zig fmt: on
+ };
+ }
+
+ pub fn id(reg: Register) u6 {
+ const base = switch (@enumToInt(reg)) {
+ // zig fmt: off
+ @enumToInt(Register.rax) ... @enumToInt(Register.r15) => @enumToInt(Register.rax),
+ @enumToInt(Register.eax) ... @enumToInt(Register.r15d) => @enumToInt(Register.eax),
+ @enumToInt(Register.ax) ... @enumToInt(Register.r15w) => @enumToInt(Register.ax),
+ @enumToInt(Register.al) ... @enumToInt(Register.r15b) => @enumToInt(Register.al),
+ @enumToInt(Register.ah) ... @enumToInt(Register.bh) => @enumToInt(Register.ah) - 4,
+
+ @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => @enumToInt(Register.ymm0) - 16,
+ @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => @enumToInt(Register.xmm0) - 16,
+
+ @enumToInt(Register.es) ... @enumToInt(Register.gs) => @enumToInt(Register.es) - 32,
+
+ else => unreachable,
+ // zig fmt: on
+ };
+ return @intCast(u6, @enumToInt(reg) - base);
+ }
+
+ pub fn bitSize(reg: Register) u64 {
+ return switch (@enumToInt(reg)) {
+ // zig fmt: off
+ @enumToInt(Register.rax) ... @enumToInt(Register.r15) => 64,
+ @enumToInt(Register.eax) ... @enumToInt(Register.r15d) => 32,
+ @enumToInt(Register.ax) ... @enumToInt(Register.r15w) => 16,
+ @enumToInt(Register.al) ... @enumToInt(Register.r15b) => 8,
+ @enumToInt(Register.ah) ... @enumToInt(Register.bh) => 8,
+
+ @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => 256,
+ @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => 128,
+
+ @enumToInt(Register.es) ... @enumToInt(Register.gs) => 16,
+
+ else => unreachable,
+ // zig fmt: on
+ };
+ }
+
+ pub fn isExtended(reg: Register) bool {
+ return switch (@enumToInt(reg)) {
+ // zig fmt: off
+ @enumToInt(Register.r8) ... @enumToInt(Register.r15) => true,
+ @enumToInt(Register.r8d) ... @enumToInt(Register.r15d) => true,
+ @enumToInt(Register.r8w) ... @enumToInt(Register.r15w) => true,
+ @enumToInt(Register.r8b) ... @enumToInt(Register.r15b) => true,
+
+ @enumToInt(Register.ymm8) ... @enumToInt(Register.ymm15) => true,
+ @enumToInt(Register.xmm8) ... @enumToInt(Register.xmm15) => true,
+
+ else => false,
+ // zig fmt: on
+ };
+ }
+
+ pub fn enc(reg: Register) u4 {
+ const base = switch (@enumToInt(reg)) {
+ // zig fmt: off
+ @enumToInt(Register.rax) ... @enumToInt(Register.r15) => @enumToInt(Register.rax),
+ @enumToInt(Register.eax) ... @enumToInt(Register.r15d) => @enumToInt(Register.eax),
+ @enumToInt(Register.ax) ... @enumToInt(Register.r15w) => @enumToInt(Register.ax),
+ @enumToInt(Register.al) ... @enumToInt(Register.r15b) => @enumToInt(Register.al),
+ @enumToInt(Register.ah) ... @enumToInt(Register.bh) => @enumToInt(Register.ah) - 4,
+
+ @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => @enumToInt(Register.ymm0),
+ @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => @enumToInt(Register.xmm0),
+
+ @enumToInt(Register.es) ... @enumToInt(Register.gs) => @enumToInt(Register.es),
+
+ else => unreachable,
+ // zig fmt: on
+ };
+ return @truncate(u4, @enumToInt(reg) - base);
+ }
+
+ pub fn lowEnc(reg: Register) u3 {
+ return @truncate(u3, reg.enc());
+ }
+
+ pub fn toBitSize(reg: Register, bit_size: u64) Register {
+ return switch (bit_size) {
+ 8 => reg.to8(),
+ 16 => reg.to16(),
+ 32 => reg.to32(),
+ 64 => reg.to64(),
+ 128 => reg.to128(),
+ 256 => reg.to256(),
else => unreachable,
};
}
- /// Returns the bit-width of the register.
- pub fn size(self: Register) u9 {
- return switch (@enumToInt(self)) {
- 0...15 => 64,
- 16...31 => 32,
- 32...47 => 16,
- 48...63 => 8,
- 64...79 => 256,
- 80...95 => 128,
+ fn gpBase(reg: Register) u7 {
+ assert(reg.class() == .general_purpose);
+ return switch (@enumToInt(reg)) {
+ // zig fmt: off
+ @enumToInt(Register.rax) ... @enumToInt(Register.r15) => @enumToInt(Register.rax),
+ @enumToInt(Register.eax) ... @enumToInt(Register.r15d) => @enumToInt(Register.eax),
+ @enumToInt(Register.ax) ... @enumToInt(Register.r15w) => @enumToInt(Register.ax),
+ @enumToInt(Register.al) ... @enumToInt(Register.r15b) => @enumToInt(Register.al),
+ @enumToInt(Register.ah) ... @enumToInt(Register.bh) => @enumToInt(Register.ah) - 4,
+ else => unreachable,
+ // zig fmt: on
+ };
+ }
+
+ pub fn to64(reg: Register) Register {
+ return @intToEnum(Register, @enumToInt(reg) - reg.gpBase() + @enumToInt(Register.rax));
+ }
+
+ pub fn to32(reg: Register) Register {
+ return @intToEnum(Register, @enumToInt(reg) - reg.gpBase() + @enumToInt(Register.eax));
+ }
+
+ pub fn to16(reg: Register) Register {
+ return @intToEnum(Register, @enumToInt(reg) - reg.gpBase() + @enumToInt(Register.ax));
+ }
+
+ pub fn to8(reg: Register) Register {
+ return @intToEnum(Register, @enumToInt(reg) - reg.gpBase() + @enumToInt(Register.al));
+ }
+
+ fn fpBase(reg: Register) u7 {
+ assert(reg.class() == .floating_point);
+ return switch (@enumToInt(reg)) {
+ @enumToInt(Register.ymm0)...@enumToInt(Register.ymm15) => @enumToInt(Register.ymm0),
+ @enumToInt(Register.xmm0)...@enumToInt(Register.xmm15) => @enumToInt(Register.xmm0),
else => unreachable,
};
}
- /// Returns whether the register is *extended*. Extended registers are the
- /// new registers added with amd64, r8 through r15. This also includes any
- /// other variant of access to those registers, such as r8b, r15d, and so
- /// on. This is needed because access to these registers requires special
- /// handling via the REX prefix, via the B or R bits, depending on context.
- pub fn isExtended(self: Register) bool {
- return @enumToInt(self) & 0x08 != 0;
+ pub fn to256(reg: Register) Register {
+ return @intToEnum(Register, @enumToInt(reg) - reg.fpBase() + @enumToInt(Register.ymm0));
}
- /// This returns the 4-bit register ID, which is used in practically every
- /// opcode. Note that bit 3 (the highest bit) is *never* used directly in
- /// an instruction (@see isExtended), and requires special handling. The
- /// lower three bits are often embedded directly in instructions (such as
- /// the B8 variant of moves), or used in R/M bytes.
- pub fn enc(self: Register) u4 {
- return @truncate(u4, @enumToInt(self));
+ pub fn to128(reg: Register) Register {
+ return @intToEnum(Register, @enumToInt(reg) - reg.fpBase() + @enumToInt(Register.xmm0));
}
- /// Like enc, but only returns the lower 3 bits.
- pub fn lowEnc(self: Register) u3 {
- return @truncate(u3, @enumToInt(self));
- }
-
- pub fn to256(self: Register) Register {
- return @intToEnum(Register, @as(u8, self.enc()) + 64);
- }
-
- pub fn to128(self: Register) Register {
- return @intToEnum(Register, @as(u8, self.enc()) + 80);
- }
-
- /// Convert from any register to its 64 bit alias.
- pub fn to64(self: Register) Register {
- return @intToEnum(Register, self.enc());
- }
-
- /// Convert from any register to its 32 bit alias.
- pub fn to32(self: Register) Register {
- return @intToEnum(Register, @as(u8, self.enc()) + 16);
- }
-
- /// Convert from any register to its 16 bit alias.
- pub fn to16(self: Register) Register {
- return @intToEnum(Register, @as(u8, self.enc()) + 32);
- }
-
- /// Convert from any register to its 8 bit alias.
- pub fn to8(self: Register) Register {
- return @intToEnum(Register, @as(u8, self.enc()) + 48);
- }
-
- pub fn dwarfLocOp(self: Register) u8 {
- switch (@enumToInt(self)) {
- 0...63 => return switch (self.to64()) {
+ pub fn dwarfLocOp(reg: Register) u8 {
+ return switch (reg.class()) {
+ .general_purpose => switch (reg.to64()) {
.rax => DW.OP.reg0,
.rdx => DW.OP.reg1,
.rcx => DW.OP.reg2,
@@ -268,31 +336,19 @@ pub const Register = enum(u7) {
.rdi => DW.OP.reg5,
.rbp => DW.OP.reg6,
.rsp => DW.OP.reg7,
-
- .r8 => DW.OP.reg8,
- .r9 => DW.OP.reg9,
- .r10 => DW.OP.reg10,
- .r11 => DW.OP.reg11,
- .r12 => DW.OP.reg12,
- .r13 => DW.OP.reg13,
- .r14 => DW.OP.reg14,
- .r15 => DW.OP.reg15,
-
- else => unreachable,
+ else => @intCast(u8, @enumToInt(reg) - reg.gpBase()) + DW.OP.reg0,
},
-
- 64...79 => return @as(u8, self.enc()) + DW.OP.reg17,
-
+ .floating_point => @intCast(u8, @enumToInt(reg) - reg.fpBase()) + DW.OP.reg17,
else => unreachable,
- }
+ };
}
/// DWARF encodings that push a value onto the DWARF stack that is either
/// the contents of a register or the result of adding the contents a given
/// register to a given signed offset.
- pub fn dwarfLocOpDeref(self: Register) u8 {
- switch (@enumToInt(self)) {
- 0...63 => return switch (self.to64()) {
+ pub fn dwarfLocOpDeref(reg: Register) u8 {
+ return switch (reg.class()) {
+ .general_purpose => switch (reg.to64()) {
.rax => DW.OP.breg0,
.rdx => DW.OP.breg1,
.rcx => DW.OP.breg2,
@@ -300,795 +356,195 @@ pub const Register = enum(u7) {
.rsi => DW.OP.breg4,
.rdi => DW.OP.breg5,
.rbp => DW.OP.breg6,
- .rsp => DW.OP.fbreg,
+ .rsp => DW.OP.breg7,
+ else => @intCast(u8, @enumToInt(reg) - reg.gpBase()) + DW.OP.breg0,
+ },
+ .floating_point => @intCast(u8, @enumToInt(reg) - reg.fpBase()) + DW.OP.breg17,
+ else => unreachable,
+ };
+ }
+};
- .r8 => DW.OP.breg8,
- .r9 => DW.OP.breg9,
- .r10 => DW.OP.breg10,
- .r11 => DW.OP.breg11,
- .r12 => DW.OP.breg12,
- .r13 => DW.OP.breg13,
- .r14 => DW.OP.breg14,
- .r15 => DW.OP.breg15,
+test "Register id - different classes" {
+ try expect(Register.al.id() == Register.ax.id());
+ try expect(Register.ah.id() == Register.spl.id());
+ try expect(Register.ax.id() == Register.eax.id());
+ try expect(Register.eax.id() == Register.rax.id());
+ try expect(Register.ymm0.id() == 0b10000);
+ try expect(Register.ymm0.id() != Register.rax.id());
+ try expect(Register.xmm0.id() == Register.ymm0.id());
+
+ try expect(Register.es.id() == 0b100000);
+}
+
+test "Register enc - different classes" {
+ try expect(Register.al.enc() == Register.ax.enc());
+ try expect(Register.ax.enc() == Register.eax.enc());
+ try expect(Register.eax.enc() == Register.rax.enc());
+ try expect(Register.ymm0.enc() == Register.rax.enc());
+ try expect(Register.xmm0.enc() == Register.ymm0.enc());
+ try expect(Register.es.enc() == Register.rax.enc());
+}
+
+test "Register classes" {
+ try expect(Register.r11.class() == .general_purpose);
+ try expect(Register.ymm11.class() == .floating_point);
+ try expect(Register.fs.class() == .segment);
+}
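+
+// A brief illustrative sketch (not exhaustive) of the aliasing helpers above:
+// resizing a general purpose register walks between its aliases, and the
+// DWARF mapping for extended registers is derived from the class base.
+test "Register aliasing helpers" {
+ try expect(Register.rcx.toBitSize(8) == .cl);
+ try expect(Register.r11d.to64() == .r11);
+ try expect(Register.xmm7.toBitSize(256) == .ymm7);
+ try expect(Register.r9.lowEnc() == Register.r9b.lowEnc());
+ try expect(Register.r13.dwarfLocOp() == DW.OP.reg13);
+}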
+
+pub const Memory = union(enum) {
+ sib: Sib,
+ rip: Rip,
+ moffs: Moffs,
+
+ pub const ScaleIndex = packed struct {
+ scale: u4,
+ index: Register,
+ };
+
+ pub const PtrSize = enum {
+ byte,
+ word,
+ dword,
+ qword,
+ tbyte,
+
+ pub fn fromSize(size: u32) PtrSize {
+ return if (size <= 1)
+ .byte
+ else if (size <= 2)
+ .word
+ else if (size <= 4)
+ .dword
+ else if (size <= 8)
+ .qword
+ else if (size == 10)
+ .tbyte
+ else
+ unreachable;
+ }
+
+ pub fn fromBitSize(bit_size: u64) PtrSize {
+ return switch (bit_size) {
+ 8 => .byte,
+ 16 => .word,
+ 32 => .dword,
+ 64 => .qword,
+ 80 => .tbyte,
+ else => unreachable,
+ };
+ }
+
+ pub fn bitSize(s: PtrSize) u64 {
+ return switch (s) {
+ .byte => 8,
+ .word => 16,
+ .dword => 32,
+ .qword => 64,
+ .tbyte => 80,
+ };
+ }
+ };
+
+ pub const Sib = struct {
+ ptr_size: PtrSize,
+ base: ?Register,
+ scale_index: ?ScaleIndex,
+ disp: i32,
+ };
+
+ pub const Rip = struct {
+ ptr_size: PtrSize,
+ disp: i32,
+ };
+
+ pub const Moffs = struct {
+ seg: Register,
+ offset: u64,
+ };
+
+ pub fn moffs(reg: Register, offset: u64) Memory {
+ assert(reg.class() == .segment);
+ return .{ .moffs = .{ .seg = reg, .offset = offset } };
+ }
+
+ pub fn sib(ptr_size: PtrSize, args: struct {
+ disp: i32,
+ base: ?Register = null,
+ scale_index: ?ScaleIndex = null,
+ }) Memory {
+ return .{ .sib = .{
+ .base = args.base,
+ .disp = args.disp,
+ .ptr_size = ptr_size,
+ .scale_index = args.scale_index,
+ } };
+ }
+
+ pub fn rip(ptr_size: PtrSize, disp: i32) Memory {
+ return .{ .rip = .{ .ptr_size = ptr_size, .disp = disp } };
+ }
+
+ pub fn isSegmentRegister(mem: Memory) bool {
+ return switch (mem) {
+ .moffs => true,
+ .rip => false,
+ .sib => |s| if (s.base) |r| r.class() == .segment else false,
+ };
+ }
+
+ pub fn base(mem: Memory) ?Register {
+ return switch (mem) {
+ .moffs => |m| m.seg,
+ .sib => |s| s.base,
+ .rip => null,
+ };
+ }
+
+ pub fn scaleIndex(mem: Memory) ?ScaleIndex {
+ return switch (mem) {
+ .moffs, .rip => null,
+ .sib => |s| s.scale_index,
+ };
+ }
+
+ pub fn bitSize(mem: Memory) u64 {
+ return switch (mem) {
+ .rip => |r| r.ptr_size.bitSize(),
+ .sib => |s| s.ptr_size.bitSize(),
+ .moffs => unreachable,
+ };
+ }
+};
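+
+// An illustrative sketch of the Memory constructors above; the operands are
+// arbitrary and only meant to exercise the three addressing forms and the
+// PtrSize helpers.
+test "Memory constructors" {
+ const frame = Memory.sib(.qword, .{ .base = .rbp, .disp = -8 });
+ try expect(frame.base().? == .rbp);
+ try expect(frame.bitSize() == 64);
+
+ const rip_rel = Memory.rip(.dword, 0x10);
+ try expect(rip_rel.base() == null);
+ try expect(rip_rel.scaleIndex() == null);
+
+ const seg = Memory.moffs(.fs, 0x1000);
+ try expect(seg.isSegmentRegister());
+
+ try expect(Memory.PtrSize.fromSize(4) == .dword);
+ try expect(Memory.PtrSize.fromBitSize(80) == .tbyte);
+}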
+
+pub const Immediate = union(enum) {
+ signed: i32,
+ unsigned: u64,
+
+ pub fn u(x: u64) Immediate {
+ return .{ .unsigned = x };
+ }
+
+ pub fn s(x: i32) Immediate {
+ return .{ .signed = x };
+ }
+
+ pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 {
+ return switch (imm) {
+ .signed => |x| switch (bit_size) {
+ 1, 8 => @bitCast(u8, @intCast(i8, x)),
+ 16 => @bitCast(u16, @intCast(i16, x)),
+ 32, 64 => @bitCast(u32, x),
else => unreachable,
},
-
- 64...79 => return @as(u8, self.enc()) + DW.OP.breg17,
-
- else => unreachable,
- }
+ .unsigned => |x| switch (bit_size) {
+ 1, 8 => @intCast(u8, x),
+ 16 => @intCast(u16, x),
+ 32 => @intCast(u32, x),
+ 64 => x,
+ else => unreachable,
+ },
+ };
}
};
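+
+// A short sketch of the asUnsigned contract relied upon by the encoder:
+// signed immediates are narrowed to the requested width and bit-cast,
+// while unsigned immediates must already fit that width.
+test "Immediate.asUnsigned" {
+ try expect(Immediate.s(-1).asUnsigned(8) == 0xff);
+ try expect(Immediate.s(-16).asUnsigned(16) == 0xfff0);
+ try expect(Immediate.u(0x10).asUnsigned(32) == 0x10);
+}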
-
-// zig fmt: on
-
-/// Encoding helper functions for x86_64 instructions
-///
-/// Many of these helpers do very little, but they can help make things
-/// slightly more readable with more descriptive field names / function names.
-///
-/// Some of them also have asserts to ensure that we aren't doing dumb things.
-/// For example, trying to use register 4 (esp) in an indirect modr/m byte is illegal,
-/// you need to encode it with an SIB byte.
-///
-/// Note that ALL of these helper functions will assume capacity,
-/// so ensure that the `code` has sufficient capacity before using them.
-/// The `init` method is the recommended way to ensure capacity.
-pub const Encoder = struct {
- /// Non-owning reference to the code array
- code: *ArrayList(u8),
-
- const Self = @This();
-
- /// Wrap `code` in Encoder to make it easier to call these helper functions
- ///
- /// maximum_inst_size should contain the maximum number of bytes
- /// that the encoded instruction will take.
- /// This is because the helper functions will assume capacity
- /// in order to avoid bounds checking.
- pub fn init(code: *ArrayList(u8), maximum_inst_size: u8) !Self {
- try code.ensureUnusedCapacity(maximum_inst_size);
- return Self{ .code = code };
- }
-
- /// Directly write a number to the code array with big endianness
- pub fn writeIntBig(self: Self, comptime T: type, value: T) void {
- mem.writeIntBig(
- T,
- self.code.addManyAsArrayAssumeCapacity(@divExact(@typeInfo(T).Int.bits, 8)),
- value,
- );
- }
-
- /// Directly write a number to the code array with little endianness
- pub fn writeIntLittle(self: Self, comptime T: type, value: T) void {
- mem.writeIntLittle(
- T,
- self.code.addManyAsArrayAssumeCapacity(@divExact(@typeInfo(T).Int.bits, 8)),
- value,
- );
- }
-
- // --------
- // Prefixes
- // --------
-
- pub const LegacyPrefixes = packed struct {
- /// LOCK
- prefix_f0: bool = false,
- /// REPNZ, REPNE, REP, Scalar Double-precision
- prefix_f2: bool = false,
- /// REPZ, REPE, REP, Scalar Single-precision
- prefix_f3: bool = false,
-
- /// CS segment override or Branch not taken
- prefix_2e: bool = false,
- /// DS segment override
- prefix_36: bool = false,
- /// ES segment override
- prefix_26: bool = false,
- /// FS segment override
- prefix_64: bool = false,
- /// GS segment override
- prefix_65: bool = false,
-
- /// Branch taken
- prefix_3e: bool = false,
-
- /// Operand size override (enables 16 bit operation)
- prefix_66: bool = false,
-
- /// Address size override (enables 16 bit address size)
- prefix_67: bool = false,
-
- padding: u5 = 0,
- };
-
- /// Encodes legacy prefixes
- pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) void {
- if (@bitCast(u16, prefixes) != 0) {
- // Hopefully this path isn't taken very often, so we'll do it the slow way for now
-
- // LOCK
- if (prefixes.prefix_f0) self.code.appendAssumeCapacity(0xf0);
- // REPNZ, REPNE, REP, Scalar Double-precision
- if (prefixes.prefix_f2) self.code.appendAssumeCapacity(0xf2);
- // REPZ, REPE, REP, Scalar Single-precision
- if (prefixes.prefix_f3) self.code.appendAssumeCapacity(0xf3);
-
- // CS segment override or Branch not taken
- if (prefixes.prefix_2e) self.code.appendAssumeCapacity(0x2e);
- // DS segment override
- if (prefixes.prefix_36) self.code.appendAssumeCapacity(0x36);
- // ES segment override
- if (prefixes.prefix_26) self.code.appendAssumeCapacity(0x26);
- // FS segment override
- if (prefixes.prefix_64) self.code.appendAssumeCapacity(0x64);
- // GS segment override
- if (prefixes.prefix_65) self.code.appendAssumeCapacity(0x65);
-
- // Branch taken
- if (prefixes.prefix_3e) self.code.appendAssumeCapacity(0x3e);
-
- // Operand size override
- if (prefixes.prefix_66) self.code.appendAssumeCapacity(0x66);
-
- // Address size override
- if (prefixes.prefix_67) self.code.appendAssumeCapacity(0x67);
- }
- }
-
- /// Use 16 bit operand size
- ///
- /// Note that this flag is overridden by REX.W, if both are present.
- pub fn prefix16BitMode(self: Self) void {
- self.code.appendAssumeCapacity(0x66);
- }
-
- pub const Vex = struct {
- rex_prefix: Rex = .{},
- lead_opc: u5 = 0b0_0001,
- register: u4 = 0b1111,
- length: u1 = 0b0,
- simd_prefix: u2 = 0b00,
- wig_desc: bool = false,
- lig_desc: bool = false,
- lz_desc: bool = false,
-
- pub fn rex(self: *Vex, r: Rex) void {
- self.rex_prefix = r;
- }
-
- pub fn lead_opc_0f(self: *Vex) void {
- self.lead_opc = 0b0_0001;
- }
-
- pub fn lead_opc_0f_38(self: *Vex) void {
- self.lead_opc = 0b0_0010;
- }
-
- pub fn lead_opc_0f_3a(self: *Vex) void {
- self.lead_opc = 0b0_0011;
- }
-
- pub fn reg(self: *Vex, register: u4) void {
- self.register = ~register;
- }
-
- pub fn len_128(self: *Vex) void {
- self.length = 0;
- }
-
- pub fn len_256(self: *Vex) void {
- assert(!self.lz_desc);
- self.length = 1;
- }
-
- pub fn simd_prefix_66(self: *Vex) void {
- self.simd_prefix = 0b01;
- }
-
- pub fn simd_prefix_f3(self: *Vex) void {
- self.simd_prefix = 0b10;
- }
-
- pub fn simd_prefix_f2(self: *Vex) void {
- self.simd_prefix = 0b11;
- }
-
- pub fn wig(self: *Vex) void {
- self.wig_desc = true;
- }
-
- pub fn lig(self: *Vex) void {
- self.lig_desc = true;
- }
-
- pub fn lz(self: *Vex) void {
- self.lz_desc = true;
- }
-
- pub fn write(self: Vex, writer: anytype) usize {
- var buf: [3]u8 = .{0} ** 3;
- const form_3byte: bool = blk: {
- if (self.rex_prefix.w and !self.wig_desc) break :blk true;
- if (self.rex_prefix.x or self.rex_prefix.b) break :blk true;
- break :blk self.lead_opc != 0b0_0001;
- };
-
- if (self.lz_desc) {
- assert(self.length == 0);
- }
-
- if (form_3byte) {
- // First byte
- buf[0] = 0xc4;
- // Second byte
- const rxb_mask: u3 = @intCast(u3, @boolToInt(!self.rex_prefix.r)) << 2 |
- @intCast(u2, @boolToInt(!self.rex_prefix.x)) << 1 |
- @boolToInt(!self.rex_prefix.b);
- buf[1] |= @intCast(u8, rxb_mask) << 5;
- buf[1] |= self.lead_opc;
- // Third byte
- buf[2] |= @intCast(u8, @boolToInt(!self.rex_prefix.w)) << 7;
- buf[2] |= @intCast(u7, self.register) << 3;
- buf[2] |= @intCast(u3, self.length) << 2;
- buf[2] |= self.simd_prefix;
- } else {
- // First byte
- buf[0] = 0xc5;
- // Second byte
- buf[1] |= @intCast(u8, @boolToInt(!self.rex_prefix.r)) << 7;
- buf[1] |= @intCast(u7, self.register) << 3;
- buf[1] |= @intCast(u3, self.length) << 2;
- buf[1] |= self.simd_prefix;
- }
-
- const count: usize = if (form_3byte) 3 else 2;
- _ = writer.writeAll(buf[0..count]) catch unreachable;
- return count;
- }
- };
-
- pub fn vex(self: Self, prefix: Vex) void {
- _ = prefix.write(self.code.writer());
- }
-
- /// From section 2.2.1.2 of the manual, REX is encoded as b0100WRXB
- pub const Rex = struct {
- /// Wide, enables 64-bit operation
- w: bool = false,
- /// Extends the reg field in the ModR/M byte
- r: bool = false,
- /// Extends the index field in the SIB byte
- x: bool = false,
- /// Extends the r/m field in the ModR/M byte,
- /// or the base field in the SIB byte,
- /// or the reg field in the Opcode byte
- b: bool = false,
- };
-
- /// Encodes a REX prefix byte given all the fields
- ///
- /// Use this byte whenever you need 64 bit operation,
- /// or one of reg, index, r/m, base, or opcode-reg might be extended.
- ///
- /// See struct `Rex` for a description of each field.
- ///
- /// Does not add a prefix byte if none of the fields are set!
- pub fn rex(self: Self, byte: Rex) void {
- var value: u8 = 0b0100_0000;
-
- if (byte.w) value |= 0b1000;
- if (byte.r) value |= 0b0100;
- if (byte.x) value |= 0b0010;
- if (byte.b) value |= 0b0001;
-
- if (value != 0b0100_0000) {
- self.code.appendAssumeCapacity(value);
- }
- }
-
- // ------
- // Opcode
- // ------
-
- /// Encodes a 1 byte opcode
- pub fn opcode_1byte(self: Self, opcode: u8) void {
- self.code.appendAssumeCapacity(opcode);
- }
-
- /// Encodes a 2 byte opcode
- ///
- /// e.g. IMUL has the opcode 0x0f 0xaf, so you use
- ///
- /// encoder.opcode_2byte(0x0f, 0xaf);
- pub fn opcode_2byte(self: Self, prefix: u8, opcode: u8) void {
- self.code.appendAssumeCapacity(prefix);
- self.code.appendAssumeCapacity(opcode);
- }
-
- /// Encodes a 3 byte opcode
- ///
- /// e.g. MOVSD has the opcode 0xf2 0x0f 0x10
- ///
- /// encoder.opcode_3byte(0xf2, 0x0f, 0x10);
- pub fn opcode_3byte(self: Self, prefix_1: u8, prefix_2: u8, opcode: u8) void {
- self.code.appendAssumeCapacity(prefix_1);
- self.code.appendAssumeCapacity(prefix_2);
- self.code.appendAssumeCapacity(opcode);
- }
-
- /// Encodes a 1 byte opcode with a reg field
- ///
- /// Remember to add a REX prefix byte if reg is extended!
- pub fn opcode_withReg(self: Self, opcode: u8, reg: u3) void {
- assert(opcode & 0b111 == 0);
- self.code.appendAssumeCapacity(opcode | reg);
- }
-
- // ------
- // ModR/M
- // ------
-
- /// Construct a ModR/M byte given all the fields
- ///
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm(self: Self, mod: u2, reg_or_opx: u3, rm: u3) void {
- self.code.appendAssumeCapacity(
- @as(u8, mod) << 6 | @as(u8, reg_or_opx) << 3 | rm,
- );
- }
-
- /// Construct a ModR/M byte using direct r/m addressing
- /// r/m effective address: r/m
- ///
- /// Note reg's effective address is always just reg for the ModR/M byte.
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm_direct(self: Self, reg_or_opx: u3, rm: u3) void {
- self.modRm(0b11, reg_or_opx, rm);
- }
-
- /// Construct a ModR/M byte using indirect r/m addressing
- /// r/m effective address: [r/m]
- ///
- /// Note reg's effective address is always just reg for the ModR/M byte.
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm_indirectDisp0(self: Self, reg_or_opx: u3, rm: u3) void {
- assert(rm != 4 and rm != 5);
- self.modRm(0b00, reg_or_opx, rm);
- }
-
- /// Construct a ModR/M byte using indirect SIB addressing
- /// r/m effective address: [SIB]
- ///
- /// Note reg's effective address is always just reg for the ModR/M byte.
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm_SIBDisp0(self: Self, reg_or_opx: u3) void {
- self.modRm(0b00, reg_or_opx, 0b100);
- }
-
- /// Construct a ModR/M byte using RIP-relative addressing
- /// r/m effective address: [RIP + disp32]
- ///
- /// Note reg's effective address is always just reg for the ModR/M byte.
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm_RIPDisp32(self: Self, reg_or_opx: u3) void {
- self.modRm(0b00, reg_or_opx, 0b101);
- }
-
- /// Construct a ModR/M byte using indirect r/m with a 8bit displacement
- /// r/m effective address: [r/m + disp8]
- ///
- /// Note reg's effective address is always just reg for the ModR/M byte.
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm_indirectDisp8(self: Self, reg_or_opx: u3, rm: u3) void {
- assert(rm != 4);
- self.modRm(0b01, reg_or_opx, rm);
- }
-
- /// Construct a ModR/M byte using indirect SIB with a 8bit displacement
- /// r/m effective address: [SIB + disp8]
- ///
- /// Note reg's effective address is always just reg for the ModR/M byte.
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm_SIBDisp8(self: Self, reg_or_opx: u3) void {
- self.modRm(0b01, reg_or_opx, 0b100);
- }
-
- /// Construct a ModR/M byte using indirect r/m with a 32bit displacement
- /// r/m effective address: [r/m + disp32]
- ///
- /// Note reg's effective address is always just reg for the ModR/M byte.
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm_indirectDisp32(self: Self, reg_or_opx: u3, rm: u3) void {
- assert(rm != 4);
- self.modRm(0b10, reg_or_opx, rm);
- }
-
- /// Construct a ModR/M byte using indirect SIB with a 32bit displacement
- /// r/m effective address: [SIB + disp32]
- ///
- /// Note reg's effective address is always just reg for the ModR/M byte.
- /// Remember to add a REX prefix byte if reg or rm are extended!
- pub fn modRm_SIBDisp32(self: Self, reg_or_opx: u3) void {
- self.modRm(0b10, reg_or_opx, 0b100);
- }
-
- // ---
- // SIB
- // ---
-
- /// Construct a SIB byte given all the fields
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib(self: Self, scale: u2, index: u3, base: u3) void {
- self.code.appendAssumeCapacity(
- @as(u8, scale) << 6 | @as(u8, index) << 3 | base,
- );
- }
-
- /// Construct a SIB byte with scale * index + base, no frills.
- /// r/m effective address: [base + scale * index]
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib_scaleIndexBase(self: Self, scale: u2, index: u3, base: u3) void {
- assert(base != 5);
-
- self.sib(scale, index, base);
- }
-
- /// Construct a SIB byte with scale * index + disp32
- /// r/m effective address: [scale * index + disp32]
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib_scaleIndexDisp32(self: Self, scale: u2, index: u3) void {
- assert(index != 4);
-
- // scale is actually ignored
- // index = 4 means no index
- // base = 5 means no base, if mod == 0.
- self.sib(scale, index, 5);
- }
-
- /// Construct a SIB byte with just base
- /// r/m effective address: [base]
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib_base(self: Self, base: u3) void {
- assert(base != 5);
-
- // scale is actually ignored
- // index = 4 means no index
- self.sib(0, 4, base);
- }
-
- /// Construct a SIB byte with just disp32
- /// r/m effective address: [disp32]
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib_disp32(self: Self) void {
- // scale is actually ignored
- // index = 4 means no index
- // base = 5 means no base, if mod == 0.
- self.sib(0, 4, 5);
- }
-
- /// Construct a SIB byte with scale * index + base + disp8
- /// r/m effective address: [base + scale * index + disp8]
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib_scaleIndexBaseDisp8(self: Self, scale: u2, index: u3, base: u3) void {
- self.sib(scale, index, base);
- }
-
- /// Construct a SIB byte with base + disp8, no index
- /// r/m effective address: [base + disp8]
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib_baseDisp8(self: Self, base: u3) void {
- // scale is ignored
- // index = 4 means no index
- self.sib(0, 4, base);
- }
-
- /// Construct a SIB byte with scale * index + base + disp32
- /// r/m effective address: [base + scale * index + disp32]
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib_scaleIndexBaseDisp32(self: Self, scale: u2, index: u3, base: u3) void {
- self.sib(scale, index, base);
- }
-
- /// Construct a SIB byte with base + disp32, no index
- /// r/m effective address: [base + disp32]
- ///
- /// Remember to add a REX prefix byte if index or base are extended!
- pub fn sib_baseDisp32(self: Self, base: u3) void {
- // scale is ignored
- // index = 4 means no index
- self.sib(0, 4, base);
- }
-
- // -------------------------
- // Trivial (no bit fiddling)
- // -------------------------
-
- /// Encode an 8 bit immediate
- ///
- /// It is sign-extended to 64 bits by the cpu.
- pub fn imm8(self: Self, imm: i8) void {
- self.code.appendAssumeCapacity(@bitCast(u8, imm));
- }
-
- /// Encode an 8 bit displacement
- ///
- /// It is sign-extended to 64 bits by the cpu.
- pub fn disp8(self: Self, disp: i8) void {
- self.code.appendAssumeCapacity(@bitCast(u8, disp));
- }
-
- /// Encode an 16 bit immediate
- ///
- /// It is sign-extended to 64 bits by the cpu.
- pub fn imm16(self: Self, imm: i16) void {
- self.writeIntLittle(i16, imm);
- }
-
- /// Encode an 32 bit immediate
- ///
- /// It is sign-extended to 64 bits by the cpu.
- pub fn imm32(self: Self, imm: i32) void {
- self.writeIntLittle(i32, imm);
- }
-
- /// Encode an 32 bit displacement
- ///
- /// It is sign-extended to 64 bits by the cpu.
- pub fn disp32(self: Self, disp: i32) void {
- self.writeIntLittle(i32, disp);
- }
-
- /// Encode an 64 bit immediate
- ///
- /// It is sign-extended to 64 bits by the cpu.
- pub fn imm64(self: Self, imm: u64) void {
- self.writeIntLittle(u64, imm);
- }
-};
-
-test "Encoder helpers - general purpose registers" {
- var code = ArrayList(u8).init(testing.allocator);
- defer code.deinit();
-
- // simple integer multiplication
-
- // imul eax,edi
- // 0faf c7
- {
- try code.resize(0);
- const encoder = try Encoder.init(&code, 4);
- encoder.rex(.{
- .r = Register.eax.isExtended(),
- .b = Register.edi.isExtended(),
- });
- encoder.opcode_2byte(0x0f, 0xaf);
- encoder.modRm_direct(
- Register.eax.lowEnc(),
- Register.edi.lowEnc(),
- );
-
- try testing.expectEqualSlices(u8, &[_]u8{ 0x0f, 0xaf, 0xc7 }, code.items);
- }
-
- // simple mov
-
- // mov eax,edi
- // 89 f8
- {
- try code.resize(0);
- const encoder = try Encoder.init(&code, 3);
- encoder.rex(.{
- .r = Register.edi.isExtended(),
- .b = Register.eax.isExtended(),
- });
- encoder.opcode_1byte(0x89);
- encoder.modRm_direct(
- Register.edi.lowEnc(),
- Register.eax.lowEnc(),
- );
-
- try testing.expectEqualSlices(u8, &[_]u8{ 0x89, 0xf8 }, code.items);
- }
-
- // signed integer addition of 32-bit sign extended immediate to 64 bit register
-
- // add rcx, 2147483647
- //
- // Using the following opcode: REX.W + 81 /0 id, we expect the following encoding
- //
- // 48 : REX.W set for 64 bit operand (*r*cx)
- // 81 : opcode for " with immediate"
- // c1 : id = rcx,
- // : c1 = 11 <-- mod = 11 indicates r/m is register (rcx)
- // : 000 <-- opcode_extension = 0 because opcode extension is /0. /0 specifies ADD
- // : 001 <-- 001 is rcx
- // ffffff7f : 2147483647
- {
- try code.resize(0);
- const encoder = try Encoder.init(&code, 7);
- encoder.rex(.{ .w = true }); // use 64 bit operation
- encoder.opcode_1byte(0x81);
- encoder.modRm_direct(
- 0,
- Register.rcx.lowEnc(),
- );
- encoder.imm32(2147483647);
-
- try testing.expectEqualSlices(u8, &[_]u8{ 0x48, 0x81, 0xc1, 0xff, 0xff, 0xff, 0x7f }, code.items);
- }
-}
-
-test "Encoder helpers - Vex prefix" {
- var buf: [3]u8 = undefined;
- var stream = std.io.fixedBufferStream(&buf);
- const writer = stream.writer();
-
- {
- var vex_prefix = Encoder.Vex{};
- vex_prefix.rex(.{
- .r = true,
- });
- const nwritten = vex_prefix.write(writer);
- try testing.expectEqualSlices(u8, &[_]u8{ 0xc5, 0x78 }, buf[0..nwritten]);
- }
-
- {
- stream.reset();
- var vex_prefix = Encoder.Vex{};
- vex_prefix.reg(Register.xmm15.enc());
- const nwritten = vex_prefix.write(writer);
- try testing.expectEqualSlices(u8, &[_]u8{ 0xc5, 0x80 }, buf[0..nwritten]);
- }
-
- {
- stream.reset();
- var vex_prefix = Encoder.Vex{};
- vex_prefix.rex(.{
- .w = true,
- .x = true,
- });
- const nwritten = vex_prefix.write(writer);
- try testing.expectEqualSlices(u8, &[_]u8{ 0xc4, 0b101_0_0001, 0b0_1111_0_00 }, buf[0..nwritten]);
- }
-
- {
- stream.reset();
- var vex_prefix = Encoder.Vex{};
- vex_prefix.rex(.{
- .w = true,
- .r = true,
- });
- vex_prefix.len_256();
- vex_prefix.lead_opc_0f();
- vex_prefix.simd_prefix_66();
- const nwritten = vex_prefix.write(writer);
- try testing.expectEqualSlices(u8, &[_]u8{ 0xc4, 0b011_0_0001, 0b0_1111_1_01 }, buf[0..nwritten]);
- }
-
- var code = ArrayList(u8).init(testing.allocator);
- defer code.deinit();
-
- {
- // vmovapd xmm1, xmm2
- const encoder = try Encoder.init(&code, 4);
- var vex = Encoder.Vex{};
- vex.simd_prefix_66();
- encoder.vex(vex); // use 64 bit operation
- encoder.opcode_1byte(0x28);
- encoder.modRm_direct(0, Register.xmm1.lowEnc());
- try testing.expectEqualSlices(u8, &[_]u8{ 0xC5, 0xF9, 0x28, 0xC1 }, code.items);
- }
-
- {
- try code.resize(0);
-
- // vmovhpd xmm13, xmm1, qword ptr [rip]
- const encoder = try Encoder.init(&code, 9);
- var vex = Encoder.Vex{};
- vex.len_128();
- vex.simd_prefix_66();
- vex.lead_opc_0f();
- vex.rex(.{ .r = true });
- vex.reg(Register.xmm1.enc());
- encoder.vex(vex);
- encoder.opcode_1byte(0x16);
- encoder.modRm_RIPDisp32(Register.xmm13.lowEnc());
- encoder.disp32(0);
- try testing.expectEqualSlices(u8, &[_]u8{ 0xC5, 0x71, 0x16, 0x2D, 0x00, 0x00, 0x00, 0x00 }, code.items);
- }
-}
-
-// TODO add these registers to the enum and populate dwarfLocOp
-// // Return Address register. This is stored in `0(%rsp, "")` and is not a physical register.
-// RA = (16, "RA"),
-//
-// XMM0 = (17, "xmm0"),
-// XMM1 = (18, "xmm1"),
-// XMM2 = (19, "xmm2"),
-// XMM3 = (20, "xmm3"),
-// XMM4 = (21, "xmm4"),
-// XMM5 = (22, "xmm5"),
-// XMM6 = (23, "xmm6"),
-// XMM7 = (24, "xmm7"),
-//
-// XMM8 = (25, "xmm8"),
-// XMM9 = (26, "xmm9"),
-// XMM10 = (27, "xmm10"),
-// XMM11 = (28, "xmm11"),
-// XMM12 = (29, "xmm12"),
-// XMM13 = (30, "xmm13"),
-// XMM14 = (31, "xmm14"),
-// XMM15 = (32, "xmm15"),
-//
-// ST0 = (33, "st0"),
-// ST1 = (34, "st1"),
-// ST2 = (35, "st2"),
-// ST3 = (36, "st3"),
-// ST4 = (37, "st4"),
-// ST5 = (38, "st5"),
-// ST6 = (39, "st6"),
-// ST7 = (40, "st7"),
-//
-// MM0 = (41, "mm0"),
-// MM1 = (42, "mm1"),
-// MM2 = (43, "mm2"),
-// MM3 = (44, "mm3"),
-// MM4 = (45, "mm4"),
-// MM5 = (46, "mm5"),
-// MM6 = (47, "mm6"),
-// MM7 = (48, "mm7"),
-//
-// RFLAGS = (49, "rFLAGS"),
-// ES = (50, "es"),
-// CS = (51, "cs"),
-// SS = (52, "ss"),
-// DS = (53, "ds"),
-// FS = (54, "fs"),
-// GS = (55, "gs"),
-//
-// FS_BASE = (58, "fs.base"),
-// GS_BASE = (59, "gs.base"),
-//
-// TR = (62, "tr"),
-// LDTR = (63, "ldtr"),
-// MXCSR = (64, "mxcsr"),
-// FCW = (65, "fcw"),
-// FSW = (66, "fsw"),
-//
-// XMM16 = (67, "xmm16"),
-// XMM17 = (68, "xmm17"),
-// XMM18 = (69, "xmm18"),
-// XMM19 = (70, "xmm19"),
-// XMM20 = (71, "xmm20"),
-// XMM21 = (72, "xmm21"),
-// XMM22 = (73, "xmm22"),
-// XMM23 = (74, "xmm23"),
-// XMM24 = (75, "xmm24"),
-// XMM25 = (76, "xmm25"),
-// XMM26 = (77, "xmm26"),
-// XMM27 = (78, "xmm27"),
-// XMM28 = (79, "xmm28"),
-// XMM29 = (80, "xmm29"),
-// XMM30 = (81, "xmm30"),
-// XMM31 = (82, "xmm31"),
-//
-// K0 = (118, "k0"),
-// K1 = (119, "k1"),
-// K2 = (120, "k2"),
-// K3 = (121, "k3"),
-// K4 = (122, "k4"),
-// K5 = (123, "k5"),
-// K6 = (124, "k6"),
-// K7 = (125, "k7"),
diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig
new file mode 100644
index 0000000000..7e29f95069
--- /dev/null
+++ b/src/arch/x86_64/encoder.zig
@@ -0,0 +1,2275 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const log = std.log.scoped(.x86_64_encoder);
+const math = std.math;
+const testing = std.testing;
+
+const bits = @import("bits.zig");
+const Encoding = @import("Encoding.zig");
+const Immediate = bits.Immediate;
+const Memory = bits.Memory;
+const Register = bits.Register;
+
+pub const Instruction = struct {
+ op1: Operand = .none,
+ op2: Operand = .none,
+ op3: Operand = .none,
+ op4: Operand = .none,
+ encoding: Encoding,
+
+ pub const Mnemonic = Encoding.Mnemonic;
+
+ pub const Operand = union(enum) {
+ none,
+ reg: Register,
+ mem: Memory,
+ imm: Immediate,
+
+ /// Returns the bit size of the operand.
+ /// Asserts the operand is either register or memory.
+ pub fn bitSize(op: Operand) u64 {
+ return switch (op) {
+ .none => unreachable,
+ .reg => |reg| reg.bitSize(),
+ .mem => |mem| mem.bitSize(),
+ .imm => unreachable,
+ };
+ }
+
+ /// Returns true if the operand is a segment register.
+ /// Asserts the operand is either register or memory.
+ pub fn isSegmentRegister(op: Operand) bool {
+ return switch (op) {
+ .none => unreachable,
+ .reg => |reg| reg.class() == .segment,
+ .mem => |mem| mem.isSegmentRegister(),
+ .imm => unreachable,
+ };
+ }
+
+ pub fn fmtPrint(op: Operand, enc_op: Encoding.Op, writer: anytype) !void {
+ switch (op) {
+ .none => {},
+ .reg => |reg| try writer.writeAll(@tagName(reg)),
+ .mem => |mem| switch (mem) {
+ .rip => |rip| {
+ try writer.print("{s} ptr [rip", .{@tagName(rip.ptr_size)});
+ if (rip.disp != 0) {
+ const sign_bit = if (sign(rip.disp) < 0) "-" else "+";
+ const disp_abs = try std.math.absInt(rip.disp);
+ try writer.print(" {s} 0x{x}", .{ sign_bit, disp_abs });
+ }
+ try writer.writeByte(']');
+ },
+ .sib => |sib| {
+ try writer.print("{s} ptr ", .{@tagName(sib.ptr_size)});
+
+ if (mem.isSegmentRegister()) {
+ return writer.print("{s}:0x{x}", .{ @tagName(sib.base.?), sib.disp });
+ }
+
+ try writer.writeByte('[');
+
+ if (sib.base) |base| {
+ try writer.print("{s}", .{@tagName(base)});
+ }
+ if (sib.scale_index) |si| {
+ if (sib.base != null) {
+ try writer.writeAll(" + ");
+ }
+ try writer.print("{s} * {d}", .{ @tagName(si.index), si.scale });
+ }
+ if (sib.disp != 0) {
+ if (sib.base != null or sib.scale_index != null) {
+ try writer.writeByte(' ');
+ }
+ try writer.writeByte(if (sign(sib.disp) < 0) '-' else '+');
+ const disp_abs = try std.math.absInt(sib.disp);
+ try writer.print(" 0x{x}", .{disp_abs});
+ }
+
+ try writer.writeByte(']');
+ },
+ .moffs => |moffs| try writer.print("{s}:0x{x}", .{ @tagName(moffs.seg), moffs.offset }),
+ },
+ .imm => |imm| try writer.print("0x{x}", .{imm.asUnsigned(enc_op.bitSize())}),
+ }
+ }
+ };
+
+ pub fn new(mnemonic: Mnemonic, args: struct {
+ op1: Operand = .none,
+ op2: Operand = .none,
+ op3: Operand = .none,
+ op4: Operand = .none,
+ }) !Instruction {
+ const encoding = (try Encoding.findByMnemonic(mnemonic, .{
+ .op1 = args.op1,
+ .op2 = args.op2,
+ .op3 = args.op3,
+ .op4 = args.op4,
+ })) orelse {
+ log.debug("no encoding found for: {s} {s} {s} {s} {s}", .{
+ @tagName(mnemonic),
+ @tagName(Encoding.Op.fromOperand(args.op1)),
+ @tagName(Encoding.Op.fromOperand(args.op2)),
+ @tagName(Encoding.Op.fromOperand(args.op3)),
+ @tagName(Encoding.Op.fromOperand(args.op4)),
+ });
+ return error.InvalidInstruction;
+ };
+ log.debug("selected encoding: {}", .{encoding});
+ return .{
+ .op1 = args.op1,
+ .op2 = args.op2,
+ .op3 = args.op3,
+ .op4 = args.op4,
+ .encoding = encoding,
+ };
+ }
+
+ pub fn fmtPrint(inst: Instruction, writer: anytype) !void {
+ try writer.print("{s}", .{@tagName(inst.encoding.mnemonic)});
+ const ops = [_]struct { Operand, Encoding.Op }{
+ .{ inst.op1, inst.encoding.op1 },
+ .{ inst.op2, inst.encoding.op2 },
+ .{ inst.op3, inst.encoding.op3 },
+ .{ inst.op4, inst.encoding.op4 },
+ };
+ for (&ops, 0..) |op, i| {
+ if (op[0] == .none) break;
+ if (i > 0) {
+ try writer.writeByte(',');
+ }
+ try writer.writeByte(' ');
+ try op[0].fmtPrint(op[1], writer);
+ }
+ }
+
+ pub fn encode(inst: Instruction, writer: anytype) !void {
+ const encoder = Encoder(@TypeOf(writer)){ .writer = writer };
+ const encoding = inst.encoding;
+
+ try inst.encodeLegacyPrefixes(encoder);
+ try inst.encodeMandatoryPrefix(encoder);
+ try inst.encodeRexPrefix(encoder);
+ try inst.encodeOpcode(encoder);
+
+ switch (encoding.op_en) {
+ .np, .o => {},
+ .i, .d => try encodeImm(inst.op1.imm, encoding.op1, encoder),
+ .zi, .oi => try encodeImm(inst.op2.imm, encoding.op2, encoder),
+ .fd => try encoder.imm64(inst.op2.mem.moffs.offset),
+ .td => try encoder.imm64(inst.op1.mem.moffs.offset),
+ else => {
+ const mem_op = switch (encoding.op_en) {
+ .m, .mi, .m1, .mc, .mr => inst.op1,
+ .rm, .rmi => inst.op2,
+ else => unreachable,
+ };
+ switch (mem_op) {
+ .reg => |reg| {
+ const rm = switch (encoding.op_en) {
+ .m, .mi, .m1, .mc => encoding.modRmExt(),
+ .mr => inst.op2.reg.lowEnc(),
+ .rm, .rmi => inst.op1.reg.lowEnc(),
+ else => unreachable,
+ };
+ try encoder.modRm_direct(rm, reg.lowEnc());
+ },
+ .mem => |mem| {
+ const op = switch (encoding.op_en) {
+ .m, .mi, .m1, .mc => .none,
+ .mr => inst.op2,
+ .rm, .rmi => inst.op1,
+ else => unreachable,
+ };
+ try encodeMemory(encoding, mem, op, encoder);
+ },
+ else => unreachable,
+ }
+
+ switch (encoding.op_en) {
+ .mi => try encodeImm(inst.op2.imm, encoding.op2, encoder),
+ .rmi => try encodeImm(inst.op3.imm, encoding.op3, encoder),
+ else => {},
+ }
+ },
+ }
+ }
+
+ fn encodeOpcode(inst: Instruction, encoder: anytype) !void {
+ const opcode = inst.encoding.opcode();
+ switch (inst.encoding.op_en) {
+ .o, .oi => try encoder.opcode_withReg(opcode[0], inst.op1.reg.lowEnc()),
+ else => {
+ const index: usize = if (inst.encoding.mandatoryPrefix()) |_| 1 else 0;
+ for (opcode[index..]) |byte| {
+ try encoder.opcode_1byte(byte);
+ }
+ },
+ }
+ }
+
+ fn encodeLegacyPrefixes(inst: Instruction, encoder: anytype) !void {
+ const enc = inst.encoding;
+ const op_en = enc.op_en;
+
+ var legacy = LegacyPrefixes{};
+ if (enc.mode == .none) {
+ const bit_size = enc.operandBitSize();
+ if (bit_size == 16) {
+ legacy.set16BitOverride();
+ }
+ }
+
+ const segment_override: ?Register = switch (op_en) {
+ .i, .zi, .o, .oi, .d, .np => null,
+ .fd => inst.op2.mem.base().?,
+ .td => inst.op1.mem.base().?,
+ .rm, .rmi => if (inst.op2.isSegmentRegister()) blk: {
+ break :blk switch (inst.op2) {
+ .reg => |r| r,
+ .mem => |m| m.base().?,
+ else => unreachable,
+ };
+ } else null,
+ .m, .mi, .m1, .mc, .mr => if (inst.op1.isSegmentRegister()) blk: {
+ break :blk switch (inst.op1) {
+ .reg => |r| r,
+ .mem => |m| m.base().?,
+ else => unreachable,
+ };
+ } else null,
+ };
+ if (segment_override) |seg| {
+ legacy.setSegmentOverride(seg);
+ }
+
+ try encoder.legacyPrefixes(legacy);
+ }
+
+ fn encodeRexPrefix(inst: Instruction, encoder: anytype) !void {
+ const op_en = inst.encoding.op_en;
+
+ var rex = Rex{};
+ rex.present = inst.encoding.mode == .rex;
+ rex.w = inst.encoding.mode == .long;
+
+ switch (op_en) {
+ .np, .i, .zi, .fd, .td, .d => {},
+ .o, .oi => {
+ rex.b = inst.op1.reg.isExtended();
+ },
+ .m, .mi, .m1, .mc, .mr, .rm, .rmi => {
+ const r_op = switch (op_en) {
+ .rm, .rmi => inst.op1,
+ .mr => inst.op2,
+ else => null,
+ };
+ if (r_op) |op| {
+ rex.r = op.reg.isExtended();
+ }
+
+ const b_x_op = switch (op_en) {
+ .rm, .rmi => inst.op2,
+ .m, .mi, .m1, .mc, .mr => inst.op1,
+ else => unreachable,
+ };
+ switch (b_x_op) {
+ .reg => |r| {
+ rex.b = r.isExtended();
+ },
+ .mem => |mem| {
+ rex.b = if (mem.base()) |base| base.isExtended() else false;
+ rex.x = if (mem.scaleIndex()) |si| si.index.isExtended() else false;
+ },
+ else => unreachable,
+ }
+ },
+ }
+
+ try encoder.rex(rex);
+ }
+
+ fn encodeMandatoryPrefix(inst: Instruction, encoder: anytype) !void {
+ const prefix = inst.encoding.mandatoryPrefix() orelse return;
+ try encoder.opcode_1byte(prefix);
+ }
+
+ fn encodeMemory(encoding: Encoding, mem: Memory, operand: Operand, encoder: anytype) !void {
+ const operand_enc = switch (operand) {
+ .reg => |reg| reg.lowEnc(),
+ .none => encoding.modRmExt(),
+ else => unreachable,
+ };
+
+ switch (mem) {
+ .moffs => unreachable,
+ .sib => |sib| {
+ if (sib.base) |base| {
+ if (base.class() == .segment) {
+ // TODO audit this wrt SIB
+ try encoder.modRm_SIBDisp0(operand_enc);
+ if (sib.scale_index) |si| {
+ const scale = math.log2_int(u4, si.scale);
+ try encoder.sib_scaleIndexDisp32(scale, si.index.lowEnc());
+ } else {
+ try encoder.sib_disp32();
+ }
+ try encoder.disp32(sib.disp);
+ } else {
+ assert(base.class() == .general_purpose);
+ const dst = base.lowEnc();
+ const src = operand_enc;
+ if (dst == 4 or sib.scale_index != null) {
+ if (sib.disp == 0 and dst != 5) {
+ try encoder.modRm_SIBDisp0(src);
+ if (sib.scale_index) |si| {
+ const scale = math.log2_int(u4, si.scale);
+ try encoder.sib_scaleIndexBase(scale, si.index.lowEnc(), dst);
+ } else {
+ try encoder.sib_base(dst);
+ }
+ } else if (math.cast(i8, sib.disp)) |_| {
+ try encoder.modRm_SIBDisp8(src);
+ if (sib.scale_index) |si| {
+ const scale = math.log2_int(u4, si.scale);
+ try encoder.sib_scaleIndexBaseDisp8(scale, si.index.lowEnc(), dst);
+ } else {
+ try encoder.sib_baseDisp8(dst);
+ }
+ try encoder.disp8(@truncate(i8, sib.disp));
+ } else {
+ try encoder.modRm_SIBDisp32(src);
+ if (sib.scale_index) |si| {
+ const scale = math.log2_int(u4, si.scale);
+ try encoder.sib_scaleIndexBaseDisp32(scale, si.index.lowEnc(), dst);
+ } else {
+ try encoder.sib_baseDisp32(dst);
+ }
+ try encoder.disp32(sib.disp);
+ }
+ } else {
+ if (sib.disp == 0 and dst != 5) {
+ try encoder.modRm_indirectDisp0(src, dst);
+ } else if (math.cast(i8, sib.disp)) |_| {
+ try encoder.modRm_indirectDisp8(src, dst);
+ try encoder.disp8(@truncate(i8, sib.disp));
+ } else {
+ try encoder.modRm_indirectDisp32(src, dst);
+ try encoder.disp32(sib.disp);
+ }
+ }
+ }
+ } else {
+ try encoder.modRm_SIBDisp0(operand_enc);
+ if (sib.scale_index) |si| {
+ const scale = math.log2_int(u4, si.scale);
+ try encoder.sib_scaleIndexDisp32(scale, si.index.lowEnc());
+ } else {
+ try encoder.sib_disp32();
+ }
+ try encoder.disp32(sib.disp);
+ }
+ },
+ .rip => |rip| {
+ try encoder.modRm_RIPDisp32(operand_enc);
+ try encoder.disp32(rip.disp);
+ },
+ }
+ }
+
+ fn encodeImm(imm: Immediate, kind: Encoding.Op, encoder: anytype) !void {
+ const raw = imm.asUnsigned(kind.bitSize());
+ switch (kind.bitSize()) {
+ 8 => try encoder.imm8(@intCast(u8, raw)),
+ 16 => try encoder.imm16(@intCast(u16, raw)),
+ 32 => try encoder.imm32(@intCast(u32, raw)),
+ 64 => try encoder.imm64(raw),
+ else => unreachable,
+ }
+ }
+};
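+
+// Illustrative round trip through the public API above: Instruction.new picks
+// an encoding, fmtPrint renders an Intel-syntax string, and encode (exercised
+// by the tests at the bottom of this file) emits the bytes. This assumes the
+// encoding table in Encoding.zig resolves `mov r64, imm` to the imm32 MI form,
+// as the "encode" test below demonstrates.
+test "Instruction.fmtPrint" {
+ const inst = try Instruction.new(.mov, .{
+ .op1 = .{ .reg = .rbx },
+ .op2 = .{ .imm = Immediate.u(4) },
+ });
+ var buf: [32]u8 = undefined;
+ var stream = std.io.fixedBufferStream(&buf);
+ try inst.fmtPrint(stream.writer());
+ try testing.expectEqualStrings("mov rbx, 0x4", stream.getWritten());
+}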
+
+inline fn sign(i: anytype) @TypeOf(i) {
+ return @as(@TypeOf(i), @boolToInt(i > 0)) - @boolToInt(i < 0);
+}
+
+pub const LegacyPrefixes = packed struct {
+ /// LOCK
+ prefix_f0: bool = false,
+ /// REPNZ, REPNE, REP, Scalar Double-precision
+ prefix_f2: bool = false,
+ /// REPZ, REPE, REP, Scalar Single-precision
+ prefix_f3: bool = false,
+
+ /// CS segment override or Branch not taken
+ prefix_2e: bool = false,
+ /// SS segment override
+ prefix_36: bool = false,
+ /// ES segment override
+ prefix_26: bool = false,
+ /// FS segment override
+ prefix_64: bool = false,
+ /// GS segment override
+ prefix_65: bool = false,
+
+ /// Branch taken
+ prefix_3e: bool = false,
+
+ /// Address size override (enables 16 bit address size)
+ prefix_67: bool = false,
+
+ /// Operand size override (enables 16 bit operation)
+ prefix_66: bool = false,
+
+ padding: u5 = 0,
+
+ pub fn setSegmentOverride(self: *LegacyPrefixes, reg: Register) void {
+ assert(reg.class() == .segment);
+ switch (reg) {
+ .cs => self.prefix_2e = true,
+ .ss => self.prefix_36 = true,
+ .es => self.prefix_26 = true,
+ .fs => self.prefix_64 = true,
+ .gs => self.prefix_65 = true,
+ .ds => {},
+ else => unreachable,
+ }
+ }
+
+ pub fn set16BitOverride(self: *LegacyPrefixes) void {
+ self.prefix_66 = true;
+ }
+};
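+
+// A minimal sketch of how the encoder drives this struct: flip the relevant
+// flags, then Encoder.legacyPrefixes below emits one byte per set flag (and
+// nothing at all when the packed value is zero).
+test "LegacyPrefixes flags" {
+ var prefixes = LegacyPrefixes{};
+ prefixes.setSegmentOverride(.fs);
+ prefixes.set16BitOverride();
+ try testing.expect(prefixes.prefix_64);
+ try testing.expect(prefixes.prefix_66);
+ try testing.expect(@bitCast(u16, prefixes) != 0);
+}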
+
+fn Encoder(comptime T: type) type {
+ return struct {
+ writer: T,
+
+ const Self = @This();
+
+ // --------
+ // Prefixes
+ // --------
+
+ /// Encodes legacy prefixes
+ pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) !void {
+ if (@bitCast(u16, prefixes) != 0) {
+ // Hopefully this path isn't taken very often, so we'll do it the slow way for now
+
+ // LOCK
+ if (prefixes.prefix_f0) try self.writer.writeByte(0xf0);
+ // REPNZ, REPNE, REP, Scalar Double-precision
+ if (prefixes.prefix_f2) try self.writer.writeByte(0xf2);
+ // REPZ, REPE, REP, Scalar Single-precision
+ if (prefixes.prefix_f3) try self.writer.writeByte(0xf3);
+
+ // CS segment override or Branch not taken
+ if (prefixes.prefix_2e) try self.writer.writeByte(0x2e);
+ // SS segment override
+ if (prefixes.prefix_36) try self.writer.writeByte(0x36);
+ // ES segment override
+ if (prefixes.prefix_26) try self.writer.writeByte(0x26);
+ // FS segment override
+ if (prefixes.prefix_64) try self.writer.writeByte(0x64);
+ // GS segment override
+ if (prefixes.prefix_65) try self.writer.writeByte(0x65);
+
+ // Branch taken
+ if (prefixes.prefix_3e) try self.writer.writeByte(0x3e);
+
+ // Operand size override
+ if (prefixes.prefix_66) try self.writer.writeByte(0x66);
+
+ // Address size override
+ if (prefixes.prefix_67) try self.writer.writeByte(0x67);
+ }
+ }
+
+ /// Use 16 bit operand size
+ ///
+ /// Note that this flag is overridden by REX.W, if both are present.
+ pub fn prefix16BitMode(self: Self) !void {
+ try self.writer.writeByte(0x66);
+ }
+
+ /// Encodes a REX prefix byte given all the fields
+ ///
+ /// Use this byte whenever you need 64 bit operation,
+ /// or one of reg, index, r/m, base, or opcode-reg might be extended.
+ ///
+ /// See struct `Rex` for a description of each field.
+ pub fn rex(self: Self, byte: Rex) !void {
+ if (!byte.present and !byte.isSet()) return;
+
+ var value: u8 = 0b0100_0000;
+
+ if (byte.w) value |= 0b1000;
+ if (byte.r) value |= 0b0100;
+ if (byte.x) value |= 0b0010;
+ if (byte.b) value |= 0b0001;
+
+ try self.writer.writeByte(value);
+ }
+
+ // ------
+ // Opcode
+ // ------
+
+ /// Encodes a 1 byte opcode
+ pub fn opcode_1byte(self: Self, opcode: u8) !void {
+ try self.writer.writeByte(opcode);
+ }
+
+ /// Encodes a 2 byte opcode
+ ///
+ /// e.g. IMUL has the opcode 0x0f 0xaf, so you use
+ ///
+ /// encoder.opcode_2byte(0x0f, 0xaf);
+ pub fn opcode_2byte(self: Self, prefix: u8, opcode: u8) !void {
+ try self.writer.writeAll(&.{ prefix, opcode });
+ }
+
+ /// Encodes a 3 byte opcode
+ ///
+ /// e.g. MOVSD has the opcode 0xf2 0x0f 0x10
+ ///
+ /// encoder.opcode_3byte(0xf2, 0x0f, 0x10);
+ pub fn opcode_3byte(self: Self, prefix_1: u8, prefix_2: u8, opcode: u8) !void {
+ try self.writer.writeAll(&.{ prefix_1, prefix_2, opcode });
+ }
+
+ /// Encodes a 1 byte opcode with a reg field
+ ///
+ /// Remember to add a REX prefix byte if reg is extended!
+ pub fn opcode_withReg(self: Self, opcode: u8, reg: u3) !void {
+ assert(opcode & 0b111 == 0);
+ try self.writer.writeByte(opcode | reg);
+ }
+
+ // ------
+ // ModR/M
+ // ------
+
+ /// Construct a ModR/M byte given all the fields
+ ///
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm(self: Self, mod: u2, reg_or_opx: u3, rm: u3) !void {
+ try self.writer.writeByte(@as(u8, mod) << 6 | @as(u8, reg_or_opx) << 3 | rm);
+ }
+
+ /// Construct a ModR/M byte using direct r/m addressing
+ /// r/m effective address: r/m
+ ///
+ /// Note reg's effective address is always just reg for the ModR/M byte.
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm_direct(self: Self, reg_or_opx: u3, rm: u3) !void {
+ try self.modRm(0b11, reg_or_opx, rm);
+ }
+
+ /// Construct a ModR/M byte using indirect r/m addressing
+ /// r/m effective address: [r/m]
+ ///
+ /// Note reg's effective address is always just reg for the ModR/M byte.
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm_indirectDisp0(self: Self, reg_or_opx: u3, rm: u3) !void {
+ assert(rm != 4 and rm != 5);
+ try self.modRm(0b00, reg_or_opx, rm);
+ }
+
+ /// Construct a ModR/M byte using indirect SIB addressing
+ /// r/m effective address: [SIB]
+ ///
+ /// Note reg's effective address is always just reg for the ModR/M byte.
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm_SIBDisp0(self: Self, reg_or_opx: u3) !void {
+ try self.modRm(0b00, reg_or_opx, 0b100);
+ }
+
+ /// Construct a ModR/M byte using RIP-relative addressing
+ /// r/m effective address: [RIP + disp32]
+ ///
+ /// Note reg's effective address is always just reg for the ModR/M byte.
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm_RIPDisp32(self: Self, reg_or_opx: u3) !void {
+ try self.modRm(0b00, reg_or_opx, 0b101);
+ }
+
+ /// Construct a ModR/M byte using indirect r/m with an 8-bit displacement
+ /// r/m effective address: [r/m + disp8]
+ ///
+ /// Note reg's effective address is always just reg for the ModR/M byte.
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm_indirectDisp8(self: Self, reg_or_opx: u3, rm: u3) !void {
+ assert(rm != 4);
+ try self.modRm(0b01, reg_or_opx, rm);
+ }
+
+ /// Construct a ModR/M byte using indirect SIB with an 8-bit displacement
+ /// r/m effective address: [SIB + disp8]
+ ///
+ /// Note reg's effective address is always just reg for the ModR/M byte.
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm_SIBDisp8(self: Self, reg_or_opx: u3) !void {
+ try self.modRm(0b01, reg_or_opx, 0b100);
+ }
+
+ /// Construct a ModR/M byte using indirect r/m with a 32-bit displacement
+ /// r/m effective address: [r/m + disp32]
+ ///
+ /// Note reg's effective address is always just reg for the ModR/M byte.
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm_indirectDisp32(self: Self, reg_or_opx: u3, rm: u3) !void {
+ assert(rm != 4);
+ try self.modRm(0b10, reg_or_opx, rm);
+ }
+
+ /// Construct a ModR/M byte using indirect SIB with a 32-bit displacement
+ /// r/m effective address: [SIB + disp32]
+ ///
+ /// Note reg's effective address is always just reg for the ModR/M byte.
+ /// Remember to add a REX prefix byte if reg or rm are extended!
+ pub fn modRm_SIBDisp32(self: Self, reg_or_opx: u3) !void {
+ try self.modRm(0b10, reg_or_opx, 0b100);
+ }
+
+ // ---
+ // SIB
+ // ---
+
+ /// Construct a SIB byte given all the fields
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib(self: Self, scale: u2, index: u3, base: u3) !void {
+ try self.writer.writeByte(@as(u8, scale) << 6 | @as(u8, index) << 3 | base);
+ }
+
+ /// Construct a SIB byte with scale * index + base, no frills.
+ /// r/m effective address: [base + scale * index]
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib_scaleIndexBase(self: Self, scale: u2, index: u3, base: u3) !void {
+ assert(base != 5);
+
+ try self.sib(scale, index, base);
+ }
+
+ /// Construct a SIB byte with scale * index + disp32
+ /// r/m effective address: [scale * index + disp32]
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib_scaleIndexDisp32(self: Self, scale: u2, index: u3) !void {
+ // scale is actually ignored
+ // index = 4 means no index if and only if we haven't extended the register
+ // TODO enforce this
+ // base = 5 means no base, if mod == 0.
+ try self.sib(scale, index, 5);
+ }
+
+ /// Construct a SIB byte with just base
+ /// r/m effective address: [base]
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib_base(self: Self, base: u3) !void {
+ assert(base != 5);
+
+ // scale is actually ignored
+ // index = 4 means no index
+ try self.sib(0, 4, base);
+ }
+
+ /// Construct a SIB byte with just disp32
+ /// r/m effective address: [disp32]
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib_disp32(self: Self) !void {
+ // scale is actually ignored
+ // index = 4 means no index
+ // base = 5 means no base, if mod == 0.
+ try self.sib(0, 4, 5);
+ }
+
+ /// Construct a SIB byte with scale * index + base + disp8
+ /// r/m effective address: [base + scale * index + disp8]
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib_scaleIndexBaseDisp8(self: Self, scale: u2, index: u3, base: u3) !void {
+ try self.sib(scale, index, base);
+ }
+
+ /// Construct a SIB byte with base + disp8, no index
+ /// r/m effective address: [base + disp8]
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib_baseDisp8(self: Self, base: u3) !void {
+ // scale is ignored
+ // index = 4 means no index
+ try self.sib(0, 4, base);
+ }
+
+ /// Construct a SIB byte with scale * index + base + disp32
+ /// r/m effective address: [base + scale * index + disp32]
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib_scaleIndexBaseDisp32(self: Self, scale: u2, index: u3, base: u3) !void {
+ try self.sib(scale, index, base);
+ }
+
+ /// Construct a SIB byte with base + disp32, no index
+ /// r/m effective address: [base + disp32]
+ ///
+ /// Remember to add a REX prefix byte if index or base are extended!
+ pub fn sib_baseDisp32(self: Self, base: u3) !void {
+ // scale is ignored
+ // index = 4 means no index
+ try self.sib(0, 4, base);
+ }
+
+ // -------------------------
+ // Trivial (no bit fiddling)
+ // -------------------------
+
+ /// Encode an 8 bit displacement
+ ///
+ /// It is sign-extended to 64 bits by the cpu.
+ pub fn disp8(self: Self, disp: i8) !void {
+ try self.writer.writeByte(@bitCast(u8, disp));
+ }
+
+ /// Encode a 32 bit displacement
+ ///
+ /// It is sign-extended to 64 bits by the cpu.
+ pub fn disp32(self: Self, disp: i32) !void {
+ try self.writer.writeIntLittle(i32, disp);
+ }
+
+ /// Encode an 8 bit immediate
+ ///
+ /// It is sign-extended to 64 bits by the cpu.
+ pub fn imm8(self: Self, imm: u8) !void {
+ try self.writer.writeByte(imm);
+ }
+
+ /// Encode a 16 bit immediate
+ ///
+ /// It is sign-extended to 64 bits by the cpu.
+ pub fn imm16(self: Self, imm: u16) !void {
+ try self.writer.writeIntLittle(u16, imm);
+ }
+
+ /// Encode a 32 bit immediate
+ ///
+ /// It is sign-extended to 64 bits by the cpu.
+ pub fn imm32(self: Self, imm: u32) !void {
+ try self.writer.writeIntLittle(u32, imm);
+ }
+
+ /// Encode a 64 bit immediate
+ ///
+ /// It is written as-is; no sign extension is performed.
+ pub fn imm64(self: Self, imm: u64) !void {
+ try self.writer.writeIntLittle(u64, imm);
+ }
+ };
+}
+
+pub const Rex = struct {
+ w: bool = false,
+ r: bool = false,
+ x: bool = false,
+ b: bool = false,
+ present: bool = false,
+
+ pub fn isSet(rex: Rex) bool {
+ return rex.w or rex.r or rex.x or rex.b;
+ }
+};
+
+// Tests
+fn expectEqualHexStrings(expected: []const u8, given: []const u8, assembly: []const u8) !void {
+ assert(expected.len > 0);
+ if (std.mem.eql(u8, expected, given)) return;
+ const expected_fmt = try std.fmt.allocPrint(testing.allocator, "{x}", .{std.fmt.fmtSliceHexLower(expected)});
+ defer testing.allocator.free(expected_fmt);
+ const given_fmt = try std.fmt.allocPrint(testing.allocator, "{x}", .{std.fmt.fmtSliceHexLower(given)});
+ defer testing.allocator.free(given_fmt);
+ const idx = std.mem.indexOfDiff(u8, expected_fmt, given_fmt).?;
+ var padding = try testing.allocator.alloc(u8, idx + 5);
+ defer testing.allocator.free(padding);
+ std.mem.set(u8, padding, ' ');
+ std.debug.print("\nASM: {s}\nEXP: {s}\nGIV: {s}\n{s}^ -- first differing byte\n", .{
+ assembly,
+ expected_fmt,
+ given_fmt,
+ padding,
+ });
+ return error.TestFailed;
+}
+
+const TestEncode = struct {
+ buffer: [32]u8 = undefined,
+ index: usize = 0,
+
+ fn encode(enc: *TestEncode, mnemonic: Instruction.Mnemonic, args: struct {
+ op1: Instruction.Operand = .none,
+ op2: Instruction.Operand = .none,
+ op3: Instruction.Operand = .none,
+ op4: Instruction.Operand = .none,
+ }) !void {
+ var stream = std.io.fixedBufferStream(&enc.buffer);
+ var count_writer = std.io.countingWriter(stream.writer());
+ const inst = try Instruction.new(mnemonic, .{
+ .op1 = args.op1,
+ .op2 = args.op2,
+ .op3 = args.op3,
+ .op4 = args.op4,
+ });
+ try inst.encode(count_writer.writer());
+ enc.index = count_writer.bytes_written;
+ }
+
+ fn code(enc: TestEncode) []const u8 {
+ return enc.buffer[0..enc.index];
+ }
+};
+
+test "encode" {
+ var buf = std.ArrayList(u8).init(testing.allocator);
+ defer buf.deinit();
+
+ const inst = try Instruction.new(.mov, .{
+ .op1 = .{ .reg = .rbx },
+ .op2 = .{ .imm = Immediate.u(4) },
+ });
+ try inst.encode(buf.writer());
+ try testing.expectEqualSlices(u8, &.{ 0x48, 0xc7, 0xc3, 0x4, 0x0, 0x0, 0x0 }, buf.items);
+}
+
+test "lower I encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.push, .{ .op1 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x6A\x10", enc.code(), "push 0x10");
+
+ try enc.encode(.push, .{ .op1 = .{ .imm = Immediate.u(0x1000) } });
+ try expectEqualHexStrings("\x66\x68\x00\x10", enc.code(), "push 0x1000");
+
+ try enc.encode(.push, .{ .op1 = .{ .imm = Immediate.u(0x10000000) } });
+ try expectEqualHexStrings("\x68\x00\x00\x00\x10", enc.code(), "push 0x10000000");
+
+ try enc.encode(.adc, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .imm = Immediate.u(0x10000000) } });
+ try expectEqualHexStrings("\x48\x15\x00\x00\x00\x10", enc.code(), "adc rax, 0x10000000");
+
+ try enc.encode(.add, .{ .op1 = .{ .reg = .al }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x04\x10", enc.code(), "add al, 0x10");
+
+ try enc.encode(.add, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x48\x83\xC0\x10", enc.code(), "add rax, 0x10");
+
+ try enc.encode(.sbb, .{ .op1 = .{ .reg = .ax }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x66\x1D\x10\x00", enc.code(), "sbb ax, 0x10");
+
+ try enc.encode(.xor, .{ .op1 = .{ .reg = .al }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x34\x10", enc.code(), "xor al, 0x10");
+}
+
+test "lower MI encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .r12 }, .op2 = .{ .imm = Immediate.u(0x1000) } });
+ try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.byte, .{
+ .base = .r12,
+ .disp = 0,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x41\xC6\x04\x24\x10", enc.code(), "mov BYTE PTR [r12], 0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x48\xc7\xc0\x10\x00\x00\x00", enc.code(), "mov rax, 0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.dword, .{
+ .base = .r11,
+ .disp = 0,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x41\xc7\x03\x10\x00\x00\x00", enc.code(), "mov DWORD PTR [r11], 0x10");
+
+ try enc.encode(.mov, .{
+ .op1 = .{ .mem = Memory.rip(.qword, 0x10) },
+ .op2 = .{ .imm = Immediate.u(0x10) },
+ });
+ try expectEqualHexStrings(
+ "\x48\xC7\x05\x10\x00\x00\x00\x10\x00\x00\x00",
+ enc.code(),
+ "mov QWORD PTR [rip + 0x10], 0x10",
+ );
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = -8,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x48\xc7\x45\xf8\x10\x00\x00\x00", enc.code(), "mov QWORD PTR [rbp - 8], 0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.word, .{
+ .base = .rbp,
+ .disp = -2,
+ }) }, .op2 = .{ .imm = Immediate.s(-16) } });
+ try expectEqualHexStrings("\x66\xC7\x45\xFE\xF0\xFF", enc.code(), "mov WORD PTR [rbp - 2], -16");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.byte, .{
+ .base = .rbp,
+ .disp = -1,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\xC6\x45\xFF\x10", enc.code(), "mov BYTE PTR [rbp - 1], 0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = 0x10000000,
+ .scale_index = .{
+ .scale = 2,
+ .index = .rcx,
+ },
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings(
+ "\x48\xC7\x04\x4D\x00\x00\x00\x10\x10\x00\x00\x00",
+ enc.code(),
+ "mov QWORD PTR [rcx*2 + 0x10000000], 0x10",
+ );
+
+ try enc.encode(.adc, .{ .op1 = .{ .mem = Memory.sib(.byte, .{
+ .base = .rbp,
+ .disp = -0x10,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x80\x55\xF0\x10", enc.code(), "adc BYTE PTR [rbp - 0x10], 0x10");
+
+ try enc.encode(.adc, .{ .op1 = .{ .mem = Memory.rip(.qword, 0) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x48\x83\x15\x00\x00\x00\x00\x10", enc.code(), "adc QWORD PTR [rip], 0x10");
+
+ try enc.encode(.adc, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x48\x83\xD0\x10", enc.code(), "adc rax, 0x10");
+
+ try enc.encode(.add, .{ .op1 = .{ .mem = Memory.sib(.dword, .{
+ .base = .rdx,
+ .disp = -8,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x83\x42\xF8\x10", enc.code(), "add DWORD PTR [rdx - 8], 0x10");
+
+ try enc.encode(.add, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x48\x83\xC0\x10", enc.code(), "add rax, 0x10");
+
+ try enc.encode(.add, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = -0x10,
+ }) }, .op2 = .{ .imm = Immediate.s(-0x10) } });
+ try expectEqualHexStrings("\x48\x83\x45\xF0\xF0", enc.code(), "add QWORD PTR [rbp - 0x10], -0x10");
+
+ try enc.encode(.@"and", .{ .op1 = .{ .mem = Memory.sib(.dword, .{
+ .base = .ds,
+ .disp = 0x10000000,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings(
+ "\x83\x24\x25\x00\x00\x00\x10\x10",
+ enc.code(),
+ "and DWORD PTR ds:0x10000000, 0x10",
+ );
+
+ try enc.encode(.@"and", .{ .op1 = .{ .mem = Memory.sib(.dword, .{
+ .base = .es,
+ .disp = 0x10000000,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings(
+ "\x26\x83\x24\x25\x00\x00\x00\x10\x10",
+ enc.code(),
+ "and DWORD PTR es:0x10000000, 0x10",
+ );
+
+ try enc.encode(.@"and", .{ .op1 = .{ .mem = Memory.sib(.dword, .{
+ .base = .r12,
+ .disp = 0x10000000,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings(
+ "\x41\x83\xA4\x24\x00\x00\x00\x10\x10",
+ enc.code(),
+ "and DWORD PTR [r12 + 0x10000000], 0x10",
+ );
+
+ try enc.encode(.sub, .{ .op1 = .{ .mem = Memory.sib(.dword, .{
+ .base = .r11,
+ .disp = 0x10000000,
+ }) }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings(
+ "\x41\x83\xAB\x00\x00\x00\x10\x10",
+ enc.code(),
+ "sub DWORD PTR [r11 + 0x10000000], 0x10",
+ );
+}
+
+test "lower RM encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .r11,
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x49\x8b\x03", enc.code(), "mov rax, QWORD PTR [r11]");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rbx }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = 0x10,
+ }) } });
+ try expectEqualHexStrings("\x48\x8B\x1C\x25\x10\x00\x00\x00", enc.code(), "mov rbx, QWORD PTR ds:0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = -4,
+ }) } });
+ try expectEqualHexStrings("\x48\x8B\x45\xFC", enc.code(), "mov rax, QWORD PTR [rbp - 4]");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .scale_index = .{
+ .scale = 1,
+ .index = .rcx,
+ },
+ .disp = -8,
+ }) } });
+ try expectEqualHexStrings("\x48\x8B\x44\x0D\xF8", enc.code(), "mov rax, QWORD PTR [rbp + rcx*1 - 8]");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .mem = Memory.sib(.dword, .{
+ .base = .rbp,
+ .scale_index = .{
+ .scale = 4,
+ .index = .rdx,
+ },
+ .disp = -4,
+ }) } });
+ try expectEqualHexStrings("\x8B\x44\x95\xFC", enc.code(), "mov eax, dword ptr [rbp + rdx*4 - 4]");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .scale_index = .{
+ .scale = 8,
+ .index = .rcx,
+ },
+ .disp = -8,
+ }) } });
+ try expectEqualHexStrings("\x48\x8B\x44\xCD\xF8", enc.code(), "mov rax, QWORD PTR [rbp + rcx*8 - 8]");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .r8b }, .op2 = .{ .mem = Memory.sib(.byte, .{
+ .base = .rsi,
+ .scale_index = .{
+ .scale = 1,
+ .index = .rcx,
+ },
+ .disp = -24,
+ }) } });
+ try expectEqualHexStrings("\x44\x8A\x44\x0E\xE8", enc.code(), "mov r8b, BYTE PTR [rsi + rcx*1 - 24]");
+
+ // TODO this mnemonic needs cleanup as some prefixes are obsolete.
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .reg = .cs } });
+ try expectEqualHexStrings("\x48\x8C\xC8", enc.code(), "mov rax, cs");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = -16,
+ }) }, .op2 = .{ .reg = .fs } });
+ try expectEqualHexStrings("\x48\x8C\x65\xF0", enc.code(), "mov QWORD PTR [rbp - 16], fs");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .r12w }, .op2 = .{ .reg = .cs } });
+ try expectEqualHexStrings("\x66\x41\x8C\xCC", enc.code(), "mov r12w, cs");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.word, .{
+ .base = .rbp,
+ .disp = -16,
+ }) }, .op2 = .{ .reg = .fs } });
+ try expectEqualHexStrings("\x66\x8C\x65\xF0", enc.code(), "mov WORD PTR [rbp - 16], fs");
+
+ try enc.encode(.movsx, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .reg = .bx } });
+ try expectEqualHexStrings("\x0F\xBF\xC3", enc.code(), "movsx eax, bx");
+
+ try enc.encode(.movsx, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .reg = .bl } });
+ try expectEqualHexStrings("\x0F\xBE\xC3", enc.code(), "movsx eax, bl");
+
+ try enc.encode(.movsx, .{ .op1 = .{ .reg = .ax }, .op2 = .{ .reg = .bl } });
+ try expectEqualHexStrings("\x66\x0F\xBE\xC3", enc.code(), "movsx ax, bl");
+
+ try enc.encode(.movsx, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .mem = Memory.sib(.word, .{
+ .base = .rbp,
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x0F\xBF\x45\x00", enc.code(), "movsx eax, BYTE PTR [rbp]");
+
+ try enc.encode(.movsx, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .mem = Memory.sib(.byte, .{
+ .base = null,
+ .scale_index = .{
+ .index = .rax,
+ .scale = 2,
+ },
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x0F\xBE\x04\x45\x00\x00\x00\x00", enc.code(), "movsx eax, BYTE PTR [rax * 2]");
+
+ try enc.encode(.movsx, .{ .op1 = .{ .reg = .ax }, .op2 = .{ .mem = Memory.rip(.byte, 0x10) } });
+ try expectEqualHexStrings("\x66\x0F\xBE\x05\x10\x00\x00\x00", enc.code(), "movsx ax, BYTE PTR [rip + 0x10]");
+
+ try enc.encode(.movsx, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .reg = .bx } });
+ try expectEqualHexStrings("\x48\x0F\xBF\xC3", enc.code(), "movsx rax, bx");
+
+ try enc.encode(.movsxd, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .reg = .ebx } });
+ try expectEqualHexStrings("\x48\x63\xC3", enc.code(), "movsxd rax, ebx");
+
+ try enc.encode(.lea, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .mem = Memory.rip(.qword, 0x10) } });
+ try expectEqualHexStrings("\x48\x8D\x05\x10\x00\x00\x00", enc.code(), "lea rax, QWORD PTR [rip + 0x10]");
+
+ try enc.encode(.lea, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .mem = Memory.rip(.dword, 0x10) } });
+ try expectEqualHexStrings("\x48\x8D\x05\x10\x00\x00\x00", enc.code(), "lea rax, DWORD PTR [rip + 0x10]");
+
+ try enc.encode(.lea, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .mem = Memory.rip(.dword, 0x10) } });
+ try expectEqualHexStrings("\x8D\x05\x10\x00\x00\x00", enc.code(), "lea eax, DWORD PTR [rip + 0x10]");
+
+ try enc.encode(.lea, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .mem = Memory.rip(.word, 0x10) } });
+ try expectEqualHexStrings("\x8D\x05\x10\x00\x00\x00", enc.code(), "lea eax, WORD PTR [rip + 0x10]");
+
+ try enc.encode(.lea, .{ .op1 = .{ .reg = .ax }, .op2 = .{ .mem = Memory.rip(.byte, 0x10) } });
+ try expectEqualHexStrings("\x66\x8D\x05\x10\x00\x00\x00", enc.code(), "lea ax, BYTE PTR [rip + 0x10]");
+
+ try enc.encode(.lea, .{ .op1 = .{ .reg = .rsi }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .scale_index = .{
+ .scale = 1,
+ .index = .rcx,
+ },
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x48\x8D\x74\x0D\x00", enc.code(), "lea rsi, QWORD PTR [rbp + rcx*1 + 0]");
+
+ try enc.encode(.add, .{ .op1 = .{ .reg = .r11 }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .ds,
+ .disp = 0x10000000,
+ }) } });
+ try expectEqualHexStrings("\x4C\x03\x1C\x25\x00\x00\x00\x10", enc.code(), "add r11, QWORD PTR ds:0x10000000");
+
+ try enc.encode(.add, .{ .op1 = .{ .reg = .r12b }, .op2 = .{ .mem = Memory.sib(.byte, .{
+ .base = .ds,
+ .disp = 0x10000000,
+ }) } });
+ try expectEqualHexStrings("\x44\x02\x24\x25\x00\x00\x00\x10", enc.code(), "add r11b, BYTE PTR ds:0x10000000");
+
+ try enc.encode(.add, .{ .op1 = .{ .reg = .r12b }, .op2 = .{ .mem = Memory.sib(.byte, .{
+ .base = .fs,
+ .disp = 0x10000000,
+ }) } });
+ try expectEqualHexStrings("\x64\x44\x02\x24\x25\x00\x00\x00\x10", enc.code(), "add r11b, BYTE PTR fs:0x10000000");
+
+ try enc.encode(.sub, .{ .op1 = .{ .reg = .r11 }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .r13,
+ .disp = 0x10000000,
+ }) } });
+ try expectEqualHexStrings("\x4D\x2B\x9D\x00\x00\x00\x10", enc.code(), "sub r11, QWORD PTR [r13 + 0x10000000]");
+
+ try enc.encode(.sub, .{ .op1 = .{ .reg = .r11 }, .op2 = .{ .mem = Memory.sib(.qword, .{
+ .base = .r12,
+ .disp = 0x10000000,
+ }) } });
+ try expectEqualHexStrings("\x4D\x2B\x9C\x24\x00\x00\x00\x10", enc.code(), "sub r11, QWORD PTR [r12 + 0x10000000]");
+
+ try enc.encode(.imul, .{ .op1 = .{ .reg = .r11 }, .op2 = .{ .reg = .r12 } });
+ try expectEqualHexStrings("\x4D\x0F\xAF\xDC", enc.code(), "mov r11, r12");
+}
+
+test "lower RMI encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.imul, .{
+ .op1 = .{ .reg = .r11 },
+ .op2 = .{ .reg = .r12 },
+ .op3 = .{ .imm = Immediate.s(-2) },
+ });
+ try expectEqualHexStrings("\x4D\x6B\xDC\xFE", enc.code(), "imul r11, r12, -2");
+
+ try enc.encode(.imul, .{
+ .op1 = .{ .reg = .r11 },
+ .op2 = .{ .mem = Memory.rip(.qword, -16) },
+ .op3 = .{ .imm = Immediate.s(-1024) },
+ });
+ try expectEqualHexStrings(
+ "\x4C\x69\x1D\xF0\xFF\xFF\xFF\x00\xFC\xFF\xFF",
+ enc.code(),
+ "imul r11, QWORD PTR [rip - 16], -1024",
+ );
+
+ try enc.encode(.imul, .{
+ .op1 = .{ .reg = .bx },
+ .op2 = .{ .mem = Memory.sib(.word, .{
+ .base = .rbp,
+ .disp = -16,
+ }) },
+ .op3 = .{ .imm = Immediate.s(-1024) },
+ });
+ try expectEqualHexStrings(
+ "\x66\x69\x5D\xF0\x00\xFC",
+ enc.code(),
+ "imul bx, WORD PTR [rbp - 16], -1024",
+ );
+
+ try enc.encode(.imul, .{
+ .op1 = .{ .reg = .bx },
+ .op2 = .{ .mem = Memory.sib(.word, .{
+ .base = .rbp,
+ .disp = -16,
+ }) },
+ .op3 = .{ .imm = Immediate.u(1024) },
+ });
+ try expectEqualHexStrings(
+ "\x66\x69\x5D\xF0\x00\x04",
+ enc.code(),
+ "imul bx, WORD PTR [rbp - 16], 1024",
+ );
+}
+
+test "lower MR encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .reg = .rbx } });
+ try expectEqualHexStrings("\x48\x89\xD8", enc.code(), "mov rax, rbx");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = -4,
+ }) }, .op2 = .{ .reg = .r11 } });
+ try expectEqualHexStrings("\x4c\x89\x5d\xfc", enc.code(), "mov QWORD PTR [rbp - 4], r11");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.rip(.qword, 0x10) }, .op2 = .{ .reg = .r12 } });
+ try expectEqualHexStrings("\x4C\x89\x25\x10\x00\x00\x00", enc.code(), "mov QWORD PTR [rip + 0x10], r12");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .r11,
+ .scale_index = .{
+ .scale = 2,
+ .index = .r12,
+ },
+ .disp = 0x10,
+ }) }, .op2 = .{ .reg = .r13 } });
+ try expectEqualHexStrings("\x4F\x89\x6C\x63\x10", enc.code(), "mov QWORD PTR [r11 + 2 * r12 + 0x10], r13");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.rip(.word, -0x10) }, .op2 = .{ .reg = .r12w } });
+ try expectEqualHexStrings("\x66\x44\x89\x25\xF0\xFF\xFF\xFF", enc.code(), "mov WORD PTR [rip - 0x10], r12w");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.sib(.byte, .{
+ .base = .r11,
+ .scale_index = .{
+ .scale = 2,
+ .index = .r12,
+ },
+ .disp = 0x10,
+ }) }, .op2 = .{ .reg = .r13b } });
+ try expectEqualHexStrings("\x47\x88\x6C\x63\x10", enc.code(), "mov BYTE PTR [r11 + 2 * r12 + 0x10], r13b");
+
+ try enc.encode(.add, .{ .op1 = .{ .mem = Memory.sib(.byte, .{
+ .base = .ds,
+ .disp = 0x10000000,
+ }) }, .op2 = .{ .reg = .r12b } });
+ try expectEqualHexStrings("\x44\x00\x24\x25\x00\x00\x00\x10", enc.code(), "add BYTE PTR ds:0x10000000, r12b");
+
+ try enc.encode(.add, .{ .op1 = .{ .mem = Memory.sib(.dword, .{
+ .base = .ds,
+ .disp = 0x10000000,
+ }) }, .op2 = .{ .reg = .r12d } });
+ try expectEqualHexStrings("\x44\x01\x24\x25\x00\x00\x00\x10", enc.code(), "add DWORD PTR [ds:0x10000000], r12d");
+
+ try enc.encode(.add, .{ .op1 = .{ .mem = Memory.sib(.dword, .{
+ .base = .gs,
+ .disp = 0x10000000,
+ }) }, .op2 = .{ .reg = .r12d } });
+ try expectEqualHexStrings("\x65\x44\x01\x24\x25\x00\x00\x00\x10", enc.code(), "add DWORD PTR [gs:0x10000000], r12d");
+
+ try enc.encode(.sub, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .r11,
+ .disp = 0x10000000,
+ }) }, .op2 = .{ .reg = .r12 } });
+ try expectEqualHexStrings("\x4D\x29\xA3\x00\x00\x00\x10", enc.code(), "sub QWORD PTR [r11 + 0x10000000], r12");
+}
+
+test "lower M encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.call, .{ .op1 = .{ .reg = .r12 } });
+ try expectEqualHexStrings("\x41\xFF\xD4", enc.code(), "call r12");
+
+ try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .r12,
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x41\xFF\x14\x24", enc.code(), "call QWORD PTR [r12]");
+
+ try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = null,
+ .scale_index = .{
+ .index = .r11,
+ .scale = 2,
+ },
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x42\xFF\x14\x5D\x00\x00\x00\x00", enc.code(), "call QWORD PTR [r11 * 2]");
+
+ try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = null,
+ .scale_index = .{
+ .index = .r12,
+ .scale = 2,
+ },
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x42\xFF\x14\x65\x00\x00\x00\x00", enc.code(), "call QWORD PTR [r12 * 2]");
+
+ try enc.encode(.call, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .gs,
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x65\xFF\x14\x25\x00\x00\x00\x00", enc.code(), "call gs:0x0");
+
+ try enc.encode(.call, .{ .op1 = .{ .imm = Immediate.s(0) } });
+ try expectEqualHexStrings("\xE8\x00\x00\x00\x00", enc.code(), "call 0x0");
+
+ try enc.encode(.push, .{ .op1 = .{ .mem = Memory.sib(.qword, .{
+ .base = .rbp,
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\xFF\x75\x00", enc.code(), "push QWORD PTR [rbp]");
+
+ try enc.encode(.push, .{ .op1 = .{ .mem = Memory.sib(.word, .{
+ .base = .rbp,
+ .disp = 0,
+ }) } });
+ try expectEqualHexStrings("\x66\xFF\x75\x00", enc.code(), "push QWORD PTR [rbp]");
+
+ try enc.encode(.pop, .{ .op1 = .{ .mem = Memory.rip(.qword, 0) } });
+ try expectEqualHexStrings("\x8F\x05\x00\x00\x00\x00", enc.code(), "pop QWORD PTR [rip]");
+
+ try enc.encode(.pop, .{ .op1 = .{ .mem = Memory.rip(.word, 0) } });
+ try expectEqualHexStrings("\x66\x8F\x05\x00\x00\x00\x00", enc.code(), "pop WORD PTR [rbp]");
+
+ try enc.encode(.imul, .{ .op1 = .{ .reg = .rax } });
+ try expectEqualHexStrings("\x48\xF7\xE8", enc.code(), "imul rax");
+
+ try enc.encode(.imul, .{ .op1 = .{ .reg = .r12 } });
+ try expectEqualHexStrings("\x49\xF7\xEC", enc.code(), "imul r12");
+}
+
+test "lower O encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.push, .{ .op1 = .{ .reg = .rax } });
+ try expectEqualHexStrings("\x50", enc.code(), "push rax");
+
+ try enc.encode(.push, .{ .op1 = .{ .reg = .r12w } });
+ try expectEqualHexStrings("\x66\x41\x54", enc.code(), "push r12w");
+
+ try enc.encode(.pop, .{ .op1 = .{ .reg = .r12 } });
+ try expectEqualHexStrings("\x41\x5c", enc.code(), "pop r12");
+}
+
+test "lower OI encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .imm = Immediate.u(0x1000000000000000) } });
+ try expectEqualHexStrings(
+ "\x48\xB8\x00\x00\x00\x00\x00\x00\x00\x10",
+ enc.code(),
+ "movabs rax, 0x1000000000000000",
+ );
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .r11 }, .op2 = .{ .imm = Immediate.u(0x1000000000000000) } });
+ try expectEqualHexStrings(
+ "\x49\xBB\x00\x00\x00\x00\x00\x00\x00\x10",
+ enc.code(),
+ "movabs r11, 0x1000000000000000",
+ );
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .r11d }, .op2 = .{ .imm = Immediate.u(0x10000000) } });
+ try expectEqualHexStrings("\x41\xBB\x00\x00\x00\x10", enc.code(), "mov r11d, 0x10000000");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .r11w }, .op2 = .{ .imm = Immediate.u(0x1000) } });
+ try expectEqualHexStrings("\x66\x41\xBB\x00\x10", enc.code(), "mov r11w, 0x1000");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .r11b }, .op2 = .{ .imm = Immediate.u(0x10) } });
+ try expectEqualHexStrings("\x41\xB3\x10", enc.code(), "mov r11b, 0x10");
+}
+
+test "lower FD/TD encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .mem = Memory.moffs(.cs, 0x10) } });
+ try expectEqualHexStrings("\x2E\x48\xA1\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs rax, cs:0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .eax }, .op2 = .{ .mem = Memory.moffs(.fs, 0x10) } });
+ try expectEqualHexStrings("\x64\xA1\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs eax, fs:0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .ax }, .op2 = .{ .mem = Memory.moffs(.gs, 0x10) } });
+ try expectEqualHexStrings("\x65\x66\xA1\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs ax, gs:0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .reg = .al }, .op2 = .{ .mem = Memory.moffs(.ds, 0x10) } });
+ try expectEqualHexStrings("\xA0\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs al, ds:0x10");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.moffs(.cs, 0x10) }, .op2 = .{ .reg = .rax } });
+ try expectEqualHexStrings("\x2E\x48\xA3\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs cs:0x10, rax");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.moffs(.fs, 0x10) }, .op2 = .{ .reg = .eax } });
+ try expectEqualHexStrings("\x64\xA3\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs fs:0x10, eax");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.moffs(.gs, 0x10) }, .op2 = .{ .reg = .ax } });
+ try expectEqualHexStrings("\x65\x66\xA3\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs gs:0x10, ax");
+
+ try enc.encode(.mov, .{ .op1 = .{ .mem = Memory.moffs(.ds, 0x10) }, .op2 = .{ .reg = .al } });
+ try expectEqualHexStrings("\xA2\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs ds:0x10, al");
+}
+
+test "lower NP encoding" {
+ var enc = TestEncode{};
+
+ try enc.encode(.int3, .{});
+ try expectEqualHexStrings("\xCC", enc.code(), "int3");
+
+ try enc.encode(.nop, .{});
+ try expectEqualHexStrings("\x90", enc.code(), "nop");
+
+ try enc.encode(.ret, .{});
+ try expectEqualHexStrings("\xC3", enc.code(), "ret");
+
+ try enc.encode(.syscall, .{});
+ try expectEqualHexStrings("\x0f\x05", enc.code(), "syscall");
+}
+
+fn invalidInstruction(mnemonic: Instruction.Mnemonic, args: struct {
+ op1: Instruction.Operand = .none,
+ op2: Instruction.Operand = .none,
+ op3: Instruction.Operand = .none,
+ op4: Instruction.Operand = .none,
+}) !void {
+ const err = Instruction.new(mnemonic, .{
+ .op1 = args.op1,
+ .op2 = args.op2,
+ .op3 = args.op3,
+ .op4 = args.op4,
+ });
+ try testing.expectError(error.InvalidInstruction, err);
+}
+
+test "invalid instruction" {
+ try invalidInstruction(.call, .{ .op1 = .{ .reg = .eax } });
+ try invalidInstruction(.call, .{ .op1 = .{ .reg = .ax } });
+ try invalidInstruction(.call, .{ .op1 = .{ .reg = .al } });
+ try invalidInstruction(.call, .{ .op1 = .{ .mem = Memory.rip(.dword, 0) } });
+ try invalidInstruction(.call, .{ .op1 = .{ .mem = Memory.rip(.word, 0) } });
+ try invalidInstruction(.call, .{ .op1 = .{ .mem = Memory.rip(.byte, 0) } });
+ try invalidInstruction(.mov, .{ .op1 = .{ .mem = Memory.rip(.word, 0x10) }, .op2 = .{ .reg = .r12 } });
+ try invalidInstruction(.lea, .{ .op1 = .{ .reg = .rax }, .op2 = .{ .reg = .rbx } });
+ try invalidInstruction(.lea, .{ .op1 = .{ .reg = .al }, .op2 = .{ .mem = Memory.rip(.byte, 0) } });
+ try invalidInstruction(.pop, .{ .op1 = .{ .reg = .r12b } });
+ try invalidInstruction(.pop, .{ .op1 = .{ .reg = .r12d } });
+ try invalidInstruction(.push, .{ .op1 = .{ .reg = .r12b } });
+ try invalidInstruction(.push, .{ .op1 = .{ .reg = .r12d } });
+ try invalidInstruction(.push, .{ .op1 = .{ .imm = Immediate.u(0x1000000000000000) } });
+}
+
+fn cannotEncode(mnemonic: Instruction.Mnemonic, args: struct {
+ op1: Instruction.Operand = .none,
+ op2: Instruction.Operand = .none,
+ op3: Instruction.Operand = .none,
+ op4: Instruction.Operand = .none,
+}) !void {
+ try testing.expectError(error.CannotEncode, Instruction.new(mnemonic, .{
+ .op1 = args.op1,
+ .op2 = args.op2,
+ .op3 = args.op3,
+ .op4 = args.op4,
+ }));
+}
+
+test "cannot encode" {
+ try cannotEncode(.@"test", .{
+ .op1 = .{ .mem = Memory.sib(.byte, .{ .base = .r12, .disp = 0 }) },
+ .op2 = .{ .reg = .ah },
+ });
+ try cannotEncode(.@"test", .{
+ .op1 = .{ .reg = .r11b },
+ .op2 = .{ .reg = .bh },
+ });
+ try cannotEncode(.mov, .{
+ .op1 = .{ .reg = .sil },
+ .op2 = .{ .reg = .ah },
+ });
+}
+
+const Assembler = struct {
+ it: Tokenizer,
+
+ const Tokenizer = struct {
+ input: []const u8,
+ pos: usize = 0,
+
+ const Error = error{InvalidToken};
+
+ const Token = struct {
+ id: Id,
+ start: usize,
+ end: usize,
+
+ const Id = enum {
+ eof,
+
+ space,
+ new_line,
+
+ colon,
+ comma,
+ open_br,
+ close_br,
+ plus,
+ minus,
+ star,
+
+ string,
+ numeral,
+ };
+ };
+
+ const Iterator = struct {};
+
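+        // Single forward pass over the input; e.g. "mov rax, 0x10" yields
+        // string("mov"), space, string("rax"), comma, space, numeral("0x10"), eof.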
+ fn next(it: *Tokenizer) !Token {
+ var result = Token{
+ .id = .eof,
+ .start = it.pos,
+ .end = it.pos,
+ };
+
+ var state: enum {
+ start,
+ space,
+ new_line,
+ string,
+ numeral,
+ numeral_hex,
+ } = .start;
+
+ while (it.pos < it.input.len) : (it.pos += 1) {
+ const ch = it.input[it.pos];
+ switch (state) {
+ .start => switch (ch) {
+ ',' => {
+ result.id = .comma;
+ it.pos += 1;
+ break;
+ },
+ ':' => {
+ result.id = .colon;
+ it.pos += 1;
+ break;
+ },
+ '[' => {
+ result.id = .open_br;
+ it.pos += 1;
+ break;
+ },
+ ']' => {
+ result.id = .close_br;
+ it.pos += 1;
+ break;
+ },
+ '+' => {
+ result.id = .plus;
+ it.pos += 1;
+ break;
+ },
+ '-' => {
+ result.id = .minus;
+ it.pos += 1;
+ break;
+ },
+ '*' => {
+ result.id = .star;
+ it.pos += 1;
+ break;
+ },
+ ' ', '\t' => state = .space,
+ '\n', '\r' => state = .new_line,
+ 'a'...'z', 'A'...'Z' => state = .string,
+ '0'...'9' => state = .numeral,
+ else => return error.InvalidToken,
+ },
+
+ .space => switch (ch) {
+ ' ', '\t' => {},
+ else => {
+ result.id = .space;
+ break;
+ },
+ },
+
+ .new_line => switch (ch) {
+ '\n', '\r', ' ', '\t' => {},
+ else => {
+ result.id = .new_line;
+ break;
+ },
+ },
+
+ .string => switch (ch) {
+ 'a'...'z', 'A'...'Z', '0'...'9' => {},
+ else => {
+ result.id = .string;
+ break;
+ },
+ },
+
+ .numeral => switch (ch) {
+ 'x' => state = .numeral_hex,
+ '0'...'9' => {},
+ else => {
+ result.id = .numeral;
+ break;
+ },
+ },
+
+ .numeral_hex => switch (ch) {
+ 'a'...'f' => {},
+ '0'...'9' => {},
+ else => {
+ result.id = .numeral;
+ break;
+ },
+ },
+ }
+ }
+
+ if (it.pos >= it.input.len) {
+ switch (state) {
+ .string => result.id = .string,
+ .numeral, .numeral_hex => result.id = .numeral,
+ else => {},
+ }
+ }
+
+ result.end = it.pos;
+ return result;
+ }
+
+ fn seekTo(it: *Tokenizer, pos: usize) void {
+ it.pos = pos;
+ }
+ };
+
+ pub fn init(input: []const u8) Assembler {
+ return .{
+ .it = Tokenizer{ .input = input },
+ };
+ }
+
+ pub fn assemble(as: *Assembler, writer: anytype) !void {
+ while (try as.next()) |parsed_inst| {
+ const inst = try Instruction.new(parsed_inst.mnemonic, .{
+ .op1 = parsed_inst.ops[0],
+ .op2 = parsed_inst.ops[1],
+ .op3 = parsed_inst.ops[2],
+ .op4 = parsed_inst.ops[3],
+ });
+ try inst.encode(writer);
+ }
+ }
+
+ const ParseResult = struct {
+ mnemonic: Instruction.Mnemonic,
+ ops: [4]Instruction.Operand,
+ };
+
+ const ParseError = error{
+ UnexpectedToken,
+ InvalidMnemonic,
+ InvalidOperand,
+ InvalidRegister,
+ InvalidPtrSize,
+ InvalidMemoryOperand,
+ InvalidScaleIndex,
+ } || Tokenizer.Error || std.fmt.ParseIntError;
+
+ fn next(as: *Assembler) ParseError!?ParseResult {
+ try as.skip(2, .{ .space, .new_line });
+ const mnemonic_tok = as.expect(.string) catch |err| switch (err) {
+ error.UnexpectedToken => return if (try as.peek() == .eof) null else err,
+ else => return err,
+ };
+ const mnemonic = mnemonicFromString(as.source(mnemonic_tok)) orelse
+ return error.InvalidMnemonic;
+ try as.skip(1, .{.space});
+
+ const rules = .{
+ .{},
+ .{.register},
+ .{.memory},
+ .{.immediate},
+ .{ .register, .register },
+ .{ .register, .memory },
+ .{ .memory, .register },
+ .{ .register, .immediate },
+ .{ .memory, .immediate },
+ .{ .register, .register, .immediate },
+ .{ .register, .memory, .immediate },
+ };
+
+ const pos = as.it.pos;
+ inline for (rules) |rule| {
+ var ops = [4]Instruction.Operand{ .none, .none, .none, .none };
+ if (as.parseOperandRule(rule, &ops)) {
+ return .{
+ .mnemonic = mnemonic,
+ .ops = ops,
+ };
+ } else |_| {
+ as.it.seekTo(pos);
+ }
+ }
+
+ return error.InvalidOperand;
+ }
+
+ fn source(as: *Assembler, token: Tokenizer.Token) []const u8 {
+ return as.it.input[token.start..token.end];
+ }
+
+ fn peek(as: *Assembler) Tokenizer.Error!Tokenizer.Token.Id {
+ const pos = as.it.pos;
+ const next_tok = try as.it.next();
+ const id = next_tok.id;
+ as.it.seekTo(pos);
+ return id;
+ }
+
+ fn expect(as: *Assembler, id: Tokenizer.Token.Id) ParseError!Tokenizer.Token {
+ const next_tok_id = try as.peek();
+ if (next_tok_id == id) return as.it.next();
+ return error.UnexpectedToken;
+ }
+
+ fn skip(as: *Assembler, comptime num: comptime_int, tok_ids: [num]Tokenizer.Token.Id) Tokenizer.Error!void {
+ outer: while (true) {
+ const pos = as.it.pos;
+ const next_tok = try as.it.next();
+ inline for (tok_ids) |tok_id| {
+ if (next_tok.id == tok_id) continue :outer;
+ }
+ as.it.seekTo(pos);
+ break;
+ }
+ }
+
+ fn mnemonicFromString(bytes: []const u8) ?Instruction.Mnemonic {
+ const ti = @typeInfo(Instruction.Mnemonic).Enum;
+ inline for (ti.fields) |field| {
+ if (std.mem.eql(u8, bytes, field.name)) {
+ return @field(Instruction.Mnemonic, field.name);
+ }
+ }
+ return null;
+ }
+
+ fn parseOperandRule(as: *Assembler, rule: anytype, ops: *[4]Instruction.Operand) ParseError!void {
+ inline for (rule, 0..) |cond, i| {
+ comptime assert(i < 4);
+ if (i > 0) {
+ _ = try as.expect(.comma);
+ try as.skip(1, .{.space});
+ }
+ if (@typeInfo(@TypeOf(cond)) != .EnumLiteral) {
+ @compileError("invalid condition in the rule: " ++ @typeName(@TypeOf(cond)));
+ }
+ switch (cond) {
+ .register => {
+ const reg_tok = try as.expect(.string);
+ const reg = registerFromString(as.source(reg_tok)) orelse
+ return error.InvalidOperand;
+ ops[i] = .{ .reg = reg };
+ },
+ .memory => {
+ const mem = try as.parseMemory();
+ ops[i] = .{ .mem = mem };
+ },
+ .immediate => {
+ const is_neg = if (as.expect(.minus)) |_| true else |_| false;
+ const imm_tok = try as.expect(.numeral);
+ const imm: Immediate = if (is_neg) blk: {
+ const imm = try std.fmt.parseInt(i32, as.source(imm_tok), 0);
+ break :blk .{ .signed = imm * -1 };
+ } else .{ .unsigned = try std.fmt.parseInt(u64, as.source(imm_tok), 0) };
+ ops[i] = .{ .imm = imm };
+ },
+ else => @compileError("unhandled enum literal " ++ @tagName(cond)),
+ }
+ try as.skip(1, .{.space});
+ }
+
+ try as.skip(1, .{.space});
+ const tok = try as.it.next();
+ switch (tok.id) {
+ .new_line, .eof => {},
+ else => return error.InvalidOperand,
+ }
+ }
+
+ fn registerFromString(bytes: []const u8) ?Register {
+ const ti = @typeInfo(Register).Enum;
+ inline for (ti.fields) |field| {
+ if (std.mem.eql(u8, bytes, field.name)) {
+ return @field(Register, field.name);
+ }
+ }
+ return null;
+ }
+
+ fn parseMemory(as: *Assembler) ParseError!Memory {
+ const ptr_size: ?Memory.PtrSize = blk: {
+ const pos = as.it.pos;
+ const ptr_size = as.parsePtrSize() catch |err| switch (err) {
+ error.UnexpectedToken => {
+ as.it.seekTo(pos);
+ break :blk null;
+ },
+ else => return err,
+ };
+ break :blk ptr_size;
+ };
+
+ try as.skip(1, .{.space});
+
+ // Supported rules and orderings.
+ const rules = .{
+ .{ .open_br, .base, .close_br }, // [ base ]
+ .{ .open_br, .base, .plus, .disp, .close_br }, // [ base + disp ]
+ .{ .open_br, .base, .minus, .disp, .close_br }, // [ base - disp ]
+ .{ .open_br, .disp, .plus, .base, .close_br }, // [ disp + base ]
+ .{ .open_br, .base, .plus, .index, .close_br }, // [ base + index ]
+ .{ .open_br, .base, .plus, .index, .star, .scale, .close_br }, // [ base + index * scale ]
+ .{ .open_br, .index, .star, .scale, .plus, .base, .close_br }, // [ index * scale + base ]
+ .{ .open_br, .base, .plus, .index, .star, .scale, .plus, .disp, .close_br }, // [ base + index * scale + disp ]
+ .{ .open_br, .base, .plus, .index, .star, .scale, .minus, .disp, .close_br }, // [ base + index * scale - disp ]
+ .{ .open_br, .index, .star, .scale, .plus, .base, .plus, .disp, .close_br }, // [ index * scale + base + disp ]
+ .{ .open_br, .index, .star, .scale, .plus, .base, .minus, .disp, .close_br }, // [ index * scale + base - disp ]
+ .{ .open_br, .disp, .plus, .index, .star, .scale, .plus, .base, .close_br }, // [ disp + index * scale + base ]
+ .{ .open_br, .disp, .plus, .base, .plus, .index, .star, .scale, .close_br }, // [ disp + base + index * scale ]
+ .{ .open_br, .base, .plus, .disp, .plus, .index, .star, .scale, .close_br }, // [ base + disp + index * scale ]
+ .{ .open_br, .base, .minus, .disp, .plus, .index, .star, .scale, .close_br }, // [ base - disp + index * scale ]
+ .{ .open_br, .base, .plus, .disp, .plus, .scale, .star, .index, .close_br }, // [ base + disp + scale * index ]
+ .{ .open_br, .base, .minus, .disp, .plus, .scale, .star, .index, .close_br }, // [ base - disp + scale * index ]
+ .{ .open_br, .rip, .plus, .disp, .close_br }, // [ rip + disp ]
+            .{ .open_br, .rip, .minus, .disp, .close_br }, // [ rip - disp ]
+ .{ .base, .colon, .disp }, // seg:disp
+ };
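+        // For example, "[rbp + rax * 2 - 16]" matches the
+        // .{ .open_br, .base, .plus, .index, .star, .scale, .minus, .disp, .close_br } rule.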
+
+ const pos = as.it.pos;
+ inline for (rules) |rule| {
+ if (as.parseMemoryRule(rule)) |res| {
+ if (res.rip) {
+ if (res.base != null or res.scale_index != null or res.offset != null)
+ return error.InvalidMemoryOperand;
+ return Memory.rip(ptr_size orelse .qword, res.disp orelse 0);
+ }
+ if (res.base) |base| {
+ if (res.rip)
+ return error.InvalidMemoryOperand;
+ if (res.offset) |offset| {
+ if (res.scale_index != null or res.disp != null)
+ return error.InvalidMemoryOperand;
+ return Memory.moffs(base, offset);
+ }
+ return Memory.sib(ptr_size orelse .qword, .{
+ .base = base,
+ .scale_index = res.scale_index,
+ .disp = res.disp orelse 0,
+ });
+ }
+ return error.InvalidMemoryOperand;
+ } else |_| {
+ as.it.seekTo(pos);
+ }
+ }
+
+ return error.InvalidOperand;
+ }
+
+ const MemoryParseResult = struct {
+ rip: bool = false,
+ base: ?Register = null,
+ scale_index: ?Memory.ScaleIndex = null,
+ disp: ?i32 = null,
+ offset: ?u64 = null,
+ };
+
+ fn parseMemoryRule(as: *Assembler, rule: anytype) ParseError!MemoryParseResult {
+ var res: MemoryParseResult = .{};
+ inline for (rule, 0..) |cond, i| {
+ if (@typeInfo(@TypeOf(cond)) != .EnumLiteral) {
+ @compileError("unsupported condition type in the rule: " ++ @typeName(@TypeOf(cond)));
+ }
+ switch (cond) {
+ .open_br, .close_br, .plus, .minus, .star, .colon => {
+ _ = try as.expect(cond);
+ },
+ .base => {
+ const tok = try as.expect(.string);
+ res.base = registerFromString(as.source(tok)) orelse return error.InvalidMemoryOperand;
+ },
+ .rip => {
+ const tok = try as.expect(.string);
+ if (!std.mem.eql(u8, as.source(tok), "rip")) return error.InvalidMemoryOperand;
+ res.rip = true;
+ },
+ .index => {
+ const tok = try as.expect(.string);
+ const index = registerFromString(as.source(tok)) orelse
+ return error.InvalidMemoryOperand;
+ if (res.scale_index) |*si| {
+ si.index = index;
+ } else {
+ res.scale_index = .{ .scale = 1, .index = index };
+ }
+ },
+ .scale => {
+ const tok = try as.expect(.numeral);
+ const scale = try std.fmt.parseInt(u2, as.source(tok), 0);
+ if (res.scale_index) |*si| {
+ si.scale = scale;
+ } else {
+ res.scale_index = .{ .scale = scale, .index = undefined };
+ }
+ },
+ .disp => {
+ const tok = try as.expect(.numeral);
+ const is_neg = blk: {
+ if (i > 0) {
+ if (rule[i - 1] == .minus) break :blk true;
+ }
+ break :blk false;
+ };
+ if (std.fmt.parseInt(i32, as.source(tok), 0)) |disp| {
+ res.disp = if (is_neg) -1 * disp else disp;
+ } else |err| switch (err) {
+ error.Overflow => {
+ if (is_neg) return err;
+ if (res.base) |base| {
+ if (base.class() != .segment) return err;
+ }
+ const offset = try std.fmt.parseInt(u64, as.source(tok), 0);
+ res.offset = offset;
+ },
+ else => return err,
+ }
+ },
+ else => @compileError("unhandled operand output type: " ++ @tagName(cond)),
+ }
+ try as.skip(1, .{.space});
+ }
+ return res;
+ }
+
+ fn parsePtrSize(as: *Assembler) ParseError!Memory.PtrSize {
+ const size = try as.expect(.string);
+ try as.skip(1, .{.space});
+ const ptr = try as.expect(.string);
+
+ const size_raw = as.source(size);
+ const ptr_raw = as.source(ptr);
+ const len = size_raw.len + ptr_raw.len + 1;
+ var buf: ["qword ptr".len]u8 = undefined;
+ if (len > buf.len) return error.InvalidPtrSize;
+
+ for (size_raw, 0..) |c, i| {
+ buf[i] = std.ascii.toLower(c);
+ }
+ buf[size_raw.len] = ' ';
+ for (ptr_raw, 0..) |c, i| {
+ buf[size_raw.len + i + 1] = std.ascii.toLower(c);
+ }
+
+ const slice = buf[0..len];
+ if (std.mem.eql(u8, slice, "qword ptr")) return .qword;
+ if (std.mem.eql(u8, slice, "dword ptr")) return .dword;
+ if (std.mem.eql(u8, slice, "word ptr")) return .word;
+ if (std.mem.eql(u8, slice, "byte ptr")) return .byte;
+ if (std.mem.eql(u8, slice, "tbyte ptr")) return .tbyte;
+ return error.InvalidPtrSize;
+ }
+};
+
+test "assemble" {
+ const input =
+ \\int3
+ \\mov rax, rbx
+ \\mov qword ptr [rbp], rax
+ \\mov qword ptr [rbp - 16], rax
+ \\mov qword ptr [16 + rbp], rax
+ \\mov rax, 0x10
+ \\mov byte ptr [rbp - 0x10], 0x10
+ \\mov word ptr [rbp + r12], r11w
+ \\mov word ptr [rbp + r12 * 2], r11w
+ \\mov word ptr [rbp + r12 * 2 - 16], r11w
+ \\mov dword ptr [rip - 16], r12d
+ \\mov rax, fs:0x0
+ \\mov rax, gs:0x1000000000000000
+ \\movzx r12, al
+ \\imul r12, qword ptr [rbp - 16], 6
+ \\jmp 0x0
+ \\jc 0x0
+ \\jb 0x0
+ \\sal rax, 1
+ \\sal rax, 63
+ \\shl rax, 63
+ \\sar rax, 63
+ \\shr rax, 63
+ \\test byte ptr [rbp - 16], r12b
+ \\sal r12, cl
+ \\mul qword ptr [rip - 16]
+ \\div r12
+ \\idiv byte ptr [rbp - 16]
+ \\cwde
+ \\cbw
+ \\cdqe
+ \\test byte ptr [rbp], ah
+ \\test byte ptr [r12], spl
+ \\cdq
+ \\cwd
+ \\cqo
+ \\test bl, 0x1
+ \\mov rbx,0x8000000000000000
+ \\movss xmm0, dword ptr [rbp]
+ \\movss xmm0, xmm1
+ \\movss dword ptr [rbp - 16 + rax * 2], xmm7
+ \\movss dword ptr [rbp - 16 + rax * 2], xmm8
+ \\movss xmm15, xmm9
+ \\movsd xmm8, qword ptr [rbp - 16]
+ \\movsd qword ptr [rbp - 8], xmm0
+ \\movq xmm8, qword ptr [rbp - 16]
+ \\movq qword ptr [rbp - 16], xmm8
+ \\ucomisd xmm0, qword ptr [rbp - 16]
+ \\fisttp qword ptr [rbp - 16]
+ \\fisttp word ptr [rip + 32]
+ \\fisttp dword ptr [rax]
+ \\fld tbyte ptr [rbp]
+ \\fld dword ptr [rbp]
+ \\xor bl, 0xff
+ \\ud2
+ \\add rsp, -1
+ \\add rsp, 0xff
+ \\mov sil, byte ptr [rax + rcx * 1]
+ \\
+ ;
+
+ // zig fmt: off
+ const expected = &[_]u8{
+ 0xCC,
+ 0x48, 0x89, 0xD8,
+ 0x48, 0x89, 0x45, 0x00,
+ 0x48, 0x89, 0x45, 0xF0,
+ 0x48, 0x89, 0x45, 0x10,
+ 0x48, 0xC7, 0xC0, 0x10, 0x00, 0x00, 0x00,
+ 0xC6, 0x45, 0xF0, 0x10,
+ 0x66, 0x46, 0x89, 0x5C, 0x25, 0x00,
+ 0x66, 0x46, 0x89, 0x5C, 0x65, 0x00,
+ 0x66, 0x46, 0x89, 0x5C, 0x65, 0xF0,
+ 0x44, 0x89, 0x25, 0xF0, 0xFF, 0xFF, 0xFF,
+ 0x64, 0x48, 0x8B, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00,
+ 0x65, 0x48, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+ 0x4C, 0x0F, 0xB6, 0xE0,
+ 0x4C, 0x6B, 0x65, 0xF0, 0x06,
+ 0xE9, 0x00, 0x00, 0x00, 0x00,
+ 0x0F, 0x82, 0x00, 0x00, 0x00, 0x00,
+ 0x0F, 0x82, 0x00, 0x00, 0x00, 0x00,
+ 0x48, 0xD1, 0xE0,
+ 0x48, 0xC1, 0xE0, 0x3F,
+ 0x48, 0xC1, 0xE0, 0x3F,
+ 0x48, 0xC1, 0xF8, 0x3F,
+ 0x48, 0xC1, 0xE8, 0x3F,
+ 0x44, 0x84, 0x65, 0xF0,
+ 0x49, 0xD3, 0xE4,
+ 0x48, 0xF7, 0x25, 0xF0, 0xFF, 0xFF, 0xFF,
+ 0x49, 0xF7, 0xF4,
+ 0xF6, 0x7D, 0xF0,
+ 0x98,
+ 0x66, 0x98,
+ 0x48, 0x98,
+ 0x84, 0x65, 0x00,
+ 0x41, 0x84, 0x24, 0x24,
+ 0x99,
+ 0x66, 0x99,
+ 0x48, 0x99,
+ 0xF6, 0xC3, 0x01,
+ 0x48, 0xBB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xF3, 0x0F, 0x10, 0x45, 0x00,
+ 0xF3, 0x0F, 0x10, 0xC1,
+ 0xF3, 0x0F, 0x11, 0x7C, 0x45, 0xF0,
+ 0xF3, 0x44, 0x0F, 0x11, 0x44, 0x45, 0xF0,
+ 0xF3, 0x45, 0x0F, 0x10, 0xF9,
+ 0xF2, 0x44, 0x0F, 0x10, 0x45, 0xF0,
+ 0xF2, 0x0F, 0x11, 0x45, 0xF8,
+ 0xF3, 0x44, 0x0F, 0x7E, 0x45, 0xF0,
+ 0x66, 0x44, 0x0F, 0xD6, 0x45, 0xF0,
+ 0x66, 0x0F, 0x2E, 0x45, 0xF0,
+ 0xDD, 0x4D, 0xF0,
+ 0xDF, 0x0D, 0x20, 0x00, 0x00, 0x00,
+ 0xDB, 0x08,
+ 0xDB, 0x6D, 0x00,
+ 0xD9, 0x45, 0x00,
+ 0x80, 0xF3, 0xFF,
+ 0x0F, 0x0B,
+ 0x48, 0x83, 0xC4, 0xFF,
+ 0x48, 0x81, 0xC4, 0xFF, 0x00, 0x00, 0x00,
+ 0x40, 0x8A, 0x34, 0x08,
+ };
+ // zig fmt: on
+
+ var as = Assembler.init(input);
+ var output = std.ArrayList(u8).init(testing.allocator);
+ defer output.deinit();
+ try as.assemble(output.writer());
+ try expectEqualHexStrings(expected, output.items, input);
+}
+
+test "assemble - Jcc" {
+ const mnemonics = [_]struct { Instruction.Mnemonic, u8 }{
+ .{ .ja, 0x87 },
+ .{ .jae, 0x83 },
+ .{ .jb, 0x82 },
+ .{ .jbe, 0x86 },
+ .{ .jc, 0x82 },
+ .{ .je, 0x84 },
+ .{ .jg, 0x8f },
+ .{ .jge, 0x8d },
+ .{ .jl, 0x8c },
+ .{ .jle, 0x8e },
+ .{ .jna, 0x86 },
+ .{ .jnae, 0x82 },
+ .{ .jnb, 0x83 },
+ .{ .jnbe, 0x87 },
+ .{ .jnc, 0x83 },
+ .{ .jne, 0x85 },
+ .{ .jng, 0x8e },
+ .{ .jnge, 0x8c },
+ .{ .jnl, 0x8d },
+ .{ .jnle, 0x8f },
+ .{ .jno, 0x81 },
+ .{ .jnp, 0x8b },
+ .{ .jns, 0x89 },
+ .{ .jnz, 0x85 },
+ .{ .jo, 0x80 },
+ .{ .jp, 0x8a },
+ .{ .jpe, 0x8a },
+ .{ .jpo, 0x8b },
+ .{ .js, 0x88 },
+ .{ .jz, 0x84 },
+ };
+
+ inline for (&mnemonics) |mnemonic| {
+ const input = @tagName(mnemonic[0]) ++ " 0x0";
+ const expected = [_]u8{ 0x0f, mnemonic[1], 0x0, 0x0, 0x0, 0x0 };
+ var as = Assembler.init(input);
+ var output = std.ArrayList(u8).init(testing.allocator);
+ defer output.deinit();
+ try as.assemble(output.writer());
+ try expectEqualHexStrings(&expected, output.items, input);
+ }
+}
+
+test "assemble - SETcc" {
+ const mnemonics = [_]struct { Instruction.Mnemonic, u8 }{
+ .{ .seta, 0x97 },
+ .{ .setae, 0x93 },
+ .{ .setb, 0x92 },
+ .{ .setbe, 0x96 },
+ .{ .setc, 0x92 },
+ .{ .sete, 0x94 },
+ .{ .setg, 0x9f },
+ .{ .setge, 0x9d },
+ .{ .setl, 0x9c },
+ .{ .setle, 0x9e },
+ .{ .setna, 0x96 },
+ .{ .setnae, 0x92 },
+ .{ .setnb, 0x93 },
+ .{ .setnbe, 0x97 },
+ .{ .setnc, 0x93 },
+ .{ .setne, 0x95 },
+ .{ .setng, 0x9e },
+ .{ .setnge, 0x9c },
+ .{ .setnl, 0x9d },
+ .{ .setnle, 0x9f },
+ .{ .setno, 0x91 },
+ .{ .setnp, 0x9b },
+ .{ .setns, 0x99 },
+ .{ .setnz, 0x95 },
+ .{ .seto, 0x90 },
+ .{ .setp, 0x9a },
+ .{ .setpe, 0x9a },
+ .{ .setpo, 0x9b },
+ .{ .sets, 0x98 },
+ .{ .setz, 0x94 },
+ };
+
+ inline for (&mnemonics) |mnemonic| {
+ const input = @tagName(mnemonic[0]) ++ " al";
+ const expected = [_]u8{ 0x0f, mnemonic[1], 0xC0 };
+ var as = Assembler.init(input);
+ var output = std.ArrayList(u8).init(testing.allocator);
+ defer output.deinit();
+ try as.assemble(output.writer());
+ try expectEqualHexStrings(&expected, output.items, input);
+ }
+}
+
+test "assemble - CMOVcc" {
+ const mnemonics = [_]struct { Instruction.Mnemonic, u8 }{
+ .{ .cmova, 0x47 },
+ .{ .cmovae, 0x43 },
+ .{ .cmovb, 0x42 },
+ .{ .cmovbe, 0x46 },
+ .{ .cmovc, 0x42 },
+ .{ .cmove, 0x44 },
+ .{ .cmovg, 0x4f },
+ .{ .cmovge, 0x4d },
+ .{ .cmovl, 0x4c },
+ .{ .cmovle, 0x4e },
+ .{ .cmovna, 0x46 },
+ .{ .cmovnae, 0x42 },
+ .{ .cmovnb, 0x43 },
+ .{ .cmovnbe, 0x47 },
+ .{ .cmovnc, 0x43 },
+ .{ .cmovne, 0x45 },
+ .{ .cmovng, 0x4e },
+ .{ .cmovnge, 0x4c },
+ .{ .cmovnl, 0x4d },
+ .{ .cmovnle, 0x4f },
+ .{ .cmovno, 0x41 },
+ .{ .cmovnp, 0x4b },
+ .{ .cmovns, 0x49 },
+ .{ .cmovnz, 0x45 },
+ .{ .cmovo, 0x40 },
+ .{ .cmovp, 0x4a },
+ .{ .cmovpe, 0x4a },
+ .{ .cmovpo, 0x4b },
+ .{ .cmovs, 0x48 },
+ .{ .cmovz, 0x44 },
+ };
+
+ inline for (&mnemonics) |mnemonic| {
+ const input = @tagName(mnemonic[0]) ++ " rax, rbx";
+ const expected = [_]u8{ 0x48, 0x0f, mnemonic[1], 0xC3 };
+ var as = Assembler.init(input);
+ var output = std.ArrayList(u8).init(testing.allocator);
+ defer output.deinit();
+ try as.assemble(output.writer());
+ try expectEqualHexStrings(&expected, output.items, input);
+ }
+}
diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig
new file mode 100644
index 0000000000..b008eb9f3e
--- /dev/null
+++ b/src/arch/x86_64/encodings.zig
@@ -0,0 +1,621 @@
+const Encoding = @import("Encoding.zig");
+const Mnemonic = Encoding.Mnemonic;
+const OpEn = Encoding.OpEn;
+const Op = Encoding.Op;
+const Mode = Encoding.Mode;
+
+const opcode_len = u2;
+const modrm_ext = u3;
+
+const Entry = struct { Mnemonic, OpEn, Op, Op, Op, Op, opcode_len, u8, u8, u8, modrm_ext, Mode };
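+// Field order per entry: mnemonic, operand encoding, four operand kinds, number of
+// opcode bytes, up to three opcode bytes, the ModRM /digit opcode extension, and the
+// required mode (.none, .rex, .long, or .fpu).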
+
+// TODO move this into a .zon file when Zig is capable of importing .zon files
+// zig fmt: off
+pub const table = &[_]Entry{
+ // General-purpose
+ .{ .adc, .zi, .al, .imm8, .none, .none, 1, 0x14, 0x00, 0x00, 0, .none },
+ .{ .adc, .zi, .ax, .imm16, .none, .none, 1, 0x15, 0x00, 0x00, 0, .none },
+ .{ .adc, .zi, .eax, .imm32, .none, .none, 1, 0x15, 0x00, 0x00, 0, .none },
+ .{ .adc, .zi, .rax, .imm32s, .none, .none, 1, 0x15, 0x00, 0x00, 0, .long },
+ .{ .adc, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 2, .none },
+ .{ .adc, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 2, .rex },
+ .{ .adc, .mi, .rm16, .imm16, .none, .none, 1, 0x81, 0x00, 0x00, 2, .none },
+ .{ .adc, .mi, .rm32, .imm32, .none, .none, 1, 0x81, 0x00, 0x00, 2, .none },
+ .{ .adc, .mi, .rm64, .imm32s, .none, .none, 1, 0x81, 0x00, 0x00, 2, .long },
+ .{ .adc, .mi, .rm16, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 2, .none },
+ .{ .adc, .mi, .rm32, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 2, .none },
+ .{ .adc, .mi, .rm64, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 2, .long },
+ .{ .adc, .mr, .rm8, .r8, .none, .none, 1, 0x10, 0x00, 0x00, 0, .none },
+ .{ .adc, .mr, .rm8, .r8, .none, .none, 1, 0x10, 0x00, 0x00, 0, .rex },
+ .{ .adc, .mr, .rm16, .r16, .none, .none, 1, 0x11, 0x00, 0x00, 0, .none },
+ .{ .adc, .mr, .rm32, .r32, .none, .none, 1, 0x11, 0x00, 0x00, 0, .none },
+ .{ .adc, .mr, .rm64, .r64, .none, .none, 1, 0x11, 0x00, 0x00, 0, .long },
+ .{ .adc, .rm, .r8, .rm8, .none, .none, 1, 0x12, 0x00, 0x00, 0, .none },
+ .{ .adc, .rm, .r8, .rm8, .none, .none, 1, 0x12, 0x00, 0x00, 0, .rex },
+ .{ .adc, .rm, .r16, .rm16, .none, .none, 1, 0x13, 0x00, 0x00, 0, .none },
+ .{ .adc, .rm, .r32, .rm32, .none, .none, 1, 0x13, 0x00, 0x00, 0, .none },
+ .{ .adc, .rm, .r64, .rm64, .none, .none, 1, 0x13, 0x00, 0x00, 0, .long },
+
+ .{ .add, .zi, .al, .imm8, .none, .none, 1, 0x04, 0x00, 0x00, 0, .none },
+ .{ .add, .zi, .ax, .imm16, .none, .none, 1, 0x05, 0x00, 0x00, 0, .none },
+ .{ .add, .zi, .eax, .imm32, .none, .none, 1, 0x05, 0x00, 0x00, 0, .none },
+ .{ .add, .zi, .rax, .imm32s, .none, .none, 1, 0x05, 0x00, 0x00, 0, .long },
+ .{ .add, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 0, .none },
+ .{ .add, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 0, .rex },
+ .{ .add, .mi, .rm16, .imm16, .none, .none, 1, 0x81, 0x00, 0x00, 0, .none },
+ .{ .add, .mi, .rm32, .imm32, .none, .none, 1, 0x81, 0x00, 0x00, 0, .none },
+ .{ .add, .mi, .rm64, .imm32s, .none, .none, 1, 0x81, 0x00, 0x00, 0, .long },
+ .{ .add, .mi, .rm16, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 0, .none },
+ .{ .add, .mi, .rm32, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 0, .none },
+ .{ .add, .mi, .rm64, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 0, .long },
+ .{ .add, .mr, .rm8, .r8, .none, .none, 1, 0x00, 0x00, 0x00, 0, .none },
+ .{ .add, .mr, .rm8, .r8, .none, .none, 1, 0x00, 0x00, 0x00, 0, .rex },
+ .{ .add, .mr, .rm16, .r16, .none, .none, 1, 0x01, 0x00, 0x00, 0, .none },
+ .{ .add, .mr, .rm32, .r32, .none, .none, 1, 0x01, 0x00, 0x00, 0, .none },
+ .{ .add, .mr, .rm64, .r64, .none, .none, 1, 0x01, 0x00, 0x00, 0, .long },
+ .{ .add, .rm, .r8, .rm8, .none, .none, 1, 0x02, 0x00, 0x00, 0, .none },
+ .{ .add, .rm, .r8, .rm8, .none, .none, 1, 0x02, 0x00, 0x00, 0, .rex },
+ .{ .add, .rm, .r16, .rm16, .none, .none, 1, 0x03, 0x00, 0x00, 0, .none },
+ .{ .add, .rm, .r32, .rm32, .none, .none, 1, 0x03, 0x00, 0x00, 0, .none },
+ .{ .add, .rm, .r64, .rm64, .none, .none, 1, 0x03, 0x00, 0x00, 0, .long },
+
+ .{ .@"and", .zi, .al, .imm8, .none, .none, 1, 0x24, 0x00, 0x00, 0, .none },
+ .{ .@"and", .zi, .ax, .imm16, .none, .none, 1, 0x25, 0x00, 0x00, 0, .none },
+ .{ .@"and", .zi, .eax, .imm32, .none, .none, 1, 0x25, 0x00, 0x00, 0, .none },
+ .{ .@"and", .zi, .rax, .imm32s, .none, .none, 1, 0x25, 0x00, 0x00, 0, .long },
+ .{ .@"and", .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 4, .none },
+ .{ .@"and", .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 4, .rex },
+ .{ .@"and", .mi, .rm16, .imm16, .none, .none, 1, 0x81, 0x00, 0x00, 4, .none },
+ .{ .@"and", .mi, .rm32, .imm32, .none, .none, 1, 0x81, 0x00, 0x00, 4, .none },
+ .{ .@"and", .mi, .rm64, .imm32s, .none, .none, 1, 0x81, 0x00, 0x00, 4, .long },
+ .{ .@"and", .mi, .rm16, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 4, .none },
+ .{ .@"and", .mi, .rm32, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 4, .none },
+ .{ .@"and", .mi, .rm64, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 4, .long },
+ .{ .@"and", .mr, .rm8, .r8, .none, .none, 1, 0x20, 0x00, 0x00, 0, .none },
+ .{ .@"and", .mr, .rm8, .r8, .none, .none, 1, 0x20, 0x00, 0x00, 0, .rex },
+ .{ .@"and", .mr, .rm16, .r16, .none, .none, 1, 0x21, 0x00, 0x00, 0, .none },
+ .{ .@"and", .mr, .rm32, .r32, .none, .none, 1, 0x21, 0x00, 0x00, 0, .none },
+ .{ .@"and", .mr, .rm64, .r64, .none, .none, 1, 0x21, 0x00, 0x00, 0, .long },
+ .{ .@"and", .rm, .r8, .rm8, .none, .none, 1, 0x22, 0x00, 0x00, 0, .none },
+ .{ .@"and", .rm, .r8, .rm8, .none, .none, 1, 0x22, 0x00, 0x00, 0, .rex },
+ .{ .@"and", .rm, .r16, .rm16, .none, .none, 1, 0x23, 0x00, 0x00, 0, .none },
+ .{ .@"and", .rm, .r32, .rm32, .none, .none, 1, 0x23, 0x00, 0x00, 0, .none },
+ .{ .@"and", .rm, .r64, .rm64, .none, .none, 1, 0x23, 0x00, 0x00, 0, .long },
+
+ // This is M encoding according to Intel, but D makes more sense here.
+ .{ .call, .d, .rel32, .none, .none, .none, 1, 0xe8, 0x00, 0x00, 0, .none },
+ .{ .call, .m, .rm64, .none, .none, .none, 1, 0xff, 0x00, 0x00, 2, .none },
+
+ .{ .cbw, .np, .o16, .none, .none, .none, 1, 0x98, 0x00, 0x00, 0, .none },
+ .{ .cwde, .np, .o32, .none, .none, .none, 1, 0x98, 0x00, 0x00, 0, .none },
+ .{ .cdqe, .np, .o64, .none, .none, .none, 1, 0x98, 0x00, 0x00, 0, .long },
+
+ .{ .cwd, .np, .o16, .none, .none, .none, 1, 0x99, 0x00, 0x00, 0, .none },
+ .{ .cdq, .np, .o32, .none, .none, .none, 1, 0x99, 0x00, 0x00, 0, .none },
+ .{ .cqo, .np, .o64, .none, .none, .none, 1, 0x99, 0x00, 0x00, 0, .long },
+
+ .{ .cmova, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x47, 0x00, 0, .none },
+ .{ .cmova, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x47, 0x00, 0, .none },
+ .{ .cmova, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x47, 0x00, 0, .long },
+ .{ .cmovae, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .none },
+ .{ .cmovae, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .none },
+ .{ .cmovae, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .long },
+ .{ .cmovb, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .none },
+ .{ .cmovb, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .none },
+ .{ .cmovb, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .long },
+ .{ .cmovbe, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x46, 0x00, 0, .none },
+ .{ .cmovbe, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x46, 0x00, 0, .none },
+ .{ .cmovbe, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x46, 0x00, 0, .long },
+ .{ .cmovc, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .none },
+ .{ .cmovc, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .none },
+ .{ .cmovc, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .long },
+ .{ .cmove, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x44, 0x00, 0, .none },
+ .{ .cmove, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x44, 0x00, 0, .none },
+ .{ .cmove, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x44, 0x00, 0, .long },
+ .{ .cmovg, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4f, 0x00, 0, .none },
+ .{ .cmovg, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4f, 0x00, 0, .none },
+ .{ .cmovg, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4f, 0x00, 0, .long },
+ .{ .cmovge, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4d, 0x00, 0, .none },
+ .{ .cmovge, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4d, 0x00, 0, .none },
+ .{ .cmovge, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4d, 0x00, 0, .long },
+ .{ .cmovl, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4c, 0x00, 0, .none },
+ .{ .cmovl, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4c, 0x00, 0, .none },
+ .{ .cmovl, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4c, 0x00, 0, .long },
+ .{ .cmovle, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4e, 0x00, 0, .none },
+ .{ .cmovle, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4e, 0x00, 0, .none },
+ .{ .cmovle, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4e, 0x00, 0, .long },
+ .{ .cmovna, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x46, 0x00, 0, .none },
+ .{ .cmovna, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x46, 0x00, 0, .none },
+ .{ .cmovna, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x46, 0x00, 0, .long },
+ .{ .cmovnae, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .none },
+ .{ .cmovnae, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .none },
+ .{ .cmovnae, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x42, 0x00, 0, .long },
+ .{ .cmovnb, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .none },
+ .{ .cmovnb, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .none },
+ .{ .cmovnb, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .long },
+ .{ .cmovnbe, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x47, 0x00, 0, .none },
+ .{ .cmovnbe, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x47, 0x00, 0, .none },
+ .{ .cmovnbe, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x47, 0x00, 0, .long },
+ .{ .cmovnc, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .none },
+ .{ .cmovnc, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .none },
+ .{ .cmovnc, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x43, 0x00, 0, .long },
+ .{ .cmovne, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x45, 0x00, 0, .none },
+ .{ .cmovne, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x45, 0x00, 0, .none },
+ .{ .cmovne, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x45, 0x00, 0, .long },
+ .{ .cmovng, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4e, 0x00, 0, .none },
+ .{ .cmovng, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4e, 0x00, 0, .none },
+ .{ .cmovng, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4e, 0x00, 0, .long },
+ .{ .cmovnge, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4c, 0x00, 0, .none },
+ .{ .cmovnge, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4c, 0x00, 0, .none },
+ .{ .cmovnge, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4c, 0x00, 0, .long },
+ .{ .cmovnl, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4d, 0x00, 0, .none },
+ .{ .cmovnl, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4d, 0x00, 0, .none },
+ .{ .cmovnl, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4d, 0x00, 0, .long },
+ .{ .cmovnle, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4f, 0x00, 0, .none },
+ .{ .cmovnle, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4f, 0x00, 0, .none },
+ .{ .cmovnle, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4f, 0x00, 0, .long },
+ .{ .cmovno, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x41, 0x00, 0, .none },
+ .{ .cmovno, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x41, 0x00, 0, .none },
+ .{ .cmovno, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x41, 0x00, 0, .long },
+ .{ .cmovnp, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4b, 0x00, 0, .none },
+ .{ .cmovnp, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4b, 0x00, 0, .none },
+ .{ .cmovnp, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4b, 0x00, 0, .long },
+ .{ .cmovns, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x49, 0x00, 0, .none },
+ .{ .cmovns, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x49, 0x00, 0, .none },
+ .{ .cmovns, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x49, 0x00, 0, .long },
+ .{ .cmovnz, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x45, 0x00, 0, .none },
+ .{ .cmovnz, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x45, 0x00, 0, .none },
+ .{ .cmovnz, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x45, 0x00, 0, .long },
+ .{ .cmovo, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x40, 0x00, 0, .none },
+ .{ .cmovo, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x40, 0x00, 0, .none },
+ .{ .cmovo, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x40, 0x00, 0, .long },
+ .{ .cmovp, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4a, 0x00, 0, .none },
+ .{ .cmovp, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4a, 0x00, 0, .none },
+ .{ .cmovp, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4a, 0x00, 0, .long },
+ .{ .cmovpe, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4a, 0x00, 0, .none },
+ .{ .cmovpe, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4a, 0x00, 0, .none },
+ .{ .cmovpe, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4a, 0x00, 0, .long },
+ .{ .cmovpo, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x4b, 0x00, 0, .none },
+ .{ .cmovpo, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x4b, 0x00, 0, .none },
+ .{ .cmovpo, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x4b, 0x00, 0, .long },
+ .{ .cmovs, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x48, 0x00, 0, .none },
+ .{ .cmovs, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x48, 0x00, 0, .none },
+ .{ .cmovs, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x48, 0x00, 0, .long },
+ .{ .cmovz, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0x44, 0x00, 0, .none },
+ .{ .cmovz, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0x44, 0x00, 0, .none },
+ .{ .cmovz, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0x44, 0x00, 0, .long },
+
+ .{ .cmp, .zi, .al, .imm8, .none, .none, 1, 0x3c, 0x00, 0x00, 0, .none },
+ .{ .cmp, .zi, .ax, .imm16, .none, .none, 1, 0x3d, 0x00, 0x00, 0, .none },
+ .{ .cmp, .zi, .eax, .imm32, .none, .none, 1, 0x3d, 0x00, 0x00, 0, .none },
+ .{ .cmp, .zi, .rax, .imm32s, .none, .none, 1, 0x3d, 0x00, 0x00, 0, .long },
+ .{ .cmp, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 7, .none },
+ .{ .cmp, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 7, .rex },
+ .{ .cmp, .mi, .rm16, .imm16, .none, .none, 1, 0x81, 0x00, 0x00, 7, .none },
+ .{ .cmp, .mi, .rm32, .imm32, .none, .none, 1, 0x81, 0x00, 0x00, 7, .none },
+ .{ .cmp, .mi, .rm64, .imm32s, .none, .none, 1, 0x81, 0x00, 0x00, 7, .long },
+ .{ .cmp, .mi, .rm16, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 7, .none },
+ .{ .cmp, .mi, .rm32, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 7, .none },
+ .{ .cmp, .mi, .rm64, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 7, .long },
+ .{ .cmp, .mr, .rm8, .r8, .none, .none, 1, 0x38, 0x00, 0x00, 0, .none },
+ .{ .cmp, .mr, .rm8, .r8, .none, .none, 1, 0x38, 0x00, 0x00, 0, .rex },
+ .{ .cmp, .mr, .rm16, .r16, .none, .none, 1, 0x39, 0x00, 0x00, 0, .none },
+ .{ .cmp, .mr, .rm32, .r32, .none, .none, 1, 0x39, 0x00, 0x00, 0, .none },
+ .{ .cmp, .mr, .rm64, .r64, .none, .none, 1, 0x39, 0x00, 0x00, 0, .long },
+ .{ .cmp, .rm, .r8, .rm8, .none, .none, 1, 0x3a, 0x00, 0x00, 0, .none },
+ .{ .cmp, .rm, .r8, .rm8, .none, .none, 1, 0x3a, 0x00, 0x00, 0, .rex },
+ .{ .cmp, .rm, .r16, .rm16, .none, .none, 1, 0x3b, 0x00, 0x00, 0, .none },
+ .{ .cmp, .rm, .r32, .rm32, .none, .none, 1, 0x3b, 0x00, 0x00, 0, .none },
+ .{ .cmp, .rm, .r64, .rm64, .none, .none, 1, 0x3b, 0x00, 0x00, 0, .long },
+
+ .{ .div, .m, .rm8, .none, .none, .none, 1, 0xf6, 0x00, 0x00, 6, .none },
+ .{ .div, .m, .rm8, .none, .none, .none, 1, 0xf6, 0x00, 0x00, 6, .rex },
+ .{ .div, .m, .rm16, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 6, .none },
+ .{ .div, .m, .rm32, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 6, .none },
+ .{ .div, .m, .rm64, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 6, .long },
+
+ .{ .fisttp, .m, .m16, .none, .none, .none, 1, 0xdf, 0x00, 0x00, 1, .fpu },
+ .{ .fisttp, .m, .m32, .none, .none, .none, 1, 0xdb, 0x00, 0x00, 1, .fpu },
+ .{ .fisttp, .m, .m64, .none, .none, .none, 1, 0xdd, 0x00, 0x00, 1, .fpu },
+
+ .{ .fld, .m, .m32, .none, .none, .none, 1, 0xd9, 0x00, 0x00, 0, .fpu },
+ .{ .fld, .m, .m64, .none, .none, .none, 1, 0xdd, 0x00, 0x00, 0, .fpu },
+ .{ .fld, .m, .m80, .none, .none, .none, 1, 0xdb, 0x00, 0x00, 5, .fpu },
+
+ .{ .idiv, .m, .rm8, .none, .none, .none, 1, 0xf6, 0x00, 0x00, 7, .none },
+ .{ .idiv, .m, .rm8, .none, .none, .none, 1, 0xf6, 0x00, 0x00, 7, .rex },
+ .{ .idiv, .m, .rm16, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 7, .none },
+ .{ .idiv, .m, .rm32, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 7, .none },
+ .{ .idiv, .m, .rm64, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 7, .long },
+
+ .{ .imul, .m, .rm8, .none, .none, .none, 1, 0xf6, 0x00, 0x00, 5, .none },
+ .{ .imul, .m, .rm8, .none, .none, .none, 1, 0xf6, 0x00, 0x00, 5, .rex },
+ .{ .imul, .m, .rm16, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 5, .none },
+ .{ .imul, .m, .rm32, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 5, .none },
+ .{ .imul, .m, .rm64, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 5, .long },
+ .{ .imul, .rm, .r16, .rm16, .none, .none, 2, 0x0f, 0xaf, 0x00, 0, .none },
+ .{ .imul, .rm, .r32, .rm32, .none, .none, 2, 0x0f, 0xaf, 0x00, 0, .none },
+ .{ .imul, .rm, .r64, .rm64, .none, .none, 2, 0x0f, 0xaf, 0x00, 0, .long },
+ .{ .imul, .rmi, .r16, .rm16, .imm8s, .none, 1, 0x6b, 0x00, 0x00, 0, .none },
+ .{ .imul, .rmi, .r32, .rm32, .imm8s, .none, 1, 0x6b, 0x00, 0x00, 0, .none },
+ .{ .imul, .rmi, .r64, .rm64, .imm8s, .none, 1, 0x6b, 0x00, 0x00, 0, .long },
+ .{ .imul, .rmi, .r16, .rm16, .imm16, .none, 1, 0x69, 0x00, 0x00, 0, .none },
+ .{ .imul, .rmi, .r32, .rm32, .imm32, .none, 1, 0x69, 0x00, 0x00, 0, .none },
+ .{ .imul, .rmi, .r64, .rm64, .imm32, .none, 1, 0x69, 0x00, 0x00, 0, .long },
+
+ .{ .int3, .np, .none, .none, .none, .none, 1, 0xcc, 0x00, 0x00, 0, .none },
+
+ .{ .ja, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x87, 0x00, 0, .none },
+ .{ .jae, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x83, 0x00, 0, .none },
+ .{ .jb, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x82, 0x00, 0, .none },
+ .{ .jbe, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x86, 0x00, 0, .none },
+ .{ .jc, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x82, 0x00, 0, .none },
+ .{ .jrcxz, .d, .rel32, .none, .none, .none, 1, 0xe3, 0x00, 0x00, 0, .none },
+ .{ .je, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x84, 0x00, 0, .none },
+ .{ .jg, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8f, 0x00, 0, .none },
+ .{ .jge, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8d, 0x00, 0, .none },
+ .{ .jl, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8c, 0x00, 0, .none },
+ .{ .jle, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8e, 0x00, 0, .none },
+ .{ .jna, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x86, 0x00, 0, .none },
+ .{ .jnae, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x82, 0x00, 0, .none },
+ .{ .jnb, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x83, 0x00, 0, .none },
+ .{ .jnbe, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x87, 0x00, 0, .none },
+ .{ .jnc, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x83, 0x00, 0, .none },
+ .{ .jne, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x85, 0x00, 0, .none },
+ .{ .jng, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8e, 0x00, 0, .none },
+ .{ .jnge, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8c, 0x00, 0, .none },
+ .{ .jnl, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8d, 0x00, 0, .none },
+ .{ .jnle, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8f, 0x00, 0, .none },
+ .{ .jno, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x81, 0x00, 0, .none },
+ .{ .jnp, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8b, 0x00, 0, .none },
+ .{ .jns, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x89, 0x00, 0, .none },
+ .{ .jnz, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x85, 0x00, 0, .none },
+ .{ .jo, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x80, 0x00, 0, .none },
+ .{ .jp, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8a, 0x00, 0, .none },
+ .{ .jpe, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8a, 0x00, 0, .none },
+ .{ .jpo, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x8b, 0x00, 0, .none },
+ .{ .js, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x88, 0x00, 0, .none },
+ .{ .jz, .d, .rel32, .none, .none, .none, 2, 0x0f, 0x84, 0x00, 0, .none },
+
+ .{ .jmp, .d, .rel32, .none, .none, .none, 1, 0xe9, 0x00, 0x00, 0, .none },
+ .{ .jmp, .m, .rm64, .none, .none, .none, 1, 0xff, 0x00, 0x00, 4, .none },
+
+ .{ .lea, .rm, .r16, .m, .none, .none, 1, 0x8d, 0x00, 0x00, 0, .none },
+ .{ .lea, .rm, .r32, .m, .none, .none, 1, 0x8d, 0x00, 0x00, 0, .none },
+ .{ .lea, .rm, .r64, .m, .none, .none, 1, 0x8d, 0x00, 0x00, 0, .long },
+
+ .{ .mov, .mr, .rm8, .r8, .none, .none, 1, 0x88, 0x00, 0x00, 0, .none },
+ .{ .mov, .mr, .rm8, .r8, .none, .none, 1, 0x88, 0x00, 0x00, 0, .rex },
+ .{ .mov, .mr, .rm16, .r16, .none, .none, 1, 0x89, 0x00, 0x00, 0, .none },
+ .{ .mov, .mr, .rm32, .r32, .none, .none, 1, 0x89, 0x00, 0x00, 0, .none },
+ .{ .mov, .mr, .rm64, .r64, .none, .none, 1, 0x89, 0x00, 0x00, 0, .long },
+ .{ .mov, .rm, .r8, .rm8, .none, .none, 1, 0x8a, 0x00, 0x00, 0, .none },
+ .{ .mov, .rm, .r8, .rm8, .none, .none, 1, 0x8a, 0x00, 0x00, 0, .rex },
+ .{ .mov, .rm, .r16, .rm16, .none, .none, 1, 0x8b, 0x00, 0x00, 0, .none },
+ .{ .mov, .rm, .r32, .rm32, .none, .none, 1, 0x8b, 0x00, 0x00, 0, .none },
+ .{ .mov, .rm, .r64, .rm64, .none, .none, 1, 0x8b, 0x00, 0x00, 0, .long },
+ .{ .mov, .mr, .rm16, .sreg, .none, .none, 1, 0x8c, 0x00, 0x00, 0, .none },
+ .{ .mov, .mr, .rm64, .sreg, .none, .none, 1, 0x8c, 0x00, 0x00, 0, .long },
+ .{ .mov, .rm, .sreg, .rm16, .none, .none, 1, 0x8e, 0x00, 0x00, 0, .none },
+ .{ .mov, .rm, .sreg, .rm64, .none, .none, 1, 0x8e, 0x00, 0x00, 0, .long },
+ .{ .mov, .fd, .al, .moffs, .none, .none, 1, 0xa0, 0x00, 0x00, 0, .none },
+ .{ .mov, .fd, .ax, .moffs, .none, .none, 1, 0xa1, 0x00, 0x00, 0, .none },
+ .{ .mov, .fd, .eax, .moffs, .none, .none, 1, 0xa1, 0x00, 0x00, 0, .none },
+ .{ .mov, .fd, .rax, .moffs, .none, .none, 1, 0xa1, 0x00, 0x00, 0, .long },
+ .{ .mov, .td, .moffs, .al, .none, .none, 1, 0xa2, 0x00, 0x00, 0, .none },
+ .{ .mov, .td, .moffs, .ax, .none, .none, 1, 0xa3, 0x00, 0x00, 0, .none },
+ .{ .mov, .td, .moffs, .eax, .none, .none, 1, 0xa3, 0x00, 0x00, 0, .none },
+ .{ .mov, .td, .moffs, .rax, .none, .none, 1, 0xa3, 0x00, 0x00, 0, .long },
+ .{ .mov, .oi, .r8, .imm8, .none, .none, 1, 0xb0, 0x00, 0x00, 0, .none },
+ .{ .mov, .oi, .r8, .imm8, .none, .none, 1, 0xb0, 0x00, 0x00, 0, .rex },
+ .{ .mov, .oi, .r16, .imm16, .none, .none, 1, 0xb8, 0x00, 0x00, 0, .none },
+ .{ .mov, .oi, .r32, .imm32, .none, .none, 1, 0xb8, 0x00, 0x00, 0, .none },
+ .{ .mov, .oi, .r64, .imm64, .none, .none, 1, 0xb8, 0x00, 0x00, 0, .long },
+ .{ .mov, .mi, .rm8, .imm8, .none, .none, 1, 0xc6, 0x00, 0x00, 0, .none },
+ .{ .mov, .mi, .rm8, .imm8, .none, .none, 1, 0xc6, 0x00, 0x00, 0, .rex },
+ .{ .mov, .mi, .rm16, .imm16, .none, .none, 1, 0xc7, 0x00, 0x00, 0, .none },
+ .{ .mov, .mi, .rm32, .imm32, .none, .none, 1, 0xc7, 0x00, 0x00, 0, .none },
+ .{ .mov, .mi, .rm64, .imm32s, .none, .none, 1, 0xc7, 0x00, 0x00, 0, .long },
+
+ .{ .movsx, .rm, .r16, .rm8, .none, .none, 2, 0x0f, 0xbe, 0x00, 0, .none },
+ .{ .movsx, .rm, .r16, .rm8, .none, .none, 2, 0x0f, 0xbe, 0x00, 0, .rex },
+ .{ .movsx, .rm, .r32, .rm8, .none, .none, 2, 0x0f, 0xbe, 0x00, 0, .none },
+ .{ .movsx, .rm, .r32, .rm8, .none, .none, 2, 0x0f, 0xbe, 0x00, 0, .rex },
+ .{ .movsx, .rm, .r64, .rm8, .none, .none, 2, 0x0f, 0xbe, 0x00, 0, .long },
+ .{ .movsx, .rm, .r32, .rm16, .none, .none, 2, 0x0f, 0xbf, 0x00, 0, .none },
+ .{ .movsx, .rm, .r64, .rm16, .none, .none, 2, 0x0f, 0xbf, 0x00, 0, .long },
+
+ // The 32-bit form of this instruction (without REX.W) is discouraged.
+ .{ .movsxd, .rm, .r32, .rm32, .none, .none, 1, 0x63, 0x00, 0x00, 0, .none },
+ .{ .movsxd, .rm, .r64, .rm32, .none, .none, 1, 0x63, 0x00, 0x00, 0, .long },
+
+ .{ .movzx, .rm, .r16, .rm8, .none, .none, 2, 0x0f, 0xb6, 0x00, 0, .none },
+ .{ .movzx, .rm, .r32, .rm8, .none, .none, 2, 0x0f, 0xb6, 0x00, 0, .none },
+ .{ .movzx, .rm, .r64, .rm8, .none, .none, 2, 0x0f, 0xb6, 0x00, 0, .long },
+ .{ .movzx, .rm, .r32, .rm16, .none, .none, 2, 0x0f, 0xb7, 0x00, 0, .none },
+ .{ .movzx, .rm, .r64, .rm16, .none, .none, 2, 0x0f, 0xb7, 0x00, 0, .long },
+
+ .{ .mul, .m, .rm8, .none, .none, .none, 1, 0xf6, 0x00, 0x00, 4, .none },
+ .{ .mul, .m, .rm8, .none, .none, .none, 1, 0xf6, 0x00, 0x00, 4, .rex },
+ .{ .mul, .m, .rm16, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 4, .none },
+ .{ .mul, .m, .rm32, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 4, .none },
+ .{ .mul, .m, .rm64, .none, .none, .none, 1, 0xf7, 0x00, 0x00, 4, .long },
+
+ .{ .nop, .np, .none, .none, .none, .none, 1, 0x90, 0x00, 0x00, 0, .none },
+
+ .{ .@"or", .zi, .al, .imm8, .none, .none, 1, 0x0c, 0x00, 0x00, 0, .none },
+ .{ .@"or", .zi, .ax, .imm16, .none, .none, 1, 0x0d, 0x00, 0x00, 0, .none },
+ .{ .@"or", .zi, .eax, .imm32, .none, .none, 1, 0x0d, 0x00, 0x00, 0, .none },
+ .{ .@"or", .zi, .rax, .imm32s, .none, .none, 1, 0x0d, 0x00, 0x00, 0, .long },
+ .{ .@"or", .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 1, .none },
+ .{ .@"or", .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 1, .rex },
+ .{ .@"or", .mi, .rm16, .imm16, .none, .none, 1, 0x81, 0x00, 0x00, 1, .none },
+ .{ .@"or", .mi, .rm32, .imm32, .none, .none, 1, 0x81, 0x00, 0x00, 1, .none },
+ .{ .@"or", .mi, .rm64, .imm32s, .none, .none, 1, 0x81, 0x00, 0x00, 1, .long },
+ .{ .@"or", .mi, .rm16, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 1, .none },
+ .{ .@"or", .mi, .rm32, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 1, .none },
+ .{ .@"or", .mi, .rm64, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 1, .long },
+ .{ .@"or", .mr, .rm8, .r8, .none, .none, 1, 0x08, 0x00, 0x00, 0, .none },
+ .{ .@"or", .mr, .rm8, .r8, .none, .none, 1, 0x08, 0x00, 0x00, 0, .rex },
+ .{ .@"or", .mr, .rm16, .r16, .none, .none, 1, 0x09, 0x00, 0x00, 0, .none },
+ .{ .@"or", .mr, .rm32, .r32, .none, .none, 1, 0x09, 0x00, 0x00, 0, .none },
+ .{ .@"or", .mr, .rm64, .r64, .none, .none, 1, 0x09, 0x00, 0x00, 0, .long },
+ .{ .@"or", .rm, .r8, .rm8, .none, .none, 1, 0x0a, 0x00, 0x00, 0, .none },
+ .{ .@"or", .rm, .r8, .rm8, .none, .none, 1, 0x0a, 0x00, 0x00, 0, .rex },
+ .{ .@"or", .rm, .r16, .rm16, .none, .none, 1, 0x0b, 0x00, 0x00, 0, .none },
+ .{ .@"or", .rm, .r32, .rm32, .none, .none, 1, 0x0b, 0x00, 0x00, 0, .none },
+ .{ .@"or", .rm, .r64, .rm64, .none, .none, 1, 0x0b, 0x00, 0x00, 0, .long },
+
+ .{ .pop, .o, .r16, .none, .none, .none, 1, 0x58, 0x00, 0x00, 0, .none },
+ .{ .pop, .o, .r64, .none, .none, .none, 1, 0x58, 0x00, 0x00, 0, .none },
+ .{ .pop, .m, .rm16, .none, .none, .none, 1, 0x8f, 0x00, 0x00, 0, .none },
+ .{ .pop, .m, .rm64, .none, .none, .none, 1, 0x8f, 0x00, 0x00, 0, .none },
+
+ .{ .push, .o, .r16, .none, .none, .none, 1, 0x50, 0x00, 0x00, 0, .none },
+ .{ .push, .o, .r64, .none, .none, .none, 1, 0x50, 0x00, 0x00, 0, .none },
+ .{ .push, .m, .rm16, .none, .none, .none, 1, 0xff, 0x00, 0x00, 6, .none },
+ .{ .push, .m, .rm64, .none, .none, .none, 1, 0xff, 0x00, 0x00, 6, .none },
+ .{ .push, .i, .imm8, .none, .none, .none, 1, 0x6a, 0x00, 0x00, 0, .none },
+ .{ .push, .i, .imm16, .none, .none, .none, 1, 0x68, 0x00, 0x00, 0, .none },
+ .{ .push, .i, .imm32, .none, .none, .none, 1, 0x68, 0x00, 0x00, 0, .none },
+
+ .{ .ret, .np, .none, .none, .none, .none, 1, 0xc3, 0x00, 0x00, 0, .none },
+
+ .{ .sal, .m1, .rm8, .unity, .none, .none, 1, 0xd0, 0x00, 0x00, 4, .none },
+ .{ .sal, .m1, .rm8, .unity, .none, .none, 1, 0xd0, 0x00, 0x00, 4, .rex },
+ .{ .sal, .m1, .rm16, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 4, .none },
+ .{ .sal, .m1, .rm32, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 4, .none },
+ .{ .sal, .m1, .rm64, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 4, .long },
+ .{ .sal, .mc, .rm8, .cl, .none, .none, 1, 0xd2, 0x00, 0x00, 4, .none },
+ .{ .sal, .mc, .rm8, .cl, .none, .none, 1, 0xd2, 0x00, 0x00, 4, .rex },
+ .{ .sal, .mc, .rm16, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 4, .none },
+ .{ .sal, .mc, .rm32, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 4, .none },
+ .{ .sal, .mc, .rm64, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 4, .long },
+ .{ .sal, .mi, .rm8, .imm8, .none, .none, 1, 0xc0, 0x00, 0x00, 4, .none },
+ .{ .sal, .mi, .rm8, .imm8, .none, .none, 1, 0xc0, 0x00, 0x00, 4, .rex },
+ .{ .sal, .mi, .rm16, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 4, .none },
+ .{ .sal, .mi, .rm32, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 4, .none },
+ .{ .sal, .mi, .rm64, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 4, .long },
+
+ .{ .sar, .m1, .rm8, .unity, .none, .none, 1, 0xd0, 0x00, 0x00, 7, .none },
+ .{ .sar, .m1, .rm8, .unity, .none, .none, 1, 0xd0, 0x00, 0x00, 7, .rex },
+ .{ .sar, .m1, .rm16, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 7, .none },
+ .{ .sar, .m1, .rm32, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 7, .none },
+ .{ .sar, .m1, .rm64, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 7, .long },
+ .{ .sar, .mc, .rm8, .cl, .none, .none, 1, 0xd2, 0x00, 0x00, 7, .none },
+ .{ .sar, .mc, .rm8, .cl, .none, .none, 1, 0xd2, 0x00, 0x00, 7, .rex },
+ .{ .sar, .mc, .rm16, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 7, .none },
+ .{ .sar, .mc, .rm32, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 7, .none },
+ .{ .sar, .mc, .rm64, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 7, .long },
+ .{ .sar, .mi, .rm8, .imm8, .none, .none, 1, 0xc0, 0x00, 0x00, 7, .none },
+ .{ .sar, .mi, .rm8, .imm8, .none, .none, 1, 0xc0, 0x00, 0x00, 7, .rex },
+ .{ .sar, .mi, .rm16, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 7, .none },
+ .{ .sar, .mi, .rm32, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 7, .none },
+ .{ .sar, .mi, .rm64, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 7, .long },
+
+ .{ .sbb, .zi, .al, .imm8, .none, .none, 1, 0x1c, 0x00, 0x00, 0, .none },
+ .{ .sbb, .zi, .ax, .imm16, .none, .none, 1, 0x1d, 0x00, 0x00, 0, .none },
+ .{ .sbb, .zi, .eax, .imm32, .none, .none, 1, 0x1d, 0x00, 0x00, 0, .none },
+ .{ .sbb, .zi, .rax, .imm32s, .none, .none, 1, 0x1d, 0x00, 0x00, 0, .long },
+ .{ .sbb, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 3, .none },
+ .{ .sbb, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 3, .rex },
+ .{ .sbb, .mi, .rm16, .imm16, .none, .none, 1, 0x81, 0x00, 0x00, 3, .none },
+ .{ .sbb, .mi, .rm32, .imm32, .none, .none, 1, 0x81, 0x00, 0x00, 3, .none },
+ .{ .sbb, .mi, .rm64, .imm32s, .none, .none, 1, 0x81, 0x00, 0x00, 3, .long },
+ .{ .sbb, .mi, .rm16, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 3, .none },
+ .{ .sbb, .mi, .rm32, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 3, .none },
+ .{ .sbb, .mi, .rm64, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 3, .long },
+ .{ .sbb, .mr, .rm8, .r8, .none, .none, 1, 0x18, 0x00, 0x00, 0, .none },
+ .{ .sbb, .mr, .rm8, .r8, .none, .none, 1, 0x18, 0x00, 0x00, 0, .rex },
+ .{ .sbb, .mr, .rm16, .r16, .none, .none, 1, 0x19, 0x00, 0x00, 0, .none },
+ .{ .sbb, .mr, .rm32, .r32, .none, .none, 1, 0x19, 0x00, 0x00, 0, .none },
+ .{ .sbb, .mr, .rm64, .r64, .none, .none, 1, 0x19, 0x00, 0x00, 0, .long },
+ .{ .sbb, .rm, .r8, .rm8, .none, .none, 1, 0x1a, 0x00, 0x00, 0, .none },
+ .{ .sbb, .rm, .r8, .rm8, .none, .none, 1, 0x1a, 0x00, 0x00, 0, .rex },
+ .{ .sbb, .rm, .r16, .rm16, .none, .none, 1, 0x1b, 0x00, 0x00, 0, .none },
+ .{ .sbb, .rm, .r32, .rm32, .none, .none, 1, 0x1b, 0x00, 0x00, 0, .none },
+ .{ .sbb, .rm, .r64, .rm64, .none, .none, 1, 0x1b, 0x00, 0x00, 0, .long },
+
+ .{ .seta, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x97, 0x00, 0, .none },
+ .{ .seta, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x97, 0x00, 0, .rex },
+ .{ .setae, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x93, 0x00, 0, .none },
+ .{ .setae, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x93, 0x00, 0, .rex },
+ .{ .setb, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x92, 0x00, 0, .none },
+ .{ .setb, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x92, 0x00, 0, .rex },
+ .{ .setbe, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x96, 0x00, 0, .none },
+ .{ .setbe, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x96, 0x00, 0, .rex },
+ .{ .setc, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x92, 0x00, 0, .none },
+ .{ .setc, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x92, 0x00, 0, .rex },
+ .{ .sete, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x94, 0x00, 0, .none },
+ .{ .sete, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x94, 0x00, 0, .rex },
+ .{ .setg, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9f, 0x00, 0, .none },
+ .{ .setg, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9f, 0x00, 0, .rex },
+ .{ .setge, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9d, 0x00, 0, .none },
+ .{ .setge, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9d, 0x00, 0, .rex },
+ .{ .setl, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9c, 0x00, 0, .none },
+ .{ .setl, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9c, 0x00, 0, .rex },
+ .{ .setle, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9e, 0x00, 0, .none },
+ .{ .setle, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9e, 0x00, 0, .rex },
+ .{ .setna, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x96, 0x00, 0, .none },
+ .{ .setna, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x96, 0x00, 0, .rex },
+ .{ .setnae, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x92, 0x00, 0, .none },
+ .{ .setnae, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x92, 0x00, 0, .rex },
+ .{ .setnb, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x93, 0x00, 0, .none },
+ .{ .setnb, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x93, 0x00, 0, .rex },
+ .{ .setnbe, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x97, 0x00, 0, .none },
+ .{ .setnbe, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x97, 0x00, 0, .rex },
+ .{ .setnc, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x93, 0x00, 0, .none },
+ .{ .setnc, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x93, 0x00, 0, .rex },
+ .{ .setne, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x95, 0x00, 0, .none },
+ .{ .setne, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x95, 0x00, 0, .rex },
+ .{ .setng, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9e, 0x00, 0, .none },
+ .{ .setng, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9e, 0x00, 0, .rex },
+ .{ .setnge, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9c, 0x00, 0, .none },
+ .{ .setnge, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9c, 0x00, 0, .rex },
+ .{ .setnl, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9d, 0x00, 0, .none },
+ .{ .setnl, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9d, 0x00, 0, .rex },
+ .{ .setnle, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9f, 0x00, 0, .none },
+ .{ .setnle, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9f, 0x00, 0, .rex },
+ .{ .setno, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x91, 0x00, 0, .none },
+ .{ .setno, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x91, 0x00, 0, .rex },
+ .{ .setnp, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9b, 0x00, 0, .none },
+ .{ .setnp, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9b, 0x00, 0, .rex },
+ .{ .setns, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x99, 0x00, 0, .none },
+ .{ .setns, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x99, 0x00, 0, .rex },
+ .{ .setnz, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x95, 0x00, 0, .none },
+ .{ .setnz, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x95, 0x00, 0, .rex },
+ .{ .seto, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x90, 0x00, 0, .none },
+ .{ .seto, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x90, 0x00, 0, .rex },
+ .{ .setp, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9a, 0x00, 0, .none },
+ .{ .setp, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9a, 0x00, 0, .rex },
+ .{ .setpe, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9a, 0x00, 0, .none },
+ .{ .setpe, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9a, 0x00, 0, .rex },
+ .{ .setpo, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9b, 0x00, 0, .none },
+ .{ .setpo, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x9b, 0x00, 0, .rex },
+ .{ .sets, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x98, 0x00, 0, .none },
+ .{ .sets, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x98, 0x00, 0, .rex },
+ .{ .setz, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x94, 0x00, 0, .none },
+ .{ .setz, .m, .rm8, .none, .none, .none, 2, 0x0f, 0x94, 0x00, 0, .rex },
+
+ .{ .shl, .m1, .rm8, .unity, .none, .none, 1, 0xd0, 0x00, 0x00, 4, .none },
+ .{ .shl, .m1, .rm8, .unity, .none, .none, 1, 0xd0, 0x00, 0x00, 4, .rex },
+ .{ .shl, .m1, .rm16, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 4, .none },
+ .{ .shl, .m1, .rm32, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 4, .none },
+ .{ .shl, .m1, .rm64, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 4, .long },
+ .{ .shl, .mc, .rm8, .cl, .none, .none, 1, 0xd2, 0x00, 0x00, 4, .none },
+ .{ .shl, .mc, .rm8, .cl, .none, .none, 1, 0xd2, 0x00, 0x00, 4, .rex },
+ .{ .shl, .mc, .rm16, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 4, .none },
+ .{ .shl, .mc, .rm32, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 4, .none },
+ .{ .shl, .mc, .rm64, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 4, .long },
+ .{ .shl, .mi, .rm8, .imm8, .none, .none, 1, 0xc0, 0x00, 0x00, 4, .none },
+ .{ .shl, .mi, .rm8, .imm8, .none, .none, 1, 0xc0, 0x00, 0x00, 4, .rex },
+ .{ .shl, .mi, .rm16, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 4, .none },
+ .{ .shl, .mi, .rm32, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 4, .none },
+ .{ .shl, .mi, .rm64, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 4, .long },
+
+ .{ .shr, .m1, .rm8, .unity, .none, .none, 1, 0xd0, 0x00, 0x00, 5, .none },
+ .{ .shr, .m1, .rm8, .unity, .none, .none, 1, 0xd0, 0x00, 0x00, 5, .rex },
+ .{ .shr, .m1, .rm16, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 5, .none },
+ .{ .shr, .m1, .rm32, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 5, .none },
+ .{ .shr, .m1, .rm64, .unity, .none, .none, 1, 0xd1, 0x00, 0x00, 5, .long },
+ .{ .shr, .mc, .rm8, .cl, .none, .none, 1, 0xd2, 0x00, 0x00, 5, .none },
+ .{ .shr, .mc, .rm8, .cl, .none, .none, 1, 0xd2, 0x00, 0x00, 5, .rex },
+ .{ .shr, .mc, .rm16, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 5, .none },
+ .{ .shr, .mc, .rm32, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 5, .none },
+ .{ .shr, .mc, .rm64, .cl, .none, .none, 1, 0xd3, 0x00, 0x00, 5, .long },
+ .{ .shr, .mi, .rm8, .imm8, .none, .none, 1, 0xc0, 0x00, 0x00, 5, .none },
+ .{ .shr, .mi, .rm8, .imm8, .none, .none, 1, 0xc0, 0x00, 0x00, 5, .rex },
+ .{ .shr, .mi, .rm16, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 5, .none },
+ .{ .shr, .mi, .rm32, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 5, .none },
+ .{ .shr, .mi, .rm64, .imm8, .none, .none, 1, 0xc1, 0x00, 0x00, 5, .long },
+
+ .{ .sub, .zi, .al, .imm8, .none, .none, 1, 0x2c, 0x00, 0x00, 0, .none },
+ .{ .sub, .zi, .ax, .imm16, .none, .none, 1, 0x2d, 0x00, 0x00, 0, .none },
+ .{ .sub, .zi, .eax, .imm32, .none, .none, 1, 0x2d, 0x00, 0x00, 0, .none },
+ .{ .sub, .zi, .rax, .imm32s, .none, .none, 1, 0x2d, 0x00, 0x00, 0, .long },
+ .{ .sub, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 5, .none },
+ .{ .sub, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 5, .rex },
+ .{ .sub, .mi, .rm16, .imm16, .none, .none, 1, 0x81, 0x00, 0x00, 5, .none },
+ .{ .sub, .mi, .rm32, .imm32, .none, .none, 1, 0x81, 0x00, 0x00, 5, .none },
+ .{ .sub, .mi, .rm64, .imm32s, .none, .none, 1, 0x81, 0x00, 0x00, 5, .long },
+ .{ .sub, .mi, .rm16, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 5, .none },
+ .{ .sub, .mi, .rm32, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 5, .none },
+ .{ .sub, .mi, .rm64, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 5, .long },
+ .{ .sub, .mr, .rm8, .r8, .none, .none, 1, 0x28, 0x00, 0x00, 0, .none },
+ .{ .sub, .mr, .rm8, .r8, .none, .none, 1, 0x28, 0x00, 0x00, 0, .rex },
+ .{ .sub, .mr, .rm16, .r16, .none, .none, 1, 0x29, 0x00, 0x00, 0, .none },
+ .{ .sub, .mr, .rm32, .r32, .none, .none, 1, 0x29, 0x00, 0x00, 0, .none },
+ .{ .sub, .mr, .rm64, .r64, .none, .none, 1, 0x29, 0x00, 0x00, 0, .long },
+ .{ .sub, .rm, .r8, .rm8, .none, .none, 1, 0x2a, 0x00, 0x00, 0, .none },
+ .{ .sub, .rm, .r8, .rm8, .none, .none, 1, 0x2a, 0x00, 0x00, 0, .rex },
+ .{ .sub, .rm, .r16, .rm16, .none, .none, 1, 0x2b, 0x00, 0x00, 0, .none },
+ .{ .sub, .rm, .r32, .rm32, .none, .none, 1, 0x2b, 0x00, 0x00, 0, .none },
+ .{ .sub, .rm, .r64, .rm64, .none, .none, 1, 0x2b, 0x00, 0x00, 0, .long },
+
+ .{ .syscall, .np, .none, .none, .none, .none, 2, 0x0f, 0x05, 0x00, 0, .none },
+
+ .{ .@"test", .zi, .al, .imm8, .none, .none, 1, 0xa8, 0x00, 0x00, 0, .none },
+ .{ .@"test", .zi, .ax, .imm16, .none, .none, 1, 0xa9, 0x00, 0x00, 0, .none },
+ .{ .@"test", .zi, .eax, .imm32, .none, .none, 1, 0xa9, 0x00, 0x00, 0, .none },
+ .{ .@"test", .zi, .rax, .imm32s, .none, .none, 1, 0xa9, 0x00, 0x00, 0, .long },
+ .{ .@"test", .mi, .rm8, .imm8, .none, .none, 1, 0xf6, 0x00, 0x00, 0, .none },
+ .{ .@"test", .mi, .rm8, .imm8, .none, .none, 1, 0xf6, 0x00, 0x00, 0, .rex },
+ .{ .@"test", .mi, .rm16, .imm16, .none, .none, 1, 0xf7, 0x00, 0x00, 0, .none },
+ .{ .@"test", .mi, .rm32, .imm32, .none, .none, 1, 0xf7, 0x00, 0x00, 0, .none },
+ .{ .@"test", .mi, .rm64, .imm32s, .none, .none, 1, 0xf7, 0x00, 0x00, 0, .long },
+ .{ .@"test", .mr, .rm8, .r8, .none, .none, 1, 0x84, 0x00, 0x00, 0, .none },
+ .{ .@"test", .mr, .rm8, .r8, .none, .none, 1, 0x84, 0x00, 0x00, 0, .rex },
+ .{ .@"test", .mr, .rm16, .r16, .none, .none, 1, 0x85, 0x00, 0x00, 0, .none },
+ .{ .@"test", .mr, .rm32, .r32, .none, .none, 1, 0x85, 0x00, 0x00, 0, .none },
+ .{ .@"test", .mr, .rm64, .r64, .none, .none, 1, 0x85, 0x00, 0x00, 0, .long },
+
+ .{ .ud2, .np, .none, .none, .none, .none, 2, 0x0f, 0x0b, 0x00, 0, .none },
+
+ .{ .xor, .zi, .al, .imm8, .none, .none, 1, 0x34, 0x00, 0x00, 0, .none },
+ .{ .xor, .zi, .ax, .imm16, .none, .none, 1, 0x35, 0x00, 0x00, 0, .none },
+ .{ .xor, .zi, .eax, .imm32, .none, .none, 1, 0x35, 0x00, 0x00, 0, .none },
+ .{ .xor, .zi, .rax, .imm32s, .none, .none, 1, 0x35, 0x00, 0x00, 0, .long },
+ .{ .xor, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 6, .none },
+ .{ .xor, .mi, .rm8, .imm8, .none, .none, 1, 0x80, 0x00, 0x00, 6, .rex },
+ .{ .xor, .mi, .rm16, .imm16, .none, .none, 1, 0x81, 0x00, 0x00, 6, .none },
+ .{ .xor, .mi, .rm32, .imm32, .none, .none, 1, 0x81, 0x00, 0x00, 6, .none },
+ .{ .xor, .mi, .rm64, .imm32s, .none, .none, 1, 0x81, 0x00, 0x00, 6, .long },
+ .{ .xor, .mi, .rm16, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 6, .none },
+ .{ .xor, .mi, .rm32, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 6, .none },
+ .{ .xor, .mi, .rm64, .imm8s, .none, .none, 1, 0x83, 0x00, 0x00, 6, .long },
+ .{ .xor, .mr, .rm8, .r8, .none, .none, 1, 0x30, 0x00, 0x00, 0, .none },
+ .{ .xor, .mr, .rm8, .r8, .none, .none, 1, 0x30, 0x00, 0x00, 0, .rex },
+ .{ .xor, .mr, .rm16, .r16, .none, .none, 1, 0x31, 0x00, 0x00, 0, .none },
+ .{ .xor, .mr, .rm32, .r32, .none, .none, 1, 0x31, 0x00, 0x00, 0, .none },
+ .{ .xor, .mr, .rm64, .r64, .none, .none, 1, 0x31, 0x00, 0x00, 0, .long },
+ .{ .xor, .rm, .r8, .rm8, .none, .none, 1, 0x32, 0x00, 0x00, 0, .none },
+ .{ .xor, .rm, .r8, .rm8, .none, .none, 1, 0x32, 0x00, 0x00, 0, .rex },
+ .{ .xor, .rm, .r16, .rm16, .none, .none, 1, 0x33, 0x00, 0x00, 0, .none },
+ .{ .xor, .rm, .r32, .rm32, .none, .none, 1, 0x33, 0x00, 0x00, 0, .none },
+ .{ .xor, .rm, .r64, .rm64, .none, .none, 1, 0x33, 0x00, 0x00, 0, .long },
+
+ // SSE
+ .{ .addss, .rm, .xmm, .xmm_m32, .none, .none, 3, 0xf3, 0x0f, 0x58, 0, .sse },
+
+ .{ .cmpss, .rmi, .xmm, .xmm_m32, .imm8, .none, 3, 0xf3, 0x0f, 0xc2, 0, .sse },
+
+ .{ .movss, .rm, .xmm, .xmm_m32, .none, .none, 3, 0xf3, 0x0f, 0x10, 0, .sse },
+ .{ .movss, .mr, .xmm_m32, .xmm, .none, .none, 3, 0xf3, 0x0f, 0x11, 0, .sse },
+
+ .{ .ucomiss, .rm, .xmm, .xmm_m32, .none, .none, 2, 0x0f, 0x2e, 0x00, 0, .sse },
+
+ // SSE2
+ .{ .addsd, .rm, .xmm, .xmm_m64, .none, .none, 3, 0xf2, 0x0f, 0x58, 0, .sse2 },
+
+ .{ .cmpsd, .rmi, .xmm, .xmm_m64, .imm8, .none, 3, 0xf2, 0x0f, 0xc2, 0, .sse2 },
+
+ .{ .movq, .rm, .xmm, .xmm_m64, .none, .none, 3, 0xf3, 0x0f, 0x7e, 0, .sse2 },
+ .{ .movq, .mr, .xmm_m64, .xmm, .none, .none, 3, 0x66, 0x0f, 0xd6, 0, .sse2 },
+
+ .{ .movsd, .rm, .xmm, .xmm_m64, .none, .none, 3, 0xf2, 0x0f, 0x10, 0, .sse2 },
+ .{ .movsd, .mr, .xmm_m64, .xmm, .none, .none, 3, 0xf2, 0x0f, 0x11, 0, .sse2 },
+
+ .{ .ucomisd, .rm, .xmm, .xmm_m64, .none, .none, 3, 0x66, 0x0f, 0x2e, 0, .sse2 },
+};
+// zig fmt: on
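+
+// How to read a row, informally: each entry appears to list the mnemonic, the
+// operand-encoding form, four operand slots, the opcode byte count, up to three
+// opcode bytes, the ModRM.reg extension digit, and a mode flag (.long apparently
+// selecting a REX.W prefix, .rex a mandatory REX prefix, and .fpu/.sse/.sse2 a
+// required CPU feature). Reading
+//     .{ .mov, .rm, .r64, .rm64, .none, .none, 1, 0x8b, 0x00, 0x00, 0, .long }
+// that way, `mov rax, rbx` uses opcode 0x8b with REX.W and a register-direct
+// ModRM byte, i.e. the bytes 48 8b c3.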
+
diff --git a/src/codegen.zig b/src/codegen.zig
index df7ceff1f0..a91795841c 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -29,11 +29,10 @@ pub const Result = union(enum) {
fail: *ErrorMsg,
};
-pub const GenerateSymbolError = error{
+pub const CodeGenError = error{
OutOfMemory,
Overflow,
- /// A Decl that this symbol depends on had a semantic analysis failure.
- AnalysisFail,
+ CodegenFail,
};
pub const DebugInfoOutput = union(enum) {
@@ -63,19 +62,6 @@ pub const DebugInfoOutput = union(enum) {
none,
};
-/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:
-/// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
-/// * direct - the value is referenced directly via symbol index index (the linker emits a displacement reloc)
-/// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
-pub const LinkerLoad = struct {
- type: enum {
- got,
- direct,
- import,
- },
- sym_index: u32,
-};
-
pub fn generateFunction(
bin_file: *link.File,
src_loc: Module.SrcLoc,
@@ -84,7 +70,7 @@ pub fn generateFunction(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
switch (bin_file.options.target.cpu.arch) {
.arm,
.armeb,
@@ -120,7 +106,7 @@ pub fn generateSymbol(
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
const tracy = trace(@src());
defer tracy.end();
@@ -823,7 +809,7 @@ fn lowerDeclRef(
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
-) GenerateSymbolError!Result {
+) CodeGenError!Result {
const target = bin_file.options.target;
const module = bin_file.options.module.?;
if (typed_value.ty.isSlice()) {
@@ -880,6 +866,288 @@ fn lowerDeclRef(
return Result.ok;
}
+/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:
+/// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
+/// * direct - the value is referenced directly via symbol index (the linker emits a displacement reloc)
+/// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
+pub const LinkerLoad = struct {
+ type: enum {
+ got,
+ direct,
+ import,
+ },
+ sym_index: u32,
+};
+
+pub const GenResult = union(enum) {
+ mcv: MCValue,
+ fail: *ErrorMsg,
+
+ const MCValue = union(enum) {
+ none,
+ undef,
+ /// The bit-width of the immediate may be smaller than `u64`. For example, on 32-bit targets
+ /// such as ARM, the immediate will never exceed 32 bits.
+ immediate: u64,
+ linker_load: LinkerLoad,
+ /// Direct by-address reference to memory location.
+ memory: u64,
+ };
+
+ fn mcv(val: MCValue) GenResult {
+ return .{ .mcv = val };
+ }
+
+ fn fail(
+ gpa: Allocator,
+ src_loc: Module.SrcLoc,
+ comptime format: []const u8,
+ args: anytype,
+ ) Allocator.Error!GenResult {
+ const msg = try ErrorMsg.create(gpa, src_loc, format, args);
+ return .{ .fail = msg };
+ }
+};
+
+fn genDeclRef(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ tv: TypedValue,
+ decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
+
+ const target = bin_file.options.target;
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+
+ const module = bin_file.options.module.?;
+ const decl = module.declPtr(decl_index);
+
+ if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
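+ // A decl with no runtime bits has no meaningful address, so any pointer
+ // value will do; 0xaa matches Zig's undefined-memory byte pattern,
+ // presumably to make accidental dereferences easier to spot.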
+ const imm: u64 = switch (ptr_bytes) {
+ 1 => 0xaa,
+ 2 => 0xaaaa,
+ 4 => 0xaaaaaaaa,
+ 8 => 0xaaaaaaaaaaaaaaaa,
+ else => unreachable,
+ };
+ return GenResult.mcv(.{ .immediate = imm });
+ }
+
+ // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
+ if (tv.ty.zigTypeTag() == .Pointer) blk: {
+ if (tv.ty.castPtrToFn()) |_| break :blk;
+ if (!tv.ty.elemType2().hasRuntimeBits()) {
+ return GenResult.mcv(.none);
+ }
+ }
+
+ module.markDeclAlive(decl);
+
+ if (bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return GenResult.mcv(.{ .memory = atom.getOffsetTableAddress(elf_file) });
+ } else if (bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .got,
+ .sym_index = sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .got,
+ .sym_index = sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Plan9)) |p9| {
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
+ return GenResult.mcv(.{ .memory = got_addr });
+ } else {
+ return GenResult.fail(bin_file.allocator, src_loc, "TODO genDeclRef for target {}", .{target});
+ }
+}
+
+fn genUnnamedConst(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ tv: TypedValue,
+ owner_decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ log.debug("genUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
+
+ const target = bin_file.options.target;
+ const local_sym_index = bin_file.lowerUnnamedConst(tv, owner_decl_index) catch |err| {
+ return GenResult.fail(bin_file.allocator, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)});
+ };
+ if (bin_file.cast(link.File.Elf)) |elf_file| {
+ return GenResult.mcv(.{ .memory = elf_file.getSymbol(local_sym_index).st_value });
+ } else if (bin_file.cast(link.File.MachO)) |_| {
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .direct,
+ .sym_index = local_sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Coff)) |_| {
+ return GenResult.mcv(.{ .linker_load = .{
+ .type = .direct,
+ .sym_index = local_sym_index,
+ } });
+ } else if (bin_file.cast(link.File.Plan9)) |p9| {
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+ const got_index = local_sym_index; // the plan9 backend returns the got_index
+ const got_addr = p9.bases.data + got_index * ptr_bytes;
+ return GenResult.mcv(.{ .memory = got_addr });
+ } else {
+ return GenResult.fail(bin_file.allocator, src_loc, "TODO genUnnamedConst for target {}", .{target});
+ }
+}
+
+pub fn genTypedValue(
+ bin_file: *link.File,
+ src_loc: Module.SrcLoc,
+ arg_tv: TypedValue,
+ owner_decl_index: Module.Decl.Index,
+) CodeGenError!GenResult {
+ var typed_value = arg_tv;
+ if (typed_value.val.castTag(.runtime_value)) |rt| {
+ typed_value.val = rt.data;
+ }
+
+ log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });
+
+ if (typed_value.val.isUndef())
+ return GenResult.mcv(.undef);
+
+ const target = bin_file.options.target;
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+
+ if (typed_value.val.castTag(.decl_ref)) |payload| {
+ return genDeclRef(bin_file, src_loc, typed_value, payload.data);
+ }
+ if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
+ return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index);
+ }
+
+ switch (typed_value.ty.zigTypeTag()) {
+ .Void => return GenResult.mcv(.none),
+ .Pointer => switch (typed_value.ty.ptrSize()) {
+ .Slice => {},
+ else => {
+ switch (typed_value.val.tag()) {
+ .int_u64 => {
+ return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+ },
+ else => {},
+ }
+ },
+ },
+ .Int => {
+ const info = typed_value.ty.intInfo(target);
+ if (info.bits <= ptr_bits) {
+ const unsigned = switch (info.signedness) {
+ .signed => @bitCast(u64, typed_value.val.toSignedInt(target)),
+ .unsigned => typed_value.val.toUnsignedInt(target),
+ };
+ return GenResult.mcv(.{ .immediate = unsigned });
+ }
+ },
+ .Bool => {
+ return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) });
+ },
+ .Optional => {
+ if (typed_value.ty.isPtrLikeOptional()) {
+ if (typed_value.val.isNull())
+ return GenResult.mcv(.{ .immediate = 0 });
+
+ var buf: Type.Payload.ElemType = undefined;
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = typed_value.ty.optionalChild(&buf),
+ .val = typed_value.val,
+ }, owner_decl_index);
+ } else if (typed_value.ty.abiSize(target) == 1) {
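+ // A non-pointer-like optional with a 1-byte ABI size can only be the
+ // null flag itself (its payload has no runtime bits), so it lowers to a
+ // 0/1 immediate.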
+ return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) });
+ }
+ },
+ .Enum => {
+ if (typed_value.val.castTag(.enum_field_index)) |field_index| {
+ switch (typed_value.ty.tag()) {
+ .enum_simple => {
+ return GenResult.mcv(.{ .immediate = field_index.data });
+ },
+ .enum_full, .enum_nonexhaustive => {
+ const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
+ if (enum_full.values.count() != 0) {
+ const tag_val = enum_full.values.keys()[field_index.data];
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = enum_full.tag_ty,
+ .val = tag_val,
+ }, owner_decl_index);
+ } else {
+ return GenResult.mcv(.{ .immediate = field_index.data });
+ }
+ },
+ else => unreachable,
+ }
+ } else {
+ var int_tag_buffer: Type.Payload.Bits = undefined;
+ const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = int_tag_ty,
+ .val = typed_value.val,
+ }, owner_decl_index);
+ }
+ },
+ .ErrorSet => {
+ switch (typed_value.val.tag()) {
+ .@"error" => {
+ const err_name = typed_value.val.castTag(.@"error").?.data.name;
+ const module = bin_file.options.module.?;
+ const global_error_set = module.global_error_set;
+ const error_index = global_error_set.get(err_name).?;
+ return GenResult.mcv(.{ .immediate = error_index });
+ },
+ else => {
+ // In this case we are rendering an error union that has a zero-bit payload.
+ return GenResult.mcv(.{ .immediate = 0 });
+ },
+ }
+ },
+ .ErrorUnion => {
+ const error_type = typed_value.ty.errorUnionSet();
+ const payload_type = typed_value.ty.errorUnionPayload();
+ const is_pl = typed_value.val.errorUnionIsPayload();
+
+ if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+ // We use the error type directly as the type.
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = error_type,
+ .val = err_val,
+ }, owner_decl_index);
+ }
+ },
+
+ .ComptimeInt => unreachable,
+ .ComptimeFloat => unreachable,
+ .Type => unreachable,
+ .EnumLiteral => unreachable,
+ .NoReturn => unreachable,
+ .Undefined => unreachable,
+ .Null => unreachable,
+ .Opaque => unreachable,
+
+ else => {},
+ }
+
+ return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index);
+}
+
pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 {
const payload_align = payload_ty.abiAlignment(target);
const error_align = Type.anyerror.abiAlignment(target);
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index cf428d4bd6..519b2b45d5 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -17,12 +17,6 @@ const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
-const target_util = @import("../target.zig");
-const libcFloatPrefix = target_util.libcFloatPrefix;
-const libcFloatSuffix = target_util.libcFloatSuffix;
-const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev;
-const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
-
const BigIntLimb = std.math.big.Limb;
const BigInt = std.math.big.int;
@@ -112,11 +106,7 @@ const ValueRenderLocation = enum {
}
};
-const BuiltinInfo = enum {
- None,
- Range,
- Bits,
-};
+const BuiltinInfo = enum { none, bits };
const reserved_idents = std.ComptimeStringMap(void, .{
// C language
@@ -440,16 +430,24 @@ pub const Function = struct {
return f.object.dg.typeToCType(ty, kind);
}
+ fn byteSize(f: *Function, cty: CType) u64 {
+ return f.object.dg.byteSize(cty);
+ }
+
fn renderType(f: *Function, w: anytype, t: Type) !void {
return f.object.dg.renderType(w, t);
}
- fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, src_ty: Type, location: ValueRenderLocation) !void {
- return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src } }, src_ty, location);
+ fn renderCType(f: *Function, w: anytype, t: CType.Index) !void {
+ return f.object.dg.renderCType(w, t);
+ }
+
+ fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorizer, src_ty: Type, location: ValueRenderLocation) !void {
+ return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location);
}
fn fmtIntLiteral(f: *Function, ty: Type, val: Value) !std.fmt.Formatter(formatIntLiteral) {
- return f.object.dg.fmtIntLiteral(ty, val);
+ return f.object.dg.fmtIntLiteral(ty, val, .Other);
}
fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 {
@@ -574,9 +572,9 @@ pub const DeclGen = struct {
const len_val = Value.initPayload(&len_pl.base);
if (location == .StaticInitializer) {
- return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+ return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
} else {
- return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val)});
+ return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
}
}
@@ -606,7 +604,7 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
switch (ptr_val.tag()) {
- .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val)}),
+ .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}),
.decl_ref_mut, .decl_ref, .variable => {
const decl_index = switch (ptr_val.tag()) {
.decl_ref => ptr_val.castTag(.decl_ref).?.data,
@@ -670,7 +668,9 @@ pub const DeclGen = struct {
container_ptr_ty,
location,
);
- try writer.print(" + {})", .{try dg.fmtIntLiteral(Type.usize, byte_offset_val)});
+ try writer.print(" + {})", .{
+ try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other),
+ });
},
.end => {
try writer.writeAll("((");
@@ -680,7 +680,9 @@ pub const DeclGen = struct {
container_ptr_ty,
location,
);
- try writer.print(") + {})", .{try dg.fmtIntLiteral(Type.usize, Value.one)});
+ try writer.print(") + {})", .{
+ try dg.fmtIntLiteral(Type.usize, Value.one, .Other),
+ });
},
}
},
@@ -746,7 +748,7 @@ pub const DeclGen = struct {
return writer.writeAll("false");
}
},
- .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteralLoc(ty, val, location)}),
+ .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}),
.Float => {
const bits = ty.floatBits(target);
var int_pl = Type.Payload.Bits{ .base = .{ .tag = .int_signed }, .data = bits };
@@ -780,11 +782,11 @@ pub const DeclGen = struct {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&buf);
try dg.renderType(writer, ptr_ty);
- return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
} else {
try writer.writeAll("((");
try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@@ -831,7 +833,7 @@ pub const DeclGen = struct {
return writer.writeByte('}');
},
- .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef)}),
+ .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef, .Other)}),
},
.Union => {
if (!location.isInitializer()) {
@@ -854,7 +856,7 @@ pub const DeclGen = struct {
if (!field.ty.hasRuntimeBits()) continue;
try dg.renderValue(writer, field.ty, val, initializer_type);
break;
- } else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef)});
+ } else try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef, .Other)});
if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
return writer.writeByte('}');
},
@@ -868,7 +870,7 @@ pub const DeclGen = struct {
try writer.writeAll("{ .payload = ");
try dg.renderValue(writer, ty.errorUnionPayload(), val, initializer_type);
return writer.print(", .error = {x} }}", .{
- try dg.fmtIntLiteral(ty.errorUnionSet(), val),
+ try dg.fmtIntLiteral(ty.errorUnionSet(), val, .Other),
});
},
.Array, .Vector => {
@@ -927,7 +929,7 @@ pub const DeclGen = struct {
.decl_ref_mut,
.decl_ref,
=> try dg.renderParentPtr(writer, val, ty, location),
- else => try writer.print("{}", .{try dg.fmtIntLiteralLoc(ty, val, location)}),
+ else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
},
.Float => {
const bits = ty.floatBits(target);
@@ -999,8 +1001,9 @@ pub const DeclGen = struct {
// return dg.fail("Only quiet nans are supported in global variable initializers", .{});
}
- try writer.writeAll("zig_make_special_");
- if (location == .StaticInitializer) try writer.writeAll("constant_");
+ try writer.writeAll("zig_");
+ try writer.writeAll(if (location == .StaticInitializer) "init" else "make");
+ try writer.writeAll("_special_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
if (std.math.signbit(f128_val)) try writer.writeByte('-');
@@ -1020,7 +1023,7 @@ pub const DeclGen = struct {
try writer.writeAll(", ");
empty = false;
}
- try writer.print("{x}", .{try dg.fmtIntLiteralLoc(int_ty, int_val, location)});
+ try writer.print("{x}", .{try dg.fmtIntLiteral(int_ty, int_val, location)});
if (!empty) try writer.writeByte(')');
return;
},
@@ -1069,7 +1072,7 @@ pub const DeclGen = struct {
.int_u64, .one => {
try writer.writeAll("((");
try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val)});
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
},
.field_ptr,
.elem_ptr,
@@ -1561,6 +1564,10 @@ pub const DeclGen = struct {
return dg.ctypes.typeToCType(dg.gpa, ty, dg.module, kind);
}
+ fn byteSize(dg: *DeclGen, cty: CType) u64 {
+ return cty.byteSize(dg.ctypes.set, dg.module.getTarget());
+ }
+
/// Renders a type as a single identifier, generating intermediate typedefs
/// if necessary.
///
@@ -1573,9 +1580,12 @@ pub const DeclGen = struct {
/// | `renderType` | "uint8_t *" | "uint8_t *[10]" |
///
fn renderType(dg: *DeclGen, w: anytype, t: Type) error{ OutOfMemory, AnalysisFail }!void {
+ try dg.renderCType(w, try dg.typeToIndex(t, .complete));
+ }
+
+ fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void {
const store = &dg.ctypes.set;
const module = dg.module;
- const idx = try dg.typeToIndex(t, .complete);
_ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
}
@@ -1584,6 +1594,7 @@ pub const DeclGen = struct {
c_value: struct {
f: *Function,
value: CValue,
+ v: Vectorizer,
},
value: struct {
value: Value,
@@ -1593,6 +1604,7 @@ pub const DeclGen = struct {
switch (self.*) {
.c_value => |v| {
try v.f.writeCValue(w, v.value, location);
+ try v.v.elem(v.f, w);
},
.value => |v| {
try dg.renderValue(w, value_ty, v.value, location);
@@ -1829,111 +1841,93 @@ pub const DeclGen = struct {
dg.module.markDeclAlive(decl);
if (dg.module.decl_exports.get(decl_index)) |exports| {
- return writer.writeAll(exports.items[export_index].options.name);
+ try writer.writeAll(exports.items[export_index].options.name);
} else if (decl.isExtern()) {
- return writer.writeAll(mem.sliceTo(decl.name, 0));
- } else if (dg.module.test_functions.get(decl_index)) |_| {
- const gpa = dg.gpa;
- const name = try decl.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
- return writer.print("{}_{d}", .{ fmtIdent(name), @enumToInt(decl_index) });
+ try writer.writeAll(mem.sliceTo(decl.name, 0));
} else {
- const gpa = dg.gpa;
- const name = try decl.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
-
- // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), expand
- // to 3x the length of its input
- if (name.len > 1365) {
- var hash = ident_hasher_init;
- hash.update(name);
- const ident_hash = hash.finalInt();
- try writer.writeAll("zig_D_");
- return std.fmt.formatIntValue(ident_hash, "x", .{}, writer);
- } else {
- return writer.print("{}", .{fmtIdent(name)});
- }
+ // MSVC has a 4095-character token length limit, and fmtIdent can (worst case)
+ // expand to 3x the length of its input, but let's cut it off at a much shorter limit.
+ var name: [100]u8 = undefined;
+ var name_stream = std.io.fixedBufferStream(&name);
+ decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) {
+ error.NoSpaceLeft => {},
+ };
+ try writer.print("{}__{d}", .{
+ fmtIdent(name_stream.getWritten()),
+ @enumToInt(decl_index),
+ });
}
}
fn renderTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ty: Type) !void {
- const target = dg.module.getTarget();
- if (ty.isAbiInt()) {
- const int_info = ty.intInfo(target);
- const c_bits = toCIntBits(int_info.bits) orelse
- return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- try writer.print("{c}{d}", .{ signAbbrev(int_info.signedness), c_bits });
- } else if (ty.isRuntimeFloat()) {
- try ty.print(writer, dg.module);
- } else if (ty.isPtrAtRuntime()) {
- try writer.print("p{d}", .{ty.bitSize(target)});
- } else if (ty.zigTypeTag() == .Bool) {
- try writer.print("u8", .{});
- } else return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
- ty.fmt(dg.module),
- });
+ try dg.renderCTypeForBuiltinFnName(writer, try dg.typeToCType(ty, .complete));
+ }
+
+ fn renderCTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, cty: CType) !void {
+ switch (cty.tag()) {
+ else => try writer.print("{c}{d}", .{
+ if (cty.isBool())
+ signAbbrev(.unsigned)
+ else if (cty.isInteger())
+ signAbbrev(cty.signedness() orelse .unsigned)
+ else if (cty.isFloat())
+ @as(u8, 'f')
+ else if (cty.isPointer())
+ @as(u8, 'p')
+ else
+ return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
+ cty.tag(),
+ }),
+ if (cty.isFloat()) cty.floatActiveBits(dg.module.getTarget()) else dg.byteSize(cty) * 8,
+ }),
+ .array => try writer.writeAll("big"),
+ }
}
fn renderBuiltinInfo(dg: *DeclGen, writer: anytype, ty: Type, info: BuiltinInfo) !void {
- const target = dg.module.getTarget();
+ const cty = try dg.typeToCType(ty, .complete);
+ const is_big = cty.tag() == .array;
+
switch (info) {
- .None => {},
- .Range => {
- var arena = std.heap.ArenaAllocator.init(dg.gpa);
- defer arena.deinit();
-
- const ExpectedContents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
- var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
- const int_info = ty.intInfo(target);
- if (int_info.signedness == .signed) {
- const min_val = try ty.minInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, min_val)});
- }
-
- const max_val = try ty.maxInt(stack.get(), target);
- try writer.print(", {x}", .{try dg.fmtIntLiteral(ty, max_val)});
- },
- .Bits => {
- var bits_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = ty.bitSize(target),
- };
- const bits_val = Value.initPayload(&bits_pl.base);
- try writer.print(", {}", .{try dg.fmtIntLiteral(Type.u8, bits_val)});
- },
+ .none => if (!is_big) return,
+ .bits => {},
}
+
+ const target = dg.module.getTarget();
+ const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, ty.bitSize(target)),
+ };
+
+ if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
+
+ var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits };
+ try writer.print(", {}", .{try dg.fmtIntLiteral(
+ if (is_big) Type.u16 else Type.u8,
+ Value.initPayload(&bits_pl.base),
+ .FunctionArgument,
+ )});
}
fn fmtIntLiteral(
dg: *DeclGen,
ty: Type,
val: Value,
+ loc: ValueRenderLocation,
) !std.fmt.Formatter(formatIntLiteral) {
- const int_info = ty.intInfo(dg.module.getTarget());
- const c_bits = toCIntBits(int_info.bits);
- if (c_bits == null or c_bits.? > 128)
- return dg.fail("TODO implement integer constants larger than 128 bits", .{});
+ const kind: CType.Kind = switch (loc) {
+ .FunctionArgument => .parameter,
+ .Initializer, .Other => .complete,
+ .StaticInitializer => .global,
+ };
return std.fmt.Formatter(formatIntLiteral){ .data = .{
- .ty = ty,
+ .dg = dg,
+ .int_info = ty.intInfo(dg.module.getTarget()),
+ .kind = kind,
+ .cty = try dg.typeToCType(ty, kind),
.val = val,
- .mod = dg.module,
} };
}
-
- fn fmtIntLiteralLoc(
- dg: *DeclGen,
- ty: Type,
- val: Value,
- location: ValueRenderLocation, // TODO: Instead add this as optional arg to fmtIntLiteral
- ) !std.fmt.Formatter(formatIntLiteral) {
- const int_info = ty.intInfo(dg.module.getTarget());
- const c_bits = toCIntBits(int_info.bits);
- if (c_bits == null or c_bits.? > 128)
- return dg.fail("TODO implement integer constants larger than 128 bits", .{});
- return std.fmt.Formatter(formatIntLiteral){ .data = .{ .ty = ty, .val = val, .mod = dg.module, .location = location } };
- }
};
const CTypeFix = enum { prefix, suffix };
@@ -2450,7 +2444,7 @@ pub fn genErrDecls(o: *Object) !void {
const len_val = Value.initPayload(&len_pl.base);
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
- fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val),
+ fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
});
}
try writer.writeAll("};\n");
@@ -2501,7 +2495,10 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
var int_pl: Value.Payload.U64 = undefined;
const int_val = tag_val.enumToInt(enum_ty, &int_pl);
- var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len };
+ var name_ty_pl = Type.Payload.Len{
+ .base = .{ .tag = .array_u8_sentinel_0 },
+ .data = name.len,
+ };
const name_ty = Type.initPayload(&name_ty_pl.base);
var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
@@ -2510,14 +2507,16 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
const len_val = Value.initPayload(&len_pl.base);
- try w.print(" case {}: {{\n static ", .{try o.dg.fmtIntLiteral(enum_ty, int_val)});
+ try w.print(" case {}: {{\n static ", .{
+ try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
+ });
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete);
try w.writeAll(" = ");
try o.dg.renderValue(w, name_ty, name_val, .Initializer);
try w.writeAll(";\n return (");
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
- fmtIdent("name"), try o.dg.fmtIntLiteral(Type.usize, len_val),
+ fmtIdent("name"), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
});
try w.writeAll(" }\n");
@@ -2535,7 +2534,12 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
const fwd_decl_writer = o.dg.fwd_decl.writer();
try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)});
- try o.dg.renderFunctionSignature(fwd_decl_writer, fn_decl_index, .forward, .{ .string = fn_name });
+ try o.dg.renderFunctionSignature(
+ fwd_decl_writer,
+ fn_decl_index,
+ .forward,
+ .{ .string = fn_name },
+ );
try fwd_decl_writer.writeAll(";\n");
try w.print("static zig_{s} ", .{@tagName(key)});
@@ -2741,6 +2745,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.const_ty => unreachable, // excluded from function bodies
.arg => try airArg(f, inst),
+ .trap => try airTrap(f.object.writer()),
.breakpoint => try airBreakpoint(f.object.writer()),
.ret_addr => try airRetAddr(f, inst),
.frame_addr => try airFrameAddress(f, inst),
@@ -2752,35 +2757,35 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
// TODO use a different strategy for add, sub, mul, div
// that communicates to the optimizer that wrapping is UB.
- .add => try airBinOp(f, inst, "+", "add", .None),
- .sub => try airBinOp(f, inst, "-", "sub", .None),
- .mul => try airBinOp(f, inst, "*", "mul", .None),
+ .add => try airBinOp(f, inst, "+", "add", .none),
+ .sub => try airBinOp(f, inst, "-", "sub", .none),
+ .mul => try airBinOp(f, inst, "*", "mul", .none),
.neg => try airFloatNeg(f, inst),
- .div_float => try airBinBuiltinCall(f, inst, "div", .None),
+ .div_float => try airBinBuiltinCall(f, inst, "div", .none),
- .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .None),
+ .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const lhs_ty = f.air.typeOf(bin_op.lhs);
+ const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType();
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
- break :blk if (lhs_ty.isInt())
- try airBinOp(f, inst, "%", "rem", .None)
+ break :blk if (lhs_scalar_ty.isInt())
+ try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
},
- .div_floor => try airBinBuiltinCall(f, inst, "div_floor", .None),
- .mod => try airBinBuiltinCall(f, inst, "mod", .None),
+ .div_floor => try airBinBuiltinCall(f, inst, "div_floor", .none),
+ .mod => try airBinBuiltinCall(f, inst, "mod", .none),
- .addwrap => try airBinBuiltinCall(f, inst, "addw", .Bits),
- .subwrap => try airBinBuiltinCall(f, inst, "subw", .Bits),
- .mulwrap => try airBinBuiltinCall(f, inst, "mulw", .Bits),
+ .addwrap => try airBinBuiltinCall(f, inst, "addw", .bits),
+ .subwrap => try airBinBuiltinCall(f, inst, "subw", .bits),
+ .mulwrap => try airBinBuiltinCall(f, inst, "mulw", .bits),
- .add_sat => try airBinBuiltinCall(f, inst, "adds", .Bits),
- .sub_sat => try airBinBuiltinCall(f, inst, "subs", .Bits),
- .mul_sat => try airBinBuiltinCall(f, inst, "muls", .Bits),
- .shl_sat => try airBinBuiltinCall(f, inst, "shls", .Bits),
+ .add_sat => try airBinBuiltinCall(f, inst, "adds", .bits),
+ .sub_sat => try airBinBuiltinCall(f, inst, "subs", .bits),
+ .mul_sat => try airBinBuiltinCall(f, inst, "muls", .bits),
+ .shl_sat => try airBinBuiltinCall(f, inst, "shls", .bits),
.sqrt => try airUnFloatOp(f, inst, "sqrt"),
.sin => try airUnFloatOp(f, inst, "sin"),
@@ -2799,34 +2804,38 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.mul_add => try airMulAdd(f, inst),
- .add_with_overflow => try airOverflow(f, inst, "add", .Bits),
- .sub_with_overflow => try airOverflow(f, inst, "sub", .Bits),
- .mul_with_overflow => try airOverflow(f, inst, "mul", .Bits),
- .shl_with_overflow => try airOverflow(f, inst, "shl", .Bits),
+ .add_with_overflow => try airOverflow(f, inst, "add", .bits),
+ .sub_with_overflow => try airOverflow(f, inst, "sub", .bits),
+ .mul_with_overflow => try airOverflow(f, inst, "mul", .bits),
+ .shl_with_overflow => try airOverflow(f, inst, "shl", .bits),
.min => try airMinMax(f, inst, '<', "fmin"),
.max => try airMinMax(f, inst, '>', "fmax"),
.slice => try airSlice(f, inst),
- .cmp_gt => try airCmpOp(f, inst, ">", "gt"),
- .cmp_gte => try airCmpOp(f, inst, ">=", "ge"),
- .cmp_lt => try airCmpOp(f, inst, "<", "lt"),
- .cmp_lte => try airCmpOp(f, inst, "<=", "le"),
+ .cmp_gt => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .gt),
+ .cmp_gte => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .gte),
+ .cmp_lt => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .lt),
+ .cmp_lte => try airCmpOp(f, inst, f.air.instructions.items(.data)[inst].bin_op, .lte),
- .cmp_eq => try airEquality(f, inst, "((", "==", "eq"),
- .cmp_neq => try airEquality(f, inst, "!((", "!=", "ne"),
+ .cmp_eq => try airEquality(f, inst, .eq),
+ .cmp_neq => try airEquality(f, inst, .neq),
- .cmp_vector => return f.fail("TODO: C backend: implement cmp_vector", .{}),
+ .cmp_vector => blk: {
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+ break :blk try airCmpOp(f, inst, extra, extra.compareOperator());
+ },
.cmp_lt_errors_len => try airCmpLtErrorsLen(f, inst),
// bool_and and bool_or are non-short-circuit operations
- .bool_and, .bit_and => try airBinOp(f, inst, "&", "and", .None),
- .bool_or, .bit_or => try airBinOp(f, inst, "|", "or", .None),
- .xor => try airBinOp(f, inst, "^", "xor", .None),
- .shr, .shr_exact => try airBinBuiltinCall(f, inst, "shr", .None),
- .shl, => try airBinBuiltinCall(f, inst, "shlw", .Bits),
- .shl_exact => try airBinOp(f, inst, "<<", "shl", .None),
+ .bool_and, .bit_and => try airBinOp(f, inst, "&", "and", .none),
+ .bool_or, .bit_or => try airBinOp(f, inst, "|", "or", .none),
+ .xor => try airBinOp(f, inst, "^", "xor", .none),
+ .shr, .shr_exact => try airBinBuiltinCall(f, inst, "shr", .none),
+ .shl, => try airBinBuiltinCall(f, inst, "shlw", .bits),
+ .shl_exact => try airBinOp(f, inst, "<<", "shl", .none),
.not => try airNot (f, inst),
.optional_payload => try airOptionalPayload(f, inst),
@@ -2871,11 +2880,11 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.memcpy => try airMemcpy(f, inst),
.set_union_tag => try airSetUnionTag(f, inst),
.get_union_tag => try airGetUnionTag(f, inst),
- .clz => try airUnBuiltinCall(f, inst, "clz", .Bits),
- .ctz => try airUnBuiltinCall(f, inst, "ctz", .Bits),
- .popcount => try airUnBuiltinCall(f, inst, "popcount", .Bits),
- .byte_swap => try airUnBuiltinCall(f, inst, "byte_swap", .Bits),
- .bit_reverse => try airUnBuiltinCall(f, inst, "bit_reverse", .Bits),
+ .clz => try airUnBuiltinCall(f, inst, "clz", .bits),
+ .ctz => try airUnBuiltinCall(f, inst, "ctz", .bits),
+ .popcount => try airUnBuiltinCall(f, inst, "popcount", .bits),
+ .byte_swap => try airUnBuiltinCall(f, inst, "byte_swap", .bits),
+ .bit_reverse => try airUnBuiltinCall(f, inst, "bit_reverse", .bits),
.tag_name => try airTagName(f, inst),
.error_name => try airErrorName(f, inst),
.splat => try airSplat(f, inst),
@@ -3266,7 +3275,10 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const ptr_info = f.air.typeOf(ty_op.operand).ptrInfo().data;
+
+ const ptr_ty = f.air.typeOf(ty_op.operand);
+ const ptr_scalar_ty = ptr_ty.scalarType();
+ const ptr_info = ptr_scalar_ty.ptrInfo().data;
const src_ty = ptr_info.pointee_type;
if (!src_ty.hasRuntimeBitsIgnoreComptime() or
@@ -3284,20 +3296,23 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target);
const is_array = lowersToArray(src_ty, target);
const need_memcpy = !is_aligned or is_array;
- const writer = f.object.writer();
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, src_ty);
+ const v = try Vectorizer.start(f, inst, writer, ptr_ty);
if (need_memcpy) {
try writer.writeAll("memcpy(");
if (!is_array) try writer.writeByte('&');
- try f.writeCValue(writer, local, .FunctionArgument);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(", (const char *)");
try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
- } else if (ptr_info.host_size != 0) {
+ } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
var host_pl = Type.Payload.Bits{
.base = .{ .tag = .int_unsigned },
.data = ptr_info.host_size * 8,
@@ -3323,6 +3338,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const field_ty = Type.initPayload(&field_pl.base);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = (");
try f.renderType(writer, src_ty);
try writer.writeAll(")zig_wrap_");
@@ -3341,16 +3357,21 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
try f.writeCValueDeref(writer, operand);
+ try v.elem(f, writer);
try writer.print(", {})", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
if (cant_cast) try writer.writeByte(')');
- try f.object.dg.renderBuiltinInfo(writer, field_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, field_ty, .bits);
try writer.writeByte(')');
} else {
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValueDeref(writer, operand);
+ try v.elem(f, writer);
}
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
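
A note on the recurring pattern in this hunk and the ones below: the new Vectorizer.start/elem/end calls wrap each scalar statement in a per-element loop when the operand type is a vector, and appear to emit nothing for scalar operands, so the existing scalar lowering is untouched. As an illustrative sketch with made-up names (not taken from the patch), a load through a *u32 versus a pointer to a @Vector(4, u32) would come out roughly as:

    uint32_t t0 = *p;                                /* scalar: same as before */
    uint32_t t1[4];
    for (size_t i = 0; i < 4; i++) t1[i] = (*q)[i];  /* vector: loop emitted by
                                                        Vectorizer, "[i]" added
                                                        by each v.elem() call   */

The same start/elem/end triple is what the arithmetic, compare, and builtin-call lowerings further down are threaded with.
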
@@ -3416,15 +3437,22 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
- const local = try f.allocLocal(inst, inst_ty);
- const operand_ty = f.air.typeOf(ty_op.operand);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
- try f.renderIntCast(writer, inst_ty, operand, operand_ty, .Other);
+ try f.renderIntCast(writer, inst_scalar_ty, operand, v, scalar_ty, .Other);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -3438,34 +3466,40 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
- const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
+ const inst_scalar_ty = inst_ty.scalarType();
const target = f.object.dg.module.getTarget();
- const dest_int_info = inst_ty.intInfo(target);
+ const dest_int_info = inst_scalar_ty.intInfo(target);
const dest_bits = dest_int_info.bits;
const dest_c_bits = toCIntBits(dest_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
const operand_ty = f.air.typeOf(ty_op.operand);
- const operand_int_info = operand_ty.intInfo(target);
+ const scalar_ty = operand_ty.scalarType();
+ const scalar_int_info = scalar_ty.intInfo(target);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
if (dest_c_bits < 64) {
try writer.writeByte('(');
- try f.renderType(writer, inst_ty);
+ try f.renderType(writer, inst_scalar_ty);
try writer.writeByte(')');
}
- const needs_lo = operand_int_info.bits > 64 and dest_bits <= 64;
+ const needs_lo = scalar_int_info.bits > 64 and dest_bits <= 64;
if (needs_lo) {
try writer.writeAll("zig_lo_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
}
if (dest_bits >= 8 and std.math.isPowerOfTwo(dest_bits)) {
try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
} else switch (dest_int_info.signedness) {
.unsigned => {
var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
@@ -3475,15 +3509,16 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
- const mask_val = try inst_ty.maxInt(stack.get(), target);
+ const mask_val = try inst_scalar_ty.maxInt(stack.get(), target);
try writer.writeAll("zig_and_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
- try writer.print(", {x})", .{try f.fmtIntLiteral(operand_ty, mask_val)});
+ try v.elem(f, writer);
+ try writer.print(", {x})", .{try f.fmtIntLiteral(scalar_ty, mask_val)});
},
.signed => {
- const c_bits = toCIntBits(operand_int_info.bits) orelse
+ const c_bits = toCIntBits(scalar_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
var shift_pl = Value.Payload.U64{
.base = .{ .tag = .int_u64 },
@@ -3492,7 +3527,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
const shift_val = Value.initPayload(&shift_pl.base);
try writer.writeAll("zig_shr_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
if (c_bits == 128) {
try writer.print("(zig_bitcast_i{d}(", .{c_bits});
} else {
@@ -3505,6 +3540,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.print("(uint{d}_t)", .{c_bits});
}
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
if (c_bits == 128) try writer.writeByte(')');
try writer.print(", {})", .{try f.fmtIntLiteral(Type.u8, shift_val)});
if (c_bits == 128) try writer.writeByte(')');
@@ -3514,6 +3550,8 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
if (needs_lo) try writer.writeByte(')');
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
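
For context, the truncation emitted here reduces to a mask for unsigned destinations (AND with maxInt of the destination type) and a zig_shr_-based sign-extension sequence for signed ones; only the scalar type now feeds the builtin name, so vectors reuse the same form per element. A rough sketch, assuming a u64 operand x truncated to a u6 result t (exact literal spelling is whatever fmtIntLiteral produces):

    t = (uint8_t)zig_and_u64(x, UINT64_C(0x3f));  /* unsigned: keep the low 6 bits */
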
@@ -3550,7 +3588,10 @@ fn storeUndefined(f: *Function, lhs_child_ty: Type, dest_ptr: CValue) !CValue {
fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
// *a = b;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const ptr_info = f.air.typeOf(bin_op.lhs).ptrInfo().data;
+
+ const ptr_ty = f.air.typeOf(bin_op.lhs);
+ const ptr_scalar_ty = ptr_ty.scalarType();
+ const ptr_info = ptr_scalar_ty.ptrInfo().data;
if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
@@ -3573,11 +3614,13 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target);
const is_array = lowersToArray(ptr_info.pointee_type, target);
const need_memcpy = !is_aligned or is_array;
- const writer = f.object.writer();
const src_val = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const writer = f.object.writer();
+ const v = try Vectorizer.start(f, inst, writer, ptr_ty);
+
if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
@@ -3598,16 +3641,18 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("memcpy((char *)");
try f.writeCValue(writer, ptr_val, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
if (!is_array) try writer.writeByte('&');
try f.writeCValue(writer, array_src, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", sizeof(");
try f.renderType(writer, src_ty);
try writer.writeAll("))");
if (src_val == .constant) {
try freeLocal(f, inst, array_src.new_local, 0);
}
- } else if (ptr_info.host_size != 0) {
+ } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
const host_bits = ptr_info.host_size * 8;
var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits };
const host_ty = Type.initPayload(&host_pl.base);
@@ -3644,12 +3689,14 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
const mask_val = Value.initPayload(&mask_pl.base);
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_or_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(zig_and_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
@@ -3671,14 +3718,19 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
}
try f.writeCValue(writer, src_val, .Other);
+ try v.elem(f, writer);
if (cant_cast) try writer.writeByte(')');
try writer.print(", {}))", .{try f.fmtIntLiteral(bit_offset_ty, bit_offset_val)});
} else {
try f.writeCValueDeref(writer, ptr_val);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValue(writer, src_val, .Other);
+ try v.elem(f, writer);
}
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return .none;
}
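
The host_size branch above is the read-modify-write path for fields of packed containers; the added vector_index check keeps stores to genuine vector elements out of it. Storing a u4 value v into bits 4..7 of a u8 host word at p would come out roughly as (names illustrative, literal spelling up to fmtIntLiteral):

    *p = zig_or_u8(zig_and_u8(*p, UINT8_C(0x0f)),
                   zig_shl_u8((uint8_t)v, UINT8_C(4)));
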
@@ -3696,51 +3748,39 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const vector_ty = f.air.typeOf(bin_op.lhs);
- const scalar_ty = vector_ty.scalarType();
+ const operand_ty = f.air.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
const w = f.object.writer();
-
const local = try f.allocLocal(inst, inst_ty);
-
- switch (vector_ty.zigTypeTag()) {
- .Vector => {
- try w.writeAll("zig_v");
- try w.writeAll(operation);
- try w.writeAll("o_");
- try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
- try w.writeAll("(");
- try f.writeCValueMember(w, local, .{ .field = 1 });
- try w.writeAll(", ");
- try f.writeCValueMember(w, local, .{ .field = 0 });
- try w.print(", {d}, ", .{vector_ty.vectorLen()});
- },
- else => {
- try f.writeCValueMember(w, local, .{ .field = 1 });
- try w.writeAll(" = zig_");
- try w.writeAll(operation);
- try w.writeAll("o_");
- try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
- try w.writeAll("(&");
- try f.writeCValueMember(w, local, .{ .field = 0 });
- try w.writeAll(", ");
- },
- }
-
+ const v = try Vectorizer.start(f, inst, w, operand_ty);
+ try f.writeCValueMember(w, local, .{ .field = 1 });
+ try v.elem(f, w);
+ try w.writeAll(" = zig_");
+ try w.writeAll(operation);
+ try w.writeAll("o_");
+ try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
+ try w.writeAll("(&");
+ try f.writeCValueMember(w, local, .{ .field = 0 });
+ try v.elem(f, w);
+ try w.writeAll(", ");
try f.writeCValue(w, lhs, .FunctionArgument);
+ try v.elem(f, w);
try w.writeAll(", ");
try f.writeCValue(w, rhs, .FunctionArgument);
+ try v.elem(f, w);
try f.object.dg.renderBuiltinInfo(w, scalar_ty, info);
try w.writeAll(");\n");
+ try v.end(f, inst, w);
return local;
}
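
With the Vectorizer handling vectors, the dedicated zig_v<op>o_ helpers go away: every element now goes through the scalar zig_<op>o_ builtin, writing the wrapped result through field 0 of the result local and the overflow flag into field 1. For a @Vector(4, u32) add this would emit roughly (field and variable names illustrative):

    for (size_t i = 0; i < 4; i++)
        t.f1[i] = zig_addo_u32(&t.f0[i], a[i], b[i], 32);
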
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
- if (inst_ty.tag() != .bool)
- return try airUnBuiltinCall(f, inst, "not", .Bits);
-
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
+ if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits);
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ty_op.operand});
@@ -3750,14 +3790,20 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
const op = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
+ const inst_ty = f.air.typeOfIndex(inst);
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
-
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try writer.writeByte('!');
try f.writeCValue(writer, op, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -3770,63 +3816,89 @@ fn airBinOp(
) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const operand_ty = f.air.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType();
const target = f.object.dg.module.getTarget();
- if ((operand_ty.isInt() and operand_ty.bitSize(target) > 64) or operand_ty.isRuntimeFloat())
+ if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
- const lhs = try f.resolveInst(bin_op.lhs);
- const rhs = try f.resolveInst(bin_op.rhs);
-
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
-
- if (f.liveness.isUnused(inst)) return .none;
-
- const inst_ty = f.air.typeOfIndex(inst);
-
- const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
- try f.writeCValue(writer, lhs, .Other);
- try writer.writeByte(' ');
- try writer.writeAll(operator);
- try writer.writeByte(' ');
- try f.writeCValue(writer, rhs, .Other);
- try writer.writeAll(";\n");
-
- return local;
-}
-
-fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation: []const u8) !CValue {
- const bin_op = f.air.instructions.items(.data)[inst].bin_op;
-
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
- const operand_ty = f.air.typeOf(bin_op.lhs);
- const target = f.object.dg.module.getTarget();
- if (operand_ty.isInt() and operand_ty.bitSize(target) > 64)
- return try cmpBuiltinCall(f, inst, operator, "cmp");
- if (operand_ty.isRuntimeFloat())
- return try cmpBuiltinCall(f, inst, operator, operation);
-
- const inst_ty = f.air.typeOfIndex(inst);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const inst_ty = f.air.typeOfIndex(inst);
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeByte(' ');
try writer.writeAll(operator);
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
+ return local;
+}
+
+fn airCmpOp(
+ f: *Function,
+ inst: Air.Inst.Index,
+ data: anytype,
+ operator: std.math.CompareOperator,
+) !CValue {
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+ return .none;
+ }
+
+ const operand_ty = f.air.typeOf(data.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
+ const target = f.object.dg.module.getTarget();
+ const scalar_bits = scalar_ty.bitSize(target);
+ if (scalar_ty.isInt() and scalar_bits > 64)
+ return airCmpBuiltinCall(
+ f,
+ inst,
+ data,
+ operator,
+ .cmp,
+ if (scalar_bits > 128) .bits else .none,
+ );
+ if (scalar_ty.isRuntimeFloat())
+ return airCmpBuiltinCall(f, inst, data, operator, .operator, .none);
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const lhs = try f.resolveInst(data.lhs);
+ const rhs = try f.resolveInst(data.rhs);
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeByte(' ');
+ try writer.writeAll(compareOperatorC(operator));
+ try writer.writeByte(' ');
+ try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
return local;
}
@@ -3834,9 +3906,7 @@ fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, operation:
fn airEquality(
f: *Function,
inst: Air.Inst.Index,
- negate_prefix: []const u8,
- operator: []const u8,
- operation: []const u8,
+ operator: std.math.CompareOperator,
) !CValue {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
@@ -3847,10 +3917,18 @@ fn airEquality(
const operand_ty = f.air.typeOf(bin_op.lhs);
const target = f.object.dg.module.getTarget();
- if (operand_ty.isInt() and operand_ty.bitSize(target) > 64)
- return try cmpBuiltinCall(f, inst, operator, "cmp");
+ const operand_bits = operand_ty.bitSize(target);
+ if (operand_ty.isInt() and operand_bits > 64)
+ return airCmpBuiltinCall(
+ f,
+ inst,
+ bin_op,
+ operator,
+ .cmp,
+ if (operand_bits > 128) .bits else .none,
+ );
if (operand_ty.isRuntimeFloat())
- return try cmpBuiltinCall(f, inst, operator, operation);
+ return airCmpBuiltinCall(f, inst, bin_op, operator, .operator, .none);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
@@ -3866,7 +3944,12 @@ fn airEquality(
// (A && B) || (C && (A == B))
// A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload
- try writer.writeAll(negate_prefix);
+ switch (operator) {
+ .eq => {},
+ .neq => try writer.writeByte('!'),
+ else => unreachable,
+ }
+ try writer.writeAll("((");
try f.writeCValue(writer, lhs, .Other);
try writer.writeAll(".is_null && ");
try f.writeCValue(writer, rhs, .Other);
@@ -3885,7 +3968,7 @@ fn airEquality(
try f.writeCValue(writer, lhs, .Other);
try writer.writeByte(' ');
- try writer.writeAll(operator);
+ try writer.writeAll(compareOperatorC(operator));
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
try writer.writeAll(";\n");
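
The rewritten airEquality derives the leading '!' from the CompareOperator instead of taking a prebuilt prefix string, but the optional lowering itself is unchanged. For two ?u32 values a and b, cmp_eq still expands to the (A && B) || (C && (A == B)) form described in the comment above, roughly:

    t = ((a.is_null && b.is_null) ||
         (a.payload == b.payload && (a.is_null == b.is_null)));
    /* cmp_neq is the same expression with a leading '!' */
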
@@ -3927,11 +4010,14 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const elem_ty = inst_ty.elemType2();
+ const inst_scalar_ty = inst_ty.scalarType();
+ const elem_ty = inst_scalar_ty.elemType2();
const local = try f.allocLocal(inst, inst_ty);
const writer = f.object.writer();
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = ");
if (elem_ty.hasRuntimeBitsIgnoreComptime()) {
@@ -3939,19 +4025,26 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
// results in a NULL pointer, or if LHS is NULL. The operation is only UB
// if the result is NULL and then dereferenced.
try writer.writeByte('(');
- try f.renderType(writer, inst_ty);
+ try f.renderType(writer, inst_scalar_ty);
try writer.writeAll(")(((uintptr_t)");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(") ");
try writer.writeByte(operator);
try writer.writeAll(" (");
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll("*sizeof(");
try f.renderType(writer, elem_ty);
try writer.writeAll(")))");
- } else try f.writeCValue(writer, lhs, .Initializer);
+ } else {
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ }
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
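
The uintptr_t round-trip is what keeps the emitted C well-defined in the cases the comment mentions (NULL base or NULL result); with the scalar type now driving the cast, vectors of pointers get the same treatment element by element. A ptr_add of n elements on a *u32 lowers to roughly:

    p2 = (uint32_t *)(((uintptr_t)p) + (n * sizeof(uint32_t)));
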
@@ -3964,10 +4057,12 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
}
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
const target = f.object.dg.module.getTarget();
- if (inst_ty.isInt() and inst_ty.bitSize(target) > 64)
- return try airBinBuiltinCall(f, inst, operation[1..], .None);
- if (inst_ty.isRuntimeFloat())
+ if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64)
+ return try airBinBuiltinCall(f, inst, operation[1..], .none);
+ if (inst_scalar_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -3976,19 +4071,26 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
// (lhs <> rhs) ? lhs : rhs
try writer.writeAll(" = (");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeByte(' ');
try writer.writeByte(operator);
try writer.writeByte(' ');
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(") ? ");
try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" : ");
try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
return local;
}
@@ -4412,12 +4514,56 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
// Ensure padding bits have the expected value.
if (dest_ty.isAbiInt()) {
+ const dest_cty = try f.typeToCType(dest_ty, .complete);
+ const dest_info = dest_ty.intInfo(target);
+ var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
+ .unsigned => .int_unsigned,
+ .signed => .int_signed,
+ } }, .data = dest_info.bits };
+ var wrap_cty: ?CType = null;
+ var need_bitcasts = false;
+
try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_wrap_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, dest_ty);
+ if (dest_cty.castTag(.array)) |pl| {
+ try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
+ .Little => pl.data.len - 1,
+ .Big => 0,
+ }});
+ const elem_cty = f.indexToCType(pl.data.elem_type);
+ wrap_cty = elem_cty.toSignedness(dest_info.signedness);
+ need_bitcasts = wrap_cty.?.tag() == .zig_i128;
+ info_ty_pl.data -= 1;
+ info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8);
+ info_ty_pl.data += 1;
+ }
+ try writer.writeAll(" = ");
+ if (need_bitcasts) {
+ try writer.writeAll("zig_bitcast_");
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?.toUnsigned());
+ try writer.writeByte('(');
+ }
+ try writer.writeAll("zig_wrap_");
+ const info_ty = Type.initPayload(&info_ty_pl.base);
+ if (wrap_cty) |cty|
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, cty)
+ else
+ try f.object.dg.renderTypeForBuiltinFnName(writer, info_ty);
try writer.writeByte('(');
+ if (need_bitcasts) {
+ try writer.writeAll("zig_bitcast_");
+ try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?);
+ try writer.writeByte('(');
+ }
try f.writeCValue(writer, local, .Other);
- try f.object.dg.renderBuiltinInfo(writer, dest_ty, .Bits);
+ if (dest_cty.castTag(.array)) |pl| {
+ try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) {
+ .Little => pl.data.len - 1,
+ .Big => 0,
+ }});
+ }
+ if (need_bitcasts) try writer.writeByte(')');
+ try f.object.dg.renderBuiltinInfo(writer, info_ty, .bits);
+ if (need_bitcasts) try writer.writeByte(')');
try writer.writeAll(");\n");
}
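
This hunk extends the padding-bit wrap to destination integers whose C representation is an array of limbs (or zig_i128/zig_u128): only the most-significant limb needs wrapping, and the wrap width is the destination bit count reduced into that limb. As an illustration, assuming a u72 destination stored as two uint64_t limbs on a little-endian target, the emitted fixup would be roughly:

    t[1] = zig_wrap_u64(t[1], UINT8_C(8));  /* 72 bits leaves 8 significant bits in the top limb */
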
@@ -4428,6 +4574,11 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
+fn airTrap(writer: anytype) !CValue {
+ try writer.writeAll("zig_trap();\n");
+ return .none;
+}
+
fn airBreakpoint(writer: anytype) !CValue {
try writer.writeAll("zig_breakpoint();\n");
return .none;
@@ -5427,7 +5578,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
if (cant_cast) try writer.writeByte(')');
- try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
try writer.writeAll(");\n");
if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
@@ -5860,7 +6011,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, operand, .FunctionArgument);
try writer.writeByte(')');
if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
- try f.object.dg.renderBuiltinInfo(writer, inst_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
}
try writer.writeAll(";\n");
@@ -5906,19 +6057,35 @@ fn airUnBuiltinCall(
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
const operand_ty = f.air.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType();
+
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{operation});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, operand, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -5935,55 +6102,108 @@ fn airBinBuiltinCall(
return .none;
}
+ const operand_ty = f.air.typeOf(bin_op.lhs);
+ const operand_cty = try f.typeToCType(operand_ty, .complete);
+ const is_big = operand_cty.tag() == .array;
+
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const operand_ty = f.air.typeOf(bin_op.lhs);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType();
+
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ if (is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{operation});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, operand_ty, info);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
-fn cmpBuiltinCall(
+fn airCmpBuiltinCall(
f: *Function,
inst: Air.Inst.Index,
- operator: []const u8,
- operation: []const u8,
+ data: anytype,
+ operator: std.math.CompareOperator,
+ operation: enum { cmp, operator },
+ info: BuiltinInfo,
) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
- const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const operand_ty = f.air.typeOf(bin_op.lhs);
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+ return .none;
+ }
- const lhs = try f.resolveInst(bin_op.lhs);
- const rhs = try f.resolveInst(bin_op.rhs);
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+ const lhs = try f.resolveInst(data.lhs);
+ const rhs = try f.resolveInst(data.rhs);
+ try reap(f, inst, &.{ data.lhs, data.rhs });
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const operand_ty = f.air.typeOf(data.lhs);
+ const scalar_ty = operand_ty.scalarType();
+
+ const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
+ const ref_ret = inst_scalar_cty.tag() == .array;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = zig_");
- try writer.writeAll(operation);
- try writer.writeByte('_');
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ if (!ref_ret) {
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ }
+ try writer.print("zig_{s}_", .{switch (operation) {
+ else => @tagName(operation),
+ .operator => compareOperatorAbbrev(operator),
+ }});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
+ if (ref_ret) {
+ try f.writeCValue(writer, local, .FunctionArgument);
+ try v.elem(f, writer);
+ try writer.writeAll(", ");
+ }
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
- try writer.print(") {s} {};\n", .{ operator, try f.fmtIntLiteral(Type.initTag(.i32), Value.zero) });
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
+ try writer.writeByte(')');
+ if (!ref_ret) try writer.print(" {s} {}", .{
+ compareOperatorC(operator),
+ try f.fmtIntLiteral(Type.initTag(.i32), Value.zero),
+ });
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6328,33 +6548,120 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
- const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_cty = try f.typeToIndex(inst_scalar_ty, .complete);
+ const need_memcpy = f.indexToCType(inst_scalar_cty).tag() == .array;
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
+ if (need_memcpy) try writer.writeAll("memcpy(&");
try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
+ try v.elem(f, writer);
+ try writer.writeAll(if (need_memcpy) ", &" else " = ");
+ try f.writeCValue(writer, operand, .Other);
+ if (need_memcpy) {
+ try writer.writeAll(", sizeof(");
+ try f.renderCType(writer, inst_scalar_cty);
+ try writer.writeAll("))");
+ }
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
- _ = operand;
- return f.fail("TODO: C backend: implement airSplat", .{});
+ return local;
}
fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
- return f.fail("TODO: C backend: implement airSelect", .{});
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
+ return .none;
+ }
+
+ const pred = try f.resolveInst(pl_op.operand);
+ const lhs = try f.resolveInst(extra.lhs);
+ const rhs = try f.resolveInst(extra.rhs);
+ try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
+
+ const inst_ty = f.air.typeOfIndex(inst);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
+ try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, pred, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" ? ");
+ try f.writeCValue(writer, lhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(" : ");
+ try f.writeCValue(writer, rhs, .Other);
+ try v.elem(f, writer);
+ try writer.writeAll(";\n");
+ try v.end(f, inst, writer);
+
+ return local;
}
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst)) return .none;
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
- return f.fail("TODO: C backend: implement airShuffle", .{});
+ if (f.liveness.isUnused(inst)) {
+ try reap(f, inst, &.{ extra.a, extra.b });
+ return .none;
+ }
+
+ const mask = f.air.values[extra.mask];
+ const lhs = try f.resolveInst(extra.a);
+ const rhs = try f.resolveInst(extra.b);
+
+ const module = f.object.dg.module;
+ const target = module.getTarget();
+ const inst_ty = f.air.typeOfIndex(inst);
+
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst, inst_ty);
+ try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands
+ for (0..extra.mask_len) |index| {
+ var dst_pl = Value.Payload.U64{
+ .base = .{ .tag = .int_u64 },
+ .data = @intCast(u64, index),
+ };
+
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeByte('[');
+ try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&dst_pl.base), .Other);
+ try writer.writeAll("] = ");
+
+ var buf: Value.ElemValueBuffer = undefined;
+ const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target);
+ var src_pl = Value.Payload.U64{
+ .base = .{ .tag = .int_u64 },
+ .data = @intCast(u64, mask_elem ^ mask_elem >> 63),
+ };
+
+ try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
+ try writer.writeByte('[');
+ try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&src_pl.base), .Other);
+ try writer.writeAll("];\n");
+ }
+
+ return local;
}
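
airShuffle is implemented by unrolling the mask at compile time: non-negative mask entries index the first operand and negative entries index the second, with mask_elem ^ mask_elem >> 63 turning -1, -2, ... into 0, 1, ... (bitwise NOT of a negative i64). For a mask of { 0, -1, 1, -2 } over operands a and b, the emitted statements would look like:

    t[0] = a[0];
    t[1] = b[0];
    t[2] = a[1];
    t[3] = b[1];
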
fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
@@ -6370,65 +6677,45 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(reduce.operand);
try reap(f, inst, &.{reduce.operand});
const operand_ty = f.air.typeOf(reduce.operand);
- const vector_len = operand_ty.vectorLen();
const writer = f.object.writer();
- const Op = union(enum) {
- call_fn: []const u8,
+ const use_operator = scalar_ty.bitSize(target) <= 64;
+ const op: union(enum) {
+ const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
+ float_op: Func,
+ builtin: Func,
infix: []const u8,
ternary: []const u8,
- };
- var fn_name_buf: [64]u8 = undefined;
- const op: Op = switch (reduce.operation) {
- .And => .{ .infix = " &= " },
- .Or => .{ .infix = " |= " },
- .Xor => .{ .infix = " ^= " },
+ } = switch (reduce.operation) {
+ .And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } },
+ .Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
+ .Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
.Min => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .ternary = " < " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{
- libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
- }) catch unreachable,
- };
+ .Int => if (use_operator) .{ .ternary = " < " } else .{
+ .builtin = .{ .operation = "min" },
},
+ .Float => .{ .float_op = .{ .operation = "fmin" } },
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .ternary = " > " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{
- libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
- }) catch unreachable,
- };
+ .Int => if (use_operator) .{ .ternary = " > " } else .{
+ .builtin = .{ .operation = "max" },
},
+ .Float => .{ .float_op = .{ .operation = "fmax" } },
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .infix = " += " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{
- compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
- };
+ .Int => if (use_operator) .{ .infix = " += " } else .{
+ .builtin = .{ .operation = "addw", .info = .bits },
},
+ .Float => .{ .builtin = .{ .operation = "add" } },
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag()) {
- .Int => Op{ .infix = " *= " },
- .Float => op: {
- const float_bits = scalar_ty.floatBits(target);
- break :op Op{
- .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{
- compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
- };
+ .Int => if (use_operator) .{ .infix = " *= " } else .{
+ .builtin = .{ .operation = "mulw", .info = .bits },
},
+ .Float => .{ .builtin = .{ .operation = "mul" } },
else => unreachable,
},
};
@@ -6444,75 +6731,96 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
// }
// break :reduce accum;
// }
- const it = try f.allocLocal(inst, Type.usize);
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll(" = 0;\n");
const accum = try f.allocLocal(inst, scalar_ty);
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" = ");
- const init_val = switch (reduce.operation) {
- .And, .Or, .Xor, .Add => "0",
+ var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
+ defer arena.deinit();
+
+ const ExpectedContents = union {
+ u: Value.Payload.U64,
+ i: Value.Payload.I64,
+ f16: Value.Payload.Float_16,
+ f32: Value.Payload.Float_32,
+ f64: Value.Payload.Float_64,
+ f80: Value.Payload.Float_80,
+ f128: Value.Payload.Float_128,
+ };
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
+
+ try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
+ .Or, .Xor, .Add => Value.zero,
+ .And => switch (scalar_ty.zigTypeTag()) {
+ .Bool => Value.one,
+ else => switch (scalar_ty.intInfo(target).signedness) {
+ .unsigned => try scalar_ty.maxInt(stack.get(), target),
+ .signed => Value.negative_one,
+ },
+ },
.Min => switch (scalar_ty.zigTypeTag()) {
- .Int => "TODO_intmax",
- .Float => "TODO_nan",
+ .Bool => Value.one,
+ .Int => try scalar_ty.maxInt(stack.get(), target),
+ .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
- .Int => "TODO_intmin",
- .Float => "TODO_nan",
+ .Bool => Value.zero,
+ .Int => try scalar_ty.minInt(stack.get(), target),
+ .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
else => unreachable,
},
- .Mul => "1",
- };
- try writer.writeAll(init_val);
- try writer.writeAll(";");
- try f.object.indent_writer.insertNewline();
- try writer.writeAll("for (;");
- try f.writeCValue(writer, it, .Other);
- try writer.print("<{d};++", .{vector_len});
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll(") ");
- try f.writeCValue(writer, accum, .Other);
+ .Mul => Value.one,
+ }, .Initializer);
+ try writer.writeAll(";\n");
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
+ try f.writeCValue(writer, accum, .Other);
switch (op) {
- .call_fn => |fn_name| {
- try writer.print(" = {s}(", .{fn_name});
+ .float_op => |func| {
+ try writer.writeAll(" = zig_libc_name_");
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
+ try writer.print("({s})(", .{func.operation});
try f.writeCValue(writer, accum, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("])");
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, func.info);
+ try writer.writeByte(')');
+ },
+ .builtin => |func| {
+ try writer.print(" = zig_{s}_", .{func.operation});
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
+ try writer.writeByte('(');
+ try f.writeCValue(writer, accum, .FunctionArgument);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, operand, .Other);
+ try v.elem(f, writer);
+ try f.object.dg.renderBuiltinInfo(writer, scalar_ty, func.info);
+ try writer.writeByte(')');
},
.infix => |ass| {
try writer.writeAll(ass);
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("]");
+ try v.elem(f, writer);
},
.ternary => |cmp| {
try writer.writeAll(" = ");
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(cmp);
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("] ? ");
+ try v.elem(f, writer);
+ try writer.writeAll(" ? ");
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" : ");
try f.writeCValue(writer, operand, .Other);
- try writer.writeAll("[");
- try f.writeCValue(writer, it, .Other);
- try writer.writeAll("]");
+ try v.elem(f, writer);
},
}
-
try writer.writeAll(";\n");
-
- try freeLocal(f, inst, it.new_local, 0);
+ try v.end(f, inst, writer);
return accum;
}
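
The reworked airReduce drops the explicit index local and the TODO placeholder seeds: the accumulator now starts from a real neutral element (0 for Or/Xor/Add, all-ones or true for And, maxInt/minInt for integer Min/Max, NaN for float Min/Max, 1 for Mul), and scalars wider than 64 bits or floats go through the zig_*/libc builtins instead of bare C operators. Reducing .Add over a @Vector(4, f32) v would then come out roughly as:

    float acc = 0.0f;
    for (size_t i = 0; i < 4; i++)
        acc = zig_add_f32(acc, v[i]);
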
@@ -6646,7 +6954,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte('(');
if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) {
- try f.renderIntCast(writer, inst_ty, element, field_ty, .FunctionArgument);
+ try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
} else {
try writer.writeByte('(');
try f.renderType(writer, inst_ty);
@@ -6664,7 +6972,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
- try f.object.dg.renderBuiltinInfo(writer, inst_ty, .Bits);
+ try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
if (!empty) try writer.writeByte(')');
@@ -6788,7 +7096,6 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
const un_op = f.air.instructions.items(.data)[inst].un_op;
if (f.liveness.isUnused(inst)) {
try reap(f, inst, &.{un_op});
@@ -6797,16 +7104,23 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
+
const operand_ty = f.air.typeOf(un_op);
+ const scalar_ty = operand_ty.scalarType();
const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
+ const local = try f.allocLocal(inst, operand_ty);
+ const v = try Vectorizer.start(f, inst, writer, operand_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_neg_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, operand_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6816,19 +7130,28 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
try reap(f, inst, &.{un_op});
return .none;
}
+
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
- const writer = f.object.writer();
+
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeByte('(');
try writer.writeAll(operation);
try writer.writeAll(")(");
try f.writeCValue(writer, operand, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6838,23 +7161,32 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
+
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const writer = f.object.writer();
const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
+ const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeByte('(');
try writer.writeAll(operation);
try writer.writeAll(")(");
try f.writeCValue(writer, lhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -6865,23 +7197,34 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
return .none;
}
- const inst_ty = f.air.typeOfIndex(inst);
+
const mulend1 = try f.resolveInst(bin_op.lhs);
const mulend2 = try f.resolveInst(bin_op.rhs);
const addend = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType();
+
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
+ const v = try Vectorizer.start(f, inst, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
+ try v.elem(f, writer);
try writer.writeAll(" = zig_libc_name_");
- try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+ try f.object.dg.renderTypeForBuiltinFnName(writer, inst_scalar_ty);
try writer.writeAll("(fma)(");
try f.writeCValue(writer, mulend1, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, mulend2, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, addend, .FunctionArgument);
+ try v.elem(f, writer);
try writer.writeAll(");\n");
+ try v.end(f, inst, writer);
+
return local;
}
@@ -7083,6 +7426,28 @@ fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 {
} else unreachable;
}
+fn compareOperatorAbbrev(operator: std.math.CompareOperator) []const u8 {
+ return switch (operator) {
+ .lt => "lt",
+ .lte => "le",
+ .eq => "eq",
+ .gte => "ge",
+ .gt => "gt",
+ .neq => "ne",
+ };
+}
+
+fn compareOperatorC(operator: std.math.CompareOperator) []const u8 {
+ return switch (operator) {
+ .lt => "<",
+ .lte => "<=",
+ .eq => "==",
+ .gte => ">=",
+ .gt => ">",
+ .neq => "!=",
+ };
+}
+
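
These two helpers cover the two spellings a comparison can take in the output: compareOperatorAbbrev names the zig_<abbrev>_<type> builtins used for floats, while compareOperatorC supplies the C operator, used either directly between small scalar operands or against 0 after a builtin call that returns a three-way result. Illustratively (names made up):

    t0 = a < b;                    /* small integers: plain C operator             */
    t1 = zig_lt_f32(x, y) < 0;     /* floats: builtin named via the abbrev, then
                                      compared against 0 with the C operator       */
    t2 = zig_cmp_u128(p, q) >= 0;  /* >64-bit integers: zig_cmp_* three-way,
                                      same compare-against-0 pattern               */
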
fn StringLiteral(comptime WriterType: type) type {
// MSVC has a length limit of 16380 per string literal (before concatenation)
const max_char_len = 4;
@@ -7171,30 +7536,33 @@ fn undefPattern(comptime IntType: type) IntType {
return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3));
}
-const FormatIntLiteralContext = struct { ty: Type, val: Value, mod: *Module, location: ?ValueRenderLocation = null };
+const FormatIntLiteralContext = struct {
+ dg: *DeclGen,
+ int_info: std.builtin.Type.Int,
+ kind: CType.Kind,
+ cty: CType,
+ val: Value,
+};
fn formatIntLiteral(
data: FormatIntLiteralContext,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- const target = data.mod.getTarget();
- const int_info = data.ty.intInfo(target);
+ const target = data.dg.module.getTarget();
const ExpectedContents = struct {
const base = 10;
- const limbs_count_128 = BigInt.calcTwosCompLimbCount(128);
- const expected_needed_limbs_count = BigInt.calcToStringLimbsBufferLen(limbs_count_128, base);
- const worst_case_int = BigInt.Const{
- .limbs = &([1]BigIntLimb{std.math.maxInt(BigIntLimb)} ** expected_needed_limbs_count),
- .positive = false,
- };
+ const bits = 128;
+ const limbs_count = BigInt.calcTwosCompLimbCount(bits);
- undef_limbs: [limbs_count_128]BigIntLimb,
- wrap_limbs: [limbs_count_128]BigIntLimb,
+ undef_limbs: [limbs_count]BigIntLimb,
+ wrap_limbs: [limbs_count]BigIntLimb,
+ to_string_buf: [bits]u8,
+ to_string_limbs: [BigInt.calcToStringLimbsBufferLen(limbs_count, base)]BigIntLimb,
};
var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), data.mod.gpa);
+ std.heap.stackFallback(@sizeOf(ExpectedContents), data.dg.gpa);
const allocator = stack.get();
var undef_limbs: []BigIntLimb = &.{};
@@ -7202,7 +7570,7 @@ fn formatIntLiteral(
var int_buf: Value.BigIntSpace = undefined;
const int = if (data.val.isUndefDeep()) blk: {
- undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(int_info.bits));
+ undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits));
std.mem.set(BigIntLimb, undef_limbs, undefPattern(BigIntLimb));
var undef_int = BigInt.Mutable{
@@ -7210,165 +7578,194 @@ fn formatIntLiteral(
.len = undef_limbs.len,
.positive = true,
};
- undef_int.truncate(undef_int.toConst(), int_info.signedness, int_info.bits);
+ undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst();
} else data.val.toBigInt(&int_buf, target);
- assert(int.fitsInTwosComp(int_info.signedness, int_info.bits));
+ assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
- const c_bits = toCIntBits(int_info.bits) orelse unreachable;
+ const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined;
const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
- const wrap_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(c_bits));
- defer allocator.free(wrap_limbs);
- var wrap = BigInt.Mutable{ .limbs = wrap_limbs, .len = undefined, .positive = undefined };
- if (wrap.addWrap(int, one, int_info.signedness, c_bits) or
- int_info.signedness == .signed and wrap.subWrap(int, one, int_info.signedness, c_bits))
- {
- const abbrev = switch (data.ty.tag()) {
- .c_short, .c_ushort => "SHRT",
- .c_int, .c_uint => "INT",
- .c_long, .c_ulong => "LONG",
- .c_longlong, .c_ulonglong => "LLONG",
- .isize, .usize => "INTPTR",
- else => return writer.print("zig_{s}Int_{c}{d}", .{
- if (int.positive) "max" else "min", signAbbrev(int_info.signedness), c_bits,
- }),
- };
- if (int_info.signedness == .unsigned) try writer.writeByte('U');
- return writer.print("{s}_{s}", .{ abbrev, if (int.positive) "MAX" else "MIN" });
- }
+ var wrap = BigInt.Mutable{
+ .limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(c_bits)),
+ .len = undefined,
+ .positive = undefined,
+ };
+ defer allocator.free(wrap.limbs);
- var use_twos_comp = false;
- if (!int.positive) {
- if (c_bits > 64) {
- // TODO: Can this be done for decimal literals as well?
- if (fmt.len == 1 and fmt[0] != 'd') {
- use_twos_comp = true;
- } else {
- // TODO: Use fmtIntLiteral for 0?
- try writer.print("zig_sub_{c}{d}(zig_make_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits });
- }
- } else {
- try writer.writeByte('-');
- }
- }
-
- switch (data.ty.tag()) {
- .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {},
- else => {
- if (int_info.bits <= 64) {
- try writer.print("{s}INT{d}_C(", .{ switch (int_info.signedness) {
- .signed => "",
- .unsigned => "U",
- }, c_bits });
- } else if (data.location != null and data.location.? == .StaticInitializer) {
- // MSVC treats casting the struct initializer as not constant (C2099), so an alternate form is used in global initializers
- try writer.print("zig_make_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
- } else {
- try writer.print("zig_make_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
- }
+ const c_limb_info: struct {
+ cty: CType,
+ count: usize,
+ endian: std.builtin.Endian,
+ homogeneous: bool,
+ } = switch (data.cty.tag()) {
+ else => .{
+ .cty = CType.initTag(.void),
+ .count = 1,
+ .endian = .Little,
+ .homogeneous = true,
},
- }
+ .zig_u128, .zig_i128 => .{
+ .cty = CType.initTag(.uint64_t),
+ .count = 2,
+ .endian = .Big,
+ .homogeneous = false,
+ },
+ .array => info: {
+ const array_data = data.cty.castTag(.array).?.data;
+ break :info .{
+ .cty = data.dg.indexToCType(array_data.elem_type),
+ .count = @intCast(usize, array_data.len),
+ .endian = target.cpu.arch.endian(),
+ .homogeneous = true,
+ };
+ },
+ };
+ if (c_limb_info.count == 1) {
+ if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
+ data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
+ return writer.print("{s}_{s}", .{
+ data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
+ if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
+ }),
+ if (int.positive) "MAX" else "MIN",
+ });
- const limbs_count_64 = @divExact(64, @bitSizeOf(BigIntLimb));
- if (c_bits <= 64) {
- var base: u8 = undefined;
- var case: std.fmt.Case = undefined;
- switch (fmt.len) {
- 0 => base = 10,
+ if (!int.positive) try writer.writeByte('-');
+ try data.cty.renderLiteralPrefix(writer, data.kind);
+
+ const style: struct { base: u8, case: std.fmt.Case = undefined } = switch (fmt.len) {
+ 0 => .{ .base = 10 },
1 => switch (fmt[0]) {
- 'b' => {
- base = 2;
+ 'b' => style: {
try writer.writeAll("0b");
+ break :style .{ .base = 2 };
},
- 'o' => {
- base = 8;
+ 'o' => style: {
try writer.writeByte('0');
+ break :style .{ .base = 8 };
},
- 'd' => base = 10,
- 'x' => {
- base = 16;
- case = .lower;
- try writer.writeAll("0x");
- },
- 'X' => {
- base = 16;
- case = .upper;
+ 'd' => .{ .base = 10 },
+ 'x', 'X' => |base| style: {
try writer.writeAll("0x");
+ break :style .{ .base = 16, .case = switch (base) {
+ 'x' => .lower,
+ 'X' => .upper,
+ else => unreachable,
+ } };
},
else => @compileError("Invalid fmt: " ++ fmt),
},
else => @compileError("Invalid fmt: " ++ fmt),
- }
+ };
- var str: [64]u8 = undefined;
- var limbs_buf: [BigInt.calcToStringLimbsBufferLen(limbs_count_64, 10)]BigIntLimb = undefined;
- try writer.writeAll(str[0..int.abs().toString(&str, base, case, &limbs_buf)]);
+ const string = try int.abs().toStringAlloc(allocator, style.base, style.case);
+ defer allocator.free(string);
+ try writer.writeAll(string);
} else {
- assert(c_bits == 128);
- const split = std.math.min(int.limbs.len, limbs_count_64);
- var twos_comp_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
+ try data.cty.renderLiteralPrefix(writer, data.kind);
+ wrap.convertToTwosComplement(int, data.int_info.signedness, c_bits);
+ std.mem.set(BigIntLimb, wrap.limbs[wrap.len..], 0);
+ wrap.len = wrap.limbs.len;
+ const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count);
- // Adding a negation in the C code before the literal doesn't work in all cases:
- // - struct versions would require an extra zig_sub_ call to negate, which wouldn't work in constant expressions
- // - negating the f80 int representation (i128) doesn't make sense
- // Instead we write out the literal as a negative number in twos complement
- var limbs = int.limbs;
+ var c_limb_int_info = std.builtin.Type.Int{
+ .signedness = undefined,
+ .bits = @intCast(u16, @divExact(c_bits, c_limb_info.count)),
+ };
+ var c_limb_cty: CType = undefined;
- if (use_twos_comp) {
- var twos_comp = BigInt.Mutable{
- .limbs = &twos_comp_limbs,
- .positive = undefined,
+ var limb_offset: usize = 0;
+ const most_significant_limb_i = wrap.len - limbs_per_c_limb;
+ while (limb_offset < wrap.len) : (limb_offset += limbs_per_c_limb) {
+ const limb_i = switch (c_limb_info.endian) {
+ .Little => limb_offset,
+ .Big => most_significant_limb_i - limb_offset,
+ };
+ var c_limb_mut = BigInt.Mutable{
+ .limbs = wrap.limbs[limb_i..][0..limbs_per_c_limb],
.len = undefined,
+ .positive = true,
+ };
+ c_limb_mut.normalize(limbs_per_c_limb);
+
+ if (limb_i == most_significant_limb_i and
+ !c_limb_info.homogeneous and data.int_info.signedness == .signed)
+ {
+ // most significant limb is actually signed
+ c_limb_int_info.signedness = .signed;
+ c_limb_cty = c_limb_info.cty.toSigned();
+
+ c_limb_mut.positive = wrap.positive;
+ c_limb_mut.truncate(
+ c_limb_mut.toConst(),
+ .signed,
+ data.int_info.bits - limb_i * @bitSizeOf(BigIntLimb),
+ );
+ } else {
+ c_limb_int_info.signedness = .unsigned;
+ c_limb_cty = c_limb_info.cty;
+ }
+ var c_limb_val_pl = Value.Payload.BigInt{
+ .base = .{ .tag = if (c_limb_mut.positive) .int_big_positive else .int_big_negative },
+ .data = c_limb_mut.limbs[0..c_limb_mut.len],
};
- twos_comp.convertToTwosComplement(int, .signed, int_info.bits);
- limbs = twos_comp.limbs;
+ if (limb_offset > 0) try writer.writeAll(", ");
+ try formatIntLiteral(.{
+ .dg = data.dg,
+ .int_info = c_limb_int_info,
+ .kind = data.kind,
+ .cty = c_limb_cty,
+ .val = Value.initPayload(&c_limb_val_pl.base),
+ }, fmt, options, writer);
}
-
- var upper_pl = Value.Payload.BigInt{
- .base = .{ .tag = .int_big_positive },
- .data = limbs[split..],
- };
- const upper_val = Value.initPayload(&upper_pl.base);
- try formatIntLiteral(.{
- .ty = switch (int_info.signedness) {
- .unsigned => Type.u64,
- .signed => if (use_twos_comp) Type.u64 else Type.i64,
- },
- .val = upper_val,
- .mod = data.mod,
- }, fmt, options, writer);
-
- try writer.writeAll(", ");
-
- var lower_pl = Value.Payload.BigInt{
- .base = .{ .tag = .int_big_positive },
- .data = limbs[0..split],
- };
- const lower_val = Value.initPayload(&lower_pl.base);
- try formatIntLiteral(.{
- .ty = Type.u64,
- .val = lower_val,
- .mod = data.mod,
- }, fmt, options, writer);
-
- if (!int.positive and c_bits > 64 and !use_twos_comp) try writer.writeByte(')');
- return writer.writeByte(')');
- }
-
- switch (data.ty.tag()) {
- .c_short, .c_ushort, .c_int => {},
- .c_uint => try writer.writeAll("u"),
- .c_long => try writer.writeAll("l"),
- .c_ulong => try writer.writeAll("ul"),
- .c_longlong => try writer.writeAll("ll"),
- .c_ulonglong => try writer.writeAll("ull"),
- else => try writer.writeByte(')'),
}
+ try data.cty.renderLiteralSuffix(writer);
}
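For orientation, the reworked formatIntLiteral above renders wide integers limb by limb through the recursive call: a positive u128 printed with the {x} format comes out roughly as the C expression below, most significant limb first (digit values made up for illustration; in a .global context renderLiteralPrefix switches the zig_make_u128 prefix to zig_init_u128):

    zig_make_u128(UINT64_C(0x0123456789abcdef), UINT64_C(0xfedcba9876543210))
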
+const Vectorizer = struct {
+ index: CValue = .none,
+
+ pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorizer {
+ return if (ty.zigTypeTag() == .Vector) index: {
+ var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() };
+
+ const local = try f.allocLocal(inst, Type.usize);
+
+ try writer.writeAll("for (");
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" < {d}; ", .{
+ try f.fmtIntLiteral(Type.usize, Value.initPayload(&len_pl.base)),
+ });
+ try f.writeCValue(writer, local, .Other);
+ try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)});
+ f.object.indent_writer.pushIndent();
+
+ break :index .{ .index = local };
+ } else .{};
+ }
+
+ pub fn elem(self: Vectorizer, f: *Function, writer: anytype) !void {
+ if (self.index != .none) {
+ try writer.writeByte('[');
+ try f.writeCValue(writer, self.index, .Other);
+ try writer.writeByte(']');
+ }
+ }
+
+ pub fn end(self: Vectorizer, f: *Function, inst: Air.Inst.Index, writer: anytype) !void {
+ if (self.index != .none) {
+ f.object.indent_writer.popIndent();
+ try writer.writeAll("}\n");
+ try freeLocal(f, inst, self.index.new_local, 0);
+ }
+ }
+};
+
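As a sketch of what the Vectorizer above emits, a vectorized @mulAdd over a four-element f32 vector lowers to a per-element loop along these lines (identifier names are illustrative, and the exact rendering of the index literals is up to fmtIntLiteral):

    for (t1 = 0; t1 < 4; t1 += 1) {
        t0[t1] = zig_libc_name_f32(fma)(a0[t1], a1[t1], a2[t1]);
    }
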
fn isByRef(ty: Type) bool {
_ = ty;
return false;
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 1f1a220cd2..038f53f186 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -496,6 +496,427 @@ pub const CType = extern union {
}
};
+ pub fn isBool(self: CType) bool {
+ return switch (self.tag()) {
+ ._Bool,
+ .bool,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isInteger(self: CType) bool {
+ return switch (self.tag()) {
+ .char,
+ .@"signed char",
+ .short,
+ .int,
+ .long,
+ .@"long long",
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .size_t,
+ .ptrdiff_t,
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ .uintptr_t,
+ .intptr_t,
+ .zig_u128,
+ .zig_i128,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn signedness(self: CType) ?std.builtin.Signedness {
+ return switch (self.tag()) {
+ .char => null, // unknown signedness
+ .@"signed char",
+ .short,
+ .int,
+ .long,
+ .@"long long",
+ .ptrdiff_t,
+ .int8_t,
+ .int16_t,
+ .int32_t,
+ .int64_t,
+ .intptr_t,
+ .zig_i128,
+ => .signed,
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .size_t,
+ .uint8_t,
+ .uint16_t,
+ .uint32_t,
+ .uint64_t,
+ .uintptr_t,
+ .zig_u128,
+ => .unsigned,
+ else => unreachable,
+ };
+ }
+
+ pub fn isFloat(self: CType) bool {
+ return switch (self.tag()) {
+ .float,
+ .double,
+ .@"long double",
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isPointer(self: CType) bool {
+ return switch (self.tag()) {
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn isFunction(self: CType) bool {
+ return switch (self.tag()) {
+ .function,
+ .varargs_function,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn toSigned(self: CType) CType {
+ return CType.initTag(switch (self.tag()) {
+ .char, .@"signed char", .@"unsigned char" => .@"signed char",
+ .short, .@"unsigned short" => .short,
+ .int, .@"unsigned int" => .int,
+ .long, .@"unsigned long" => .long,
+ .@"long long", .@"unsigned long long" => .@"long long",
+ .size_t, .ptrdiff_t => .ptrdiff_t,
+ .uint8_t, .int8_t => .int8_t,
+ .uint16_t, .int16_t => .int16_t,
+ .uint32_t, .int32_t => .int32_t,
+ .uint64_t, .int64_t => .int64_t,
+ .uintptr_t, .intptr_t => .intptr_t,
+ .zig_u128, .zig_i128 => .zig_i128,
+ .float,
+ .double,
+ .@"long double",
+ .zig_f16,
+ .zig_f32,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => |t| t,
+ else => unreachable,
+ });
+ }
+
+ pub fn toUnsigned(self: CType) CType {
+ return CType.initTag(switch (self.tag()) {
+ .char, .@"signed char", .@"unsigned char" => .@"unsigned char",
+ .short, .@"unsigned short" => .@"unsigned short",
+ .int, .@"unsigned int" => .@"unsigned int",
+ .long, .@"unsigned long" => .@"unsigned long",
+ .@"long long", .@"unsigned long long" => .@"unsigned long long",
+ .size_t, .ptrdiff_t => .size_t,
+ .uint8_t, .int8_t => .uint8_t,
+ .uint16_t, .int16_t => .uint16_t,
+ .uint32_t, .int32_t => .uint32_t,
+ .uint64_t, .int64_t => .uint64_t,
+ .uintptr_t, .intptr_t => .uintptr_t,
+ .zig_u128, .zig_i128 => .zig_u128,
+ else => unreachable,
+ });
+ }
+
+ pub fn toSignedness(self: CType, s: std.builtin.Signedness) CType {
+ return switch (s) {
+ .unsigned => self.toUnsigned(),
+ .signed => self.toSigned(),
+ };
+ }
+
+ pub fn getStandardDefineAbbrev(self: CType) ?[]const u8 {
+ return switch (self.tag()) {
+ .char => "CHAR",
+ .@"signed char" => "SCHAR",
+ .short => "SHRT",
+ .int => "INT",
+ .long => "LONG",
+ .@"long long" => "LLONG",
+ .@"unsigned char" => "UCHAR",
+ .@"unsigned short" => "USHRT",
+ .@"unsigned int" => "UINT",
+ .@"unsigned long" => "ULONG",
+ .@"unsigned long long" => "ULLONG",
+ .float => "FLT",
+ .double => "DBL",
+ .@"long double" => "LDBL",
+ .size_t => "SIZE",
+ .ptrdiff_t => "PTRDIFF",
+ .uint8_t => "UINT8",
+ .int8_t => "INT8",
+ .uint16_t => "UINT16",
+ .int16_t => "INT16",
+ .uint32_t => "UINT32",
+ .int32_t => "INT32",
+ .uint64_t => "UINT64",
+ .int64_t => "INT64",
+ .uintptr_t => "UINTPTR",
+ .intptr_t => "INTPTR",
+ else => null,
+ };
+ }
+
+ pub fn renderLiteralPrefix(self: CType, writer: anytype, kind: Kind) @TypeOf(writer).Error!void {
+ switch (self.tag()) {
+ .void => unreachable,
+ ._Bool,
+ .char,
+ .@"signed char",
+ .short,
+ .@"unsigned short",
+ .bool,
+ .size_t,
+ .ptrdiff_t,
+ .uintptr_t,
+ .intptr_t,
+ => |t| switch (kind) {
+ else => try writer.print("({s})", .{@tagName(t)}),
+ .global => {},
+ },
+ .int,
+ .long,
+ .@"long long",
+ .@"unsigned char",
+ .@"unsigned int",
+ .@"unsigned long",
+ .@"unsigned long long",
+ .float,
+ .double,
+ .@"long double",
+ => {},
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ => try writer.print("{s}_C(", .{self.getStandardDefineAbbrev().?}),
+ .zig_u128,
+ .zig_i128,
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => |t| try writer.print("zig_{s}_{s}(", .{
+ switch (kind) {
+ else => "make",
+ .global => "init",
+ },
+ @tagName(t)["zig_".len..],
+ }),
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => unreachable,
+ .array,
+ .vector,
+ => try writer.writeByte('{'),
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ }
+ }
+
+ pub fn renderLiteralSuffix(self: CType, writer: anytype) @TypeOf(writer).Error!void {
+ switch (self.tag()) {
+ .void => unreachable,
+ ._Bool => {},
+ .char,
+ .@"signed char",
+ .short,
+ .int,
+ => {},
+ .long => try writer.writeByte('l'),
+ .@"long long" => try writer.writeAll("ll"),
+ .@"unsigned char",
+ .@"unsigned short",
+ .@"unsigned int",
+ => try writer.writeByte('u'),
+ .@"unsigned long",
+ .size_t,
+ .uintptr_t,
+ => try writer.writeAll("ul"),
+ .@"unsigned long long" => try writer.writeAll("ull"),
+ .float => try writer.writeByte('f'),
+ .double => {},
+ .@"long double" => try writer.writeByte('l'),
+ .bool,
+ .ptrdiff_t,
+ .intptr_t,
+ => {},
+ .uint8_t,
+ .int8_t,
+ .uint16_t,
+ .int16_t,
+ .uint32_t,
+ .int32_t,
+ .uint64_t,
+ .int64_t,
+ .zig_u128,
+ .zig_i128,
+ .zig_f16,
+ .zig_f32,
+ .zig_f64,
+ .zig_f80,
+ .zig_f128,
+ .zig_c_longdouble,
+ => try writer.writeByte(')'),
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => unreachable,
+ .array,
+ .vector,
+ => try writer.writeByte('}'),
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ }
+ }
+
+ pub fn floatActiveBits(self: CType, target: Target) u16 {
+ return switch (self.tag()) {
+ .float => target.c_type_bit_size(.float),
+ .double => target.c_type_bit_size(.double),
+ .@"long double", .zig_c_longdouble => target.c_type_bit_size(.longdouble),
+ .zig_f16 => 16,
+ .zig_f32 => 32,
+ .zig_f64 => 64,
+ .zig_f80 => 80,
+ .zig_f128 => 128,
+ else => unreachable,
+ };
+ }
+
+ pub fn byteSize(self: CType, store: Store.Set, target: Target) u64 {
+ return switch (self.tag()) {
+ .void => 0,
+ .char, .@"signed char", ._Bool, .@"unsigned char", .bool, .uint8_t, .int8_t => 1,
+ .short => target.c_type_byte_size(.short),
+ .int => target.c_type_byte_size(.int),
+ .long => target.c_type_byte_size(.long),
+ .@"long long" => target.c_type_byte_size(.longlong),
+ .@"unsigned short" => target.c_type_byte_size(.ushort),
+ .@"unsigned int" => target.c_type_byte_size(.uint),
+ .@"unsigned long" => target.c_type_byte_size(.ulong),
+ .@"unsigned long long" => target.c_type_byte_size(.ulonglong),
+ .float => target.c_type_byte_size(.float),
+ .double => target.c_type_byte_size(.double),
+ .@"long double" => target.c_type_byte_size(.longdouble),
+ .size_t,
+ .ptrdiff_t,
+ .uintptr_t,
+ .intptr_t,
+ .pointer,
+ .pointer_const,
+ .pointer_volatile,
+ .pointer_const_volatile,
+ => @divExact(target.cpu.arch.ptrBitWidth(), 8),
+ .uint16_t, .int16_t, .zig_f16 => 2,
+ .uint32_t, .int32_t, .zig_f32 => 4,
+ .uint64_t, .int64_t, .zig_f64 => 8,
+ .zig_u128, .zig_i128, .zig_f128 => 16,
+ .zig_f80 => if (target.c_type_bit_size(.longdouble) == 80)
+ target.c_type_byte_size(.longdouble)
+ else
+ 16,
+ .zig_c_longdouble => target.c_type_byte_size(.longdouble),
+
+ .array,
+ .vector,
+ => {
+ const data = self.cast(Payload.Sequence).?.data;
+ return data.len * store.indexToCType(data.elem_type).byteSize(store, target);
+ },
+
+ .fwd_anon_struct,
+ .fwd_anon_union,
+ .fwd_struct,
+ .fwd_union,
+ .unnamed_struct,
+ .unnamed_union,
+ .packed_unnamed_struct,
+ .packed_unnamed_union,
+ .anon_struct,
+ .anon_union,
+ .@"struct",
+ .@"union",
+ .packed_struct,
+ .packed_union,
+ .function,
+ .varargs_function,
+ => unreachable,
+ };
+ }
+
pub fn isPacked(self: CType) bool {
return switch (self.tag()) {
else => false,
@@ -787,26 +1208,26 @@ pub const CType = extern union {
};
}
- fn tagFromIntInfo(signedness: std.builtin.Signedness, bits: u16) Tag {
- return switch (bits) {
+ fn tagFromIntInfo(int_info: std.builtin.Type.Int) Tag {
+ return switch (int_info.bits) {
0 => .void,
- 1...8 => switch (signedness) {
+ 1...8 => switch (int_info.signedness) {
.unsigned => .uint8_t,
.signed => .int8_t,
},
- 9...16 => switch (signedness) {
+ 9...16 => switch (int_info.signedness) {
.unsigned => .uint16_t,
.signed => .int16_t,
},
- 17...32 => switch (signedness) {
+ 17...32 => switch (int_info.signedness) {
.unsigned => .uint32_t,
.signed => .int32_t,
},
- 33...64 => switch (signedness) {
+ 33...64 => switch (int_info.signedness) {
.unsigned => .uint64_t,
.signed => .int64_t,
},
- 65...128 => switch (signedness) {
+ 65...128 => switch (int_info.signedness) {
.unsigned => .zig_u128,
.signed => .zig_i128,
},
@@ -945,31 +1366,27 @@ pub const CType = extern union {
.c_ulong => self.init(.@"unsigned long"),
.c_longlong => self.init(.@"long long"),
.c_ulonglong => self.init(.@"unsigned long long"),
- else => {
- const info = ty.intInfo(target);
- const t = tagFromIntInfo(info.signedness, info.bits);
- switch (t) {
- .void => unreachable,
- else => self.init(t),
- .array => switch (kind) {
- .forward, .complete, .global => {
- const abi_size = ty.abiSize(target);
- const abi_align = ty.abiAlignment(target);
- self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
- .len = @divExact(abi_size, abi_align),
- .elem_type = tagFromIntInfo(
- .unsigned,
- @intCast(u16, abi_align * 8),
- ).toIndex(),
- } } };
- self.value = .{ .cty = initPayload(&self.storage.seq) };
- },
- .forward_parameter,
- .parameter,
- => try self.initArrayParameter(ty, kind, lookup),
- .payload => unreachable,
+ else => switch (tagFromIntInfo(ty.intInfo(target))) {
+ .void => unreachable,
+ else => |t| self.init(t),
+ .array => switch (kind) {
+ .forward, .complete, .global => {
+ const abi_size = ty.abiSize(target);
+ const abi_align = ty.abiAlignment(target);
+ self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
+ .len = @divExact(abi_size, abi_align),
+ .elem_type = tagFromIntInfo(.{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, abi_align * 8),
+ }).toIndex(),
+ } } };
+ self.value = .{ .cty = initPayload(&self.storage.seq) };
},
- }
+ .forward_parameter,
+ .parameter,
+ => try self.initArrayParameter(ty, kind, lookup),
+ .payload => unreachable,
+ },
},
} else switch (ty.zigTypeTag()) {
.Frame => unreachable,
@@ -1048,7 +1465,7 @@ pub const CType = extern union {
.base = .{ .tag = .int_unsigned },
.data = info.host_size * 8,
};
- const pointee_ty = if (info.host_size > 0)
+ const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
Type.initPayload(&host_int_pl.base)
else
info.pointee_type;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 22cf76f29a..6c46e5b76a 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1778,7 +1778,7 @@ pub const Object = struct {
if (ty.optionalReprIsPayload()) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
+ try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
return ptr_di_ty;
}
@@ -4595,6 +4595,7 @@ pub const FuncGen = struct {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.switch_br => try self.airSwitchBr(inst),
+ .trap => try self.airTrap(inst),
.breakpoint => try self.airBreakpoint(inst),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -8261,6 +8262,14 @@ pub const FuncGen = struct {
return fg.load(ptr, ptr_ty);
}
+ fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ _ = inst;
+ const llvm_fn = self.getIntrinsic("llvm.trap", &.{});
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, "");
+ _ = self.builder.buildUnreachable();
+ return null;
+ }
+
fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{});
diff --git a/src/glibc.zig b/src/glibc.zig
index 3021e7c7ba..b37398bffd 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -161,7 +161,7 @@ pub const CRTFile = enum {
libc_nonshared_a,
};
-pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
+pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -196,7 +196,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
"-DASSEMBLER",
"-Wa,--noexecstack",
});
- return comp.build_crt_file("crti", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("crti", .Obj, .@"glibc crti.o", prog_node, &.{
.{
.src_path = try start_asm_path(comp, arena, "crti.S"),
.cache_exempt_flags = args.items,
@@ -215,7 +215,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
"-DASSEMBLER",
"-Wa,--noexecstack",
});
- return comp.build_crt_file("crtn", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("crtn", .Obj, .@"glibc crtn.o", prog_node, &.{
.{
.src_path = try start_asm_path(comp, arena, "crtn.S"),
.cache_exempt_flags = args.items,
@@ -265,7 +265,9 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.cache_exempt_flags = args.items,
};
};
- return comp.build_crt_file("Scrt1", .Obj, &[_]Compilation.CSourceFile{ start_o, abi_note_o });
+ return comp.build_crt_file("Scrt1", .Obj, .@"glibc Scrt1.o", prog_node, &.{
+ start_o, abi_note_o,
+ });
},
.libc_nonshared_a => {
const s = path.sep_str;
@@ -366,7 +368,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
files_index += 1;
}
const files = files_buf[0..files_index];
- return comp.build_crt_file("c_nonshared", .Lib, files);
+ return comp.build_crt_file("c_nonshared", .Lib, .@"glibc libc_nonshared.a", prog_node, files);
},
}
}
@@ -639,7 +641,7 @@ pub const BuiltSharedObjects = struct {
const all_map_basename = "all.map";
-pub fn buildSharedObjects(comp: *Compilation) !void {
+pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1023,7 +1025,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
try o_directory.handle.writeFile(asm_file_basename, stubs_asm.items);
- try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib);
+ try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib, prog_node);
}
man.writeManifest() catch |err| {
@@ -1046,6 +1048,7 @@ fn buildSharedLib(
bin_directory: Compilation.Directory,
asm_file_basename: []const u8,
lib: Lib,
+ prog_node: *std.Progress.Node,
) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1105,7 +1108,7 @@ fn buildSharedLib(
});
defer sub_compilation.destroy();
- try sub_compilation.updateSubCompilation();
+ try comp.updateSubCompilation(sub_compilation, .@"glibc shared object", prog_node);
}
// Return true if glibc has crti/crtn sources for that architecture.
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 7ca405cf15..9c5dc9426f 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -96,7 +96,7 @@ const libcxx_files = [_][]const u8{
"src/verbose_abort.cpp",
};
-pub fn buildLibCXX(comp: *Compilation) !void {
+pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -258,7 +258,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
});
defer sub_compilation.destroy();
- try sub_compilation.updateSubCompilation();
+ try comp.updateSubCompilation(sub_compilation, .libcxx, prog_node);
assert(comp.libcxx_static_lib == null);
comp.libcxx_static_lib = Compilation.CRTFile{
@@ -269,7 +269,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
};
}
-pub fn buildLibCXXABI(comp: *Compilation) !void {
+pub fn buildLibCXXABI(comp: *Compilation, prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -418,7 +418,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
});
defer sub_compilation.destroy();
- try sub_compilation.updateSubCompilation();
+ try comp.updateSubCompilation(sub_compilation, .libcxxabi, prog_node);
assert(comp.libcxxabi_static_lib == null);
comp.libcxxabi_static_lib = Compilation.CRTFile{
diff --git a/src/libtsan.zig b/src/libtsan.zig
index 16e40c16f8..54bf00e4b6 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -5,7 +5,7 @@ const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
-pub fn buildTsan(comp: *Compilation) !void {
+pub fn buildTsan(comp: *Compilation, prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -235,7 +235,7 @@ pub fn buildTsan(comp: *Compilation) !void {
});
defer sub_compilation.destroy();
- try sub_compilation.updateSubCompilation();
+ try comp.updateSubCompilation(sub_compilation, .libtsan, prog_node);
assert(comp.tsan_static_lib == null);
comp.tsan_static_lib = Compilation.CRTFile{
diff --git a/src/libunwind.zig b/src/libunwind.zig
index a20b5e81f7..aefbfb457d 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -7,7 +7,7 @@ const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
-pub fn buildStaticLib(comp: *Compilation) !void {
+pub fn buildStaticLib(comp: *Compilation, prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -130,7 +130,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
});
defer sub_compilation.destroy();
- try sub_compilation.updateSubCompilation();
+ try comp.updateSubCompilation(sub_compilation, .libunwind, prog_node);
assert(comp.libunwind_static_lib == null);
diff --git a/src/link.zig b/src/link.zig
index 4c4915441d..e68f9c97d0 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -264,6 +264,8 @@ pub const File = struct {
/// of this linking operation.
lock: ?Cache.Lock = null,
+ child_pid: ?std.ChildProcess.Id = null,
+
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
@@ -376,6 +378,26 @@ pub const File = struct {
if (build_options.only_c) unreachable;
if (base.file != null) return;
const emit = base.options.emit orelse return;
+ if (base.child_pid) |pid| {
+ // If we try to open the output file in write mode while it is running,
+ // it will return ETXTBSY. So instead, we copy the file, atomically rename it
+ // over top of the exe path, and then proceed normally. This changes the inode,
+ // avoiding the error.
+ const tmp_sub_path = try std.fmt.allocPrint(base.allocator, "{s}-{x}", .{
+ emit.sub_path, std.crypto.random.int(u32),
+ });
+ try emit.directory.handle.copyFile(emit.sub_path, emit.directory.handle, tmp_sub_path, .{});
+ try emit.directory.handle.rename(tmp_sub_path, emit.sub_path);
+ switch (builtin.os.tag) {
+ .linux => {
+ switch (std.os.errno(std.os.linux.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0, 0))) {
+ .SUCCESS => {},
+ else => |errno| log.warn("ptrace failure: {s}", .{@tagName(errno)}),
+ }
+ },
+ else => return error.HotSwapUnavailableOnHostOperatingSystem,
+ }
+ }
base.file = try emit.directory.handle.createFile(emit.sub_path, .{
.truncate = false,
.read = true,
@@ -424,6 +446,18 @@ pub const File = struct {
}
f.close();
base.file = null;
+
+ if (base.child_pid) |pid| {
+ switch (builtin.os.tag) {
+ .linux => {
+ switch (std.os.errno(std.os.linux.ptrace(std.os.linux.PTRACE.DETACH, pid, 0, 0, 0))) {
+ .SUCCESS => {},
+ else => |errno| log.warn("ptrace failure: {s}", .{@tagName(errno)}),
+ }
+ },
+ else => return error.HotSwapUnavailableOnHostOperatingSystem,
+ }
+ }
},
.c, .spirv, .nvptx => {},
}
@@ -460,6 +494,9 @@ pub const File = struct {
CurrentWorkingDirectoryUnlinked,
LockViolation,
NetNameDeleted,
+ DeviceBusy,
+ InvalidArgument,
+ HotSwapUnavailableOnHostOperatingSystem,
};
/// Called from within the CodeGen to lower a local variable instantiation as an unnamed
@@ -1051,9 +1088,11 @@ pub const File = struct {
log.warn("failed to save archive hash digest file: {s}", .{@errorName(err)});
};
- man.writeManifest() catch |err| {
- log.warn("failed to write cache manifest when archiving: {s}", .{@errorName(err)});
- };
+ if (man.have_exclusive_lock) {
+ man.writeManifest() catch |err| {
+ log.warn("failed to write cache manifest when archiving: {s}", .{@errorName(err)});
+ };
+ }
base.lock = man.toOwnedLock();
}
diff --git a/src/link/C.zig b/src/link/C.zig
index 5663ba71e2..7e3ad2eddd 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -221,14 +221,19 @@ pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void
return self.flushModule(comp, prog_node);
}
-fn abiDefine(comp: *Compilation) ?[]const u8 {
- return switch (comp.getTarget().abi) {
- .msvc => "#define ZIG_TARGET_ABI_MSVC\n",
- else => null,
- };
+fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
+ var defines = std.ArrayList(u8).init(self.base.allocator);
+ errdefer defines.deinit();
+ const writer = defines.writer();
+ switch (target.abi) {
+ .msvc => try writer.writeAll("#define ZIG_TARGET_ABI_MSVC\n"),
+ else => {},
+ }
+ try writer.print("#define ZIG_TARGET_MAX_INT_ALIGNMENT {d}\n", .{target.maxIntAlignment()});
+ return defines;
}
-pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {
+pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -245,12 +250,13 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
var f: Flush = .{};
defer f.deinit(gpa);
- const abi_define = abiDefine(comp);
+ const abi_defines = try self.abiDefines(module.getTarget());
+ defer abi_defines.deinit();
// Covers defines, zig.h, ctypes, asm, lazy fwd.
try f.all_buffers.ensureUnusedCapacity(gpa, 5);
- if (abi_define) |buf| f.appendBufAssumeCapacity(buf);
+ f.appendBufAssumeCapacity(abi_defines.items);
f.appendBufAssumeCapacity(zig_h);
const ctypes_index = f.all_buffers.items.len;
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index c0ac7e0b88..f210f2f2b3 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1060,7 +1060,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 1a9d594c56..f1ab98372e 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -467,7 +467,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_paddr = entry_addr,
.p_memsz = file_size,
.p_align = p_align,
- .p_flags = elf.PF_X | elf.PF_R,
+ .p_flags = elf.PF_X | elf.PF_R | elf.PF_W,
});
self.entry_addr = null;
self.phdr_table_dirty = true;
@@ -493,7 +493,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_paddr = got_addr,
.p_memsz = file_size,
.p_align = p_align,
- .p_flags = elf.PF_R,
+ .p_flags = elf.PF_R | elf.PF_W,
});
self.phdr_table_dirty = true;
}
@@ -516,7 +516,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_paddr = rodata_addr,
.p_memsz = file_size,
.p_align = p_align,
- .p_flags = elf.PF_R,
+ .p_flags = elf.PF_R | elf.PF_W,
});
self.phdr_table_dirty = true;
}
@@ -2097,9 +2097,16 @@ fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
const local_sym_index = atom.getSymbolIndex().?;
+ log.debug("adding %{d} to local symbols free list", .{local_sym_index});
self.local_symbol_free_list.append(gpa, local_sym_index) catch {};
- self.local_symbols.items[local_sym_index].st_info = 0;
- self.local_symbols.items[local_sym_index].st_shndx = 0;
+ self.local_symbols.items[local_sym_index] = .{
+ .st_name = 0,
+ .st_info = 0,
+ .st_other = 0,
+ .st_shndx = 0,
+ .st_value = 0,
+ .st_size = 0,
+ };
_ = self.atom_by_index_table.remove(local_sym_index);
self.getAtomPtr(atom_index).local_sym_index = 0;
@@ -2159,7 +2166,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
const vaddr = blk: {
- var i: usize = 0;
+ var i: usize = if (self.base.child_pid == null) 0 else free_list.items.len;
while (i < free_list.items.len) {
const big_atom_index = free_list.items[i];
const big_atom = self.getAtom(big_atom_index);
@@ -2390,7 +2397,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
const atom = self.getAtom(atom_index);
const shdr_index = decl_metadata.shdr;
- if (atom.getSymbol(self).st_size != 0) {
+ if (atom.getSymbol(self).st_size != 0 and self.base.child_pid == null) {
const local_sym = atom.getSymbolPtr(self);
local_sym.st_name = try self.shstrtab.insert(gpa, decl_name);
local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
@@ -2444,6 +2451,28 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
+
+ if (self.base.child_pid) |pid| {
+ switch (builtin.os.tag) {
+ .linux => {
+ var code_vec: [1]std.os.iovec_const = .{.{
+ .iov_base = code.ptr,
+ .iov_len = code.len,
+ }};
+ var remote_vec: [1]std.os.iovec_const = .{.{
+ .iov_base = @intToPtr([*]u8, @intCast(usize, local_sym.st_value)),
+ .iov_len = code.len,
+ }};
+ const rc = std.os.linux.process_vm_writev(pid, &code_vec, &remote_vec, 0);
+ switch (std.os.errno(rc)) {
+ .SUCCESS => assert(rc == code.len),
+ else => |errno| log.warn("process_vm_writev failure: {s}", .{@tagName(errno)}),
+ }
+ },
+ else => return error.HotSwapUnavailableOnHostOperatingSystem,
+ }
+ }
+
try self.base.file.?.pwriteAll(code, file_offset);
return local_sym;
@@ -2618,7 +2647,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
@@ -2813,6 +2842,8 @@ fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
const endian = self.base.options.target.cpu.arch.endian();
const shdr = &self.sections.items(.shdr)[self.got_section_index.?];
const off = shdr.sh_offset + @as(u64, entry_size) * index;
+ const phdr = &self.program_headers.items[self.phdr_got_index.?];
+ const vaddr = phdr.p_vaddr + @as(u64, entry_size) * index;
switch (entry_size) {
2 => {
var buf: [2]u8 = undefined;
@@ -2828,6 +2859,27 @@ fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
var buf: [8]u8 = undefined;
mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
try self.base.file.?.pwriteAll(&buf, off);
+
+ if (self.base.child_pid) |pid| {
+ switch (builtin.os.tag) {
+ .linux => {
+ var local_vec: [1]std.os.iovec_const = .{.{
+ .iov_base = &buf,
+ .iov_len = buf.len,
+ }};
+ var remote_vec: [1]std.os.iovec_const = .{.{
+ .iov_base = @intToPtr([*]u8, @intCast(usize, vaddr)),
+ .iov_len = buf.len,
+ }};
+ const rc = std.os.linux.process_vm_writev(pid, &local_vec, &remote_vec, 0);
+ switch (std.os.errno(rc)) {
+ .SUCCESS => assert(rc == buf.len),
+ else => |errno| log.warn("process_vm_writev failure: {s}", .{@tagName(errno)}),
+ }
+ },
+ else => return error.HotSwapUnavailableOnHostOperatingSystem,
+ }
+ }
},
else => unreachable,
}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 7c1d4776af..eaf16e4009 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -2089,7 +2089,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 8bc00d9181..6d1cd7b536 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -7,12 +7,12 @@ const log = std.log.scoped(.link);
const macho = std.macho;
const mem = std.mem;
const testing = std.testing;
+const ThreadPool = std.Thread.Pool;
+const WaitGroup = std.Thread.WaitGroup;
const Allocator = mem.Allocator;
const Compilation = @import("../../Compilation.zig");
const Sha256 = std.crypto.hash.sha2.Sha256;
-const ThreadPool = @import("../../ThreadPool.zig");
-const WaitGroup = @import("../../WaitGroup.zig");
const hash_size = Sha256.digest_length;
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 87e3ca5c22..cf6e4f8418 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -377,7 +377,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
// duped_code is freed when the unnamed const is freed
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 74525138a1..e998a8d50e 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -468,6 +468,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol
.flags = 0,
.tag = tag,
.index = undefined,
+ .virtual_address = undefined,
});
try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, loc, {});
try wasm.globals.put(wasm.base.allocator, name_offset, loc);
@@ -886,32 +887,12 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
const loc = try wasm.createSyntheticSymbol("__heap_base", .data);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
-
- // TODO: Can we use `createAtom` here while also re-using the symbol
- // from `createSyntheticSymbol`.
- const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
- const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
- atom.* = Atom.empty;
- atom.sym_index = loc.index;
- atom.alignment = 1;
-
- try wasm.parseAtom(atom_index, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
if (wasm.undefs.fetchSwapRemove("__heap_end")) |kv| {
const loc = try wasm.createSyntheticSymbol("__heap_end", .data);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc);
-
- const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
- const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
- atom.* = Atom.empty;
- atom.sym_index = loc.index;
- atom.alignment = 1;
-
- try wasm.parseAtom(atom_index, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
}
@@ -1011,6 +992,7 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 {
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
.tag = undefined, // will be set after updateDecl
.index = undefined, // will be set after updateDecl
+ .virtual_address = undefined, // will be set during atom allocation
};
if (wasm.symbols_free_list.popOrNull()) |index| {
wasm.symbols.items[index] = symbol;
@@ -1246,6 +1228,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
.tag = .data,
.index = undefined,
+ .virtual_address = undefined,
};
try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
@@ -1265,7 +1248,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
- return error.AnalysisFail;
+ return error.CodegenFail;
},
};
};
@@ -1292,6 +1275,7 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8) !u32 {
.flags = 0,
.index = undefined, // index to type will be set after merging function symbols
.tag = .function,
+ .virtual_address = undefined,
};
symbol.setGlobal(true);
symbol.setUndefined(true);
@@ -1610,7 +1594,6 @@ const Kind = union(enum) {
read_only,
uninitialized,
initialized,
- synthetic,
},
function: void,
@@ -1621,7 +1604,6 @@ const Kind = union(enum) {
.read_only => return ".rodata.",
.uninitialized => return ".bss.",
.initialized => return ".data.",
- .synthetic => return ".synthetic",
}
}
};
@@ -1788,6 +1770,30 @@ fn allocateAtoms(wasm: *Wasm) !void {
}
}
+/// For each data symbol, sets the virtual address.
+fn allocateVirtualAddresses(wasm: *Wasm) void {
+ for (wasm.resolved_symbols.keys()) |loc| {
+ const symbol = loc.getSymbol(wasm);
+ if (symbol.tag != .data) {
+ continue; // only data symbols have virtual addresses
+ }
+ const atom_index = wasm.symbol_atom.get(loc) orelse {
+ // synthetic symbol that does not contain an atom
+ continue;
+ };
+
+ const atom = wasm.getAtom(atom_index);
+ const merge_segment = wasm.base.options.output_mode != .Obj;
+ const segment_info = if (atom.file) |object_index| blk: {
+ break :blk wasm.objects.items[object_index].segment_info;
+ } else wasm.segment_info.values();
+ const segment_name = segment_info[symbol.index].outputName(merge_segment);
+ const segment_index = wasm.data_segments.get(segment_name).?;
+ const segment = wasm.segments.items[segment_index];
+ symbol.virtual_address = atom.offset + segment.offset;
+ }
+}
+
fn sortDataSegments(wasm: *Wasm) !void {
var new_mapping: std.StringArrayHashMapUnmanaged(u32) = .{};
try new_mapping.ensureUnusedCapacity(wasm.base.allocator, wasm.data_segments.count());
@@ -1805,7 +1811,6 @@ fn sortDataSegments(wasm: *Wasm) !void {
if (mem.startsWith(u8, name, ".rodata")) return 0;
if (mem.startsWith(u8, name, ".data")) return 1;
if (mem.startsWith(u8, name, ".text")) return 2;
- if (mem.startsWith(u8, name, ".synthetic")) return 100; // always at end
return 3;
}
};
@@ -2137,13 +2142,10 @@ fn setupExports(wasm: *Wasm) !void {
break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
};
const exp: types.Export = if (symbol.tag == .data) exp: {
- const atom_index = wasm.symbol_atom.get(sym_loc).?;
- const atom = wasm.getAtom(atom_index);
- const va = atom.getVA(wasm, symbol);
const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len);
try wasm.wasm_globals.append(wasm.base.allocator, .{
.global_type = .{ .valtype = .i32, .mutable = false },
- .init = .{ .i32_const = @intCast(i32, va) },
+ .init = .{ .i32_const = @intCast(i32, symbol.virtual_address) },
});
break :exp .{
.name = export_name,
@@ -2220,10 +2222,6 @@ fn setupMemory(wasm: *Wasm) !void {
var offset: u32 = @intCast(u32, memory_ptr);
var data_seg_it = wasm.data_segments.iterator();
while (data_seg_it.next()) |entry| {
- if (mem.eql(u8, entry.key_ptr.*, ".synthetic")) {
- // do not update synthetic segments as they are not part of the output
- continue;
- }
const segment = &wasm.segments.items[entry.value_ptr.*];
memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, segment.alignment);
memory_ptr += segment.size;
@@ -2240,12 +2238,8 @@ fn setupMemory(wasm: *Wasm) !void {
// One of the linked object files has a reference to the __heap_base symbol.
// We must set its virtual address so it can be used in relocations.
if (wasm.findGlobalSymbol("__heap_base")) |loc| {
- const segment_index = wasm.data_segments.get(".synthetic").?;
- const segment = &wasm.segments.items[segment_index];
- segment.offset = 0; // for simplicity we store the entire VA into atom's offset.
- const atom_index = wasm.symbol_atom.get(loc).?;
- const atom = wasm.getAtomPtr(atom_index);
- atom.offset = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
+ const symbol = loc.getSymbol(wasm);
+ symbol.virtual_address = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
}
// Setup the max amount of pages
@@ -2274,12 +2268,8 @@ fn setupMemory(wasm: *Wasm) !void {
log.debug("Total memory pages: {d}", .{wasm.memories.limits.min});
if (wasm.findGlobalSymbol("__heap_end")) |loc| {
- const segment_index = wasm.data_segments.get(".synthetic").?;
- const segment = &wasm.segments.items[segment_index];
- segment.offset = 0;
- const atom_index = wasm.symbol_atom.get(loc).?;
- const atom = wasm.getAtomPtr(atom_index);
- atom.offset = @intCast(u32, memory_ptr);
+ const symbol = loc.getSymbol(wasm);
+ symbol.virtual_address = @intCast(u32, memory_ptr);
}
if (wasm.base.options.max_memory) |max_memory| {
@@ -2417,6 +2407,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
.tag = .data,
.flags = 0,
.index = 0,
+ .virtual_address = undefined,
};
symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
@@ -2449,6 +2440,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
.tag = .data,
.flags = 0,
.index = 0,
+ .virtual_address = undefined,
};
names_symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
@@ -2635,6 +2627,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try man.addOptionalFile(compiler_rt_path);
man.hash.addOptionalBytes(options.entry);
man.hash.addOptional(options.stack_size_override);
+ man.hash.add(wasm.base.options.build_id);
man.hash.add(options.import_memory);
man.hash.add(options.import_table);
man.hash.add(options.export_table);
@@ -2748,6 +2741,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.allocateAtoms();
try wasm.setupMemory();
+ wasm.allocateVirtualAddresses();
wasm.mapFunctionTable();
try wasm.mergeSections();
try wasm.mergeTypes();
@@ -2866,6 +2860,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.allocateAtoms();
try wasm.setupMemory();
+ wasm.allocateVirtualAddresses();
wasm.mapFunctionTable();
try wasm.mergeSections();
try wasm.mergeTypes();
@@ -3225,6 +3220,12 @@ fn writeToFile(
}
if (!wasm.base.options.strip) {
+ // The build id must be computed on the main sections only,
+ // so we have to do it now, before the debug sections.
+ if (wasm.base.options.build_id) {
+ try emitBuildIdSection(&binary_bytes);
+ }
+
// if (wasm.dwarf) |*dwarf| {
// const mod = wasm.base.options.module.?;
// try dwarf.writeDbgAbbrev();
@@ -3363,6 +3364,33 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
);
}
+fn emitBuildIdSection(binary_bytes: *std.ArrayList(u8)) !void {
+ const header_offset = try reserveCustomSectionHeader(binary_bytes);
+
+ const writer = binary_bytes.writer();
+ const build_id = "build_id";
+ try leb.writeULEB128(writer, @intCast(u32, build_id.len));
+ try writer.writeAll(build_id);
+
+ var id: [16]u8 = undefined;
+ std.crypto.hash.sha3.TurboShake128(null).hash(binary_bytes.items, &id, .{});
+ var uuid: [36]u8 = undefined;
+ _ = try std.fmt.bufPrint(&uuid, "{s}-{s}-{s}-{s}-{s}", .{
+ std.fmt.fmtSliceHexLower(id[0..4]), std.fmt.fmtSliceHexLower(id[4..6]), std.fmt.fmtSliceHexLower(id[6..8]),
+ std.fmt.fmtSliceHexLower(id[8..10]), std.fmt.fmtSliceHexLower(id[10..]),
+ });
+
+ try leb.writeULEB128(writer, @as(u32, 1));
+ try leb.writeULEB128(writer, @as(u32, uuid.len));
+ try writer.writeAll(&uuid);
+
+ try writeCustomSectionHeader(
+ binary_bytes.items,
+ header_offset,
+ @intCast(u32, binary_bytes.items.len - header_offset - 6),
+ );
+}
+
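To make the payload concrete, the section written above carries the section name "build_id", the constant 1, and a 36-character 8-4-4-4-12 hex rendering of the 16-byte TurboShake128 digest; a sketch of the bytes following the reserved id/size header, with a made-up digest:

    08 "build_id"                             ; name length, name
    01                                        ; single build id entry
    24 "6ba7b810-9dad-41d1-80b4-00c04fd430c8" ; id length (36), digest as hex groups
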
fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []const bool, features_count: u32) !void {
const header_offset = try reserveCustomSectionHeader(binary_bytes);
@@ -3426,8 +3454,6 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem
// bss section is not emitted when this condition holds true, so we also
// do not output a name for it.
if (!wasm.base.options.import_memory and std.mem.eql(u8, key, ".bss")) continue;
- // Synthetic segments are not emitted
- if (std.mem.eql(u8, key, ".synthetic")) continue;
segments.appendAssumeCapacity(.{ .index = data_segment_index, .name = key });
data_segment_index += 1;
}
@@ -3594,6 +3620,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
try man.addOptionalFile(compiler_rt_path);
man.hash.addOptionalBytes(wasm.base.options.entry);
man.hash.addOptional(wasm.base.options.stack_size_override);
+ man.hash.add(wasm.base.options.build_id);
man.hash.add(wasm.base.options.import_memory);
man.hash.add(wasm.base.options.import_table);
man.hash.add(wasm.base.options.export_table);
@@ -3760,6 +3787,12 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
if (wasm.base.options.import_symbols) {
try argv.append("--allow-undefined");
}
+
+ // XXX - TODO: add when wasm-ld supports --build-id.
+ // if (wasm.base.options.build_id) {
+ // try argv.append("--build-id=tree");
+ // }
+
try argv.appendSlice(&.{ "-o", full_out_path });
if (target.cpu.arch == .wasm64) {
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index e719f8dfcc..0c9d761f05 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -89,21 +89,6 @@ pub fn getSymbolIndex(atom: Atom) ?u32 {
return atom.sym_index;
}
-/// Returns the virtual address of the `Atom`. This is the address starting
-/// from the first entry within a section.
-pub fn getVA(atom: Atom, wasm: *const Wasm, symbol: *const Symbol) u32 {
- if (symbol.tag == .function) return atom.offset;
- std.debug.assert(symbol.tag == .data);
- const merge_segment = wasm.base.options.output_mode != .Obj;
- const segment_info = if (atom.file) |object_index| blk: {
- break :blk wasm.objects.items[object_index].segment_info;
- } else wasm.segment_info.values();
- const segment_name = segment_info[symbol.index].outputName(merge_segment);
- const segment_index = wasm.data_segments.get(segment_name).?;
- const segment = wasm.segments.items[segment_index];
- return segment.offset + atom.offset;
-}
-
/// Resolves the relocations within the atom, writing the new value
/// at the calculated offset.
pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void {
@@ -186,14 +171,7 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
if (symbol.isUndefined()) {
return 0;
}
- const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
- // this can only occur during incremental-compilation when a relocation
- // still points to a freed decl. It is fine to emit the value 0 here
- // as no actual code will point towards it.
- return 0;
- };
- const target_atom = wasm_bin.getAtom(target_atom_index);
- const va = @intCast(i32, target_atom.getVA(wasm_bin, symbol));
+ const va = @intCast(i64, symbol.virtual_address);
return @intCast(u32, va + relocation.addend);
},
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 82cab2528a..45c9464ec8 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -270,6 +270,7 @@ fn checkLegacyIndirectFunctionTable(object: *Object) !?Symbol {
.name = table_import.name,
.tag = .table,
.index = 0,
+ .virtual_address = undefined,
};
table_symbol.setFlag(.WASM_SYM_UNDEFINED);
table_symbol.setFlag(.WASM_SYM_NO_STRIP);
@@ -758,6 +759,7 @@ fn Parser(comptime ReaderType: type) type {
.tag = tag,
.name = undefined,
.index = undefined,
+ .virtual_address = undefined,
};
switch (tag) {
diff --git a/src/link/Wasm/Symbol.zig b/src/link/Wasm/Symbol.zig
index 089eee289e..156b507a32 100644
--- a/src/link/Wasm/Symbol.zig
+++ b/src/link/Wasm/Symbol.zig
@@ -20,6 +20,9 @@ name: u32,
index: u32,
/// Represents the kind of the symbol, such as a function or global.
tag: Tag,
+/// Contains the virtual address of the symbol, relative to the start of its section.
+/// This differs from the offset of an `Atom` which is relative to the start of a segment.
+virtual_address: u32,
pub const Tag = enum {
function,
diff --git a/src/main.zig b/src/main.zig
index fb02628c61..e7d5c647b5 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -9,6 +9,8 @@ const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const Ast = std.zig.Ast;
const warn = std.log.warn;
+const ThreadPool = std.Thread.Pool;
+const cleanExit = std.process.cleanExit;
const tracy = @import("tracy.zig");
const Compilation = @import("Compilation.zig");
@@ -22,8 +24,10 @@ const translate_c = @import("translate_c.zig");
const clang = @import("clang.zig");
const Cache = std.Build.Cache;
const target_util = @import("target.zig");
-const ThreadPool = @import("ThreadPool.zig");
const crash_report = @import("crash_report.zig");
+const Module = @import("Module.zig");
+const AstGen = @import("AstGen.zig");
+const Server = std.zig.Server;
pub const std_options = struct {
pub const wasiCwd = wasi_cwd;
@@ -361,7 +365,6 @@ const usage_build_generic =
\\
\\General Options:
\\ -h, --help Print this help and exit
- \\ --watch Enable compiler REPL
\\ --color [auto|off|on] Enable or disable colored error messages
\\ -femit-bin[=path] (default) Output machine code
\\ -fno-emit-bin Do not output machine code
@@ -666,6 +669,16 @@ const ArgMode = union(enum) {
run,
};
+/// Avoid dragging networking into zig2.c because it adds dependencies on some
+/// linker symbols that are annoying to satisfy while bootstrapping.
+const Ip4Address = if (build_options.omit_pkg_fetching_code) void else std.net.Ip4Address;
+
+const Listen = union(enum) {
+ none,
+ ip4: Ip4Address,
+ stdio,
+};
+
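A small aside on the conditional type above (the flag name below is a stand-in, not part of this diff): because the `if` condition is comptime-known, the untaken branch is never analyzed, which is what keeps `std.net` out of the bootstrap build.

const std = @import("std");

// Stand-in for `!build_options.omit_pkg_fetching_code`.
const enable_networking = false;

// Comptime branch elimination: with the flag off, the type collapses to void
// and std.net.Ip4Address is never semantically analyzed.
const Address = if (enable_networking) std.net.Ip4Address else void;

test "networking type is compiled out" {
    try std.testing.expect(Address == void);
}
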
fn buildOutputType(
gpa: Allocator,
arena: Allocator,
@@ -686,7 +699,7 @@ fn buildOutputType(
var formatted_panics: ?bool = null;
var function_sections = false;
var no_builtin = false;
- var watch = false;
+ var listen: Listen = .none;
var debug_compile_errors = false;
var verbose_link = (builtin.os.tag != .wasi or builtin.link_libc) and std.process.hasEnvVarConstant("ZIG_VERBOSE_LINK");
var verbose_cc = (builtin.os.tag != .wasi or builtin.link_libc) and std.process.hasEnvVarConstant("ZIG_VERBOSE_CC");
@@ -1144,6 +1157,23 @@ fn buildOutputType(
} else {
try log_scopes.append(gpa, args_iter.nextOrFatal());
}
+ } else if (mem.eql(u8, arg, "--listen")) {
+ const next_arg = args_iter.nextOrFatal();
+ if (mem.eql(u8, next_arg, "-")) {
+ listen = .stdio;
+ } else {
+ if (build_options.omit_pkg_fetching_code) unreachable;
+ // example: --listen 127.0.0.1:9000
+ var it = std.mem.split(u8, next_arg, ":");
+ const host = it.next().?;
+ const port_text = it.next() orelse "14735";
+ const port = std.fmt.parseInt(u16, port_text, 10) catch |err|
+ fatal("invalid port number: '{s}': {s}", .{ port_text, @errorName(err) });
+ listen = .{ .ip4 = std.net.Ip4Address.parse(host, port) catch |err|
+ fatal("invalid host: '{s}': {s}", .{ host, @errorName(err) }) };
+ }
+ } else if (mem.eql(u8, arg, "--listen=-")) {
+ listen = .stdio;
} else if (mem.eql(u8, arg, "--debug-link-snapshot")) {
if (!build_options.enable_link_snapshots) {
std.log.warn("Zig was compiled without linker snapshots enabled (-Dlink-snapshot). --debug-link-snapshot has no effect.", .{});
@@ -1172,8 +1202,6 @@ fn buildOutputType(
test_evented_io = true;
} else if (mem.eql(u8, arg, "--test-no-exec")) {
test_no_exec = true;
- } else if (mem.eql(u8, arg, "--watch")) {
- watch = true;
} else if (mem.eql(u8, arg, "-ftime-report")) {
time_report = true;
} else if (mem.eql(u8, arg, "-fstack-report")) {
@@ -2999,7 +3027,7 @@ fn buildOutputType(
defer zig_lib_directory.handle.close();
var thread_pool: ThreadPool = undefined;
- try thread_pool.init(gpa);
+ try thread_pool.init(.{ .allocator = gpa });
defer thread_pool.deinit();
var libc_installation: ?LibCInstallation = null;
@@ -3259,8 +3287,52 @@ fn buildOutputType(
if (show_builtin) {
return std.io.getStdOut().writeAll(try comp.generateBuiltinZigSource(arena));
}
+ switch (listen) {
+ .none => {},
+ .stdio => {
+ if (build_options.only_c) unreachable;
+ try serve(
+ comp,
+ std.io.getStdIn(),
+ std.io.getStdOut(),
+ test_exec_args.items,
+ self_exe_path,
+ arg_mode,
+ all_args,
+ runtime_args_start,
+ );
+ return cleanExit();
+ },
+ .ip4 => |ip4_addr| {
+ if (build_options.omit_pkg_fetching_code) unreachable;
+
+ var server = std.net.StreamServer.init(.{
+ .reuse_address = true,
+ });
+ defer server.deinit();
+
+ try server.listen(.{ .in = ip4_addr });
+
+ while (true) {
+ const conn = try server.accept();
+ defer conn.stream.close();
+
+ try serve(
+ comp,
+ .{ .handle = conn.stream.handle },
+ .{ .handle = conn.stream.handle },
+ test_exec_args.items,
+ self_exe_path,
+ arg_mode,
+ all_args,
+ runtime_args_start,
+ );
+ }
+ },
+ }
+
if (arg_mode == .translate_c) {
- return cmdTranslateC(comp, arena, have_enable_cache);
+ return cmdTranslateC(comp, arena, null);
}
const hook: AfterUpdateHook = blk: {
@@ -3276,7 +3348,7 @@ fn buildOutputType(
};
updateModule(gpa, comp, hook) catch |err| switch (err) {
- error.SemanticAnalyzeFail => if (!watch) process.exit(1),
+ error.SemanticAnalyzeFail => if (listen == .none) process.exit(1),
else => |e| return e,
};
if (build_options.only_c) return cleanExit();
@@ -3332,7 +3404,6 @@ fn buildOutputType(
self_exe_path.?,
arg_mode,
target_info,
- watch,
&comp_destroyed,
all_args,
runtime_args_start,
@@ -3340,111 +3411,217 @@ fn buildOutputType(
);
}
- const stdin = std.io.getStdIn().reader();
- const stderr = std.io.getStdErr().writer();
- var repl_buf: [1024]u8 = undefined;
-
- const ReplCmd = enum {
- update,
- help,
- run,
- update_and_run,
- };
-
- var last_cmd: ReplCmd = .help;
-
- while (watch) {
- try stderr.print("(zig) ", .{});
- try comp.makeBinFileExecutable();
- if (stdin.readUntilDelimiterOrEof(&repl_buf, '\n') catch |err| {
- try stderr.print("\nUnable to parse command: {s}\n", .{@errorName(err)});
- continue;
- }) |line| {
- const actual_line = mem.trimRight(u8, line, "\r\n ");
- const cmd: ReplCmd = blk: {
- if (mem.eql(u8, actual_line, "update")) {
- break :blk .update;
- } else if (mem.eql(u8, actual_line, "exit")) {
- break;
- } else if (mem.eql(u8, actual_line, "help")) {
- break :blk .help;
- } else if (mem.eql(u8, actual_line, "run")) {
- break :blk .run;
- } else if (mem.eql(u8, actual_line, "update-and-run")) {
- break :blk .update_and_run;
- } else if (actual_line.len == 0) {
- break :blk last_cmd;
- } else {
- try stderr.print("unknown command: {s}\n", .{actual_line});
- continue;
- }
- };
- last_cmd = cmd;
- switch (cmd) {
- .update => {
- tracy.frameMark();
- if (output_mode == .Exe) {
- try comp.makeBinFileWritable();
- }
- updateModule(gpa, comp, hook) catch |err| switch (err) {
- error.SemanticAnalyzeFail => continue,
- else => |e| return e,
- };
- },
- .help => {
- try stderr.writeAll(repl_help);
- },
- .run => {
- tracy.frameMark();
- try runOrTest(
- comp,
- gpa,
- arena,
- test_exec_args.items,
- self_exe_path.?,
- arg_mode,
- target_info,
- watch,
- &comp_destroyed,
- all_args,
- runtime_args_start,
- link_libc,
- );
- },
- .update_and_run => {
- tracy.frameMark();
- if (output_mode == .Exe) {
- try comp.makeBinFileWritable();
- }
- updateModule(gpa, comp, hook) catch |err| switch (err) {
- error.SemanticAnalyzeFail => continue,
- else => |e| return e,
- };
- try comp.makeBinFileExecutable();
- try runOrTest(
- comp,
- gpa,
- arena,
- test_exec_args.items,
- self_exe_path.?,
- arg_mode,
- target_info,
- watch,
- &comp_destroyed,
- all_args,
- runtime_args_start,
- link_libc,
- );
- },
- }
- } else {
- break;
- }
- }
// Skip resource deallocation in release builds; let the OS do it.
return cleanExit();
}
+fn serve(
+ comp: *Compilation,
+ in: fs.File,
+ out: fs.File,
+ test_exec_args: []const ?[]const u8,
+ self_exe_path: ?[]const u8,
+ arg_mode: ArgMode,
+ all_args: []const []const u8,
+ runtime_args_start: ?usize,
+) !void {
+ const gpa = comp.gpa;
+
+ var server = try Server.init(.{
+ .gpa = gpa,
+ .in = in,
+ .out = out,
+ .zig_version = build_options.version,
+ });
+ defer server.deinit();
+
+ var child_pid: ?std.ChildProcess.Id = null;
+
+ var progress: std.Progress = .{
+ .terminal = null,
+ .root = .{
+ .context = undefined,
+ .parent = null,
+ .name = "",
+ .unprotected_estimated_total_items = 0,
+ .unprotected_completed_items = 0,
+ },
+ .columns_written = 0,
+ .prev_refresh_timestamp = 0,
+ .timer = null,
+ .done = false,
+ };
+ const main_progress_node = &progress.root;
+ main_progress_node.context = &progress;
+
+ while (true) {
+ const hdr = try server.receiveMessage();
+
+ switch (hdr.tag) {
+ .exit => {
+ return cleanExit();
+ },
+ .update => {
+ assert(main_progress_node.recently_updated_child == null);
+ tracy.frameMark();
+
+ if (arg_mode == .translate_c) {
+ var arena_instance = std.heap.ArenaAllocator.init(gpa);
+ defer arena_instance.deinit();
+ const arena = arena_instance.allocator();
+ var output: TranslateCOutput = undefined;
+ try cmdTranslateC(comp, arena, &output);
+ try server.serveEmitBinPath(output.path, .{
+ .flags = .{ .cache_hit = output.cache_hit },
+ });
+ continue;
+ }
+
+ if (comp.bin_file.options.output_mode == .Exe) {
+ try comp.makeBinFileWritable();
+ }
+
+ {
+ var reset: std.Thread.ResetEvent = .{};
+
+ var progress_thread = try std.Thread.spawn(.{}, progressThread, .{
+ &progress, &server, &reset,
+ });
+ defer {
+ reset.set();
+ progress_thread.join();
+ }
+
+ try comp.update(main_progress_node);
+ }
+
+ try comp.makeBinFileExecutable();
+ try serveUpdateResults(&server, comp);
+ },
+ .run => {
+ if (child_pid != null) {
+ @panic("TODO block until the child exits");
+ }
+ @panic("TODO call runOrTest");
+ //try runOrTest(
+ // comp,
+ // gpa,
+ // arena,
+ // test_exec_args,
+ // self_exe_path.?,
+ // arg_mode,
+ // target_info,
+ // true,
+ // &comp_destroyed,
+ // all_args,
+ // runtime_args_start,
+ // link_libc,
+ //);
+ },
+ .hot_update => {
+ tracy.frameMark();
+ assert(main_progress_node.recently_updated_child == null);
+ if (child_pid) |pid| {
+ try comp.hotCodeSwap(main_progress_node, pid);
+ try serveUpdateResults(&server, comp);
+ } else {
+ if (comp.bin_file.options.output_mode == .Exe) {
+ try comp.makeBinFileWritable();
+ }
+ try comp.update(main_progress_node);
+ try comp.makeBinFileExecutable();
+ try serveUpdateResults(&server, comp);
+
+ child_pid = try runOrTestHotSwap(
+ comp,
+ gpa,
+ test_exec_args,
+ self_exe_path.?,
+ arg_mode,
+ all_args,
+ runtime_args_start,
+ );
+ }
+ },
+ else => {
+ fatal("unrecognized message from client: 0x{x}", .{@enumToInt(hdr.tag)});
+ },
+ }
+ }
+}
+
+fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Thread.ResetEvent) void {
+ while (true) {
+ if (reset.timedWait(500 * std.time.ns_per_ms)) |_| {
+ // The Compilation update has completed.
+ return;
+ } else |err| switch (err) {
+ error.Timeout => {},
+ }
+
+ var buf: std.BoundedArray(u8, 160) = .{};
+
+ {
+ progress.update_mutex.lock();
+ defer progress.update_mutex.unlock();
+
+ var need_ellipse = false;
+ var maybe_node: ?*std.Progress.Node = &progress.root;
+ while (maybe_node) |node| {
+ if (need_ellipse) {
+ buf.appendSlice("... ") catch {};
+ }
+ need_ellipse = false;
+ const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
+ const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
+ const current_item = completed_items + 1;
+ if (node.name.len != 0 or eti > 0) {
+ if (node.name.len != 0) {
+ buf.appendSlice(node.name) catch {};
+ need_ellipse = true;
+ }
+ if (eti > 0) {
+ if (need_ellipse) buf.appendSlice(" ") catch {};
+ buf.writer().print("[{d}/{d}] ", .{ current_item, eti }) catch {};
+ need_ellipse = false;
+ } else if (completed_items != 0) {
+ if (need_ellipse) buf.appendSlice(" ") catch {};
+ buf.writer().print("[{d}] ", .{current_item}) catch {};
+ need_ellipse = false;
+ }
+ }
+ maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .Acquire);
+ }
+ }
+
+ const progress_string = buf.slice();
+
+ server.serveMessage(.{
+ .tag = .progress,
+ .bytes_len = @intCast(u32, progress_string.len),
+ }, &.{
+ progress_string,
+ }) catch |err| {
+ fatal("unable to write to client: {s}", .{@errorName(err)});
+ };
+ }
+}
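
To make the progress reporting above concrete, here is a small illustrative test (node name and counts are made up, not part of this diff) of the single-line string the loop assembles before sending it as a `.progress` message:

const std = @import("std");

test "progress line formatting" {
    // Mirrors the buffer handling above: a 160-byte bounded buffer, the node
    // name, a separating space, then "[current/total] ".
    var buf: std.BoundedArray(u8, 160) = .{};
    buf.appendSlice("Semantic Analysis") catch {};
    buf.appendSlice(" ") catch {};
    buf.writer().print("[{d}/{d}] ", .{ 13, 345 }) catch {};
    try std.testing.expectEqualStrings("Semantic Analysis [13/345] ", buf.slice());
}
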
+
+fn serveUpdateResults(s: *Server, comp: *Compilation) !void {
+ const gpa = comp.gpa;
+ var error_bundle = try comp.getAllErrorsAlloc();
+ defer error_bundle.deinit(gpa);
+ if (error_bundle.errorMessageCount() > 0) {
+ try s.serveErrorBundle(error_bundle);
+ } else if (comp.bin_file.options.emit) |emit| {
+ const full_path = try emit.directory.join(gpa, &.{emit.sub_path});
+ defer gpa.free(full_path);
+ try s.serveEmitBinPath(full_path, .{
+ .flags = .{ .cache_hit = comp.last_update_was_cache_hit },
+ });
+ }
+}
+
const ModuleDepIterator = struct {
split: mem.SplitIterator(u8),
@@ -3530,7 +3707,6 @@ fn runOrTest(
self_exe_path: []const u8,
arg_mode: ArgMode,
target_info: std.zig.system.NativeTargetInfo,
- watch: bool,
comp_destroyed: *bool,
all_args: []const []const u8,
runtime_args_start: ?usize,
@@ -3561,7 +3737,7 @@ fn runOrTest(
// We do not execve for tests because if the test fails we want to print
// the error message and invocation below.
- if (std.process.can_execv and arg_mode == .run and !watch) {
+ if (std.process.can_execv and arg_mode == .run) {
// execv releases the locks; no need to destroy the Compilation here.
const err = std.process.execve(gpa, argv.items, &env_map);
try warnAboutForeignBinaries(arena, arg_mode, target_info, link_libc);
@@ -3574,12 +3750,10 @@ fn runOrTest(
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
- if (!watch) {
- // Here we release all the locks associated with the Compilation so
- // that whatever this child process wants to do won't deadlock.
- comp.destroy();
- comp_destroyed.* = true;
- }
+ // Here we release all the locks associated with the Compilation so
+ // that whatever this child process wants to do won't deadlock.
+ comp.destroy();
+ comp_destroyed.* = true;
const term = child.spawnAndWait() catch |err| {
try warnAboutForeignBinaries(arena, arg_mode, target_info, link_libc);
@@ -3591,19 +3765,13 @@ fn runOrTest(
switch (term) {
.Exited => |code| {
if (code == 0) {
- if (!watch) return cleanExit();
- } else if (watch) {
- warn("process exited with code {d}", .{code});
+ return cleanExit();
} else {
process.exit(code);
}
},
else => {
- if (watch) {
- warn("process aborted abnormally", .{});
- } else {
- process.exit(1);
- }
+ process.exit(1);
},
}
},
@@ -3611,7 +3779,7 @@ fn runOrTest(
switch (term) {
.Exited => |code| {
if (code == 0) {
- if (!watch) return cleanExit();
+ return cleanExit();
} else {
const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following test command failed with exit code {d}:\n{s}", .{ code, cmd });
@@ -3631,6 +3799,62 @@ fn runOrTest(
}
}
+fn runOrTestHotSwap(
+ comp: *Compilation,
+ gpa: Allocator,
+ test_exec_args: []const ?[]const u8,
+ self_exe_path: []const u8,
+ arg_mode: ArgMode,
+ all_args: []const []const u8,
+ runtime_args_start: ?usize,
+) !std.ChildProcess.Id {
+ const exe_emit = comp.bin_file.options.emit.?;
+ // A naive `directory.join` here will indeed get the correct path to the binary;
+ // however, in the case of cwd, we actually want `./foo` so that the path can be executed.
+ const exe_path = try fs.path.join(gpa, &[_][]const u8{
+ exe_emit.directory.path orelse ".", exe_emit.sub_path,
+ });
+ defer gpa.free(exe_path);
+
+ var argv = std.ArrayList([]const u8).init(gpa);
+ defer argv.deinit();
+
+ if (test_exec_args.len == 0) {
+ // when testing, pass the zig_exe_path to argv
+ if (arg_mode == .zig_test)
+ try argv.appendSlice(&[_][]const u8{
+ exe_path, self_exe_path,
+ })
+ // when running, just pass the current exe
+ else
+ try argv.appendSlice(&[_][]const u8{
+ exe_path,
+ });
+ } else {
+ for (test_exec_args) |arg| {
+ if (arg) |a| {
+ try argv.append(a);
+ } else {
+ try argv.appendSlice(&[_][]const u8{
+ exe_path, self_exe_path,
+ });
+ }
+ }
+ }
+ if (runtime_args_start) |i| {
+ try argv.appendSlice(all_args[i..]);
+ }
+ var child = std.ChildProcess.init(argv.items, gpa);
+
+ child.stdin_behavior = .Inherit;
+ child.stdout_behavior = .Inherit;
+ child.stderr_behavior = .Inherit;
+
+ try child.spawn();
+
+ return child.id;
+}
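
A tiny illustration of the path concern in the comment above (the binary name is hypothetical, not part of this diff): joining with "." keeps the result spawnable as a relative path rather than a bare name resolved through PATH.

const std = @import("std");

test "cwd-relative executable path" {
    const gpa = std.testing.allocator;
    // "." stands in for a null emit directory path, as handled above.
    const exe_path = try std.fs.path.join(gpa, &.{ ".", "foo" });
    defer gpa.free(exe_path);
    try std.testing.expect(std.mem.endsWith(u8, exe_path, "foo"));
}
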
+
const AfterUpdateHook = union(enum) {
none,
print_emit_bin_dir_path,
@@ -3638,24 +3862,30 @@ const AfterUpdateHook = union(enum) {
};
fn updateModule(gpa: Allocator, comp: *Compilation, hook: AfterUpdateHook) !void {
- try comp.update();
+ {
+ // If the terminal is dumb, we don't want to show the user all the output.
+ var progress: std.Progress = .{ .dont_print_on_dumb = true };
+ const main_progress_node = progress.start("", 0);
+ defer main_progress_node.end();
+ switch (comp.color) {
+ .off => {
+ progress.terminal = null;
+ },
+ .on => {
+ progress.terminal = std.io.getStdErr();
+ progress.supports_ansi_escape_codes = true;
+ },
+ .auto => {},
+ }
+
+ try comp.update(main_progress_node);
+ }
var errors = try comp.getAllErrorsAlloc();
defer errors.deinit(comp.gpa);
- if (errors.list.len != 0) {
- const ttyconf: std.debug.TTY.Config = switch (comp.color) {
- .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
- .on => .escape_codes,
- .off => .no_color,
- };
- for (errors.list) |full_err_msg| {
- full_err_msg.renderToStdErr(ttyconf);
- }
- const log_text = comp.getCompileLogOutput();
- if (log_text.len != 0) {
- std.debug.print("\nCompile Log Output:\n{s}", .{log_text});
- }
+ if (errors.errorMessageCount() > 0) {
+ errors.renderToStdErr(renderOptions(comp.color));
return error.SemanticAnalyzeFail;
} else switch (hook) {
.none => {},
@@ -3697,7 +3927,12 @@ fn updateModule(gpa: Allocator, comp: *Compilation, hook: AfterUpdateHook) !void
}
}
-fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void {
+const TranslateCOutput = struct {
+ path: []const u8,
+ cache_hit: bool,
+};
+
+fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*TranslateCOutput) !void {
if (!build_options.have_llvm)
fatal("cannot translate-c: compiler built without LLVM extensions", .{});
@@ -3708,14 +3943,16 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void
var man: Cache.Manifest = comp.obtainCObjectCacheManifest();
man.want_shared_lock = false;
- defer if (enable_cache) man.deinit();
+ defer man.deinit();
man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects
Compilation.cache_helpers.hashCSource(&man, c_source_file) catch |err| {
fatal("unable to process '{s}': {s}", .{ c_source_file.src_path, @errorName(err) });
};
+ if (fancy_output) |p| p.cache_hit = true;
const digest = if (try man.hit()) man.final() else digest: {
+ if (fancy_output) |p| p.cache_hit = false;
var argv = std.ArrayList([]const u8).init(arena);
try argv.append(""); // argv[0] is program name, actual args start at [1]
@@ -3766,6 +4003,7 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void
error.OutOfMemory => return error.OutOfMemory,
error.ASTUnitFailure => fatal("clang API returned errors but due to a clang bug, it is not exposing the errors for zig to see. For more details: https://github.com/ziglang/zig/issues/4455", .{}),
error.SemanticAnalyzeFail => {
+ // TODO convert these to zig errors
for (clang_errors) |clang_err| {
std.debug.print("{s}:{d}:{d}: {s}\n", .{
if (clang_err.filename_ptr) |p| p[0..clang_err.filename_len] else "(no file)",
@@ -3810,12 +4048,11 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void
break :digest digest;
};
- if (enable_cache) {
+ if (fancy_output) |p| {
const full_zig_path = try comp.local_cache_directory.join(arena, &[_][]const u8{
"o", &digest, translated_zig_basename,
});
- try io.getStdOut().writer().print("{s}\n", .{full_zig_path});
- return cleanExit();
+ p.path = full_zig_path;
} else {
const out_zig_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest, translated_zig_basename });
const zig_file = comp.local_cache_directory.handle.openFile(out_zig_path, .{}) catch |err| {
@@ -4009,6 +4246,8 @@ pub const usage_build =
\\Options:
\\ -freference-trace[=num] How many lines of reference trace should be shown per compile error
\\ -fno-reference-trace Disable reference trace
+ \\ -fsummary Print the build summary, even on success
+ \\ -fno-summary Omit the build summary, even on failure
\\ --build-file [file] Override path to build.zig
\\ --cache-dir [path] Override path to local Zig cache directory
\\ --global-cache-dir [path] Override path to global Zig cache directory
@@ -4021,7 +4260,6 @@ pub const usage_build =
pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var color: Color = .auto;
- var prominent_compile_errors: bool = false;
// We want to release all the locks before executing the child process, so we make a nice
// big block here to ensure the cleanup gets run when we extract out our argv.
@@ -4082,8 +4320,6 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
i += 1;
override_global_cache_dir = args[i];
continue;
- } else if (mem.eql(u8, arg, "--prominent-compile-errors")) {
- prominent_compile_errors = true;
} else if (mem.eql(u8, arg, "-freference-trace")) {
try child_argv.append(arg);
reference_trace = 256;
@@ -4201,7 +4437,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
.basename = exe_basename,
};
var thread_pool: ThreadPool = undefined;
- try thread_pool.init(gpa);
+ try thread_pool.init(.{ .allocator = gpa });
defer thread_pool.deinit();
var cleanup_build_runner_dir: ?fs.Dir = null;
@@ -4228,6 +4464,10 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
.root_src_path = "build_runner.zig",
};
+ var build_pkg: Package = .{
+ .root_src_directory = build_directory,
+ .root_src_path = build_zig_basename,
+ };
if (!build_options.omit_pkg_fetching_code) {
var http_client: std.http.Client = .{ .allocator = gpa };
defer http_client.deinit();
@@ -4247,9 +4487,14 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
var all_modules: Package.AllModules = .{};
defer all_modules.deinit(gpa);
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ try wip_errors.init(gpa);
+ defer wip_errors.deinit();
+
// Here we borrow main package's table and will replace it with a fresh
// one after this process completes.
- main_pkg.fetchAndAddDependencies(
+ const fetch_result = build_pkg.fetchAndAddDependencies(
+ &main_pkg,
arena,
&thread_pool,
&http_client,
@@ -4259,12 +4504,16 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
&dependencies_source,
&build_roots_source,
"",
- color,
+ &wip_errors,
&all_modules,
- ) catch |err| switch (err) {
- error.PackageFetchFailed => process.exit(1),
- else => |e| return e,
- };
+ );
+ if (wip_errors.root_list.items.len > 0) {
+ var errors = try wip_errors.toOwnedBundle("");
+ defer errors.deinit(gpa);
+ errors.renderToStdErr(renderOptions(color));
+ process.exit(1);
+ }
+ try fetch_result;
try dependencies_source.appendSlice("};\npub const build_root = struct {\n");
try dependencies_source.appendSlice(build_roots_source.items);
@@ -4280,11 +4529,6 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
mem.swap(Package.Table, &main_pkg.table, &deps_pkg.table);
try main_pkg.add(gpa, "@dependencies", deps_pkg);
}
-
- var build_pkg: Package = .{
- .root_src_directory = build_directory,
- .root_src_path = build_zig_basename,
- };
try main_pkg.add(gpa, "@build", &build_pkg);
const comp = Compilation.create(gpa, .{
@@ -4312,7 +4556,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
defer comp.destroy();
updateModule(gpa, comp, .none) catch |err| switch (err) {
- error.SemanticAnalyzeFail => process.exit(1),
+ error.SemanticAnalyzeFail => process.exit(2),
else => |e| return e,
};
try comp.makeBinFileExecutable();
@@ -4336,13 +4580,13 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
switch (term) {
.Exited => |code| {
if (code == 0) return cleanExit();
+ // Indicates that the build runner has reported compile errors
+ // and this parent process does not need to report any further
+ // diagnostics.
+ if (code == 2) process.exit(2);
- if (prominent_compile_errors) {
- fatal("the build command failed with exit code {d}", .{code});
- } else {
- const cmd = try std.mem.join(arena, " ", child_argv);
- fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd });
- }
+ const cmd = try std.mem.join(arena, " ", child_argv);
+ fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd });
},
else => {
const cmd = try std.mem.join(arena, " ", child_argv);
@@ -4356,7 +4600,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
}
fn readSourceFileToEndAlloc(
- allocator: mem.Allocator,
+ allocator: Allocator,
input: *const fs.File,
size_hint: ?usize,
) ![:0]u8 {
@@ -4500,12 +4744,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
};
defer tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, tree, "", color);
- var has_ast_error = false;
if (check_ast_flag) {
- const Module = @import("Module.zig");
- const AstGen = @import("AstGen.zig");
-
var file: Module.File = .{
.status = .never_loaded,
.source_loaded = true,
@@ -4528,25 +4767,18 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
defer file.zir.deinit(gpa);
if (file.zir.hasCompileErrors()) {
- var arena_instance = std.heap.ArenaAllocator.init(gpa);
- defer arena_instance.deinit();
- var errors = std.ArrayList(Compilation.AllErrors.Message).init(gpa);
- defer errors.deinit();
-
- try Compilation.AllErrors.addZir(arena_instance.allocator(), &errors, &file);
- const ttyconf: std.debug.TTY.Config = switch (color) {
- .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
- .on => .escape_codes,
- .off => .no_color,
- };
- for (errors.items) |full_err_msg| {
- full_err_msg.renderToStdErr(ttyconf);
- }
- has_ast_error = true;
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ try wip_errors.init(gpa);
+ defer wip_errors.deinit();
+ try Compilation.addZirErrorMessages(&wip_errors, &file);
+ var error_bundle = try wip_errors.toOwnedBundle("");
+ defer error_bundle.deinit(gpa);
+ error_bundle.renderToStdErr(renderOptions(color));
+ process.exit(2);
}
- }
- if (tree.errors.len != 0 or has_ast_error) {
- process.exit(1);
+ } else if (tree.errors.len != 0) {
+ try printAstErrorsToStderr(gpa, tree, "", color);
+ process.exit(2);
}
const formatted = try tree.render(gpa);
defer gpa.free(formatted);
@@ -4622,6 +4854,7 @@ const FmtError = error{
ConnectionResetByPeer,
LockViolation,
NetNameDeleted,
+ InvalidArgument,
} || fs.File.OpenError;
fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void {
@@ -4687,12 +4920,13 @@ fn fmtPathFile(
if (stat.kind == .Directory)
return error.IsDir;
+ const gpa = fmt.gpa;
const source_code = try readSourceFileToEndAlloc(
- fmt.gpa,
+ gpa,
&source_file,
std.math.cast(usize, stat.size) orelse return error.FileTooBig,
);
- defer fmt.gpa.free(source_code);
+ defer gpa.free(source_code);
source_file.close();
file_closed = true;
@@ -4700,19 +4934,16 @@ fn fmtPathFile(
// Add to set after no longer possible to get error.IsDir.
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
- var tree = try Ast.parse(fmt.gpa, source_code, .zig);
- defer tree.deinit(fmt.gpa);
+ var tree = try Ast.parse(gpa, source_code, .zig);
+ defer tree.deinit(gpa);
- try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree, file_path, fmt.color);
if (tree.errors.len != 0) {
+ try printAstErrorsToStderr(gpa, tree, file_path, fmt.color);
fmt.any_error = true;
return;
}
if (fmt.check_ast) {
- const Module = @import("Module.zig");
- const AstGen = @import("AstGen.zig");
-
var file: Module.File = .{
.status = .never_loaded,
.source_loaded = true,
@@ -4731,31 +4962,24 @@ fn fmtPathFile(
.root_decl = .none,
};
- file.pkg = try Package.create(fmt.gpa, null, file.sub_file_path);
- defer file.pkg.destroy(fmt.gpa);
+ file.pkg = try Package.create(gpa, null, file.sub_file_path);
+ defer file.pkg.destroy(gpa);
if (stat.size > max_src_size)
return error.FileTooBig;
- file.zir = try AstGen.generate(fmt.gpa, file.tree);
+ file.zir = try AstGen.generate(gpa, file.tree);
file.zir_loaded = true;
- defer file.zir.deinit(fmt.gpa);
+ defer file.zir.deinit(gpa);
if (file.zir.hasCompileErrors()) {
- var arena_instance = std.heap.ArenaAllocator.init(fmt.gpa);
- defer arena_instance.deinit();
- var errors = std.ArrayList(Compilation.AllErrors.Message).init(fmt.gpa);
- defer errors.deinit();
-
- try Compilation.AllErrors.addZir(arena_instance.allocator(), &errors, &file);
- const ttyconf: std.debug.TTY.Config = switch (fmt.color) {
- .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
- .on => .escape_codes,
- .off => .no_color,
- };
- for (errors.items) |full_err_msg| {
- full_err_msg.renderToStdErr(ttyconf);
- }
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ try wip_errors.init(gpa);
+ defer wip_errors.deinit();
+ try Compilation.addZirErrorMessages(&wip_errors, &file);
+ var error_bundle = try wip_errors.toOwnedBundle("");
+ defer error_bundle.deinit(gpa);
+ error_bundle.renderToStdErr(renderOptions(fmt.color));
fmt.any_error = true;
}
}
@@ -4783,100 +5007,50 @@ fn fmtPathFile(
}
}
-pub fn printErrsMsgToStdErr(
- gpa: mem.Allocator,
- arena: mem.Allocator,
+fn printAstErrorsToStderr(gpa: Allocator, tree: Ast, path: []const u8, color: Color) !void {
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ try wip_errors.init(gpa);
+ defer wip_errors.deinit();
+
+ try putAstErrorsIntoBundle(gpa, tree, path, &wip_errors);
+
+ var error_bundle = try wip_errors.toOwnedBundle("");
+ defer error_bundle.deinit(gpa);
+ error_bundle.renderToStdErr(renderOptions(color));
+}
+
+pub fn putAstErrorsIntoBundle(
+ gpa: Allocator,
tree: Ast,
path: []const u8,
- color: Color,
+ wip_errors: *std.zig.ErrorBundle.Wip,
) !void {
- const parse_errors: []const Ast.Error = tree.errors;
- var i: usize = 0;
- while (i < parse_errors.len) : (i += 1) {
- const parse_error = parse_errors[i];
- const lok_token = parse_error.token;
- const token_tags = tree.tokens.items(.tag);
- const start_loc = tree.tokenLocation(0, lok_token);
- const source_line = tree.source[start_loc.line_start..start_loc.line_end];
+ var file: Module.File = .{
+ .status = .never_loaded,
+ .source_loaded = true,
+ .zir_loaded = false,
+ .sub_file_path = path,
+ .source = tree.source,
+ .stat = .{
+ .size = 0,
+ .inode = 0,
+ .mtime = 0,
+ },
+ .tree = tree,
+ .tree_loaded = true,
+ .zir = undefined,
+ .pkg = undefined,
+ .root_decl = .none,
+ };
- var text_buf = std.ArrayList(u8).init(gpa);
- defer text_buf.deinit();
- const writer = text_buf.writer();
- try tree.renderError(parse_error, writer);
- const text = try arena.dupe(u8, text_buf.items);
+ file.pkg = try Package.create(gpa, null, path);
+ defer file.pkg.destroy(gpa);
- var notes_buffer: [2]Compilation.AllErrors.Message = undefined;
- var notes_len: usize = 0;
+ file.zir = try AstGen.generate(gpa, file.tree);
+ file.zir_loaded = true;
+ defer file.zir.deinit(gpa);
- if (token_tags[parse_error.token + @boolToInt(parse_error.token_is_prev)] == .invalid) {
- const bad_off = @intCast(u32, tree.tokenSlice(parse_error.token + @boolToInt(parse_error.token_is_prev)).len);
- const byte_offset = @intCast(u32, start_loc.line_start) + @intCast(u32, start_loc.column) + bad_off;
- notes_buffer[notes_len] = .{
- .src = .{
- .src_path = path,
- .msg = try std.fmt.allocPrint(arena, "invalid byte: '{'}'", .{
- std.zig.fmtEscapes(tree.source[byte_offset..][0..1]),
- }),
- .span = .{ .start = byte_offset, .end = byte_offset + 1, .main = byte_offset },
- .line = @intCast(u32, start_loc.line),
- .column = @intCast(u32, start_loc.column) + bad_off,
- .source_line = source_line,
- },
- };
- notes_len += 1;
- }
-
- for (parse_errors[i + 1 ..]) |note| {
- if (!note.is_note) break;
-
- text_buf.items.len = 0;
- try tree.renderError(note, writer);
- const note_loc = tree.tokenLocation(0, note.token);
- const byte_offset = @intCast(u32, note_loc.line_start);
- notes_buffer[notes_len] = .{
- .src = .{
- .src_path = path,
- .msg = try arena.dupe(u8, text_buf.items),
- .span = .{
- .start = byte_offset,
- .end = byte_offset + @intCast(u32, tree.tokenSlice(note.token).len),
- .main = byte_offset,
- },
- .line = @intCast(u32, note_loc.line),
- .column = @intCast(u32, note_loc.column),
- .source_line = tree.source[note_loc.line_start..note_loc.line_end],
- },
- };
- i += 1;
- notes_len += 1;
- }
-
- const extra_offset = tree.errorOffset(parse_error);
- const byte_offset = @intCast(u32, start_loc.line_start) + extra_offset;
- const message: Compilation.AllErrors.Message = .{
- .src = .{
- .src_path = path,
- .msg = text,
- .span = .{
- .start = byte_offset,
- .end = byte_offset + @intCast(u32, tree.tokenSlice(lok_token).len),
- .main = byte_offset,
- },
- .line = @intCast(u32, start_loc.line),
- .column = @intCast(u32, start_loc.column) + extra_offset,
- .source_line = source_line,
- .notes = notes_buffer[0..notes_len],
- },
- };
-
- const ttyconf: std.debug.TTY.Config = switch (color) {
- .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
- .on => .escape_codes,
- .off => .no_color,
- };
-
- message.renderToStdErr(ttyconf);
- }
+ try Compilation.addZirErrorMessages(wip_errors, &file);
}
pub const info_zen =
@@ -5324,19 +5498,6 @@ fn detectNativeTargetInfo(cross_target: std.zig.CrossTarget) !std.zig.system.Nat
return std.zig.system.NativeTargetInfo.detect(cross_target);
}
-/// Indicate that we are now terminating with a successful exit code.
-/// In debug builds, this is a no-op, so that the calling code's
-/// cleanup mechanisms are tested and so that external tools that
-/// check for resource leaks can be accurate. In release builds, this
-/// calls exit(0), and does not return.
-pub fn cleanExit() void {
- if (builtin.mode == .Debug) {
- return;
- } else {
- process.exit(0);
- }
-}
-
const usage_ast_check =
\\Usage: zig ast-check [file]
\\
@@ -5359,8 +5520,6 @@ pub fn cmdAstCheck(
arena: Allocator,
args: []const []const u8,
) !void {
- const Module = @import("Module.zig");
- const AstGen = @import("AstGen.zig");
const Zir = @import("Zir.zig");
var color: Color = .auto;
@@ -5450,26 +5609,18 @@ pub fn cmdAstCheck(
file.tree_loaded = true;
defer file.tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, file.tree, file.sub_file_path, color);
- if (file.tree.errors.len != 0) {
- process.exit(1);
- }
-
file.zir = try AstGen.generate(gpa, file.tree);
file.zir_loaded = true;
defer file.zir.deinit(gpa);
if (file.zir.hasCompileErrors()) {
- var errors = std.ArrayList(Compilation.AllErrors.Message).init(arena);
- try Compilation.AllErrors.addZir(arena, &errors, &file);
- const ttyconf: std.debug.TTY.Config = switch (color) {
- .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
- .on => .escape_codes,
- .off => .no_color,
- };
- for (errors.items) |full_err_msg| {
- full_err_msg.renderToStdErr(ttyconf);
- }
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ try wip_errors.init(gpa);
+ defer wip_errors.deinit();
+ try Compilation.addZirErrorMessages(&wip_errors, &file);
+ var error_bundle = try wip_errors.toOwnedBundle("");
+ defer error_bundle.deinit(gpa);
+ error_bundle.renderToStdErr(renderOptions(color));
process.exit(1);
}
@@ -5527,8 +5678,7 @@ pub fn cmdChangelist(
arena: Allocator,
args: []const []const u8,
) !void {
- const Module = @import("Module.zig");
- const AstGen = @import("AstGen.zig");
+ const color: Color = .auto;
const Zir = @import("Zir.zig");
const old_source_file = args[0];
@@ -5576,22 +5726,18 @@ pub fn cmdChangelist(
file.tree_loaded = true;
defer file.tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, file.tree, old_source_file, .auto);
- if (file.tree.errors.len != 0) {
- process.exit(1);
- }
-
file.zir = try AstGen.generate(gpa, file.tree);
file.zir_loaded = true;
defer file.zir.deinit(gpa);
if (file.zir.hasCompileErrors()) {
- var errors = std.ArrayList(Compilation.AllErrors.Message).init(arena);
- try Compilation.AllErrors.addZir(arena, &errors, &file);
- const ttyconf = std.debug.detectTTYConfig(std.io.getStdErr());
- for (errors.items) |full_err_msg| {
- full_err_msg.renderToStdErr(ttyconf);
- }
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ try wip_errors.init(gpa);
+ defer wip_errors.deinit();
+ try Compilation.addZirErrorMessages(&wip_errors, &file);
+ var error_bundle = try wip_errors.toOwnedBundle("");
+ defer error_bundle.deinit(gpa);
+ error_bundle.renderToStdErr(renderOptions(color));
process.exit(1);
}
@@ -5613,11 +5759,6 @@ pub fn cmdChangelist(
var new_tree = try Ast.parse(gpa, new_source, .zig);
defer new_tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, new_tree, new_source_file, .auto);
- if (new_tree.errors.len != 0) {
- process.exit(1);
- }
-
var old_zir = file.zir;
defer old_zir.deinit(gpa);
file.zir_loaded = false;
@@ -5625,12 +5766,13 @@ pub fn cmdChangelist(
file.zir_loaded = true;
if (file.zir.hasCompileErrors()) {
- var errors = std.ArrayList(Compilation.AllErrors.Message).init(arena);
- try Compilation.AllErrors.addZir(arena, &errors, &file);
- const ttyconf = std.debug.detectTTYConfig(std.io.getStdErr());
- for (errors.items) |full_err_msg| {
- full_err_msg.renderToStdErr(ttyconf);
- }
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ try wip_errors.init(gpa);
+ defer wip_errors.deinit();
+ try Compilation.addZirErrorMessages(&wip_errors, &file);
+ var error_bundle = try wip_errors.toOwnedBundle("");
+ defer error_bundle.deinit(gpa);
+ error_bundle.renderToStdErr(renderOptions(color));
process.exit(1);
}
@@ -5891,3 +6033,20 @@ const ClangSearchSanitizer = struct {
iframework: bool = false,
};
};
+
+fn get_tty_conf(color: Color) std.debug.TTY.Config {
+ return switch (color) {
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
+}
+
+fn renderOptions(color: Color) std.zig.ErrorBundle.RenderOptions {
+ const ttyconf = get_tty_conf(color);
+ return .{
+ .ttyconf = ttyconf,
+ .include_source_line = ttyconf != .no_color,
+ .include_reference_trace = ttyconf != .no_color,
+ };
+}
diff --git a/src/mingw.zig b/src/mingw.zig
index 9e9e180945..a85645e80b 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -19,7 +19,7 @@ pub const CRTFile = enum {
uuid_lib,
};
-pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
+pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -41,7 +41,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
//"-D_UNICODE",
//"-DWPRFLAG=1",
});
- return comp.build_crt_file("crt2", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("crt2", .Obj, .@"mingw-w64 crt2.o", prog_node, &.{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "crt", "crtexe.c",
@@ -60,7 +60,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
"-U__CRTDLL__",
"-D__MSVCRT__",
});
- return comp.build_crt_file("dllcrt2", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("dllcrt2", .Obj, .@"mingw-w64 dllcrt2.o", prog_node, &.{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "crt", "crtdll.c",
@@ -100,7 +100,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.extra_flags = args.items,
};
}
- return comp.build_crt_file("mingw32", .Lib, &c_source_files);
+ return comp.build_crt_file("mingw32", .Lib, .@"mingw-w64 mingw32.lib", prog_node, &c_source_files);
},
.msvcrt_os_lib => {
@@ -148,7 +148,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
};
}
}
- return comp.build_crt_file("msvcrt-os", .Lib, c_source_files.items);
+ return comp.build_crt_file("msvcrt-os", .Lib, .@"mingw-w64 msvcrt-os.lib", prog_node, c_source_files.items);
},
.mingwex_lib => {
@@ -211,7 +211,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
} else {
@panic("unsupported arch");
}
- return comp.build_crt_file("mingwex", .Lib, c_source_files.items);
+ return comp.build_crt_file("mingwex", .Lib, .@"mingw-w64 mingwex.lib", prog_node, c_source_files.items);
},
.uuid_lib => {
@@ -244,7 +244,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.extra_flags = extra_flags,
};
}
- return comp.build_crt_file("uuid", .Lib, &c_source_files);
+ return comp.build_crt_file("uuid", .Lib, .@"mingw-w64 uuid.lib", prog_node, &c_source_files);
},
}
}
diff --git a/src/musl.zig b/src/musl.zig
index 18e618df8f..4a3f1e6dde 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -17,7 +17,7 @@ pub const CRTFile = enum {
libc_so,
};
-pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
+pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -33,7 +33,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
try args.appendSlice(&[_][]const u8{
"-Qunused-arguments",
});
- return comp.build_crt_file("crti", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("crti", .Obj, .@"musl crti.o", prog_node, &.{
.{
.src_path = try start_asm_path(comp, arena, "crti.s"),
.extra_flags = args.items,
@@ -46,7 +46,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
try args.appendSlice(&[_][]const u8{
"-Qunused-arguments",
});
- return comp.build_crt_file("crtn", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("crtn", .Obj, .@"musl crtn.o", prog_node, &.{
.{
.src_path = try start_asm_path(comp, arena, "crtn.s"),
.extra_flags = args.items,
@@ -60,7 +60,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
"-fno-stack-protector",
"-DCRT",
});
- return comp.build_crt_file("crt1", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("crt1", .Obj, .@"musl crt1.o", prog_node, &.{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "musl", "crt", "crt1.c",
@@ -77,7 +77,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
"-fno-stack-protector",
"-DCRT",
});
- return comp.build_crt_file("rcrt1", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("rcrt1", .Obj, .@"musl rcrt1.o", prog_node, &.{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "musl", "crt", "rcrt1.c",
@@ -94,7 +94,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
"-fno-stack-protector",
"-DCRT",
});
- return comp.build_crt_file("Scrt1", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("Scrt1", .Obj, .@"musl Scrt1.o", prog_node, &.{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "musl", "crt", "Scrt1.c",
@@ -187,7 +187,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.extra_flags = args.items,
};
}
- return comp.build_crt_file("c", .Lib, c_source_files.items);
+ return comp.build_crt_file("c", .Lib, .@"musl libc.a", prog_node, c_source_files.items);
},
.libc_so => {
const target = comp.getTarget();
@@ -241,7 +241,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
});
defer sub_compilation.destroy();
- try sub_compilation.updateSubCompilation();
+ try comp.updateSubCompilation(sub_compilation, .@"musl libc.so", prog_node);
try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
diff --git a/src/objcopy.zig b/src/objcopy.zig
index 31e3d60d0d..c3305e8c04 100644
--- a/src/objcopy.zig
+++ b/src/objcopy.zig
@@ -4,22 +4,25 @@ const fs = std.fs;
const elf = std.elf;
const Allocator = std.mem.Allocator;
const File = std.fs.File;
+const assert = std.debug.assert;
+
const main = @import("main.zig");
const fatal = main.fatal;
-const cleanExit = main.cleanExit;
+const Server = std.zig.Server;
+const build_options = @import("build_options");
pub fn cmdObjCopy(
gpa: Allocator,
arena: Allocator,
args: []const []const u8,
) !void {
- _ = gpa;
var i: usize = 0;
var opt_out_fmt: ?std.Target.ObjectFormat = null;
var opt_input: ?[]const u8 = null;
var opt_output: ?[]const u8 = null;
var only_section: ?[]const u8 = null;
var pad_to: ?u64 = null;
+ var listen = false;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (!mem.startsWith(u8, arg, "-")) {
@@ -54,6 +57,8 @@ pub fn cmdObjCopy(
i += 1;
if (i >= args.len) fatal("expected another argument after '{s}'", .{arg});
only_section = args[i];
+ } else if (mem.eql(u8, arg, "--listen=-")) {
+ listen = true;
} else if (mem.startsWith(u8, arg, "--only-section=")) {
only_section = arg["--output-target=".len..];
} else if (mem.eql(u8, arg, "--pad-to")) {
@@ -102,10 +107,45 @@ pub fn cmdObjCopy(
.only_section = only_section,
.pad_to = pad_to,
});
- return cleanExit();
},
else => fatal("unsupported output object format: {s}", .{@tagName(out_fmt)}),
}
+
+ if (listen) {
+ var server = try Server.init(.{
+ .gpa = gpa,
+ .in = std.io.getStdIn(),
+ .out = std.io.getStdOut(),
+ .zig_version = build_options.version,
+ });
+ defer server.deinit();
+
+ var seen_update = false;
+ while (true) {
+ const hdr = try server.receiveMessage();
+ switch (hdr.tag) {
+ .exit => {
+ return std.process.cleanExit();
+ },
+ .update => {
+ if (seen_update) {
+ std.debug.print("zig objcopy only supports 1 update for now\n", .{});
+ std.process.exit(1);
+ }
+ seen_update = true;
+
+ try server.serveEmitBinPath(output, .{
+ .flags = .{ .cache_hit = false },
+ });
+ },
+ else => {
+ std.debug.print("unsupported message: {s}", .{@tagName(hdr.tag)});
+ std.process.exit(1);
+ },
+ }
+ }
+ }
+ return std.process.cleanExit();
}
const usage =
@@ -417,7 +457,7 @@ const HexWriter = struct {
}
fn Address(address: u32) Record {
- std.debug.assert(address > 0xFFFF);
+ assert(address > 0xFFFF);
const segment = @intCast(u16, address / 0x10000);
if (address > 0xFFFFF) {
return Record{
@@ -460,7 +500,7 @@ const HexWriter = struct {
const BUFSIZE = 1 + (1 + 2 + 1 + MAX_PAYLOAD_LEN + 1) * 2 + linesep.len;
var outbuf: [BUFSIZE]u8 = undefined;
const payload_bytes = self.getPayloadBytes();
- std.debug.assert(payload_bytes.len <= MAX_PAYLOAD_LEN);
+ assert(payload_bytes.len <= MAX_PAYLOAD_LEN);
const line = try std.fmt.bufPrint(&outbuf, ":{0X:0>2}{1X:0>4}{2X:0>2}{3s}{4X:0>2}" ++ linesep, .{
@intCast(u8, payload_bytes.len),
diff --git a/src/print_air.zig b/src/print_air.zig
index 447af5a9c7..f5c06daae2 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -194,6 +194,7 @@ const Writer = struct {
.c_va_end,
=> try w.writeUnOp(s, inst),
+ .trap,
.breakpoint,
.unreach,
.ret_addr,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index fb9031296d..5e7d0d45de 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -196,7 +196,6 @@ const Writer = struct {
.error_name,
.panic,
.panic_comptime,
- .set_cold,
.set_runtime_safety,
.sqrt,
.sin,
@@ -411,6 +410,7 @@ const Writer = struct {
.alloc_inferred_comptime_mut,
.ret_ptr,
.ret_type,
+ .trap,
=> try self.writeNode(stream, inst),
.error_value,
@@ -503,6 +503,7 @@ const Writer = struct {
.fence,
.set_float_mode,
.set_align_stack,
+ .set_cold,
.wasm_memory_size,
.error_to_int,
.int_to_error,
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 2fe0cd2b6a..4d16348c27 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -19,6 +19,9 @@ pub const AllocateRegistersError = error{
/// Can happen when spilling an instruction in codegen runs out of
/// memory, so we propagate that error
OutOfMemory,
+ /// Can happen when spilling an instruction in codegen triggers integer
+ /// overflow, so we propagate that error
+ Overflow,
/// Can happen when spilling an instruction triggers a codegen
/// error, so we propagate that error
CodegenFail,
diff --git a/src/test.zig b/src/test.zig
deleted file mode 100644
index 61cdb705e3..0000000000
--- a/src/test.zig
+++ /dev/null
@@ -1,1984 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const Allocator = std.mem.Allocator;
-const CrossTarget = std.zig.CrossTarget;
-const print = std.debug.print;
-const assert = std.debug.assert;
-
-const link = @import("link.zig");
-const Compilation = @import("Compilation.zig");
-const Package = @import("Package.zig");
-const introspect = @import("introspect.zig");
-const build_options = @import("build_options");
-const ThreadPool = @import("ThreadPool.zig");
-const WaitGroup = @import("WaitGroup.zig");
-const zig_h = link.File.C.zig_h;
-
-const enable_qemu: bool = build_options.enable_qemu;
-const enable_wine: bool = build_options.enable_wine;
-const enable_wasmtime: bool = build_options.enable_wasmtime;
-const enable_darling: bool = build_options.enable_darling;
-const enable_rosetta: bool = build_options.enable_rosetta;
-const glibc_runtimes_dir: ?[]const u8 = build_options.glibc_runtimes_dir;
-const skip_stage1 = true;
-
-const hr = "=" ** 80;
-
-test {
- const use_gpa = build_options.force_gpa or !builtin.link_libc;
- const gpa = gpa: {
- if (use_gpa) {
- break :gpa std.testing.allocator;
- }
- // We would prefer to use raw libc allocator here, but cannot
- // use it if it won't support the alignment we need.
- if (@alignOf(std.c.max_align_t) < @alignOf(i128)) {
- break :gpa std.heap.c_allocator;
- }
- break :gpa std.heap.raw_c_allocator;
- };
-
- var arena_allocator = std.heap.ArenaAllocator.init(gpa);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
- var ctx = TestContext.init(gpa, arena);
- defer ctx.deinit();
-
- {
- const dir_path = try std.fs.path.join(arena, &.{
- std.fs.path.dirname(@src().file).?, "..", "test", "cases",
- });
-
- var dir = try std.fs.cwd().openIterableDir(dir_path, .{});
- defer dir.close();
-
- ctx.addTestCasesFromDir(dir);
- }
-
- try @import("../test/cases.zig").addCases(&ctx);
-
- try ctx.run();
-}
-
-const ErrorMsg = union(enum) {
- src: struct {
- src_path: []const u8,
- msg: []const u8,
- // maxint means match anything
- // this is a workaround for stage1 compiler bug I ran into when making it ?u32
- line: u32,
- // maxint means match anything
- // this is a workaround for stage1 compiler bug I ran into when making it ?u32
- column: u32,
- kind: Kind,
- count: u32,
- },
- plain: struct {
- msg: []const u8,
- kind: Kind,
- count: u32,
- },
-
- const Kind = enum {
- @"error",
- note,
- };
-
- fn init(other: Compilation.AllErrors.Message, kind: Kind) ErrorMsg {
- switch (other) {
- .src => |src| return .{
- .src = .{
- .src_path = src.src_path,
- .msg = src.msg,
- .line = @intCast(u32, src.line),
- .column = @intCast(u32, src.column),
- .kind = kind,
- .count = src.count,
- },
- },
- .plain => |plain| return .{
- .plain = .{
- .msg = plain.msg,
- .kind = kind,
- .count = plain.count,
- },
- },
- }
- }
-
- pub fn format(
- self: ErrorMsg,
- comptime fmt: []const u8,
- options: std.fmt.FormatOptions,
- writer: anytype,
- ) !void {
- _ = fmt;
- _ = options;
- switch (self) {
- .src => |src| {
- if (!std.mem.eql(u8, src.src_path, "?") or
- src.line != std.math.maxInt(u32) or
- src.column != std.math.maxInt(u32))
- {
- try writer.print("{s}:", .{src.src_path});
- if (src.line != std.math.maxInt(u32)) {
- try writer.print("{d}:", .{src.line + 1});
- } else {
- try writer.writeAll("?:");
- }
- if (src.column != std.math.maxInt(u32)) {
- try writer.print("{d}: ", .{src.column + 1});
- } else {
- try writer.writeAll("?: ");
- }
- }
- try writer.print("{s}: {s}", .{ @tagName(src.kind), src.msg });
- if (src.count != 1) {
- try writer.print(" ({d} times)", .{src.count});
- }
- },
- .plain => |plain| {
- try writer.print("{s}: {s}", .{ @tagName(plain.kind), plain.msg });
- if (plain.count != 1) {
- try writer.print(" ({d} times)", .{plain.count});
- }
- },
- }
- }
-};
-
-/// Default config values for known test manifest key-value pairings.
-/// Currently handled defaults are:
-/// * backend
-/// * target
-/// * output_mode
-/// * is_test
-const TestManifestConfigDefaults = struct {
- /// Asserts if the key doesn't exist - yep, it's an oversight alright.
- fn get(@"type": TestManifest.Type, key: []const u8) []const u8 {
- if (std.mem.eql(u8, key, "backend")) {
- return "stage2";
- } else if (std.mem.eql(u8, key, "target")) {
- comptime {
- var defaults: []const u8 = "";
- // TODO should we only return "mainstream" targets by default here?
- // TODO we should also specify ABIs explicitly as the backends are
- // getting more and more complete
- // Linux
- inline for (&[_][]const u8{ "x86_64", "arm", "aarch64" }) |arch| {
- defaults = defaults ++ arch ++ "-linux" ++ ",";
- }
- // macOS
- inline for (&[_][]const u8{ "x86_64", "aarch64" }) |arch| {
- defaults = defaults ++ arch ++ "-macos" ++ ",";
- }
- // Windows
- defaults = defaults ++ "x86_64-windows" ++ ",";
- // Wasm
- defaults = defaults ++ "wasm32-wasi";
- return defaults;
- }
- } else if (std.mem.eql(u8, key, "output_mode")) {
- return switch (@"type") {
- .@"error" => "Obj",
- .run => "Exe",
- .cli => @panic("TODO test harness for CLI tests"),
- };
- } else if (std.mem.eql(u8, key, "is_test")) {
- return "0";
- } else unreachable;
- }
-};
-
-/// Manifest syntax example:
-/// (see https://github.com/ziglang/zig/issues/11288)
-///
-/// error
-/// backend=stage1,stage2
-/// output_mode=exe
-///
-/// :3:19: error: foo
-///
-/// run
-/// target=x86_64-linux,aarch64-macos
-///
-/// I am expected stdout! Hello!
-///
-/// cli
-///
-/// build test
-const TestManifest = struct {
- type: Type,
- config_map: std.StringHashMap([]const u8),
- trailing_bytes: []const u8 = "",
-
- const Type = enum {
- @"error",
- run,
- cli,
- };
-
- const TrailingIterator = struct {
- inner: std.mem.TokenIterator(u8),
-
- fn next(self: *TrailingIterator) ?[]const u8 {
- const next_inner = self.inner.next() orelse return null;
- return std.mem.trim(u8, next_inner[2..], " \t");
- }
- };
-
- fn ConfigValueIterator(comptime T: type) type {
- return struct {
- inner: std.mem.SplitIterator(u8),
-
- fn next(self: *@This()) !?T {
- const next_raw = self.inner.next() orelse return null;
- const parseFn = getDefaultParser(T);
- return try parseFn(next_raw);
- }
- };
- }
-
- fn parse(arena: Allocator, bytes: []const u8) !TestManifest {
- // The manifest is the last contiguous block of comments in the file
- // We scan for the beginning by searching backward for the first non-empty line that does not start with "//"
- var start: ?usize = null;
- var end: usize = bytes.len;
- if (bytes.len > 0) {
- var cursor: usize = bytes.len - 1;
- while (true) {
- // Move to beginning of line
- while (cursor > 0 and bytes[cursor - 1] != '\n') cursor -= 1;
-
- if (std.mem.startsWith(u8, bytes[cursor..], "//")) {
- start = cursor; // Contiguous comment line, include in manifest
- } else {
- if (start != null) break; // Encountered non-comment line, end of manifest
-
- // We ignore all-whitespace lines following the comment block, but anything else
- // means that there is no manifest present.
- if (std.mem.trim(u8, bytes[cursor..end], " \r\n\t").len == 0) {
- end = cursor;
- } else break; // If it's not whitespace, there is no manifest
- }
-
- // Move to previous line
- if (cursor != 0) cursor -= 1 else break;
- }
- }
-
- const actual_start = start orelse return error.MissingTestManifest;
- const manifest_bytes = bytes[actual_start..end];
-
- var it = std.mem.tokenize(u8, manifest_bytes, "\r\n");
-
- // First line is the test type
- const tt: Type = blk: {
- const line = it.next() orelse return error.MissingTestCaseType;
- const raw = std.mem.trim(u8, line[2..], " \t");
- if (std.mem.eql(u8, raw, "error")) {
- break :blk .@"error";
- } else if (std.mem.eql(u8, raw, "run")) {
- break :blk .run;
- } else if (std.mem.eql(u8, raw, "cli")) {
- break :blk .cli;
- } else {
- std.log.warn("unknown test case type requested: {s}", .{raw});
- return error.UnknownTestCaseType;
- }
- };
-
- var manifest: TestManifest = .{
- .type = tt,
- .config_map = std.StringHashMap([]const u8).init(arena),
- };
-
- // Any subsequent line until a blank comment line is key=value(s) pair
- while (it.next()) |line| {
- const trimmed = std.mem.trim(u8, line[2..], " \t");
- if (trimmed.len == 0) break;
-
- // Parse key=value(s)
- var kv_it = std.mem.split(u8, trimmed, "=");
- const key = kv_it.first();
- try manifest.config_map.putNoClobber(key, kv_it.next() orelse return error.MissingValuesForConfig);
- }
-
- // Finally, trailing is expected output
- manifest.trailing_bytes = manifest_bytes[it.index..];
-
- return manifest;
- }
-
- fn getConfigForKey(
- self: TestManifest,
- key: []const u8,
- comptime T: type,
- ) ConfigValueIterator(T) {
- const bytes = self.config_map.get(key) orelse TestManifestConfigDefaults.get(self.type, key);
- return ConfigValueIterator(T){
- .inner = std.mem.split(u8, bytes, ","),
- };
- }
-
- fn getConfigForKeyAlloc(
- self: TestManifest,
- allocator: Allocator,
- key: []const u8,
- comptime T: type,
- ) ![]const T {
- var out = std.ArrayList(T).init(allocator);
- defer out.deinit();
- var it = self.getConfigForKey(key, T);
- while (try it.next()) |item| {
- try out.append(item);
- }
- return try out.toOwnedSlice();
- }
-
- fn getConfigForKeyAssertSingle(self: TestManifest, key: []const u8, comptime T: type) !T {
- var it = self.getConfigForKey(key, T);
- const res = (try it.next()) orelse unreachable;
- assert((try it.next()) == null);
- return res;
- }
-
- fn trailing(self: TestManifest) TrailingIterator {
- return .{
- .inner = std.mem.tokenize(u8, self.trailing_bytes, "\r\n"),
- };
- }
-
- fn trailingAlloc(self: TestManifest, allocator: Allocator) error{OutOfMemory}![]const []const u8 {
- var out = std.ArrayList([]const u8).init(allocator);
- defer out.deinit();
- var it = self.trailing();
- while (it.next()) |line| {
- try out.append(line);
- }
- return try out.toOwnedSlice();
- }
-
- fn ParseFn(comptime T: type) type {
- return fn ([]const u8) anyerror!T;
- }
-
- fn getDefaultParser(comptime T: type) ParseFn(T) {
- if (T == CrossTarget) return struct {
- fn parse(str: []const u8) anyerror!T {
- var opts = CrossTarget.ParseOptions{
- .arch_os_abi = str,
- };
- return try CrossTarget.parse(opts);
- }
- }.parse;
-
- switch (@typeInfo(T)) {
- .Int => return struct {
- fn parse(str: []const u8) anyerror!T {
- return try std.fmt.parseInt(T, str, 0);
- }
- }.parse,
- .Bool => return struct {
- fn parse(str: []const u8) anyerror!T {
- const as_int = try std.fmt.parseInt(u1, str, 0);
- return as_int > 0;
- }
- }.parse,
- .Enum => return struct {
- fn parse(str: []const u8) anyerror!T {
- return std.meta.stringToEnum(T, str) orelse {
- std.log.err("unknown enum variant for {s}: {s}", .{ @typeName(T), str });
- return error.UnknownEnumVariant;
- };
- }
- }.parse,
- .Struct => @compileError("no default parser for " ++ @typeName(T)),
- else => @compileError("no default parser for " ++ @typeName(T)),
- }
- }
-};
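A brief usage sketch of the accessors above, assuming `manifest` came from `TestManifest.parse`, `arena` is an arena allocator, and `Backend` is the enum defined later in this file; comma-separated values are split by `ConfigValueIterator` and converted by `getDefaultParser`:

```zig
// Hypothetical usage; `manifest`, `arena`, and `Backend` are assumed to be in scope.
const backends = try manifest.getConfigForKeyAlloc(arena, "backend", Backend);
// With "backend=stage2,llvm" in the manifest this yields &.{ .stage2, .llvm },
// parsed via std.meta.stringToEnum in the .Enum branch of getDefaultParser.
const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool);
_ = backends;
_ = is_test;
```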
-
-const TestStrategy = enum {
- /// Execute tests as independent compilations, unless they are explicitly
- /// incremental ("foo.0.zig", "foo.1.zig", etc.)
- independent,
- /// Execute all tests as incremental updates to a single compilation. Explicitly
- /// incremental tests ("foo.0.zig", "foo.1.zig", etc.) still execute in order
- incremental,
-};
-
-/// Iterates a set of filenames extracting batches that are either incremental
-/// ("foo.0.zig", "foo.1.zig", etc.) or independent ("foo.zig", "bar.zig", etc.).
-/// Assumes filenames are sorted.
-const TestIterator = struct {
- start: usize = 0,
- end: usize = 0,
- filenames: []const []const u8,
- /// reset on each call to `next`
- index: usize = 0,
-
- const Error = error{InvalidIncrementalTestIndex};
-
- fn next(it: *TestIterator) Error!?[]const []const u8 {
- try it.nextInner();
- if (it.start == it.end) return null;
- return it.filenames[it.start..it.end];
- }
-
- fn nextInner(it: *TestIterator) Error!void {
- it.start = it.end;
- if (it.end == it.filenames.len) return;
- if (it.end + 1 == it.filenames.len) {
- it.end += 1;
- return;
- }
-
- const remaining = it.filenames[it.end..];
- it.index = 0;
- while (it.index < remaining.len - 1) : (it.index += 1) {
- // First, check if this file is part of an incremental update sequence
- // Split filename into "<base_name>.<index>.<file_ext>"
- const prev_parts = getTestFileNameParts(remaining[it.index]);
- const new_parts = getTestFileNameParts(remaining[it.index + 1]);
-
- // If base_name and file_ext match, these files are in the same test sequence
- // and the new one should be the incremented version of the previous test
- if (std.mem.eql(u8, prev_parts.base_name, new_parts.base_name) and
- std.mem.eql(u8, prev_parts.file_ext, new_parts.file_ext))
- {
- // This is "foo.X.zig" followed by "foo.Y.zig". Make sure that X = Y + 1
- if (prev_parts.test_index == null)
- return error.InvalidIncrementalTestIndex;
- if (new_parts.test_index == null)
- return error.InvalidIncrementalTestIndex;
- if (new_parts.test_index.? != prev_parts.test_index.? + 1)
- return error.InvalidIncrementalTestIndex;
- } else {
- // This is not the same test sequence, so the new file must be the first file
- // in a new sequence ("*.0.zig") or an independent test file ("*.zig")
- if (new_parts.test_index != null and new_parts.test_index.? != 0)
- return error.InvalidIncrementalTestIndex;
-
- it.end += it.index + 1;
- break;
- }
- } else {
- it.end += remaining.len;
- }
- }
-
- /// In the event of an `error.InvalidIncrementalTestIndex`, this function can
- /// be used to find the current filename that was being processed.
- /// Asserts the iterator hasn't reached the end.
- fn currentFilename(it: TestIterator) []const u8 {
- assert(it.end != it.filenames.len);
- const remaining = it.filenames[it.end..];
- return remaining[it.index + 1];
- }
-};
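The batching behaviour can be illustrated with a small test; this sketch assumes `TestIterator`, `getTestFileNameParts`, and `const std = @import("std");` are in scope:

```zig
test "TestIterator groups incremental cases into one batch" {
    const names = [_][]const u8{ "bar.zig", "foo.0.zig", "foo.1.zig", "foo.2.zig" };
    var it = TestIterator{ .filenames = &names };
    // Independent test: a batch of one.
    try std.testing.expectEqual(@as(usize, 1), (try it.next()).?.len);
    // Incremental sequence: "foo.0.zig" through "foo.2.zig" come back together.
    try std.testing.expectEqual(@as(usize, 3), (try it.next()).?.len);
    try std.testing.expect((try it.next()) == null);
}
```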
-
-/// For a filename in the format "<base_name>.X.<file_ext>" or "<base_name>.<file_ext>", returns
-/// "<base_name>", "<file_ext>" and X parsed as a decimal number. If X is not present, or
-/// cannot be parsed as a decimal number, it is treated as part of <base_name>.
-fn getTestFileNameParts(name: []const u8) struct {
- base_name: []const u8,
- file_ext: []const u8,
- test_index: ?usize,
-} {
- const file_ext = std.fs.path.extension(name);
- const trimmed = name[0 .. name.len - file_ext.len]; // Trim off ".<file_ext>"
- const maybe_index = std.fs.path.extension(trimmed); // Extract ".X"
-
- // Attempt to parse index
- const index: ?usize = if (maybe_index.len > 0)
- std.fmt.parseInt(usize, maybe_index[1..], 10) catch null
- else
- null;
-
- // Adjust "<base_name>" extent based on parsing success
- const base_name_end = trimmed.len - if (index != null) maybe_index.len else 0;
- return .{
- .base_name = name[0..base_name_end],
- .file_ext = if (file_ext.len > 0) file_ext[1..] else file_ext,
- .test_index = index,
- };
-}
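For example (a sketch assuming this function and `std` are in scope):

```zig
test "getTestFileNameParts splits name, index, and extension" {
    const parts = getTestFileNameParts("foo.3.zig");
    try std.testing.expectEqualStrings("foo", parts.base_name);
    try std.testing.expectEqualStrings("zig", parts.file_ext);
    try std.testing.expectEqual(@as(?usize, 3), parts.test_index);

    // No index: "bar.zig" keeps its full stem as the base name.
    const plain = getTestFileNameParts("bar.zig");
    try std.testing.expectEqualStrings("bar", plain.base_name);
    try std.testing.expect(plain.test_index == null);
}
```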
-
-/// Sort test filenames in-place, so that incremental test cases ("foo.0.zig",
-/// "foo.1.zig", etc.) are contiguous and appear in numerical order.
-fn sortTestFilenames(filenames: [][]const u8) void {
- const Context = struct {
- pub fn lessThan(_: @This(), a: []const u8, b: []const u8) bool {
- const a_parts = getTestFileNameParts(a);
- const b_parts = getTestFileNameParts(b);
-
- // Sort "<base_name>.X.<file_ext>" based on "<base_name>" and "<file_ext>" first
- return switch (std.mem.order(u8, a_parts.base_name, b_parts.base_name)) {
- .lt => true,
- .gt => false,
- .eq => switch (std.mem.order(u8, a_parts.file_ext, b_parts.file_ext)) {
- .lt => true,
- .gt => false,
- .eq => {
- // a and b differ only in their ".X" part
-
- // Sort "<base_name>.<file_ext>" before any "<base_name>.X.<file_ext>"
- if (a_parts.test_index) |a_index| {
- if (b_parts.test_index) |b_index| {
- // Make sure that incremental tests appear in linear order
- return a_index < b_index;
- } else {
- return false;
- }
- } else {
- return b_parts.test_index != null;
- }
- },
- },
- };
- }
- };
- std.sort.sort([]const u8, filenames, Context{}, Context.lessThan);
-}
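A sketch of the resulting order, assuming `sortTestFilenames` and `std` are in scope:

```zig
test "sortTestFilenames orders incremental cases numerically" {
    var names = [_][]const u8{ "foo.1.zig", "bar.zig", "foo.0.zig" };
    sortTestFilenames(&names);
    try std.testing.expectEqualStrings("bar.zig", names[0]);
    try std.testing.expectEqualStrings("foo.0.zig", names[1]);
    try std.testing.expectEqualStrings("foo.1.zig", names[2]);
}
```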
-
-pub const TestContext = struct {
- gpa: Allocator,
- arena: Allocator,
- cases: std.ArrayList(Case),
-
- pub const Update = struct {
- /// The input to the current update. We simulate an incremental update
- /// with the file's contents changed to this value each update.
- ///
- /// This value can change entirely between updates, which would be akin
- /// to deleting the source file and creating a new one from scratch; or
- /// you can keep it mostly consistent, with small changes, testing the
- /// effects of the incremental compilation.
- src: [:0]const u8,
- name: []const u8,
- case: union(enum) {
- /// Check the main binary output file against an expected set of bytes.
- /// This is most useful with, for example, `-ofmt=c`.
- CompareObjectFile: []const u8,
- /// An error update attempts to compile bad code, and ensures that it
- /// fails to compile, and for the expected reasons.
- /// A slice containing the expected errors *in sequential order*.
- Error: []const ErrorMsg,
- /// An execution update compiles and runs the input, testing the
- /// stdout against the expected results
- /// This is a slice containing the expected message.
- Execution: []const u8,
- /// A header update compiles the input with the equivalent of
- /// `-femit-h` and tests the produced header against the
- /// expected result
- Header: []const u8,
- },
- };
-
- pub const File = struct {
- /// Contents of the importable file. Doesn't yet support incremental updates.
- src: [:0]const u8,
- path: []const u8,
- };
-
- pub const DepModule = struct {
- name: []const u8,
- path: []const u8,
- };
-
- pub const Backend = enum {
- stage1,
- stage2,
- llvm,
- };
-
- /// A `Case` consists of a list of `Update`. The same `Compilation` is used for each
- /// update, so each update's source is treated as a single file being
- /// updated by the test harness and incrementally compiled.
- pub const Case = struct {
- /// The name of the test case. This is shown if a test fails, and
- /// otherwise ignored.
- name: []const u8,
- /// The platform the test targets. For non-native platforms, an emulator
- /// such as QEMU is required for tests to complete.
- target: CrossTarget,
- /// In order to be able to run e.g. Execution updates, this must be set
- /// to Executable.
- output_mode: std.builtin.OutputMode,
- optimize_mode: std.builtin.Mode = .Debug,
- updates: std.ArrayList(Update),
- emit_h: bool = false,
- is_test: bool = false,
- expect_exact: bool = false,
- backend: Backend = .stage2,
- link_libc: bool = false,
-
- files: std.ArrayList(File),
- deps: std.ArrayList(DepModule),
-
- result: anyerror!void = {},
-
- pub fn addSourceFile(case: *Case, name: []const u8, src: [:0]const u8) void {
- case.files.append(.{ .path = name, .src = src }) catch @panic("out of memory");
- }
-
- pub fn addDepModule(case: *Case, name: []const u8, path: []const u8) void {
- case.deps.append(.{
- .name = name,
- .path = path,
- }) catch @panic("out of memory");
- }
-
- /// Adds a subcase in which the module is updated with `src`, and a C
- /// header is generated.
- pub fn addHeader(self: *Case, src: [:0]const u8, result: [:0]const u8) void {
- self.emit_h = true;
- self.updates.append(.{
- .src = src,
- .name = "update",
- .case = .{ .Header = result },
- }) catch @panic("out of memory");
- }
-
- /// Adds a subcase in which the module is updated with `src`, compiled,
- /// run, and the output is tested against `result`.
- pub fn addCompareOutput(self: *Case, src: [:0]const u8, result: []const u8) void {
- self.updates.append(.{
- .src = src,
- .name = "update",
- .case = .{ .Execution = result },
- }) catch @panic("out of memory");
- }
-
- /// Adds a subcase in which the module is updated with `src`, compiled,
- /// and the object file data is compared against `result`.
- pub fn addCompareObjectFile(self: *Case, src: [:0]const u8, result: []const u8) void {
- self.updates.append(.{
- .src = src,
- .name = "update",
- .case = .{ .CompareObjectFile = result },
- }) catch @panic("out of memory");
- }
-
- pub fn addError(self: *Case, src: [:0]const u8, errors: []const []const u8) void {
- return self.addErrorNamed("update", src, errors);
- }
-
- /// Adds a subcase in which the module is updated with `src`, which
- /// should contain invalid input, and ensures that compilation fails
- /// for the expected reasons, given in sequential order in `errors` in
- /// the form `:line:column: error: message`.
- pub fn addErrorNamed(
- self: *Case,
- name: []const u8,
- src: [:0]const u8,
- errors: []const []const u8,
- ) void {
- var array = self.updates.allocator.alloc(ErrorMsg, errors.len) catch @panic("out of memory");
- for (errors, 0..) |err_msg_line, i| {
- if (std.mem.startsWith(u8, err_msg_line, "error: ")) {
- array[i] = .{
- .plain = .{
- .msg = err_msg_line["error: ".len..],
- .kind = .@"error",
- .count = 1,
- },
- };
- continue;
- } else if (std.mem.startsWith(u8, err_msg_line, "note: ")) {
- array[i] = .{
- .plain = .{
- .msg = err_msg_line["note: ".len..],
- .kind = .note,
- .count = 1,
- },
- };
- continue;
- }
- // example: "file.zig:1:2: error: bad thing happened"
- var it = std.mem.split(u8, err_msg_line, ":");
- const src_path = it.first();
- const line_text = it.next() orelse @panic("missing line");
- const col_text = it.next() orelse @panic("missing column");
- const kind_text = it.next() orelse @panic("missing 'error'/'note'");
- var msg = it.rest()[1..]; // skip over the space at end of "error: "
-
- const line: ?u32 = if (std.mem.eql(u8, line_text, "?"))
- null
- else
- std.fmt.parseInt(u32, line_text, 10) catch @panic("bad line number");
- const column: ?u32 = if (std.mem.eql(u8, line_text, "?"))
- null
- else
- std.fmt.parseInt(u32, col_text, 10) catch @panic("bad column number");
- const kind: ErrorMsg.Kind = if (std.mem.eql(u8, kind_text, " error"))
- .@"error"
- else if (std.mem.eql(u8, kind_text, " note"))
- .note
- else
- @panic("expected 'error'/'note'");
-
- const line_0based: u32 = if (line) |n| blk: {
- if (n == 0) {
- print("{s}: line must be specified starting at one\n", .{self.name});
- return;
- }
- break :blk n - 1;
- } else std.math.maxInt(u32);
-
- const column_0based: u32 = if (column) |n| blk: {
- if (n == 0) {
- print("{s}: line must be specified starting at one\n", .{self.name});
- return;
- }
- break :blk n - 1;
- } else std.math.maxInt(u32);
-
- const suffix = " times)";
- const count = if (std.mem.endsWith(u8, msg, suffix)) count: {
- const lparen = std.mem.lastIndexOfScalar(u8, msg, '(').?;
- const count = std.fmt.parseInt(u32, msg[lparen + 1 .. msg.len - suffix.len], 10) catch @panic("bad error note count number");
- msg = msg[0 .. lparen - 1];
- break :count count;
- } else 1;
-
- array[i] = .{
- .src = .{
- .src_path = src_path,
- .msg = msg,
- .line = line_0based,
- .column = column_0based,
- .kind = kind,
- .count = count,
- },
- };
- }
- self.updates.append(.{
- .src = src,
- .name = name,
- .case = .{ .Error = array },
- }) catch @panic("out of memory");
- }
-
- /// Adds a subcase in which the module is updated with `src`, and
- /// asserts that it compiles without issue
- pub fn compiles(self: *Case, src: [:0]const u8) void {
- self.addError(src, &[_][]const u8{});
- }
- };
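As a usage sketch (hypothetical; `ctx` is a `*TestContext` supplied by the harness), a case chains updates against the same compilation, and expected errors use the `:line:column: kind: message` form parsed by `addErrorNamed` above:

```zig
// Hypothetical usage; `ctx` is a *TestContext provided by the test harness.
var case = ctx.exe("hello example", .{});
case.addCompareOutput(
    \\pub fn main() void {
    \\    @import("std").io.getStdOut().writeAll("hello\n") catch unreachable;
    \\}
, "hello\n");
// A second update on the same case exercises incremental compilation.
case.addError(
    \\pub fn main() void {
    \\    bad();
    \\}
, &.{":2:5: error: use of undeclared identifier 'bad'"});
```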
-
- pub fn addExe(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- ) *Case {
- ctx.cases.append(Case{
- .name = name,
- .target = target,
- .updates = std.ArrayList(Update).init(ctx.cases.allocator),
- .output_mode = .Exe,
- .files = std.ArrayList(File).init(ctx.arena),
- .deps = std.ArrayList(DepModule).init(ctx.arena),
- }) catch @panic("out of memory");
- return &ctx.cases.items[ctx.cases.items.len - 1];
- }
-
- /// Adds a test case for Zig input, producing an executable
- pub fn exe(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
- return ctx.addExe(name, target);
- }
-
- pub fn exeFromCompiledC(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
- const prefixed_name = std.fmt.allocPrint(ctx.arena, "CBE: {s}", .{name}) catch
- @panic("out of memory");
- var target_adjusted = target;
- target_adjusted.ofmt = std.Target.ObjectFormat.c;
- ctx.cases.append(Case{
- .name = prefixed_name,
- .target = target_adjusted,
- .updates = std.ArrayList(Update).init(ctx.cases.allocator),
- .output_mode = .Exe,
- .files = std.ArrayList(File).init(ctx.arena),
- .deps = std.ArrayList(DepModule).init(ctx.arena),
- .link_libc = true,
- }) catch @panic("out of memory");
- return &ctx.cases.items[ctx.cases.items.len - 1];
- }
-
- /// Adds a test case that uses the LLVM backend to emit an executable.
- /// Currently this implies linking libc, because only then we can generate a testable executable.
- pub fn exeUsingLlvmBackend(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
- ctx.cases.append(Case{
- .name = name,
- .target = target,
- .updates = std.ArrayList(Update).init(ctx.cases.allocator),
- .output_mode = .Exe,
- .files = std.ArrayList(File).init(ctx.arena),
- .deps = std.ArrayList(DepModule).init(ctx.arena),
- .backend = .llvm,
- .link_libc = true,
- }) catch @panic("out of memory");
- return &ctx.cases.items[ctx.cases.items.len - 1];
- }
-
- pub fn addObj(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- ) *Case {
- ctx.cases.append(Case{
- .name = name,
- .target = target,
- .updates = std.ArrayList(Update).init(ctx.cases.allocator),
- .output_mode = .Obj,
- .files = std.ArrayList(File).init(ctx.arena),
- .deps = std.ArrayList(DepModule).init(ctx.arena),
- }) catch @panic("out of memory");
- return &ctx.cases.items[ctx.cases.items.len - 1];
- }
-
- pub fn addTest(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- ) *Case {
- ctx.cases.append(Case{
- .name = name,
- .target = target,
- .updates = std.ArrayList(Update).init(ctx.cases.allocator),
- .output_mode = .Exe,
- .is_test = true,
- .files = std.ArrayList(File).init(ctx.arena),
- .deps = std.ArrayList(DepModule).init(ctx.arena),
- }) catch @panic("out of memory");
- return &ctx.cases.items[ctx.cases.items.len - 1];
- }
-
- /// Adds a test case for Zig input, producing an object file.
- pub fn obj(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
- return ctx.addObj(name, target);
- }
-
- /// Adds a test case for ZIR input, producing an object file.
- pub fn objZIR(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
- return ctx.addObj(name, target, .ZIR);
- }
-
- /// Adds a test case for Zig or ZIR input, producing C code.
- pub fn addC(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
- var target_adjusted = target;
- target_adjusted.ofmt = std.Target.ObjectFormat.c;
- ctx.cases.append(Case{
- .name = name,
- .target = target_adjusted,
- .updates = std.ArrayList(Update).init(ctx.cases.allocator),
- .output_mode = .Obj,
- .files = std.ArrayList(File).init(ctx.arena),
- .deps = std.ArrayList(DepModule).init(ctx.arena),
- }) catch @panic("out of memory");
- return &ctx.cases.items[ctx.cases.items.len - 1];
- }
-
- pub fn c(ctx: *TestContext, name: []const u8, target: CrossTarget, src: [:0]const u8, comptime out: [:0]const u8) void {
- ctx.addC(name, target).addCompareObjectFile(src, zig_h ++ out);
- }
-
- pub fn h(ctx: *TestContext, name: []const u8, target: CrossTarget, src: [:0]const u8, comptime out: [:0]const u8) void {
- ctx.addC(name, target).addHeader(src, zig_h ++ out);
- }
-
- pub fn objErrStage1(
- ctx: *TestContext,
- name: []const u8,
- src: [:0]const u8,
- expected_errors: []const []const u8,
- ) void {
- const case = ctx.addObj(name, .{});
- case.backend = .stage1;
- case.addError(src, expected_errors);
- }
-
- pub fn testErrStage1(
- ctx: *TestContext,
- name: []const u8,
- src: [:0]const u8,
- expected_errors: []const []const u8,
- ) void {
- const case = ctx.addTest(name, .{});
- case.backend = .stage1;
- case.addError(src, expected_errors);
- }
-
- pub fn exeErrStage1(
- ctx: *TestContext,
- name: []const u8,
- src: [:0]const u8,
- expected_errors: []const []const u8,
- ) void {
- const case = ctx.addExe(name, .{});
- case.backend = .stage1;
- case.addError(src, expected_errors);
- }
-
- pub fn addCompareOutput(
- ctx: *TestContext,
- name: []const u8,
- src: [:0]const u8,
- expected_stdout: []const u8,
- ) void {
- ctx.addExe(name, .{}).addCompareOutput(src, expected_stdout);
- }
-
- /// Adds a test case that compiles the Zig source given in `src`, executes
- /// it, runs it, and tests the output against `expected_stdout`
- pub fn compareOutput(
- ctx: *TestContext,
- name: []const u8,
- src: [:0]const u8,
- expected_stdout: []const u8,
- ) void {
- return ctx.addCompareOutput(name, src, expected_stdout);
- }
-
- /// Adds a test case that compiles the ZIR source given in `src`, executes
- /// it, runs it, and tests the output against `expected_stdout`
- pub fn compareOutputZIR(
- ctx: *TestContext,
- name: []const u8,
- src: [:0]const u8,
- expected_stdout: []const u8,
- ) void {
- ctx.addCompareOutput(name, .ZIR, src, expected_stdout);
- }
-
- pub fn addTransform(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- result: [:0]const u8,
- ) void {
- ctx.addObj(name, target).addTransform(src, result);
- }
-
- /// Adds a test case that compiles the Zig given in `src` to ZIR and tests
- /// the ZIR against `result`
- pub fn transform(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- result: [:0]const u8,
- ) void {
- ctx.addTransform(name, target, src, result);
- }
-
- pub fn addError(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- expected_errors: []const []const u8,
- ) void {
- ctx.addObj(name, target).addError(src, expected_errors);
- }
-
- /// Adds a test case that ensures that the Zig given in `src` fails to
- /// compile for the expected reasons, given in sequential order in
- /// `expected_errors` in the form `:line:column: error: message`.
- pub fn compileError(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- expected_errors: []const []const u8,
- ) void {
- ctx.addError(name, target, src, expected_errors);
- }
-
- /// Adds a test case that ensures that the ZIR given in `src` fails to
- /// compile for the expected reasons, given in sequential order in
- /// `expected_errors` in the form `:line:column: error: message`.
- pub fn compileErrorZIR(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- expected_errors: []const []const u8,
- ) void {
- ctx.addError(name, target, .ZIR, src, expected_errors);
- }
-
- pub fn addCompiles(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- ) void {
- ctx.addObj(name, target).compiles(src);
- }
-
- /// Adds a test case that asserts that the Zig given in `src` compiles
- /// without any errors.
- pub fn compiles(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- ) void {
- ctx.addCompiles(name, target, src);
- }
-
- /// Adds a test case that asserts that the ZIR given in `src` compiles
- /// without any errors.
- pub fn compilesZIR(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- ) void {
- ctx.addCompiles(name, target, .ZIR, src);
- }
-
- /// Adds a test case that first ensures that the Zig given in `src` fails
- /// to compile for the reasons given in sequential order in
- /// `expected_errors` in the form `:line:column: error: message`, then
- /// asserts that fixing the source (updating with `fixed_src`) isn't broken
- /// by incremental compilation.
- pub fn incrementalFailure(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- expected_errors: []const []const u8,
- fixed_src: [:0]const u8,
- ) void {
- var case = ctx.addObj(name, target);
- case.addError(src, expected_errors);
- case.compiles(fixed_src);
- }
-
- /// Adds a test case that first ensures that the ZIR given in `src` fails
- /// to compile for the reasons given in sequential order in
- /// `expected_errors` in the form `:line:column: error: message`, then
- /// asserts that fixing the source (updating with `fixed_src`) isn't broken
- /// by incremental compilation.
- pub fn incrementalFailureZIR(
- ctx: *TestContext,
- name: []const u8,
- target: CrossTarget,
- src: [:0]const u8,
- expected_errors: []const []const u8,
- fixed_src: [:0]const u8,
- ) void {
- var case = ctx.addObj(name, target, .ZIR);
- case.addError(src, expected_errors);
- case.compiles(fixed_src);
- }
-
- /// Adds a test for each file in the provided directory.
- /// Testing strategy (TestStrategy) is inferred automatically from filenames.
- /// Recurses nested directories.
- ///
- /// Each file should include a test manifest as a contiguous block of comments at
- /// the end of the file. The first line should be the test type, followed by a set of
- /// key-value config values, followed by a blank line, then the expected output.
- pub fn addTestCasesFromDir(ctx: *TestContext, dir: std.fs.IterableDir) void {
- var current_file: []const u8 = "none";
- ctx.addTestCasesFromDirInner(dir, &current_file) catch |err| {
- std.debug.panic("test harness failed to process file '{s}': {s}\n", .{
- current_file, @errorName(err),
- });
- };
- }
-
- fn addTestCasesFromDirInner(
- ctx: *TestContext,
- iterable_dir: std.fs.IterableDir,
- /// This is kept up to date with the currently being processed file so
- /// that if any errors occur the caller knows it happened during this file.
- current_file: *[]const u8,
- ) !void {
- var it = try iterable_dir.walk(ctx.arena);
- var filenames = std.ArrayList([]const u8).init(ctx.arena);
-
- while (try it.next()) |entry| {
- if (entry.kind != .File) continue;
-
- // Ignore stuff such as .swp files
- switch (Compilation.classifyFileExt(entry.basename)) {
- .unknown => continue,
- else => {},
- }
- try filenames.append(try ctx.arena.dupe(u8, entry.path));
- }
-
- // Sort filenames, so that incremental tests are contiguous and in-order
- sortTestFilenames(filenames.items);
-
- var test_it = TestIterator{ .filenames = filenames.items };
- while (test_it.next()) |maybe_batch| {
- const batch = maybe_batch orelse break;
- const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent;
- var cases = std.ArrayList(usize).init(ctx.arena);
-
- for (batch) |filename| {
- current_file.* = filename;
-
- const max_file_size = 10 * 1024 * 1024;
- const src = try iterable_dir.dir.readFileAllocOptions(ctx.arena, filename, max_file_size, null, 1, 0);
-
- // Parse the manifest
- var manifest = try TestManifest.parse(ctx.arena, src);
-
- if (cases.items.len == 0) {
- const backends = try manifest.getConfigForKeyAlloc(ctx.arena, "backend", Backend);
- const targets = try manifest.getConfigForKeyAlloc(ctx.arena, "target", CrossTarget);
- const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool);
- const output_mode = try manifest.getConfigForKeyAssertSingle("output_mode", std.builtin.OutputMode);
-
- const name_prefix = blk: {
- const ext_index = std.mem.lastIndexOfScalar(u8, current_file.*, '.') orelse
- return error.InvalidFilename;
- const index = std.mem.lastIndexOfScalar(u8, current_file.*[0..ext_index], '.') orelse ext_index;
- break :blk current_file.*[0..index];
- };
-
- // Cross-product to get all possible test combinations
- for (backends) |backend| {
- for (targets) |target| {
- const name = try std.fmt.allocPrint(ctx.arena, "{s} ({s}, {s})", .{
- name_prefix,
- @tagName(backend),
- try target.zigTriple(ctx.arena),
- });
- const next = ctx.cases.items.len;
- try ctx.cases.append(.{
- .name = name,
- .target = target,
- .backend = backend,
- .updates = std.ArrayList(TestContext.Update).init(ctx.cases.allocator),
- .is_test = is_test,
- .output_mode = output_mode,
- .link_libc = backend == .llvm,
- .files = std.ArrayList(TestContext.File).init(ctx.cases.allocator),
- .deps = std.ArrayList(DepModule).init(ctx.cases.allocator),
- });
- try cases.append(next);
- }
- }
- }
-
- for (cases.items) |case_index| {
- const case = &ctx.cases.items[case_index];
- switch (manifest.type) {
- .@"error" => {
- const errors = try manifest.trailingAlloc(ctx.arena);
- switch (strategy) {
- .independent => {
- case.addError(src, errors);
- },
- .incremental => {
- case.addErrorNamed("update", src, errors);
- },
- }
- },
- .run => {
- var output = std.ArrayList(u8).init(ctx.arena);
- var trailing_it = manifest.trailing();
- while (trailing_it.next()) |line| {
- try output.appendSlice(line);
- try output.append('\n');
- }
- if (output.items.len > 0) {
- try output.resize(output.items.len - 1);
- }
- case.addCompareOutput(src, try output.toOwnedSlice());
- },
- .cli => @panic("TODO cli tests"),
- }
- }
- }
- } else |err| {
- // make sure the current file is set to the file that produced an error
- current_file.* = test_it.currentFilename();
- return err;
- }
- }
-
- fn init(gpa: Allocator, arena: Allocator) TestContext {
- return .{
- .gpa = gpa,
- .cases = std.ArrayList(Case).init(gpa),
- .arena = arena,
- };
- }
-
- fn deinit(self: *TestContext) void {
- for (self.cases.items) |case| {
- for (case.updates.items) |u| {
- if (u.case == .Error) {
- case.updates.allocator.free(u.case.Error);
- }
- }
- case.updates.deinit();
- }
- self.cases.deinit();
- self.* = undefined;
- }
-
- fn run(self: *TestContext) !void {
- const host = try std.zig.system.NativeTargetInfo.detect(.{});
- const zig_exe_path = try std.process.getEnvVarOwned(self.arena, "ZIG_EXE");
-
- var progress = std.Progress{};
- const root_node = progress.start("compiler", self.cases.items.len);
- defer root_node.end();
-
- var zig_lib_directory = try introspect.findZigLibDir(self.gpa);
- defer zig_lib_directory.handle.close();
- defer self.gpa.free(zig_lib_directory.path.?);
-
- var aux_thread_pool: ThreadPool = undefined;
- try aux_thread_pool.init(self.gpa);
- defer aux_thread_pool.deinit();
-
- // Use the same global cache dir for all the tests, such that we for example don't have to
- // rebuild musl libc for every case (when LLVM backend is enabled).
- var global_tmp = std.testing.tmpDir(.{});
- defer global_tmp.cleanup();
-
- var cache_dir = try global_tmp.dir.makeOpenPath("zig-cache", .{});
- defer cache_dir.close();
- const tmp_dir_path = try std.fs.path.join(self.gpa, &[_][]const u8{ ".", "zig-cache", "tmp", &global_tmp.sub_path });
- defer self.gpa.free(tmp_dir_path);
-
- const global_cache_directory: Compilation.Directory = .{
- .handle = cache_dir,
- .path = try std.fs.path.join(self.gpa, &[_][]const u8{ tmp_dir_path, "zig-cache" }),
- };
- defer self.gpa.free(global_cache_directory.path.?);
-
- {
- for (self.cases.items) |*case| {
- if (build_options.skip_non_native) {
- if (case.target.getCpuArch() != builtin.cpu.arch)
- continue;
- if (case.target.getObjectFormat() != builtin.object_format)
- continue;
- }
-
- // Skip tests that require LLVM backend when it is not available
- if (!build_options.have_llvm and case.backend == .llvm)
- continue;
-
- if (skip_stage1 and case.backend == .stage1)
- continue;
-
- if (build_options.test_filter) |test_filter| {
- if (std.mem.indexOf(u8, case.name, test_filter) == null) continue;
- }
-
- var prg_node = root_node.start(case.name, case.updates.items.len);
- prg_node.activate();
- defer prg_node.end();
-
- case.result = runOneCase(
- self.gpa,
- &prg_node,
- case.*,
- zig_lib_directory,
- zig_exe_path,
- &aux_thread_pool,
- global_cache_directory,
- host,
- );
- }
- }
-
- var fail_count: usize = 0;
- for (self.cases.items) |*case| {
- case.result catch |err| {
- fail_count += 1;
- print("{s} failed: {s}\n", .{ case.name, @errorName(err) });
- };
- }
-
- if (fail_count != 0) {
- print("{d} tests failed\n", .{fail_count});
- return error.TestFailed;
- }
- }
-
- fn runOneCase(
- allocator: Allocator,
- root_node: *std.Progress.Node,
- case: Case,
- zig_lib_directory: Compilation.Directory,
- zig_exe_path: []const u8,
- thread_pool: *ThreadPool,
- global_cache_directory: Compilation.Directory,
- host: std.zig.system.NativeTargetInfo,
- ) !void {
- const target_info = try std.zig.system.NativeTargetInfo.detect(case.target);
- const target = target_info.target;
-
- var arena_allocator = std.heap.ArenaAllocator.init(allocator);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- var cache_dir = try tmp.dir.makeOpenPath("zig-cache", .{});
- defer cache_dir.close();
-
- const tmp_dir_path = try std.fs.path.join(
- arena,
- &[_][]const u8{ ".", "zig-cache", "tmp", &tmp.sub_path },
- );
- const tmp_dir_path_plus_slash = try std.fmt.allocPrint(
- arena,
- "{s}" ++ std.fs.path.sep_str,
- .{tmp_dir_path},
- );
- const local_cache_path = try std.fs.path.join(
- arena,
- &[_][]const u8{ tmp_dir_path, "zig-cache" },
- );
-
- for (case.files.items) |file| {
- try tmp.dir.writeFile(file.path, file.src);
- }
-
- if (case.backend == .stage1) {
- // stage1 backend has limitations:
- // * leaks memory
- // * calls exit() when a compile error happens
- // * cannot handle updates
- // because of this we must spawn a child process rather than
- // using Compilation directly.
-
- if (!std.process.can_spawn) {
- print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)});
- return; // Pass test.
- }
-
- assert(case.updates.items.len == 1);
- const update = case.updates.items[0];
- try tmp.dir.writeFile(tmp_src_path, update.src);
-
- var zig_args = std.ArrayList([]const u8).init(arena);
- try zig_args.append(zig_exe_path);
-
- if (case.is_test) {
- try zig_args.append("test");
- } else if (update.case == .Execution) {
- try zig_args.append("run");
- } else switch (case.output_mode) {
- .Obj => try zig_args.append("build-obj"),
- .Exe => try zig_args.append("build-exe"),
- .Lib => try zig_args.append("build-lib"),
- }
-
- try zig_args.append(try std.fs.path.join(arena, &.{ tmp_dir_path, tmp_src_path }));
-
- try zig_args.append("--name");
- try zig_args.append("test");
-
- try zig_args.append("--cache-dir");
- try zig_args.append(local_cache_path);
-
- try zig_args.append("--global-cache-dir");
- try zig_args.append(global_cache_directory.path orelse ".");
-
- if (!case.target.isNative()) {
- try zig_args.append("-target");
- try zig_args.append(try target.zigTriple(arena));
- }
-
- try zig_args.append("-O");
- try zig_args.append(@tagName(case.optimize_mode));
-
- // Prevent sub-process progress bar from interfering with the
- // one in this parent process.
- try zig_args.append("--color");
- try zig_args.append("off");
-
- const result = try std.ChildProcess.exec(.{
- .allocator = arena,
- .argv = zig_args.items,
- });
- switch (update.case) {
- .Error => |case_error_list| {
- switch (result.term) {
- .Exited => |code| {
- if (code == 0) {
- dumpArgs(zig_args.items);
- return error.CompilationIncorrectlySucceeded;
- }
- },
- else => {
- std.debug.print("{s}", .{result.stderr});
- dumpArgs(zig_args.items);
- return error.CompilationCrashed;
- },
- }
- var ok = true;
- if (case.expect_exact) {
- var err_iter = std.mem.split(u8, result.stderr, "\n");
- var i: usize = 0;
- ok = while (err_iter.next()) |line| : (i += 1) {
- if (i >= case_error_list.len) break false;
- const expected = try std.mem.replaceOwned(
- u8,
- arena,
- try std.fmt.allocPrint(arena, "{s}", .{case_error_list[i]}),
- "${DIR}",
- tmp_dir_path_plus_slash,
- );
-
- if (std.mem.indexOf(u8, line, expected) == null) break false;
- continue;
- } else true;
-
- ok = ok and i == case_error_list.len;
-
- if (!ok) {
- print("\n======== Expected these compile errors: ========\n", .{});
- for (case_error_list) |msg| {
- const expected = try std.fmt.allocPrint(arena, "{s}", .{msg});
- print("{s}\n", .{expected});
- }
- }
- } else {
- for (case_error_list) |msg| {
- const expected = try std.mem.replaceOwned(
- u8,
- arena,
- try std.fmt.allocPrint(arena, "{s}", .{msg}),
- "${DIR}",
- tmp_dir_path_plus_slash,
- );
- if (std.mem.indexOf(u8, result.stderr, expected) == null) {
- print(
- \\
- \\=========== Expected compile error: ============
- \\{s}
- \\
- , .{expected});
- ok = false;
- break;
- }
- }
- }
-
- if (!ok) {
- print(
- \\================= Full output: =================
- \\{s}
- \\================================================
- \\
- , .{result.stderr});
- return error.TestFailed;
- }
- },
- .CompareObjectFile => @panic("TODO implement in the test harness"),
- .Execution => |expected_stdout| {
- switch (result.term) {
- .Exited => |code| {
- if (code != 0) {
- std.debug.print("{s}", .{result.stderr});
- dumpArgs(zig_args.items);
- return error.CompilationFailed;
- }
- },
- else => {
- std.debug.print("{s}", .{result.stderr});
- dumpArgs(zig_args.items);
- return error.CompilationCrashed;
- },
- }
- try std.testing.expectEqualStrings("", result.stderr);
- try std.testing.expectEqualStrings(expected_stdout, result.stdout);
- },
- .Header => @panic("TODO implement in the test harness"),
- }
- return;
- }
-
- const zig_cache_directory: Compilation.Directory = .{
- .handle = cache_dir,
- .path = local_cache_path,
- };
-
- var main_pkg: Package = .{
- .root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir },
- .root_src_path = tmp_src_path,
- };
- defer {
- var it = main_pkg.table.iterator();
- while (it.next()) |kv| {
- allocator.free(kv.key_ptr.*);
- kv.value_ptr.*.destroy(allocator);
- }
- main_pkg.table.deinit(allocator);
- }
-
- for (case.deps.items) |dep| {
- var pkg = try Package.create(
- allocator,
- tmp_dir_path,
- dep.path,
- );
- errdefer pkg.destroy(allocator);
- try main_pkg.add(allocator, dep.name, pkg);
- }
-
- const bin_name = try std.zig.binNameAlloc(arena, .{
- .root_name = "test_case",
- .target = target,
- .output_mode = case.output_mode,
- });
-
- const emit_directory: Compilation.Directory = .{
- .path = tmp_dir_path,
- .handle = tmp.dir,
- };
- const emit_bin: Compilation.EmitLoc = .{
- .directory = emit_directory,
- .basename = bin_name,
- };
- const emit_h: ?Compilation.EmitLoc = if (case.emit_h) .{
- .directory = emit_directory,
- .basename = "test_case.h",
- } else null;
- const use_llvm: bool = switch (case.backend) {
- .llvm => true,
- else => false,
- };
- const comp = try Compilation.create(allocator, .{
- .local_cache_directory = zig_cache_directory,
- .global_cache_directory = global_cache_directory,
- .zig_lib_directory = zig_lib_directory,
- .thread_pool = thread_pool,
- .root_name = "test_case",
- .target = target,
- // TODO: support tests for object file building, and library builds
- // and linking. This will require a rework to support multi-file
- // tests.
- .output_mode = case.output_mode,
- .is_test = case.is_test,
- .optimize_mode = case.optimize_mode,
- .emit_bin = emit_bin,
- .emit_h = emit_h,
- .main_pkg = &main_pkg,
- .keep_source_files_loaded = true,
- .is_native_os = case.target.isNativeOs(),
- .is_native_abi = case.target.isNativeAbi(),
- .dynamic_linker = target_info.dynamic_linker.get(),
- .link_libc = case.link_libc,
- .use_llvm = use_llvm,
- .self_exe_path = zig_exe_path,
- // TODO instead of turning off color, pass in a std.Progress.Node
- .color = .off,
- .reference_trace = 0,
- // TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in
- // until the auto-select mechanism deems them worthy
- .use_lld = switch (case.backend) {
- .stage2 => false,
- else => null,
- },
- });
- defer comp.destroy();
-
- update: for (case.updates.items, 0..) |update, update_index| {
- var update_node = root_node.start(update.name, 3);
- update_node.activate();
- defer update_node.end();
-
- var sync_node = update_node.start("write", 0);
- sync_node.activate();
- try tmp.dir.writeFile(tmp_src_path, update.src);
- sync_node.end();
-
- var module_node = update_node.start("parse/analysis/codegen", 0);
- module_node.activate();
- module_node.context.refresh();
- try comp.makeBinFileWritable();
- try comp.update();
- module_node.end();
-
- if (update.case != .Error) {
- var all_errors = try comp.getAllErrorsAlloc();
- defer all_errors.deinit(allocator);
- if (all_errors.list.len != 0) {
- print(
- "\nCase '{s}': unexpected errors at update_index={d}:\n{s}\n",
- .{ case.name, update_index, hr },
- );
- for (all_errors.list) |err_msg| {
- switch (err_msg) {
- .src => |src| {
- print("{s}:{d}:{d}: error: {s}\n{s}\n", .{
- src.src_path, src.line + 1, src.column + 1, src.msg, hr,
- });
- },
- .plain => |plain| {
- print("error: {s}\n{s}\n", .{ plain.msg, hr });
- },
- }
- }
- // TODO print generated C code
- return error.UnexpectedCompileErrors;
- }
- }
-
- switch (update.case) {
- .Header => |expected_output| {
- var file = try tmp.dir.openFile("test_case.h", .{ .mode = .read_only });
- defer file.close();
- const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024);
-
- try std.testing.expectEqualStrings(expected_output, out);
- },
- .CompareObjectFile => |expected_output| {
- var file = try tmp.dir.openFile(bin_name, .{ .mode = .read_only });
- defer file.close();
- const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024);
-
- try std.testing.expectEqualStrings(expected_output, out);
- },
- .Error => |case_error_list| {
- var test_node = update_node.start("assert", 0);
- test_node.activate();
- defer test_node.end();
-
- const handled_errors = try arena.alloc(bool, case_error_list.len);
- std.mem.set(bool, handled_errors, false);
-
- var actual_errors = try comp.getAllErrorsAlloc();
- defer actual_errors.deinit(allocator);
-
- var any_failed = false;
- var notes_to_check = std.ArrayList(*const Compilation.AllErrors.Message).init(allocator);
- defer notes_to_check.deinit();
-
- for (actual_errors.list) |actual_error| {
- for (case_error_list, 0..) |case_msg, i| {
- if (handled_errors[i]) continue;
-
- const ex_tag: std.meta.Tag(@TypeOf(case_msg)) = case_msg;
- switch (actual_error) {
- .src => |actual_msg| {
- for (actual_msg.notes) |*note| {
- try notes_to_check.append(note);
- }
-
- if (ex_tag != .src) continue;
-
- const src_path_ok = case_msg.src.src_path.len == 0 or
- std.mem.eql(u8, case_msg.src.src_path, actual_msg.src_path);
-
- const expected_msg = try std.mem.replaceOwned(
- u8,
- arena,
- case_msg.src.msg,
- "${DIR}",
- tmp_dir_path_plus_slash,
- );
-
- var buf: [1024]u8 = undefined;
- const rendered_msg = blk: {
- var msg: Compilation.AllErrors.Message = actual_error;
- msg.src.src_path = case_msg.src.src_path;
- msg.src.notes = &.{};
- msg.src.source_line = null;
- var fib = std.io.fixedBufferStream(&buf);
- try msg.renderToWriter(.no_color, fib.writer(), "error", .Red, 0);
- var it = std.mem.split(u8, fib.getWritten(), "error: ");
- _ = it.first();
- const rendered = it.rest();
- break :blk rendered[0 .. rendered.len - 1]; // trim final newline
- };
-
- if (src_path_ok and
- (case_msg.src.line == std.math.maxInt(u32) or
- actual_msg.line == case_msg.src.line) and
- (case_msg.src.column == std.math.maxInt(u32) or
- actual_msg.column == case_msg.src.column) and
- std.mem.eql(u8, expected_msg, rendered_msg) and
- case_msg.src.kind == .@"error" and
- actual_msg.count == case_msg.src.count)
- {
- handled_errors[i] = true;
- break;
- }
- },
- .plain => |plain| {
- if (ex_tag != .plain) continue;
-
- if (std.mem.eql(u8, case_msg.plain.msg, plain.msg) and
- case_msg.plain.kind == .@"error" and
- case_msg.plain.count == plain.count)
- {
- handled_errors[i] = true;
- break;
- }
- },
- }
- } else {
- print(
- "\nUnexpected error:\n{s}\n{}\n{s}",
- .{ hr, ErrorMsg.init(actual_error, .@"error"), hr },
- );
- any_failed = true;
- }
- }
- while (notes_to_check.popOrNull()) |note| {
- for (case_error_list, 0..) |case_msg, i| {
- const ex_tag: std.meta.Tag(@TypeOf(case_msg)) = case_msg;
- switch (note.*) {
- .src => |actual_msg| {
- for (actual_msg.notes) |*sub_note| {
- try notes_to_check.append(sub_note);
- }
- if (ex_tag != .src) continue;
-
- const expected_msg = try std.mem.replaceOwned(
- u8,
- arena,
- case_msg.src.msg,
- "${DIR}",
- tmp_dir_path_plus_slash,
- );
-
- if ((case_msg.src.line == std.math.maxInt(u32) or
- actual_msg.line == case_msg.src.line) and
- (case_msg.src.column == std.math.maxInt(u32) or
- actual_msg.column == case_msg.src.column) and
- std.mem.eql(u8, expected_msg, actual_msg.msg) and
- case_msg.src.kind == .note and
- actual_msg.count == case_msg.src.count)
- {
- handled_errors[i] = true;
- break;
- }
- },
- .plain => |plain| {
- if (ex_tag != .plain) continue;
-
- if (std.mem.eql(u8, case_msg.plain.msg, plain.msg) and
- case_msg.plain.kind == .note and
- case_msg.plain.count == plain.count)
- {
- handled_errors[i] = true;
- break;
- }
- },
- }
- } else {
- print(
- "\nUnexpected note:\n{s}\n{}\n{s}",
- .{ hr, ErrorMsg.init(note.*, .note), hr },
- );
- any_failed = true;
- }
- }
-
- for (handled_errors, 0..) |handled, i| {
- if (!handled) {
- print(
- "\nExpected error not found:\n{s}\n{}\n{s}",
- .{ hr, case_error_list[i], hr },
- );
- any_failed = true;
- }
- }
-
- if (any_failed) {
- print("\nupdate_index={d}\n", .{update_index});
- return error.WrongCompileErrors;
- }
- },
- .Execution => |expected_stdout| {
- if (!std.process.can_spawn) {
- print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)});
- continue :update; // Pass test.
- }
-
- update_node.setEstimatedTotalItems(4);
-
- var argv = std.ArrayList([]const u8).init(allocator);
- defer argv.deinit();
-
- var exec_result = x: {
- var exec_node = update_node.start("execute", 0);
- exec_node.activate();
- defer exec_node.end();
-
- // We go out of our way here to use the unique temporary directory name in
- // the exe_path so that it makes its way into the cache hash, avoiding
- // cache collisions from multiple threads doing `zig run` at the same time
- // on the same test_case.c input filename.
- const ss = std.fs.path.sep_str;
- const exe_path = try std.fmt.allocPrint(
- arena,
- ".." ++ ss ++ "{s}" ++ ss ++ "{s}",
- .{ &tmp.sub_path, bin_name },
- );
- if (case.target.ofmt != null and case.target.ofmt.? == .c) {
- if (host.getExternalExecutor(target_info, .{ .link_libc = true }) != .native) {
- // We wouldn't be able to run the compiled C code.
- continue :update; // Pass test.
- }
- try argv.appendSlice(&[_][]const u8{
- zig_exe_path,
- "run",
- "-cflags",
- "-std=c99",
- "-pedantic",
- "-Werror",
- "-Wno-incompatible-library-redeclaration", // https://github.com/ziglang/zig/issues/875
- "--",
- "-lc",
- exe_path,
- });
- if (zig_lib_directory.path) |p| {
- try argv.appendSlice(&.{ "-I", p });
- }
- } else switch (host.getExternalExecutor(target_info, .{ .link_libc = case.link_libc })) {
- .native => {
- if (case.backend == .stage2 and case.target.getCpuArch() == .arm) {
- // https://github.com/ziglang/zig/issues/13623
- continue :update; // Pass test.
- }
- try argv.append(exe_path);
- },
- .bad_dl, .bad_os_or_cpu => continue :update, // Pass test.
-
- .rosetta => if (enable_rosetta) {
- try argv.append(exe_path);
- } else {
- continue :update; // Rosetta not available, pass test.
- },
-
- .qemu => |qemu_bin_name| if (enable_qemu) {
- const need_cross_glibc = target.isGnuLibC() and case.link_libc;
- const glibc_dir_arg: ?[]const u8 = if (need_cross_glibc)
- glibc_runtimes_dir orelse continue :update // glibc dir not available; pass test
- else
- null;
- try argv.append(qemu_bin_name);
- if (glibc_dir_arg) |dir| {
- const linux_triple = try target.linuxTriple(arena);
- const full_dir = try std.fs.path.join(arena, &[_][]const u8{
- dir,
- linux_triple,
- });
-
- try argv.append("-L");
- try argv.append(full_dir);
- }
- try argv.append(exe_path);
- } else {
- continue :update; // QEMU not available; pass test.
- },
-
- .wine => |wine_bin_name| if (enable_wine) {
- try argv.append(wine_bin_name);
- try argv.append(exe_path);
- } else {
- continue :update; // Wine not available; pass test.
- },
-
- .wasmtime => |wasmtime_bin_name| if (enable_wasmtime) {
- try argv.append(wasmtime_bin_name);
- try argv.append("--dir=.");
- try argv.append(exe_path);
- } else {
- continue :update; // wasmtime not available; pass test.
- },
-
- .darling => |darling_bin_name| if (enable_darling) {
- try argv.append(darling_bin_name);
- // Since we use relative to cwd here, we invoke darling with
- // "shell" subcommand.
- try argv.append("shell");
- try argv.append(exe_path);
- } else {
- continue :update; // Darling not available; pass test.
- },
- }
-
- try comp.makeBinFileExecutable();
-
- while (true) {
- break :x std.ChildProcess.exec(.{
- .allocator = allocator,
- .argv = argv.items,
- .cwd_dir = tmp.dir,
- .cwd = tmp_dir_path,
- }) catch |err| switch (err) {
- error.FileBusy => {
- // There is a fundamental design flaw in Unix systems with how
- // ETXTBSY interacts with fork+exec.
- // https://github.com/golang/go/issues/22315
- // https://bugs.openjdk.org/browse/JDK-8068370
- // Unfortunately, this could be a real error, but we can't
- // tell the difference here.
- continue;
- },
- else => {
- print("\n{s}.{d} The following command failed with {s}:\n", .{
- case.name, update_index, @errorName(err),
- });
- dumpArgs(argv.items);
- return error.ChildProcessExecution;
- },
- };
- }
- };
- var test_node = update_node.start("test", 0);
- test_node.activate();
- defer test_node.end();
- defer allocator.free(exec_result.stdout);
- defer allocator.free(exec_result.stderr);
- switch (exec_result.term) {
- .Exited => |code| {
- if (code != 0) {
- print("\n{s}\n{s}: execution exited with code {d}:\n", .{
- exec_result.stderr, case.name, code,
- });
- dumpArgs(argv.items);
- return error.ChildProcessExecution;
- }
- },
- else => {
- print("\n{s}\n{s}: execution crashed:\n", .{
- exec_result.stderr, case.name,
- });
- dumpArgs(argv.items);
- return error.ChildProcessExecution;
- },
- }
- try std.testing.expectEqualStrings(expected_stdout, exec_result.stdout);
- // We allow stderr to have garbage in it because wasmtime prints a
- // warning about --invoke even though we don't pass it.
- //std.testing.expectEqualStrings("", exec_result.stderr);
- },
- }
- }
- }
-};
-
-fn dumpArgs(argv: []const []const u8) void {
- for (argv) |arg| {
- print("{s} ", .{arg});
- }
- print("\n", .{});
-}
-
-const tmp_src_path = "tmp.zig";
diff --git a/src/translate_c.zig b/src/translate_c.zig
index de0c666232..45b9f38b75 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -784,9 +784,9 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
const qual_type = var_decl.getTypeSourceInfo_getType();
const storage_class = var_decl.getStorageClass();
- const is_const = qual_type.isConstQualified();
const has_init = var_decl.hasInit();
const decl_init = var_decl.getInit();
+ var is_const = qual_type.isConstQualified();
// In C extern variables with initializers behave like Zig exports.
// extern int foo = 2;
@@ -843,6 +843,20 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
// std.mem.zeroes(T)
init_node = try Tag.std_mem_zeroes.create(c.arena, type_node);
+ } else if (qual_type.getTypeClass() == .IncompleteArray) {
+ // Oh no, an extern array of unknown size! These are really fun because there's no
+ // direct equivalent in Zig. To translate correctly, we'll have to create a C-pointer
+ // to the data initialized via @extern.
+
+ const name_str = try std.fmt.allocPrint(c.arena, "\"{s}\"", .{var_name});
+ init_node = try Tag.builtin_extern.create(c.arena, .{
+ .type = type_node,
+ .name = try Tag.string_literal.create(c.arena, name_str),
+ });
+
+ // Since this is really a pointer to the underlying data, we tweak a few properties.
+ is_extern = false;
+ is_const = true;
}
const linksection_string = blk: {
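For illustration, a C declaration such as `extern int foo[];` would, under this change, come out roughly as the Zig below (approximate; the exact rendering is produced by the `builtin_extern` node added to `src/translate_c/ast.zig` further down):

```zig
// C input:
//     extern int foo[];
// Approximate translated Zig output: a C-pointer constant bound via @extern.
pub const foo = @extern([*c]c_int, .{ .name = "foo" });
```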
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 81a19eb39d..688235c2d3 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -158,6 +158,8 @@ pub const Node = extern union {
vector_zero_init,
/// @shuffle(type, a, b, mask)
shuffle,
+ /// @extern(ty, .{ .name = n })
+ builtin_extern,
/// @import("std").zig.c_translation.MacroArithmetic.<op>(lhs, rhs)
macro_arithmetic,
@@ -373,6 +375,7 @@ pub const Node = extern union {
.field_access => Payload.FieldAccess,
.string_slice => Payload.StringSlice,
.shuffle => Payload.Shuffle,
+ .builtin_extern => Payload.Extern,
.macro_arithmetic => Payload.MacroArithmetic,
};
}
@@ -718,6 +721,14 @@ pub const Payload = struct {
},
};
+ pub const Extern = struct {
+ base: Payload,
+ data: struct {
+ type: Node,
+ name: Node,
+ },
+ };
+
pub const MacroArithmetic = struct {
base: Payload,
data: struct {
@@ -1409,6 +1420,22 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
payload.mask_vector,
});
},
+ .builtin_extern => {
+ const payload = node.castTag(.builtin_extern).?.data;
+
+ var info_inits: [1]Payload.ContainerInitDot.Initializer = .{
+ .{ .name = "name", .value = payload.name },
+ };
+ var info_payload: Payload.ContainerInitDot = .{
+ .base = .{ .tag = .container_init_dot },
+ .data = &info_inits,
+ };
+
+ return renderBuiltinCall(c, "@extern", &.{
+ payload.type,
+ .{ .ptr_otherwise = &info_payload.base },
+ });
+ },
.macro_arithmetic => {
const payload = node.castTag(.macro_arithmetic).?.data;
const op = @tagName(payload.op);
@@ -2348,6 +2375,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.div_exact,
.offset_of,
.shuffle,
+ .builtin_extern,
.static_local_var,
.mut_str,
.macro_arithmetic,
diff --git a/src/value.zig b/src/value.zig
index 4a5683df36..e5283d1270 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1365,6 +1365,17 @@ pub const Value = extern union {
if (val.isDeclRef()) return error.ReinterpretDeclRef;
return val.writeToMemory(Type.usize, mod, buffer);
},
+ .Optional => {
+ assert(ty.isPtrLikeOptional());
+ var buf: Type.Payload.ElemType = undefined;
+ const child = ty.optionalChild(&buf);
+ const opt_val = val.optionalValue();
+ if (opt_val) |some| {
+ return some.writeToMemory(child, mod, buffer);
+ } else {
+ return writeToMemory(Value.zero, Type.usize, mod, buffer);
+ }
+ },
else => @panic("TODO implement writeToMemory for more types"),
}
}
@@ -1471,6 +1482,17 @@ pub const Value = extern union {
if (val.isDeclRef()) return error.ReinterpretDeclRef;
return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
},
+ .Optional => {
+ assert(ty.isPtrLikeOptional());
+ var buf: Type.Payload.ElemType = undefined;
+ const child = ty.optionalChild(&buf);
+ const opt_val = val.optionalValue();
+ if (opt_val) |some| {
+ return some.writeToPackedMemory(child, mod, buffer, bit_offset);
+ } else {
+ return writeToPackedMemory(Value.zero, Type.usize, mod, buffer, bit_offset);
+ }
+ },
else => @panic("TODO implement writeToPackedMemory for more types"),
}
}
@@ -1579,6 +1601,12 @@ pub const Value = extern union {
assert(!ty.isSlice()); // No well defined layout.
return readFromMemory(Type.usize, mod, buffer, arena);
},
+ .Optional => {
+ assert(ty.isPtrLikeOptional());
+ var buf: Type.Payload.ElemType = undefined;
+ const child = ty.optionalChild(&buf);
+ return readFromMemory(child, mod, buffer, arena);
+ },
else => @panic("TODO implement readFromMemory for more types"),
}
}
@@ -1670,6 +1698,12 @@ pub const Value = extern union {
assert(!ty.isSlice()); // No well defined layout.
return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
},
+ .Optional => {
+ assert(ty.isPtrLikeOptional());
+ var buf: Type.Payload.ElemType = undefined;
+ const child = ty.optionalChild(&buf);
+ return readFromPackedMemory(child, mod, buffer, bit_offset, arena);
+ },
else => @panic("TODO implement readFromPackedMemory for more types"),
}
}
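These `.Optional` branches are only reached for pointer-like optionals, which share a plain pointer's in-memory representation; a standalone sketch of that layout assumption:

```zig
const std = @import("std");

test "pointer-like optionals have pointer-sized layout" {
    // writeToMemory/readFromMemory rely on this: ?*T and ?[*]T are
    // ABI-compatible with a plain pointer, and null is the zero address,
    // which is why the null case above is written as a zero usize.
    try std.testing.expectEqual(@sizeOf(usize), @sizeOf(?*u32));
    try std.testing.expectEqual(@sizeOf(usize), @sizeOf(?[*]const u8));
}
```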
@@ -3144,13 +3178,32 @@ pub const Value = extern union {
/// TODO: check for cases such as array that is not marked undef but all the element
/// values are marked undef, or struct that is not marked undef but all fields are marked
/// undef, etc.
- pub fn anyUndef(self: Value) bool {
- if (self.castTag(.aggregate)) |aggregate| {
- for (aggregate.data) |val| {
- if (val.anyUndef()) return true;
- }
+ pub fn anyUndef(self: Value, mod: *Module) bool {
+ switch (self.tag()) {
+ .slice => {
+ const payload = self.castTag(.slice).?;
+ const len = payload.data.len.toUnsignedInt(mod.getTarget());
+
+ var elem_value_buf: ElemValueBuffer = undefined;
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf);
+ if (elem_val.anyUndef(mod)) return true;
+ }
+ },
+
+ .aggregate => {
+ const payload = self.castTag(.aggregate).?;
+ for (payload.data) |val| {
+ if (val.anyUndef(mod)) return true;
+ }
+ },
+
+ .undef => return true,
+ else => {},
}
- return self.isUndef();
+
+ return false;
}
/// Asserts the value is not undefined and not unreachable.
@@ -3319,7 +3372,7 @@ pub const Value = extern union {
}
}
- fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
switch (dest_ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig
index fc8c81d5af..38a4f17190 100644
--- a/src/wasi_libc.zig
+++ b/src/wasi_libc.zig
@@ -59,7 +59,7 @@ pub fn execModelCrtFileFullName(wasi_exec_model: std.builtin.WasiExecModel) []co
};
}
-pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
+pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -74,7 +74,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
var args = std.ArrayList([]const u8).init(arena);
try addCCArgs(comp, arena, &args, false);
try addLibcBottomHalfIncludes(comp, arena, &args);
- return comp.build_crt_file("crt1-reactor", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("crt1-reactor", .Obj, .@"wasi crt1-reactor.o", prog_node, &.{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", try sanitize(arena, crt1_reactor_src_file),
@@ -87,7 +87,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
var args = std.ArrayList([]const u8).init(arena);
try addCCArgs(comp, arena, &args, false);
try addLibcBottomHalfIncludes(comp, arena, &args);
- return comp.build_crt_file("crt1-command", .Obj, &[1]Compilation.CSourceFile{
+ return comp.build_crt_file("crt1-command", .Obj, .@"wasi crt1-command.o", prog_node, &.{
.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", try sanitize(arena, crt1_command_src_file),
@@ -145,7 +145,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}
}
- try comp.build_crt_file("c", .Lib, libc_sources.items);
+ try comp.build_crt_file("c", .Lib, .@"wasi libc.a", prog_node, libc_sources.items);
},
.libwasi_emulated_process_clocks_a => {
var args = std.ArrayList([]const u8).init(arena);
@@ -161,7 +161,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.extra_flags = args.items,
});
}
- try comp.build_crt_file("wasi-emulated-process-clocks", .Lib, emu_clocks_sources.items);
+ try comp.build_crt_file("wasi-emulated-process-clocks", .Lib, .@"libwasi-emulated-process-clocks.a", prog_node, emu_clocks_sources.items);
},
.libwasi_emulated_getpid_a => {
var args = std.ArrayList([]const u8).init(arena);
@@ -177,7 +177,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.extra_flags = args.items,
});
}
- try comp.build_crt_file("wasi-emulated-getpid", .Lib, emu_getpid_sources.items);
+ try comp.build_crt_file("wasi-emulated-getpid", .Lib, .@"libwasi-emulated-getpid.a", prog_node, emu_getpid_sources.items);
},
.libwasi_emulated_mman_a => {
var args = std.ArrayList([]const u8).init(arena);
@@ -193,7 +193,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.extra_flags = args.items,
});
}
- try comp.build_crt_file("wasi-emulated-mman", .Lib, emu_mman_sources.items);
+ try comp.build_crt_file("wasi-emulated-mman", .Lib, .@"libwasi-emulated-mman.a", prog_node, emu_mman_sources.items);
},
.libwasi_emulated_signal_a => {
var emu_signal_sources = std.ArrayList(Compilation.CSourceFile).init(arena);
@@ -228,7 +228,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}
}
- try comp.build_crt_file("wasi-emulated-signal", .Lib, emu_signal_sources.items);
+ try comp.build_crt_file("wasi-emulated-signal", .Lib, .@"libwasi-emulated-signal.a", prog_node, emu_signal_sources.items);
},
}
}
diff --git a/stage1/wasi.c b/stage1/wasi.c
index 911ce6e520..6c4ac48a50 100644
--- a/stage1/wasi.c
+++ b/stage1/wasi.c
@@ -497,8 +497,6 @@ uint32_t wasi_snapshot_preview1_fd_read(uint32_t fd, uint32_t iovs, uint32_t iov
size_t read_size = 0;
if (fds[fd].stream != NULL)
read_size = fread(&m[iovs_ptr[i].ptr], 1, iovs_ptr[i].len, fds[fd].stream);
- else
- panic("unimplemented");
size += read_size;
if (read_size < iovs_ptr[i].len) break;
}
diff --git a/stage1/zig.h b/stage1/zig.h
index 0756d9f731..59c3ddd695 100644
--- a/stage1/zig.h
+++ b/stage1/zig.h
@@ -1,8 +1,11 @@
#undef linux
+#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__
#define __STDC_WANT_IEC_60559_TYPES_EXT__
+#endif
#include
#include
+#include <stddef.h>
#include
#include
@@ -34,6 +37,14 @@ typedef char bool;
#define zig_has_attribute(attribute) 0
#endif
+#if __LITTLE_ENDIAN__ || _MSC_VER
+#define zig_little_endian 1
+#define zig_big_endian 0
+#else
+#define zig_little_endian 0
+#define zig_big_endian 1
+#endif
+
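For reference, a standalone sketch (not part of the patch; any hosted C compiler) that cross-checks compile-time endianness macros like the ones above against a runtime probe:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    uint16_t probe = 0x0102;
    uint8_t first_byte;
    memcpy(&first_byte, &probe, 1);   /* read the lowest-addressed byte safely */
    printf("runtime little-endian: %d\n", first_byte == 0x02);
    return 0;
}
```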
#if __STDC_VERSION__ >= 201112L
#define zig_threadlocal _Thread_local
#elif defined(__GNUC__)
@@ -75,6 +86,32 @@ typedef char bool;
#define zig_cold
#endif
+#if zig_has_attribute(flatten)
+#define zig_maybe_flatten __attribute__((flatten))
+#else
+#define zig_maybe_flatten
+#endif
+
+#if zig_has_attribute(noinline)
+#define zig_never_inline __attribute__((noinline)) zig_maybe_flatten
+#elif defined(_MSC_VER)
+#define zig_never_inline __declspec(noinline) zig_maybe_flatten
+#else
+#define zig_never_inline zig_never_inline_unavailable
+#endif
+
+#if zig_has_attribute(not_tail_called)
+#define zig_never_tail __attribute__((not_tail_called)) zig_never_inline
+#else
+#define zig_never_tail zig_never_tail_unavailable
+#endif
+
+#if zig_has_attribute(musttail)
+#define zig_always_tail __attribute__((musttail))
+#else
+#define zig_always_tail zig_always_tail_unavailable
+#endif
+
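A standalone sketch (not part of the patch) of how attribute macros in this style are typically applied; it assumes a GCC/Clang toolchain and the `demo_*` names are hypothetical:

```c
#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
#define demo_noinline __attribute__((noinline))
#else
/* Unlike the *_unavailable fallbacks above (which are deliberate compile
 * errors when used), this demo simply drops the attribute. */
#define demo_noinline
#endif

/* noinline keeps this helper out of its callers, so it stays visible
 * in backtraces and profiles. */
static demo_noinline void report(const char *msg) {
    printf("%s\n", msg);
}

int main(void) {
    report("hello");
    return 0;
}
```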
#if __STDC_VERSION__ >= 199901L
#define zig_restrict restrict
#elif defined(__GNUC__)
@@ -151,16 +188,38 @@ typedef char bool;
#define zig_export(sig, symbol, name) __asm(name " = " symbol)
#endif
+#if zig_has_builtin(trap)
+#define zig_trap() __builtin_trap()
+#elif _MSC_VER && (_M_IX86 || _M_X64)
+#define zig_trap() __ud2()
+#elif _MSC_VER
+#define zig_trap() __fastfail(0)
+#elif defined(__i386__) || defined(__x86_64__)
+#define zig_trap() __asm__ volatile("ud2");
+#elif defined(__arm__) || defined(__aarch64__)
+#define zig_trap() __asm__ volatile("udf #0");
+#else
+#include <stdlib.h>
+#define zig_trap() abort()
+#endif
+
#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif zig_has_builtin(trap) || defined(zig_gnuc)
-#define zig_breakpoint() __builtin_trap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
#define zig_breakpoint() __asm__ volatile("int $0x03");
+#elif defined(__arm__)
+#define zig_breakpoint() __asm__ volatile("bkpt #0");
+#elif defined(__aarch64__)
+#define zig_breakpoint() __asm__ volatile("brk #0");
#else
+#include <signal.h>
+#if defined(SIGTRAP)
#define zig_breakpoint() raise(SIGTRAP)
+#else
+#define zig_breakpoint() zig_breakpoint_unavailable
+#endif
#endif
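A reduced standalone sketch (not part of the patch) of the same fallback ladder used above for zig_trap and zig_breakpoint, cut down to two branches; `demo_trap` is a hypothetical name:

```c
#if defined(__GNUC__) || defined(__clang__)
#define demo_trap() __builtin_trap()   /* emits a trapping instruction (e.g. ud2) */
#else
#include <stdlib.h>
#define demo_trap() abort()            /* portable last resort */
#endif

int main(void) {
    demo_trap();                       /* terminates abnormally on either branch */
}
```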
#if zig_has_builtin(return_address) || defined(zig_gnuc)
@@ -286,701 +345,656 @@ typedef char bool;
#endif
#if __STDC_VERSION__ >= 201112L
-#define zig_noreturn _Noreturn void
+#define zig_noreturn _Noreturn
#elif zig_has_attribute(noreturn) || defined(zig_gnuc)
-#define zig_noreturn __attribute__((noreturn)) void
+#define zig_noreturn __attribute__((noreturn))
#elif _MSC_VER
-#define zig_noreturn __declspec(noreturn) void
+#define zig_noreturn __declspec(noreturn)
#else
-#define zig_noreturn void
+#define zig_noreturn
#endif
#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T))
-typedef uintptr_t zig_usize;
-typedef intptr_t zig_isize;
-typedef signed short int zig_c_short;
-typedef unsigned short int zig_c_ushort;
-typedef signed int zig_c_int;
-typedef unsigned int zig_c_uint;
-typedef signed long int zig_c_long;
-typedef unsigned long int zig_c_ulong;
-typedef signed long long int zig_c_longlong;
-typedef unsigned long long int zig_c_ulonglong;
+#define zig_compiler_rt_abbrev_uint32_t si
+#define zig_compiler_rt_abbrev_int32_t si
+#define zig_compiler_rt_abbrev_uint64_t di
+#define zig_compiler_rt_abbrev_int64_t di
+#define zig_compiler_rt_abbrev_zig_u128 ti
+#define zig_compiler_rt_abbrev_zig_i128 ti
+#define zig_compiler_rt_abbrev_zig_f16 hf
+#define zig_compiler_rt_abbrev_zig_f32 sf
+#define zig_compiler_rt_abbrev_zig_f64 df
+#define zig_compiler_rt_abbrev_zig_f80 xf
+#define zig_compiler_rt_abbrev_zig_f128 tf
-typedef uint8_t zig_u8;
-typedef int8_t zig_i8;
-typedef uint16_t zig_u16;
-typedef int16_t zig_i16;
-typedef uint32_t zig_u32;
-typedef int32_t zig_i32;
-typedef uint64_t zig_u64;
-typedef int64_t zig_i64;
+zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t);
+zig_extern void *memset (void *, int, size_t);
-#define zig_as_u8(val) UINT8_C(val)
-#define zig_as_i8(val) INT8_C(val)
-#define zig_as_u16(val) UINT16_C(val)
-#define zig_as_i16(val) INT16_C(val)
-#define zig_as_u32(val) UINT32_C(val)
-#define zig_as_i32(val) INT32_C(val)
-#define zig_as_u64(val) UINT64_C(val)
-#define zig_as_i64(val) INT64_C(val)
+/* ===================== 8/16/32/64-bit Integer Support ===================== */
+
+#if __STDC_VERSION__ >= 199901L || _MSC_VER
+#include <stdint.h>
+#else
+
+#if SCHAR_MIN == ~0x7F && SCHAR_MAX == 0x7F && UCHAR_MAX == 0xFF
+typedef unsigned char uint8_t;
+typedef signed char int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif SHRT_MIN == ~0x7F && SHRT_MAX == 0x7F && USHRT_MAX == 0xFF
+typedef unsigned short uint8_t;
+typedef signed short int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif INT_MIN == ~0x7F && INT_MAX == 0x7F && UINT_MAX == 0xFF
+typedef unsigned int uint8_t;
+typedef signed int int8_t;
+#define INT8_C(c) c
+#define UINT8_C(c) c##U
+#elif LONG_MIN == ~0x7F && LONG_MAX == 0x7F && ULONG_MAX == 0xFF
+typedef unsigned long uint8_t;
+typedef signed long int8_t;
+#define INT8_C(c) c##L
+#define UINT8_C(c) c##LU
+#elif LLONG_MIN == ~0x7F && LLONG_MAX == 0x7F && ULLONG_MAX == 0xFF
+typedef unsigned long long uint8_t;
+typedef signed long long int8_t;
+#define INT8_C(c) c##LL
+#define UINT8_C(c) c##LLU
+#endif
+#define INT8_MIN (~INT8_C(0x7F))
+#define INT8_MAX ( INT8_C(0x7F))
+#define UINT8_MAX (UINT8_C(0xFF))
+
+#if SCHAR_MIN == ~0x7FFF && SCHAR_MAX == 0x7FFF && UCHAR_MAX == 0xFFFF
+typedef unsigned char uint16_t;
+typedef signed char int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif SHRT_MIN == ~0x7FFF && SHRT_MAX == 0x7FFF && USHRT_MAX == 0xFFFF
+typedef unsigned short uint16_t;
+typedef signed short int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif INT_MIN == ~0x7FFF && INT_MAX == 0x7FFF && UINT_MAX == 0xFFFF
+typedef unsigned int uint16_t;
+typedef signed int int16_t;
+#define INT16_C(c) c
+#define UINT16_C(c) c##U
+#elif LONG_MIN == ~0x7FFF && LONG_MAX == 0x7FFF && ULONG_MAX == 0xFFFF
+typedef unsigned long uint16_t;
+typedef signed long int16_t;
+#define INT16_C(c) c##L
+#define UINT16_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFF && LLONG_MAX == 0x7FFF && ULLONG_MAX == 0xFFFF
+typedef unsigned long long uint16_t;
+typedef signed long long int16_t;
+#define INT16_C(c) c##LL
+#define UINT16_C(c) c##LLU
+#endif
+#define INT16_MIN (~INT16_C(0x7FFF))
+#define INT16_MAX ( INT16_C(0x7FFF))
+#define UINT16_MAX (UINT16_C(0xFFFF))
+
+#if SCHAR_MIN == ~0x7FFFFFFF && SCHAR_MAX == 0x7FFFFFFF && UCHAR_MAX == 0xFFFFFFFF
+typedef unsigned char uint32_t;
+typedef signed char int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif SHRT_MIN == ~0x7FFFFFFF && SHRT_MAX == 0x7FFFFFFF && USHRT_MAX == 0xFFFFFFFF
+typedef unsigned short uint32_t;
+typedef signed short int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif INT_MIN == ~0x7FFFFFFF && INT_MAX == 0x7FFFFFFF && UINT_MAX == 0xFFFFFFFF
+typedef unsigned int uint32_t;
+typedef signed int int32_t;
+#define INT32_C(c) c
+#define UINT32_C(c) c##U
+#elif LONG_MIN == ~0x7FFFFFFF && LONG_MAX == 0x7FFFFFFF && ULONG_MAX == 0xFFFFFFFF
+typedef unsigned long uint32_t;
+typedef signed long int32_t;
+#define INT32_C(c) c##L
+#define UINT32_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFFFFFF && LLONG_MAX == 0x7FFFFFFF && ULLONG_MAX == 0xFFFFFFFF
+typedef unsigned long long uint32_t;
+typedef signed long long int32_t;
+#define INT32_C(c) c##LL
+#define UINT32_C(c) c##LLU
+#endif
+#define INT32_MIN (~INT32_C(0x7FFFFFFF))
+#define INT32_MAX ( INT32_C(0x7FFFFFFF))
+#define UINT32_MAX (UINT32_C(0xFFFFFFFF))
+
+#if SCHAR_MIN == ~0x7FFFFFFFFFFFFFFF && SCHAR_MAX == 0x7FFFFFFFFFFFFFFF && UCHAR_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned char uint64_t;
+typedef signed char int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif SHRT_MIN == ~0x7FFFFFFFFFFFFFFF && SHRT_MAX == 0x7FFFFFFFFFFFFFFF && USHRT_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned short uint64_t;
+typedef signed short int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif INT_MIN == ~0x7FFFFFFFFFFFFFFF && INT_MAX == 0x7FFFFFFFFFFFFFFF && UINT_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned int uint64_t;
+typedef signed int int64_t;
+#define INT64_C(c) c
+#define UINT64_C(c) c##U
+#elif LONG_MIN == ~0x7FFFFFFFFFFFFFFF && LONG_MAX == 0x7FFFFFFFFFFFFFFF && ULONG_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned long uint64_t;
+typedef signed long int64_t;
+#define INT64_C(c) c##L
+#define UINT64_C(c) c##LU
+#elif LLONG_MIN == ~0x7FFFFFFFFFFFFFFF && LLONG_MAX == 0x7FFFFFFFFFFFFFFF && ULLONG_MAX == 0xFFFFFFFFFFFFFFFF
+typedef unsigned long long uint64_t;
+typedef signed long long int64_t;
+#define INT64_C(c) c##LL
+#define UINT64_C(c) c##LLU
+#endif
+#define INT64_MIN (~INT64_C(0x7FFFFFFFFFFFFFFF))
+#define INT64_MAX ( INT64_C(0x7FFFFFFFFFFFFFFF))
+#define UINT64_MAX (UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+typedef size_t uintptr_t;
+typedef ptrdiff_t intptr_t;
+
+#endif
-#define zig_minInt_u8 zig_as_u8(0)
-#define zig_maxInt_u8 UINT8_MAX
#define zig_minInt_i8 INT8_MIN
#define zig_maxInt_i8 INT8_MAX
-#define zig_minInt_u16 zig_as_u16(0)
-#define zig_maxInt_u16 UINT16_MAX
+#define zig_minInt_u8 UINT8_C(0)
+#define zig_maxInt_u8 UINT8_MAX
#define zig_minInt_i16 INT16_MIN
#define zig_maxInt_i16 INT16_MAX
-#define zig_minInt_u32 zig_as_u32(0)
-#define zig_maxInt_u32 UINT32_MAX
+#define zig_minInt_u16 UINT16_C(0)
+#define zig_maxInt_u16 UINT16_MAX
#define zig_minInt_i32 INT32_MIN
#define zig_maxInt_i32 INT32_MAX
-#define zig_minInt_u64 zig_as_u64(0)
-#define zig_maxInt_u64 UINT64_MAX
+#define zig_minInt_u32 UINT32_C(0)
+#define zig_maxInt_u32 UINT32_MAX
#define zig_minInt_i64 INT64_MIN
#define zig_maxInt_i64 INT64_MAX
+#define zig_minInt_u64 UINT64_C(0)
+#define zig_maxInt_u64 UINT64_MAX
-#define zig_compiler_rt_abbrev_u32 si
-#define zig_compiler_rt_abbrev_i32 si
-#define zig_compiler_rt_abbrev_u64 di
-#define zig_compiler_rt_abbrev_i64 di
-#define zig_compiler_rt_abbrev_u128 ti
-#define zig_compiler_rt_abbrev_i128 ti
-#define zig_compiler_rt_abbrev_f16 hf
-#define zig_compiler_rt_abbrev_f32 sf
-#define zig_compiler_rt_abbrev_f64 df
-#define zig_compiler_rt_abbrev_f80 xf
-#define zig_compiler_rt_abbrev_f128 tf
-
-zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize);
-zig_extern void *memset (void *, int, zig_usize);
-
-/* ==================== 8/16/32/64-bit Integer Routines ===================== */
-
-#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits))
-#define zig_expand_maxInt(Type, bits) zig_maxInt(Type, bits)
-#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits)
-#define zig_expand_minInt(Type, bits) zig_minInt(Type, bits)
+#define zig_intLimit(s, w, limit, bits) zig_shr_##s##w(zig_##limit##Int_##s##w, w - (bits))
+#define zig_minInt_i(w, bits) zig_intLimit(i, w, min, bits)
+#define zig_maxInt_i(w, bits) zig_intLimit(i, w, max, bits)
+#define zig_minInt_u(w, bits) zig_intLimit(u, w, min, bits)
+#define zig_maxInt_u(w, bits) zig_intLimit(u, w, max, bits)
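To make the new parameterized limits concrete: zig_maxInt_u(32, 5) expands to zig_shr_u32(UINT32_MAX, 32 - 5), i.e. 0x1F, the largest value of a 5-bit unsigned integer stored in a uint32_t, while zig_minInt_u(32, 5) is 0. A standalone sketch (not part of the patch; `demo_*` names are hypothetical):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the zig_shr_u32 helper generated further down. */
static uint32_t demo_shr_u32(uint32_t val, uint8_t n) { return val >> n; }

int main(void) {
    uint8_t bits = 5;                                     /* a Zig u5 lowered onto uint32_t */
    uint32_t max_u5 = demo_shr_u32(UINT32_MAX, 32 - bits);
    uint32_t min_u5 = 0;
    assert(max_u5 == 0x1F);
    printf("u5 range: [%u, %u]\n", min_u5, max_u5);
    return 0;
}
```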
#define zig_int_operator(Type, RhsType, operation, operator) \
- static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##RhsType rhs) { \
+ static inline Type zig_##operation(Type lhs, RhsType rhs) { \
return lhs operator rhs; \
}
#define zig_int_basic_operator(Type, operation, operator) \
- zig_int_operator(Type, Type, operation, operator)
+ zig_int_operator(Type, Type, operation, operator)
#define zig_int_shift_operator(Type, operation, operator) \
- zig_int_operator(Type, u8, operation, operator)
+ zig_int_operator(Type, uint8_t, operation, operator)
#define zig_int_helpers(w) \
- zig_int_basic_operator(u##w, and, &) \
- zig_int_basic_operator(i##w, and, &) \
- zig_int_basic_operator(u##w, or, |) \
- zig_int_basic_operator(i##w, or, |) \
- zig_int_basic_operator(u##w, xor, ^) \
- zig_int_basic_operator(i##w, xor, ^) \
- zig_int_shift_operator(u##w, shl, <<) \
- zig_int_shift_operator(i##w, shl, <<) \
- zig_int_shift_operator(u##w, shr, >>) \
+ zig_int_basic_operator(uint##w##_t, and_u##w, &) \
+ zig_int_basic_operator( int##w##_t, and_i##w, &) \
+ zig_int_basic_operator(uint##w##_t, or_u##w, |) \
+ zig_int_basic_operator( int##w##_t, or_i##w, |) \
+ zig_int_basic_operator(uint##w##_t, xor_u##w, ^) \
+ zig_int_basic_operator( int##w##_t, xor_i##w, ^) \
+ zig_int_shift_operator(uint##w##_t, shl_u##w, <<) \
+ zig_int_shift_operator( int##w##_t, shl_i##w, <<) \
+ zig_int_shift_operator(uint##w##_t, shr_u##w, >>) \
\
- static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \
- zig_i##w sign_mask = lhs < zig_as_i##w(0) ? -zig_as_i##w(1) : zig_as_i##w(0); \
+ static inline int##w##_t zig_shr_i##w(int##w##_t lhs, uint8_t rhs) { \
+ int##w##_t sign_mask = lhs < INT##w##_C(0) ? -INT##w##_C(1) : INT##w##_C(0); \
return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \
} \
\
- static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \
- return val ^ zig_maxInt(u##w, bits); \
+ static inline uint##w##_t zig_not_u##w(uint##w##_t val, uint8_t bits) { \
+ return val ^ zig_maxInt_u(w, bits); \
} \
\
- static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \
+ static inline int##w##_t zig_not_i##w(int##w##_t val, uint8_t bits) { \
(void)bits; \
return ~val; \
} \
\
- static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \
- return val & zig_maxInt(u##w, bits); \
+ static inline uint##w##_t zig_wrap_u##w(uint##w##_t val, uint8_t bits) { \
+ return val & zig_maxInt_u(w, bits); \
} \
\
- static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \
- return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \
- ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \
+ static inline int##w##_t zig_wrap_i##w(int##w##_t val, uint8_t bits) { \
+ return (val & UINT##w##_C(1) << (bits - UINT8_C(1))) != 0 \
+ ? val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \
} \
\
- zig_int_basic_operator(u##w, div_floor, /) \
+ zig_int_basic_operator(uint##w##_t, div_floor_u##w, /) \
\
- static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \
- return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \
+ static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \
+ return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \
} \
\
- zig_int_basic_operator(u##w, mod, %) \
+ zig_int_basic_operator(uint##w##_t, mod_u##w, %) \
\
- static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \
- zig_i##w rem = lhs % rhs; \
- return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? rhs : zig_as_i##w(0)); \
+ static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \
+ int##w##_t rem = lhs % rhs; \
+ return rem + (((lhs ^ rhs) & rem) < INT##w##_C(0) ? rhs : INT##w##_C(0)); \
} \
\
- static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \
} \
\
- static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_shlw_i##w(int##w##_t lhs, uint8_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)zig_shl_u##w((uint##w##_t)lhs, (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_addw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs + rhs, bits); \
} \
\
- static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_addw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs + (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_subw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs - rhs, bits); \
} \
\
- static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_subw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs - (uint##w##_t)rhs), bits); \
} \
\
- static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ static inline uint##w##_t zig_mulw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
return zig_wrap_u##w(lhs * rhs, bits); \
} \
\
- static inline zig_i##w zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \
+ static inline int##w##_t zig_mulw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs * (uint##w##_t)rhs), bits); \
}
zig_int_helpers(8)
zig_int_helpers(16)
zig_int_helpers(32)
zig_int_helpers(64)
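As a concrete check of the wrapping helpers above (standalone sketch, not part of the patch): wrapping the value 9 to a 4-bit signed integer sets the sign bit, so zig_wrap_i8(9, 4) yields -7, matching Zig's truncation semantics for an i4. The `demo_*` names are hypothetical:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same logic as the generated zig_wrap_i8 with zig_minInt_i/zig_maxInt_i,
 * written out for w = 8. */
static int8_t demo_wrap_i8(int8_t val, uint8_t bits) {
    uint8_t max_u = (uint8_t)(UINT8_MAX >> (8 - bits));   /* 0x0F for bits = 4 */
    int8_t  max_i = (int8_t)(max_u >> 1);                 /*  7 */
    int8_t  min_i = (int8_t)(-max_i - 1);                 /* -8 */
    return (val & (UINT8_C(1) << (bits - 1))) != 0
        ? (int8_t)(val | min_i)                           /* sign bit set: extend */
        : (int8_t)(val & max_i);                          /* otherwise: mask down */
}

int main(void) {
    assert(demo_wrap_i8(9, 4) == -7);
    printf("wrap_i8(9, 4) = %d\n", demo_wrap_i8(9, 4));
    return 0;
}
```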
-static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_addw_u32(lhs, rhs, bits);
return *res < lhs;
#endif
}
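A standalone sketch (not part of the patch; `demo_*` names hypothetical) of what this reports for a narrow operand width: adding 6 and 5 as a 3-bit unsigned integer wraps to 3 and flags overflow.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
    uint32_t max = (bits == 32) ? UINT32_MAX : ((UINT32_C(1) << bits) - 1);
    uint64_t full = (uint64_t)lhs + rhs;   /* widen so the add itself cannot overflow */
    *res = (uint32_t)full & max;           /* wrapped result, like zig_wrap_u32 */
    return full > max;                     /* true when the bits-wide type overflowed */
}

int main(void) {
    uint32_t res;
    bool ov = demo_addo_u32(&res, 6, 5, 3);
    printf("res=%u overflow=%d\n", res, ov);   /* res=3 overflow=1 */
    return 0;
}
```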
-static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __addosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __addosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_addw_u64(lhs, rhs, bits);
return *res < lhs;
#endif
}
-static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __addodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __addodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_subw_u32(lhs, rhs, bits);
return *res > lhs;
#endif
}
-static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __subosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __subosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_subw_u64(lhs, rhs, bits);
return *res > lhs;
#endif
}
-static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __subodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __subodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-
-static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-
-static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
- return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+ return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits);
#else
*res = zig_mulw_u32(lhs, rhs, bits);
- return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs;
+ return rhs != UINT32_C(0) && lhs > zig_maxInt_u(32, bits) / rhs;
#endif
}
-static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n,
- const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow);
+static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i32 full_res = __mulosi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int32_t full_res = __mulosi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
- return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
+ return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
-static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n,
- const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u64 full_res;
+ uint64_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
- return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+ return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits);
#else
*res = zig_mulw_u64(lhs, rhs, bits);
- return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs;
+ return rhs != UINT64_C(0) && lhs > zig_maxInt_u(64, bits) / rhs;
#endif
}
-static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n,
- const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits);
-}
-
-zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
+static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i64 full_res;
+ int64_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
- zig_i64 full_res = __mulodi4(lhs, rhs, &overflow_int);
+ int overflow_int;
+ int64_t full_res = __mulodi4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
- return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
+ return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
-static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n,
- const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u8 full_res;
+ uint8_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
- return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+ return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u8)full_res;
+ *res = (uint8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n,
- const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i8 full_res;
+ int8_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
- return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+ return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i8)full_res;
+ *res = (int8_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n,
- const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_u16 full_res;
+ uint16_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
- return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+ return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits);
#else
- zig_u32 full_res;
+ uint32_t full_res;
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
- *res = (zig_u16)full_res;
+ *res = (uint16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n,
- const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
-}
-
-static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
- zig_i16 full_res;
+ int16_t full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
- return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+ return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits);
#else
- zig_i32 full_res;
+ int32_t full_res;
bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
- *res = (zig_i16)full_res;
+ *res = (int16_t)full_res;
return overflow;
#endif
}
-static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n,
- const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
-{
- for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits);
-}
-
#define zig_int_builtins(w) \
- static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
- return lhs > zig_maxInt(u##w, bits) >> rhs; \
+ return lhs > zig_maxInt_u(w, bits) >> rhs; \
} \
\
- static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_i##w(int##w##_t *res, int##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_i##w(lhs, rhs, bits); \
- zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \
- return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \
+ int##w##_t mask = (int##w##_t)(UINT##w##_MAX << (bits - rhs - 1)); \
+ return (lhs & mask) != INT##w##_C(0) && (lhs & mask) != mask; \
} \
\
- static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \
- return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \
+ return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
- if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
- return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
+ if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \
+ return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_adds_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \
- return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_subs_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_subs_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \
- return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
- static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
- zig_u##w res; \
- return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ static inline uint##w##_t zig_muls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
+ uint##w##_t res; \
+ return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \
} \
\
- static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
- zig_i##w res; \
+ static inline int##w##_t zig_muls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+ int##w##_t res; \
if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \
- return (lhs ^ rhs) < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ return (lhs ^ rhs) < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
}
zig_int_builtins(8)
zig_int_builtins(16)
@@ -988,89 +1002,89 @@ zig_int_builtins(32)
zig_int_builtins(64)
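The *s* helpers generated here saturate instead of wrapping. A standalone sketch (not part of the patch) of the 8-bit case in plain C; `demo_adds_i8` is a hypothetical stand-in for the generated zig_adds_i8 at bits = 8:

```c
#include <stdint.h>
#include <stdio.h>

static int8_t demo_adds_i8(int8_t lhs, int8_t rhs) {
    int sum = (int)lhs + (int)rhs;        /* int is wide enough, so no overflow here */
    if (sum > INT8_MAX) return INT8_MAX;  /* clamp high, as zig_adds_i8 does on overflow */
    if (sum < INT8_MIN) return INT8_MIN;  /* clamp low */
    return (int8_t)sum;
}

int main(void) {
    printf("%d\n", demo_adds_i8(100, 100));    /* 127  */
    printf("%d\n", demo_adds_i8(-100, -100));  /* -128 */
    return 0;
}
```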
#define zig_builtin8(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin8;
+typedef unsigned int zig_Builtin8;
#define zig_builtin16(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin16;
+typedef unsigned int zig_Builtin16;
#if INT_MIN <= INT32_MIN
#define zig_builtin32(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin32;
+typedef unsigned int zig_Builtin32;
#elif LONG_MIN <= INT32_MIN
#define zig_builtin32(name, val) __builtin_##name##l(val)
-typedef zig_c_ulong zig_Builtin32;
+typedef unsigned long zig_Builtin32;
#endif
#if INT_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name(val)
-typedef zig_c_uint zig_Builtin64;
+typedef unsigned int zig_Builtin64;
#elif LONG_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name##l(val)
-typedef zig_c_ulong zig_Builtin64;
+typedef unsigned long zig_Builtin64;
#elif LLONG_MIN <= INT64_MIN
#define zig_builtin64(name, val) __builtin_##name##ll(val)
-typedef zig_c_ulonglong zig_Builtin64;
+typedef unsigned long long zig_Builtin64;
#endif
-static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) {
+static inline uint8_t zig_byte_swap_u8(uint8_t val, uint8_t bits) {
return zig_wrap_u8(val >> (8 - bits), bits);
}
-static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) {
- return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits);
+static inline int8_t zig_byte_swap_i8(int8_t val, uint8_t bits) {
+ return zig_wrap_i8((int8_t)zig_byte_swap_u8((uint8_t)val, bits), bits);
}
-static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) {
- zig_u16 full_res;
+static inline uint16_t zig_byte_swap_u16(uint16_t val, uint8_t bits) {
+ uint16_t full_res;
#if zig_has_builtin(bswap16) || defined(zig_gnuc)
full_res = __builtin_bswap16(val);
#else
- full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 |
- (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0;
+ full_res = (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 0), 8) << 8 |
+ (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 8), 8) >> 0;
#endif
return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) {
- return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits);
+static inline int16_t zig_byte_swap_i16(int16_t val, uint8_t bits) {
+ return zig_wrap_i16((int16_t)zig_byte_swap_u16((uint16_t)val, bits), bits);
}
-static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) {
- zig_u32 full_res;
+static inline uint32_t zig_byte_swap_u32(uint32_t val, uint8_t bits) {
+ uint32_t full_res;
#if zig_has_builtin(bswap32) || defined(zig_gnuc)
full_res = __builtin_bswap32(val);
#else
- full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 |
- (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0;
+ full_res = (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 0), 16) << 16 |
+ (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 16), 16) >> 0;
#endif
return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) {
- return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits);
+static inline int32_t zig_byte_swap_i32(int32_t val, uint8_t bits) {
+ return zig_wrap_i32((int32_t)zig_byte_swap_u32((uint32_t)val, bits), bits);
}
-static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) {
- zig_u64 full_res;
+static inline uint64_t zig_byte_swap_u64(uint64_t val, uint8_t bits) {
+ uint64_t full_res;
#if zig_has_builtin(bswap64) || defined(zig_gnuc)
full_res = __builtin_bswap64(val);
#else
- full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 |
- (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0;
+ full_res = (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 0), 32) << 32 |
+ (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 32), 32) >> 0;
#endif
return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) {
- return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits);
+static inline int64_t zig_byte_swap_i64(int64_t val, uint8_t bits) {
+ return zig_wrap_i64((int64_t)zig_byte_swap_u64((uint64_t)val, bits), bits);
}
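The extra bits parameter matters for integers narrower than their storage type: the value is byte-swapped at full width and then shifted down. Standalone sketch (not part of the patch): swapping the u24 value 0x123456 held in a uint32_t gives 0x563412.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same steps as zig_byte_swap_u32 for bits = 24, without the builtin. */
static uint32_t demo_byte_swap_u24(uint32_t val) {
    uint32_t full = ((val & 0x000000FFu) << 24) |
                    ((val & 0x0000FF00u) <<  8) |
                    ((val & 0x00FF0000u) >>  8) |
                    ((val & 0xFF000000u) >> 24);   /* full 32-bit byte swap */
    return (full >> (32 - 24)) & 0x00FFFFFFu;      /* shift down, mask to 24 bits */
}

int main(void) {
    assert(demo_byte_swap_u24(0x123456u) == 0x563412u);
    printf("0x%06X\n", demo_byte_swap_u24(0x123456u));
    return 0;
}
```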
-static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
- zig_u8 full_res;
+static inline uint8_t zig_bit_reverse_u8(uint8_t val, uint8_t bits) {
+ uint8_t full_res;
#if zig_has_builtin(bitreverse8)
full_res = __builtin_bitreverse8(val);
#else
- static zig_u8 const lut[0x10] = {
+ static uint8_t const lut[0x10] = {
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf
};
@@ -1079,62 +1093,62 @@ static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
return zig_wrap_u8(full_res >> (8 - bits), bits);
}
-static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) {
- return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits);
+static inline int8_t zig_bit_reverse_i8(int8_t val, uint8_t bits) {
+ return zig_wrap_i8((int8_t)zig_bit_reverse_u8((uint8_t)val, bits), bits);
}
-static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) {
- zig_u16 full_res;
+static inline uint16_t zig_bit_reverse_u16(uint16_t val, uint8_t bits) {
+ uint16_t full_res;
#if zig_has_builtin(bitreverse16)
full_res = __builtin_bitreverse16(val);
#else
- full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 |
- (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0;
+ full_res = (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 0), 8) << 8 |
+ (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 8), 8) >> 0;
#endif
return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) {
- return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), bits);
+static inline int16_t zig_bit_reverse_i16(int16_t val, uint8_t bits) {
+ return zig_wrap_i16((int16_t)zig_bit_reverse_u16((uint16_t)val, bits), bits);
}
-static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) {
- zig_u32 full_res;
+static inline uint32_t zig_bit_reverse_u32(uint32_t val, uint8_t bits) {
+ uint32_t full_res;
#if zig_has_builtin(bitreverse32)
full_res = __builtin_bitreverse32(val);
#else
- full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 |
- (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0;
+ full_res = (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 0), 16) << 16 |
+ (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 16), 16) >> 0;
#endif
return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) {
- return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits);
+static inline int32_t zig_bit_reverse_i32(int32_t val, uint8_t bits) {
+ return zig_wrap_i32((int32_t)zig_bit_reverse_u32((uint32_t)val, bits), bits);
}
-static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) {
- zig_u64 full_res;
+static inline uint64_t zig_bit_reverse_u64(uint64_t val, uint8_t bits) {
+ uint64_t full_res;
#if zig_has_builtin(bitreverse64)
full_res = __builtin_bitreverse64(val);
#else
- full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 |
- (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0;
+ full_res = (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 0), 32) << 32 |
+ (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 32), 32) >> 0;
#endif
return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
- return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits);
+static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) {
+ return zig_wrap_i64((int64_t)zig_bit_reverse_u64((uint64_t)val, bits), bits);
}
#define zig_builtin_popcount_common(w) \
- static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_popcount_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_popcount_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_popcount_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(popcount) || defined(zig_gnuc)
#define zig_builtin_popcount(w) \
- static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \
(void)bits; \
return zig_builtin##w(popcount, val); \
} \
@@ -1142,12 +1156,12 @@ static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
zig_builtin_popcount_common(w)
#else
#define zig_builtin_popcount(w) \
- static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \
(void)bits; \
- zig_u##w temp = val - ((val >> 1) & (zig_maxInt_u##w / 3)); \
- temp = (temp & (zig_maxInt_u##w / 5)) + ((temp >> 2) & (zig_maxInt_u##w / 5)); \
- temp = (temp + (temp >> 4)) & (zig_maxInt_u##w / 17); \
- return temp * (zig_maxInt_u##w / 255) >> (w - 8); \
+ uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \
+ temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \
+ temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \
+ return temp * (UINT##w##_MAX / 255) >> (w - 8); \
} \
\
zig_builtin_popcount_common(w)
@@ -1158,12 +1172,12 @@ zig_builtin_popcount(32)
zig_builtin_popcount(64)
#define zig_builtin_ctz_common(w) \
- static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_ctz_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_ctz_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_ctz_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(ctz) || defined(zig_gnuc)
#define zig_builtin_ctz(w) \
- static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \
if (val == 0) return bits; \
return zig_builtin##w(ctz, val); \
} \
@@ -1171,7 +1185,7 @@ zig_builtin_popcount(64)
zig_builtin_ctz_common(w)
#else
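+/* Fallback: ~val & (val - 1) sets exactly the bits below the lowest set bit
+   of val, so its popcount equals the trailing-zero count (and equals `bits`
+   when val is zero). */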
#define zig_builtin_ctz(w) \
- static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \
return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \
} \
\
@@ -1183,12 +1197,12 @@ zig_builtin_ctz(32)
zig_builtin_ctz(64)
#define zig_builtin_clz_common(w) \
- static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \
- return zig_clz_u##w((zig_u##w)val, bits); \
+ static inline uint8_t zig_clz_i##w(int##w##_t val, uint8_t bits) { \
+ return zig_clz_u##w((uint##w##_t)val, bits); \
}
#if zig_has_builtin(clz) || defined(zig_gnuc)
#define zig_builtin_clz(w) \
- static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \
if (val == 0) return bits; \
return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \
} \
@@ -1196,7 +1210,7 @@ zig_builtin_ctz(64)
zig_builtin_clz_common(w)
#else
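+/* Fallback: counting leading zeros is counting trailing zeros of the
+   bit-reversed value. */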
#define zig_builtin_clz(w) \
- static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \
return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \
} \
\
@@ -1207,7 +1221,7 @@ zig_builtin_clz(16)
zig_builtin_clz(32)
zig_builtin_clz(64)
-/* ======================== 128-bit Integer Routines ======================== */
+/* ======================== 128-bit Integer Support ========================= */
#if !defined(zig_has_int128)
# if defined(__SIZEOF_INT128__)
@@ -1222,18 +1236,18 @@ zig_builtin_clz(64)
typedef unsigned __int128 zig_u128;
typedef signed __int128 zig_i128;
-#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
-#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo))
-#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
-#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
-#define zig_hi_u128(val) ((zig_u64)((val) >> 64))
-#define zig_lo_u128(val) ((zig_u64)((val) >> 0))
-#define zig_hi_i128(val) ((zig_i64)((val) >> 64))
-#define zig_lo_i128(val) ((zig_u64)((val) >> 0))
+#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
+#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo))
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
+#define zig_hi_u128(val) ((uint64_t)((val) >> 64))
+#define zig_lo_u128(val) ((uint64_t)((val) >> 0))
+#define zig_hi_i128(val) (( int64_t)((val) >> 64))
+#define zig_lo_i128(val) ((uint64_t)((val) >> 0))
#define zig_bitcast_u128(val) ((zig_u128)(val))
#define zig_bitcast_i128(val) ((zig_i128)(val))
#define zig_cmp_int128(Type) \
- static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (lhs > rhs) - (lhs < rhs); \
}
#define zig_bit_int128(Type, operation, operator) \
@@ -1243,32 +1257,32 @@ typedef signed __int128 zig_i128;
#else /* zig_has_int128 */
-#if __LITTLE_ENDIAN__ || _MSC_VER
-typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128;
-typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128;
+#if zig_little_endian
+typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128;
+typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128;
#else
-typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128;
-typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
+typedef struct { zig_align(16) uint64_t hi; uint64_t lo; } zig_u128;
+typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128;
#endif
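+/* The h##i and l##o splices rebuild the .hi/.lo designators after argument
+   substitution so they are not replaced by the hi/lo macro parameters. */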
-#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
-#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
+#define zig_make_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
+#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
-#if _MSC_VER
-#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
-#else
-#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo)
-#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo)
+#if _MSC_VER /* MSVC doesn't allow struct literals in constant expressions */
+#define zig_init_u128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#define zig_init_i128(hi, lo) { .h##i = (hi), .l##o = (lo) }
+#else /* But non-MSVC doesn't like the unprotected commas */
+#define zig_init_u128(hi, lo) zig_make_u128(hi, lo)
+#define zig_init_i128(hi, lo) zig_make_i128(hi, lo)
#endif
#define zig_hi_u128(val) ((val).hi)
#define zig_lo_u128(val) ((val).lo)
#define zig_hi_i128(val) ((val).hi)
#define zig_lo_i128(val) ((val).lo)
-#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo)
-#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo)
+#define zig_bitcast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo)
+#define zig_bitcast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo)
#define zig_cmp_int128(Type) \
- static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (lhs.hi == rhs.hi) \
? (lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \
: (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \
@@ -1280,10 +1294,10 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
#endif /* zig_has_int128 */
-#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64)
-#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64)
-#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64)
-#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64)
+#define zig_minInt_u128 zig_make_u128(zig_minInt_u64, zig_minInt_u64)
+#define zig_maxInt_u128 zig_make_u128(zig_maxInt_u64, zig_maxInt_u64)
+#define zig_minInt_i128 zig_make_i128(zig_minInt_i64, zig_minInt_u64)
+#define zig_maxInt_i128 zig_make_i128(zig_maxInt_i64, zig_maxInt_u64)
zig_cmp_int128(u128)
zig_cmp_int128(i128)
@@ -1297,28 +1311,33 @@ zig_bit_int128(i128, or, |)
zig_bit_int128(u128, xor, ^)
zig_bit_int128(i128, xor, ^)
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs);
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs);
#if zig_has_int128
-static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
- return val ^ zig_maxInt(u128, bits);
+static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
+ return val ^ zig_maxInt_u(128, bits);
}
-static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) {
(void)bits;
return ~val;
}
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) {
return lhs >> rhs;
}
-static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) {
return lhs << rhs;
}
-static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) {
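+    /* For negative lhs this evaluates ~((~lhs) >> rhs), which equals the
+       arithmetic shift without ever right-shifting a negative value (an
+       implementation-defined operation in C). */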
+ zig_i128 sign_mask = lhs < zig_make_i128(0, 0) ? -zig_make_i128(0, 1) : zig_make_i128(0, 0);
+ return ((lhs ^ sign_mask) >> rhs) ^ sign_mask;
+}
+
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) {
return lhs << rhs;
}
@@ -1363,40 +1382,46 @@ static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_as_i128(0, 0));
+ return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_make_i128(0, 0));
}
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return rem + (((lhs ^ rhs) & rem) < zig_as_i128(0, 0) ? rhs : zig_as_i128(0, 0));
+ return rem + (((lhs ^ rhs) & rem) < zig_make_i128(0, 0) ? rhs : zig_make_i128(0, 0));
}
#else /* zig_has_int128 */
-static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
- return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
+static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) {
+ return (zig_u128){ .hi = zig_not_u64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) };
}
-static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
- return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
+static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) {
+ return (zig_i128){ .hi = zig_not_i64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) };
}
-static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) };
- return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs };
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - UINT8_C(64)) };
+ return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (UINT8_C(64) - rhs) | lhs.lo >> rhs };
}
-static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 };
+ return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs };
}
-static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
- if (rhs == zig_as_u8(0)) return lhs;
- if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
- return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) {
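+    /* Software arithmetic shift over two 64-bit limbs: shifts of 64 or more
+       move the (sign-extended) high limb into the low limb, smaller shifts
+       funnel bits from hi into lo. */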
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = zig_shr_i64(lhs.hi, 63), .lo = zig_shr_i64(lhs.hi, (rhs - UINT8_C(64))) };
+ return (zig_i128){ .hi = zig_shr_i64(lhs.hi, rhs), .lo = lhs.lo >> rhs | (uint64_t)lhs.hi << (UINT8_C(64) - rhs) };
+}
+
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) {
+ if (rhs == UINT8_C(0)) return lhs;
+ if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 };
+ return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs };
}
static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) {
@@ -1424,14 +1449,14 @@ static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) {
}
zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs);
-static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_bitcast_u128(__multi3(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs)));
-}
-
static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) {
return __multi3(lhs, rhs);
}
+static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
+ return zig_bitcast_u128(zig_mul_i128(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs)));
+}
+
zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs);
static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) {
return __udivti3(lhs, rhs);
@@ -1454,11 +1479,11 @@ static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
zig_i128 rem = zig_rem_i128(lhs, rhs);
- return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0)));
+ return zig_add_i128(rem, ((lhs.hi ^ rhs.hi) & rem.hi) < INT64_C(0) ? rhs : zig_make_i128(0, 0));
}
static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0)));
+ return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_make_i128(0, 0)) < INT32_C(0)));
}
#endif /* zig_has_int128 */
@@ -1471,326 +1496,1303 @@ static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) {
}
static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_cmp_i128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_i128(lhs, rhs) < INT32_C(0) ? lhs : rhs;
}
static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) {
- return zig_cmp_u128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_u128(lhs, rhs) > INT32_C(0) ? lhs : rhs;
}
static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) {
- return zig_cmp_i128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs;
+ return zig_cmp_i128(lhs, rhs) > INT32_C(0) ? lhs : rhs;
}
-static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) {
- zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0);
- return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask);
+static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) {
+ return zig_and_u128(val, zig_maxInt_u(128, bits));
}
-static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) {
- return zig_and_u128(val, zig_maxInt(u128, bits));
+static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) {
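+    /* When more than 64 bits are kept only the high limb needs truncating;
+       otherwise wrap the low limb to `bits` and sign-extend it into the
+       high limb. */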
+ if (bits > UINT8_C(64)) return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val));
+ int64_t lo = zig_wrap_i64((int64_t)zig_lo_i128(val), bits);
+ return zig_make_i128(zig_shr_i64(lo, 63), (uint64_t)lo);
}
-static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) {
- return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val));
-}
-
-static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) {
return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline zig_i128 zig_shlw_i128(zig_i128 lhs, uint8_t rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits);
}
-static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_add_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits);
}
-static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
#if zig_has_int128
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow)
zig_u128 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_addw_u128(lhs, rhs, bits);
return *res < lhs;
#endif
}
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow)
zig_i128 full_res;
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow)
zig_u128 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_subw_u128(lhs, rhs, bits);
return *res > lhs;
#endif
}
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow)
zig_i128 full_res;
bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
-static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow)
zig_u128 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
- return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+ return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits);
#else
*res = zig_mulw_u128(lhs, rhs, bits);
- return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs;
+ return rhs != zig_make_u128(0, 0) && lhs > zig_maxInt_u(128, bits) / rhs;
#endif
}
-zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow)
zig_i128 full_res;
bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
- zig_c_int overflow_int;
+ int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
- return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
+ return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits);
}
#else /* zig_has_int128 */
-static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) {
- return overflow ||
- zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) ||
- zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0);
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
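+    /* Add the low limbs at full width and feed their carry into the sum of
+       the high limbs; the overflow flags of the two high-limb additions are
+       combined into the result. */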
+ uint64_t hi;
+ bool overflow = zig_addo_u64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_addo_u64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) {
- return overflow ||
- zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) ||
- zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0);
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int64_t hi;
+ bool overflow = zig_addo_i64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_addo_i64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
- zig_u128 full_res;
- bool overflow =
- zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
- zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
- *res = zig_wrap_u128(full_res, bits);
- return zig_overflow_u128(overflow, full_res, bits);
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
+ uint64_t hi;
+ bool overflow = zig_subo_u64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_subo_u64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
- zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
- *res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int64_t hi;
+ bool overflow = zig_subo_i64(&hi, lhs.hi, rhs.hi, bits - 64);
+ return overflow ^ zig_subo_i64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64);
}
-static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
- zig_u128 full_res;
- bool overflow =
- zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) |
- zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64);
- *res = zig_wrap_u128(full_res, bits);
- return zig_overflow_u128(overflow, full_res, bits);
-}
-
-zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
- zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
- *res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
-}
-
-static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
*res = zig_mulw_u128(lhs, rhs, bits);
- return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+ return zig_cmp_u128(*res, zig_make_u128(0, 0)) != INT32_C(0) &&
+ zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0);
}
-zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
- zig_c_int overflow_int;
+zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow);
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+ int overflow_int;
zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
+ bool overflow = overflow_int != 0 ||
+ zig_cmp_i128(full_res, zig_minInt_i(128, bits)) < INT32_C(0) ||
+ zig_cmp_i128(full_res, zig_maxInt_i(128, bits)) > INT32_C(0);
*res = zig_wrap_i128(full_res, bits);
- return zig_overflow_i128(overflow_int, full_res, bits);
+ return overflow;
}
#endif /* zig_has_int128 */
-static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, uint8_t rhs, uint8_t bits) {
*res = zig_shlw_u128(lhs, rhs, bits);
- return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0);
+ return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0);
}
-static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8_t bits) {
*res = zig_shlw_i128(lhs, rhs, bits);
- zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
- return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
- zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
+ zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1)));
+ return zig_cmp_i128(zig_and_i128(lhs, mask), zig_make_i128(0, 0)) != INT32_C(0) &&
+ zig_cmp_i128(zig_and_i128(lhs, mask), mask) != INT32_C(0);
}
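+/* Saturating variants: clamp to the minimum/maximum value representable in
+   `bits` instead of wrapping when the operation overflows. */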
-static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0))
- return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs;
-
-#if zig_has_int128
- return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res;
-#else
- return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res;
-#endif
+ if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0))
+ return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs;
+ return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
- if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res;
- return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res;
+ return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+ return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_addo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res;
+ return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt_u(128, bits) : res;
}
-static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_subo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
- return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+ return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res;
}
-static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
zig_i128 res;
if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res;
- return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+ return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
}
-static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) {
- if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits);
- if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64));
- return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64));
+static inline uint8_t zig_clz_u128(zig_u128 val, uint8_t bits) {
+ if (bits <= UINT8_C(64)) return zig_clz_u64(zig_lo_u128(val), bits);
+ if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - UINT8_C(64));
+ return zig_clz_u64(zig_lo_u128(val), UINT8_C(64)) + (bits - UINT8_C(64));
}
-static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_clz_i128(zig_i128 val, uint8_t bits) {
return zig_clz_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) {
- if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64));
- return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64);
+static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) {
+ if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), UINT8_C(64));
+ return zig_ctz_u64(zig_hi_u128(val), bits - UINT8_C(64)) + UINT8_C(64);
}
-static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_ctz_i128(zig_i128 val, uint8_t bits) {
return zig_ctz_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) {
- return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) +
- zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64));
+static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) {
+ return zig_popcount_u64(zig_hi_u128(val), bits - UINT8_C(64)) +
+ zig_popcount_u64(zig_lo_u128(val), UINT8_C(64));
}
-static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) {
+static inline uint8_t zig_popcount_i128(zig_i128 val, uint8_t bits) {
return zig_popcount_u128(zig_bitcast_u128(val), bits);
}
-static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) {
+static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) {
zig_u128 full_res;
#if zig_has_builtin(bswap128)
full_res = __builtin_bswap128(val);
#else
- full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)),
- zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64)));
+ full_res = zig_make_u128(zig_byte_swap_u64(zig_lo_u128(val), UINT8_C(64)),
+ zig_byte_swap_u64(zig_hi_u128(val), UINT8_C(64)));
#endif
- return zig_shr_u128(full_res, zig_as_u8(128) - bits);
+ return zig_shr_u128(full_res, UINT8_C(128) - bits);
}
-static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_byte_swap_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits));
}
-static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
- return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)),
- zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))),
- zig_as_u8(128) - bits);
+static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) {
+ return zig_shr_u128(zig_make_u128(zig_bit_reverse_u64(zig_lo_u128(val), UINT8_C(64)),
+ zig_bit_reverse_u64(zig_hi_u128(val), UINT8_C(64))),
+ UINT8_C(128) - bits);
}
-static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
+static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) {
return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
}
+/* ========================== Big Integer Support =========================== */
+
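+/* Big integers are passed as byte arrays; zig_int_bytes returns their storage
+   size: the byte count of `bits`, rounded up to a multiple of
+   ZIG_TARGET_MAX_INT_ALIGNMENT (with the alignment halved down until it no
+   longer exceeds the byte count). */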
+static inline uint16_t zig_int_bytes(uint16_t bits) {
+ uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT;
+ uint16_t alignment = ZIG_TARGET_MAX_INT_ALIGNMENT;
+ while (alignment / 2 >= bytes) alignment /= 2;
+ return (bytes + alignment - 1) / alignment * alignment;
+}
+
+static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
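+    /* Compare limb by limb starting at the most significant limb; only that
+       first limb is compared as signed. The loops step down through 128-,
+       64-, 32-, 16- and 8-bit limbs to cover any storage size. */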
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ bool do_signed = is_signed;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ int32_t limb_cmp;
+
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_i128(lhs_limb, rhs_limb);
+ do_signed = false;
+ } else {
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_cmp = zig_cmp_u128(lhs_limb, rhs_limb);
+ }
+
+ if (limb_cmp != 0) return limb_cmp;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (do_signed) {
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ do_signed = false;
+ } else {
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return 0;
+}
+
+static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
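+    /* Ripple-carry addition from the least significant limb upward. Only the
+       most significant limb uses the requested signedness and the truncated
+       bit count; the final carry/overflow is returned. */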
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
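+    /* Same limb-by-limb structure as zig_addo_big, but propagating a borrow
+       instead of a carry. */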
+ uint8_t *res_bytes = res;
+ const uint8_t *lhs_bytes = lhs;
+ const uint8_t *rhs_bytes = rhs;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t top_bits = remaining_bytes * 8 - bits;
+ bool overflow = false;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+ uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
+ zig_i128 res_limb;
+ zig_i128 tmp_limb;
+ zig_i128 lhs_limb;
+ zig_i128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ zig_u128 res_limb;
+ zig_u128 tmp_limb;
+ zig_u128 lhs_limb;
+ zig_u128 rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+ uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
+ int64_t res_limb;
+ int64_t tmp_limb;
+ int64_t lhs_limb;
+ int64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint64_t res_limb;
+ uint64_t tmp_limb;
+ uint64_t lhs_limb;
+ uint64_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+ uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
+ int32_t res_limb;
+ int32_t tmp_limb;
+ int32_t lhs_limb;
+ int32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint32_t res_limb;
+ uint32_t tmp_limb;
+ uint32_t lhs_limb;
+ uint32_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+ uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
+ int16_t res_limb;
+ int16_t tmp_limb;
+ int16_t lhs_limb;
+ int16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint16_t res_limb;
+ uint16_t tmp_limb;
+ uint16_t lhs_limb;
+ uint16_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+ uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
+
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
+ int8_t res_limb;
+ int8_t tmp_limb;
+ int8_t lhs_limb;
+ int8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ } else {
+ uint8_t res_limb;
+ uint8_t tmp_limb;
+ uint8_t lhs_limb;
+ uint8_t rhs_limb;
+ bool limb_overflow;
+
+ memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
+ memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
+ limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
+ overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
+ memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return overflow;
+}
+
+static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ (void)zig_addo_big(res, lhs, rhs, is_signed, bits);
+}
+
+static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ (void)zig_subo_big(res, lhs, rhs, is_signed, bits);
+}
+
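+/* Unsigned big-int division and remainder defer to the external compiler-rt
+   helpers __udivei4 and __umodei4; the signed variants are not handled here
+   and trap. */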
+zig_extern void __udivei4(uint32_t *res, const uint32_t *lhs, const uint32_t *rhs, uintptr_t bits);
+static inline void zig_div_trunc_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ if (!is_signed) {
+ __udivei4(res, lhs, rhs, bits);
+ return;
+ }
+
+ zig_trap();
+}
+
+static inline void zig_div_floor_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ if (!is_signed) {
+ zig_div_trunc_big(res, lhs, rhs, is_signed, bits);
+ return;
+ }
+
+ zig_trap();
+}
+
+zig_extern void __umodei4(uint32_t *res, const uint32_t *lhs, const uint32_t *rhs, uintptr_t bits);
+static inline void zig_rem_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ if (!is_signed) {
+ __umodei4(res, lhs, rhs, bits);
+ return;
+ }
+
+ zig_trap();
+}
+
+static inline void zig_mod_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
+ if (!is_signed) {
+ zig_rem_big(res, lhs, rhs, is_signed, bits);
+ return;
+ }
+
+ zig_trap();
+}
+
+static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
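+    /* Scan from the most significant limb down, ignoring the top limb's
+       padding bits, and stop at the first limb containing a set bit. */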
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t skip_bits = remaining_bytes * 8 - bits;
+ uint16_t total_lz = 0;
+ uint16_t limb_lz;
+ (void)is_signed;
+
+#if zig_little_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u128(val_limb, 128 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 128 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u64(val_limb, 64 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 64 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u32(val_limb, 32 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 32 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u16(val_limb, 16 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 16 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_little_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_lz = zig_clz_u8(val_limb, 8 - skip_bits);
+ }
+
+ total_lz += limb_lz;
+ if (limb_lz < 8 - skip_bits) return total_lz;
+ skip_bits = 0;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_big_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_lz;
+}
+
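+/* Counts trailing zeros limb by limb, starting from the least significant limb, and stops at the first set bit. */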
+static inline uint16_t zig_ctz_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_tz = 0;
+ uint16_t limb_tz;
+ (void)is_signed;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u128(val_limb, 128);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 128) return total_tz;
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u64(val_limb, 64);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 64) return total_tz;
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u32(val_limb, 32);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 32) return total_tz;
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u16(val_limb, 16);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 16) return total_tz;
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ limb_tz = zig_ctz_u8(val_limb, 8);
+ }
+
+ total_tz += limb_tz;
+ if (limb_tz < 8) return total_tz;
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_tz;
+}
+
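+/* Sums the popcount of every limb; the endianness checks only affect traversal order, not the result. */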
+static inline uint16_t zig_popcount_big(const void *val, bool is_signed, uint16_t bits) {
+ const uint8_t *val_bytes = val;
+ uint16_t byte_offset = 0;
+ uint16_t remaining_bytes = zig_int_bytes(bits);
+ uint16_t total_pc = 0;
+ (void)is_signed;
+
+#if zig_big_endian
+ byte_offset = remaining_bytes;
+#endif
+
+ while (remaining_bytes >= 128 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 128 / CHAR_BIT;
+#endif
+
+ {
+ zig_u128 val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u128(val_limb, 128);
+ }
+
+ remaining_bytes -= 128 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 128 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 64 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 64 / CHAR_BIT;
+#endif
+
+ {
+ uint64_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u64(val_limb, 64);
+ }
+
+ remaining_bytes -= 64 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 64 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 32 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 32 / CHAR_BIT;
+#endif
+
+ {
+ uint32_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+ total_pc += zig_popcount_u32(val_limb, 32);
+ }
+
+ remaining_bytes -= 32 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 32 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 16 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 16 / CHAR_BIT;
+#endif
+
+ {
+ uint16_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u16(val_limb, 16);
+ }
+
+ remaining_bytes -= 16 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 16 / CHAR_BIT;
+#endif
+ }
+
+ while (remaining_bytes >= 8 / CHAR_BIT) {
+#if zig_big_endian
+ byte_offset -= 8 / CHAR_BIT;
+#endif
+
+ {
+ uint8_t val_limb;
+
+ memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb));
+            total_pc += zig_popcount_u8(val_limb, 8);
+ }
+
+ remaining_bytes -= 8 / CHAR_BIT;
+
+#if zig_little_endian
+ byte_offset += 8 / CHAR_BIT;
+#endif
+ }
+
+ return total_pc;
+}
+
/* ========================= Floating Point Support ========================= */
#if _MSC_VER
@@ -1810,252 +2812,253 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
#if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc)
#define zig_has_float_builtins 1
-#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg)
-#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg)
-#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg)
-#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg)
-#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg)
-#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg)
+#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16(__builtin_##name, )(arg)
+#define zig_make_special_f32(sign, name, arg, repr) sign zig_make_f32(__builtin_##name, )(arg)
+#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg)
+#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg)
+#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg)
#else
#define zig_has_float_builtins 0
-#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
-#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
-#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
-#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
-#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
-#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr)
+#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr)
+#define zig_make_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr)
+#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr)
+#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr)
+#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr)
#endif
#define zig_has_f16 1
#define zig_bitSizeOf_f16 16
+typedef int16_t zig_repr_f16;
#define zig_libc_name_f16(name) __##name##h
-#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr)
+#define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr)
#if FLT_MANT_DIG == 11
typedef float zig_f16;
-#define zig_as_f16(fp, repr) fp##f
+#define zig_make_f16(fp, repr) fp##f
#elif DBL_MANT_DIG == 11
typedef double zig_f16;
-#define zig_as_f16(fp, repr) fp
+#define zig_make_f16(fp, repr) fp
#elif LDBL_MANT_DIG == 11
#define zig_bitSizeOf_c_longdouble 16
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f16 zig_repr_c_longdouble;
+#endif
typedef long double zig_f16;
-#define zig_as_f16(fp, repr) fp##l
+#define zig_make_f16(fp, repr) fp##l
#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc))
typedef _Float16 zig_f16;
-#define zig_as_f16(fp, repr) fp##f16
+#define zig_make_f16(fp, repr) fp##f16
#elif defined(__SIZEOF_FP16__)
typedef __fp16 zig_f16;
-#define zig_as_f16(fp, repr) fp##f16
+#define zig_make_f16(fp, repr) fp##f16
#else
#undef zig_has_f16
#define zig_has_f16 0
-#define zig_repr_f16 i16
-typedef zig_i16 zig_f16;
-#define zig_as_f16(fp, repr) repr
-#undef zig_as_special_f16
-#define zig_as_special_f16(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f16
-#define zig_as_special_constant_f16(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f16 16
+typedef int16_t zig_f16;
+#define zig_make_f16(fp, repr) repr
+#undef zig_make_special_f16
+#define zig_make_special_f16(sign, name, arg, repr) repr
+#undef zig_init_special_f16
+#define zig_init_special_f16(sign, name, arg, repr) repr
#endif
#define zig_has_f32 1
#define zig_bitSizeOf_f32 32
+typedef int32_t zig_repr_f32;
#define zig_libc_name_f32(name) name##f
#if _MSC_VER
-#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, )
+#define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, )
#else
-#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr)
+#define zig_init_special_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr)
#endif
#if FLT_MANT_DIG == 24
typedef float zig_f32;
-#define zig_as_f32(fp, repr) fp##f
+#define zig_make_f32(fp, repr) fp##f
#elif DBL_MANT_DIG == 24
typedef double zig_f32;
-#define zig_as_f32(fp, repr) fp
+#define zig_make_f32(fp, repr) fp
#elif LDBL_MANT_DIG == 24
#define zig_bitSizeOf_c_longdouble 32
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f32 zig_repr_c_longdouble;
+#endif
typedef long double zig_f32;
-#define zig_as_f32(fp, repr) fp##l
+#define zig_make_f32(fp, repr) fp##l
#elif FLT32_MANT_DIG == 24
typedef _Float32 zig_f32;
-#define zig_as_f32(fp, repr) fp##f32
+#define zig_make_f32(fp, repr) fp##f32
#else
#undef zig_has_f32
#define zig_has_f32 0
-#define zig_repr_f32 i32
-typedef zig_i32 zig_f32;
-#define zig_as_f32(fp, repr) repr
-#undef zig_as_special_f32
-#define zig_as_special_f32(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f32
-#define zig_as_special_constant_f32(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f32 32
+typedef int32_t zig_f32;
+#define zig_make_f32(fp, repr) repr
+#undef zig_make_special_f32
+#define zig_make_special_f32(sign, name, arg, repr) repr
+#undef zig_init_special_f32
+#define zig_init_special_f32(sign, name, arg, repr) repr
#endif
#define zig_has_f64 1
#define zig_bitSizeOf_f64 64
+typedef int64_t zig_repr_f64;
#define zig_libc_name_f64(name) name
#if _MSC_VER
#ifdef ZIG_TARGET_ABI_MSVC
#define zig_bitSizeOf_c_longdouble 64
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
#endif
-#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, )
+#endif
+#define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, )
#else /* _MSC_VER */
-#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr)
+#define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr)
#endif /* _MSC_VER */
#if FLT_MANT_DIG == 53
typedef float zig_f64;
-#define zig_as_f64(fp, repr) fp##f
+#define zig_make_f64(fp, repr) fp##f
#elif DBL_MANT_DIG == 53
typedef double zig_f64;
-#define zig_as_f64(fp, repr) fp
+#define zig_make_f64(fp, repr) fp
#elif LDBL_MANT_DIG == 53
#define zig_bitSizeOf_c_longdouble 64
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f64 zig_repr_c_longdouble;
+#endif
typedef long double zig_f64;
-#define zig_as_f64(fp, repr) fp##l
+#define zig_make_f64(fp, repr) fp##l
#elif FLT64_MANT_DIG == 53
typedef _Float64 zig_f64;
-#define zig_as_f64(fp, repr) fp##f64
+#define zig_make_f64(fp, repr) fp##f64
#elif FLT32X_MANT_DIG == 53
typedef _Float32x zig_f64;
-#define zig_as_f64(fp, repr) fp##f32x
+#define zig_make_f64(fp, repr) fp##f32x
#else
#undef zig_has_f64
#define zig_has_f64 0
-#define zig_repr_f64 i64
-typedef zig_i64 zig_f64;
-#define zig_as_f64(fp, repr) repr
-#undef zig_as_special_f64
-#define zig_as_special_f64(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f64
-#define zig_as_special_constant_f64(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_f64 64
+typedef int64_t zig_f64;
+#define zig_make_f64(fp, repr) repr
+#undef zig_make_special_f64
+#define zig_make_special_f64(sign, name, arg, repr) repr
+#undef zig_init_special_f64
+#define zig_init_special_f64(sign, name, arg, repr) repr
#endif
#define zig_has_f80 1
#define zig_bitSizeOf_f80 80
+typedef zig_i128 zig_repr_f80;
#define zig_libc_name_f80(name) __##name##x
-#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr)
+#define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr)
#if FLT_MANT_DIG == 64
typedef float zig_f80;
-#define zig_as_f80(fp, repr) fp##f
+#define zig_make_f80(fp, repr) fp##f
#elif DBL_MANT_DIG == 64
typedef double zig_f80;
-#define zig_as_f80(fp, repr) fp
+#define zig_make_f80(fp, repr) fp
#elif LDBL_MANT_DIG == 64
#define zig_bitSizeOf_c_longdouble 80
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f80 zig_repr_c_longdouble;
+#endif
typedef long double zig_f80;
-#define zig_as_f80(fp, repr) fp##l
+#define zig_make_f80(fp, repr) fp##l
#elif FLT80_MANT_DIG == 64
typedef _Float80 zig_f80;
-#define zig_as_f80(fp, repr) fp##f80
+#define zig_make_f80(fp, repr) fp##f80
#elif FLT64X_MANT_DIG == 64
typedef _Float64x zig_f80;
-#define zig_as_f80(fp, repr) fp##f64x
+#define zig_make_f80(fp, repr) fp##f64x
#elif defined(__SIZEOF_FLOAT80__)
typedef __float80 zig_f80;
-#define zig_as_f80(fp, repr) fp##l
+#define zig_make_f80(fp, repr) fp##l
#else
#undef zig_has_f80
#define zig_has_f80 0
-#define zig_repr_f80 i128
+#define zig_bitSizeOf_repr_f80 128
typedef zig_i128 zig_f80;
-#define zig_as_f80(fp, repr) repr
-#undef zig_as_special_f80
-#define zig_as_special_f80(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f80
-#define zig_as_special_constant_f80(sign, name, arg, repr) repr
+#define zig_make_f80(fp, repr) repr
+#undef zig_make_special_f80
+#define zig_make_special_f80(sign, name, arg, repr) repr
+#undef zig_init_special_f80
+#define zig_init_special_f80(sign, name, arg, repr) repr
#endif
#define zig_has_f128 1
#define zig_bitSizeOf_f128 128
+typedef zig_i128 zig_repr_f128;
#define zig_libc_name_f128(name) name##q
-#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr)
+#define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr)
#if FLT_MANT_DIG == 113
typedef float zig_f128;
-#define zig_as_f128(fp, repr) fp##f
+#define zig_make_f128(fp, repr) fp##f
#elif DBL_MANT_DIG == 113
typedef double zig_f128;
-#define zig_as_f128(fp, repr) fp
+#define zig_make_f128(fp, repr) fp
#elif LDBL_MANT_DIG == 113
#define zig_bitSizeOf_c_longdouble 128
+#ifndef ZIG_TARGET_ABI_MSVC
+typedef zig_repr_f128 zig_repr_c_longdouble;
+#endif
typedef long double zig_f128;
-#define zig_as_f128(fp, repr) fp##l
+#define zig_make_f128(fp, repr) fp##l
#elif FLT128_MANT_DIG == 113
typedef _Float128 zig_f128;
-#define zig_as_f128(fp, repr) fp##f128
+#define zig_make_f128(fp, repr) fp##f128
#elif FLT64X_MANT_DIG == 113
typedef _Float64x zig_f128;
-#define zig_as_f128(fp, repr) fp##f64x
+#define zig_make_f128(fp, repr) fp##f64x
#elif defined(__SIZEOF_FLOAT128__)
typedef __float128 zig_f128;
-#define zig_as_f128(fp, repr) fp##q
-#undef zig_as_special_f128
-#define zig_as_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg)
+#define zig_make_f128(fp, repr) fp##q
+#undef zig_make_special_f128
+#define zig_make_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg)
#else
#undef zig_has_f128
#define zig_has_f128 0
-#define zig_repr_f128 i128
+#define zig_bitSizeOf_repr_f128 128
typedef zig_i128 zig_f128;
-#define zig_as_f128(fp, repr) repr
-#undef zig_as_special_f128
-#define zig_as_special_f128(sign, name, arg, repr) repr
-#undef zig_as_special_constant_f128
-#define zig_as_special_constant_f128(sign, name, arg, repr) repr
+#define zig_make_f128(fp, repr) repr
+#undef zig_make_special_f128
+#define zig_make_special_f128(sign, name, arg, repr) repr
+#undef zig_init_special_f128
+#define zig_init_special_f128(sign, name, arg, repr) repr
#endif
-#define zig_has_c_longdouble 1
-
-#ifdef ZIG_TARGET_ABI_MSVC
-#define zig_libc_name_c_longdouble(name) name
-#else
-#define zig_libc_name_c_longdouble(name) name##l
-#endif
-
-#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr)
#ifdef zig_bitSizeOf_c_longdouble
+#define zig_has_c_longdouble 1
#ifdef ZIG_TARGET_ABI_MSVC
-typedef double zig_c_longdouble;
#undef zig_bitSizeOf_c_longdouble
#define zig_bitSizeOf_c_longdouble 64
-#define zig_as_c_longdouble(fp, repr) fp
+typedef zig_f64 zig_c_longdouble;
+typedef zig_repr_f64 zig_repr_c_longdouble;
#else
typedef long double zig_c_longdouble;
-#define zig_as_c_longdouble(fp, repr) fp##l
#endif
#else /* zig_bitSizeOf_c_longdouble */
-#undef zig_has_c_longdouble
#define zig_has_c_longdouble 0
-#define zig_bitSizeOf_c_longdouble 80
-#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80
-#define zig_repr_c_longdouble i128
-typedef zig_i128 zig_c_longdouble;
-#define zig_as_c_longdouble(fp, repr) repr
-#undef zig_as_special_c_longdouble
-#define zig_as_special_c_longdouble(sign, name, arg, repr) repr
-#undef zig_as_special_constant_c_longdouble
-#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr
+#define zig_bitSizeOf_repr_c_longdouble 128
+typedef zig_f128 zig_c_longdouble;
+typedef zig_repr_f128 zig_repr_c_longdouble;
#endif /* zig_bitSizeOf_c_longdouble */
#if !zig_has_float_builtins
-#define zig_float_from_repr(Type, ReprType) \
- static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \
- return *((zig_##Type*)&repr); \
+#define zig_float_from_repr(Type) \
+ static inline zig_##Type zig_float_from_repr_##Type(zig_repr_##Type repr) { \
+ zig_##Type result; \
+ memcpy(&result, &repr, sizeof(result)); \
+ return result; \
}
-zig_float_from_repr(f16, u16)
-zig_float_from_repr(f32, u32)
-zig_float_from_repr(f64, u64)
-zig_float_from_repr(f80, u128)
-zig_float_from_repr(f128, u128)
-#if zig_bitSizeOf_c_longdouble == 80
-zig_float_from_repr(c_longdouble, u128)
-#else
-#define zig_expand_float_from_repr(Type, ReprType) zig_float_from_repr(Type, ReprType)
-zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_longdouble))
-#endif
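+/* The repr bytes are copied with memcpy rather than the previous pointer cast, which keeps the reinterpretation free of strict-aliasing issues. */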
+zig_float_from_repr(f16)
+zig_float_from_repr(f32)
+zig_float_from_repr(f64)
+zig_float_from_repr(f80)
+zig_float_from_repr(f128)
#endif
#define zig_cast_f16 (zig_f16)
@@ -2064,41 +3067,42 @@ zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_lo
#if _MSC_VER && !zig_has_f128
#define zig_cast_f80
-#define zig_cast_c_longdouble
#define zig_cast_f128
#else
#define zig_cast_f80 (zig_f80)
-#define zig_cast_c_longdouble (zig_c_longdouble)
#define zig_cast_f128 (zig_f128)
#endif
#define zig_convert_builtin(ResType, operation, ArgType, version) \
- zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(zig_##ArgType);
-zig_convert_builtin(f16, trunc, f32, 2)
-zig_convert_builtin(f16, trunc, f64, 2)
-zig_convert_builtin(f16, trunc, f80, 2)
-zig_convert_builtin(f16, trunc, f128, 2)
-zig_convert_builtin(f32, extend, f16, 2)
-zig_convert_builtin(f32, trunc, f64, 2)
-zig_convert_builtin(f32, trunc, f80, 2)
-zig_convert_builtin(f32, trunc, f128, 2)
-zig_convert_builtin(f64, extend, f16, 2)
-zig_convert_builtin(f64, extend, f32, 2)
-zig_convert_builtin(f64, trunc, f80, 2)
-zig_convert_builtin(f64, trunc, f128, 2)
-zig_convert_builtin(f80, extend, f16, 2)
-zig_convert_builtin(f80, extend, f32, 2)
-zig_convert_builtin(f80, extend, f64, 2)
-zig_convert_builtin(f80, trunc, f128, 2)
-zig_convert_builtin(f128, extend, f16, 2)
-zig_convert_builtin(f128, extend, f32, 2)
-zig_convert_builtin(f128, extend, f64, 2)
-zig_convert_builtin(f128, extend, f80, 2)
+ zig_extern ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \
+ zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ArgType);
+zig_convert_builtin(zig_f16, trunc, zig_f32, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f64, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f16, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f32, extend, zig_f16, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f64, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f32, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f64, extend, zig_f16, 2)
+zig_convert_builtin(zig_f64, extend, zig_f32, 2)
+zig_convert_builtin(zig_f64, trunc, zig_f80, 2)
+zig_convert_builtin(zig_f64, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f80, extend, zig_f16, 2)
+zig_convert_builtin(zig_f80, extend, zig_f32, 2)
+zig_convert_builtin(zig_f80, extend, zig_f64, 2)
+zig_convert_builtin(zig_f80, trunc, zig_f128, 2)
+zig_convert_builtin(zig_f128, extend, zig_f16, 2)
+zig_convert_builtin(zig_f128, extend, zig_f32, 2)
+zig_convert_builtin(zig_f128, extend, zig_f64, 2)
+zig_convert_builtin(zig_f128, extend, zig_f80, 2)
#define zig_float_negate_builtin_0(Type) \
static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \
- return zig_expand_concat(zig_xor_, zig_repr_##Type)(arg, zig_expand_minInt(zig_repr_##Type, zig_bitSizeOf_##Type)); \
+ return zig_expand_concat(zig_xor_i, zig_bitSizeOf_repr_##Type)( \
+ arg, \
+ zig_minInt_i(zig_bitSizeOf_repr_##Type, zig_bitSizeOf_##Type) \
+ ); \
}
#define zig_float_negate_builtin_1(Type) \
static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \
@@ -2106,28 +3110,28 @@ zig_convert_builtin(f128, extend, f80, 2)
}
#define zig_float_less_builtin_0(Type, operation) \
- zig_extern zig_i32 zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##Type), 2)(zig_##Type, zig_##Type); \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
- return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 2)(lhs, rhs); \
+ zig_extern int32_t zig_expand_concat(zig_expand_concat(__##operation, \
+ zig_compiler_rt_abbrev_zig_##Type), 2)(zig_##Type, zig_##Type); \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 2)(lhs, rhs); \
}
#define zig_float_less_builtin_1(Type, operation) \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
return (!(lhs <= rhs) - (lhs < rhs)); \
}
#define zig_float_greater_builtin_0(Type, operation) \
zig_float_less_builtin_0(Type, operation)
#define zig_float_greater_builtin_1(Type, operation) \
- static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
+ static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
return ((lhs > rhs) - !(lhs >= rhs)); \
}
#define zig_float_binary_builtin_0(Type, operation, operator) \
zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \
- zig_compiler_rt_abbrev_##Type), 3)(zig_##Type, zig_##Type); \
+ zig_compiler_rt_abbrev_zig_##Type), 3)(zig_##Type, zig_##Type); \
static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
- return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 3)(lhs, rhs); \
+ return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 3)(lhs, rhs); \
}
#define zig_float_binary_builtin_1(Type, operation, operator) \
static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \
@@ -2135,18 +3139,18 @@ zig_convert_builtin(f128, extend, f80, 2)
}
#define zig_float_builtins(Type) \
- zig_convert_builtin(i32, fix, Type, ) \
- zig_convert_builtin(u32, fixuns, Type, ) \
- zig_convert_builtin(i64, fix, Type, ) \
- zig_convert_builtin(u64, fixuns, Type, ) \
- zig_convert_builtin(i128, fix, Type, ) \
- zig_convert_builtin(u128, fixuns, Type, ) \
- zig_convert_builtin(Type, float, i32, ) \
- zig_convert_builtin(Type, floatun, u32, ) \
- zig_convert_builtin(Type, float, i64, ) \
- zig_convert_builtin(Type, floatun, u64, ) \
- zig_convert_builtin(Type, float, i128, ) \
- zig_convert_builtin(Type, floatun, u128, ) \
+ zig_convert_builtin( int32_t, fix, zig_##Type, ) \
+ zig_convert_builtin(uint32_t, fixuns, zig_##Type, ) \
+ zig_convert_builtin( int64_t, fix, zig_##Type, ) \
+ zig_convert_builtin(uint64_t, fixuns, zig_##Type, ) \
+ zig_convert_builtin(zig_i128, fix, zig_##Type, ) \
+ zig_convert_builtin(zig_u128, fixuns, zig_##Type, ) \
+ zig_convert_builtin(zig_##Type, float, int32_t, ) \
+ zig_convert_builtin(zig_##Type, floatun, uint32_t, ) \
+ zig_convert_builtin(zig_##Type, float, int64_t, ) \
+ zig_convert_builtin(zig_##Type, floatun, uint64_t, ) \
+ zig_convert_builtin(zig_##Type, float, zig_i128, ) \
+ zig_convert_builtin(zig_##Type, floatun, zig_u128, ) \
zig_expand_concat(zig_float_negate_builtin_, zig_has_##Type)(Type) \
zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, cmp) \
zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, ne) \
@@ -2194,155 +3198,162 @@ zig_float_builtins(f32)
zig_float_builtins(f64)
zig_float_builtins(f80)
zig_float_builtins(f128)
-zig_float_builtins(c_longdouble)
#if _MSC_VER && (_M_IX86 || _M_X64)
// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64
-#define zig_msvc_atomics(Type, suffix) \
- static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
- zig_##Type comparand = *expected; \
- zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
+#define zig_msvc_atomics(ZigType, Type, suffix) \
+ static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \
+ Type comparand = *expected; \
+ Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \
bool exchanged = initial == comparand; \
if (!exchanged) { \
*expected = initial; \
} \
return exchanged; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_xchg_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedExchange##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_add_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedExchangeAdd##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_sub_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = prev - value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_or_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedOr##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_xor_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedXor##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_and_##ZigType(Type volatile* obj, Type value) { \
return _InterlockedAnd##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_nand_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = ~(prev & value); \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_min_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = value < prev ? value : prev; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline Type zig_msvc_atomicrmw_max_##ZigType(Type volatile* obj, Type value) { \
bool success = false; \
- zig_##Type new; \
- zig_##Type prev; \
+ Type new; \
+ Type prev; \
while (!success) { \
prev = *obj; \
new = value > prev ? value : prev; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \
+ success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \
} \
return prev; \
} \
- static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \
+ static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \
_InterlockedExchange##suffix(obj, value); \
} \
- static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \
+ static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \
return _InterlockedOr##suffix(obj, 0); \
}
-zig_msvc_atomics(u8, 8)
-zig_msvc_atomics(i8, 8)
-zig_msvc_atomics(u16, 16)
-zig_msvc_atomics(i16, 16)
-zig_msvc_atomics(u32, )
-zig_msvc_atomics(i32, )
+zig_msvc_atomics( u8, uint8_t, 8)
+zig_msvc_atomics( i8, int8_t, 8)
+zig_msvc_atomics(u16, uint16_t, 16)
+zig_msvc_atomics(i16, int16_t, 16)
+zig_msvc_atomics(u32, uint32_t, )
+zig_msvc_atomics(i32, int32_t, )
#if _M_X64
-zig_msvc_atomics(u64, 64)
-zig_msvc_atomics(i64, 64)
+zig_msvc_atomics(u64, uint64_t, 64)
+zig_msvc_atomics(i64, int64_t, 64)
#endif
#define zig_msvc_flt_atomics(Type, ReprType, suffix) \
static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
- zig_##ReprType comparand = *((zig_##ReprType*)expected); \
- zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \
- bool exchanged = initial == comparand; \
- if (!exchanged) { \
- *expected = *((zig_##Type*)&initial); \
- } \
- return exchanged; \
+ ReprType exchange; \
+ ReprType comparand; \
+ ReprType initial; \
+ bool success; \
+ memcpy(&comparand, expected, sizeof(comparand)); \
+ memcpy(&exchange, &desired, sizeof(exchange)); \
+ initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, exchange, comparand); \
+ success = initial == comparand; \
+ if (!success) memcpy(expected, &initial, sizeof(*expected)); \
+ return success; \
} \
static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \
- return *((zig_##Type*)&initial); \
+ ReprType repr; \
+ ReprType initial; \
+ zig_##Type result; \
+ memcpy(&repr, &value, sizeof(repr)); \
+ initial = _InterlockedExchange##suffix((ReprType volatile*)obj, repr); \
+ memcpy(&result, &initial, sizeof(result)); \
+ return result; \
} \
static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- bool success = false; \
- zig_##ReprType new; \
- zig_##Type prev; \
- while (!success) { \
- prev = *obj; \
- new = prev + value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
- } \
- return prev; \
+ ReprType repr; \
+ zig_##Type expected; \
+ zig_##Type desired; \
+ repr = *(ReprType volatile*)obj; \
+ memcpy(&expected, &repr, sizeof(expected)); \
+ do { \
+ desired = expected + value; \
+ } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
+ return expected; \
} \
static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
- bool success = false; \
- zig_##ReprType new; \
- zig_##Type prev; \
- while (!success) { \
- prev = *obj; \
- new = prev - value; \
- success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \
- } \
- return prev; \
+ ReprType repr; \
+ zig_##Type expected; \
+ zig_##Type desired; \
+ repr = *(ReprType volatile*)obj; \
+ memcpy(&expected, &repr, sizeof(expected)); \
+ do { \
+ desired = expected - value; \
+ } while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
+ return expected; \
}
-zig_msvc_flt_atomics(f32, u32, )
+zig_msvc_flt_atomics(f32, uint32_t, )
#if _M_X64
-zig_msvc_flt_atomics(f64, u64, 64)
+zig_msvc_flt_atomics(f64, uint64_t, 64)
#endif
#if _M_IX86
static inline void zig_msvc_atomic_barrier() {
- zig_i32 barrier;
+ int32_t barrier;
__asm {
xchg barrier, eax
}
}
-static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) {
+static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, void* arg) {
return _InterlockedExchangePointer(obj, arg);
}
-static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) {
+static inline void zig_msvc_atomic_store_p32(void** obj, void* arg) {
_InterlockedExchangePointer(obj, arg);
}
@@ -2360,11 +3371,11 @@ static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desir
return exchanged;
}
#else /* _M_IX86 */
-static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) {
+static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, void* arg) {
return _InterlockedExchangePointer(obj, arg);
}
-static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) {
+static inline void zig_msvc_atomic_store_p64(void** obj, void* arg) {
_InterlockedExchangePointer(obj, arg);
}
@@ -2383,11 +3394,11 @@ static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desir
}
static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) {
- return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected);
+ return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (int64_t*)expected);
}
static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) {
- return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected);
+ return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (uint64_t*)expected);
}
#define zig_msvc_atomics_128xchg(Type) \
@@ -2429,7 +3440,7 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
-/* ========================= Special Case Intrinsics ========================= */
+/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)
@@ -2459,8 +3470,8 @@ static inline void* zig_x86_windows_teb(void) {
#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__)
-static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) {
- zig_u32 cpu_info[4];
+static inline void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) {
+ uint32_t cpu_info[4];
#if _MSC_VER
__cpuidex(cpu_info, leaf_id, subid);
#else
@@ -2472,12 +3483,12 @@ static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, z
*edx = cpu_info[3];
}
-static inline zig_u32 zig_x86_get_xcr0(void) {
+static inline uint32_t zig_x86_get_xcr0(void) {
#if _MSC_VER
- return (zig_u32)_xgetbv(0);
+ return (uint32_t)_xgetbv(0);
#else
- zig_u32 eax;
- zig_u32 edx;
+ uint32_t eax;
+ uint32_t edx;
__asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
return eax;
#endif
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index d7bf519b41..fa2b2efa03 100644
Binary files a/stage1/zig1.wasm and b/stage1/zig1.wasm differ
diff --git a/test/behavior.zig b/test/behavior.zig
index 4f8ad67203..ed731377d8 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -141,6 +141,7 @@ test {
_ = @import("behavior/bugs/13664.zig");
_ = @import("behavior/bugs/13714.zig");
_ = @import("behavior/bugs/13785.zig");
+ _ = @import("behavior/bugs/14854.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call.zig");
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 901ea3697a..9d626dad66 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -568,7 +568,6 @@ fn overaligned_fn() align(0x1000) i32 {
}
test "comptime alloc alignment" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index a5ecd6f115..c78bf4ab85 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -48,16 +48,23 @@ fn getArrayLen(a: []const u32) usize {
test "array concat with undefined" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- {
- var array = "hello".* ++ @as([5]u8, undefined);
- array[5..10].* = "world".*;
- try std.testing.expect(std.mem.eql(u8, &array, "helloworld"));
- }
- {
- var array = @as([5]u8, undefined) ++ "world".*;
- array[0..5].* = "hello".*;
- try std.testing.expect(std.mem.eql(u8, &array, "helloworld"));
- }
+ const S = struct {
+ fn doTheTest() !void {
+ {
+ var array = "hello".* ++ @as([5]u8, undefined);
+ array[5..10].* = "world".*;
+ try std.testing.expect(std.mem.eql(u8, &array, "helloworld"));
+ }
+ {
+ var array = @as([5]u8, undefined) ++ "world".*;
+ array[0..5].* = "hello".*;
+ try std.testing.expect(std.mem.eql(u8, &array, "helloworld"));
+ }
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
}
test "array concat with tuple" {
@@ -77,6 +84,7 @@ test "array concat with tuple" {
}
test "array init with concat" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const a = 'a';
var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' };
try expect(std.mem.eql(u8, &i, "abcd"));
@@ -584,7 +592,6 @@ test "type coercion of anon struct literal to array" {
test "type coercion of pointer to anon struct literal to pointer to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig
index 97186eb54a..8ad71400fe 100644
--- a/test/behavior/bit_shifting.zig
+++ b/test/behavior/bit_shifting.zig
@@ -63,7 +63,6 @@ fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, compt
test "sharded table" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// realistic 16-way sharding
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index f8a1928dd1..552080c836 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -34,7 +34,6 @@ test "@bitCast iX -> uX (8, 16, 128)" {
test "@bitCast iX -> uX exotic integers" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -81,7 +80,6 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe
test "bitcast uX to bytes" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -368,7 +366,6 @@ test "comptime @bitCast packed struct to int and back" {
}
test "comptime bitcast with fields following f80" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig
index aa830144d1..9a24090c0e 100644
--- a/test/behavior/bitreverse.zig
+++ b/test/behavior/bitreverse.zig
@@ -96,7 +96,6 @@ fn vector8() !void {
test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -115,7 +114,6 @@ fn vector16() !void {
test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -134,7 +132,6 @@ fn vector24() !void {
test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -153,7 +150,6 @@ fn vector0() !void {
test "bitReverse vectors u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
comptime try vector0();
try vector0();
diff --git a/test/behavior/bugs/10147.zig b/test/behavior/bugs/10147.zig
index 3ca9085805..77c513caa6 100644
--- a/test/behavior/bugs/10147.zig
+++ b/test/behavior/bugs/10147.zig
@@ -6,7 +6,6 @@ test "test calling @clz on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u32 = 0x1;
diff --git a/test/behavior/bugs/1076.zig b/test/behavior/bugs/1076.zig
index 6fe4fbd38f..ba2b61e3db 100644
--- a/test/behavior/bugs/1076.zig
+++ b/test/behavior/bugs/1076.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const expect = std.testing.expect;
test "comptime code should not modify constant data" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/11046.zig b/test/behavior/bugs/11046.zig
index ba6c9d1a83..a13e02e45c 100644
--- a/test/behavior/bugs/11046.zig
+++ b/test/behavior/bugs/11046.zig
@@ -10,7 +10,6 @@ fn bar() !void {
}
test "fixed" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/11787.zig b/test/behavior/bugs/11787.zig
index 8678f0789a..6d17730a47 100644
--- a/test/behavior/bugs/11787.zig
+++ b/test/behavior/bugs/11787.zig
@@ -4,7 +4,6 @@ const testing = std.testing;
test "slicing zero length array field of struct" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/12000.zig b/test/behavior/bugs/12000.zig
index c29fb84270..a823ce6a0a 100644
--- a/test/behavior/bugs/12000.zig
+++ b/test/behavior/bugs/12000.zig
@@ -7,7 +7,6 @@ const T = struct {
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/12119.zig b/test/behavior/bugs/12119.zig
index bb12e3565a..8c734ad6d6 100644
--- a/test/behavior/bugs/12119.zig
+++ b/test/behavior/bugs/12119.zig
@@ -6,7 +6,6 @@ const u32x8 = @Vector(8, u32);
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/12169.zig b/test/behavior/bugs/12169.zig
index 5dd3fdefa9..b3db56e20b 100644
--- a/test/behavior/bugs/12169.zig
+++ b/test/behavior/bugs/12169.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/12890.zig b/test/behavior/bugs/12890.zig
index e6095ac33d..1316c2745e 100644
--- a/test/behavior/bugs/12890.zig
+++ b/test/behavior/bugs/12890.zig
@@ -10,7 +10,6 @@ fn a(b: []u3, c: u3) void {
}
test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var arr: [8]u3 = undefined;
diff --git a/test/behavior/bugs/13113.zig b/test/behavior/bugs/13113.zig
index cfbf7b6650..f9e0c8e7bb 100644
--- a/test/behavior/bugs/13113.zig
+++ b/test/behavior/bugs/13113.zig
@@ -8,7 +8,6 @@ const Foo = extern struct {
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/1421.zig b/test/behavior/bugs/1421.zig
index fbd05fb73c..1c85c3089e 100644
--- a/test/behavior/bugs/1421.zig
+++ b/test/behavior/bugs/1421.zig
@@ -9,7 +9,6 @@ const S = struct {
};
test "functions with return type required to be comptime are generic" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const ti = S.method();
try expect(@as(std.builtin.TypeId, ti) == std.builtin.TypeId.Struct);
}
diff --git a/test/behavior/bugs/14854.zig b/test/behavior/bugs/14854.zig
new file mode 100644
index 0000000000..b34dd49406
--- /dev/null
+++ b/test/behavior/bugs/14854.zig
@@ -0,0 +1,13 @@
+const testing = @import("std").testing;
+
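+// Passes a callconv(.C) function whose return type is a comptime parameter through a generic fn parameter.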
+test {
+ try testing.expect(getGeneric(u8, getU8) == 123);
+}
+
+fn getU8() callconv(.C) u8 {
+ return 123;
+}
+
+fn getGeneric(comptime T: type, supplier: fn () callconv(.C) T) T {
+ return supplier();
+}
diff --git a/test/behavior/bugs/1607.zig b/test/behavior/bugs/1607.zig
index d9e97e37b7..a60a406b75 100644
--- a/test/behavior/bugs/1607.zig
+++ b/test/behavior/bugs/1607.zig
@@ -13,7 +13,6 @@ fn checkAddress(s: []const u8) !void {
test "slices pointing at the same address as global array." {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try checkAddress(&a);
comptime try checkAddress(&a);
diff --git a/test/behavior/bugs/2622.zig b/test/behavior/bugs/2622.zig
index 8a0d1a06ba..89130a3974 100644
--- a/test/behavior/bugs/2622.zig
+++ b/test/behavior/bugs/2622.zig
@@ -4,7 +4,6 @@ var buf: []u8 = undefined;
test "reslice of undefined global var slice" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/2727.zig b/test/behavior/bugs/2727.zig
index 0478d41b63..9e0def70d4 100644
--- a/test/behavior/bugs/2727.zig
+++ b/test/behavior/bugs/2727.zig
@@ -6,7 +6,6 @@ fn t() bool {
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/3742.zig b/test/behavior/bugs/3742.zig
index a984f0d8e4..1ee88b8b64 100644
--- a/test/behavior/bugs/3742.zig
+++ b/test/behavior/bugs/3742.zig
@@ -37,7 +37,6 @@ pub const ArgSerializer = struct {
test "fixed" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) return error.SkipZigTest;
diff --git a/test/behavior/bugs/394.zig b/test/behavior/bugs/394.zig
index 02e90258bf..37864edbc5 100644
--- a/test/behavior/bugs/394.zig
+++ b/test/behavior/bugs/394.zig
@@ -11,7 +11,6 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "fixed" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const x = S{
.x = 3,
.y = E{ .B = 1 },
diff --git a/test/behavior/bugs/421.zig b/test/behavior/bugs/421.zig
index 500493e7d1..69ecbd2331 100644
--- a/test/behavior/bugs/421.zig
+++ b/test/behavior/bugs/421.zig
@@ -2,7 +2,6 @@ const builtin = @import("builtin");
const expect = @import("std").testing.expect;
test "bitCast to array" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/5398.zig b/test/behavior/bugs/5398.zig
index 78d31914d0..6f75bd9436 100644
--- a/test/behavior/bugs/5398.zig
+++ b/test/behavior/bugs/5398.zig
@@ -21,7 +21,6 @@ var renderable: Renderable = undefined;
test "assignment of field with padding" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
renderable = Renderable{
diff --git a/test/behavior/bugs/5487.zig b/test/behavior/bugs/5487.zig
index d901a692cd..3ea8cad220 100644
--- a/test/behavior/bugs/5487.zig
+++ b/test/behavior/bugs/5487.zig
@@ -12,7 +12,6 @@ pub fn writer() io.Writer(void, @typeInfo(@typeInfo(@TypeOf(write)).Fn.return_ty
test "crash" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
_ = io.multiWriter(.{writer()});
}
diff --git a/test/behavior/bugs/656.zig b/test/behavior/bugs/656.zig
index fa9e3ecc1e..216c9d8e1c 100644
--- a/test/behavior/bugs/656.zig
+++ b/test/behavior/bugs/656.zig
@@ -13,7 +13,6 @@ const Value = struct {
test "optional if after an if in a switch prong of a switch with 2 prongs in an else" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try foo(false, true);
}
diff --git a/test/behavior/bugs/6947.zig b/test/behavior/bugs/6947.zig
index 2e891ac5b3..c2b538c3fa 100644
--- a/test/behavior/bugs/6947.zig
+++ b/test/behavior/bugs/6947.zig
@@ -6,7 +6,6 @@ fn destroy(ptr: *void) void {
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/builtin_functions_returning_void_or_noreturn.zig b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
new file mode 100644
index 0000000000..072f5576cc
--- /dev/null
+++ b/test/behavior/builtin_functions_returning_void_or_noreturn.zig
@@ -0,0 +1,32 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const testing = std.testing;
+
+var x: u8 = 1;
+
+// This excludes builtin functions that return void or noreturn and cannot be tested.
+test {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
+
+ var val: u8 = undefined;
+ try testing.expectEqual({}, @atomicStore(u8, &val, 0, .Unordered));
+ try testing.expectEqual(void, @TypeOf(@breakpoint()));
+ try testing.expectEqual({}, @export(x, .{ .name = "x" }));
+ try testing.expectEqual({}, @fence(.Acquire));
+ try testing.expectEqual({}, @memcpy(@intToPtr([*]u8, 1), @intToPtr([*]u8, 1), 0));
+ try testing.expectEqual({}, @memset(@intToPtr([*]u8, 1), undefined, 0));
+ try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {}));
+ try testing.expectEqual({}, @prefetch(&val, .{}));
+ try testing.expectEqual({}, @setAlignStack(16));
+ try testing.expectEqual({}, @setCold(true));
+ try testing.expectEqual({}, @setEvalBranchQuota(0));
+ try testing.expectEqual({}, @setFloatMode(.Optimized));
+ try testing.expectEqual({}, @setRuntimeSafety(true));
+ try testing.expectEqual(noreturn, @TypeOf(if (true) @trap() else {}));
+}
diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig
index fc385e0443..8bd6fec6e3 100644
--- a/test/behavior/byteswap.zig
+++ b/test/behavior/byteswap.zig
@@ -62,7 +62,6 @@ fn vector8() !void {
test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -81,7 +80,6 @@ fn vector16() !void {
test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -100,7 +98,6 @@ fn vector24() !void {
test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -119,7 +116,6 @@ fn vector0() !void {
test "@byteSwap vectors u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
comptime try vector0();
try vector0();
diff --git a/test/behavior/byval_arg_var.zig b/test/behavior/byval_arg_var.zig
index 476d0d2e4e..01b5f90ef7 100644
--- a/test/behavior/byval_arg_var.zig
+++ b/test/behavior/byval_arg_var.zig
@@ -4,7 +4,6 @@ const builtin = @import("builtin");
var result: []const u8 = "wrong";
test "pass string literal byvalue to a generic var param" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
start();
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index 9622aa3144..b51a459932 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -21,7 +21,6 @@ test "super basic invocations" {
test "basic invocations" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -56,7 +55,6 @@ test "basic invocations" {
test "tuple parameters" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const add = struct {
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 927caa965b..275533d6ec 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -176,7 +176,6 @@ fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) !void {
test "implicitly cast indirect pointer to maybe-indirect pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -318,7 +317,6 @@ test "peer result null and comptime_int" {
test "*const ?[*]const T to [*c]const [*c]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var array = [_]u8{ 'o', 'k' };
@@ -588,7 +586,6 @@ fn testCastPtrOfArrayToSliceAndPtr() !void {
test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const window_name = [1][*]const u8{"window name"};
@@ -598,7 +595,6 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
test "vector casts" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -928,7 +924,6 @@ test "peer cast *[N:x]T to *[N]T" {
test "peer cast [*:x]T to [*]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -949,7 +944,6 @@ test "peer cast [*:x]T to [*]T" {
test "peer cast [:x]T to [*:x]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -990,7 +984,6 @@ test "peer type resolution implicit cast to return type" {
test "peer type resolution implicit cast to variable type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1113,8 +1106,6 @@ fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
}
test "compile time int to ptr of function" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-
try foobar(FUNCTION_CONSTANT);
}
@@ -1506,7 +1497,6 @@ test "implicit cast from [:0]T to [*c]T" {
test "bitcast packed struct with u0" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const S = packed struct(u2) { a: u0, b: u2 };
const s = @bitCast(S, @as(u2, 2));
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index a4f9f2f7a9..71d177395b 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -412,3 +412,11 @@ test "bitcast packed union to integer" {
try testing.expectEqual(@as(u2, 2), cast_b);
}
}
+
+test "mutate entire slice at comptime" {
+ comptime {
+ var buf: [3]u8 = undefined;
+ const x: [2]u8 = .{ 1, 2 }; // Avoid result location semantics (RLS)
+ buf[1..3].* = x;
+ }
+}
diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig
index 32f63b1a46..26c2adc271 100644
--- a/test/behavior/defer.zig
+++ b/test/behavior/defer.zig
@@ -50,7 +50,6 @@ fn testNestedFnErrDefer() anyerror!void {
}
test "return variable while defer expression in scope to modify it" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 095f3b740b..9076f9f9ac 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -937,7 +937,6 @@ test "enum literal casting to error union with payload enum" {
}
test "constant enum initialization with differing sizes" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1137,7 +1136,6 @@ test "tag name functions are unique" {
}
test "size of enum with only one tag which has explicit integer tag type" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index f30290eb91..9d4b154311 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -144,15 +144,11 @@ test "implicit cast to optional to error union to return result loc" {
}
test "fn returning empty error set can be passed as fn returning any error" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-
entry();
comptime entry();
}
test "fn returning empty error set can be passed as fn returning any error - pointer" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-
entryPtr();
comptime entryPtr();
}
@@ -219,7 +215,6 @@ fn testErrorSetType() !void {
}
test "explicit error set cast" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testExplicitErrorSetCast(Set1.A);
@@ -238,7 +233,6 @@ fn testExplicitErrorSetCast(set1: Set1) !void {
}
test "comptime test error for empty error set" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testComptimeTestErrorEmptySet(1234);
@@ -255,8 +249,6 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) !void {
}
test "comptime err to int of error set with only 1 possible value" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-
testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
comptime testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
}
@@ -426,7 +418,6 @@ test "nested error union function call in optional unwrap" {
}
test "return function call to error set from error union function" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -486,7 +477,6 @@ test "nested catch" {
}
test "function pointer with return type that is error union with payload which is pointer of parent struct" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index 8364196f94..52b30f9aed 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -138,7 +138,6 @@ test "pointer to type" {
}
test "a type constructed in a global expression" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1515,7 +1514,6 @@ test "x or true is comptime-known true" {
}
test "non-optional and optional array elements concatenated" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig
index 6bbd6ad7ef..bf99fd1795 100644
--- a/test/behavior/field_parent_ptr.zig
+++ b/test/behavior/field_parent_ptr.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
test "@fieldParentPtr non-first field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtr(&foo.c);
comptime try testParentFieldPtr(&foo.c);
@@ -10,6 +11,7 @@ test "@fieldParentPtr non-first field" {
test "@fieldParentPtr first field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtrFirst(&foo.a);
comptime try testParentFieldPtrFirst(&foo.a);
@@ -47,6 +49,7 @@ fn testParentFieldPtrFirst(a: *const bool) !void {
test "@fieldParentPtr untagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -73,6 +76,7 @@ fn testFieldParentPtrUnion(c: *const i32) !void {
test "@fieldParentPtr tagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -99,6 +103,7 @@ fn testFieldParentPtrTaggedUnion(c: *const i32) !void {
test "@fieldParentPtr extern union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index 7befa41380..a93949cd88 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -21,7 +21,6 @@ fn epsForType(comptime T: type) T {
test "floating point comparisons" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testFloatComparisons();
@@ -91,7 +90,6 @@ fn testDifferentSizedFloatComparisons() !void {
test "negative f128 floatToInt at compile-time" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -141,7 +139,6 @@ fn testSqrt() !void {
test "@sqrt with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -234,7 +231,6 @@ fn testSin() !void {
test "@sin with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -275,7 +271,6 @@ fn testCos() !void {
test "@cos with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -315,7 +310,6 @@ fn testExp() !void {
test "@exp with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -335,7 +329,6 @@ fn testExpWithVectors() !void {
test "@exp2" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -355,7 +348,6 @@ fn testExp2() !void {
test "@exp2" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -409,7 +401,6 @@ test "@log with @vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
@@ -447,7 +438,6 @@ test "@log2 with vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// https://github.com/ziglang/zig/issues/13681
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and
@@ -491,7 +481,6 @@ test "@log10 with vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime try testLog10WithVectors();
try testLog10WithVectors();
@@ -508,7 +497,6 @@ fn testLog10WithVectors() !void {
test "@fabs" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -537,7 +525,6 @@ fn testFabs() !void {
test "@fabs with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -636,7 +623,6 @@ test "a third @fabs test, surely there should not be three fabs tests" {
test "@floor" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -660,7 +646,6 @@ fn testFloor() !void {
test "@floor with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -730,7 +715,6 @@ fn testFloorLegacy(comptime T: type, x: T) !void {
test "@ceil" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -754,7 +738,6 @@ fn testCeil() !void {
test "@ceil with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -824,7 +807,6 @@ fn testCeilLegacy(comptime T: type, x: T) !void {
test "@trunc" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -848,7 +830,6 @@ fn testTrunc() !void {
test "@trunc with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index 9c37b9a8d9..5113e21452 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -275,7 +275,6 @@ test "implicit cast fn call result to optional in field result" {
}
test "void parameters" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@@ -300,7 +299,6 @@ fn acceptsString(foo: []u8) void {
}
test "function pointers" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 67a20e4840..98ffff85a3 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -110,7 +110,6 @@ test "basic for loop" {
}
test "for with null and T peer types and inferred result location type" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -152,7 +151,6 @@ test "2 break statements and an else" {
}
test "for loop with pointer elem var" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -198,7 +196,6 @@ test "for copies its payload" {
}
test "for on slice with allowzero ptr" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -253,7 +250,6 @@ test "for loop with else branch" {
test "count over fixed range" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var sum: usize = 0;
for (0..6) |i| {
@@ -266,7 +262,6 @@ test "count over fixed range" {
test "two counters" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var sum: usize = 0;
for (0..10, 10..20) |i, j| {
@@ -280,7 +275,6 @@ test "two counters" {
test "1-based counter and ptr to array" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var ok: usize = 0;
@@ -401,7 +395,6 @@ test "raw pointer and counter" {
test "inline for with slice as the comptime-known" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const comptime_slice = "hello";
@@ -432,7 +425,6 @@ test "inline for with slice as the comptime-known" {
test "inline for with counter as the comptime-known" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var runtime_slice = "hello";
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index 205823430c..0e002b2016 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -56,7 +56,6 @@ fn sameButWithFloats(a: f64, b: f64) f64 {
}
test "fn with comptime args" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -67,7 +66,6 @@ test "fn with comptime args" {
}
test "anytype params" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -93,7 +91,6 @@ fn max_f64(a: f64, b: f64) f64 {
}
test "type constructed by comptime function call" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/if.zig b/test/behavior/if.zig
index ac11a6585d..6632cdd5c2 100644
--- a/test/behavior/if.zig
+++ b/test/behavior/if.zig
@@ -78,7 +78,6 @@ test "const result loc, runtime if cond, else unreachable" {
}
test "if copies its payload" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/inline_switch.zig b/test/behavior/inline_switch.zig
index 90e8b36284..dcd603c94f 100644
--- a/test/behavior/inline_switch.zig
+++ b/test/behavior/inline_switch.zig
@@ -46,7 +46,6 @@ const U = union(E) { a: void, b: u2, c: u3, d: u4 };
test "inline switch unions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: U = .a;
diff --git a/test/behavior/int_div.zig b/test/behavior/int_div.zig
index 6ae794d377..954f6be220 100644
--- a/test/behavior/int_div.zig
+++ b/test/behavior/int_div.zig
@@ -91,3 +91,22 @@ fn mod(comptime T: type, a: T, b: T) T {
fn rem(comptime T: type, a: T, b: T) T {
return @rem(a, b);
}
+
+test "large integer division" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ {
+ var numerator: u256 = 99999999999999999997315645440;
+ var divisor: u256 = 10000000000000000000000000000;
+ try expect(numerator / divisor == 9);
+ }
+ {
+ var numerator: u256 = 99999999999999999999000000000000000000000;
+ var divisor: u256 = 10000000000000000000000000000000000000000;
+ try expect(numerator / divisor == 9);
+ }
+}
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 54263e1daf..d7b8e4764b 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -100,7 +100,6 @@ test "@clz vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testClzVectors();
@@ -163,7 +162,6 @@ test "@ctz vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
@@ -1526,7 +1524,6 @@ fn testNanEqNan(comptime F: type) !void {
}
test "vector comparison" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -1563,6 +1560,12 @@ test "signed zeros are represented properly" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ return error.SkipZigTest;
+ }
+
const S = struct {
fn doTheTest() !void {
try testOne(f16);
diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig
index 133a543d42..34a7d0976a 100644
--- a/test/behavior/maximum_minimum.zig
+++ b/test/behavior/maximum_minimum.zig
@@ -25,7 +25,6 @@ test "@max" {
test "@max on vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -75,7 +74,6 @@ test "@min for vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/member_func.zig b/test/behavior/member_func.zig
index a6229846d6..e7b19d5c01 100644
--- a/test/behavior/member_func.zig
+++ b/test/behavior/member_func.zig
@@ -27,7 +27,6 @@ const HasFuncs = struct {
};
test "standard field calls" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -71,7 +70,6 @@ test "standard field calls" {
}
test "@field field calls" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index a2d9e6d16d..25ed3641b8 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -100,7 +100,6 @@ fn vector16() !void {
}
test "vector f16" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -124,7 +123,6 @@ fn vector32() !void {
}
test "vector f32" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -148,7 +146,6 @@ fn vector64() !void {
}
test "vector f64" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -171,7 +168,6 @@ fn vector80() !void {
}
test "vector f80" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -195,13 +191,19 @@ fn vector128() !void {
}
test "vector f128" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ // https://github.com/ziglang/zig/issues/13876
+ return error.SkipZigTest;
+ }
+
comptime try vector128();
try vector128();
}
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index 223be69084..c78a995833 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -50,7 +50,6 @@ test "rhs maybe unwrap return" {
}
test "maybe return" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -71,7 +70,6 @@ fn foo(x: ?i32) ?bool {
}
test "test null runtime" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -184,7 +182,6 @@ const SillyStruct = struct {
const here_is_a_null_literal = SillyStruct{ .context = null };
test "unwrap optional which is field of global var" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 3a5b7b008b..bbcc5b3ce6 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -274,7 +274,6 @@ test "0-bit child type coerced to optional return ptr result location" {
}
test "0-bit child type coerced to optional" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index ec4ff332cf..e5ccfec543 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -190,7 +190,6 @@ test "compare equality of optional and non-optional pointer" {
test "allowzero pointer and slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -510,7 +509,6 @@ test "ptrToInt on a generic function" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const S = struct {
fn generic(i: anytype) @TypeOf(i) {
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index b27d5d77d3..9dce5820cd 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -67,7 +67,6 @@ fn testPopCountIntegers() !void {
}
test "@popCount vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index 9336d58641..845ea3751e 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -53,7 +53,6 @@ fn testReinterpretStructWrappedBytesAsInteger() !void {
}
test "reinterpret bytes of an array into an extern struct" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -171,6 +170,7 @@ test "lower reinterpreted comptime field ptr" {
test "reinterpret struct field at comptime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const numNative = comptime Bytes.init(0x12345678);
if (native_endian != .Little) {
@@ -233,6 +233,7 @@ test "ptrcast of const integer has the correct object size" {
test "implicit optional pointer to optional anyopaque pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var buf: [4]u8 = "aoeu".*;
var x: ?[*]u8 = &buf;
diff --git a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
index cd1f67dd11..bb6d5b1359 100644
--- a/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
+++ b/test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
@@ -5,7 +5,6 @@ const mem = std.mem;
var ok: bool = false;
test "reference a variable in an if after an if in the 2nd switch prong" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index 4c3f8ccad5..aea84bc45a 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -27,7 +27,6 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
test "reflection: @field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var f = Foo{
diff --git a/test/behavior/select.zig b/test/behavior/select.zig
index d09683b67c..73d69c6530 100644
--- a/test/behavior/select.zig
+++ b/test/behavior/select.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const expect = std.testing.expect;
test "@select vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -33,7 +32,6 @@ fn selectVectors() !void {
}
test "@select arrays" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig
index bcc4618aee..97223cc263 100644
--- a/test/behavior/shuffle.zig
+++ b/test/behavior/shuffle.zig
@@ -8,7 +8,6 @@ test "@shuffle int" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -50,7 +49,6 @@ test "@shuffle bool 1" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -71,7 +69,6 @@ test "@shuffle bool 2" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm) {
@@ -83,7 +80,7 @@ test "@shuffle bool 2" {
fn doTheTest() !void {
var x: @Vector(3, bool) = [3]bool{ false, true, false };
var v: @Vector(2, bool) = [2]bool{ true, false };
- const mask: @Vector(4, i32) = [4]i32{ 0, ~@as(i32, 1), 1, 2 };
+ const mask = [4]i32{ 0, ~@as(i32, 1), 1, 2 };
var res = @shuffle(bool, x, v, mask);
try expect(mem.eql(bool, &@as([4]bool, res), &[4]bool{ false, false, true, false }));
}
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index cfe948ac02..940ceda107 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -18,7 +18,6 @@ test "@sizeOf on compile-time types" {
}
test "@TypeOf() with multiple arguments" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
{
var var_1: u32 = undefined;
var var_2: u8 = undefined;
@@ -138,7 +137,6 @@ test "@sizeOf(T) == 0 doesn't force resolving struct size" {
}
test "@TypeOf() has no runtime side effects" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const S = struct {
fn foo(comptime T: type, ptr: *T) T {
ptr.* += 1;
@@ -153,7 +151,6 @@ test "@TypeOf() has no runtime side effects" {
test "branching logic inside @TypeOf" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const S = struct {
var data: i32 = 0;
fn foo() anyerror!i32 {
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 435e1887bb..2a0944a5b6 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -119,7 +119,6 @@ test "slice of type" {
}
test "generic malloc free" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = memAlloc(u8, 10) catch unreachable;
@@ -171,7 +170,6 @@ test "comptime pointer cast array and then slice" {
test "slicing zero length array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const s1 = ""[0..];
@@ -185,8 +183,6 @@ test "slicing zero length array" {
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try expect(@ptrToInt(x) == 0x1000);
try expect(x.len == 0x500);
@@ -231,6 +227,7 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
test "C pointer" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
var len: u32 = 10;
@@ -342,7 +339,6 @@ test "@ptrCast slice to pointer" {
}
test "slice syntax resulting in pointer-to-array" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -477,7 +473,6 @@ test "slice syntax resulting in pointer-to-array" {
}
test "slice pointer-to-array null terminated" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -497,7 +492,6 @@ test "slice pointer-to-array null terminated" {
}
test "slice pointer-to-array zero length" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -530,7 +524,6 @@ test "slice pointer-to-array zero length" {
}
test "type coercion of pointer to anon struct literal to pointer to slice" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -730,7 +723,6 @@ test "slice with dereferenced value" {
test "empty slice ptr is non null" {
if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and (builtin.os.tag == .macos or builtin.os.tag == .windows)) return error.SkipZigTest; // TODO
const empty_slice: []u8 = &[_]u8{};
const p: [*]u8 = empty_slice.ptr + 0;
@@ -747,3 +739,23 @@ test "slice decays to many pointer" {
const p: [*:0]const u8 = buf[0..7 :0];
try expectEqualStrings(buf[0..7], std.mem.span(p));
}
+
+test "write through pointer to optional slice arg" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ const S = struct {
+ fn bar(foo: *?[]const u8) !void {
+ foo.* = try baz();
+ }
+
+ fn baz() ![]const u8 {
+ return "ok";
+ }
+ };
+ var foo: ?[]const u8 = null;
+ try S.bar(&foo);
+ try expectEqualStrings(foo.?, "ok");
+}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 348e269682..2a1acebc0f 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -342,7 +342,6 @@ fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
test "self-referencing struct via array member" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const T = struct {
@@ -536,7 +535,6 @@ test "implicit cast packed struct field to const ptr" {
}
test "zero-bit field in packed struct" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = packed struct {
@@ -719,7 +717,6 @@ test "pointer to packed struct member in a stack variable" {
}
test "packed struct with u0 field access" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = packed struct {
@@ -1018,7 +1015,6 @@ test "type coercion of anon struct literal to struct" {
}
test "type coercion of pointer to anon struct literal to pointer to struct" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1088,7 +1084,6 @@ test "packed struct with undefined initializers" {
}
test "for loop over pointers to struct, getting field from struct pointer" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/struct_contains_null_ptr_itself.zig b/test/behavior/struct_contains_null_ptr_itself.zig
index d60e04a91a..7f0182af22 100644
--- a/test/behavior/struct_contains_null_ptr_itself.zig
+++ b/test/behavior/struct_contains_null_ptr_itself.zig
@@ -4,7 +4,6 @@ const builtin = @import("builtin");
test "struct contains null pointer which contains original struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: ?*NodeLineComment = null;
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index b8c367eb44..9129b73f16 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -403,7 +403,6 @@ fn return_a_number() anyerror!i32 {
}
test "switch on integer with else capturing expr" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -577,7 +576,6 @@ test "switch prongs with cases with identical payload types" {
}
test "switch on pointer type" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/threadlocal.zig b/test/behavior/threadlocal.zig
index ebeb1177c2..1f1bc6bea4 100644
--- a/test/behavior/threadlocal.zig
+++ b/test/behavior/threadlocal.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "thread local variable" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
@@ -40,7 +39,6 @@ threadlocal var buffer: [11]u8 = undefined;
test "reference a global threadlocal variable" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig
index 8143b1bddd..6d8d4eca6d 100644
--- a/test/behavior/translate_c_macros.zig
+++ b/test/behavior/translate_c_macros.zig
@@ -79,7 +79,6 @@ test "casting to union with a macro" {
test "casting or calling a value with a paren-surrounded macro" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig
index c81abebe68..e70d33eea2 100644
--- a/test/behavior/truncate.zig
+++ b/test/behavior/truncate.zig
@@ -60,7 +60,6 @@ test "truncate on comptime integer" {
}
test "truncate on vectors" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 13b02b40e8..79db21424e 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -126,7 +126,6 @@ test "tuple initializer for var" {
}
test "array-like initializer for tuple types" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -397,3 +396,22 @@ test "nested runtime conditionals in tuple initializer" {
};
try expectEqualStrings("up", x[0]);
}
+
+test "sentinel slice in tuple with other fields" {
+ const S = struct {
+ a: u32,
+ b: u32,
+ };
+
+ const Submission = union(enum) {
+ open: struct { *S, [:0]const u8, u32 },
+ };
+
+ _ = Submission;
+}
+
+test "sentinel slice in tuple" {
+ const S = struct { [:0]const u8 };
+
+ _ = S;
+}
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index 325bf0a8ed..7f44f350d1 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -449,8 +449,6 @@ test "Type.Union" {
}
test "Type.Union from Type.Enum" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-
const Tag = @Type(.{
.Enum = .{
.tag_type = u0,
@@ -475,8 +473,6 @@ test "Type.Union from Type.Enum" {
}
test "Type.Union from regular enum" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-
const E = enum { working_as_expected };
const T = @Type(.{
.Union = .{
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index 6f64c92006..495c1f3195 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -509,7 +509,6 @@ test "type info for async frames" {
}
test "Declarations are returned in declaration order" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -532,7 +531,6 @@ test "Struct.is_tuple for anon list literal" {
}
test "Struct.is_tuple for anon struct literal" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const info = @typeInfo(@TypeOf(.{ .a = 0 }));
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 87691cf3cb..9b49f8bf47 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -11,7 +11,6 @@ const FooWithFloats = union {
};
test "basic unions with floats" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -215,7 +214,6 @@ const Payload = union(Letter) {
};
test "union with specified enum tag" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -225,7 +223,6 @@ test "union with specified enum tag" {
}
test "packed union generates correctly aligned type" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -379,7 +376,6 @@ test "union with only 1 field which is void should be zero bits" {
}
test "tagged union initialization with runtime void" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -400,7 +396,6 @@ fn testTaggedUnionInit(x: anytype) bool {
pub const UnionEnumNoPayloads = union(enum) { A, B };
test "tagged union with no payloads" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -466,7 +461,6 @@ pub const FooUnion = union(enum) {
var glbl_array: [2]FooUnion = undefined;
test "initialize global array of union" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -494,7 +488,6 @@ test "update the tag value for zero-sized unions" {
test "union initializer generates padding only if needed" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -561,7 +554,6 @@ const FooNoVoid = union(enum) {
const Baz = enum { A, B, C, D };
test "tagged union type" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const foo1 = TaggedFoo{ .One = 13 };
@@ -598,7 +590,6 @@ fn returnAnInt(x: i32) TaggedFoo {
}
test "tagged union with all void fields but a meaningful tag" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -777,7 +768,6 @@ fn Setter(comptime attr: Attribute) type {
}
test "return union init with void payload" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -913,7 +903,6 @@ test "extern union doesn't trigger field check at comptime" {
test "anonymous union literal syntax" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
diff --git a/test/behavior/union_with_members.zig b/test/behavior/union_with_members.zig
index 0cb06a81ab..8e9c2db475 100644
--- a/test/behavior/union_with_members.zig
+++ b/test/behavior/union_with_members.zig
@@ -18,7 +18,6 @@ const ET = union(enum) {
test "enum with members" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 50fef7f646..562e9aba20 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -25,7 +25,6 @@ test "implicit cast vector to array - bool" {
test "vector wrap operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -48,7 +47,6 @@ test "vector wrap operators" {
test "vector bin compares with mem.eql" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -93,12 +91,18 @@ test "vector int operators" {
test "vector float operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64 and
+ builtin.zig_backend == .stage2_c)
+ {
+ // https://github.com/ziglang/zig/issues/13876
+ return error.SkipZigTest;
+ }
+
inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| {
const S = struct {
fn doTheTest() !void {
@@ -117,7 +121,6 @@ test "vector float operators" {
test "vector bit operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -237,7 +240,6 @@ test "vector casts of sizes not divisible by 8" {
}
test "vector @splat" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -380,7 +382,6 @@ test "store vector elements via runtime index" {
}
test "initialize vector which is a struct field" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -403,7 +404,6 @@ test "initialize vector which is a struct field" {
test "vector comparison operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -444,7 +444,6 @@ test "vector comparison operators" {
test "vector division operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -527,7 +526,6 @@ test "vector division operators" {
test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -559,7 +557,6 @@ test "vector bitwise not operator" {
test "vector shift operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -653,7 +650,6 @@ test "vector shift operators" {
test "vector reduce operation" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -709,7 +705,7 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (builtin.target.cpu.arch != .aarch64) {
+ if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) {
try testReduce(.Min, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, -386));
try testReduce(.Min, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 9));
}
@@ -727,7 +723,7 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (builtin.target.cpu.arch != .aarch64) {
+ if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) {
try testReduce(.Max, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, 1234567));
try testReduce(.Max, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 99999));
}
@@ -775,14 +771,14 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
- if (false) {
- try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
- try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan);
- try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan);
+ if (builtin.zig_backend != .stage2_llvm) {
+ try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, -1.9));
+ try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, -1.9));
+ try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, -1.9));
- try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
- try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan);
- try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan);
+ try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, 100.0));
+ try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, 100.0));
+ try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, 100.0));
}
try testReduce(.Mul, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
@@ -797,7 +793,6 @@ test "vector reduce operation" {
test "vector @reduce comptime" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -813,7 +808,6 @@ test "vector @reduce comptime" {
test "mask parameter of @shuffle is comptime scope" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -833,7 +827,6 @@ test "mask parameter of @shuffle is comptime scope" {
test "saturating add" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -865,7 +858,6 @@ test "saturating add" {
test "saturating subtraction" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -888,7 +880,6 @@ test "saturating subtraction" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -915,7 +906,6 @@ test "saturating multiplication" {
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1049,7 +1039,6 @@ test "@mulWithOverflow" {
}
test "@shlWithOverflow" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -1078,7 +1067,6 @@ test "alignment of vectors" {
test "loading the second vector from a slice of vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1131,7 +1119,6 @@ test "byte vector initialized in inline function" {
}
test "byte vector initialized in inline function" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -1160,7 +1147,6 @@ test "byte vector initialized in inline function" {
test "zero divisor" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1181,7 +1167,6 @@ test "zero divisor" {
test "zero multiplicand" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1204,7 +1189,6 @@ test "zero multiplicand" {
test "@intCast to u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1228,7 +1212,6 @@ test "modRem with zero divisor" {
test "array operands to shuffle are coerced to vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1246,8 +1229,6 @@ test "load packed vector element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var x: @Vector(2, u15) = .{ 1, 4 };
try expect((&x[0]).* == 1);
@@ -1260,7 +1241,6 @@ test "store packed vector element" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var v = @Vector(4, u1){ 1, 1, 1, 1 };
try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v);
diff --git a/test/behavior/void.zig b/test/behavior/void.zig
index 9b6c05d07d..8c6269123d 100644
--- a/test/behavior/void.zig
+++ b/test/behavior/void.zig
@@ -33,7 +33,6 @@ fn times(n: usize) []const void {
}
test "void optional" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/while.zig b/test/behavior/while.zig
index 6a97f96763..956aa30f7b 100644
--- a/test/behavior/while.zig
+++ b/test/behavior/while.zig
@@ -104,7 +104,6 @@ fn testBreakOuter() void {
}
test "while copies its payload" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -143,7 +142,6 @@ fn runContinueAndBreakTest() !void {
}
test "while with optional as condition" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -156,7 +154,6 @@ test "while with optional as condition" {
}
test "while with optional as condition with else" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -174,7 +171,6 @@ test "while with optional as condition with else" {
}
test "while with error union condition" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
numbers_left = 10;
@@ -205,7 +201,6 @@ test "while on bool with else result follow break prong" {
}
test "while on optional with else result follow else prong" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -217,7 +212,6 @@ test "while on optional with else result follow else prong" {
}
test "while on optional with else result follow break prong" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -290,7 +284,6 @@ test "while bool 2 break statements and an else" {
}
test "while optional 2 break statements and an else" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -310,7 +303,6 @@ test "while optional 2 break statements and an else" {
}
test "while error 2 break statements and an else" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig
index ddd438d5d3..0992943fc3 100644
--- a/test/behavior/widening.zig
+++ b/test/behavior/widening.zig
@@ -28,7 +28,6 @@ test "integer widening u0 to u8" {
}
test "implicit unsigned integer to signed integer" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/cases.zig b/test/cases.zig
index 412b4cb5e2..ffe046c70e 100644
--- a/test/cases.zig
+++ b/test/cases.zig
@@ -1,8 +1,8 @@
const std = @import("std");
-const TestContext = @import("../src/test.zig").TestContext;
+const Cases = @import("src/Cases.zig");
-pub fn addCases(ctx: *TestContext) !void {
- try @import("compile_errors.zig").addCases(ctx);
- try @import("stage2/cbe.zig").addCases(ctx);
- try @import("stage2/nvptx.zig").addCases(ctx);
+pub fn addCases(cases: *Cases) !void {
+ try @import("compile_errors.zig").addCases(cases);
+ try @import("cbe.zig").addCases(cases);
+ try @import("nvptx.zig").addCases(cases);
}
diff --git a/test/cases/compile_errors/access_inactive_union_field_comptime.zig b/test/cases/compile_errors/access_inactive_union_field_comptime.zig
index d990a85f9e..2098b19d14 100644
--- a/test/cases/compile_errors/access_inactive_union_field_comptime.zig
+++ b/test/cases/compile_errors/access_inactive_union_field_comptime.zig
@@ -21,3 +21,4 @@ pub export fn entry1() void {
// :9:15: error: access of union field 'a' while field 'b' is active
// :2:21: note: union declared here
// :14:16: error: access of union field 'a' while field 'b' is active
+// :2:21: note: union declared here
diff --git a/test/cases/compile_errors/attempted_implicit_cast_from_T_to_long_array_ptr.zig b/test/cases/compile_errors/attempted_implicit_cast_from_T_to_long_array_ptr.zig
new file mode 100644
index 0000000000..135081c15c
--- /dev/null
+++ b/test/cases/compile_errors/attempted_implicit_cast_from_T_to_long_array_ptr.zig
@@ -0,0 +1,18 @@
+export fn entry0(single: *u32) void {
+ _ = @as(*const [0]u32, single);
+}
+export fn entry1(single: *u32) void {
+ _ = @as(*const [1]u32, single);
+}
+export fn entry2(single: *u32) void {
+ _ = @as(*const [2]u32, single);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:28: error: expected type '*const [0]u32', found '*u32'
+// :2:28: note: pointer type child 'u32' cannot cast into pointer type child '[0]u32'
+// :8:28: error: expected type '*const [2]u32', found '*u32'
+// :8:28: note: pointer type child 'u32' cannot cast into pointer type child '[2]u32'
diff --git a/test/cases/compile_errors/bad_import.zig b/test/cases/compile_errors/bad_import.zig
index 49e78a4be4..e624d7104c 100644
--- a/test/cases/compile_errors/bad_import.zig
+++ b/test/cases/compile_errors/bad_import.zig
@@ -4,4 +4,4 @@ const bogus = @import("bogus-does-not-exist.zig",);
// backend=stage2
// target=native
//
-// :1:23: error: unable to load '${DIR}bogus-does-not-exist.zig': FileNotFound
+// bogus-does-not-exist.zig': FileNotFound
diff --git a/test/cases/compile_errors/break_void_result_location.zig b/test/cases/compile_errors/break_void_result_location.zig
new file mode 100644
index 0000000000..696ea39667
--- /dev/null
+++ b/test/cases/compile_errors/break_void_result_location.zig
@@ -0,0 +1,32 @@
+export fn f1() void {
+ const x: usize = for ("hello") |_| {};
+ _ = x;
+}
+export fn f2() void {
+ const x: usize = for ("hello") |_| {
+ break;
+ };
+ _ = x;
+}
+export fn f3() void {
+ var t: bool = true;
+ const x: usize = while (t) {
+ break;
+ };
+ _ = x;
+}
+export fn f4() void {
+ const x: usize = blk: {
+ break :blk;
+ };
+ _ = x;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:22: error: expected type 'usize', found 'void'
+// :7:9: error: expected type 'usize', found 'void'
+// :14:9: error: expected type 'usize', found 'void'
+// :18:1: error: expected type 'usize', found 'void'
diff --git a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig
index 9189eeb48d..55676f9230 100644
--- a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig
+++ b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig
@@ -15,3 +15,7 @@ pub export fn entry() void {
// target=native
//
// :6:5: error: found compile log statement
+//
+// Compile Log Output:
+// @as(tmp.Bar, .{ .X = 123 })
+// @as(tmp.Bar, [runtime value])
diff --git a/test/cases/compile_errors/compile_log.zig b/test/cases/compile_errors/compile_log.zig
index 772853b023..e1ea460dc3 100644
--- a/test/cases/compile_errors/compile_log.zig
+++ b/test/cases/compile_errors/compile_log.zig
@@ -17,3 +17,12 @@ export fn baz() void {
//
// :5:5: error: found compile log statement
// :11:5: note: also here
+//
+// Compile Log Output:
+// @as(*const [5:0]u8, "begin")
+// @as(*const [1:0]u8, "a"), @as(i32, 12), @as(*const [1:0]u8, "b"), @as([]const u8, "hi")
+// @as(*const [3:0]u8, "end")
+// @as(comptime_int, 4)
+// @as(*const [5:0]u8, "begin")
+// @as(*const [1:0]u8, "a"), @as(i32, [runtime value]), @as(*const [1:0]u8, "b"), @as([]const u8, [runtime value])
+// @as(*const [3:0]u8, "end")
diff --git a/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig b/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig
index 252b6e5f14..73de52fc97 100644
--- a/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig
+++ b/test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig
@@ -1,5 +1,5 @@
export fn entry() void {
- @compileLog(@ptrCast(*const anyopaque, &entry));
+ @compileLog(@as(*align(1) const anyopaque, @ptrCast(*const anyopaque, &entry)));
}
// error
@@ -7,3 +7,6 @@ export fn entry() void {
// target=native
//
// :2:5: error: found compile log statement
+//
+// Compile Log Output:
+// @as(*const anyopaque, (function 'entry'))
diff --git a/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig b/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig
index 0bc45eae0a..8a39fdec46 100644
--- a/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig
+++ b/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig
@@ -12,3 +12,6 @@ export fn entry() void {
// target=native
//
// :2:5: error: found compile log statement
+//
+// Compile Log Output:
+// @as(*const [3:0]u8, "i32\x00")
diff --git a/test/cases/compile_errors/compile_log_statement_warning_deduplication_in_generic_fn.zig b/test/cases/compile_errors/compile_log_statement_warning_deduplication_in_generic_fn.zig
index 76e1c80cf9..4b31d9924a 100644
--- a/test/cases/compile_errors/compile_log_statement_warning_deduplication_in_generic_fn.zig
+++ b/test/cases/compile_errors/compile_log_statement_warning_deduplication_in_generic_fn.zig
@@ -13,3 +13,8 @@ fn inner(comptime n: usize) void {
//
// :7:39: error: found compile log statement
// :7:39: note: also here
+//
+// Compile Log Output:
+// @as(*const [4:0]u8, "!@#$")
+// @as(*const [4:0]u8, "!@#$")
+// @as(*const [4:0]u8, "!@#$")
diff --git a/test/cases/compile_errors/comptime_try_non_error.zig b/test/cases/compile_errors/comptime_try_non_error.zig
new file mode 100644
index 0000000000..935148414c
--- /dev/null
+++ b/test/cases/compile_errors/comptime_try_non_error.zig
@@ -0,0 +1,18 @@
+comptime {
+ foo();
+}
+
+fn foo() void {
+ try bar();
+}
+
+pub fn bar() u8 {
+ return 0;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :6:12: error: expected error union type, found 'u8'
+// :2:8: note: called from here
diff --git a/test/cases/compile_errors/condition_comptime_reason_explained.zig b/test/cases/compile_errors/condition_comptime_reason_explained.zig
index 332ae8afc8..d0193986a8 100644
--- a/test/cases/compile_errors/condition_comptime_reason_explained.zig
+++ b/test/cases/compile_errors/condition_comptime_reason_explained.zig
@@ -45,4 +45,6 @@ pub export fn entry2() void {
// :22:13: error: unable to resolve comptime value
// :22:13: note: condition in comptime switch must be comptime-known
// :21:17: note: expression is evaluated at comptime because the function returns a comptime-only type 'tmp.S'
+// :2:12: note: struct requires comptime because of this field
+// :2:12: note: use '*const fn() void' for a function pointer type
// :32:19: note: called from here
diff --git a/test/cases/compile_errors/directly_embedding_opaque_type_in_struct_and_union.zig b/test/cases/compile_errors/directly_embedding_opaque_type_in_struct_and_union.zig
index 2a64326093..ace90bccfc 100644
--- a/test/cases/compile_errors/directly_embedding_opaque_type_in_struct_and_union.zig
+++ b/test/cases/compile_errors/directly_embedding_opaque_type_in_struct_and_union.zig
@@ -32,6 +32,7 @@ export fn d() void {
// :3:8: error: opaque types have unknown size and therefore cannot be directly embedded in structs
// :1:11: note: opaque declared here
// :7:10: error: opaque types have unknown size and therefore cannot be directly embedded in unions
+// :1:11: note: opaque declared here
// :19:18: error: opaque types have unknown size and therefore cannot be directly embedded in structs
// :18:22: note: opaque declared here
// :24:23: error: opaque types have unknown size and therefore cannot be directly embedded in structs
diff --git a/test/cases/compile_errors/exporting_primitive_values.zig b/test/cases/compile_errors/exporting_primitive_values.zig
new file mode 100644
index 0000000000..bf3c38a553
--- /dev/null
+++ b/test/cases/compile_errors/exporting_primitive_values.zig
@@ -0,0 +1,29 @@
+pub export fn entry1() void {
+ @export(u100, .{ .name = "a" });
+}
+pub export fn entry3() void {
+ @export(undefined, .{ .name = "b" });
+}
+pub export fn entry4() void {
+ @export(null, .{ .name = "c" });
+}
+pub export fn entry5() void {
+ @export(false, .{ .name = "d" });
+}
+pub export fn entry6() void {
+ @export(u8, .{ .name = "e" });
+}
+pub export fn entry7() void {
+ @export(u65535, .{ .name = "f" });
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:13: error: unable to export primitive value
+// :5:13: error: unable to export primitive value
+// :8:13: error: unable to export primitive value
+// :11:13: error: unable to export primitive value
+// :14:13: error: unable to export primitive value
+// :17:13: error: unable to export primitive value
diff --git a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
index de69fa409f..58f15f7fab 100644
--- a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
+++ b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
@@ -12,6 +12,6 @@ comptime { _ = entry2; }
// backend=stage2
// target=native
//
-// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
// :5:30: error: comptime parameters not allowed in function with calling convention 'C'
// :6:30: error: generic parameters not allowed in function with calling convention 'C'
+// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
diff --git a/test/cases/compile_errors/function_parameter_is_opaque.zig b/test/cases/compile_errors/function_parameter_is_opaque.zig
index 1f92274577..57c89bd7f4 100644
--- a/test/cases/compile_errors/function_parameter_is_opaque.zig
+++ b/test/cases/compile_errors/function_parameter_is_opaque.zig
@@ -27,4 +27,5 @@ export fn entry4() void {
// :1:17: note: opaque declared here
// :8:28: error: parameter of type '@TypeOf(null)' not allowed
// :12:8: error: parameter of opaque type 'tmp.FooType' not allowed
+// :1:17: note: opaque declared here
// :17:8: error: parameter of type '@TypeOf(null)' not allowed
diff --git a/test/cases/compile_errors/helpful_return_type_error_message.zig b/test/cases/compile_errors/helpful_return_type_error_message.zig
index 871e948537..83342c7ec3 100644
--- a/test/cases/compile_errors/helpful_return_type_error_message.zig
+++ b/test/cases/compile_errors/helpful_return_type_error_message.zig
@@ -24,9 +24,9 @@ export fn quux() u32 {
// :8:5: error: expected type 'void', found '@typeInfo(@typeInfo(@TypeOf(tmp.bar)).Fn.return_type.?).ErrorUnion.error_set'
// :7:17: note: function cannot return an error
// :11:15: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(tmp.bar)).Fn.return_type.?).ErrorUnion.error_set!u32'
-// :10:17: note: function cannot return an error
// :11:15: note: cannot convert error union to payload type
// :11:15: note: consider using 'try', 'catch', or 'if'
+// :10:17: note: function cannot return an error
// :15:14: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(tmp.bar)).Fn.return_type.?).ErrorUnion.error_set!u32'
// :15:14: note: cannot convert error union to payload type
// :15:14: note: consider using 'try', 'catch', or 'if'
diff --git a/test/cases/compile_errors/implicit_semicolon-block_expr.zig b/test/cases/compile_errors/implicit_semicolon-block_expr.zig
index 7dd82b897b..bab8ec29c0 100644
--- a/test/cases/compile_errors/implicit_semicolon-block_expr.zig
+++ b/test/cases/compile_errors/implicit_semicolon-block_expr.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
_ = {}
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-block_statement.zig b/test/cases/compile_errors/implicit_semicolon-block_statement.zig
index 189ba84d98..912ccbc790 100644
--- a/test/cases/compile_errors/implicit_semicolon-block_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-block_statement.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
({})
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-comptime_expression.zig b/test/cases/compile_errors/implicit_semicolon-comptime_expression.zig
index decbc352e8..e8dc8bb534 100644
--- a/test/cases/compile_errors/implicit_semicolon-comptime_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-comptime_expression.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
_ = comptime {}
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-comptime_statement.zig b/test/cases/compile_errors/implicit_semicolon-comptime_statement.zig
index d17db15924..afc1798669 100644
--- a/test/cases/compile_errors/implicit_semicolon-comptime_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-comptime_statement.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
comptime ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-defer.zig b/test/cases/compile_errors/implicit_semicolon-defer.zig
index 57fd3a2626..e91dbae7f8 100644
--- a/test/cases/compile_errors/implicit_semicolon-defer.zig
+++ b/test/cases/compile_errors/implicit_semicolon-defer.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
defer ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-for_expression.zig b/test/cases/compile_errors/implicit_semicolon-for_expression.zig
index c751384e11..1fbe4dd3ad 100644
--- a/test/cases/compile_errors/implicit_semicolon-for_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-for_expression.zig
@@ -3,7 +3,10 @@ export fn entry() void {
var good = {};
_ = for(foo()) |_| {}
var bad = {};
+ _ = good;
+ _ = bad;
}
+fn foo() void {}
// error
// backend=stage2
diff --git a/test/cases/compile_errors/implicit_semicolon-for_statement.zig b/test/cases/compile_errors/implicit_semicolon-for_statement.zig
index 14709cef4c..2830293b70 100644
--- a/test/cases/compile_errors/implicit_semicolon-for_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-for_statement.zig
@@ -3,7 +3,10 @@ export fn entry() void {
var good = {};
for(foo()) |_| ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
+fn foo() void {}
// error
// backend=stage2
diff --git a/test/cases/compile_errors/implicit_semicolon-if-else-if-else_expression.zig b/test/cases/compile_errors/implicit_semicolon-if-else-if-else_expression.zig
index 72a4fa7d3e..9e99421cd1 100644
--- a/test/cases/compile_errors/implicit_semicolon-if-else-if-else_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-if-else-if-else_expression.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
_ = if(true) {} else if(true) {} else {}
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-if-else-if-else_statement.zig b/test/cases/compile_errors/implicit_semicolon-if-else-if-else_statement.zig
index 95135006ba..e2e7b7e3b3 100644
--- a/test/cases/compile_errors/implicit_semicolon-if-else-if-else_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-if-else-if-else_statement.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
if(true) ({}) else if(true) ({}) else ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-if-else-if_expression.zig b/test/cases/compile_errors/implicit_semicolon-if-else-if_expression.zig
index a29636bd1d..33ca6ab600 100644
--- a/test/cases/compile_errors/implicit_semicolon-if-else-if_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-if-else-if_expression.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
_ = if(true) {} else if(true) {}
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-if-else-if_statement.zig b/test/cases/compile_errors/implicit_semicolon-if-else-if_statement.zig
index c62430a0a2..e3d004fee1 100644
--- a/test/cases/compile_errors/implicit_semicolon-if-else-if_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-if-else-if_statement.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
if(true) ({}) else if(true) ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-if-else_expression.zig b/test/cases/compile_errors/implicit_semicolon-if-else_expression.zig
index d5bee6e52b..a23809528b 100644
--- a/test/cases/compile_errors/implicit_semicolon-if-else_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-if-else_expression.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
_ = if(true) {} else {}
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-if-else_statement.zig b/test/cases/compile_errors/implicit_semicolon-if-else_statement.zig
index 94df128626..ed01aa7df2 100644
--- a/test/cases/compile_errors/implicit_semicolon-if-else_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-if-else_statement.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
if(true) ({}) else ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-if_expression.zig b/test/cases/compile_errors/implicit_semicolon-if_expression.zig
index 339a5378cf..e28f8616e2 100644
--- a/test/cases/compile_errors/implicit_semicolon-if_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-if_expression.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
_ = if(true) {}
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-if_statement.zig b/test/cases/compile_errors/implicit_semicolon-if_statement.zig
index b8ccb5e401..3067c07767 100644
--- a/test/cases/compile_errors/implicit_semicolon-if_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-if_statement.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
if(true) ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-test_expression.zig b/test/cases/compile_errors/implicit_semicolon-test_expression.zig
index 2a37c0aa0e..0bb345b387 100644
--- a/test/cases/compile_errors/implicit_semicolon-test_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-test_expression.zig
@@ -3,7 +3,10 @@ export fn entry() void {
var good = {};
_ = if (foo()) |_| {}
var bad = {};
+ _ = good;
+ _ = bad;
}
+fn foo() void {}
// error
// backend=stage2
diff --git a/test/cases/compile_errors/implicit_semicolon-test_statement.zig b/test/cases/compile_errors/implicit_semicolon-test_statement.zig
index afe00eba75..3a4eb8b5ba 100644
--- a/test/cases/compile_errors/implicit_semicolon-test_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-test_statement.zig
@@ -3,7 +3,10 @@ export fn entry() void {
var good = {};
if (foo()) |_| ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
+fn foo() void {}
// error
// backend=stage2
diff --git a/test/cases/compile_errors/implicit_semicolon-while-continue_expression.zig b/test/cases/compile_errors/implicit_semicolon-while-continue_expression.zig
index 5587627597..f05c70bc14 100644
--- a/test/cases/compile_errors/implicit_semicolon-while-continue_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-while-continue_expression.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
_ = while(true):({}) {}
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-while-continue_statement.zig b/test/cases/compile_errors/implicit_semicolon-while-continue_statement.zig
index 9bebe3861e..2d27824f6b 100644
--- a/test/cases/compile_errors/implicit_semicolon-while-continue_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-while-continue_statement.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
while(true):({}) ({})
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-while_expression.zig b/test/cases/compile_errors/implicit_semicolon-while_expression.zig
index df388a7c39..4b39ed7c16 100644
--- a/test/cases/compile_errors/implicit_semicolon-while_expression.zig
+++ b/test/cases/compile_errors/implicit_semicolon-while_expression.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
_ = while(true) {}
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/implicit_semicolon-while_statement.zig b/test/cases/compile_errors/implicit_semicolon-while_statement.zig
index d9ed3d1e2a..538a56faf1 100644
--- a/test/cases/compile_errors/implicit_semicolon-while_statement.zig
+++ b/test/cases/compile_errors/implicit_semicolon-while_statement.zig
@@ -3,6 +3,8 @@ export fn entry() void {
var good = {};
while(true) 1
var bad = {};
+ _ = good;
+ _ = bad;
}
// error
diff --git a/test/cases/compile_errors/invalid_member_of_builtin_enum.zig b/test/cases/compile_errors/invalid_member_of_builtin_enum.zig
index b0a176d792..f3ea66ae1c 100644
--- a/test/cases/compile_errors/invalid_member_of_builtin_enum.zig
+++ b/test/cases/compile_errors/invalid_member_of_builtin_enum.zig
@@ -9,4 +9,4 @@ export fn entry() void {
// target=native
//
// :3:38: error: enum 'builtin.OptimizeMode' has no member named 'x86'
-// :?:18: note: enum declared here
+// : note: enum declared here
diff --git a/test/cases/compile_errors/invalid_store_to_comptime_field.zig b/test/cases/compile_errors/invalid_store_to_comptime_field.zig
index 0f444ba78c..fd6fff5e17 100644
--- a/test/cases/compile_errors/invalid_store_to_comptime_field.zig
+++ b/test/cases/compile_errors/invalid_store_to_comptime_field.zig
@@ -73,11 +73,11 @@ pub export fn entry8() void {
//
// :6:19: error: value stored in comptime field does not match the default value of the field
// :14:19: error: value stored in comptime field does not match the default value of the field
-// :53:16: error: value stored in comptime field does not match the default value of the field
// :19:38: error: value stored in comptime field does not match the default value of the field
// :31:19: error: value stored in comptime field does not match the default value of the field
// :25:29: note: default value set here
// :41:16: error: value stored in comptime field does not match the default value of the field
// :45:12: error: value stored in comptime field does not match the default value of the field
+// :53:16: error: value stored in comptime field does not match the default value of the field
// :66:43: error: value stored in comptime field does not match the default value of the field
// :59:35: error: value stored in comptime field does not match the default value of the field
diff --git a/test/cases/compile_errors/invalid_struct_field.zig b/test/cases/compile_errors/invalid_struct_field.zig
index 4450375cb8..ff8c96a0b6 100644
--- a/test/cases/compile_errors/invalid_struct_field.zig
+++ b/test/cases/compile_errors/invalid_struct_field.zig
@@ -25,5 +25,6 @@ export fn e() void {
// :4:7: error: no field named 'foo' in struct 'tmp.A'
// :1:11: note: struct declared here
// :10:17: error: no field named 'bar' in struct 'tmp.A'
+// :1:11: note: struct declared here
// :18:45: error: no field named 'f' in struct 'tmp.e.B'
// :14:15: note: struct declared here
diff --git a/test/cases/compile_errors/method_call_with_first_arg_type_primitive.zig b/test/cases/compile_errors/method_call_with_first_arg_type_primitive.zig
index 1cecac6fac..2a5167adf2 100644
--- a/test/cases/compile_errors/method_call_with_first_arg_type_primitive.zig
+++ b/test/cases/compile_errors/method_call_with_first_arg_type_primitive.zig
@@ -2,7 +2,7 @@ const Foo = struct {
x: i32,
fn init(x: i32) Foo {
- return Foo {
+ return Foo{
.x = x,
};
}
@@ -20,3 +20,4 @@ export fn f() void {
//
// :14:9: error: no field or member function named 'init' in 'tmp.Foo'
// :1:13: note: struct declared here
+// :4:5: note: 'init' is not a member function
diff --git a/test/cases/compile_errors/method_call_with_first_arg_type_wrong_container.zig b/test/cases/compile_errors/method_call_with_first_arg_type_wrong_container.zig
index ad481a6158..0653bda3ea 100644
--- a/test/cases/compile_errors/method_call_with_first_arg_type_wrong_container.zig
+++ b/test/cases/compile_errors/method_call_with_first_arg_type_wrong_container.zig
@@ -29,3 +29,4 @@ export fn foo() void {
//
// :23:6: error: no field or member function named 'init' in 'tmp.List'
// :1:18: note: struct declared here
+// :5:9: note: 'init' is not a member function
diff --git a/test/cases/compile_errors/missing_main_fn_in_executable.zig b/test/cases/compile_errors/missing_main_fn_in_executable.zig
index 2d608ad2b8..3c1ae631ac 100644
--- a/test/cases/compile_errors/missing_main_fn_in_executable.zig
+++ b/test/cases/compile_errors/missing_main_fn_in_executable.zig
@@ -5,5 +5,7 @@
// target=x86_64-linux
// output_mode=Exe
//
-// :?:?: error: root struct of file 'tmp' has no member named 'main'
-// :?:?: note: called from here
+// : error: root struct of file 'tmp' has no member named 'main'
+// : note: called from here
+// : note: called from here
+// : note: called from here
diff --git a/test/cases/compile_errors/private_main_fn.zig b/test/cases/compile_errors/private_main_fn.zig
index 26ad3d22db..6e53fbdce2 100644
--- a/test/cases/compile_errors/private_main_fn.zig
+++ b/test/cases/compile_errors/private_main_fn.zig
@@ -5,6 +5,8 @@ fn main() void {}
// target=x86_64-linux
// output_mode=Exe
//
-// :?:?: error: 'main' is not marked 'pub'
+// : error: 'main' is not marked 'pub'
// :1:1: note: declared here
-// :?:?: note: called from here
+// : note: called from here
+// : note: called from here
+// : note: called from here
diff --git a/test/cases/compile_errors/reify_type_with_undefined.zig b/test/cases/compile_errors/reify_type_with_undefined.zig
index e5753fa420..59c0314773 100644
--- a/test/cases/compile_errors/reify_type_with_undefined.zig
+++ b/test/cases/compile_errors/reify_type_with_undefined.zig
@@ -11,6 +11,18 @@ comptime {
},
});
}
+comptime {
+ const std = @import("std");
+ const fields: [1]std.builtin.Type.StructField = undefined;
+ _ = @Type(.{
+ .Struct = .{
+ .layout = .Auto,
+ .fields = &fields,
+ .decls = &.{},
+ .is_tuple = false,
+ },
+ });
+}
// error
// backend=stage2
@@ -18,3 +30,4 @@ comptime {
//
// :2:9: error: use of undefined value here causes undefined behavior
// :5:9: error: use of undefined value here causes undefined behavior
+// :17:9: error: use of undefined value here causes undefined behavior
diff --git a/test/cases/compile_errors/runtime_index_into_comptime_type_slice.zig b/test/cases/compile_errors/runtime_index_into_comptime_type_slice.zig
index 4c235ed94b..9cd0fe1798 100644
--- a/test/cases/compile_errors/runtime_index_into_comptime_type_slice.zig
+++ b/test/cases/compile_errors/runtime_index_into_comptime_type_slice.zig
@@ -15,5 +15,6 @@ export fn entry() void {
// target=native
//
// :9:51: error: values of type '[]const builtin.Type.StructField' must be comptime-known, but index value is runtime-known
-// :?:21: note: struct requires comptime because of this field
-// :?:21: note: types are not available at runtime
+// : note: struct requires comptime because of this field
+// : note: types are not available at runtime
+// : struct requires comptime because of this field
diff --git a/test/cases/compile_errors/struct_type_mismatch_in_arg.zig b/test/cases/compile_errors/struct_type_mismatch_in_arg.zig
index a52bdfab6c..d051966c52 100644
--- a/test/cases/compile_errors/struct_type_mismatch_in_arg.zig
+++ b/test/cases/compile_errors/struct_type_mismatch_in_arg.zig
@@ -13,6 +13,6 @@ comptime {
// target=native
//
// :7:16: error: expected type 'tmp.Foo', found 'tmp.Bar'
-// :1:13: note: struct declared here
// :2:13: note: struct declared here
+// :1:13: note: struct declared here
// :4:18: note: parameter type declared here
diff --git a/test/cases/compile_errors/undefined_as_field_type_is_rejected.zig b/test/cases/compile_errors/undefined_as_field_type_is_rejected.zig
index e78cadc878..b6eb059661 100644
--- a/test/cases/compile_errors/undefined_as_field_type_is_rejected.zig
+++ b/test/cases/compile_errors/undefined_as_field_type_is_rejected.zig
@@ -1,9 +1,13 @@
-export fn a() void {
- b();
+const Foo = struct {
+ a: undefined,
+};
+export fn entry1() void {
+ const foo: Foo = undefined;
+ _ = foo;
}
// error
-// backend=stage2
+// backend=stage1
// target=native
//
-// :2:5: error: use of undeclared identifier 'b'
+// tmp.zig:2:8: error: use of undefined value here causes undefined behavior
diff --git a/test/cases/compile_errors/union_init_with_none_or_multiple_fields.zig b/test/cases/compile_errors/union_init_with_none_or_multiple_fields.zig
index 5f486bf2b7..a700f0d0f2 100644
--- a/test/cases/compile_errors/union_init_with_none_or_multiple_fields.zig
+++ b/test/cases/compile_errors/union_init_with_none_or_multiple_fields.zig
@@ -28,10 +28,11 @@ export fn u2m() void {
// target=native
//
// :9:1: error: union initializer must initialize one field
+// :1:12: note: union declared here
// :14:20: error: cannot initialize multiple union fields at once, unions can only have one active field
// :14:31: note: additional initializer here
+// :1:12: note: union declared here
// :18:21: error: union initializer must initialize one field
// :22:20: error: cannot initialize multiple union fields at once, unions can only have one active field
// :22:31: note: additional initializer here
-// :1:12: note: union declared here
// :5:12: note: union declared here
diff --git a/test/cases/compile_log.0.zig b/test/cases/compile_log.0.zig
index 161a6f37ca..27dfbc706d 100644
--- a/test/cases/compile_log.0.zig
+++ b/test/cases/compile_log.0.zig
@@ -15,3 +15,8 @@ fn x() void {}
// error
//
// :6:23: error: expected type 'usize', found 'bool'
+//
+// Compile Log Output:
+// @as(bool, true), @as(comptime_int, 20), @as(u32, [runtime value]), @as(fn() void, (function 'x'))
+// @as(comptime_int, 1000)
+// @as(comptime_int, 1234)
diff --git a/test/cases/compile_log.1.zig b/test/cases/compile_log.1.zig
index 12e6641542..286722653c 100644
--- a/test/cases/compile_log.1.zig
+++ b/test/cases/compile_log.1.zig
@@ -14,3 +14,7 @@ fn x() void {}
//
// :9:5: error: found compile log statement
// :4:5: note: also here
+//
+// Compile Log Output:
+// @as(bool, true), @as(comptime_int, 20), @as(u32, [runtime value]), @as(fn() void, (function 'x'))
+// @as(comptime_int, 1000)
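A minimal case using this manifest layout might look as follows; the source, line/column values, and logged value here are illustrative only, not taken from any existing case:

comptime {
    @compileLog(42);
}

// error
//
// :2:5: error: found compile log statement
//
// Compile Log Output:
// @as(comptime_int, 42)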
diff --git a/test/cases/f32_passed_to_variadic_fn.zig b/test/cases/f32_passed_to_variadic_fn.zig
index c029b4b69f..4ae1d2cf08 100644
--- a/test/cases/f32_passed_to_variadic_fn.zig
+++ b/test/cases/f32_passed_to_variadic_fn.zig
@@ -9,7 +9,8 @@ pub fn main() void {
// run
// backend=llvm
// target=x86_64-linux-gnu
+// link_libc=1
//
// f64: 2.000000
// f32: 10.000000
-//
\ No newline at end of file
+//
diff --git a/test/cases/fn_typeinfo_passed_to_comptime_fn.zig b/test/cases/fn_typeinfo_passed_to_comptime_fn.zig
index 31673e5b81..fb64788126 100644
--- a/test/cases/fn_typeinfo_passed_to_comptime_fn.zig
+++ b/test/cases/fn_typeinfo_passed_to_comptime_fn.zig
@@ -14,4 +14,5 @@ fn foo(comptime info: std.builtin.Type) !void {
// run
// is_test=1
+// backend=llvm
//
diff --git a/test/cases/llvm/address_space_pointer_access_chaining_pointer_to_optional_array.zig b/test/cases/llvm/address_space_pointer_access_chaining_pointer_to_optional_array.zig
index cf43513159..00d4a7ecc9 100644
--- a/test/cases/llvm/address_space_pointer_access_chaining_pointer_to_optional_array.zig
+++ b/test/cases/llvm/address_space_pointer_access_chaining_pointer_to_optional_array.zig
@@ -5,7 +5,7 @@ pub fn main() void {
_ = entry;
}
-// error
+// compile
// output_mode=Exe
// backend=llvm
// target=x86_64-linux,x86_64-macos
diff --git a/test/cases/llvm/address_spaces_pointer_access_chaining_array_pointer.zig b/test/cases/llvm/address_spaces_pointer_access_chaining_array_pointer.zig
index 5907c1dad5..f23498e955 100644
--- a/test/cases/llvm/address_spaces_pointer_access_chaining_array_pointer.zig
+++ b/test/cases/llvm/address_spaces_pointer_access_chaining_array_pointer.zig
@@ -5,7 +5,7 @@ pub fn main() void {
_ = entry;
}
-// error
+// compile
// output_mode=Exe
// backend=stage2,llvm
// target=x86_64-linux,x86_64-macos
diff --git a/test/cases/llvm/address_spaces_pointer_access_chaining_complex.zig b/test/cases/llvm/address_spaces_pointer_access_chaining_complex.zig
index ece0614f73..4f54f38e6b 100644
--- a/test/cases/llvm/address_spaces_pointer_access_chaining_complex.zig
+++ b/test/cases/llvm/address_spaces_pointer_access_chaining_complex.zig
@@ -6,7 +6,7 @@ pub fn main() void {
_ = entry;
}
-// error
+// compile
// output_mode=Exe
// backend=llvm
// target=x86_64-linux,x86_64-macos
diff --git a/test/cases/llvm/address_spaces_pointer_access_chaining_struct_pointer.zig b/test/cases/llvm/address_spaces_pointer_access_chaining_struct_pointer.zig
index 9175bcbc0e..84695cb35b 100644
--- a/test/cases/llvm/address_spaces_pointer_access_chaining_struct_pointer.zig
+++ b/test/cases/llvm/address_spaces_pointer_access_chaining_struct_pointer.zig
@@ -6,7 +6,7 @@ pub fn main() void {
_ = entry;
}
-// error
+// compile
// output_mode=Exe
// backend=stage2,llvm
// target=x86_64-linux,x86_64-macos
diff --git a/test/cases/llvm/dereferencing_though_multiple_pointers_with_address_spaces.zig b/test/cases/llvm/dereferencing_though_multiple_pointers_with_address_spaces.zig
index 8f36700757..badab821d3 100644
--- a/test/cases/llvm/dereferencing_though_multiple_pointers_with_address_spaces.zig
+++ b/test/cases/llvm/dereferencing_though_multiple_pointers_with_address_spaces.zig
@@ -5,7 +5,7 @@ pub fn main() void {
_ = entry;
}
-// error
+// compile
// output_mode=Exe
// backend=stage2,llvm
// target=x86_64-linux,x86_64-macos
diff --git a/test/cases/llvm/hello_world.zig b/test/cases/llvm/hello_world.zig
index 4243191b0f..0f75f624ec 100644
--- a/test/cases/llvm/hello_world.zig
+++ b/test/cases/llvm/hello_world.zig
@@ -7,6 +7,7 @@ pub fn main() void {
// run
// backend=llvm
// target=x86_64-linux,x86_64-macos
+// link_libc=1
//
// hello world!
//
diff --git a/test/cases/llvm/pointer_keeps_address_space.zig b/test/cases/llvm/pointer_keeps_address_space.zig
index bfd40566f8..f894c96d7b 100644
--- a/test/cases/llvm/pointer_keeps_address_space.zig
+++ b/test/cases/llvm/pointer_keeps_address_space.zig
@@ -5,7 +5,7 @@ pub fn main() void {
_ = entry;
}
-// error
+// compile
// output_mode=Exe
// backend=stage2,llvm
// target=x86_64-linux,x86_64-macos
diff --git a/test/cases/llvm/pointer_keeps_address_space_when_taking_address_of_dereference.zig b/test/cases/llvm/pointer_keeps_address_space_when_taking_address_of_dereference.zig
index 8114e86c5d..b5803a3076 100644
--- a/test/cases/llvm/pointer_keeps_address_space_when_taking_address_of_dereference.zig
+++ b/test/cases/llvm/pointer_keeps_address_space_when_taking_address_of_dereference.zig
@@ -5,7 +5,7 @@ pub fn main() void {
_ = entry;
}
-// error
+// compile
// output_mode=Exe
// backend=stage2,llvm
// target=x86_64-linux,x86_64-macos
diff --git a/test/cases/llvm/pointer_to_explicit_generic_address_space_coerces_to_implicit_pointer.zig b/test/cases/llvm/pointer_to_explicit_generic_address_space_coerces_to_implicit_pointer.zig
index 78bc3e4bd6..b3c0116983 100644
--- a/test/cases/llvm/pointer_to_explicit_generic_address_space_coerces_to_implicit_pointer.zig
+++ b/test/cases/llvm/pointer_to_explicit_generic_address_space_coerces_to_implicit_pointer.zig
@@ -5,7 +5,7 @@ pub fn main() void {
_ = entry;
}
-// error
+// compile
// output_mode=Exe
// backend=stage2,llvm
// target=x86_64-linux,x86_64-macos
diff --git a/test/stage2/cbe.zig b/test/cbe.zig
similarity index 92%
rename from test/stage2/cbe.zig
rename to test/cbe.zig
index e9750853a6..25ac3cb137 100644
--- a/test/stage2/cbe.zig
+++ b/test/cbe.zig
@@ -1,5 +1,5 @@
const std = @import("std");
-const TestContext = @import("../../src/test.zig").TestContext;
+const Cases = @import("src/Cases.zig");
// These tests should work with all platforms, but we're using linux_x64 for
// now for consistency. Will be expanded eventually.
@@ -8,7 +8,7 @@ const linux_x64 = std.zig.CrossTarget{
.os_tag = .linux,
};
-pub fn addCases(ctx: *TestContext) !void {
+pub fn addCases(ctx: *Cases) !void {
{
var case = ctx.exeFromCompiledC("hello world with updates", .{});
@@ -71,7 +71,7 @@ pub fn addCases(ctx: *TestContext) !void {
}
{
- var case = ctx.exeFromCompiledC("@intToError", .{});
+ var case = ctx.exeFromCompiledC("intToError", .{});
case.addCompareOutput(
\\pub export fn main() c_int {
@@ -837,7 +837,7 @@ pub fn addCases(ctx: *TestContext) !void {
}
{
- var case = ctx.exeFromCompiledC("shift right + left", .{});
+ var case = ctx.exeFromCompiledC("shift right and left", .{});
case.addCompareOutput(
\\pub export fn main() c_int {
\\ var i: u32 = 16;
@@ -883,7 +883,7 @@ pub fn addCases(ctx: *TestContext) !void {
{
// TODO: add u64 tests, ran into issues with the literal generated for std.math.maxInt(u64)
- var case = ctx.exeFromCompiledC("add/sub wrapping operations", .{});
+ var case = ctx.exeFromCompiledC("add and sub wrapping operations", .{});
case.addCompareOutput(
\\pub export fn main() c_int {
\\ // Addition
@@ -932,7 +932,7 @@ pub fn addCases(ctx: *TestContext) !void {
}
{
- var case = ctx.exeFromCompiledC("@rem", linux_x64);
+ var case = ctx.exeFromCompiledC("rem", linux_x64);
case.addCompareOutput(
\\fn assert(ok: bool) void {
\\ if (!ok) unreachable;
@@ -947,69 +947,4 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, "");
}
-
- ctx.h("simple header", linux_x64,
- \\export fn start() void{}
- ,
- \\zig_extern void start(void);
- \\
- );
- ctx.h("header with single param function", linux_x64,
- \\export fn start(a: u8) void{
- \\ _ = a;
- \\}
- ,
- \\zig_extern void start(uint8_t const a0);
- \\
- );
- ctx.h("header with multiple param function", linux_x64,
- \\export fn start(a: u8, b: u8, c: u8) void{
- \\ _ = a; _ = b; _ = c;
- \\}
- ,
- \\zig_extern void start(uint8_t const a0, uint8_t const a1, uint8_t const a2);
- \\
- );
- ctx.h("header with u32 param function", linux_x64,
- \\export fn start(a: u32) void{ _ = a; }
- ,
- \\zig_extern void start(uint32_t const a0);
- \\
- );
- ctx.h("header with usize param function", linux_x64,
- \\export fn start(a: usize) void{ _ = a; }
- ,
- \\zig_extern void start(uintptr_t const a0);
- \\
- );
- ctx.h("header with bool param function", linux_x64,
- \\export fn start(a: bool) void{_ = a;}
- ,
- \\zig_extern void start(bool const a0);
- \\
- );
- ctx.h("header with noreturn function", linux_x64,
- \\export fn start() noreturn {
- \\ unreachable;
- \\}
- ,
- \\zig_extern zig_noreturn void start(void);
- \\
- );
- ctx.h("header with multiple functions", linux_x64,
- \\export fn a() void{}
- \\export fn b() void{}
- \\export fn c() void{}
- ,
- \\zig_extern void a(void);
- \\zig_extern void b(void);
- \\zig_extern void c(void);
- \\
- );
- ctx.h("header with multiple includes", linux_x64,
- \\export fn start(a: u32, b: usize) void{ _ = a; _ = b; }
- ,
- \\zig_extern void start(uint32_t const a0, uintptr_t const a1);
- \\
- );
}
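For orientation, a case registered through this Cases API has roughly the following shape; the body below is a minimal made-up example in the same style as the hunks above, not one of the existing cases:

const Cases = @import("src/Cases.zig");

pub fn addCases(ctx: *Cases) !void {
    // Compile via the C backend and compare the program's stdout.
    var case = ctx.exeFromCompiledC("return zero", .{});
    case.addCompareOutput(
        \\pub export fn main() c_int {
        \\    return 0;
        \\}
    , "");
}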
diff --git a/test/cli.zig b/test/cli.zig
deleted file mode 100644
index 57f26f73d7..0000000000
--- a/test/cli.zig
+++ /dev/null
@@ -1,195 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const testing = std.testing;
-const process = std.process;
-const fs = std.fs;
-const ChildProcess = std.ChildProcess;
-
-var a: std.mem.Allocator = undefined;
-
-pub fn main() !void {
- var gpa = std.heap.GeneralPurposeAllocator(.{}){};
- defer _ = gpa.deinit();
- var arena = std.heap.ArenaAllocator.init(gpa.allocator());
- defer arena.deinit();
-
- a = arena.allocator();
- var arg_it = try process.argsWithAllocator(a);
-
- // skip my own exe name
- _ = arg_it.skip();
-
- const zig_exe_rel = arg_it.next() orelse {
- std.debug.print("Expected first argument to be path to zig compiler\n", .{});
- return error.InvalidArgs;
- };
- const cache_root = arg_it.next() orelse {
- std.debug.print("Expected second argument to be cache root directory path\n", .{});
- return error.InvalidArgs;
- };
- const zig_exe = try fs.path.resolve(a, &[_][]const u8{zig_exe_rel});
-
- const dir_path = try fs.path.join(a, &[_][]const u8{ cache_root, "clitest" });
- defer fs.cwd().deleteTree(dir_path) catch {};
-
- const TestFn = fn ([]const u8, []const u8) anyerror!void;
- const Test = struct {
- func: TestFn,
- name: []const u8,
- };
- const tests = [_]Test{
- .{ .func = testZigInitLib, .name = "zig init-lib" },
- .{ .func = testZigInitExe, .name = "zig init-exe" },
- .{ .func = testGodboltApi, .name = "godbolt API" },
- .{ .func = testMissingOutputPath, .name = "missing output path" },
- .{ .func = testZigFmt, .name = "zig fmt" },
- };
- inline for (tests) |t| {
- try fs.cwd().deleteTree(dir_path);
- try fs.cwd().makeDir(dir_path);
- t.func(zig_exe, dir_path) catch |err| {
- std.debug.print("test '{s}' failed: {s}\n", .{
- t.name, @errorName(err),
- });
- return err;
- };
- }
-}
-
-fn printCmd(cwd: []const u8, argv: []const []const u8) void {
- std.debug.print("cd {s} && ", .{cwd});
- for (argv) |arg| {
- std.debug.print("{s} ", .{arg});
- }
- std.debug.print("\n", .{});
-}
-
-fn exec(cwd: []const u8, expect_0: bool, argv: []const []const u8) !ChildProcess.ExecResult {
- const max_output_size = 100 * 1024;
- const result = ChildProcess.exec(.{
- .allocator = a,
- .argv = argv,
- .cwd = cwd,
- .max_output_bytes = max_output_size,
- }) catch |err| {
- std.debug.print("The following command failed:\n", .{});
- printCmd(cwd, argv);
- return err;
- };
- switch (result.term) {
- .Exited => |code| {
- if ((code != 0) == expect_0) {
- std.debug.print("The following command exited with error code {}:\n", .{code});
- printCmd(cwd, argv);
- std.debug.print("stderr:\n{s}\n", .{result.stderr});
- return error.CommandFailed;
- }
- },
- else => {
- std.debug.print("The following command terminated unexpectedly:\n", .{});
- printCmd(cwd, argv);
- std.debug.print("stderr:\n{s}\n", .{result.stderr});
- return error.CommandFailed;
- },
- }
- return result;
-}
-
-fn testZigInitLib(zig_exe: []const u8, dir_path: []const u8) !void {
- _ = try exec(dir_path, true, &[_][]const u8{ zig_exe, "init-lib" });
- const test_result = try exec(dir_path, true, &[_][]const u8{ zig_exe, "build", "test" });
- try testing.expectStringEndsWith(test_result.stderr, "All 1 tests passed.\n");
-}
-
-fn testZigInitExe(zig_exe: []const u8, dir_path: []const u8) !void {
- _ = try exec(dir_path, true, &[_][]const u8{ zig_exe, "init-exe" });
- const run_result = try exec(dir_path, true, &[_][]const u8{ zig_exe, "build", "run" });
- try testing.expectEqualStrings("All your codebase are belong to us.\n", run_result.stderr);
- try testing.expectEqualStrings("Run `zig build test` to run the tests.\n", run_result.stdout);
-}
-
-fn testGodboltApi(zig_exe: []const u8, dir_path: []const u8) anyerror!void {
- if (builtin.os.tag != .linux or builtin.cpu.arch != .x86_64) return;
-
- const example_zig_path = try fs.path.join(a, &[_][]const u8{ dir_path, "example.zig" });
- const example_s_path = try fs.path.join(a, &[_][]const u8{ dir_path, "example.s" });
-
- try fs.cwd().writeFile(example_zig_path,
- \\// Type your code here, or load an example.
- \\export fn square(num: i32) i32 {
- \\ return num * num;
- \\}
- \\extern fn zig_panic() noreturn;
- \\pub fn panic(msg: []const u8, error_return_trace: ?*@import("std").builtin.StackTrace, _: ?usize) noreturn {
- \\ _ = msg;
- \\ _ = error_return_trace;
- \\ zig_panic();
- \\}
- );
-
- var args = std.ArrayList([]const u8).init(a);
- try args.appendSlice(&[_][]const u8{
- zig_exe, "build-obj",
- "--cache-dir", dir_path,
- "--name", "example",
- "-fno-emit-bin", "-fno-emit-h",
- "-fstrip", "-OReleaseFast",
- example_zig_path,
- });
-
- const emit_asm_arg = try std.fmt.allocPrint(a, "-femit-asm={s}", .{example_s_path});
- try args.append(emit_asm_arg);
-
- _ = try exec(dir_path, true, args.items);
-
- const out_asm = try std.fs.cwd().readFileAlloc(a, example_s_path, std.math.maxInt(usize));
- try testing.expect(std.mem.indexOf(u8, out_asm, "square:") != null);
- try testing.expect(std.mem.indexOf(u8, out_asm, "mov\teax, edi") != null);
- try testing.expect(std.mem.indexOf(u8, out_asm, "imul\teax, edi") != null);
-}
-
-fn testMissingOutputPath(zig_exe: []const u8, dir_path: []const u8) !void {
- _ = try exec(dir_path, true, &[_][]const u8{ zig_exe, "init-exe" });
- const output_path = try fs.path.join(a, &[_][]const u8{ "does", "not", "exist", "foo.exe" });
- const output_arg = try std.fmt.allocPrint(a, "-femit-bin={s}", .{output_path});
- const source_path = try fs.path.join(a, &[_][]const u8{ "src", "main.zig" });
- const result = try exec(dir_path, false, &[_][]const u8{ zig_exe, "build-exe", source_path, output_arg });
- const s = std.fs.path.sep_str;
- const expected: []const u8 = "error: unable to open output directory 'does" ++ s ++ "not" ++ s ++ "exist': FileNotFound\n";
- try testing.expectEqualStrings(expected, result.stderr);
-}
-
-fn testZigFmt(zig_exe: []const u8, dir_path: []const u8) !void {
- _ = try exec(dir_path, true, &[_][]const u8{ zig_exe, "init-exe" });
-
- const unformatted_code = " // no reason for indent";
-
- const fmt1_zig_path = try fs.path.join(a, &[_][]const u8{ dir_path, "fmt1.zig" });
- try fs.cwd().writeFile(fmt1_zig_path, unformatted_code);
-
- const run_result1 = try exec(dir_path, true, &[_][]const u8{ zig_exe, "fmt", fmt1_zig_path });
- // stderr should be file path + \n
- try testing.expect(std.mem.startsWith(u8, run_result1.stdout, fmt1_zig_path));
- try testing.expect(run_result1.stdout.len == fmt1_zig_path.len + 1 and run_result1.stdout[run_result1.stdout.len - 1] == '\n');
-
- const fmt2_zig_path = try fs.path.join(a, &[_][]const u8{ dir_path, "fmt2.zig" });
- try fs.cwd().writeFile(fmt2_zig_path, unformatted_code);
-
- const run_result2 = try exec(dir_path, true, &[_][]const u8{ zig_exe, "fmt", dir_path });
- // running it on the dir, only the new file should be changed
- try testing.expect(std.mem.startsWith(u8, run_result2.stdout, fmt2_zig_path));
- try testing.expect(run_result2.stdout.len == fmt2_zig_path.len + 1 and run_result2.stdout[run_result2.stdout.len - 1] == '\n');
-
- const run_result3 = try exec(dir_path, true, &[_][]const u8{ zig_exe, "fmt", dir_path });
- // both files have been formatted, nothing should change now
- try testing.expect(run_result3.stdout.len == 0);
-
- // Check UTF-16 decoding
- const fmt4_zig_path = try fs.path.join(a, &[_][]const u8{ dir_path, "fmt4.zig" });
- var unformatted_code_utf16 = "\xff\xfe \x00 \x00 \x00 \x00/\x00/\x00 \x00n\x00o\x00 \x00r\x00e\x00a\x00s\x00o\x00n\x00";
- try fs.cwd().writeFile(fmt4_zig_path, unformatted_code_utf16);
-
- const run_result4 = try exec(dir_path, true, &[_][]const u8{ zig_exe, "fmt", dir_path });
- try testing.expect(std.mem.startsWith(u8, run_result4.stdout, fmt4_zig_path));
- try testing.expect(run_result4.stdout.len == fmt4_zig_path.len + 1 and run_result4.stdout[run_result4.stdout.len - 1] == '\n');
-}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index e0b78b3000..2d796b9463 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,146 +1,10 @@
const std = @import("std");
const builtin = @import("builtin");
-const TestContext = @import("../src/test.zig").TestContext;
-
-pub fn addCases(ctx: *TestContext) !void {
- {
- const case = ctx.obj("wrong same named struct", .{});
- case.backend = .stage1;
-
- case.addSourceFile("a.zig",
- \\pub const Foo = struct {
- \\ x: i32,
- \\};
- );
-
- case.addSourceFile("b.zig",
- \\pub const Foo = struct {
- \\ z: f64,
- \\};
- );
-
- case.addError(
- \\const a = @import("a.zig");
- \\const b = @import("b.zig");
- \\
- \\export fn entry() void {
- \\ var a1: a.Foo = undefined;
- \\ bar(&a1);
- \\}
- \\
- \\fn bar(x: *b.Foo) void {_ = x;}
- , &[_][]const u8{
- "tmp.zig:6:10: error: expected type '*b.Foo', found '*a.Foo'",
- "tmp.zig:6:10: note: pointer type child 'a.Foo' cannot cast into pointer type child 'b.Foo'",
- "a.zig:1:17: note: a.Foo declared here",
- "b.zig:1:17: note: b.Foo declared here",
- });
- }
-
- {
- const case = ctx.obj("multiple files with private function error", .{});
- case.backend = .stage1;
-
- case.addSourceFile("foo.zig",
- \\fn privateFunction() void { }
- );
-
- case.addError(
- \\const foo = @import("foo.zig",);
- \\
- \\export fn callPrivFunction() void {
- \\ foo.privateFunction();
- \\}
- , &[_][]const u8{
- "tmp.zig:4:8: error: 'privateFunction' is private",
- "foo.zig:1:1: note: declared here",
- });
- }
-
- {
- const case = ctx.obj("multiple files with private member instance function (canonical invocation) error", .{});
- case.backend = .stage1;
-
- case.addSourceFile("foo.zig",
- \\pub const Foo = struct {
- \\ fn privateFunction(self: *Foo) void { _ = self; }
- \\};
- );
-
- case.addError(
- \\const Foo = @import("foo.zig",).Foo;
- \\
- \\export fn callPrivFunction() void {
- \\ var foo = Foo{};
- \\ Foo.privateFunction(foo);
- \\}
- , &[_][]const u8{
- "tmp.zig:5:8: error: 'privateFunction' is private",
- "foo.zig:2:5: note: declared here",
- });
- }
-
- {
- const case = ctx.obj("multiple files with private member instance function error", .{});
- case.backend = .stage1;
-
- case.addSourceFile("foo.zig",
- \\pub const Foo = struct {
- \\ fn privateFunction(self: *Foo) void { _ = self; }
- \\};
- );
-
- case.addError(
- \\const Foo = @import("foo.zig",).Foo;
- \\
- \\export fn callPrivFunction() void {
- \\ var foo = Foo{};
- \\ foo.privateFunction();
- \\}
- , &[_][]const u8{
- "tmp.zig:5:8: error: 'privateFunction' is private",
- "foo.zig:2:5: note: declared here",
- });
- }
-
- {
- const case = ctx.obj("export collision", .{});
- case.backend = .stage1;
-
- case.addSourceFile("foo.zig",
- \\export fn bar() void {}
- \\pub const baz = 1234;
- );
-
- case.addError(
- \\const foo = @import("foo.zig",);
- \\
- \\export fn bar() usize {
- \\ return foo.baz;
- \\}
- , &[_][]const u8{
- "foo.zig:1:1: error: exported symbol collision: 'bar'",
- "tmp.zig:3:1: note: other symbol here",
- });
- }
-
- ctx.objErrStage1("non-printable invalid character", "\xff\xfe" ++
- "fn foo() bool {\r\n" ++
- " return true;\r\n" ++
- "}\r\n", &[_][]const u8{
- "tmp.zig:1:1: error: expected test, comptime, var decl, or container field, found 'invalid bytes'",
- "tmp.zig:1:1: note: invalid byte: '\\xff'",
- });
-
- ctx.objErrStage1("non-printable invalid character with escape alternative", "fn foo() bool {\n" ++
- "\treturn true;\n" ++
- "}\n", &[_][]const u8{
- "tmp.zig:2:1: error: invalid character: '\\t'",
- });
+const Cases = @import("src/Cases.zig");
+pub fn addCases(ctx: *Cases) !void {
{
const case = ctx.obj("multiline error messages", .{});
- case.backend = .stage2;
case.addError(
\\comptime {
@@ -176,7 +40,6 @@ pub fn addCases(ctx: *TestContext) !void {
{
const case = ctx.obj("isolated carriage return in multiline string literal", .{});
- case.backend = .stage2;
case.addError("const foo = \\\\\test\r\r rogue carriage return\n;", &[_][]const u8{
":1:19: error: expected ';' after declaration",
@@ -195,16 +58,6 @@ pub fn addCases(ctx: *TestContext) !void {
{
const case = ctx.obj("argument causes error", .{});
- case.backend = .stage2;
-
- case.addSourceFile("b.zig",
- \\pub const ElfDynLib = struct {
- \\ pub fn lookup(self: *ElfDynLib, comptime T: type) ?T {
- \\ _ = self;
- \\ return undefined;
- \\ }
- \\};
- );
case.addError(
\\pub export fn entry() void {
@@ -216,15 +69,18 @@ pub fn addCases(ctx: *TestContext) !void {
":3:12: note: argument to function being called at comptime must be comptime-known",
":2:55: note: expression is evaluated at comptime because the generic function was instantiated with a comptime-only return type",
});
+ case.addSourceFile("b.zig",
+ \\pub const ElfDynLib = struct {
+ \\ pub fn lookup(self: *ElfDynLib, comptime T: type) ?T {
+ \\ _ = self;
+ \\ return undefined;
+ \\ }
+ \\};
+ );
}
{
const case = ctx.obj("astgen failure in file struct", .{});
- case.backend = .stage2;
-
- case.addSourceFile("b.zig",
- \\+
- );
case.addError(
\\pub export fn entry() void {
@@ -233,21 +89,13 @@ pub fn addCases(ctx: *TestContext) !void {
, &[_][]const u8{
":1:1: error: expected type expression, found '+'",
});
+ case.addSourceFile("b.zig",
+ \\+
+ );
}
{
const case = ctx.obj("invalid store to comptime field", .{});
- case.backend = .stage2;
-
- case.addSourceFile("a.zig",
- \\pub const S = struct {
- \\ comptime foo: u32 = 1,
- \\ bar: u32,
- \\ pub fn foo(x: @This()) void {
- \\ _ = x;
- \\ }
- \\};
- );
case.addError(
\\const a = @import("a.zig");
@@ -259,44 +107,19 @@ pub fn addCases(ctx: *TestContext) !void {
":4:23: error: value stored in comptime field does not match the default value of the field",
":2:25: note: default value set here",
});
+ case.addSourceFile("a.zig",
+ \\pub const S = struct {
+ \\ comptime foo: u32 = 1,
+ \\ bar: u32,
+ \\ pub fn foo(x: @This()) void {
+ \\ _ = x;
+ \\ }
+ \\};
+ );
}
- // TODO test this in stage2, but we won't even try in stage1
- //ctx.objErrStage1("inline fn calls itself indirectly",
- // \\export fn foo() void {
- // \\ bar();
- // \\}
- // \\fn bar() callconv(.Inline) void {
- // \\ baz();
- // \\ quux();
- // \\}
- // \\fn baz() callconv(.Inline) void {
- // \\ bar();
- // \\ quux();
- // \\}
- // \\extern fn quux() void;
- //, &[_][]const u8{
- // "tmp.zig:4:1: error: unable to inline function",
- //});
-
- //ctx.objErrStage1("save reference to inline function",
- // \\export fn foo() void {
- // \\ quux(@ptrToInt(bar));
- // \\}
- // \\fn bar() callconv(.Inline) void { }
- // \\extern fn quux(usize) void;
- //, &[_][]const u8{
- // "tmp.zig:4:1: error: unable to inline function",
- //});
-
{
const case = ctx.obj("file in multiple modules", .{});
- case.backend = .stage2;
-
- case.addSourceFile("foo.zig",
- \\const dummy = 0;
- );
-
case.addDepModule("foo", "foo.zig");
case.addError(
@@ -309,5 +132,8 @@ pub fn addCases(ctx: *TestContext) !void {
":1:1: note: root of module root.foo",
":3:17: note: imported from module root",
});
+ case.addSourceFile("foo.zig",
+ \\const dummy = 0;
+ );
}
}
diff --git a/test/link.zig b/test/link.zig
index c787e8b1ae..aa0ed4817e 100644
--- a/test/link.zig
+++ b/test/link.zig
@@ -1,213 +1,172 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const tests = @import("tests.zig");
+pub const Case = struct {
+ build_root: []const u8,
+ import: type,
+};
-pub fn addCases(cases: *tests.StandaloneContext) void {
- cases.addBuildFile("test/link/bss/build.zig", .{
- .build_modes = false, // we only guarantee zerofill for undefined in Debug
- });
+pub const cases = [_]Case{
+ .{
+ .build_root = "test/link/bss",
+ .import = @import("link/bss/build.zig"),
+ },
+ .{
+ .build_root = "test/link/common_symbols",
+ .import = @import("link/common_symbols/build.zig"),
+ },
+ .{
+ .build_root = "test/link/common_symbols_alignment",
+ .import = @import("link/common_symbols_alignment/build.zig"),
+ },
+ .{
+ .build_root = "test/link/interdependent_static_c_libs",
+ .import = @import("link/interdependent_static_c_libs/build.zig"),
+ },
- cases.addBuildFile("test/link/common_symbols/build.zig", .{
- .build_modes = true,
- });
+ // WASM Cases
+ .{
+ .build_root = "test/link/wasm/archive",
+ .import = @import("link/wasm/archive/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/basic-features",
+ .import = @import("link/wasm/basic-features/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/bss",
+ .import = @import("link/wasm/bss/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/export",
+ .import = @import("link/wasm/export/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/export-data",
+ .import = @import("link/wasm/export-data/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/extern",
+ .import = @import("link/wasm/extern/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/extern-mangle",
+ .import = @import("link/wasm/extern-mangle/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/function-table",
+ .import = @import("link/wasm/function-table/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/infer-features",
+ .import = @import("link/wasm/infer-features/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/producers",
+ .import = @import("link/wasm/producers/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/segments",
+ .import = @import("link/wasm/segments/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/stack_pointer",
+ .import = @import("link/wasm/stack_pointer/build.zig"),
+ },
+ .{
+ .build_root = "test/link/wasm/type",
+ .import = @import("link/wasm/type/build.zig"),
+ },
- cases.addBuildFile("test/link/common_symbols_alignment/build.zig", .{
- .build_modes = true,
- });
+ // Mach-O Cases
+ .{
+ .build_root = "test/link/macho/bugs/13056",
+ .import = @import("link/macho/bugs/13056/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/bugs/13457",
+ .import = @import("link/macho/bugs/13457/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/dead_strip",
+ .import = @import("link/macho/dead_strip/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/dead_strip_dylibs",
+ .import = @import("link/macho/dead_strip_dylibs/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/dylib",
+ .import = @import("link/macho/dylib/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/empty",
+ .import = @import("link/macho/empty/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/entry",
+ .import = @import("link/macho/entry/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/headerpad",
+ .import = @import("link/macho/headerpad/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/linksection",
+ .import = @import("link/macho/linksection/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/needed_framework",
+ .import = @import("link/macho/needed_framework/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/needed_library",
+ .import = @import("link/macho/needed_library/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/objc",
+ .import = @import("link/macho/objc/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/objcpp",
+ .import = @import("link/macho/objcpp/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/pagezero",
+ .import = @import("link/macho/pagezero/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/search_strategy",
+ .import = @import("link/macho/search_strategy/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/stack_size",
+ .import = @import("link/macho/stack_size/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/strict_validation",
+ .import = @import("link/macho/strict_validation/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/tls",
+ .import = @import("link/macho/tls/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/unwind_info",
+ .import = @import("link/macho/unwind_info/build.zig"),
+ },
+ // TODO: re-enable this test. It currently has some incompatibilities with
+ // the new build system API. In particular, it depends on installing the build
+ // artifacts, which should be unnecessary, and it has a custom build step that
+ // prints directly to stderr instead of failing the step with an error message.
+ //.{
+ // .build_root = "test/link/macho/uuid",
+ // .import = @import("link/macho/uuid/build.zig"),
+ //},
- cases.addBuildFile("test/link/interdependent_static_c_libs/build.zig", .{
- .build_modes = true,
- });
-
- cases.addBuildFile("test/link/static_lib_as_system_lib/build.zig", .{
- .build_modes = true,
- });
-
- addWasmCases(cases);
- addMachOCases(cases);
-}
-
-fn addWasmCases(cases: *tests.StandaloneContext) void {
- cases.addBuildFile("test/link/wasm/archive/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/basic-features/build.zig", .{
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/bss/build.zig", .{
- .build_modes = false,
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/export/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- });
-
- // TODO: Fix open handle in wasm-linker refraining rename from working on Windows.
- if (builtin.os.tag != .windows) {
- cases.addBuildFile("test/link/wasm/export-data/build.zig", .{});
- }
-
- cases.addBuildFile("test/link/wasm/extern/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- .use_emulation = true,
- });
-
- cases.addBuildFile("test/link/wasm/extern-mangle/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/function-table/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/infer-features/build.zig", .{
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/producers/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/segments/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/stack_pointer/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- });
-
- cases.addBuildFile("test/link/wasm/type/build.zig", .{
- .build_modes = true,
- .requires_stage2 = true,
- });
-}
-
-fn addMachOCases(cases: *tests.StandaloneContext) void {
- cases.addBuildFile("test/link/macho/bugs/13056/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/bugs/13457/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/dead_strip/build.zig", .{
- .build_modes = false,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/dead_strip_dylibs/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/dylib/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/empty/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/entry/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/headerpad/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/linksection/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/needed_framework/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/needed_library/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/objc/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/objcpp/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/pagezero/build.zig", .{
- .build_modes = false,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/search_strategy/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/stack_size/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/strict_validation/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/tls/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/unwind_info/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/uuid/build.zig", .{
- .build_modes = false,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/weak_library/build.zig", .{
- .build_modes = true,
- .requires_symlinks = true,
- });
-
- cases.addBuildFile("test/link/macho/weak_framework/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- .requires_symlinks = true,
- });
-}
+ .{
+ .build_root = "test/link/macho/weak_library",
+ .import = @import("link/macho/weak_library/build.zig"),
+ },
+ .{
+ .build_root = "test/link/macho/weak_framework",
+ .import = @import("link/macho/weak_framework/build.zig"),
+ },
+};
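Nothing in this file shows how the table is consumed; the following is a plausible harness-side sketch only, assuming each case's build.zig exposes its requirements as public declarations (as the scripts further below do with requires_symlinks and requires_macos_sdk) and using hypothetical helper names:

const std = @import("std");
const builtin = @import("builtin");

// Assumes this sketch sits next to test/link.zig.
const link = @import("link.zig");

// Hypothetical helpers: read the opt-outs that the standalone build
// scripts export as public declarations, defaulting to false when absent.
fn requiresMacosSdk(comptime case: link.Case) bool {
    return @hasDecl(case.import, "requires_macos_sdk") and case.import.requires_macos_sdk;
}

fn requiresSymlinks(comptime case: link.Case) bool {
    return @hasDecl(case.import, "requires_symlinks") and case.import.requires_symlinks;
}

// Hypothetical filter: decide at comptime whether a case can run on this host.
fn runnableOnHost(comptime case: link.Case) bool {
    if (requiresMacosSdk(case) and !builtin.os.tag.isDarwin()) return false;
    if (requiresSymlinks(case) and builtin.os.tag == .windows) return false;
    return true;
}

test "case table is well-formed" {
    inline for (link.cases) |case| {
        try std.testing.expect(std.mem.startsWith(u8, case.build_root, "test/link/"));
        _ = comptime runnableOnHost(case);
    }
}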
diff --git a/test/link/bss/build.zig b/test/link/bss/build.zig
index 0df9c1d323..86600b58f5 100644
--- a/test/link/bss/build.zig
+++ b/test/link/bss/build.zig
@@ -1,17 +1,17 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
+ b.default_step = test_step;
const exe = b.addExecutable(.{
.name = "bss",
.root_source_file = .{ .path = "main.zig" },
- .optimize = optimize,
+ .optimize = .Debug,
});
- b.default_step.dependOn(&exe.step);
const run = exe.run();
run.expectStdOutEqual("0, 1, 0\n");
+
test_step.dependOn(&run.step);
}
diff --git a/test/link/bss/main.zig b/test/link/bss/main.zig
index c901f0bb27..d2ecffe982 100644
--- a/test/link/bss/main.zig
+++ b/test/link/bss/main.zig
@@ -1,7 +1,7 @@
const std = @import("std");
// Stress test zerofill layout
-var buffer: [0x1000000]u64 = undefined;
+var buffer: [0x1000000]u64 = [1]u64{0} ** 0x1000000;
pub fn main() anyerror!void {
buffer[0x10] = 1;
diff --git a/test/link/common_symbols/build.zig b/test/link/common_symbols/build.zig
index ee9dd94ebd..a8c276d1f3 100644
--- a/test/link/common_symbols/build.zig
+++ b/test/link/common_symbols/build.zig
@@ -1,8 +1,16 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib_a = b.addStaticLibrary(.{
.name = "a",
.optimize = optimize,
@@ -16,6 +24,5 @@ pub fn build(b: *std.Build) void {
});
test_exe.linkLibrary(lib_a);
- const test_step = b.step("test", "Test it");
- test_step.dependOn(&test_exe.step);
+ test_step.dependOn(&test_exe.run().step);
}
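The build scripts that follow are all restructured the same way: the test step becomes the default step, and a private add() helper runs once per optimize mode. Stripped of the per-test compile, check, and run logic, the recurring shape is roughly this:

const std = @import("std");

pub fn build(b: *std.Build) void {
    const test_step = b.step("test", "Test it");
    b.default_step = test_step;

    add(b, test_step, .Debug);
    add(b, test_step, .ReleaseFast);
    add(b, test_step, .ReleaseSmall);
    add(b, test_step, .ReleaseSafe);
}

fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
    // Per-test logic goes here: build the artifacts for this optimize mode
    // and hang the resulting run/check steps off `test_step`.
    _ = b;
    _ = test_step;
    _ = optimize;
}

Scripts that need extra host capabilities additionally export file-scope declarations such as pub const requires_symlinks = true; or pub const requires_macos_sdk = true;, replacing the options previously passed to addBuildFile.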
diff --git a/test/link/common_symbols_alignment/build.zig b/test/link/common_symbols_alignment/build.zig
index f6efdc784b..83548f2d8a 100644
--- a/test/link/common_symbols_alignment/build.zig
+++ b/test/link/common_symbols_alignment/build.zig
@@ -1,23 +1,28 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib_a = b.addStaticLibrary(.{
.name = "a",
.optimize = optimize,
- .target = target,
+ .target = .{},
});
lib_a.addCSourceFiles(&.{"a.c"}, &.{"-fcommon"});
const test_exe = b.addTest(.{
.root_source_file = .{ .path = "main.zig" },
.optimize = optimize,
- .target = target,
});
test_exe.linkLibrary(lib_a);
- const test_step = b.step("test", "Test it");
- test_step.dependOn(&test_exe.step);
+ test_step.dependOn(&test_exe.run().step);
}
diff --git a/test/link/interdependent_static_c_libs/build.zig b/test/link/interdependent_static_c_libs/build.zig
index d8962a8e08..0d06410a79 100644
--- a/test/link/interdependent_static_c_libs/build.zig
+++ b/test/link/interdependent_static_c_libs/build.zig
@@ -1,13 +1,20 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib_a = b.addStaticLibrary(.{
.name = "a",
.optimize = optimize,
- .target = target,
+ .target = .{},
});
lib_a.addCSourceFile("a.c", &[_][]const u8{});
lib_a.addIncludePath(".");
@@ -15,7 +22,7 @@ pub fn build(b: *std.Build) void {
const lib_b = b.addStaticLibrary(.{
.name = "b",
.optimize = optimize,
- .target = target,
+ .target = .{},
});
lib_b.addCSourceFile("b.c", &[_][]const u8{});
lib_b.addIncludePath(".");
@@ -23,12 +30,10 @@ pub fn build(b: *std.Build) void {
const test_exe = b.addTest(.{
.root_source_file = .{ .path = "main.zig" },
.optimize = optimize,
- .target = target,
});
test_exe.linkLibrary(lib_a);
test_exe.linkLibrary(lib_b);
test_exe.addIncludePath(".");
- const test_step = b.step("test", "Test it");
- test_step.dependOn(&test_exe.step);
+ test_step.dependOn(&test_exe.run().step);
}
diff --git a/test/link/macho/bugs/13056/build.zig b/test/link/macho/bugs/13056/build.zig
index 662fd25c92..db7c129cbf 100644
--- a/test/link/macho/bugs/13056/build.zig
+++ b/test/link/macho/bugs/13056/build.zig
@@ -1,20 +1,28 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+pub const requires_macos_sdk = true;
+pub const requires_symlinks = true;
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const target_info = std.zig.system.NativeTargetInfo.detect(target) catch unreachable;
const sdk = std.zig.system.darwin.getDarwinSDK(b.allocator, target_info.target) orelse
@panic("macOS SDK is required to run the test");
- const test_step = b.step("test", "Test the program");
-
const exe = b.addExecutable(.{
.name = "test",
.optimize = optimize,
});
- b.default_step.dependOn(&exe.step);
exe.addIncludePath(std.fs.path.join(b.allocator, &.{ sdk.path, "/usr/include" }) catch unreachable);
exe.addIncludePath(std.fs.path.join(b.allocator, &.{ sdk.path, "/usr/include/c++/v1" }) catch unreachable);
exe.addCSourceFile("test.cpp", &.{
diff --git a/test/link/macho/bugs/13457/build.zig b/test/link/macho/bugs/13457/build.zig
index 3560b4a168..89096bba38 100644
--- a/test/link/macho/bugs/13457/build.zig
+++ b/test/link/macho/bugs/13457/build.zig
@@ -1,10 +1,19 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test the program");
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const exe = b.addExecutable(.{
.name = "test",
@@ -13,6 +22,9 @@ pub fn build(b: *std.Build) void {
.target = target,
});
- const run = exe.runEmulatable();
+ const run = b.addRunArtifact(exe);
+ run.skip_foreign_checks = true;
+ run.expectStdOutEqual("");
+
test_step.dependOn(&run.step);
}
diff --git a/test/link/macho/dead_strip/build.zig b/test/link/macho/dead_strip/build.zig
index d82c81edca..4c739b3d8c 100644
--- a/test/link/macho/dead_strip/build.zig
+++ b/test/link/macho/dead_strip/build.zig
@@ -1,17 +1,19 @@
const std = @import("std");
+pub const requires_symlinks = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const optimize: std.builtin.OptimizeMode = .Debug;
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
- test_step.dependOn(b.getInstallStep());
+ b.default_step = test_step;
{
// Without -dead_strip, we expect `iAmUnused` symbol present
- const exe = createScenario(b, optimize, target);
+ const exe = createScenario(b, optimize, target, "no-gc");
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkInSymtab();
check.checkNext("{*} (__TEXT,__text) external _iAmUnused");
@@ -22,10 +24,10 @@ pub fn build(b: *std.Build) void {
{
// With -dead_strip, no `iAmUnused` symbol should be present
- const exe = createScenario(b, optimize, target);
+ const exe = createScenario(b, optimize, target, "yes-gc");
exe.link_gc_sections = true;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkInSymtab();
check.checkNotPresent("{*} (__TEXT,__text) external _iAmUnused");
@@ -39,9 +41,10 @@ fn createScenario(
b: *std.Build,
optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
+ name: []const u8,
) *std.Build.CompileStep {
const exe = b.addExecutable(.{
- .name = "test",
+ .name = name,
.optimize = optimize,
.target = target,
});
diff --git a/test/link/macho/dead_strip_dylibs/build.zig b/test/link/macho/dead_strip_dylibs/build.zig
index af2f5cf0dc..b9b97949c1 100644
--- a/test/link/macho/dead_strip_dylibs/build.zig
+++ b/test/link/macho/dead_strip_dylibs/build.zig
@@ -1,16 +1,24 @@
const std = @import("std");
+pub const requires_macos_sdk = true;
+pub const requires_symlinks = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(b.getInstallStep());
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
{
// Without -dead_strip_dylibs we expect `-la` to include liba.dylib in the final executable
- const exe = createScenario(b, optimize);
+ const exe = createScenario(b, optimize, "no-dead-strip");
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("cmd LOAD_DYLIB");
check.checkNext("name {*}Cocoa");
@@ -25,18 +33,22 @@ pub fn build(b: *std.Build) void {
{
// With -dead_strip_dylibs, we should include liba.dylib as it's unreachable
- const exe = createScenario(b, optimize);
+ const exe = createScenario(b, optimize, "yes-dead-strip");
exe.dead_strip_dylibs = true;
- const run_cmd = exe.run();
- run_cmd.expected_term = .{ .Exited = @bitCast(u8, @as(i8, -2)) }; // should fail
+ const run_cmd = b.addRunArtifact(exe);
+ run_cmd.expectExitCode(@bitCast(u8, @as(i8, -2))); // should fail
test_step.dependOn(&run_cmd.step);
}
}
-fn createScenario(b: *std.Build, optimize: std.builtin.OptimizeMode) *std.Build.CompileStep {
+fn createScenario(
+ b: *std.Build,
+ optimize: std.builtin.OptimizeMode,
+ name: []const u8,
+) *std.Build.CompileStep {
const exe = b.addExecutable(.{
- .name = "test",
+ .name = name,
.optimize = optimize,
});
exe.addCSourceFile("main.c", &[0][]const u8{});
diff --git a/test/link/macho/dylib/build.zig b/test/link/macho/dylib/build.zig
index 7a1e2d862c..2d775aa23f 100644
--- a/test/link/macho/dylib/build.zig
+++ b/test/link/macho/dylib/build.zig
@@ -1,11 +1,19 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const dylib = b.addSharedLibrary(.{
.name = "a",
@@ -15,9 +23,8 @@ pub fn build(b: *std.Build) void {
});
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
- dylib.install();
- const check_dylib = dylib.checkObject(.macho);
+ const check_dylib = dylib.checkObject();
check_dylib.checkStart("cmd ID_DYLIB");
check_dylib.checkNext("name @rpath/liba.dylib");
check_dylib.checkNext("timestamp 2");
@@ -33,11 +40,11 @@ pub fn build(b: *std.Build) void {
});
exe.addCSourceFile("main.c", &.{});
exe.linkSystemLibrary("a");
+ exe.addLibraryPathDirectorySource(dylib.getOutputDirectorySource());
+ exe.addRPathDirectorySource(dylib.getOutputDirectorySource());
exe.linkLibC();
- exe.addLibraryPath(b.pathFromRoot("zig-out/lib/"));
- exe.addRPath(b.pathFromRoot("zig-out/lib"));
- const check_exe = exe.checkObject(.macho);
+ const check_exe = exe.checkObject();
check_exe.checkStart("cmd LOAD_DYLIB");
check_exe.checkNext("name @rpath/liba.dylib");
check_exe.checkNext("timestamp 2");
@@ -45,10 +52,12 @@ pub fn build(b: *std.Build) void {
check_exe.checkNext("compatibility version 10000");
check_exe.checkStart("cmd RPATH");
- check_exe.checkNext(std.fmt.allocPrint(b.allocator, "path {s}", .{b.pathFromRoot("zig-out/lib")}) catch unreachable);
+ // TODO check this (perhaps with `checkNextFileSource(dylib.getOutputDirectorySource())`)
+ //check_exe.checkNext(std.fmt.allocPrint(b.allocator, "path {s}", .{
+ // b.pathFromRoot("zig-out/lib"),
+ //}) catch unreachable);
const run = check_exe.runAndCompare();
- run.cwd = b.pathFromRoot(".");
run.expectStdOutEqual("Hello world");
test_step.dependOn(&run.step);
}
diff --git a/test/link/macho/empty/build.zig b/test/link/macho/empty/build.zig
index 586da1511b..9933746d53 100644
--- a/test/link/macho/empty/build.zig
+++ b/test/link/macho/empty/build.zig
@@ -1,11 +1,19 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const exe = b.addExecutable(.{
.name = "test",
@@ -16,7 +24,8 @@ pub fn build(b: *std.Build) void {
exe.addCSourceFile("empty.c", &[0][]const u8{});
exe.linkLibC();
- const run_cmd = std.Build.EmulatableRunStep.create(b, "run", exe);
+ const run_cmd = b.addRunArtifact(exe);
+ run_cmd.skip_foreign_checks = true;
run_cmd.expectStdOutEqual("Hello!\n");
test_step.dependOn(&run_cmd.step);
}
diff --git a/test/link/macho/entry/build.zig b/test/link/macho/entry/build.zig
index 4504da9c6c..e983bc9391 100644
--- a/test/link/macho/entry/build.zig
+++ b/test/link/macho/entry/build.zig
@@ -1,11 +1,18 @@
const std = @import("std");
+pub const requires_symlinks = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const exe = b.addExecutable(.{
.name = "main",
.optimize = optimize,
@@ -15,7 +22,7 @@ pub fn build(b: *std.Build) void {
exe.linkLibC();
exe.entry_symbol_name = "_non_main";
- const check_exe = exe.checkObject(.macho);
+ const check_exe = exe.checkObject();
check_exe.checkStart("segname __TEXT");
check_exe.checkNext("vmaddr {vmaddr}");
diff --git a/test/link/macho/headerpad/build.zig b/test/link/macho/headerpad/build.zig
index 3ef17573f8..0c9275b8d8 100644
--- a/test/link/macho/headerpad/build.zig
+++ b/test/link/macho/headerpad/build.zig
@@ -1,18 +1,26 @@
const std = @import("std");
const builtin = @import("builtin");
+pub const requires_symlinks = true;
+pub const requires_macos_sdk = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
{
// Test -headerpad_max_install_names
- const exe = simpleExe(b, optimize);
+ const exe = simpleExe(b, optimize, "headerpad_max_install_names");
exe.headerpad_max_install_names = true;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("sectname __text");
check.checkNext("offset {offset}");
@@ -34,10 +42,10 @@ pub fn build(b: *std.Build) void {
{
// Test -headerpad
- const exe = simpleExe(b, optimize);
+ const exe = simpleExe(b, optimize, "headerpad");
exe.headerpad_size = 0x10000;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("sectname __text");
check.checkNext("offset {offset}");
check.checkComputeCompare("offset", .{ .op = .gte, .value = .{ .literal = 0x10000 } });
@@ -50,11 +58,11 @@ pub fn build(b: *std.Build) void {
{
// Test both flags with -headerpad overriding -headerpad_max_install_names
- const exe = simpleExe(b, optimize);
+ const exe = simpleExe(b, optimize, "headerpad_overriding");
exe.headerpad_max_install_names = true;
exe.headerpad_size = 0x10000;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("sectname __text");
check.checkNext("offset {offset}");
check.checkComputeCompare("offset", .{ .op = .gte, .value = .{ .literal = 0x10000 } });
@@ -67,11 +75,11 @@ pub fn build(b: *std.Build) void {
{
// Test both flags with -headerpad_max_install_names overriding -headerpad
- const exe = simpleExe(b, optimize);
+ const exe = simpleExe(b, optimize, "headerpad_max_install_names_overriding");
exe.headerpad_size = 0x1000;
exe.headerpad_max_install_names = true;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("sectname __text");
check.checkNext("offset {offset}");
@@ -92,9 +100,13 @@ pub fn build(b: *std.Build) void {
}
}
-fn simpleExe(b: *std.Build, optimize: std.builtin.OptimizeMode) *std.Build.CompileStep {
+fn simpleExe(
+ b: *std.Build,
+ optimize: std.builtin.OptimizeMode,
+ name: []const u8,
+) *std.Build.CompileStep {
const exe = b.addExecutable(.{
- .name = "main",
+ .name = name,
.optimize = optimize,
});
exe.addCSourceFile("main.c", &.{});
diff --git a/test/link/macho/linksection/build.zig b/test/link/macho/linksection/build.zig
index 227d4eeb63..b8b3a59f35 100644
--- a/test/link/macho/linksection/build.zig
+++ b/test/link/macho/linksection/build.zig
@@ -1,11 +1,19 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = std.zig.CrossTarget{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target = std.zig.CrossTarget{ .os_tag = .macos };
const obj = b.addObject(.{
.name = "test",
@@ -14,7 +22,7 @@ pub fn build(b: *std.Build) void {
.target = target,
});
- const check = obj.checkObject(.macho);
+ const check = obj.checkObject();
check.checkInSymtab();
check.checkNext("{*} (__DATA,__TestGlobal) external _test_global");
diff --git a/test/link/macho/needed_framework/build.zig b/test/link/macho/needed_framework/build.zig
index 8b6e3dd87f..3de96efbc7 100644
--- a/test/link/macho/needed_framework/build.zig
+++ b/test/link/macho/needed_framework/build.zig
@@ -1,11 +1,19 @@
const std = @import("std");
+pub const requires_symlinks = true;
+pub const requires_macos_sdk = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(b.getInstallStep());
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
// -dead_strip_dylibs
// -needed_framework Cocoa
const exe = b.addExecutable(.{
@@ -17,7 +25,7 @@ pub fn build(b: *std.Build) void {
exe.linkFrameworkNeeded("Cocoa");
exe.dead_strip_dylibs = true;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("cmd LOAD_DYLIB");
check.checkNext("name {*}Cocoa");
test_step.dependOn(&check.step);
diff --git a/test/link/macho/needed_library/build.zig b/test/link/macho/needed_library/build.zig
index 92a73d22b7..cb9ea38d4b 100644
--- a/test/link/macho/needed_library/build.zig
+++ b/test/link/macho/needed_library/build.zig
@@ -1,11 +1,19 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const dylib = b.addSharedLibrary(.{
.name = "a",
@@ -15,7 +23,6 @@ pub fn build(b: *std.Build) void {
});
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
- dylib.install();
// -dead_strip_dylibs
// -needed-la
@@ -27,14 +34,15 @@ pub fn build(b: *std.Build) void {
exe.addCSourceFile("main.c", &[0][]const u8{});
exe.linkLibC();
exe.linkSystemLibraryNeeded("a");
- exe.addLibraryPath(b.pathFromRoot("zig-out/lib"));
- exe.addRPath(b.pathFromRoot("zig-out/lib"));
+ exe.addLibraryPathDirectorySource(dylib.getOutputDirectorySource());
+ exe.addRPathDirectorySource(dylib.getOutputDirectorySource());
exe.dead_strip_dylibs = true;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("cmd LOAD_DYLIB");
check.checkNext("name @rpath/liba.dylib");
const run_cmd = check.runAndCompare();
+ run_cmd.expectStdOutEqual("");
test_step.dependOn(&run_cmd.step);
}
diff --git a/test/link/macho/objc/build.zig b/test/link/macho/objc/build.zig
index 10d293baab..4cd12f786f 100644
--- a/test/link/macho/objc/build.zig
+++ b/test/link/macho/objc/build.zig
@@ -1,10 +1,19 @@
const std = @import("std");
+pub const requires_symlinks = true;
+pub const requires_macos_sdk = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test the program");
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const exe = b.addExecutable(.{
.name = "test",
.optimize = optimize,
@@ -17,6 +26,8 @@ pub fn build(b: *std.Build) void {
// populate paths to the sysroot here.
exe.linkFramework("Foundation");
- const run_cmd = std.Build.EmulatableRunStep.create(b, "run", exe);
+ const run_cmd = b.addRunArtifact(exe);
+ run_cmd.skip_foreign_checks = true;
+ run_cmd.expectStdOutEqual("");
test_step.dependOn(&run_cmd.step);
}
diff --git a/test/link/macho/objcpp/build.zig b/test/link/macho/objcpp/build.zig
index 2a3459be50..06876247a9 100644
--- a/test/link/macho/objcpp/build.zig
+++ b/test/link/macho/objcpp/build.zig
@@ -1,10 +1,19 @@
const std = @import("std");
+pub const requires_symlinks = true;
+pub const requires_macos_sdk = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test the program");
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const exe = b.addExecutable(.{
.name = "test",
.optimize = optimize,
diff --git a/test/link/macho/pagezero/build.zig b/test/link/macho/pagezero/build.zig
index 0a8471b919..b467df2b20 100644
--- a/test/link/macho/pagezero/build.zig
+++ b/test/link/macho/pagezero/build.zig
@@ -1,11 +1,13 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
{
const exe = b.addExecutable(.{
@@ -17,7 +19,7 @@ pub fn build(b: *std.Build) void {
exe.linkLibC();
exe.pagezero_size = 0x4000;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("LC 0");
check.checkNext("segname __PAGEZERO");
check.checkNext("vmaddr 0");
@@ -39,7 +41,7 @@ pub fn build(b: *std.Build) void {
exe.linkLibC();
exe.pagezero_size = 0;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("LC 0");
check.checkNext("segname __TEXT");
check.checkNext("vmaddr 0");
diff --git a/test/link/macho/search_strategy/build.zig b/test/link/macho/search_strategy/build.zig
index 62757f885b..4777629c8b 100644
--- a/test/link/macho/search_strategy/build.zig
+++ b/test/link/macho/search_strategy/build.zig
@@ -1,34 +1,41 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
{
// -search_dylibs_first
- const exe = createScenario(b, optimize, target);
+ const exe = createScenario(b, optimize, target, "search_dylibs_first");
exe.search_strategy = .dylibs_first;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("cmd LOAD_DYLIB");
- check.checkNext("name @rpath/liba.dylib");
+ check.checkNext("name @rpath/libsearch_dylibs_first.dylib");
const run = check.runAndCompare();
- run.cwd = b.pathFromRoot(".");
run.expectStdOutEqual("Hello world");
test_step.dependOn(&run.step);
}
{
// -search_paths_first
- const exe = createScenario(b, optimize, target);
+ const exe = createScenario(b, optimize, target, "search_paths_first");
exe.search_strategy = .paths_first;
- const run = std.Build.EmulatableRunStep.create(b, "run", exe);
- run.cwd = b.pathFromRoot(".");
+ const run = b.addRunArtifact(exe);
+ run.skip_foreign_checks = true;
run.expectStdOutEqual("Hello world");
test_step.dependOn(&run.step);
}
@@ -38,9 +45,10 @@ fn createScenario(
b: *std.Build,
optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
+ name: []const u8,
) *std.Build.CompileStep {
const static = b.addStaticLibrary(.{
- .name = "a",
+ .name = name,
.optimize = optimize,
.target = target,
});
@@ -49,10 +57,9 @@ fn createScenario(
static.override_dest_dir = std.Build.InstallDir{
.custom = "static",
};
- static.install();
const dylib = b.addSharedLibrary(.{
- .name = "a",
+ .name = name,
.version = .{ .major = 1, .minor = 0 },
.optimize = optimize,
.target = target,
@@ -62,18 +69,17 @@ fn createScenario(
dylib.override_dest_dir = std.Build.InstallDir{
.custom = "dynamic",
};
- dylib.install();
const exe = b.addExecutable(.{
- .name = "main",
+ .name = name,
.optimize = optimize,
.target = target,
});
exe.addCSourceFile("main.c", &.{});
- exe.linkSystemLibraryName("a");
+ exe.linkSystemLibraryName(name);
exe.linkLibC();
- exe.addLibraryPath(b.pathFromRoot("zig-out/static"));
- exe.addLibraryPath(b.pathFromRoot("zig-out/dynamic"));
- exe.addRPath(b.pathFromRoot("zig-out/dynamic"));
+ exe.addLibraryPathDirectorySource(static.getOutputDirectorySource());
+ exe.addLibraryPathDirectorySource(dylib.getOutputDirectorySource());
+ exe.addRPathDirectorySource(dylib.getOutputDirectorySource());
return exe;
}
diff --git a/test/link/macho/stack_size/build.zig b/test/link/macho/stack_size/build.zig
index 3529a134eb..c7d308d004 100644
--- a/test/link/macho/stack_size/build.zig
+++ b/test/link/macho/stack_size/build.zig
@@ -1,11 +1,19 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const exe = b.addExecutable(.{
.name = "main",
@@ -16,10 +24,11 @@ pub fn build(b: *std.Build) void {
exe.linkLibC();
exe.stack_size = 0x100000000;
- const check_exe = exe.checkObject(.macho);
+ const check_exe = exe.checkObject();
check_exe.checkStart("cmd MAIN");
check_exe.checkNext("stacksize 100000000");
const run = check_exe.runAndCompare();
+ run.expectStdOutEqual("");
test_step.dependOn(&run.step);
}
diff --git a/test/link/macho/strict_validation/build.zig b/test/link/macho/strict_validation/build.zig
index 408076657b..34a0cd73fc 100644
--- a/test/link/macho/strict_validation/build.zig
+++ b/test/link/macho/strict_validation/build.zig
@@ -1,12 +1,20 @@
const std = @import("std");
const builtin = @import("builtin");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const exe = b.addExecutable(.{
.name = "main",
@@ -16,7 +24,7 @@ pub fn build(b: *std.Build) void {
});
exe.linkLibC();
- const check_exe = exe.checkObject(.macho);
+ const check_exe = exe.checkObject();
check_exe.checkStart("cmd SEGMENT_64");
check_exe.checkNext("segname __LINKEDIT");
diff --git a/test/link/macho/tls/build.zig b/test/link/macho/tls/build.zig
index c77588cb5d..5981fea194 100644
--- a/test/link/macho/tls/build.zig
+++ b/test/link/macho/tls/build.zig
@@ -1,7 +1,18 @@
const std = @import("std");
+pub const requires_symlinks = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const lib = b.addSharedLibrary(.{
@@ -21,6 +32,8 @@ pub fn build(b: *std.Build) void {
test_exe.linkLibrary(lib);
test_exe.linkLibC();
- const test_step = b.step("test", "Test it");
- test_step.dependOn(&test_exe.step);
+ const run = test_exe.run();
+ run.skip_foreign_checks = true;
+
+ test_step.dependOn(&run.step);
}
diff --git a/test/link/macho/unwind_info/build.zig b/test/link/macho/unwind_info/build.zig
index 408f762f5d..4ace2a4e96 100644
--- a/test/link/macho/unwind_info/build.zig
+++ b/test/link/macho/unwind_info/build.zig
@@ -1,14 +1,23 @@
const std = @import("std");
const builtin = @import("builtin");
+pub const requires_symlinks = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
- const test_step = b.step("test", "Test the program");
-
- testUnwindInfo(b, test_step, optimize, target, false);
- testUnwindInfo(b, test_step, optimize, target, true);
+ testUnwindInfo(b, test_step, optimize, target, false, "no-dead-strip");
+ testUnwindInfo(b, test_step, optimize, target, true, "yes-dead-strip");
}
fn testUnwindInfo(
@@ -17,11 +26,12 @@ fn testUnwindInfo(
optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
dead_strip: bool,
+ name: []const u8,
) void {
- const exe = createScenario(b, optimize, target);
+ const exe = createScenario(b, optimize, target, name);
exe.link_gc_sections = dead_strip;
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("segname __TEXT");
check.checkNext("sectname __gcc_except_tab");
check.checkNext("sectname __unwind_info");
@@ -54,9 +64,10 @@ fn createScenario(
b: *std.Build,
optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
+ name: []const u8,
) *std.Build.CompileStep {
const exe = b.addExecutable(.{
- .name = "test",
+ .name = name,
.optimize = optimize,
.target = target,
});
diff --git a/test/link/macho/uuid/build.zig b/test/link/macho/uuid/build.zig
index 5a8c14ae37..df58aeacb7 100644
--- a/test/link/macho/uuid/build.zig
+++ b/test/link/macho/uuid/build.zig
@@ -1,14 +1,16 @@
const std = @import("std");
-const Builder = std.Build.Builder;
const CompileStep = std.Build.CompileStep;
const FileSource = std.Build.FileSource;
const Step = std.Build.Step;
+pub const requires_symlinks = true;
+
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+ b.default_step = test_step;
- // We force cross-compilation to ensure we always pick a generic CPU with constant set of CPU features.
+ // We force cross-compilation to ensure we always pick a generic CPU with
+    // a constant set of CPU features.
const aarch64_macos = std.zig.CrossTarget{
.cpu_arch = .aarch64,
.os_tag = .macos,
@@ -38,13 +40,15 @@ fn testUuid(
// stay the same across builds.
{
const dylib = simpleDylib(b, optimize, target);
- const install_step = installWithRename(dylib, "test1.dylib");
+ const install_step = b.addInstallArtifact(dylib);
+ install_step.dest_sub_path = "test1.dylib";
install_step.step.dependOn(&dylib.step);
}
{
const dylib = simpleDylib(b, optimize, target);
dylib.strip = true;
- const install_step = installWithRename(dylib, "test2.dylib");
+ const install_step = b.addInstallArtifact(dylib);
+ install_step.dest_sub_path = "test2.dylib";
install_step.step.dependOn(&dylib.step);
}
@@ -68,86 +72,43 @@ fn simpleDylib(
return dylib;
}
-fn installWithRename(cs: *CompileStep, name: []const u8) *InstallWithRename {
- const step = InstallWithRename.create(cs.builder, cs.getOutputSource(), name);
- cs.builder.getInstallStep().dependOn(&step.step);
- return step;
-}
-
-const InstallWithRename = struct {
- pub const base_id = .custom;
-
- step: Step,
- builder: *Builder,
- source: FileSource,
- name: []const u8,
-
- pub fn create(
- builder: *Builder,
- source: FileSource,
- name: []const u8,
- ) *InstallWithRename {
- const self = builder.allocator.create(InstallWithRename) catch @panic("OOM");
- self.* = InstallWithRename{
- .builder = builder,
- .step = Step.init(.custom, builder.fmt("install and rename: {s} -> {s}", .{
- source.getDisplayName(),
- name,
- }), builder.allocator, make),
- .source = source,
- .name = builder.dupe(name),
- };
- return self;
- }
-
- fn make(step: *Step) anyerror!void {
- const self = @fieldParentPtr(InstallWithRename, "step", step);
- const source_path = self.source.getPath(self.builder);
- const target_path = self.builder.getInstallPath(.lib, self.name);
- self.builder.updateFile(source_path, target_path) catch |err| {
- std.log.err("Unable to rename: {s} -> {s}", .{ source_path, target_path });
- return err;
- };
- }
-};
-
const CompareUuid = struct {
pub const base_id = .custom;
step: Step,
- builder: *Builder,
lhs: []const u8,
rhs: []const u8,
- pub fn create(builder: *Builder, lhs: []const u8, rhs: []const u8) *CompareUuid {
- const self = builder.allocator.create(CompareUuid) catch @panic("OOM");
+ pub fn create(owner: *std.Build, lhs: []const u8, rhs: []const u8) *CompareUuid {
+ const self = owner.allocator.create(CompareUuid) catch @panic("OOM");
self.* = CompareUuid{
- .builder = builder,
- .step = Step.init(
- .custom,
- builder.fmt("compare uuid: {s} and {s}", .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = owner.fmt("compare uuid: {s} and {s}", .{
lhs,
rhs,
}),
- builder.allocator,
- make,
- ),
+ .owner = owner,
+ .makeFn = make,
+ }),
.lhs = lhs,
.rhs = rhs,
};
return self;
}
- fn make(step: *Step) anyerror!void {
+ fn make(step: *Step, prog_node: *std.Progress.Node) anyerror!void {
+ _ = prog_node;
+ const b = step.owner;
const self = @fieldParentPtr(CompareUuid, "step", step);
- const gpa = self.builder.allocator;
+ const gpa = b.allocator;
var lhs_uuid: [16]u8 = undefined;
- const lhs_path = self.builder.getInstallPath(.lib, self.lhs);
+ const lhs_path = b.getInstallPath(.lib, self.lhs);
try parseUuid(gpa, lhs_path, &lhs_uuid);
var rhs_uuid: [16]u8 = undefined;
- const rhs_path = self.builder.getInstallPath(.lib, self.rhs);
+ const rhs_path = b.getInstallPath(.lib, self.rhs);
try parseUuid(gpa, rhs_path, &rhs_uuid);
try std.testing.expectEqualStrings(&lhs_uuid, &rhs_uuid);
diff --git a/test/link/macho/weak_framework/build.zig b/test/link/macho/weak_framework/build.zig
index ca28458d77..7cc08f5b9d 100644
--- a/test/link/macho/weak_framework/build.zig
+++ b/test/link/macho/weak_framework/build.zig
@@ -1,11 +1,19 @@
const std = @import("std");
+pub const requires_symlinks = true;
+pub const requires_macos_sdk = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(b.getInstallStep());
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const exe = b.addExecutable(.{
.name = "test",
.optimize = optimize,
@@ -14,7 +22,7 @@ pub fn build(b: *std.Build) void {
exe.linkLibC();
exe.linkFrameworkWeak("Cocoa");
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("cmd LOAD_WEAK_DYLIB");
check.checkNext("name {*}Cocoa");
test_step.dependOn(&check.step);
diff --git a/test/link/macho/weak_library/build.zig b/test/link/macho/weak_library/build.zig
index de5aa45e30..b12ec087f5 100644
--- a/test/link/macho/weak_library/build.zig
+++ b/test/link/macho/weak_library/build.zig
@@ -1,11 +1,19 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target: std.zig.CrossTarget = .{ .os_tag = .macos };
+pub const requires_symlinks = true;
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(b.getInstallStep());
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const dylib = b.addSharedLibrary(.{
.name = "a",
@@ -25,10 +33,10 @@ pub fn build(b: *std.Build) void {
exe.addCSourceFile("main.c", &[0][]const u8{});
exe.linkLibC();
exe.linkSystemLibraryWeak("a");
- exe.addLibraryPath(b.pathFromRoot("zig-out/lib"));
- exe.addRPath(b.pathFromRoot("zig-out/lib"));
+ exe.addLibraryPathDirectorySource(dylib.getOutputDirectorySource());
+ exe.addRPathDirectorySource(dylib.getOutputDirectorySource());
- const check = exe.checkObject(.macho);
+ const check = exe.checkObject();
check.checkStart("cmd LOAD_WEAK_DYLIB");
check.checkNext("name @rpath/liba.dylib");
diff --git a/test/link/static_lib_as_system_lib/a.c b/test/link/static_lib_as_system_lib/a.c
deleted file mode 100644
index ee9da97a3a..0000000000
--- a/test/link/static_lib_as_system_lib/a.c
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "a.h"
-int32_t add(int32_t a, int32_t b) {
- return a + b;
-}
diff --git a/test/link/static_lib_as_system_lib/a.h b/test/link/static_lib_as_system_lib/a.h
deleted file mode 100644
index 7b45d54d56..0000000000
--- a/test/link/static_lib_as_system_lib/a.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <stdint.h>
-int32_t add(int32_t a, int32_t b);
diff --git a/test/link/static_lib_as_system_lib/build.zig b/test/link/static_lib_as_system_lib/build.zig
deleted file mode 100644
index b6cf32d711..0000000000
--- a/test/link/static_lib_as_system_lib/build.zig
+++ /dev/null
@@ -1,29 +0,0 @@
-const std = @import("std");
-
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
-
- const lib_a = b.addStaticLibrary(.{
- .name = "a",
- .optimize = optimize,
- .target = target,
- });
- lib_a.addCSourceFile("a.c", &[_][]const u8{});
- lib_a.addIncludePath(".");
- lib_a.install();
-
- const test_exe = b.addTest(.{
- .root_source_file = .{ .path = "main.zig" },
- .optimize = optimize,
- .target = target,
- });
- test_exe.linkSystemLibrary("a"); // force linking liba.a as -la
- test_exe.addSystemIncludePath(".");
- const search_path = std.fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "lib" }) catch unreachable;
- test_exe.addLibraryPath(search_path);
-
- const test_step = b.step("test", "Test it");
- test_step.dependOn(b.getInstallStep());
- test_step.dependOn(&test_exe.step);
-}
diff --git a/test/link/static_lib_as_system_lib/main.zig b/test/link/static_lib_as_system_lib/main.zig
deleted file mode 100644
index 0b9c46217f..0000000000
--- a/test/link/static_lib_as_system_lib/main.zig
+++ /dev/null
@@ -1,8 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-const c = @cImport(@cInclude("a.h"));
-
-test "import C add" {
- const result = c.add(2, 1);
- try expect(result == 3);
-}
diff --git a/test/link/wasm/archive/build.zig b/test/link/wasm/archive/build.zig
index 342c4c08d1..6c242a6bf1 100644
--- a/test/link/wasm/archive/build.zig
+++ b/test/link/wasm/archive/build.zig
@@ -1,22 +1,31 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub const requires_stage2 = true;
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
// The code in question will pull-in compiler-rt,
// and therefore link with its archive file.
const lib = b.addSharedLibrary(.{
.name = "main",
.root_source_file = .{ .path = "main.zig" },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
});
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
- const check = lib.checkObject(.wasm);
+ const check = lib.checkObject();
check.checkStart("Section custom");
check.checkNext("name __truncsfhf2"); // Ensure it was imported and resolved
diff --git a/test/link/wasm/basic-features/build.zig b/test/link/wasm/basic-features/build.zig
index 9f57066518..be709a698f 100644
--- a/test/link/wasm/basic-features/build.zig
+++ b/test/link/wasm/basic-features/build.zig
@@ -1,11 +1,13 @@
const std = @import("std");
+pub const requires_stage2 = true;
+
pub fn build(b: *std.Build) void {
// Library with explicitly set cpu features
const lib = b.addSharedLibrary(.{
.name = "lib",
.root_source_file = .{ .path = "main.zig" },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = .Debug,
.target = .{
.cpu_arch = .wasm32,
.cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp },
@@ -17,11 +19,12 @@ pub fn build(b: *std.Build) void {
lib.use_lld = false;
// Verify the result contains the features explicitly set on the target for the library.
- const check = lib.checkObject(.wasm);
+ const check = lib.checkObject();
check.checkStart("name target_features");
check.checkNext("features 1");
check.checkNext("+ atomics");
const test_step = b.step("test", "Run linker test");
test_step.dependOn(&check.step);
+ b.default_step = test_step;
}
diff --git a/test/link/wasm/bss/build.zig b/test/link/wasm/bss/build.zig
index 1017e70a71..bba2e7c602 100644
--- a/test/link/wasm/bss/build.zig
+++ b/test/link/wasm/bss/build.zig
@@ -1,14 +1,16 @@
const std = @import("std");
+pub const requires_stage2 = true;
+
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+ b.default_step = test_step;
const lib = b.addSharedLibrary(.{
.name = "lib",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = .Debug,
});
lib.use_llvm = false;
lib.use_lld = false;
@@ -17,7 +19,7 @@ pub fn build(b: *std.Build) void {
lib.import_memory = true;
lib.install();
- const check_lib = lib.checkObject(.wasm);
+ const check_lib = lib.checkObject();
// since we import memory, make sure it exists with the correct naming
check_lib.checkStart("Section import");
@@ -36,5 +38,6 @@ pub fn build(b: *std.Build) void {
check_lib.checkNext("name .rodata");
check_lib.checkNext("index 1"); // bss section always last
check_lib.checkNext("name .bss");
+
test_step.dependOn(&check_lib.step);
}
diff --git a/test/link/wasm/export-data/build.zig b/test/link/wasm/export-data/build.zig
index c989153e47..38b8c3e19e 100644
--- a/test/link/wasm/export-data/build.zig
+++ b/test/link/wasm/export-data/build.zig
@@ -2,7 +2,12 @@ const std = @import("std");
pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+ b.default_step = test_step;
+
+ if (@import("builtin").os.tag == .windows) {
+        // TODO: Fix open handle in wasm-linker that prevents rename from working on Windows.
+ return;
+ }
const lib = b.addSharedLibrary(.{
.name = "lib",
@@ -14,7 +19,7 @@ pub fn build(b: *std.Build) void {
lib.export_symbol_names = &.{ "foo", "bar" };
lib.global_base = 0; // put data section at address 0 to make data symbols easier to parse
- const check_lib = lib.checkObject(.wasm);
+ const check_lib = lib.checkObject();
check_lib.checkStart("Section global");
check_lib.checkNext("entries 3");
diff --git a/test/link/wasm/export/build.zig b/test/link/wasm/export/build.zig
index 69c34a320e..794201dbf6 100644
--- a/test/link/wasm/export/build.zig
+++ b/test/link/wasm/export/build.zig
@@ -1,8 +1,18 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+pub const requires_stage2 = true;
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const no_export = b.addSharedLibrary(.{
.name = "no-export",
.root_source_file = .{ .path = "main.zig" },
@@ -32,25 +42,24 @@ pub fn build(b: *std.Build) void {
force_export.use_llvm = false;
force_export.use_lld = false;
- const check_no_export = no_export.checkObject(.wasm);
+ const check_no_export = no_export.checkObject();
check_no_export.checkStart("Section export");
check_no_export.checkNext("entries 1");
check_no_export.checkNext("name memory");
check_no_export.checkNext("kind memory");
- const check_dynamic_export = dynamic_export.checkObject(.wasm);
+ const check_dynamic_export = dynamic_export.checkObject();
check_dynamic_export.checkStart("Section export");
check_dynamic_export.checkNext("entries 2");
check_dynamic_export.checkNext("name foo");
check_dynamic_export.checkNext("kind function");
- const check_force_export = force_export.checkObject(.wasm);
+ const check_force_export = force_export.checkObject();
check_force_export.checkStart("Section export");
check_force_export.checkNext("entries 2");
check_force_export.checkNext("name foo");
check_force_export.checkNext("kind function");
- const test_step = b.step("test", "Run linker test");
test_step.dependOn(&check_no_export.step);
test_step.dependOn(&check_dynamic_export.step);
test_step.dependOn(&check_force_export.step);
diff --git a/test/link/wasm/extern-mangle/build.zig b/test/link/wasm/extern-mangle/build.zig
index 19913e6eca..6c292acbab 100644
--- a/test/link/wasm/extern-mangle/build.zig
+++ b/test/link/wasm/extern-mangle/build.zig
@@ -1,20 +1,26 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib = b.addSharedLibrary(.{
.name = "lib",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
});
lib.import_symbols = true; // import `a` and `b`
lib.rdynamic = true; // export `foo`
- lib.install();
- const check_lib = lib.checkObject(.wasm);
+ const check_lib = lib.checkObject();
check_lib.checkStart("Section import");
check_lib.checkNext("entries 2"); // a.hello & b.hello
check_lib.checkNext("module a");
diff --git a/test/link/wasm/extern/build.zig b/test/link/wasm/extern/build.zig
index 569d94091a..55562143c2 100644
--- a/test/link/wasm/extern/build.zig
+++ b/test/link/wasm/extern/build.zig
@@ -1,19 +1,31 @@
const std = @import("std");
+pub const requires_stage2 = true;
+
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const exe = b.addExecutable(.{
.name = "extern",
.root_source_file = .{ .path = "main.zig" },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
.target = .{ .cpu_arch = .wasm32, .os_tag = .wasi },
});
exe.addCSourceFile("foo.c", &.{});
exe.use_llvm = false;
exe.use_lld = false;
- const run = exe.runEmulatable();
+ const run = b.addRunArtifact(exe);
+ run.skip_foreign_checks = true;
run.expectStdOutEqual("Result: 30");
- const test_step = b.step("test", "Run linker test");
test_step.dependOn(&run.step);
}
diff --git a/test/link/wasm/function-table/build.zig b/test/link/wasm/function-table/build.zig
index 4c25d0d860..4ce6294727 100644
--- a/test/link/wasm/function-table/build.zig
+++ b/test/link/wasm/function-table/build.zig
@@ -1,13 +1,20 @@
const std = @import("std");
+pub const requires_stage2 = true;
+
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const import_table = b.addSharedLibrary(.{
- .name = "lib",
+ .name = "import_table",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
.optimize = optimize,
@@ -17,7 +24,7 @@ pub fn build(b: *std.Build) void {
import_table.import_table = true;
const export_table = b.addSharedLibrary(.{
- .name = "lib",
+ .name = "export_table",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
.optimize = optimize,
@@ -27,7 +34,7 @@ pub fn build(b: *std.Build) void {
export_table.export_table = true;
const regular_table = b.addSharedLibrary(.{
- .name = "lib",
+ .name = "regular_table",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
.optimize = optimize,
@@ -35,9 +42,9 @@ pub fn build(b: *std.Build) void {
regular_table.use_llvm = false;
regular_table.use_lld = false;
- const check_import = import_table.checkObject(.wasm);
- const check_export = export_table.checkObject(.wasm);
- const check_regular = regular_table.checkObject(.wasm);
+ const check_import = import_table.checkObject();
+ const check_export = export_table.checkObject();
+ const check_regular = regular_table.checkObject();
check_import.checkStart("Section import");
check_import.checkNext("entries 1");
diff --git a/test/link/wasm/infer-features/build.zig b/test/link/wasm/infer-features/build.zig
index d6d706a33d..00fb48651b 100644
--- a/test/link/wasm/infer-features/build.zig
+++ b/test/link/wasm/infer-features/build.zig
@@ -1,12 +1,12 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+pub const requires_stage2 = true;
+pub fn build(b: *std.Build) void {
// Wasm Object file which we will use to infer the features from
const c_obj = b.addObject(.{
.name = "c_obj",
- .optimize = optimize,
+ .optimize = .Debug,
.target = .{
.cpu_arch = .wasm32,
.cpu_model = .{ .explicit = &std.Target.wasm.cpu.bleeding_edge },
@@ -20,7 +20,7 @@ pub fn build(b: *std.Build) void {
const lib = b.addSharedLibrary(.{
.name = "lib",
.root_source_file = .{ .path = "main.zig" },
- .optimize = optimize,
+ .optimize = .Debug,
.target = .{
.cpu_arch = .wasm32,
.cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp },
@@ -32,7 +32,7 @@ pub fn build(b: *std.Build) void {
lib.addObject(c_obj);
// Verify the result contains the features from the C Object file.
- const check = lib.checkObject(.wasm);
+ const check = lib.checkObject();
check.checkStart("name target_features");
check.checkNext("features 7");
check.checkNext("+ atomics");
@@ -45,4 +45,5 @@ pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Run linker test");
test_step.dependOn(&check.step);
+ b.default_step = test_step;
}
diff --git a/test/link/wasm/producers/build.zig b/test/link/wasm/producers/build.zig
index 2589b0dfcf..7b7cefd7e0 100644
--- a/test/link/wasm/producers/build.zig
+++ b/test/link/wasm/producers/build.zig
@@ -1,26 +1,33 @@
const std = @import("std");
const builtin = @import("builtin");
-pub fn build(b: *std.Build) void {
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub const requires_stage2 = true;
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib = b.addSharedLibrary(.{
.name = "lib",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
});
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
lib.install();
- const zig_version = builtin.zig_version;
- var version_buf: [100]u8 = undefined;
- const version_fmt = std.fmt.bufPrint(&version_buf, "version {}", .{zig_version}) catch unreachable;
+ const version_fmt = "version " ++ builtin.zig_version_string;
- const check_lib = lib.checkObject(.wasm);
+ const check_lib = lib.checkObject();
check_lib.checkStart("name producers");
check_lib.checkNext("fields 2");
check_lib.checkNext("field_name language");
diff --git a/test/link/wasm/segments/build.zig b/test/link/wasm/segments/build.zig
index 76160e905f..281d8ae32b 100644
--- a/test/link/wasm/segments/build.zig
+++ b/test/link/wasm/segments/build.zig
@@ -1,21 +1,30 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub const requires_stage2 = true;
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib = b.addSharedLibrary(.{
.name = "lib",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
});
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
lib.install();
- const check_lib = lib.checkObject(.wasm);
+ const check_lib = lib.checkObject();
check_lib.checkStart("Section data");
check_lib.checkNext("entries 2"); // rodata & data, no bss because we're exporting memory
diff --git a/test/link/wasm/stack_pointer/build.zig b/test/link/wasm/stack_pointer/build.zig
index 95c7643880..794b7d27fb 100644
--- a/test/link/wasm/stack_pointer/build.zig
+++ b/test/link/wasm/stack_pointer/build.zig
@@ -1,14 +1,23 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub const requires_stage2 = true;
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib = b.addSharedLibrary(.{
.name = "lib",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
});
lib.use_llvm = false;
lib.use_lld = false;
@@ -16,7 +25,7 @@ pub fn build(b: *std.Build) void {
lib.stack_size = std.wasm.page_size * 2; // set an explicit stack size
lib.install();
- const check_lib = lib.checkObject(.wasm);
+ const check_lib = lib.checkObject();
    // ensure the global exists and its initial value is equal to the explicit stack size
check_lib.checkStart("Section global");
diff --git a/test/link/wasm/type/build.zig b/test/link/wasm/type/build.zig
index 816b57ccab..4a8395645f 100644
--- a/test/link/wasm/type/build.zig
+++ b/test/link/wasm/type/build.zig
@@ -1,21 +1,30 @@
const std = @import("std");
-pub fn build(b: *std.Build) void {
- const test_step = b.step("test", "Test");
- test_step.dependOn(b.getInstallStep());
+pub const requires_stage2 = true;
+pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const lib = b.addSharedLibrary(.{
.name = "lib",
.root_source_file = .{ .path = "lib.zig" },
.target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
});
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
lib.install();
- const check_lib = lib.checkObject(.wasm);
+ const check_lib = lib.checkObject();
check_lib.checkStart("Section type");
// only 2 entries, although we have 3 functions.
// This is to test functions with the same function signature
diff --git a/test/stage2/nvptx.zig b/test/nvptx.zig
similarity index 76%
rename from test/stage2/nvptx.zig
rename to test/nvptx.zig
index f08aa9fca4..57853a657d 100644
--- a/test/stage2/nvptx.zig
+++ b/test/nvptx.zig
@@ -1,11 +1,11 @@
const std = @import("std");
-const TestContext = @import("../../src/test.zig").TestContext;
+const Cases = @import("src/Cases.zig");
-pub fn addCases(ctx: *TestContext) !void {
+pub fn addCases(ctx: *Cases) !void {
{
- var case = addPtx(ctx, "nvptx: simple addition and subtraction");
+ var case = addPtx(ctx, "simple addition and subtraction");
- case.compiles(
+ case.addCompile(
\\fn add(a: i32, b: i32) i32 {
\\ return a + b;
\\}
@@ -20,9 +20,9 @@ pub fn addCases(ctx: *TestContext) !void {
}
{
- var case = addPtx(ctx, "nvptx: read special registers");
+ var case = addPtx(ctx, "read special registers");
- case.compiles(
+ case.addCompile(
\\fn threadIdX() u32 {
\\ return asm ("mov.u32 \t%[r], %tid.x;"
\\ : [r] "=r" (-> u32),
@@ -37,9 +37,9 @@ pub fn addCases(ctx: *TestContext) !void {
}
{
- var case = addPtx(ctx, "nvptx: address spaces");
+ var case = addPtx(ctx, "address spaces");
- case.compiles(
+ case.addCompile(
\\var x: i32 addrspace(.global) = 0;
\\
\\pub export fn increment(out: *i32) callconv(.PtxKernel) void {
@@ -50,8 +50,8 @@ pub fn addCases(ctx: *TestContext) !void {
}
{
- var case = addPtx(ctx, "nvptx: reduce in shared mem");
- case.compiles(
+ var case = addPtx(ctx, "reduce in shared mem");
+ case.addCompile(
\\fn threadIdX() u32 {
\\ return asm ("mov.u32 \t%[r], %tid.x;"
\\ : [r] "=r" (-> u32),
@@ -88,16 +88,15 @@ const nvptx_target = std.zig.CrossTarget{
};
pub fn addPtx(
- ctx: *TestContext,
+ ctx: *Cases,
name: []const u8,
-) *TestContext.Case {
- ctx.cases.append(TestContext.Case{
+) *Cases.Case {
+ ctx.cases.append(.{
.name = name,
.target = nvptx_target,
- .updates = std.ArrayList(TestContext.Update).init(ctx.cases.allocator),
+ .updates = std.ArrayList(Cases.Update).init(ctx.cases.allocator),
.output_mode = .Obj,
- .files = std.ArrayList(TestContext.File).init(ctx.cases.allocator),
- .deps = std.ArrayList(TestContext.DepModule).init(ctx.cases.allocator),
+ .deps = std.ArrayList(Cases.DepModule).init(ctx.cases.allocator),
.link_libc = false,
.backend = .llvm,
// Bug in Debug mode
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
new file mode 100644
index 0000000000..c3a4c1df47
--- /dev/null
+++ b/test/src/Cases.zig
@@ -0,0 +1,1587 @@
+gpa: Allocator,
+arena: Allocator,
+cases: std.ArrayList(Case),
+incremental_cases: std.ArrayList(IncrementalCase),
+
+pub const IncrementalCase = struct {
+ base_path: []const u8,
+};
+
+pub const Update = struct {
+ /// The input to the current update. We simulate an incremental update
+ /// with the file's contents changed to this value each update.
+ ///
+ /// This value can change entirely between updates, which would be akin
+ /// to deleting the source file and creating a new one from scratch; or
+ /// you can keep it mostly consistent, with small changes, testing the
+ /// effects of the incremental compilation.
+ files: std.ArrayList(File),
+ /// This is a description of what happens with the update, for debugging
+ /// purposes.
+ name: []const u8,
+ case: union(enum) {
+ /// Check that it compiles with no errors.
+ Compile: void,
+ /// Check the main binary output file against an expected set of bytes.
+ /// This is most useful with, for example, `-ofmt=c`.
+ CompareObjectFile: []const u8,
+        /// An error update attempts to compile bad code and ensures that it
+        /// fails to compile for the expected reasons.
+ /// A slice containing the expected stderr template, which
+ /// gets some values substituted.
+ Error: []const []const u8,
+ /// An execution update compiles and runs the input, testing the
+        /// An execution update compiles and runs the input, testing the
+        /// stdout against the expected result.
+ Execution: []const u8,
+ /// A header update compiles the input with the equivalent of
+ /// `-femit-h` and tests the produced header against the
+ /// expected result
+ Header: []const u8,
+ },
+
+ pub fn addSourceFile(update: *Update, name: []const u8, src: [:0]const u8) void {
+ update.files.append(.{ .path = name, .src = src }) catch @panic("out of memory");
+ }
+};
+
+pub const File = struct {
+ src: [:0]const u8,
+ path: []const u8,
+};
+
+pub const DepModule = struct {
+ name: []const u8,
+ path: []const u8,
+};
+
+pub const Backend = enum {
+ stage1,
+ stage2,
+ llvm,
+};
+
+/// A `Case` consists of a list of `Update`. The same `Compilation` is used for each
+/// update, so each update's source is treated as a single file being
+/// updated by the test harness and incrementally compiled.
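+///
+/// An illustrative sketch (not taken from the existing tests; `first_src` and
+/// `second_src` stand for Zig source string literals): appending two updates
+/// to the same case simulates editing the file in place between compilations:
+///
+///   const case = ctx.exe("incremental demo", .{});
+///   case.addCompareOutput(first_src, "1");
+///   case.addCompareOutput(second_src, "2");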
+pub const Case = struct {
+ /// The name of the test case. This is shown if a test fails, and
+ /// otherwise ignored.
+ name: []const u8,
+ /// The platform the test targets. For non-native platforms, an emulator
+ /// such as QEMU is required for tests to complete.
+ target: CrossTarget,
+ /// In order to be able to run e.g. Execution updates, this must be set
+ /// to Executable.
+ output_mode: std.builtin.OutputMode,
+ optimize_mode: std.builtin.Mode = .Debug,
+ updates: std.ArrayList(Update),
+ emit_h: bool = false,
+ is_test: bool = false,
+ expect_exact: bool = false,
+ backend: Backend = .stage2,
+ link_libc: bool = false,
+
+ deps: std.ArrayList(DepModule),
+
+ pub fn addSourceFile(case: *Case, name: []const u8, src: [:0]const u8) void {
+ const update = &case.updates.items[case.updates.items.len - 1];
+ update.files.append(.{ .path = name, .src = src }) catch @panic("OOM");
+ }
+
+ pub fn addDepModule(case: *Case, name: []const u8, path: []const u8) void {
+ case.deps.append(.{
+ .name = name,
+ .path = path,
+ }) catch @panic("out of memory");
+ }
+
+ /// Adds a subcase in which the module is updated with `src`, compiled,
+ /// run, and the output is tested against `result`.
+ pub fn addCompareOutput(self: *Case, src: [:0]const u8, result: []const u8) void {
+ self.updates.append(.{
+ .files = std.ArrayList(File).init(self.updates.allocator),
+ .name = "update",
+ .case = .{ .Execution = result },
+ }) catch @panic("out of memory");
+ addSourceFile(self, "tmp.zig", src);
+ }
+
+ pub fn addError(self: *Case, src: [:0]const u8, errors: []const []const u8) void {
+ return self.addErrorNamed("update", src, errors);
+ }
+
+ /// Adds a subcase in which the module is updated with `src`, which
+ /// should contain invalid input, and ensures that compilation fails
+ /// for the expected reasons, given in sequential order in `errors` in
+ /// the form `:line:column: error: message`.
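+    ///
+    /// A minimal illustrative call (assumed example, not taken from the
+    /// existing test suite):
+    ///
+    ///   case.addErrorNamed("bad call", "comptime { @compileError(\"boom\"); }", &.{
+    ///       ":1:12: error: boom",
+    ///   });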
+ pub fn addErrorNamed(
+ self: *Case,
+ name: []const u8,
+ src: [:0]const u8,
+ errors: []const []const u8,
+ ) void {
+ assert(errors.len != 0);
+ self.updates.append(.{
+ .files = std.ArrayList(File).init(self.updates.allocator),
+ .name = name,
+ .case = .{ .Error = errors },
+ }) catch @panic("out of memory");
+ addSourceFile(self, "tmp.zig", src);
+ }
+
+ /// Adds a subcase in which the module is updated with `src`, and
+ /// asserts that it compiles without issue
+ pub fn addCompile(self: *Case, src: [:0]const u8) void {
+ self.updates.append(.{
+ .files = std.ArrayList(File).init(self.updates.allocator),
+ .name = "compile",
+ .case = .{ .Compile = {} },
+ }) catch @panic("out of memory");
+ addSourceFile(self, "tmp.zig", src);
+ }
+};
+
+pub fn addExe(
+ ctx: *Cases,
+ name: []const u8,
+ target: CrossTarget,
+) *Case {
+ ctx.cases.append(Case{
+ .name = name,
+ .target = target,
+ .updates = std.ArrayList(Update).init(ctx.cases.allocator),
+ .output_mode = .Exe,
+ .deps = std.ArrayList(DepModule).init(ctx.arena),
+ }) catch @panic("out of memory");
+ return &ctx.cases.items[ctx.cases.items.len - 1];
+}
+
+/// Adds a test case for Zig input, producing an executable
+pub fn exe(ctx: *Cases, name: []const u8, target: CrossTarget) *Case {
+ return ctx.addExe(name, target);
+}
+
+pub fn exeFromCompiledC(ctx: *Cases, name: []const u8, target: CrossTarget) *Case {
+ var target_adjusted = target;
+ target_adjusted.ofmt = .c;
+ ctx.cases.append(Case{
+ .name = name,
+ .target = target_adjusted,
+ .updates = std.ArrayList(Update).init(ctx.cases.allocator),
+ .output_mode = .Exe,
+ .deps = std.ArrayList(DepModule).init(ctx.arena),
+ .link_libc = true,
+ }) catch @panic("out of memory");
+ return &ctx.cases.items[ctx.cases.items.len - 1];
+}
+
+/// Adds a test case that uses the LLVM backend to emit an executable.
+/// Currently this implies linking libc, because only then can we generate a testable executable.
+pub fn exeUsingLlvmBackend(ctx: *Cases, name: []const u8, target: CrossTarget) *Case {
+ ctx.cases.append(Case{
+ .name = name,
+ .target = target,
+ .updates = std.ArrayList(Update).init(ctx.cases.allocator),
+ .output_mode = .Exe,
+ .deps = std.ArrayList(DepModule).init(ctx.arena),
+ .backend = .llvm,
+ .link_libc = true,
+ }) catch @panic("out of memory");
+ return &ctx.cases.items[ctx.cases.items.len - 1];
+}
+
+pub fn addObj(
+ ctx: *Cases,
+ name: []const u8,
+ target: CrossTarget,
+) *Case {
+ ctx.cases.append(Case{
+ .name = name,
+ .target = target,
+ .updates = std.ArrayList(Update).init(ctx.cases.allocator),
+ .output_mode = .Obj,
+ .deps = std.ArrayList(DepModule).init(ctx.arena),
+ }) catch @panic("out of memory");
+ return &ctx.cases.items[ctx.cases.items.len - 1];
+}
+
+pub fn addTest(
+ ctx: *Cases,
+ name: []const u8,
+ target: CrossTarget,
+) *Case {
+ ctx.cases.append(Case{
+ .name = name,
+ .target = target,
+ .updates = std.ArrayList(Update).init(ctx.cases.allocator),
+ .output_mode = .Exe,
+ .is_test = true,
+ .deps = std.ArrayList(DepModule).init(ctx.arena),
+ }) catch @panic("out of memory");
+ return &ctx.cases.items[ctx.cases.items.len - 1];
+}
+
+/// Adds a test case for Zig input, producing an object file.
+pub fn obj(ctx: *Cases, name: []const u8, target: CrossTarget) *Case {
+ return ctx.addObj(name, target);
+}
+
+/// Adds a test case for ZIR input, producing an object file.
+pub fn objZIR(ctx: *Cases, name: []const u8, target: CrossTarget) *Case {
+ return ctx.addObj(name, target, .ZIR);
+}
+
+/// Adds a test case for Zig or ZIR input, producing C code.
+pub fn addC(ctx: *Cases, name: []const u8, target: CrossTarget) *Case {
+ var target_adjusted = target;
+ target_adjusted.ofmt = std.Target.ObjectFormat.c;
+ ctx.cases.append(Case{
+ .name = name,
+ .target = target_adjusted,
+ .updates = std.ArrayList(Update).init(ctx.cases.allocator),
+ .output_mode = .Obj,
+ .deps = std.ArrayList(DepModule).init(ctx.arena),
+ }) catch @panic("out of memory");
+ return &ctx.cases.items[ctx.cases.items.len - 1];
+}
+
+pub fn addCompareOutput(
+ ctx: *Cases,
+ name: []const u8,
+ src: [:0]const u8,
+ expected_stdout: []const u8,
+) void {
+ ctx.addExe(name, .{}).addCompareOutput(src, expected_stdout);
+}
+
+/// Adds a test case that compiles the Zig source given in `src`, runs it,
+/// and tests the output against `expected_stdout`.
+pub fn compareOutput(
+ ctx: *Cases,
+ name: []const u8,
+ src: [:0]const u8,
+ expected_stdout: []const u8,
+) void {
+ return ctx.addCompareOutput(name, src, expected_stdout);
+}
+
+pub fn addTransform(
+ ctx: *Cases,
+ name: []const u8,
+ target: CrossTarget,
+ src: [:0]const u8,
+ result: [:0]const u8,
+) void {
+ ctx.addObj(name, target).addTransform(src, result);
+}
+
+/// Adds a test case that compiles the Zig given in `src` to ZIR and tests
+/// the ZIR against `result`
+pub fn transform(
+ ctx: *Cases,
+ name: []const u8,
+ target: CrossTarget,
+ src: [:0]const u8,
+ result: [:0]const u8,
+) void {
+ ctx.addTransform(name, target, src, result);
+}
+
+pub fn addError(
+ ctx: *Cases,
+ name: []const u8,
+ target: CrossTarget,
+ src: [:0]const u8,
+ expected_errors: []const []const u8,
+) void {
+ ctx.addObj(name, target).addError(src, expected_errors);
+}
+
+/// Adds a test case that ensures that the Zig given in `src` fails to
+/// compile for the expected reasons, given in sequential order in
+/// `expected_errors` in the form `:line:column: error: message`.
+pub fn compileError(
+ ctx: *Cases,
+ name: []const u8,
+ target: CrossTarget,
+ src: [:0]const u8,
+ expected_errors: []const []const u8,
+) void {
+ ctx.addError(name, target, src, expected_errors);
+}
+
+/// Adds a test case that asserts that the Zig given in `src` compiles
+/// without any errors.
+pub fn addCompile(
+ ctx: *Cases,
+ name: []const u8,
+ target: CrossTarget,
+ src: [:0]const u8,
+) void {
+ ctx.addObj(name, target).addCompile(src);
+}
+
+/// Adds a test for each file in the provided directory.
+/// Testing strategy (TestStrategy) is inferred automatically from filenames.
+/// Recurses nested directories.
+///
+/// Each file should include a test manifest as a contiguous block of comments at
+/// the end of the file. The first line should be the test type, followed by a set of
+/// key-value config values, followed by a blank line, then the expected output.
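+///
+/// A sketch of what such a trailing manifest block might look like for a
+/// `run` test (illustrative; the recognized keys and test types are the ones
+/// parsed by `addFromDirInner` below):
+///
+///   // run
+///   // backend=stage2
+///   // target=x86_64-linux
+///   //
+///   // Hello, world!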
+pub fn addFromDir(ctx: *Cases, dir: std.fs.IterableDir) void {
+ var current_file: []const u8 = "none";
+    ctx.addFromDirInner(dir, &current_file) catch |err| {
+ std.debug.panic("test harness failed to process file '{s}': {s}\n", .{
+ current_file, @errorName(err),
+ });
+ };
+}
+
+fn addFromDirInner(
+ ctx: *Cases,
+ iterable_dir: std.fs.IterableDir,
+    /// This is kept up to date with the file currently being processed so
+    /// that if an error occurs the caller knows which file it happened in.
+ current_file: *[]const u8,
+) !void {
+ var it = try iterable_dir.walk(ctx.arena);
+ var filenames = std.ArrayList([]const u8).init(ctx.arena);
+
+ while (try it.next()) |entry| {
+ if (entry.kind != .File) continue;
+
+ // Ignore stuff such as .swp files
+ switch (Compilation.classifyFileExt(entry.basename)) {
+ .unknown => continue,
+ else => {},
+ }
+ try filenames.append(try ctx.arena.dupe(u8, entry.path));
+ }
+
+ // Sort filenames, so that incremental tests are contiguous and in-order
+ sortTestFilenames(filenames.items);
+
+ var test_it = TestIterator{ .filenames = filenames.items };
+ while (test_it.next()) |maybe_batch| {
+ const batch = maybe_batch orelse break;
+ const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent;
+ const filename = batch[0];
+ current_file.* = filename;
+ if (strategy == .incremental) {
+ try ctx.incremental_cases.append(.{ .base_path = filename });
+ continue;
+ }
+
+ const max_file_size = 10 * 1024 * 1024;
+ const src = try iterable_dir.dir.readFileAllocOptions(ctx.arena, filename, max_file_size, null, 1, 0);
+
+ // Parse the manifest
+ var manifest = try TestManifest.parse(ctx.arena, src);
+
+ const backends = try manifest.getConfigForKeyAlloc(ctx.arena, "backend", Backend);
+ const targets = try manifest.getConfigForKeyAlloc(ctx.arena, "target", CrossTarget);
+ const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool);
+ const link_libc = try manifest.getConfigForKeyAssertSingle("link_libc", bool);
+ const output_mode = try manifest.getConfigForKeyAssertSingle("output_mode", std.builtin.OutputMode);
+
+ var cases = std.ArrayList(usize).init(ctx.arena);
+
+ // Cross-product to get all possible test combinations
+ for (backends) |backend| {
+ for (targets) |target| {
+ const next = ctx.cases.items.len;
+ try ctx.cases.append(.{
+ .name = std.fs.path.stem(filename),
+ .target = target,
+ .backend = backend,
+ .updates = std.ArrayList(Cases.Update).init(ctx.cases.allocator),
+ .is_test = is_test,
+ .output_mode = output_mode,
+ .link_libc = link_libc,
+ .deps = std.ArrayList(DepModule).init(ctx.cases.allocator),
+ });
+ try cases.append(next);
+ }
+ }
+
+ for (cases.items) |case_index| {
+ const case = &ctx.cases.items[case_index];
+ switch (manifest.type) {
+ .compile => {
+ case.addCompile(src);
+ },
+ .@"error" => {
+ const errors = try manifest.trailingAlloc(ctx.arena);
+ case.addError(src, errors);
+ },
+ .run => {
+ var output = std.ArrayList(u8).init(ctx.arena);
+ var trailing_it = manifest.trailing();
+ while (trailing_it.next()) |line| {
+ try output.appendSlice(line);
+ try output.append('\n');
+ }
+ if (output.items.len > 0) {
+ try output.resize(output.items.len - 1);
+ }
+ case.addCompareOutput(src, try output.toOwnedSlice());
+ },
+ .cli => @panic("TODO cli tests"),
+ }
+ }
+ } else |err| {
+ // make sure the current file is set to the file that produced an error
+ current_file.* = test_it.currentFilename();
+ return err;
+ }
+}
+
+pub fn init(gpa: Allocator, arena: Allocator) Cases {
+ return .{
+ .gpa = gpa,
+ .cases = std.ArrayList(Case).init(gpa),
+ .incremental_cases = std.ArrayList(IncrementalCase).init(gpa),
+ .arena = arena,
+ };
+}
+
+pub fn lowerToBuildSteps(
+ self: *Cases,
+ b: *std.Build,
+ parent_step: *std.Build.Step,
+ opt_test_filter: ?[]const u8,
+ cases_dir_path: []const u8,
+ incremental_exe: *std.Build.CompileStep,
+) void {
+ for (self.incremental_cases.items) |incr_case| {
+ if (opt_test_filter) |test_filter| {
+ if (std.mem.indexOf(u8, incr_case.base_path, test_filter) == null) continue;
+ }
+ const case_base_path_with_dir = std.fs.path.join(b.allocator, &.{
+ cases_dir_path, incr_case.base_path,
+ }) catch @panic("OOM");
+ const run = b.addRunArtifact(incremental_exe);
+ run.setName(incr_case.base_path);
+ run.addArgs(&.{
+ case_base_path_with_dir,
+ b.zig_exe,
+ });
+ run.expectStdOutEqual("");
+ parent_step.dependOn(&run.step);
+ }
+
+ for (self.cases.items) |case| {
+ if (case.updates.items.len != 1) continue; // handled with incremental_cases above
+ assert(case.updates.items.len == 1);
+ const update = case.updates.items[0];
+
+ if (opt_test_filter) |test_filter| {
+ if (std.mem.indexOf(u8, case.name, test_filter) == null) continue;
+ }
+
+ const writefiles = b.addWriteFiles();
+ for (update.files.items) |file| {
+ writefiles.add(file.path, file.src);
+ }
+ const root_source_file = writefiles.getFileSource(update.files.items[0].path).?;
+
+ const artifact = if (case.is_test) b.addTest(.{
+ .root_source_file = root_source_file,
+ .name = case.name,
+ .target = case.target,
+ .optimize = case.optimize_mode,
+ }) else switch (case.output_mode) {
+ .Obj => b.addObject(.{
+ .root_source_file = root_source_file,
+ .name = case.name,
+ .target = case.target,
+ .optimize = case.optimize_mode,
+ }),
+ .Lib => b.addStaticLibrary(.{
+ .root_source_file = root_source_file,
+ .name = case.name,
+ .target = case.target,
+ .optimize = case.optimize_mode,
+ }),
+ .Exe => b.addExecutable(.{
+ .root_source_file = root_source_file,
+ .name = case.name,
+ .target = case.target,
+ .optimize = case.optimize_mode,
+ }),
+ };
+
+ if (case.link_libc) artifact.linkLibC();
+
+ switch (case.backend) {
+ .stage1 => continue,
+ .stage2 => {
+ artifact.use_llvm = false;
+ artifact.use_lld = false;
+ },
+ .llvm => {
+ artifact.use_llvm = true;
+ },
+ }
+
+ for (case.deps.items) |dep| {
+ artifact.addAnonymousModule(dep.name, .{
+ .source_file = writefiles.getFileSource(dep.path).?,
+ });
+ }
+
+ switch (update.case) {
+ .Compile => {
+ parent_step.dependOn(&artifact.step);
+ },
+ .CompareObjectFile => |expected_output| {
+ const check = b.addCheckFile(artifact.getOutputSource(), .{
+ .expected_exact = expected_output,
+ });
+
+ parent_step.dependOn(&check.step);
+ },
+ .Error => |expected_msgs| {
+ assert(expected_msgs.len != 0);
+ artifact.expect_errors = expected_msgs;
+ parent_step.dependOn(&artifact.step);
+ },
+ .Execution => |expected_stdout| {
+ const run = b.addRunArtifact(artifact);
+ run.skip_foreign_checks = true;
+ if (!case.is_test) {
+ run.expectStdOutEqual(expected_stdout);
+ }
+ parent_step.dependOn(&run.step);
+ },
+ .Header => @panic("TODO"),
+ }
+ }
+}
+
+/// Sort test filenames in-place, so that incremental test cases ("foo.0.zig",
+/// "foo.1.zig", etc.) are contiguous and appear in numerical order.
+fn sortTestFilenames(filenames: [][]const u8) void {
+ const Context = struct {
+ pub fn lessThan(_: @This(), a: []const u8, b: []const u8) bool {
+ const a_parts = getTestFileNameParts(a);
+ const b_parts = getTestFileNameParts(b);
+
+            // Sort "<base_name>.X.<file_ext>" based on "<base_name>" and "<file_ext>" first
+ return switch (std.mem.order(u8, a_parts.base_name, b_parts.base_name)) {
+ .lt => true,
+ .gt => false,
+ .eq => switch (std.mem.order(u8, a_parts.file_ext, b_parts.file_ext)) {
+ .lt => true,
+ .gt => false,
+ .eq => {
+ // a and b differ only in their ".X" part
+
+                        // Sort "<base_name>.<file_ext>" before any "<base_name>.X.<file_ext>"
+ if (a_parts.test_index) |a_index| {
+ if (b_parts.test_index) |b_index| {
+ // Make sure that incremental tests appear in linear order
+ return a_index < b_index;
+ } else {
+ return false;
+ }
+ } else {
+ return b_parts.test_index != null;
+ }
+ },
+ },
+ };
+ }
+ };
+ std.sort.sort([]const u8, filenames, Context{}, Context.lessThan);
+}
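
A sketch of what this ordering means in practice, written as a unit test that could sit next to sortTestFilenames in this file; the filenames are made up:

    test "sortTestFilenames ordering (illustrative)" {
        // Hypothetical directory contents, deliberately out of order.
        var names = [_][]const u8{ "incr.1.zig", "basics.zig", "incr.0.zig", "hello.zig", "incr.2.zig" };
        sortTestFilenames(&names);
        // Independent files sort lexically; the incremental "incr.*" sequence
        // ends up contiguous and numerically ordered.
        const expected = [_][]const u8{ "basics.zig", "hello.zig", "incr.0.zig", "incr.1.zig", "incr.2.zig" };
        for (expected, names) |e, n| try std.testing.expectEqualStrings(e, n);
    }
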
+
+/// Iterates a set of filenames extracting batches that are either incremental
+/// ("foo.0.zig", "foo.1.zig", etc.) or independent ("foo.zig", "bar.zig", etc.).
+/// Assumes filenames are sorted.
+const TestIterator = struct {
+ start: usize = 0,
+ end: usize = 0,
+ filenames: []const []const u8,
+ /// reset on each call to `next`
+ index: usize = 0,
+
+ const Error = error{InvalidIncrementalTestIndex};
+
+ fn next(it: *TestIterator) Error!?[]const []const u8 {
+ try it.nextInner();
+ if (it.start == it.end) return null;
+ return it.filenames[it.start..it.end];
+ }
+
+ fn nextInner(it: *TestIterator) Error!void {
+ it.start = it.end;
+ if (it.end == it.filenames.len) return;
+ if (it.end + 1 == it.filenames.len) {
+ it.end += 1;
+ return;
+ }
+
+ const remaining = it.filenames[it.end..];
+ it.index = 0;
+ while (it.index < remaining.len - 1) : (it.index += 1) {
+ // First, check if this file is part of an incremental update sequence
+            // Split filename into "<base_name>.<test_index>.<file_ext>"
+ const prev_parts = getTestFileNameParts(remaining[it.index]);
+ const new_parts = getTestFileNameParts(remaining[it.index + 1]);
+
+ // If base_name and file_ext match, these files are in the same test sequence
+ // and the new one should be the incremented version of the previous test
+ if (std.mem.eql(u8, prev_parts.base_name, new_parts.base_name) and
+ std.mem.eql(u8, prev_parts.file_ext, new_parts.file_ext))
+ {
+            // This is "foo.X.zig" followed by "foo.Y.zig". Make sure that Y == X + 1
+ if (prev_parts.test_index == null)
+ return error.InvalidIncrementalTestIndex;
+ if (new_parts.test_index == null)
+ return error.InvalidIncrementalTestIndex;
+ if (new_parts.test_index.? != prev_parts.test_index.? + 1)
+ return error.InvalidIncrementalTestIndex;
+ } else {
+ // This is not the same test sequence, so the new file must be the first file
+ // in a new sequence ("*.0.zig") or an independent test file ("*.zig")
+ if (new_parts.test_index != null and new_parts.test_index.? != 0)
+ return error.InvalidIncrementalTestIndex;
+
+ it.end += it.index + 1;
+ break;
+ }
+ } else {
+ it.end += remaining.len;
+ }
+ }
+
+ /// In the event of an `error.InvalidIncrementalTestIndex`, this function can
+ /// be used to find the current filename that was being processed.
+ /// Asserts the iterator hasn't reached the end.
+ fn currentFilename(it: TestIterator) []const u8 {
+ assert(it.end != it.filenames.len);
+ const remaining = it.filenames[it.end..];
+ return remaining[it.index + 1];
+ }
+};
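
Continuing the same hypothetical filenames, a sketch of the batches the iterator yields; mixing "incr.zig" with "incr.0.zig", or skipping an index, would instead surface error.InvalidIncrementalTestIndex:

    test "TestIterator batching (illustrative)" {
        const names = [_][]const u8{ "basics.zig", "hello.zig", "incr.0.zig", "incr.1.zig", "incr.2.zig" };
        var it = TestIterator{ .filenames = &names };
        // Two independent single-file batches...
        try std.testing.expectEqual(@as(usize, 1), (try it.next()).?.len);
        try std.testing.expectEqual(@as(usize, 1), (try it.next()).?.len);
        // ...then the whole incremental sequence as one batch.
        try std.testing.expectEqual(@as(usize, 3), (try it.next()).?.len);
        try std.testing.expect((try it.next()) == null);
    }
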
+
+/// For a filename in the format "<base_name>.X.<file_ext>" or "<base_name>.<file_ext>",
+/// returns "<base_name>", "<file_ext>" and X parsed as a decimal number. If X is not
+/// present, or cannot be parsed as a decimal number, it is treated as part of "<base_name>".
+fn getTestFileNameParts(name: []const u8) struct {
+ base_name: []const u8,
+ file_ext: []const u8,
+ test_index: ?usize,
+} {
+ const file_ext = std.fs.path.extension(name);
+    const trimmed = name[0 .. name.len - file_ext.len]; // Trim off ".<file_ext>"
+ const maybe_index = std.fs.path.extension(trimmed); // Extract ".X"
+
+ // Attempt to parse index
+ const index: ?usize = if (maybe_index.len > 0)
+ std.fmt.parseInt(usize, maybe_index[1..], 10) catch null
+ else
+ null;
+
+    // Adjust "<base_name>" extent based on parsing success
+ const base_name_end = trimmed.len - if (index != null) maybe_index.len else 0;
+ return .{
+ .base_name = name[0..base_name_end],
+ .file_ext = if (file_ext.len > 0) file_ext[1..] else file_ext,
+ .test_index = index,
+ };
+}
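
A sketch of the parts produced for two hypothetical names, again written as a test that would live alongside this function:

    test "getTestFileNameParts (illustrative)" {
        const incr = getTestFileNameParts("foo.3.zig");
        try std.testing.expectEqualStrings("foo", incr.base_name);
        try std.testing.expectEqualStrings("zig", incr.file_ext);
        try std.testing.expectEqual(@as(?usize, 3), incr.test_index);

        // A second dot that is not a decimal number stays part of the base name.
        const plain = getTestFileNameParts("foo.bar.zig");
        try std.testing.expectEqualStrings("foo.bar", plain.base_name);
        try std.testing.expectEqualStrings("zig", plain.file_ext);
        try std.testing.expectEqual(@as(?usize, null), plain.test_index);
    }
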
+
+const TestStrategy = enum {
+ /// Execute tests as independent compilations, unless they are explicitly
+ /// incremental ("foo.0.zig", "foo.1.zig", etc.)
+ independent,
+ /// Execute all tests as incremental updates to a single compilation. Explicitly
+ /// incremental tests ("foo.0.zig", "foo.1.zig", etc.) still execute in order
+ incremental,
+};
+
+/// Default config values for known test manifest key-value pairings.
+/// Currently handled defaults are:
+/// * backend
+/// * target
+/// * output_mode
+/// * is_test
+const TestManifestConfigDefaults = struct {
+ /// Asserts if the key doesn't exist - yep, it's an oversight alright.
+ fn get(@"type": TestManifest.Type, key: []const u8) []const u8 {
+ if (std.mem.eql(u8, key, "backend")) {
+ return "stage2";
+ } else if (std.mem.eql(u8, key, "target")) {
+ if (@"type" == .@"error") {
+ return "native";
+ }
+ comptime {
+ var defaults: []const u8 = "";
+ // TODO should we only return "mainstream" targets by default here?
+ // TODO we should also specify ABIs explicitly as the backends are
+ // getting more and more complete
+ // Linux
+ inline for (&[_][]const u8{ "x86_64", "arm", "aarch64" }) |arch| {
+ defaults = defaults ++ arch ++ "-linux" ++ ",";
+ }
+ // macOS
+ inline for (&[_][]const u8{ "x86_64", "aarch64" }) |arch| {
+ defaults = defaults ++ arch ++ "-macos" ++ ",";
+ }
+ // Windows
+ defaults = defaults ++ "x86_64-windows" ++ ",";
+ // Wasm
+ defaults = defaults ++ "wasm32-wasi";
+ return defaults;
+ }
+ } else if (std.mem.eql(u8, key, "output_mode")) {
+ return switch (@"type") {
+ .@"error" => "Obj",
+ .run => "Exe",
+ .compile => "Obj",
+ .cli => @panic("TODO test harness for CLI tests"),
+ };
+ } else if (std.mem.eql(u8, key, "is_test")) {
+ return "0";
+ } else if (std.mem.eql(u8, key, "link_libc")) {
+ return "0";
+ } else unreachable;
+ }
+};
+
+/// Manifest syntax example:
+/// (see https://github.com/ziglang/zig/issues/11288)
+///
+/// error
+/// backend=stage1,stage2
+/// output_mode=exe
+///
+/// :3:19: error: foo
+///
+/// run
+/// target=x86_64-linux,aarch64-macos
+///
+/// I am expected stdout! Hello!
+///
+/// cli
+///
+/// build test
+const TestManifest = struct {
+ type: Type,
+ config_map: std.StringHashMap([]const u8),
+ trailing_bytes: []const u8 = "",
+
+ const Type = enum {
+ @"error",
+ run,
+ cli,
+ compile,
+ };
+
+ const TrailingIterator = struct {
+ inner: std.mem.TokenIterator(u8),
+
+ fn next(self: *TrailingIterator) ?[]const u8 {
+ const next_inner = self.inner.next() orelse return null;
+ return std.mem.trim(u8, next_inner[2..], " \t");
+ }
+ };
+
+ fn ConfigValueIterator(comptime T: type) type {
+ return struct {
+ inner: std.mem.SplitIterator(u8),
+
+ fn next(self: *@This()) !?T {
+ const next_raw = self.inner.next() orelse return null;
+ const parseFn = getDefaultParser(T);
+ return try parseFn(next_raw);
+ }
+ };
+ }
+
+ fn parse(arena: Allocator, bytes: []const u8) !TestManifest {
+ // The manifest is the last contiguous block of comments in the file
+ // We scan for the beginning by searching backward for the first non-empty line that does not start with "//"
+ var start: ?usize = null;
+ var end: usize = bytes.len;
+ if (bytes.len > 0) {
+ var cursor: usize = bytes.len - 1;
+ while (true) {
+ // Move to beginning of line
+ while (cursor > 0 and bytes[cursor - 1] != '\n') cursor -= 1;
+
+ if (std.mem.startsWith(u8, bytes[cursor..], "//")) {
+ start = cursor; // Contiguous comment line, include in manifest
+ } else {
+ if (start != null) break; // Encountered non-comment line, end of manifest
+
+ // We ignore all-whitespace lines following the comment block, but anything else
+ // means that there is no manifest present.
+ if (std.mem.trim(u8, bytes[cursor..end], " \r\n\t").len == 0) {
+ end = cursor;
+ } else break; // If it's not whitespace, there is no manifest
+ }
+
+ // Move to previous line
+ if (cursor != 0) cursor -= 1 else break;
+ }
+ }
+
+ const actual_start = start orelse return error.MissingTestManifest;
+ const manifest_bytes = bytes[actual_start..end];
+
+ var it = std.mem.tokenize(u8, manifest_bytes, "\r\n");
+
+ // First line is the test type
+ const tt: Type = blk: {
+ const line = it.next() orelse return error.MissingTestCaseType;
+ const raw = std.mem.trim(u8, line[2..], " \t");
+ if (std.mem.eql(u8, raw, "error")) {
+ break :blk .@"error";
+ } else if (std.mem.eql(u8, raw, "run")) {
+ break :blk .run;
+ } else if (std.mem.eql(u8, raw, "cli")) {
+ break :blk .cli;
+ } else if (std.mem.eql(u8, raw, "compile")) {
+ break :blk .compile;
+ } else {
+ std.log.warn("unknown test case type requested: {s}", .{raw});
+ return error.UnknownTestCaseType;
+ }
+ };
+
+ var manifest: TestManifest = .{
+ .type = tt,
+ .config_map = std.StringHashMap([]const u8).init(arena),
+ };
+
+        // Each subsequent line, up to a blank comment line, is a key=value(s) pair
+ while (it.next()) |line| {
+ const trimmed = std.mem.trim(u8, line[2..], " \t");
+ if (trimmed.len == 0) break;
+
+ // Parse key=value(s)
+ var kv_it = std.mem.split(u8, trimmed, "=");
+ const key = kv_it.first();
+ try manifest.config_map.putNoClobber(key, kv_it.next() orelse return error.MissingValuesForConfig);
+ }
+
+ // Finally, trailing is expected output
+ manifest.trailing_bytes = manifest_bytes[it.index..];
+
+ return manifest;
+ }
+
+ fn getConfigForKey(
+ self: TestManifest,
+ key: []const u8,
+ comptime T: type,
+ ) ConfigValueIterator(T) {
+ const bytes = self.config_map.get(key) orelse TestManifestConfigDefaults.get(self.type, key);
+ return ConfigValueIterator(T){
+ .inner = std.mem.split(u8, bytes, ","),
+ };
+ }
+
+ fn getConfigForKeyAlloc(
+ self: TestManifest,
+ allocator: Allocator,
+ key: []const u8,
+ comptime T: type,
+ ) ![]const T {
+ var out = std.ArrayList(T).init(allocator);
+ defer out.deinit();
+ var it = self.getConfigForKey(key, T);
+ while (try it.next()) |item| {
+ try out.append(item);
+ }
+ return try out.toOwnedSlice();
+ }
+
+ fn getConfigForKeyAssertSingle(self: TestManifest, key: []const u8, comptime T: type) !T {
+ var it = self.getConfigForKey(key, T);
+ const res = (try it.next()) orelse unreachable;
+ assert((try it.next()) == null);
+ return res;
+ }
+
+ fn trailing(self: TestManifest) TrailingIterator {
+ return .{
+ .inner = std.mem.tokenize(u8, self.trailing_bytes, "\r\n"),
+ };
+ }
+
+ fn trailingAlloc(self: TestManifest, allocator: Allocator) error{OutOfMemory}![]const []const u8 {
+ var out = std.ArrayList([]const u8).init(allocator);
+ defer out.deinit();
+ var it = self.trailing();
+ while (it.next()) |line| {
+ try out.append(line);
+ }
+ return try out.toOwnedSlice();
+ }
+
+ fn ParseFn(comptime T: type) type {
+ return fn ([]const u8) anyerror!T;
+ }
+
+ fn getDefaultParser(comptime T: type) ParseFn(T) {
+ if (T == CrossTarget) return struct {
+ fn parse(str: []const u8) anyerror!T {
+ var opts = CrossTarget.ParseOptions{
+ .arch_os_abi = str,
+ };
+ return try CrossTarget.parse(opts);
+ }
+ }.parse;
+
+ switch (@typeInfo(T)) {
+ .Int => return struct {
+ fn parse(str: []const u8) anyerror!T {
+ return try std.fmt.parseInt(T, str, 0);
+ }
+ }.parse,
+ .Bool => return struct {
+ fn parse(str: []const u8) anyerror!T {
+ const as_int = try std.fmt.parseInt(u1, str, 0);
+ return as_int > 0;
+ }
+ }.parse,
+ .Enum => return struct {
+ fn parse(str: []const u8) anyerror!T {
+ return std.meta.stringToEnum(T, str) orelse {
+ std.log.err("unknown enum variant for {s}: {s}", .{ @typeName(T), str });
+ return error.UnknownEnumVariant;
+ };
+ }
+ }.parse,
+ .Struct => @compileError("no default parser for " ++ @typeName(T)),
+ else => @compileError("no default parser for " ++ @typeName(T)),
+ }
+ }
+};
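
A sketch of parsing a manifest out of a hypothetical source string and reading config keys from it; the source text and targets are invented:

    test "TestManifest config parsing (illustrative)" {
        const src =
            \\pub fn main() void {}
            \\
            \\// run
            \\// target=x86_64-linux,aarch64-macos
            \\//
            \\// hello
        ;
        var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena_state.deinit();
        const arena = arena_state.allocator();

        const manifest = try TestManifest.parse(arena, src);
        // "target" holds a comma-separated list; it parses into two CrossTargets.
        const targets = try manifest.getConfigForKeyAlloc(arena, "target", CrossTarget);
        try std.testing.expectEqual(@as(usize, 2), targets.len);
        // Unset keys fall back to TestManifestConfigDefaults, e.g. is_test=0.
        try std.testing.expectEqual(false, try manifest.getConfigForKeyAssertSingle("is_test", bool));
    }
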
+
+const Cases = @This();
+const builtin = @import("builtin");
+const std = @import("std");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+const CrossTarget = std.zig.CrossTarget;
+const Compilation = @import("../../src/Compilation.zig");
+const zig_h = @import("../../src/link.zig").File.C.zig_h;
+const introspect = @import("../../src/introspect.zig");
+const ThreadPool = std.Thread.Pool;
+const WaitGroup = std.Thread.WaitGroup;
+const build_options = @import("build_options");
+const Package = @import("../../src/Package.zig");
+
+pub const std_options = struct {
+ pub const log_level: std.log.Level = .err;
+};
+
+var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{
+ .stack_trace_frames = build_options.mem_leak_frames,
+}){};
+
+// TODO: instead of embedding the compiler in this process, spawn the compiler
+// as a sub-process and communicate the updates using the compiler protocol.
+pub fn main() !void {
+ const use_gpa = build_options.force_gpa or !builtin.link_libc;
+ const gpa = gpa: {
+ if (use_gpa) {
+ break :gpa general_purpose_allocator.allocator();
+ }
+ // We would prefer to use raw libc allocator here, but cannot
+ // use it if it won't support the alignment we need.
+ if (@alignOf(std.c.max_align_t) < @alignOf(i128)) {
+ break :gpa std.heap.c_allocator;
+ }
+ break :gpa std.heap.raw_c_allocator;
+ };
+
+ var single_threaded_arena = std.heap.ArenaAllocator.init(gpa);
+ defer single_threaded_arena.deinit();
+
+ var thread_safe_arena: std.heap.ThreadSafeAllocator = .{
+ .child_allocator = single_threaded_arena.allocator(),
+ };
+ const arena = thread_safe_arena.allocator();
+
+ const args = try std.process.argsAlloc(arena);
+ const case_file_path = args[1];
+ const zig_exe_path = args[2];
+
+ var filenames = std.ArrayList([]const u8).init(arena);
+
+ const case_dirname = std.fs.path.dirname(case_file_path).?;
+ var iterable_dir = try std.fs.cwd().openIterableDir(case_dirname, .{});
+ defer iterable_dir.close();
+
+ if (std.mem.endsWith(u8, case_file_path, ".0.zig")) {
+ const stem = case_file_path[case_dirname.len + 1 .. case_file_path.len - "0.zig".len];
+ var it = iterable_dir.iterate();
+ while (try it.next()) |entry| {
+ if (entry.kind != .File) continue;
+ if (!std.mem.startsWith(u8, entry.name, stem)) continue;
+ try filenames.append(try std.fs.path.join(arena, &.{ case_dirname, entry.name }));
+ }
+ } else {
+ try filenames.append(case_file_path);
+ }
+
+ if (filenames.items.len == 0) {
+ std.debug.print("failed to find the input source file(s) from '{s}'\n", .{
+ case_file_path,
+ });
+ std.process.exit(1);
+ }
+
+ // Sort filenames, so that incremental tests are contiguous and in-order
+ sortTestFilenames(filenames.items);
+
+ var ctx = Cases.init(gpa, arena);
+
+ var test_it = TestIterator{ .filenames = filenames.items };
+ while (test_it.next()) |maybe_batch| {
+ const batch = maybe_batch orelse break;
+ const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent;
+ var cases = std.ArrayList(usize).init(arena);
+
+ for (batch) |filename| {
+ const max_file_size = 10 * 1024 * 1024;
+ const src = try iterable_dir.dir.readFileAllocOptions(arena, filename, max_file_size, null, 1, 0);
+
+ // Parse the manifest
+ var manifest = try TestManifest.parse(arena, src);
+
+ if (cases.items.len == 0) {
+ const backends = try manifest.getConfigForKeyAlloc(arena, "backend", Backend);
+ const targets = try manifest.getConfigForKeyAlloc(arena, "target", CrossTarget);
+ const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool);
+ const output_mode = try manifest.getConfigForKeyAssertSingle("output_mode", std.builtin.OutputMode);
+
+ // Cross-product to get all possible test combinations
+ for (backends) |backend| {
+ for (targets) |target| {
+ const next = ctx.cases.items.len;
+ try ctx.cases.append(.{
+ .name = std.fs.path.stem(filename),
+ .target = target,
+ .backend = backend,
+ .updates = std.ArrayList(Cases.Update).init(ctx.cases.allocator),
+ .is_test = is_test,
+ .output_mode = output_mode,
+ .link_libc = backend == .llvm,
+ .deps = std.ArrayList(DepModule).init(ctx.cases.allocator),
+ });
+ try cases.append(next);
+ }
+ }
+ }
+
+ for (cases.items) |case_index| {
+ const case = &ctx.cases.items[case_index];
+ switch (manifest.type) {
+ .compile => {
+ case.addCompile(src);
+ },
+ .@"error" => {
+ const errors = try manifest.trailingAlloc(arena);
+ switch (strategy) {
+ .independent => {
+ case.addError(src, errors);
+ },
+ .incremental => {
+ case.addErrorNamed("update", src, errors);
+ },
+ }
+ },
+ .run => {
+ var output = std.ArrayList(u8).init(arena);
+ var trailing_it = manifest.trailing();
+ while (trailing_it.next()) |line| {
+ try output.appendSlice(line);
+ try output.append('\n');
+ }
+ if (output.items.len > 0) {
+ try output.resize(output.items.len - 1);
+ }
+ case.addCompareOutput(src, try output.toOwnedSlice());
+ },
+ .cli => @panic("TODO cli tests"),
+ }
+ }
+ }
+ } else |err| {
+ return err;
+ }
+
+ return runCases(&ctx, zig_exe_path);
+}
+
+fn runCases(self: *Cases, zig_exe_path: []const u8) !void {
+ const host = try std.zig.system.NativeTargetInfo.detect(.{});
+
+ var progress = std.Progress{};
+ const root_node = progress.start("compiler", self.cases.items.len);
+ progress.terminal = null;
+ defer root_node.end();
+
+ var zig_lib_directory = try introspect.findZigLibDir(self.gpa);
+ defer zig_lib_directory.handle.close();
+ defer self.gpa.free(zig_lib_directory.path.?);
+
+ var aux_thread_pool: ThreadPool = undefined;
+ try aux_thread_pool.init(.{ .allocator = self.gpa });
+ defer aux_thread_pool.deinit();
+
+    // Use the same global cache dir for all the tests, so that we don't have to, for
+    // example, rebuild musl libc for every case (when the LLVM backend is enabled).
+ var global_tmp = std.testing.tmpDir(.{});
+ defer global_tmp.cleanup();
+
+ var cache_dir = try global_tmp.dir.makeOpenPath("zig-cache", .{});
+ defer cache_dir.close();
+ const tmp_dir_path = try std.fs.path.join(self.gpa, &[_][]const u8{ ".", "zig-cache", "tmp", &global_tmp.sub_path });
+ defer self.gpa.free(tmp_dir_path);
+
+ const global_cache_directory: Compilation.Directory = .{
+ .handle = cache_dir,
+ .path = try std.fs.path.join(self.gpa, &[_][]const u8{ tmp_dir_path, "zig-cache" }),
+ };
+ defer self.gpa.free(global_cache_directory.path.?);
+
+ {
+ for (self.cases.items) |*case| {
+ if (build_options.skip_non_native) {
+ if (case.target.getCpuArch() != builtin.cpu.arch)
+ continue;
+ if (case.target.getObjectFormat() != builtin.object_format)
+ continue;
+ }
+
+ // Skip tests that require LLVM backend when it is not available
+ if (!build_options.have_llvm and case.backend == .llvm)
+ continue;
+
+ assert(case.backend != .stage1);
+
+ if (build_options.test_filter) |test_filter| {
+ if (std.mem.indexOf(u8, case.name, test_filter) == null) continue;
+ }
+
+ var prg_node = root_node.start(case.name, case.updates.items.len);
+ prg_node.activate();
+ defer prg_node.end();
+
+ try runOneCase(
+ self.gpa,
+ &prg_node,
+ case.*,
+ zig_lib_directory,
+ zig_exe_path,
+ &aux_thread_pool,
+ global_cache_directory,
+ host,
+ );
+ }
+ }
+}
+
+fn runOneCase(
+ allocator: Allocator,
+ root_node: *std.Progress.Node,
+ case: Case,
+ zig_lib_directory: Compilation.Directory,
+ zig_exe_path: []const u8,
+ thread_pool: *ThreadPool,
+ global_cache_directory: Compilation.Directory,
+ host: std.zig.system.NativeTargetInfo,
+) !void {
+ const tmp_src_path = "tmp.zig";
+ const enable_rosetta = build_options.enable_rosetta;
+ const enable_qemu = build_options.enable_qemu;
+ const enable_wine = build_options.enable_wine;
+ const enable_wasmtime = build_options.enable_wasmtime;
+ const enable_darling = build_options.enable_darling;
+ const glibc_runtimes_dir: ?[]const u8 = build_options.glibc_runtimes_dir;
+
+ const target_info = try std.zig.system.NativeTargetInfo.detect(case.target);
+ const target = target_info.target;
+
+ var arena_allocator = std.heap.ArenaAllocator.init(allocator);
+ defer arena_allocator.deinit();
+ const arena = arena_allocator.allocator();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ var cache_dir = try tmp.dir.makeOpenPath("zig-cache", .{});
+ defer cache_dir.close();
+
+ const tmp_dir_path = try std.fs.path.join(
+ arena,
+ &[_][]const u8{ ".", "zig-cache", "tmp", &tmp.sub_path },
+ );
+ const local_cache_path = try std.fs.path.join(
+ arena,
+ &[_][]const u8{ tmp_dir_path, "zig-cache" },
+ );
+
+ const zig_cache_directory: Compilation.Directory = .{
+ .handle = cache_dir,
+ .path = local_cache_path,
+ };
+
+ var main_pkg: Package = .{
+ .root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir },
+ .root_src_path = tmp_src_path,
+ };
+ defer {
+ var it = main_pkg.table.iterator();
+ while (it.next()) |kv| {
+ allocator.free(kv.key_ptr.*);
+ kv.value_ptr.*.destroy(allocator);
+ }
+ main_pkg.table.deinit(allocator);
+ }
+
+ for (case.deps.items) |dep| {
+ var pkg = try Package.create(
+ allocator,
+ tmp_dir_path,
+ dep.path,
+ );
+ errdefer pkg.destroy(allocator);
+ try main_pkg.add(allocator, dep.name, pkg);
+ }
+
+ const bin_name = try std.zig.binNameAlloc(arena, .{
+ .root_name = "test_case",
+ .target = target,
+ .output_mode = case.output_mode,
+ });
+
+ const emit_directory: Compilation.Directory = .{
+ .path = tmp_dir_path,
+ .handle = tmp.dir,
+ };
+ const emit_bin: Compilation.EmitLoc = .{
+ .directory = emit_directory,
+ .basename = bin_name,
+ };
+ const emit_h: ?Compilation.EmitLoc = if (case.emit_h) .{
+ .directory = emit_directory,
+ .basename = "test_case.h",
+ } else null;
+ const use_llvm: bool = switch (case.backend) {
+ .llvm => true,
+ else => false,
+ };
+ const comp = try Compilation.create(allocator, .{
+ .local_cache_directory = zig_cache_directory,
+ .global_cache_directory = global_cache_directory,
+ .zig_lib_directory = zig_lib_directory,
+ .thread_pool = thread_pool,
+ .root_name = "test_case",
+ .target = target,
+ // TODO: support tests for object file building, and library builds
+ // and linking. This will require a rework to support multi-file
+ // tests.
+ .output_mode = case.output_mode,
+ .is_test = case.is_test,
+ .optimize_mode = case.optimize_mode,
+ .emit_bin = emit_bin,
+ .emit_h = emit_h,
+ .main_pkg = &main_pkg,
+ .keep_source_files_loaded = true,
+ .is_native_os = case.target.isNativeOs(),
+ .is_native_abi = case.target.isNativeAbi(),
+ .dynamic_linker = target_info.dynamic_linker.get(),
+ .link_libc = case.link_libc,
+ .use_llvm = use_llvm,
+ .self_exe_path = zig_exe_path,
+ // TODO instead of turning off color, pass in a std.Progress.Node
+ .color = .off,
+ .reference_trace = 0,
+ // TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in
+ // until the auto-select mechanism deems them worthy
+ .use_lld = switch (case.backend) {
+ .stage2 => false,
+ else => null,
+ },
+ });
+ defer comp.destroy();
+
+ update: for (case.updates.items, 0..) |update, update_index| {
+ var update_node = root_node.start(update.name, 3);
+ update_node.activate();
+ defer update_node.end();
+
+ var sync_node = update_node.start("write", 0);
+ sync_node.activate();
+ for (update.files.items) |file| {
+ try tmp.dir.writeFile(file.path, file.src);
+ }
+ sync_node.end();
+
+ var module_node = update_node.start("parse/analysis/codegen", 0);
+ module_node.activate();
+ try comp.makeBinFileWritable();
+ try comp.update(&module_node);
+ module_node.end();
+
+ if (update.case != .Error) {
+ var all_errors = try comp.getAllErrorsAlloc();
+ defer all_errors.deinit(allocator);
+ if (all_errors.errorMessageCount() > 0) {
+ all_errors.renderToStdErr(.{
+ .ttyconf = std.debug.detectTTYConfig(std.io.getStdErr()),
+ });
+ // TODO print generated C code
+ return error.UnexpectedCompileErrors;
+ }
+ }
+
+ switch (update.case) {
+ .Header => |expected_output| {
+ var file = try tmp.dir.openFile("test_case.h", .{ .mode = .read_only });
+ defer file.close();
+ const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024);
+
+ try std.testing.expectEqualStrings(expected_output, out);
+ },
+ .CompareObjectFile => |expected_output| {
+ var file = try tmp.dir.openFile(bin_name, .{ .mode = .read_only });
+ defer file.close();
+ const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024);
+
+ try std.testing.expectEqualStrings(expected_output, out);
+ },
+ .Compile => {},
+ .Error => |expected_errors| {
+ var test_node = update_node.start("assert", 0);
+ test_node.activate();
+ defer test_node.end();
+
+ var error_bundle = try comp.getAllErrorsAlloc();
+ defer error_bundle.deinit(allocator);
+
+ if (error_bundle.errorMessageCount() == 0) {
+ return error.ExpectedCompilationErrors;
+ }
+
+ var actual_stderr = std.ArrayList(u8).init(arena);
+ try error_bundle.renderToWriter(.{
+ .ttyconf = .no_color,
+ .include_reference_trace = false,
+ .include_source_line = false,
+ }, actual_stderr.writer());
+
+ // Render the expected lines into a string that we can compare verbatim.
+ var expected_generated = std.ArrayList(u8).init(arena);
+
+ var actual_line_it = std.mem.split(u8, actual_stderr.items, "\n");
+ for (expected_errors) |expect_line| {
+ const actual_line = actual_line_it.next() orelse {
+ try expected_generated.appendSlice(expect_line);
+ try expected_generated.append('\n');
+ continue;
+ };
+ if (std.mem.endsWith(u8, actual_line, expect_line)) {
+ try expected_generated.appendSlice(actual_line);
+ try expected_generated.append('\n');
+ continue;
+ }
+ if (std.mem.startsWith(u8, expect_line, ":?:?: ")) {
+ if (std.mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..])) {
+ try expected_generated.appendSlice(actual_line);
+ try expected_generated.append('\n');
+ continue;
+ }
+ }
+ try expected_generated.appendSlice(expect_line);
+ try expected_generated.append('\n');
+ }
+
+ try std.testing.expectEqualStrings(expected_generated.items, actual_stderr.items);
+ },
+ .Execution => |expected_stdout| {
+ if (!std.process.can_spawn) {
+ std.debug.print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)});
+ continue :update; // Pass test.
+ }
+
+ update_node.setEstimatedTotalItems(4);
+
+ var argv = std.ArrayList([]const u8).init(allocator);
+ defer argv.deinit();
+
+ var exec_result = x: {
+ var exec_node = update_node.start("execute", 0);
+ exec_node.activate();
+ defer exec_node.end();
+
+ // We go out of our way here to use the unique temporary directory name in
+ // the exe_path so that it makes its way into the cache hash, avoiding
+ // cache collisions from multiple threads doing `zig run` at the same time
+ // on the same test_case.c input filename.
+ const ss = std.fs.path.sep_str;
+ const exe_path = try std.fmt.allocPrint(
+ arena,
+ ".." ++ ss ++ "{s}" ++ ss ++ "{s}",
+ .{ &tmp.sub_path, bin_name },
+ );
+ if (case.target.ofmt != null and case.target.ofmt.? == .c) {
+ if (host.getExternalExecutor(target_info, .{ .link_libc = true }) != .native) {
+ // We wouldn't be able to run the compiled C code.
+ continue :update; // Pass test.
+ }
+ try argv.appendSlice(&[_][]const u8{
+ zig_exe_path,
+ "run",
+ "-cflags",
+ "-std=c99",
+ "-pedantic",
+ "-Werror",
+ "-Wno-incompatible-library-redeclaration", // https://github.com/ziglang/zig/issues/875
+ "--",
+ "-lc",
+ exe_path,
+ });
+ if (zig_lib_directory.path) |p| {
+ try argv.appendSlice(&.{ "-I", p });
+ }
+ } else switch (host.getExternalExecutor(target_info, .{ .link_libc = case.link_libc })) {
+ .native => {
+ if (case.backend == .stage2 and case.target.getCpuArch() == .arm) {
+ // https://github.com/ziglang/zig/issues/13623
+ continue :update; // Pass test.
+ }
+ try argv.append(exe_path);
+ },
+ .bad_dl, .bad_os_or_cpu => continue :update, // Pass test.
+
+ .rosetta => if (enable_rosetta) {
+ try argv.append(exe_path);
+ } else {
+ continue :update; // Rosetta not available, pass test.
+ },
+
+ .qemu => |qemu_bin_name| if (enable_qemu) {
+ const need_cross_glibc = target.isGnuLibC() and case.link_libc;
+ const glibc_dir_arg: ?[]const u8 = if (need_cross_glibc)
+ glibc_runtimes_dir orelse continue :update // glibc dir not available; pass test
+ else
+ null;
+ try argv.append(qemu_bin_name);
+ if (glibc_dir_arg) |dir| {
+ const linux_triple = try target.linuxTriple(arena);
+ const full_dir = try std.fs.path.join(arena, &[_][]const u8{
+ dir,
+ linux_triple,
+ });
+
+ try argv.append("-L");
+ try argv.append(full_dir);
+ }
+ try argv.append(exe_path);
+ } else {
+ continue :update; // QEMU not available; pass test.
+ },
+
+ .wine => |wine_bin_name| if (enable_wine) {
+ try argv.append(wine_bin_name);
+ try argv.append(exe_path);
+ } else {
+ continue :update; // Wine not available; pass test.
+ },
+
+ .wasmtime => |wasmtime_bin_name| if (enable_wasmtime) {
+ try argv.append(wasmtime_bin_name);
+ try argv.append("--dir=.");
+ try argv.append(exe_path);
+ } else {
+ continue :update; // wasmtime not available; pass test.
+ },
+
+ .darling => |darling_bin_name| if (enable_darling) {
+ try argv.append(darling_bin_name);
+                        // Since we use a path relative to the cwd here, we invoke
+                        // darling with the "shell" subcommand.
+ try argv.append("shell");
+ try argv.append(exe_path);
+ } else {
+ continue :update; // Darling not available; pass test.
+ },
+ }
+
+ try comp.makeBinFileExecutable();
+
+ while (true) {
+ break :x std.ChildProcess.exec(.{
+ .allocator = allocator,
+ .argv = argv.items,
+ .cwd_dir = tmp.dir,
+ .cwd = tmp_dir_path,
+ }) catch |err| switch (err) {
+ error.FileBusy => {
+ // There is a fundamental design flaw in Unix systems with how
+ // ETXTBSY interacts with fork+exec.
+ // https://github.com/golang/go/issues/22315
+ // https://bugs.openjdk.org/browse/JDK-8068370
+ // Unfortunately, this could be a real error, but we can't
+ // tell the difference here.
+ continue;
+ },
+ else => {
+ std.debug.print("\n{s}.{d} The following command failed with {s}:\n", .{
+ case.name, update_index, @errorName(err),
+ });
+ dumpArgs(argv.items);
+ return error.ChildProcessExecution;
+ },
+ };
+ }
+ };
+ var test_node = update_node.start("test", 0);
+ test_node.activate();
+ defer test_node.end();
+ defer allocator.free(exec_result.stdout);
+ defer allocator.free(exec_result.stderr);
+ switch (exec_result.term) {
+ .Exited => |code| {
+ if (code != 0) {
+ std.debug.print("\n{s}\n{s}: execution exited with code {d}:\n", .{
+ exec_result.stderr, case.name, code,
+ });
+ dumpArgs(argv.items);
+ return error.ChildProcessExecution;
+ }
+ },
+ else => {
+ std.debug.print("\n{s}\n{s}: execution crashed:\n", .{
+ exec_result.stderr, case.name,
+ });
+ dumpArgs(argv.items);
+ return error.ChildProcessExecution;
+ },
+ }
+ try std.testing.expectEqualStrings(expected_stdout, exec_result.stdout);
+ // We allow stderr to have garbage in it because wasmtime prints a
+ // warning about --invoke even though we don't pass it.
+ //std.testing.expectEqualStrings("", exec_result.stderr);
+ },
+ }
+ }
+}
+
+fn dumpArgs(argv: []const []const u8) void {
+ for (argv) |arg| {
+ std.debug.print("{s} ", .{arg});
+ }
+ std.debug.print("\n", .{});
+}
diff --git a/test/src/CompareOutput.zig b/test/src/CompareOutput.zig
new file mode 100644
index 0000000000..854bd11f9c
--- /dev/null
+++ b/test/src/CompareOutput.zig
@@ -0,0 +1,174 @@
+//! This is the implementation of the test harness.
+//! For the actual test cases, see test/compare_output.zig.
+
+b: *std.Build,
+step: *std.Build.Step,
+test_index: usize,
+test_filter: ?[]const u8,
+optimize_modes: []const OptimizeMode,
+
+const Special = enum {
+ None,
+ Asm,
+ RuntimeSafety,
+};
+
+const TestCase = struct {
+ name: []const u8,
+ sources: ArrayList(SourceFile),
+ expected_output: []const u8,
+ link_libc: bool,
+ special: Special,
+ cli_args: []const []const u8,
+
+ const SourceFile = struct {
+ filename: []const u8,
+ source: []const u8,
+ };
+
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
+ self.sources.append(SourceFile{
+ .filename = filename,
+ .source = source,
+ }) catch @panic("OOM");
+ }
+
+ pub fn setCommandLineArgs(self: *TestCase, args: []const []const u8) void {
+ self.cli_args = args;
+ }
+};
+
+pub fn createExtra(self: *CompareOutput, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase {
+ var tc = TestCase{
+ .name = name,
+ .sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
+ .expected_output = expected_output,
+ .link_libc = false,
+ .special = special,
+ .cli_args = &[_][]const u8{},
+ };
+ const root_src_name = if (special == Special.Asm) "source.s" else "source.zig";
+ tc.addSourceFile(root_src_name, source);
+ return tc;
+}
+
+pub fn create(self: *CompareOutput, name: []const u8, source: []const u8, expected_output: []const u8) TestCase {
+ return createExtra(self, name, source, expected_output, Special.None);
+}
+
+pub fn addC(self: *CompareOutput, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ var tc = self.create(name, source, expected_output);
+ tc.link_libc = true;
+ self.addCase(tc);
+}
+
+pub fn add(self: *CompareOutput, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ const tc = self.create(name, source, expected_output);
+ self.addCase(tc);
+}
+
+pub fn addAsm(self: *CompareOutput, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ const tc = self.createExtra(name, source, expected_output, Special.Asm);
+ self.addCase(tc);
+}
+
+pub fn addRuntimeSafety(self: *CompareOutput, name: []const u8, source: []const u8) void {
+ const tc = self.createExtra(name, source, undefined, Special.RuntimeSafety);
+ self.addCase(tc);
+}
+
+pub fn addCase(self: *CompareOutput, case: TestCase) void {
+ const b = self.b;
+
+ const write_src = b.addWriteFiles();
+ for (case.sources.items) |src_file| {
+ write_src.add(src_file.filename, src_file.source);
+ }
+
+ switch (case.special) {
+ Special.Asm => {
+ const annotated_case_name = fmt.allocPrint(self.b.allocator, "run assemble-and-link {s}", .{
+ case.name,
+ }) catch @panic("OOM");
+ if (self.test_filter) |filter| {
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
+ }
+
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .target = .{},
+ .optimize = .Debug,
+ });
+ exe.addAssemblyFileSource(write_src.getFileSource(case.sources.items[0].filename).?);
+
+ const run = exe.run();
+ run.setName(annotated_case_name);
+ run.addArgs(case.cli_args);
+ run.expectStdOutEqual(case.expected_output);
+
+ self.step.dependOn(&run.step);
+ },
+ Special.None => {
+ for (self.optimize_modes) |optimize| {
+ const annotated_case_name = fmt.allocPrint(self.b.allocator, "run compare-output {s} ({s})", .{
+ case.name, @tagName(optimize),
+ }) catch @panic("OOM");
+ if (self.test_filter) |filter| {
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
+ }
+
+ const basename = case.sources.items[0].filename;
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(basename).?,
+ .optimize = optimize,
+ .target = .{},
+ });
+ if (case.link_libc) {
+ exe.linkSystemLibrary("c");
+ }
+
+ const run = exe.run();
+ run.setName(annotated_case_name);
+ run.addArgs(case.cli_args);
+ run.expectStdOutEqual(case.expected_output);
+
+ self.step.dependOn(&run.step);
+ }
+ },
+ Special.RuntimeSafety => {
+ // TODO iterate over self.optimize_modes and test this in both
+ // debug and release safe mode
+ const annotated_case_name = fmt.allocPrint(self.b.allocator, "run safety {s}", .{case.name}) catch @panic("OOM");
+ if (self.test_filter) |filter| {
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
+ }
+
+ const basename = case.sources.items[0].filename;
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(basename).?,
+ .target = .{},
+ .optimize = .Debug,
+ });
+ if (case.link_libc) {
+ exe.linkSystemLibrary("c");
+ }
+
+ const run = exe.run();
+ run.setName(annotated_case_name);
+ run.addArgs(case.cli_args);
+ run.expectExitCode(126);
+
+ self.step.dependOn(&run.step);
+ },
+ }
+}
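
For reference, a hypothetical registration (case name, program, and output invented) from test/compare_output.zig, assuming "cases" is the *CompareOutput context handed to it:

    cases.add("hello world",
        \\const std = @import("std");
        \\pub fn main() void {
        \\    std.io.getStdOut().writeAll("Hello, world!\n") catch unreachable;
        \\}
    , "Hello, world!\n");
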
+
+const CompareOutput = @This();
+const std = @import("std");
+const ArrayList = std.ArrayList;
+const fmt = std.fmt;
+const mem = std.mem;
+const fs = std.fs;
+const OptimizeMode = std.builtin.OptimizeMode;
diff --git a/test/src/StackTrace.zig b/test/src/StackTrace.zig
new file mode 100644
index 0000000000..c32720a210
--- /dev/null
+++ b/test/src/StackTrace.zig
@@ -0,0 +1,107 @@
+b: *std.Build,
+step: *Step,
+test_index: usize,
+test_filter: ?[]const u8,
+optimize_modes: []const OptimizeMode,
+check_exe: *std.Build.CompileStep,
+
+const Expect = [@typeInfo(OptimizeMode).Enum.fields.len][]const u8;
+
+pub fn addCase(self: *StackTrace, config: anytype) void {
+ if (@hasField(@TypeOf(config), "exclude")) {
+ if (config.exclude.exclude()) return;
+ }
+ if (@hasField(@TypeOf(config), "exclude_arch")) {
+ const exclude_arch: []const std.Target.Cpu.Arch = &config.exclude_arch;
+ for (exclude_arch) |arch| if (arch == builtin.cpu.arch) return;
+ }
+ if (@hasField(@TypeOf(config), "exclude_os")) {
+ const exclude_os: []const std.Target.Os.Tag = &config.exclude_os;
+ for (exclude_os) |os| if (os == builtin.os.tag) return;
+ }
+ for (self.optimize_modes) |optimize_mode| {
+ switch (optimize_mode) {
+ .Debug => {
+ if (@hasField(@TypeOf(config), "Debug")) {
+ self.addExpect(config.name, config.source, optimize_mode, config.Debug);
+ }
+ },
+ .ReleaseSafe => {
+ if (@hasField(@TypeOf(config), "ReleaseSafe")) {
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseSafe);
+ }
+ },
+ .ReleaseFast => {
+ if (@hasField(@TypeOf(config), "ReleaseFast")) {
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseFast);
+ }
+ },
+ .ReleaseSmall => {
+ if (@hasField(@TypeOf(config), "ReleaseSmall")) {
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseSmall);
+ }
+ },
+ }
+ }
+}
+
+fn addExpect(
+ self: *StackTrace,
+ name: []const u8,
+ source: []const u8,
+ optimize_mode: OptimizeMode,
+ mode_config: anytype,
+) void {
+ if (@hasField(@TypeOf(mode_config), "exclude")) {
+ if (mode_config.exclude.exclude()) return;
+ }
+ if (@hasField(@TypeOf(mode_config), "exclude_arch")) {
+ const exclude_arch: []const std.Target.Cpu.Arch = &mode_config.exclude_arch;
+ for (exclude_arch) |arch| if (arch == builtin.cpu.arch) return;
+ }
+ if (@hasField(@TypeOf(mode_config), "exclude_os")) {
+ const exclude_os: []const std.Target.Os.Tag = &mode_config.exclude_os;
+ for (exclude_os) |os| if (os == builtin.os.tag) return;
+ }
+
+ const b = self.b;
+ const annotated_case_name = fmt.allocPrint(b.allocator, "check {s} ({s})", .{
+ name, @tagName(optimize_mode),
+ }) catch @panic("OOM");
+ if (self.test_filter) |filter| {
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
+ }
+
+ const src_basename = "source.zig";
+ const write_src = b.addWriteFile(src_basename, source);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(src_basename).?,
+ .optimize = optimize_mode,
+ .target = .{},
+ });
+
+ const run = b.addRunArtifact(exe);
+ run.removeEnvironmentVariable("ZIG_DEBUG_COLOR");
+ run.setEnvironmentVariable("NO_COLOR", "1");
+ run.expectExitCode(1);
+ run.expectStdOutEqual("");
+
+ const check_run = b.addRunArtifact(self.check_exe);
+ check_run.setName(annotated_case_name);
+ check_run.addFileSourceArg(run.captureStdErr());
+ check_run.addArgs(&.{
+ @tagName(optimize_mode),
+ });
+ check_run.expectStdOutEqual(mode_config.expect);
+
+ self.step.dependOn(&check_run.step);
+}
+
+const StackTrace = @This();
+const std = @import("std");
+const builtin = @import("builtin");
+const Step = std.Build.Step;
+const OptimizeMode = std.builtin.OptimizeMode;
+const fmt = std.fmt;
+const mem = std.mem;
diff --git a/test/src/check-stack-trace.zig b/test/src/check-stack-trace.zig
new file mode 100644
index 0000000000..bb1db55076
--- /dev/null
+++ b/test/src/check-stack-trace.zig
@@ -0,0 +1,79 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const mem = std.mem;
+const fs = std.fs;
+
+pub fn main() !void {
+ var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+ defer arena_instance.deinit();
+ const arena = arena_instance.allocator();
+
+ const args = try std.process.argsAlloc(arena);
+
+ const input_path = args[1];
+ const optimize_mode_text = args[2];
+
+ const input_bytes = try std.fs.cwd().readFileAlloc(arena, input_path, 5 * 1024 * 1024);
+ const optimize_mode = std.meta.stringToEnum(std.builtin.OptimizeMode, optimize_mode_text).?;
+
+ var stderr = input_bytes;
+
+ // process result
+ // - keep only basename of source file path
+ // - replace address with symbolic string
+ // - replace function name with symbolic string when optimize_mode != .Debug
+ // - skip empty lines
+ const got: []const u8 = got_result: {
+ var buf = std.ArrayList(u8).init(arena);
+ defer buf.deinit();
+ if (stderr.len != 0 and stderr[stderr.len - 1] == '\n') stderr = stderr[0 .. stderr.len - 1];
+ var it = mem.split(u8, stderr, "\n");
+ process_lines: while (it.next()) |line| {
+ if (line.len == 0) continue;
+
+ // offset search past `[drive]:` on windows
+ var pos: usize = if (builtin.os.tag == .windows) 2 else 0;
+ // locate delims/anchor
+ const delims = [_][]const u8{ ":", ":", ":", " in ", "(", ")" };
+ var marks = [_]usize{0} ** delims.len;
+ for (delims, 0..) |delim, i| {
+ marks[i] = mem.indexOfPos(u8, line, pos, delim) orelse {
+                // unexpected pattern: emit the raw line and continue
+ try buf.appendSlice(line);
+ try buf.appendSlice("\n");
+ continue :process_lines;
+ };
+ pos = marks[i] + delim.len;
+ }
+ // locate source basename
+ pos = mem.lastIndexOfScalar(u8, line[0..marks[0]], fs.path.sep) orelse {
+            // unexpected pattern: emit the raw line and continue
+ try buf.appendSlice(line);
+ try buf.appendSlice("\n");
+ continue :process_lines;
+ };
+ // end processing if source basename changes
+ if (!mem.eql(u8, "source.zig", line[pos + 1 .. marks[0]])) break;
+ // emit substituted line
+ try buf.appendSlice(line[pos + 1 .. marks[2] + delims[2].len]);
+ try buf.appendSlice(" [address]");
+ if (optimize_mode == .Debug) {
+            // On certain platforms (Windows), or possibly depending on how main is linked,
+            // the object file extension may be present, so we simply strip any extension.
+ if (mem.indexOfScalar(u8, line[marks[4]..marks[5]], '.')) |idot| {
+ try buf.appendSlice(line[marks[3] .. marks[4] + idot]);
+ try buf.appendSlice(line[marks[5]..]);
+ } else {
+ try buf.appendSlice(line[marks[3]..]);
+ }
+ } else {
+ try buf.appendSlice(line[marks[3] .. marks[3] + delims[3].len]);
+ try buf.appendSlice("[function]");
+ }
+ try buf.appendSlice("\n");
+ }
+ break :got_result try buf.toOwnedSlice();
+ };
+
+ try std.io.getStdOut().writeAll(got);
+}
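
To illustrate the normalization, an invented stack-trace line and the forms this tool would reduce it to, under the assumption that the runtime prints source location, address, symbol, and module in this rough shape:

    // Hypothetical input line:
    //   /home/user/project/source.zig:7:5: 0x2142a9 in main (test)
    // Debug output (symbol kept, any object-file extension stripped from the module):
    //   source.zig:7:5: [address] in main (test)
    // Non-Debug output (symbol replaced as well):
    //   source.zig:7:5: [address] in [function]
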
diff --git a/test/src/compare_output.zig b/test/src/compare_output.zig
deleted file mode 100644
index 3bda3bdacd..0000000000
--- a/test/src/compare_output.zig
+++ /dev/null
@@ -1,177 +0,0 @@
-// This is the implementation of the test harness.
-// For the actual test cases, see test/compare_output.zig.
-const std = @import("std");
-const ArrayList = std.ArrayList;
-const fmt = std.fmt;
-const mem = std.mem;
-const fs = std.fs;
-const OptimizeMode = std.builtin.OptimizeMode;
-
-pub const CompareOutputContext = struct {
- b: *std.Build,
- step: *std.Build.Step,
- test_index: usize,
- test_filter: ?[]const u8,
- optimize_modes: []const OptimizeMode,
-
- const Special = enum {
- None,
- Asm,
- RuntimeSafety,
- };
-
- const TestCase = struct {
- name: []const u8,
- sources: ArrayList(SourceFile),
- expected_output: []const u8,
- link_libc: bool,
- special: Special,
- cli_args: []const []const u8,
-
- const SourceFile = struct {
- filename: []const u8,
- source: []const u8,
- };
-
- pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
- self.sources.append(SourceFile{
- .filename = filename,
- .source = source,
- }) catch unreachable;
- }
-
- pub fn setCommandLineArgs(self: *TestCase, args: []const []const u8) void {
- self.cli_args = args;
- }
- };
-
- pub fn createExtra(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase {
- var tc = TestCase{
- .name = name,
- .sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
- .expected_output = expected_output,
- .link_libc = false,
- .special = special,
- .cli_args = &[_][]const u8{},
- };
- const root_src_name = if (special == Special.Asm) "source.s" else "source.zig";
- tc.addSourceFile(root_src_name, source);
- return tc;
- }
-
- pub fn create(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) TestCase {
- return createExtra(self, name, source, expected_output, Special.None);
- }
-
- pub fn addC(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
- var tc = self.create(name, source, expected_output);
- tc.link_libc = true;
- self.addCase(tc);
- }
-
- pub fn add(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
- const tc = self.create(name, source, expected_output);
- self.addCase(tc);
- }
-
- pub fn addAsm(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
- const tc = self.createExtra(name, source, expected_output, Special.Asm);
- self.addCase(tc);
- }
-
- pub fn addRuntimeSafety(self: *CompareOutputContext, name: []const u8, source: []const u8) void {
- const tc = self.createExtra(name, source, undefined, Special.RuntimeSafety);
- self.addCase(tc);
- }
-
- pub fn addCase(self: *CompareOutputContext, case: TestCase) void {
- const b = self.b;
-
- const write_src = b.addWriteFiles();
- for (case.sources.items) |src_file| {
- write_src.add(src_file.filename, src_file.source);
- }
-
- switch (case.special) {
- Special.Asm => {
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "assemble-and-link {s}", .{
- case.name,
- }) catch unreachable;
- if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
- }
-
- const exe = b.addExecutable(.{
- .name = "test",
- .target = .{},
- .optimize = .Debug,
- });
- exe.addAssemblyFileSource(write_src.getFileSource(case.sources.items[0].filename).?);
-
- const run = exe.run();
- run.addArgs(case.cli_args);
- run.expectStdErrEqual("");
- run.expectStdOutEqual(case.expected_output);
-
- self.step.dependOn(&run.step);
- },
- Special.None => {
- for (self.optimize_modes) |optimize| {
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "{s} {s} ({s})", .{
- "compare-output",
- case.name,
- @tagName(optimize),
- }) catch unreachable;
- if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
- }
-
- const basename = case.sources.items[0].filename;
- const exe = b.addExecutable(.{
- .name = "test",
- .root_source_file = write_src.getFileSource(basename).?,
- .optimize = optimize,
- .target = .{},
- });
- if (case.link_libc) {
- exe.linkSystemLibrary("c");
- }
-
- const run = exe.run();
- run.addArgs(case.cli_args);
- run.expectStdErrEqual("");
- run.expectStdOutEqual(case.expected_output);
-
- self.step.dependOn(&run.step);
- }
- },
- Special.RuntimeSafety => {
- // TODO iterate over self.optimize_modes and test this in both
- // debug and release safe mode
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "safety {s}", .{case.name}) catch unreachable;
- if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
- }
-
- const basename = case.sources.items[0].filename;
- const exe = b.addExecutable(.{
- .name = "test",
- .root_source_file = write_src.getFileSource(basename).?,
- .target = .{},
- .optimize = .Debug,
- });
- if (case.link_libc) {
- exe.linkSystemLibrary("c");
- }
-
- const run = exe.run();
- run.addArgs(case.cli_args);
- run.stderr_action = .ignore;
- run.stdout_action = .ignore;
- run.expected_term = .{ .Exited = 126 };
-
- self.step.dependOn(&run.step);
- },
- }
- }
-};
diff --git a/test/standalone.zig b/test/standalone.zig
index 965139235c..4cf795a85f 100644
--- a/test/standalone.zig
+++ b/test/standalone.zig
@@ -1,116 +1,218 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const tests = @import("tests.zig");
+pub const SimpleCase = struct {
+ src_path: []const u8,
+ link_libc: bool = false,
+ all_modes: bool = false,
+ target: std.zig.CrossTarget = .{},
+ is_test: bool = false,
+ is_exe: bool = true,
+ /// Run only on this OS.
+ os_filter: ?std.Target.Os.Tag = null,
+};
-pub fn addCases(cases: *tests.StandaloneContext) void {
- cases.add("test/standalone/hello_world/hello.zig");
- cases.addC("test/standalone/hello_world/hello_libc.zig");
+pub const BuildCase = struct {
+ build_root: []const u8,
+ import: type,
+};
- cases.addBuildFile("test/standalone/options/build.zig", .{
- .extra_argv = &.{
- "-Dbool_true",
- "-Dbool_false=false",
- "-Dint=1234",
- "-De=two",
- "-Dstring=hello",
+pub const simple_cases = [_]SimpleCase{
+ .{
+ .src_path = "test/standalone/hello_world/hello.zig",
+ .all_modes = true,
+ },
+ .{
+ .src_path = "test/standalone/hello_world/hello_libc.zig",
+ .link_libc = true,
+ .all_modes = true,
+ },
+ .{
+ .src_path = "test/standalone/cat/main.zig",
+ },
+ // https://github.com/ziglang/zig/issues/6025
+ //.{
+ // .src_path = "test/standalone/issue_9693/main.zig",
+ //},
+ .{
+ .src_path = "test/standalone/brace_expansion.zig",
+ .is_test = true,
+ },
+ .{
+ .src_path = "test/standalone/issue_7030.zig",
+ .target = .{
+ .cpu_arch = .wasm32,
+ .os_tag = .freestanding,
},
- });
+ },
- cases.add("test/standalone/cat/main.zig");
- if (builtin.zig_backend == .stage1) { // https://github.com/ziglang/zig/issues/6025
- cases.add("test/standalone/issue_9693/main.zig");
- }
- cases.add("test/standalone/issue_12471/main.zig");
- cases.add("test/standalone/guess_number/main.zig");
- cases.add("test/standalone/main_return_error/error_u8.zig");
- cases.add("test/standalone/main_return_error/error_u8_non_zero.zig");
- cases.add("test/standalone/noreturn_call/inline.zig");
- cases.add("test/standalone/noreturn_call/as_arg.zig");
- cases.addBuildFile("test/standalone/test_runner_path/build.zig", .{ .requires_stage2 = true });
- cases.addBuildFile("test/standalone/issue_13970/build.zig", .{});
- cases.addBuildFile("test/standalone/main_pkg_path/build.zig", .{});
- cases.addBuildFile("test/standalone/shared_library/build.zig", .{});
- cases.addBuildFile("test/standalone/mix_o_files/build.zig", .{});
- cases.addBuildFile("test/standalone/mix_c_files/build.zig", .{
- .build_modes = true,
- .cross_targets = true,
- });
- cases.addBuildFile("test/standalone/global_linkage/build.zig", .{});
- cases.addBuildFile("test/standalone/static_c_lib/build.zig", .{});
- cases.addBuildFile("test/standalone/issue_339/build.zig", .{});
- cases.addBuildFile("test/standalone/issue_8550/build.zig", .{});
- cases.addBuildFile("test/standalone/issue_794/build.zig", .{});
- cases.addBuildFile("test/standalone/issue_5825/build.zig", .{});
- cases.addBuildFile("test/standalone/pkg_import/build.zig", .{});
- cases.addBuildFile("test/standalone/use_alias/build.zig", .{});
- cases.addBuildFile("test/standalone/brace_expansion/build.zig", .{});
- if (builtin.os.tag != .windows or builtin.cpu.arch != .aarch64) {
- // https://github.com/ziglang/zig/issues/13685
- cases.addBuildFile("test/standalone/empty_env/build.zig", .{});
- }
- cases.addBuildFile("test/standalone/issue_7030/build.zig", .{});
- cases.addBuildFile("test/standalone/install_raw_hex/build.zig", .{});
- if (builtin.zig_backend == .stage1) { // https://github.com/ziglang/zig/issues/12194
- cases.addBuildFile("test/standalone/issue_9812/build.zig", .{});
- }
- if (builtin.os.tag != .windows) {
- // https://github.com/ziglang/zig/issues/12419
- cases.addBuildFile("test/standalone/issue_11595/build.zig", .{});
- }
+ .{ .src_path = "test/standalone/issue_12471/main.zig" },
+ .{ .src_path = "test/standalone/guess_number/main.zig" },
+ .{ .src_path = "test/standalone/main_return_error/error_u8.zig" },
+ .{ .src_path = "test/standalone/main_return_error/error_u8_non_zero.zig" },
+ .{ .src_path = "test/standalone/noreturn_call/inline.zig" },
+ .{ .src_path = "test/standalone/noreturn_call/as_arg.zig" },
- if (builtin.os.tag != .wasi and
- // https://github.com/ziglang/zig/issues/13550
- (builtin.os.tag != .macos or builtin.cpu.arch != .aarch64) and
- // https://github.com/ziglang/zig/issues/13686
- (builtin.os.tag != .windows or builtin.cpu.arch != .aarch64))
- {
- cases.addBuildFile("test/standalone/load_dynamic_library/build.zig", .{});
- }
-
- if (builtin.os.tag == .windows) {
- cases.addBuildFile("test/standalone/windows_spawn/build.zig", .{});
- }
-
- cases.addBuildFile("test/standalone/c_compiler/build.zig", .{
- .build_modes = true,
- .cross_targets = true,
- });
-
- if (builtin.os.tag == .windows) {
- cases.addC("test/standalone/issue_9402/main.zig");
- }
- // Try to build and run a PIE executable.
- if (builtin.os.tag == .linux) {
- cases.addBuildFile("test/standalone/pie/build.zig", .{});
- }
- cases.addBuildFile("test/standalone/issue_12706/build.zig", .{});
- if (std.os.have_sigpipe_support) {
- cases.addBuildFile("test/standalone/sigpipe/build.zig", .{});
- }
+ .{
+ .src_path = "test/standalone/issue_9402/main.zig",
+ .os_filter = .windows,
+ .link_libc = true,
+ },
// Ensure the development tools are buildable. Alphabetically sorted.
// No need to build `tools/spirv/grammar.zig`.
- cases.add("tools/extract-grammar.zig");
- cases.add("tools/gen_outline_atomics.zig");
- cases.add("tools/gen_spirv_spec.zig");
- cases.add("tools/gen_stubs.zig");
- cases.add("tools/generate_linux_syscalls.zig");
- cases.add("tools/process_headers.zig");
- cases.add("tools/update-license-headers.zig");
- cases.add("tools/update-linux-headers.zig");
- cases.add("tools/update_clang_options.zig");
- cases.add("tools/update_cpu_features.zig");
- cases.add("tools/update_glibc.zig");
- cases.add("tools/update_spirv_features.zig");
+ .{ .src_path = "tools/extract-grammar.zig" },
+ .{ .src_path = "tools/gen_outline_atomics.zig" },
+ .{ .src_path = "tools/gen_spirv_spec.zig" },
+ .{ .src_path = "tools/gen_stubs.zig" },
+ .{ .src_path = "tools/generate_linux_syscalls.zig" },
+ .{ .src_path = "tools/process_headers.zig" },
+ .{ .src_path = "tools/update-license-headers.zig" },
+ .{ .src_path = "tools/update-linux-headers.zig" },
+ .{ .src_path = "tools/update_clang_options.zig" },
+ .{ .src_path = "tools/update_cpu_features.zig" },
+ .{ .src_path = "tools/update_glibc.zig" },
+ .{ .src_path = "tools/update_spirv_features.zig" },
+};
- cases.addBuildFile("test/standalone/issue_13030/build.zig", .{ .build_modes = true });
- cases.addBuildFile("test/standalone/emit_asm_and_bin/build.zig", .{});
- cases.addBuildFile("test/standalone/issue_12588/build.zig", .{});
- cases.addBuildFile("test/standalone/embed_generated_file/build.zig", .{});
+pub const build_cases = [_]BuildCase{
+ .{
+ .build_root = "test/standalone/test_runner_path",
+ .import = @import("standalone/test_runner_path/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/issue_13970",
+ .import = @import("standalone/issue_13970/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/main_pkg_path",
+ .import = @import("standalone/main_pkg_path/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/shared_library",
+ .import = @import("standalone/shared_library/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/mix_o_files",
+ .import = @import("standalone/mix_o_files/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/mix_c_files",
+ .import = @import("standalone/mix_c_files/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/global_linkage",
+ .import = @import("standalone/global_linkage/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/static_c_lib",
+ .import = @import("standalone/static_c_lib/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/issue_339",
+ .import = @import("standalone/issue_339/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/issue_8550",
+ .import = @import("standalone/issue_8550/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/issue_794",
+ .import = @import("standalone/issue_794/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/issue_5825",
+ .import = @import("standalone/issue_5825/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/pkg_import",
+ .import = @import("standalone/pkg_import/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/use_alias",
+ .import = @import("standalone/use_alias/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/install_raw_hex",
+ .import = @import("standalone/install_raw_hex/build.zig"),
+ },
+ // TODO: remove the EmitOption.emit_to option and make it provide a FileSource
+ //.{
+ // .build_root = "test/standalone/emit_asm_and_bin",
+ // .import = @import("standalone/emit_asm_and_bin/build.zig"),
+ //},
+ // TODO: remove the EmitOption.emit_to option and make it provide a FileSource
+ //.{
+ // .build_root = "test/standalone/issue_12588",
+ // .import = @import("standalone/issue_12588/build.zig"),
+ //},
+ .{
+ .build_root = "test/standalone/embed_generated_file",
+ .import = @import("standalone/embed_generated_file/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/extern",
+ .import = @import("standalone/extern/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/dep_diamond",
+ .import = @import("standalone/dep_diamond/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/dep_triangle",
+ .import = @import("standalone/dep_triangle/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/dep_recursive",
+ .import = @import("standalone/dep_recursive/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/dep_mutually_recursive",
+ .import = @import("standalone/dep_mutually_recursive/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/dep_shared_builtin",
+ .import = @import("standalone/dep_shared_builtin/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/empty_env",
+ .import = @import("standalone/empty_env/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/issue_9812",
+ .import = @import("standalone/issue_9812/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/issue_11595",
+ .import = @import("standalone/issue_11595/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/load_dynamic_library",
+ .import = @import("standalone/load_dynamic_library/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/windows_spawn",
+ .import = @import("standalone/windows_spawn/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/c_compiler",
+ .import = @import("standalone/c_compiler/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/pie",
+ .import = @import("standalone/pie/build.zig"),
+ },
+ .{
+ .build_root = "test/standalone/issue_12706",
+ .import = @import("standalone/issue_12706/build.zig"),
+ },
+ // TODO: This test is disabled because its build script performs signal
+ // handling that belongs in a child process, not in build.zig.
+ //.{
+ // .build_root = "test/standalone/sigpipe",
+ // .import = @import("standalone/sigpipe/build.zig"),
+ //},
+ .{
+ .build_root = "test/standalone/issue_13030",
+ .import = @import("standalone/issue_13030/build.zig"),
+ },
+};
- cases.addBuildFile("test/standalone/dep_diamond/build.zig", .{});
- cases.addBuildFile("test/standalone/dep_triangle/build.zig", .{});
- cases.addBuildFile("test/standalone/dep_recursive/build.zig", .{});
- cases.addBuildFile("test/standalone/dep_mutually_recursive/build.zig", .{});
- cases.addBuildFile("test/standalone/dep_shared_builtin/build.zig", .{});
-}
+const std = @import("std");
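For orientation, here is a minimal sketch, not part of this commit, of how a harness sitting next to test/standalone.zig could consume the new declarative `build_cases` table; the helper name `addBuildCaseSteps` and the step naming are illustrative assumptions, not the commit's actual consumer code.

const std = @import("std");
const standalone = @import("standalone.zig"); // the tables defined above

pub fn addBuildCaseSteps(b: *std.Build, parent: *std.Build.Step) void {
    // `build_cases` carries each build script as a comptime `type`, so the
    // loop must be `inline` and per-case flags become plain public decls,
    // e.g. `pub const requires_stage2 = true;` in test_runner_path/build.zig.
    inline for (standalone.build_cases) |case| {
        const wants_stage2 = @hasDecl(case.import, "requires_stage2") and
            case.import.requires_stage2;
        _ = wants_stage2; // a real harness would filter on flags like this
        const step = b.step(b.fmt("standalone-{s}", .{case.build_root}), case.build_root);
        parent.dependOn(step);
        // Configuring a sub-builder rooted at case.build_root and invoking
        // case.import.build on it is elided from this sketch.
    }
}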
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion.zig
similarity index 98%
rename from test/standalone/brace_expansion/main.zig
rename to test/standalone/brace_expansion.zig
index dcdcad3865..7a769f6af7 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion.zig
@@ -234,8 +234,8 @@ pub fn main() !void {
var result_buf = ArrayList(u8).init(global_allocator);
defer result_buf.deinit();
- try expandString(stdin.items, &result_buf);
- try stdout_file.write(result_buf.items);
+ try expandString(stdin, &result_buf);
+ try stdout_file.writeAll(result_buf.items);
}
test "invalid inputs" {
diff --git a/test/standalone/brace_expansion/build.zig b/test/standalone/brace_expansion/build.zig
deleted file mode 100644
index 7c32a09bef..0000000000
--- a/test/standalone/brace_expansion/build.zig
+++ /dev/null
@@ -1,11 +0,0 @@
-const std = @import("std");
-
-pub fn build(b: *std.Build) void {
- const main = b.addTest(.{
- .root_source_file = .{ .path = "main.zig" },
- .optimize = b.standardOptimizeOption(.{}),
- });
-
- const test_step = b.step("test", "Test it");
- test_step.dependOn(&main.step);
-}
diff --git a/test/standalone/c_compiler/build.zig b/test/standalone/c_compiler/build.zig
index dce999d4a2..6c5f2b4db6 100644
--- a/test/standalone/c_compiler/build.zig
+++ b/test/standalone/c_compiler/build.zig
@@ -1,27 +1,24 @@
const std = @import("std");
const builtin = @import("builtin");
-const CrossTarget = std.zig.CrossTarget;
-
-// TODO integrate this with the std.Build executor API
-fn isRunnableTarget(t: CrossTarget) bool {
- if (t.isNative()) return true;
-
- return (t.getOsTag() == builtin.os.tag and
- t.getCpuArch() == builtin.cpu.arch);
-}
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
- const test_step = b.step("test", "Test the program");
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
+ const target: std.zig.CrossTarget = .{};
const exe_c = b.addExecutable(.{
.name = "test_c",
.optimize = optimize,
.target = target,
});
- b.default_step.dependOn(&exe_c.step);
exe_c.addCSourceFile("test.c", &[0][]const u8{});
exe_c.linkLibC();
@@ -47,13 +44,13 @@ pub fn build(b: *std.Build) void {
else => {},
}
- if (isRunnableTarget(target)) {
- const run_c_cmd = exe_c.run();
- test_step.dependOn(&run_c_cmd.step);
- const run_cpp_cmd = exe_cpp.run();
- test_step.dependOn(&run_cpp_cmd.step);
- } else {
- test_step.dependOn(&exe_c.step);
- test_step.dependOn(&exe_cpp.step);
- }
+ const run_c_cmd = b.addRunArtifact(exe_c);
+ run_c_cmd.expectExitCode(0);
+ run_c_cmd.skip_foreign_checks = true;
+ test_step.dependOn(&run_c_cmd.step);
+
+ const run_cpp_cmd = b.addRunArtifact(exe_cpp);
+ run_cpp_cmd.expectExitCode(0);
+ run_cpp_cmd.skip_foreign_checks = true;
+ test_step.dependOn(&run_cpp_cmd.step);
}
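The change above shows the run-step pattern that replaces the hand-rolled `isRunnableTarget` helpers in the remaining build scripts: the run step itself is told to tolerate foreign binaries. Condensed into a minimal standalone build.zig, with illustrative names (`demo`, main.zig):

const std = @import("std");

pub fn build(b: *std.Build) void {
    const test_step = b.step("test", "Test it");
    b.default_step = test_step;

    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "main.zig" },
        .optimize = .Debug,
        .target = .{},
    });

    const run = b.addRunArtifact(exe);
    // If the produced binary cannot run on the build host (cross target, no
    // emulator available), the checks are skipped rather than failing the build.
    run.skip_foreign_checks = true;
    run.expectExitCode(0);
    test_step.dependOn(&run.step);
}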
diff --git a/test/standalone/dep_diamond/build.zig b/test/standalone/dep_diamond/build.zig
index b60f898f0b..12eda4ec5d 100644
--- a/test/standalone/dep_diamond/build.zig
+++ b/test/standalone/dep_diamond/build.zig
@@ -1,7 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
const shared = b.createModule(.{
.source_file = .{ .path = "shared.zig" },
@@ -23,6 +26,5 @@ pub fn build(b: *std.Build) void {
const run = exe.run();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/dep_mutually_recursive/build.zig b/test/standalone/dep_mutually_recursive/build.zig
index 0123646a9a..1a6bff8501 100644
--- a/test/standalone/dep_mutually_recursive/build.zig
+++ b/test/standalone/dep_mutually_recursive/build.zig
@@ -1,7 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
const foo = b.createModule(.{
.source_file = .{ .path = "foo.zig" },
@@ -21,6 +24,5 @@ pub fn build(b: *std.Build) void {
const run = exe.run();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/dep_recursive/build.zig b/test/standalone/dep_recursive/build.zig
index 32d546e283..35b9f3cc47 100644
--- a/test/standalone/dep_recursive/build.zig
+++ b/test/standalone/dep_recursive/build.zig
@@ -1,7 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
const foo = b.createModule(.{
.source_file = .{ .path = "foo.zig" },
@@ -17,6 +20,5 @@ pub fn build(b: *std.Build) void {
const run = exe.run();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/dep_shared_builtin/build.zig b/test/standalone/dep_shared_builtin/build.zig
index 6c029b654b..776794f95e 100644
--- a/test/standalone/dep_shared_builtin/build.zig
+++ b/test/standalone/dep_shared_builtin/build.zig
@@ -1,7 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
const exe = b.addExecutable(.{
.name = "test",
@@ -14,6 +17,5 @@ pub fn build(b: *std.Build) void {
const run = exe.run();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/dep_triangle/build.zig b/test/standalone/dep_triangle/build.zig
index f3b73aaf35..14163df84c 100644
--- a/test/standalone/dep_triangle/build.zig
+++ b/test/standalone/dep_triangle/build.zig
@@ -1,7 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
const shared = b.createModule(.{
.source_file = .{ .path = "shared.zig" },
@@ -20,6 +23,5 @@ pub fn build(b: *std.Build) void {
const run = exe.run();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/embed_generated_file/build.zig b/test/standalone/embed_generated_file/build.zig
index 3b17ff0b8f..af1ae0a00f 100644
--- a/test/standalone/embed_generated_file/build.zig
+++ b/test/standalone/embed_generated_file/build.zig
@@ -1,8 +1,8 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const target = b.standardTargetOptions(.{});
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
const bootloader = b.addExecutable(.{
.name = "bootloader",
@@ -16,13 +16,11 @@ pub fn build(b: *std.Build) void {
const exe = b.addTest(.{
.root_source_file = .{ .path = "main.zig" },
- .target = target,
- .optimize = optimize,
+ .optimize = .Debug,
});
exe.addAnonymousModule("bootloader.elf", .{
.source_file = bootloader.getOutputSource(),
});
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&exe.step);
}
diff --git a/test/standalone/emit_asm_and_bin/build.zig b/test/standalone/emit_asm_and_bin/build.zig
index 5345f0f538..594bf6552e 100644
--- a/test/standalone/emit_asm_and_bin/build.zig
+++ b/test/standalone/emit_asm_and_bin/build.zig
@@ -1,6 +1,9 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
const main = b.addTest(.{
.root_source_file = .{ .path = "main.zig" },
.optimize = b.standardOptimizeOption(.{}),
@@ -8,6 +11,5 @@ pub fn build(b: *std.Build) void {
main.emit_asm = .{ .emit_to = b.pathFromRoot("main.s") };
main.emit_bin = .{ .emit_to = b.pathFromRoot("main") };
- const test_step = b.step("test", "Run test");
- test_step.dependOn(&main.step);
+ test_step.dependOn(&main.run().step);
}
diff --git a/test/standalone/empty_env/build.zig b/test/standalone/empty_env/build.zig
index c4b4846141..27ec75be22 100644
--- a/test/standalone/empty_env/build.zig
+++ b/test/standalone/empty_env/build.zig
@@ -1,15 +1,25 @@
const std = @import("std");
+const builtin = @import("builtin");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+
+ if (builtin.os.tag == .windows and builtin.cpu.arch == .aarch64) {
+ // https://github.com/ziglang/zig/issues/13685
+ return;
+ }
+
const main = b.addExecutable(.{
.name = "main",
.root_source_file = .{ .path = "main.zig" },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
});
- const run = main.run();
+ const run = b.addRunArtifact(main);
run.clearEnvironment();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/extern/build.zig b/test/standalone/extern/build.zig
new file mode 100644
index 0000000000..153380e91d
--- /dev/null
+++ b/test/standalone/extern/build.zig
@@ -0,0 +1,20 @@
+const std = @import("std");
+
+pub fn build(b: *std.Build) void {
+ const optimize: std.builtin.OptimizeMode = .Debug;
+
+ const obj = b.addObject(.{
+ .name = "exports",
+ .root_source_file = .{ .path = "exports.zig" },
+ .target = .{},
+ .optimize = optimize,
+ });
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
+ main.addObject(obj);
+
+ const test_step = b.step("test", "Test it");
+ test_step.dependOn(&main.step);
+}
diff --git a/test/standalone/extern/exports.zig b/test/standalone/extern/exports.zig
new file mode 100644
index 0000000000..93351c4581
--- /dev/null
+++ b/test/standalone/extern/exports.zig
@@ -0,0 +1,12 @@
+var hidden: u32 = 0;
+export fn updateHidden(val: u32) void {
+ hidden = val;
+}
+export fn getHidden() u32 {
+ return hidden;
+}
+
+const T = extern struct { x: u32 };
+
+export var mut_val: f64 = 1.23;
+export const const_val: T = .{ .x = 42 };
diff --git a/test/standalone/extern/main.zig b/test/standalone/extern/main.zig
new file mode 100644
index 0000000000..4cbed184c3
--- /dev/null
+++ b/test/standalone/extern/main.zig
@@ -0,0 +1,21 @@
+const assert = @import("std").debug.assert;
+
+const updateHidden = @extern(*const fn (u32) callconv(.C) void, .{ .name = "updateHidden" });
+const getHidden = @extern(*const fn () callconv(.C) u32, .{ .name = "getHidden" });
+
+const T = extern struct { x: u32 };
+
+test {
+ var mut_val_ptr = @extern(*f64, .{ .name = "mut_val" });
+ var const_val_ptr = @extern(*const T, .{ .name = "const_val" });
+
+ assert(getHidden() == 0);
+ updateHidden(123);
+ assert(getHidden() == 123);
+
+ assert(mut_val_ptr.* == 1.23);
+ mut_val_ptr.* = 10.0;
+ assert(mut_val_ptr.* == 10.0);
+
+ assert(const_val_ptr.x == 42);
+}
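For comparison only, and not part of the commit: the same symbols could also be reached through ordinary extern declarations; main.zig uses `@extern` because it yields a pointer value and lets the symbol name and type be supplied at comptime, without a source-level declaration.

const T = extern struct { x: u32 };

extern fn updateHidden(val: u32) void;
extern fn getHidden() u32;
extern var mut_val: f64;
extern const const_val: T;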
diff --git a/test/standalone/global_linkage/build.zig b/test/standalone/global_linkage/build.zig
index 9f79c80fcf..ddcddd612a 100644
--- a/test/standalone/global_linkage/build.zig
+++ b/test/standalone/global_linkage/build.zig
@@ -1,20 +1,24 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test the program");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
const obj1 = b.addStaticLibrary(.{
.name = "obj1",
.root_source_file = .{ .path = "obj1.zig" },
.optimize = optimize,
- .target = .{},
+ .target = target,
});
const obj2 = b.addStaticLibrary(.{
.name = "obj2",
.root_source_file = .{ .path = "obj2.zig" },
.optimize = optimize,
- .target = .{},
+ .target = target,
});
const main = b.addTest(.{
@@ -24,6 +28,5 @@ pub fn build(b: *std.Build) void {
main.linkLibrary(obj1);
main.linkLibrary(obj2);
- const test_step = b.step("test", "Test it");
- test_step.dependOn(&main.step);
+ test_step.dependOn(&main.run().step);
}
diff --git a/test/standalone/install_raw_hex/build.zig b/test/standalone/install_raw_hex/build.zig
index 6ed515e381..b34bb01378 100644
--- a/test/standalone/install_raw_hex/build.zig
+++ b/test/standalone/install_raw_hex/build.zig
@@ -3,8 +3,8 @@ const std = @import("std");
const CheckFileStep = std.Build.CheckFileStep;
pub fn build(b: *std.Build) void {
- const test_step = b.step("test", "Test the program");
- b.default_step.dependOn(test_step);
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
const target = .{
.cpu_arch = .thumb,
@@ -13,7 +13,7 @@ pub fn build(b: *std.Build) void {
.abi = .gnueabihf,
};
- const optimize = b.standardOptimizeOption(.{});
+ const optimize: std.builtin.OptimizeMode = .Debug;
const elf = b.addExecutable(.{
.name = "zig-nrf52-blink.elf",
diff --git a/test/standalone/issue_11595/build.zig b/test/standalone/issue_11595/build.zig
index c335fb73da..7d9530c690 100644
--- a/test/standalone/issue_11595/build.zig
+++ b/test/standalone/issue_11595/build.zig
@@ -1,18 +1,17 @@
const std = @import("std");
const builtin = @import("builtin");
-const CrossTarget = std.zig.CrossTarget;
-
-// TODO integrate this with the std.Build executor API
-fn isRunnableTarget(t: CrossTarget) bool {
- if (t.isNative()) return true;
-
- return (t.getOsTag() == builtin.os.tag and
- t.getCpuArch() == builtin.cpu.arch);
-}
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
+
+ if (builtin.os.tag == .windows) {
+ // https://github.com/ziglang/zig/issues/12419
+ return;
+ }
const exe = b.addExecutable(.{
.name = "zigtest",
@@ -44,11 +43,9 @@ pub fn build(b: *std.Build) void {
b.default_step.dependOn(&exe.step);
- const test_step = b.step("test", "Test the program");
- if (isRunnableTarget(target)) {
- const run_cmd = exe.run();
- test_step.dependOn(&run_cmd.step);
- } else {
- test_step.dependOn(&exe.step);
- }
+ const run_cmd = b.addRunArtifact(exe);
+ run_cmd.skip_foreign_checks = true;
+ run_cmd.expectExitCode(0);
+
+ test_step.dependOn(&run_cmd.step);
}
diff --git a/test/standalone/issue_12588/build.zig b/test/standalone/issue_12588/build.zig
index 9f14c53e38..fa22252fcc 100644
--- a/test/standalone/issue_12588/build.zig
+++ b/test/standalone/issue_12588/build.zig
@@ -1,8 +1,11 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
const obj = b.addObject(.{
.name = "main",
@@ -15,6 +18,5 @@ pub fn build(b: *std.Build) void {
obj.emit_bin = .no_emit;
b.default_step.dependOn(&obj.step);
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&obj.step);
}
diff --git a/test/standalone/issue_12706/build.zig b/test/standalone/issue_12706/build.zig
index 9d616477a2..04eb826b44 100644
--- a/test/standalone/issue_12706/build.zig
+++ b/test/standalone/issue_12706/build.zig
@@ -2,17 +2,12 @@ const std = @import("std");
const builtin = @import("builtin");
const CrossTarget = std.zig.CrossTarget;
-// TODO integrate this with the std.Build executor API
-fn isRunnableTarget(t: CrossTarget) bool {
- if (t.isNative()) return true;
-
- return (t.getOsTag() == builtin.os.tag and
- t.getCpuArch() == builtin.cpu.arch);
-}
-
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
const exe = b.addExecutable(.{
.name = "main",
@@ -20,22 +15,15 @@ pub fn build(b: *std.Build) void {
.optimize = optimize,
.target = target,
});
- exe.install();
const c_sources = [_][]const u8{
"test.c",
};
-
exe.addCSourceFiles(&c_sources, &.{});
exe.linkLibC();
- b.default_step.dependOn(&exe.step);
-
- const test_step = b.step("test", "Test the program");
- if (isRunnableTarget(target)) {
- const run_cmd = exe.run();
- test_step.dependOn(&run_cmd.step);
- } else {
- test_step.dependOn(&exe.step);
- }
+ const run_cmd = b.addRunArtifact(exe);
+ run_cmd.expectExitCode(0);
+ run_cmd.skip_foreign_checks = true;
+ test_step.dependOn(&run_cmd.step);
}
diff --git a/test/standalone/issue_13030/build.zig b/test/standalone/issue_13030/build.zig
index 258d9b7db8..e31863fee2 100644
--- a/test/standalone/issue_13030/build.zig
+++ b/test/standalone/issue_13030/build.zig
@@ -3,17 +3,22 @@ const builtin = @import("builtin");
const CrossTarget = std.zig.CrossTarget;
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const obj = b.addObject(.{
.name = "main",
.root_source_file = .{ .path = "main.zig" },
.optimize = optimize,
- .target = target,
+ .target = .{},
});
- b.default_step.dependOn(&obj.step);
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&obj.step);
}
diff --git a/test/standalone/issue_13970/build.zig b/test/standalone/issue_13970/build.zig
index f5e07d8903..1eb8a5a121 100644
--- a/test/standalone/issue_13970/build.zig
+++ b/test/standalone/issue_13970/build.zig
@@ -1,6 +1,9 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
const test1 = b.addTest(.{
.root_source_file = .{ .path = "test_root/empty.zig" },
});
@@ -14,8 +17,7 @@ pub fn build(b: *std.Build) void {
test2.setTestRunner("src/main.zig");
test3.setTestRunner("src/main.zig");
- const test_step = b.step("test", "Test package path resolution of custom test runner");
- test_step.dependOn(&test1.step);
- test_step.dependOn(&test2.step);
- test_step.dependOn(&test3.step);
+ test_step.dependOn(&test1.run().step);
+ test_step.dependOn(&test2.run().step);
+ test_step.dependOn(&test3.run().step);
}
diff --git a/test/standalone/issue_339/build.zig b/test/standalone/issue_339/build.zig
index 62ac128aab..f4215dbb8b 100644
--- a/test/standalone/issue_339/build.zig
+++ b/test/standalone/issue_339/build.zig
@@ -1,13 +1,18 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
+
const obj = b.addObject(.{
.name = "test",
.root_source_file = .{ .path = "test.zig" },
- .target = b.standardTargetOptions(.{}),
- .optimize = b.standardOptimizeOption(.{}),
+ .target = target,
+ .optimize = optimize,
});
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&obj.step);
}
diff --git a/test/standalone/issue_5825/build.zig b/test/standalone/issue_5825/build.zig
index 89272280d4..e8e8d48772 100644
--- a/test/standalone/issue_5825/build.zig
+++ b/test/standalone/issue_5825/build.zig
@@ -1,12 +1,15 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
const target = .{
.cpu_arch = .x86_64,
.os_tag = .windows,
.abi = .msvc,
};
- const optimize = b.standardOptimizeOption(.{});
+ const optimize: std.builtin.OptimizeMode = .Debug;
const obj = b.addObject(.{
.name = "issue_5825",
.root_source_file = .{ .path = "main.zig" },
@@ -24,6 +27,5 @@ pub fn build(b: *std.Build) void {
exe.linkSystemLibrary("ntdll");
exe.addObject(obj);
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&exe.step);
}
diff --git a/test/standalone/issue_7030/main.zig b/test/standalone/issue_7030.zig
similarity index 100%
rename from test/standalone/issue_7030/main.zig
rename to test/standalone/issue_7030.zig
diff --git a/test/standalone/issue_7030/build.zig b/test/standalone/issue_7030/build.zig
deleted file mode 100644
index dc535318cc..0000000000
--- a/test/standalone/issue_7030/build.zig
+++ /dev/null
@@ -1,17 +0,0 @@
-const std = @import("std");
-
-pub fn build(b: *std.Build) void {
- const exe = b.addExecutable(.{
- .name = "issue_7030",
- .root_source_file = .{ .path = "main.zig" },
- .target = .{
- .cpu_arch = .wasm32,
- .os_tag = .freestanding,
- },
- });
- exe.install();
- b.default_step.dependOn(&exe.step);
-
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(&exe.step);
-}
diff --git a/test/standalone/issue_794/build.zig b/test/standalone/issue_794/build.zig
index 3089a28fd0..8527f4af2c 100644
--- a/test/standalone/issue_794/build.zig
+++ b/test/standalone/issue_794/build.zig
@@ -1,13 +1,13 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
const test_artifact = b.addTest(.{
.root_source_file = .{ .path = "main.zig" },
});
test_artifact.addIncludePath("a_directory");
- b.default_step.dependOn(&test_artifact.step);
-
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&test_artifact.step);
}
diff --git a/test/standalone/issue_8550/build.zig b/test/standalone/issue_8550/build.zig
index c3303d55db..8f7631e68f 100644
--- a/test/standalone/issue_8550/build.zig
+++ b/test/standalone/issue_8550/build.zig
@@ -1,6 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) !void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
const target = std.zig.CrossTarget{
.os_tag = .freestanding,
.cpu_arch = .arm,
@@ -8,7 +12,7 @@ pub fn build(b: *std.Build) !void {
.explicit = &std.Target.arm.cpu.arm1176jz_s,
},
};
- const optimize = b.standardOptimizeOption(.{});
+
const kernel = b.addExecutable(.{
.name = "kernel",
.root_source_file = .{ .path = "./main.zig" },
@@ -19,6 +23,5 @@ pub fn build(b: *std.Build) !void {
kernel.setLinkerScriptPath(.{ .path = "./linker.ld" });
kernel.install();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&kernel.step);
}
diff --git a/test/standalone/issue_9812/build.zig b/test/standalone/issue_9812/build.zig
index 4ca55ce999..71104b903c 100644
--- a/test/standalone/issue_9812/build.zig
+++ b/test/standalone/issue_9812/build.zig
@@ -1,7 +1,11 @@
const std = @import("std");
pub fn build(b: *std.Build) !void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+
const zip_add = b.addTest(.{
.root_source_file = .{ .path = "main.zig" },
.optimize = optimize,
@@ -13,6 +17,5 @@ pub fn build(b: *std.Build) !void {
zip_add.addIncludePath("vendor/kuba-zip");
zip_add.linkLibC();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&zip_add.step);
}
diff --git a/test/standalone/load_dynamic_library/build.zig b/test/standalone/load_dynamic_library/build.zig
index 44fc37893c..06a5424a8d 100644
--- a/test/standalone/load_dynamic_library/build.zig
+++ b/test/standalone/load_dynamic_library/build.zig
@@ -1,8 +1,19 @@
const std = @import("std");
+const builtin = @import("builtin");
pub fn build(b: *std.Build) void {
- const target = b.standardTargetOptions(.{});
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
+
+ const ok = (builtin.os.tag != .wasi and
+ // https://github.com/ziglang/zig/issues/13550
+ (builtin.os.tag != .macos or builtin.cpu.arch != .aarch64) and
+ // https://github.com/ziglang/zig/issues/13686
+ (builtin.os.tag != .windows or builtin.cpu.arch != .aarch64));
+ if (!ok) return;
const lib = b.addSharedLibrary(.{
.name = "add",
@@ -19,9 +30,10 @@ pub fn build(b: *std.Build) void {
.target = target,
});
- const run = main.run();
+ const run = b.addRunArtifact(main);
run.addArtifactArg(lib);
+ run.skip_foreign_checks = true;
+ run.expectExitCode(0);
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/load_dynamic_library/main.zig b/test/standalone/load_dynamic_library/main.zig
index baf47c23ad..b47ea8a81f 100644
--- a/test/standalone/load_dynamic_library/main.zig
+++ b/test/standalone/load_dynamic_library/main.zig
@@ -11,11 +11,7 @@ pub fn main() !void {
var lib = try std.DynLib.open(dynlib_name);
defer lib.close();
- const Add = switch (@import("builtin").zig_backend) {
- .stage1 => fn (i32, i32) callconv(.C) i32,
- else => *const fn (i32, i32) callconv(.C) i32,
- };
-
+ const Add = *const fn (i32, i32) callconv(.C) i32;
const addFn = lib.lookup(Add, "add") orelse return error.SymbolNotFound;
const result = addFn(12, 34);
diff --git a/test/standalone/main_pkg_path/build.zig b/test/standalone/main_pkg_path/build.zig
index f9919d5ab5..a4dd301c43 100644
--- a/test/standalone/main_pkg_path/build.zig
+++ b/test/standalone/main_pkg_path/build.zig
@@ -1,11 +1,13 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
const test_exe = b.addTest(.{
.root_source_file = .{ .path = "a/test.zig" },
});
test_exe.setMainPkgPath(".");
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(&test_exe.step);
+ test_step.dependOn(&test_exe.run().step);
}
diff --git a/test/standalone/mix_c_files/build.zig b/test/standalone/mix_c_files/build.zig
index f2dfb2093f..0ea585e4e0 100644
--- a/test/standalone/mix_c_files/build.zig
+++ b/test/standalone/mix_c_files/build.zig
@@ -1,34 +1,28 @@
const std = @import("std");
-const builtin = @import("builtin");
-const CrossTarget = std.zig.CrossTarget;
-
-// TODO integrate this with the std.Build executor API
-fn isRunnableTarget(t: CrossTarget) bool {
- if (t.isNative()) return true;
-
- return (t.getOsTag() == builtin.os.tag and
- t.getCpuArch() == builtin.cpu.arch);
-}
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+ add(b, test_step, .Debug);
+ add(b, test_step, .ReleaseFast);
+ add(b, test_step, .ReleaseSmall);
+ add(b, test_step, .ReleaseSafe);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
const exe = b.addExecutable(.{
.name = "test",
.root_source_file = .{ .path = "main.zig" },
.optimize = optimize,
- .target = target,
});
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c11"});
exe.linkLibC();
b.default_step.dependOn(&exe.step);
- const test_step = b.step("test", "Test the program");
- if (isRunnableTarget(target)) {
- const run_cmd = exe.run();
- test_step.dependOn(&run_cmd.step);
- } else {
- test_step.dependOn(&exe.step);
- }
+ const run_cmd = b.addRunArtifact(exe);
+ run_cmd.skip_foreign_checks = true;
+ run_cmd.expectExitCode(0);
+
+ test_step.dependOn(&run_cmd.step);
}
diff --git a/test/standalone/mix_o_files/build.zig b/test/standalone/mix_o_files/build.zig
index 2708343aa5..17ce55a8aa 100644
--- a/test/standalone/mix_o_files/build.zig
+++ b/test/standalone/mix_o_files/build.zig
@@ -1,18 +1,23 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
const obj = b.addObject(.{
.name = "base64",
.root_source_file = .{ .path = "base64.zig" },
.optimize = optimize,
- .target = .{},
+ .target = target,
});
const exe = b.addExecutable(.{
.name = "test",
.optimize = optimize,
+ .target = target,
});
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.addObject(obj);
@@ -22,6 +27,5 @@ pub fn build(b: *std.Build) void {
const run_cmd = exe.run();
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&run_cmd.step);
}
diff --git a/test/standalone/options/build.zig b/test/standalone/options/build.zig
index 3f1e823359..5e894102a7 100644
--- a/test/standalone/options/build.zig
+++ b/test/standalone/options/build.zig
@@ -20,5 +20,5 @@ pub fn build(b: *std.Build) void {
options.addOption([]const u8, "string", b.option([]const u8, "string", "s").?);
const test_step = b.step("test", "Run unit tests");
- test_step.dependOn(&main.step);
+ test_step.dependOn(&main.run().step);
}
diff --git a/test/standalone/pie/build.zig b/test/standalone/pie/build.zig
index d51ea27328..e7ef5f97cc 100644
--- a/test/standalone/pie/build.zig
+++ b/test/standalone/pie/build.zig
@@ -1,14 +1,24 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{
+ .os_tag = .linux,
+ .cpu_arch = .x86_64,
+ };
+
const main = b.addTest(.{
.root_source_file = .{ .path = "main.zig" },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
+ .target = target,
});
main.pie = true;
- const test_step = b.step("test", "Test the program");
- test_step.dependOn(&main.step);
+ const run = main.run();
+ run.skip_foreign_checks = true;
- b.default_step.dependOn(test_step);
+ test_step.dependOn(&run.step);
}
diff --git a/test/standalone/pkg_import/build.zig b/test/standalone/pkg_import/build.zig
index 5ea6c90af7..42799ab896 100644
--- a/test/standalone/pkg_import/build.zig
+++ b/test/standalone/pkg_import/build.zig
@@ -1,7 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
const exe = b.addExecutable(.{
.name = "test",
@@ -12,6 +15,5 @@ pub fn build(b: *std.Build) void {
const run = exe.run();
- const test_step = b.step("test", "Test it");
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/shared_library/build.zig b/test/standalone/shared_library/build.zig
index 91f7c8a06a..63370af0cc 100644
--- a/test/standalone/shared_library/build.zig
+++ b/test/standalone/shared_library/build.zig
@@ -1,8 +1,11 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
- const target = b.standardTargetOptions(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
const lib = b.addSharedLibrary(.{
.name = "mathtest",
.root_source_file = .{ .path = "mathtest.zig" },
@@ -20,10 +23,7 @@ pub fn build(b: *std.Build) void {
exe.linkLibrary(lib);
exe.linkSystemLibrary("c");
- b.default_step.dependOn(&exe.step);
-
const run_cmd = exe.run();
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&run_cmd.step);
}
diff --git a/test/standalone/sigpipe/build.zig b/test/standalone/sigpipe/build.zig
index 763df5fe46..6f50a86d68 100644
--- a/test/standalone/sigpipe/build.zig
+++ b/test/standalone/sigpipe/build.zig
@@ -2,7 +2,16 @@ const std = @import("std");
const os = std.os;
pub fn build(b: *std.build.Builder) !void {
- const test_step = b.step("test", "Run the tests");
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ // TODO: signal handling code has no business being in a build script.
+ // This logic needs to move to a file called parent.zig, which is
+ // added as an executable.
+
+ //if (!std.os.have_sigpipe_support) {
+ // return;
+ //}
// This test runs "breakpipe" as a child process and that process
// depends on inheriting a SIGPIPE disposition of "default".
@@ -23,12 +32,12 @@ pub fn build(b: *std.build.Builder) !void {
.root_source_file = .{ .path = "breakpipe.zig" },
});
exe.addOptions("build_options", options);
- const run = exe.run();
+ const run = b.addRunArtifact(exe);
if (keep_sigpipe) {
- run.expected_term = .{ .Signal = std.os.SIG.PIPE };
+ run.addCheck(.{ .expect_term = .{ .Signal = std.os.SIG.PIPE } });
} else {
- run.stdout_action = .{ .expect_exact = "BrokenPipe\n" };
- run.expected_term = .{ .Exited = 123 };
+ run.addCheck(.{ .expect_stdout_exact = "BrokenPipe\n" });
+ run.addCheck(.{ .expect_term = .{ .Exited = 123 } });
}
test_step.dependOn(&run.step);
}
diff --git a/test/standalone/static_c_lib/build.zig b/test/standalone/static_c_lib/build.zig
index 9937888843..794b813b75 100644
--- a/test/standalone/static_c_lib/build.zig
+++ b/test/standalone/static_c_lib/build.zig
@@ -1,7 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
const foo = b.addStaticLibrary(.{
.name = "foo",
@@ -18,6 +21,5 @@ pub fn build(b: *std.Build) void {
test_exe.linkLibrary(foo);
test_exe.addIncludePath(".");
- const test_step = b.step("test", "Test it");
- test_step.dependOn(&test_exe.step);
+ test_step.dependOn(&test_exe.run().step);
}
diff --git a/test/standalone/test_runner_module_imports/build.zig b/test/standalone/test_runner_module_imports/build.zig
index 973365e495..73c5536dc6 100644
--- a/test/standalone/test_runner_module_imports/build.zig
+++ b/test/standalone/test_runner_module_imports/build.zig
@@ -15,5 +15,5 @@ pub fn build(b: *std.Build) void {
t.addModule("module2", module2);
const test_step = b.step("test", "Run unit tests");
- test_step.dependOn(&t.step);
+ test_step.dependOn(&t.run().step);
}
diff --git a/test/standalone/test_runner_path/build.zig b/test/standalone/test_runner_path/build.zig
index f073c55d4a..ce5b668054 100644
--- a/test/standalone/test_runner_path/build.zig
+++ b/test/standalone/test_runner_path/build.zig
@@ -1,14 +1,17 @@
const std = @import("std");
+pub const requires_stage2 = true;
+
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test the program");
+ b.default_step = test_step;
+
const test_exe = b.addTest(.{
.root_source_file = .{ .path = "test.zig" },
- .kind = .test_exe,
});
test_exe.test_runner = "test_runner.zig";
const test_run = test_exe.run();
- const test_step = b.step("test", "Test the program");
test_step.dependOn(&test_run.step);
}
diff --git a/test/standalone/test_runner_path/test_runner.zig b/test/standalone/test_runner_path/test_runner.zig
index f49ff55cae..2139ea8f68 100644
--- a/test/standalone/test_runner_path/test_runner.zig
+++ b/test/standalone/test_runner_path/test_runner.zig
@@ -1,51 +1,19 @@
const std = @import("std");
-const io = std.io;
const builtin = @import("builtin");
-pub const io_mode: io.Mode = builtin.test_io_mode;
-
pub fn main() void {
- const test_fn_list = builtin.test_functions;
var ok_count: usize = 0;
var skip_count: usize = 0;
var fail_count: usize = 0;
- var async_frame_buffer: []align(std.Target.stack_align) u8 = undefined;
- // TODO this is on the next line (using `undefined` above) because otherwise zig incorrectly
- // ignores the alignment of the slice.
- async_frame_buffer = &[_]u8{};
-
- for (test_fn_list) |test_fn| {
- const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
- .evented => blk: {
- if (async_frame_buffer.len < size) {
- std.heap.page_allocator.free(async_frame_buffer);
- async_frame_buffer = std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size) catch @panic("out of memory");
- }
- const casted_fn = @ptrCast(fn () callconv(.Async) anyerror!void, test_fn.func);
- break :blk await @asyncCall(async_frame_buffer, {}, casted_fn, .{});
- },
- .blocking => {
- skip_count += 1;
- continue;
- },
- } else test_fn.func();
- if (result) |_| {
+ for (builtin.test_functions) |test_fn| {
+ if (test_fn.func()) |_| {
ok_count += 1;
} else |err| switch (err) {
- error.SkipZigTest => {
- skip_count += 1;
- },
- else => {
- fail_count += 1;
- },
+ error.SkipZigTest => skip_count += 1,
+ else => fail_count += 1,
}
}
- if (ok_count == test_fn_list.len) {
- std.debug.print("All {d} tests passed.\n", .{ok_count});
- } else {
- std.debug.print("{d} passed; {d} skipped; {d} failed.\n", .{ ok_count, skip_count, fail_count });
- }
if (ok_count != 1 or skip_count != 1 or fail_count != 1) {
std.process.exit(1);
}
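The simplified runner above exits non-zero unless it observes exactly one pass, one skip, and one failure, which implies a companion test.zig along these lines (hypothetical; the actual test file is not touched by this diff):

test "passes" {}

test "skips" {
    return error.SkipZigTest;
}

test "fails" {
    return error.IntentionalFailure;
}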
diff --git a/test/standalone/use_alias/build.zig b/test/standalone/use_alias/build.zig
index 89e07efb22..db47fe6692 100644
--- a/test/standalone/use_alias/build.zig
+++ b/test/standalone/use_alias/build.zig
@@ -1,12 +1,16 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+
const main = b.addTest(.{
.root_source_file = .{ .path = "main.zig" },
- .optimize = b.standardOptimizeOption(.{}),
+ .optimize = optimize,
});
main.addIncludePath(".");
- const test_step = b.step("test", "Test it");
- test_step.dependOn(&main.step);
+ test_step.dependOn(&main.run().step);
}
diff --git a/test/standalone/windows_spawn/build.zig b/test/standalone/windows_spawn/build.zig
index 3ebde5a50c..6c865f0a9f 100644
--- a/test/standalone/windows_spawn/build.zig
+++ b/test/standalone/windows_spawn/build.zig
@@ -1,23 +1,33 @@
const std = @import("std");
+const builtin = @import("builtin");
pub fn build(b: *std.Build) void {
- const optimize = b.standardOptimizeOption(.{});
+ const test_step = b.step("test", "Test it");
+ b.default_step = test_step;
+
+ const optimize: std.builtin.OptimizeMode = .Debug;
+ const target: std.zig.CrossTarget = .{};
+
+ if (builtin.os.tag != .windows) return;
const hello = b.addExecutable(.{
.name = "hello",
.root_source_file = .{ .path = "hello.zig" },
.optimize = optimize,
+ .target = target,
});
const main = b.addExecutable(.{
.name = "main",
.root_source_file = .{ .path = "main.zig" },
.optimize = optimize,
+ .target = target,
});
- const run = main.run();
+ const run = b.addRunArtifact(main);
run.addArtifactArg(hello);
+ run.expectExitCode(0);
+ run.skip_foreign_checks = true;
- const test_step = b.step("test", "Test it");
test_step.dependOn(&run.step);
}
diff --git a/test/tests.zig b/test/tests.zig
index 035311372f..3166fbc14a 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -1,16 +1,9 @@
const std = @import("std");
const builtin = @import("builtin");
-const debug = std.debug;
+const assert = std.debug.assert;
const CrossTarget = std.zig.CrossTarget;
-const io = std.io;
-const fs = std.fs;
const mem = std.mem;
-const fmt = std.fmt;
-const ArrayList = std.ArrayList;
const OptimizeMode = std.builtin.OptimizeMode;
-const CompileStep = std.Build.CompileStep;
-const Allocator = mem.Allocator;
-const ExecError = std.Build.ExecError;
const Step = std.Build.Step;
// Cases
@@ -20,13 +13,13 @@ const stack_traces = @import("stack_traces.zig");
const assemble_and_link = @import("assemble_and_link.zig");
const translate_c = @import("translate_c.zig");
const run_translated_c = @import("run_translated_c.zig");
-const gen_h = @import("gen_h.zig");
const link = @import("link.zig");
// Implementations
pub const TranslateCContext = @import("src/translate_c.zig").TranslateCContext;
pub const RunTranslatedCContext = @import("src/run_translated_c.zig").RunTranslatedCContext;
-pub const CompareOutputContext = @import("src/compare_output.zig").CompareOutputContext;
+pub const CompareOutputContext = @import("src/CompareOutput.zig");
+pub const StackTracesContext = @import("src/StackTrace.zig");
const TestTarget = struct {
target: CrossTarget = @as(CrossTarget, .{}),
@@ -460,835 +453,6 @@ const test_targets = blk: {
};
};
-const max_stdout_size = 1 * 1024 * 1024; // 1 MB
-
-pub fn addCompareOutputTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- cases.* = CompareOutputContext{
- .b = b,
- .step = b.step("test-compare-output", "Run the compare output tests"),
- .test_index = 0,
- .test_filter = test_filter,
- .optimize_modes = optimize_modes,
- };
-
- compare_output.addCases(cases);
-
- return cases.step;
-}
-
-pub fn addStackTraceTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
- const cases = b.allocator.create(StackTracesContext) catch unreachable;
- cases.* = StackTracesContext{
- .b = b,
- .step = b.step("test-stack-traces", "Run the stack trace tests"),
- .test_index = 0,
- .test_filter = test_filter,
- .optimize_modes = optimize_modes,
- };
-
- stack_traces.addCases(cases);
-
- return cases.step;
-}
-
-pub fn addStandaloneTests(
- b: *std.Build,
- test_filter: ?[]const u8,
- optimize_modes: []const OptimizeMode,
- skip_non_native: bool,
- enable_macos_sdk: bool,
- target: std.zig.CrossTarget,
- omit_stage2: bool,
- enable_darling: bool,
- enable_qemu: bool,
- enable_rosetta: bool,
- enable_wasmtime: bool,
- enable_wine: bool,
- enable_symlinks_windows: bool,
-) *Step {
- const cases = b.allocator.create(StandaloneContext) catch unreachable;
- cases.* = StandaloneContext{
- .b = b,
- .step = b.step("test-standalone", "Run the standalone tests"),
- .test_index = 0,
- .test_filter = test_filter,
- .optimize_modes = optimize_modes,
- .skip_non_native = skip_non_native,
- .enable_macos_sdk = enable_macos_sdk,
- .target = target,
- .omit_stage2 = omit_stage2,
- .enable_darling = enable_darling,
- .enable_qemu = enable_qemu,
- .enable_rosetta = enable_rosetta,
- .enable_wasmtime = enable_wasmtime,
- .enable_wine = enable_wine,
- .enable_symlinks_windows = enable_symlinks_windows,
- };
-
- standalone.addCases(cases);
-
- return cases.step;
-}
-
-pub fn addLinkTests(
- b: *std.Build,
- test_filter: ?[]const u8,
- optimize_modes: []const OptimizeMode,
- enable_macos_sdk: bool,
- omit_stage2: bool,
- enable_symlinks_windows: bool,
-) *Step {
- const cases = b.allocator.create(StandaloneContext) catch unreachable;
- cases.* = StandaloneContext{
- .b = b,
- .step = b.step("test-link", "Run the linker tests"),
- .test_index = 0,
- .test_filter = test_filter,
- .optimize_modes = optimize_modes,
- .skip_non_native = true,
- .enable_macos_sdk = enable_macos_sdk,
- .target = .{},
- .omit_stage2 = omit_stage2,
- .enable_symlinks_windows = enable_symlinks_windows,
- };
- link.addCases(cases);
- return cases.step;
-}
-
-pub fn addCliTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
- _ = test_filter;
- _ = optimize_modes;
- const step = b.step("test-cli", "Test the command line interface");
-
- const exe = b.addExecutable(.{
- .name = "test-cli",
- .root_source_file = .{ .path = "test/cli.zig" },
- .target = .{},
- .optimize = .Debug,
- });
- const run_cmd = exe.run();
- run_cmd.addArgs(&[_][]const u8{
- fs.realpathAlloc(b.allocator, b.zig_exe) catch unreachable,
- b.pathFromRoot(b.cache_root.path orelse "."),
- });
-
- step.dependOn(&run_cmd.step);
- return step;
-}
-
-pub fn addAssembleAndLinkTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- cases.* = CompareOutputContext{
- .b = b,
- .step = b.step("test-asm-link", "Run the assemble and link tests"),
- .test_index = 0,
- .test_filter = test_filter,
- .optimize_modes = optimize_modes,
- };
-
- assemble_and_link.addCases(cases);
-
- return cases.step;
-}
-
-pub fn addTranslateCTests(b: *std.Build, test_filter: ?[]const u8) *Step {
- const cases = b.allocator.create(TranslateCContext) catch unreachable;
- cases.* = TranslateCContext{
- .b = b,
- .step = b.step("test-translate-c", "Run the C translation tests"),
- .test_index = 0,
- .test_filter = test_filter,
- };
-
- translate_c.addCases(cases);
-
- return cases.step;
-}
-
-pub fn addRunTranslatedCTests(
- b: *std.Build,
- test_filter: ?[]const u8,
- target: std.zig.CrossTarget,
-) *Step {
- const cases = b.allocator.create(RunTranslatedCContext) catch unreachable;
- cases.* = .{
- .b = b,
- .step = b.step("test-run-translated-c", "Run the Run-Translated-C tests"),
- .test_index = 0,
- .test_filter = test_filter,
- .target = target,
- };
-
- run_translated_c.addCases(cases);
-
- return cases.step;
-}
-
-pub fn addGenHTests(b: *std.Build, test_filter: ?[]const u8) *Step {
- const cases = b.allocator.create(GenHContext) catch unreachable;
- cases.* = GenHContext{
- .b = b,
- .step = b.step("test-gen-h", "Run the C header file generation tests"),
- .test_index = 0,
- .test_filter = test_filter,
- };
-
- gen_h.addCases(cases);
-
- return cases.step;
-}
-
-pub fn addPkgTests(
- b: *std.Build,
- test_filter: ?[]const u8,
- root_src: []const u8,
- name: []const u8,
- desc: []const u8,
- optimize_modes: []const OptimizeMode,
- skip_single_threaded: bool,
- skip_non_native: bool,
- skip_libc: bool,
- skip_stage1: bool,
- skip_stage2: bool,
-) *Step {
- const step = b.step(b.fmt("test-{s}", .{name}), desc);
-
- for (test_targets) |test_target| {
- if (skip_non_native and !test_target.target.isNative())
- continue;
-
- if (skip_libc and test_target.link_libc)
- continue;
-
- if (test_target.link_libc and test_target.target.getOs().requiresLibC()) {
- // This would be a redundant test.
- continue;
- }
-
- if (skip_single_threaded and test_target.single_threaded)
- continue;
-
- if (test_target.disable_native and
- test_target.target.getOsTag() == builtin.os.tag and
- test_target.target.getCpuArch() == builtin.cpu.arch)
- {
- continue;
- }
-
- if (test_target.backend) |backend| switch (backend) {
- .stage1 => if (skip_stage1) continue,
- .stage2_llvm => {},
- else => if (skip_stage2) continue,
- };
-
- const want_this_mode = for (optimize_modes) |m| {
- if (m == test_target.optimize_mode) break true;
- } else false;
- if (!want_this_mode) continue;
-
- const libc_prefix = if (test_target.target.getOs().requiresLibC())
- ""
- else if (test_target.link_libc)
- "c"
- else
- "bare";
-
- const triple_prefix = test_target.target.zigTriple(b.allocator) catch unreachable;
-
- const these_tests = b.addTest(.{
- .root_source_file = .{ .path = root_src },
- .optimize = test_target.optimize_mode,
- .target = test_target.target,
- });
- const single_threaded_txt = if (test_target.single_threaded) "single" else "multi";
- const backend_txt = if (test_target.backend) |backend| @tagName(backend) else "default";
- these_tests.setNamePrefix(b.fmt("{s}-{s}-{s}-{s}-{s}-{s} ", .{
- name,
- triple_prefix,
- @tagName(test_target.optimize_mode),
- libc_prefix,
- single_threaded_txt,
- backend_txt,
- }));
- these_tests.single_threaded = test_target.single_threaded;
- these_tests.setFilter(test_filter);
- if (test_target.link_libc) {
- these_tests.linkSystemLibrary("c");
- }
- these_tests.overrideZigLibDir("lib");
- these_tests.addIncludePath("test");
- if (test_target.backend) |backend| switch (backend) {
- .stage1 => {
- @panic("stage1 testing requested");
- },
- .stage2_llvm => {
- these_tests.use_llvm = true;
- },
- .stage2_c => {
- these_tests.use_llvm = false;
- },
- else => {
- these_tests.use_llvm = false;
- // TODO: force self-hosted linkers to avoid LLD creeping in
- // until the auto-select mechanism deems them worthy
- these_tests.use_lld = false;
- },
- };
-
- step.dependOn(&these_tests.step);
- }
- return step;
-}
-
-pub const StackTracesContext = struct {
- b: *std.Build,
- step: *Step,
- test_index: usize,
- test_filter: ?[]const u8,
- optimize_modes: []const OptimizeMode,
-
- const Expect = [@typeInfo(OptimizeMode).Enum.fields.len][]const u8;
-
- pub fn addCase(self: *StackTracesContext, config: anytype) void {
- if (@hasField(@TypeOf(config), "exclude")) {
- if (config.exclude.exclude()) return;
- }
- if (@hasField(@TypeOf(config), "exclude_arch")) {
- const exclude_arch: []const std.Target.Cpu.Arch = &config.exclude_arch;
- for (exclude_arch) |arch| if (arch == builtin.cpu.arch) return;
- }
- if (@hasField(@TypeOf(config), "exclude_os")) {
- const exclude_os: []const std.Target.Os.Tag = &config.exclude_os;
- for (exclude_os) |os| if (os == builtin.os.tag) return;
- }
- for (self.optimize_modes) |optimize_mode| {
- switch (optimize_mode) {
- .Debug => {
- if (@hasField(@TypeOf(config), "Debug")) {
- self.addExpect(config.name, config.source, optimize_mode, config.Debug);
- }
- },
- .ReleaseSafe => {
- if (@hasField(@TypeOf(config), "ReleaseSafe")) {
- self.addExpect(config.name, config.source, optimize_mode, config.ReleaseSafe);
- }
- },
- .ReleaseFast => {
- if (@hasField(@TypeOf(config), "ReleaseFast")) {
- self.addExpect(config.name, config.source, optimize_mode, config.ReleaseFast);
- }
- },
- .ReleaseSmall => {
- if (@hasField(@TypeOf(config), "ReleaseSmall")) {
- self.addExpect(config.name, config.source, optimize_mode, config.ReleaseSmall);
- }
- },
- }
- }
- }
-
- fn addExpect(
- self: *StackTracesContext,
- name: []const u8,
- source: []const u8,
- optimize_mode: OptimizeMode,
- mode_config: anytype,
- ) void {
- if (@hasField(@TypeOf(mode_config), "exclude")) {
- if (mode_config.exclude.exclude()) return;
- }
- if (@hasField(@TypeOf(mode_config), "exclude_arch")) {
- const exclude_arch: []const std.Target.Cpu.Arch = &mode_config.exclude_arch;
- for (exclude_arch) |arch| if (arch == builtin.cpu.arch) return;
- }
- if (@hasField(@TypeOf(mode_config), "exclude_os")) {
- const exclude_os: []const std.Target.Os.Tag = &mode_config.exclude_os;
- for (exclude_os) |os| if (os == builtin.os.tag) return;
- }
-
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "{s} {s} ({s})", .{
- "stack-trace",
- name,
- @tagName(optimize_mode),
- }) catch unreachable;
- if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
- }
-
- const b = self.b;
- const src_basename = "source.zig";
- const write_src = b.addWriteFile(src_basename, source);
- const exe = b.addExecutable(.{
- .name = "test",
- .root_source_file = write_src.getFileSource(src_basename).?,
- .optimize = optimize_mode,
- .target = .{},
- });
-
- const run_and_compare = RunAndCompareStep.create(
- self,
- exe,
- annotated_case_name,
- optimize_mode,
- mode_config.expect,
- );
-
- self.step.dependOn(&run_and_compare.step);
- }
-
- const RunAndCompareStep = struct {
- pub const base_id = .custom;
-
- step: Step,
- context: *StackTracesContext,
- exe: *CompileStep,
- name: []const u8,
- optimize_mode: OptimizeMode,
- expect_output: []const u8,
- test_index: usize,
-
- pub fn create(
- context: *StackTracesContext,
- exe: *CompileStep,
- name: []const u8,
- optimize_mode: OptimizeMode,
- expect_output: []const u8,
- ) *RunAndCompareStep {
- const allocator = context.b.allocator;
- const ptr = allocator.create(RunAndCompareStep) catch unreachable;
- ptr.* = RunAndCompareStep{
- .step = Step.init(.custom, "StackTraceCompareOutputStep", allocator, make),
- .context = context,
- .exe = exe,
- .name = name,
- .optimize_mode = optimize_mode,
- .expect_output = expect_output,
- .test_index = context.test_index,
- };
- ptr.step.dependOn(&exe.step);
- context.test_index += 1;
- return ptr;
- }
-
- fn make(step: *Step) !void {
- const self = @fieldParentPtr(RunAndCompareStep, "step", step);
- const b = self.context.b;
-
- const full_exe_path = self.exe.getOutputSource().getPath(b);
- var args = ArrayList([]const u8).init(b.allocator);
- defer args.deinit();
- args.append(full_exe_path) catch unreachable;
-
- std.debug.print("Test {d}/{d} {s}...", .{ self.test_index + 1, self.context.test_index, self.name });
-
- if (!std.process.can_spawn) {
- const cmd = try std.mem.join(b.allocator, " ", args.items);
- std.debug.print("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{ @tagName(builtin.os.tag), cmd });
- b.allocator.free(cmd);
- return ExecError.ExecNotSupported;
- }
-
- var child = std.ChildProcess.init(args.items, b.allocator);
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Pipe;
- child.stderr_behavior = .Pipe;
- child.env_map = b.env_map;
-
- if (b.verbose) {
- printInvocation(args.items);
- }
- child.spawn() catch |err| debug.panic("Unable to spawn {s}: {s}\n", .{ full_exe_path, @errorName(err) });
-
- const stdout = child.stdout.?.reader().readAllAlloc(b.allocator, max_stdout_size) catch unreachable;
- defer b.allocator.free(stdout);
- const stderrFull = child.stderr.?.reader().readAllAlloc(b.allocator, max_stdout_size) catch unreachable;
- defer b.allocator.free(stderrFull);
- var stderr = stderrFull;
-
- const term = child.wait() catch |err| {
- debug.panic("Unable to spawn {s}: {s}\n", .{ full_exe_path, @errorName(err) });
- };
-
- switch (term) {
- .Exited => |code| {
- const expect_code: u32 = 1;
- if (code != expect_code) {
- std.debug.print("Process {s} exited with error code {d} but expected code {d}\n", .{
- full_exe_path,
- code,
- expect_code,
- });
- printInvocation(args.items);
- return error.TestFailed;
- }
- },
- .Signal => |signum| {
- std.debug.print("Process {s} terminated on signal {d}\n", .{ full_exe_path, signum });
- printInvocation(args.items);
- return error.TestFailed;
- },
- .Stopped => |signum| {
- std.debug.print("Process {s} stopped on signal {d}\n", .{ full_exe_path, signum });
- printInvocation(args.items);
- return error.TestFailed;
- },
- .Unknown => |code| {
- std.debug.print("Process {s} terminated unexpectedly with error code {d}\n", .{ full_exe_path, code });
- printInvocation(args.items);
- return error.TestFailed;
- },
- }
-
- // process result
- // - keep only basename of source file path
- // - replace address with symbolic string
- // - replace function name with symbolic string when optimize_mode != .Debug
- // - skip empty lines
- const got: []const u8 = got_result: {
- var buf = ArrayList(u8).init(b.allocator);
- defer buf.deinit();
- if (stderr.len != 0 and stderr[stderr.len - 1] == '\n') stderr = stderr[0 .. stderr.len - 1];
- var it = mem.split(u8, stderr, "\n");
- process_lines: while (it.next()) |line| {
- if (line.len == 0) continue;
-
- // offset search past `[drive]:` on windows
- var pos: usize = if (builtin.os.tag == .windows) 2 else 0;
- // locate delims/anchor
- const delims = [_][]const u8{ ":", ":", ":", " in ", "(", ")" };
- var marks = [_]usize{0} ** delims.len;
- for (delims, 0..) |delim, i| {
- marks[i] = mem.indexOfPos(u8, line, pos, delim) orelse {
- // unexpected pattern: emit raw line and cont
- try buf.appendSlice(line);
- try buf.appendSlice("\n");
- continue :process_lines;
- };
- pos = marks[i] + delim.len;
- }
- // locate source basename
- pos = mem.lastIndexOfScalar(u8, line[0..marks[0]], fs.path.sep) orelse {
- // unexpected pattern: emit raw line and cont
- try buf.appendSlice(line);
- try buf.appendSlice("\n");
- continue :process_lines;
- };
- // end processing if source basename changes
- if (!mem.eql(u8, "source.zig", line[pos + 1 .. marks[0]])) break;
- // emit substituted line
- try buf.appendSlice(line[pos + 1 .. marks[2] + delims[2].len]);
- try buf.appendSlice(" [address]");
- if (self.optimize_mode == .Debug) {
- // On certain platforms (windows) or possibly depending on how we choose to link main
- // the object file extension may be present so we simply strip any extension.
- if (mem.indexOfScalar(u8, line[marks[4]..marks[5]], '.')) |idot| {
- try buf.appendSlice(line[marks[3] .. marks[4] + idot]);
- try buf.appendSlice(line[marks[5]..]);
- } else {
- try buf.appendSlice(line[marks[3]..]);
- }
- } else {
- try buf.appendSlice(line[marks[3] .. marks[3] + delims[3].len]);
- try buf.appendSlice("[function]");
- }
- try buf.appendSlice("\n");
- }
- break :got_result try buf.toOwnedSlice();
- };
-
- if (!mem.eql(u8, self.expect_output, got)) {
- std.debug.print(
- \\
- \\========= Expected this output: =========
- \\{s}
- \\================================================
- \\{s}
- \\
- , .{ self.expect_output, got });
- return error.TestFailed;
- }
- std.debug.print("OK\n", .{});
- }
- };
-};
-
-pub const StandaloneContext = struct {
- b: *std.Build,
- step: *Step,
- test_index: usize,
- test_filter: ?[]const u8,
- optimize_modes: []const OptimizeMode,
- skip_non_native: bool,
- enable_macos_sdk: bool,
- target: std.zig.CrossTarget,
- omit_stage2: bool,
- enable_darling: bool = false,
- enable_qemu: bool = false,
- enable_rosetta: bool = false,
- enable_wasmtime: bool = false,
- enable_wine: bool = false,
- enable_symlinks_windows: bool,
-
- pub fn addC(self: *StandaloneContext, root_src: []const u8) void {
- self.addAllArgs(root_src, true);
- }
-
- pub fn add(self: *StandaloneContext, root_src: []const u8) void {
- self.addAllArgs(root_src, false);
- }
-
- pub fn addBuildFile(self: *StandaloneContext, build_file: []const u8, features: struct {
- build_modes: bool = false,
- cross_targets: bool = false,
- requires_macos_sdk: bool = false,
- requires_stage2: bool = false,
- use_emulation: bool = false,
- requires_symlinks: bool = false,
- extra_argv: []const []const u8 = &.{},
- }) void {
- const b = self.b;
-
- if (features.requires_macos_sdk and !self.enable_macos_sdk) return;
- if (features.requires_stage2 and self.omit_stage2) return;
- if (features.requires_symlinks and !self.enable_symlinks_windows and builtin.os.tag == .windows) return;
-
- const annotated_case_name = b.fmt("build {s}", .{build_file});
- if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
- }
-
- var zig_args = ArrayList([]const u8).init(b.allocator);
- const rel_zig_exe = fs.path.relative(b.allocator, b.build_root.path orelse ".", b.zig_exe) catch unreachable;
- zig_args.append(rel_zig_exe) catch unreachable;
- zig_args.append("build") catch unreachable;
-
- zig_args.append("--build-file") catch unreachable;
- zig_args.append(b.pathFromRoot(build_file)) catch unreachable;
-
- zig_args.appendSlice(features.extra_argv) catch unreachable;
-
- zig_args.append("test") catch unreachable;
-
- if (b.verbose) {
- zig_args.append("--verbose") catch unreachable;
- }
-
- if (features.cross_targets and !self.target.isNative()) {
- const target_triple = self.target.zigTriple(b.allocator) catch unreachable;
- const target_arg = fmt.allocPrint(b.allocator, "-Dtarget={s}", .{target_triple}) catch unreachable;
- zig_args.append(target_arg) catch unreachable;
- }
-
- if (features.use_emulation) {
- if (self.enable_darling) {
- zig_args.append("-fdarling") catch unreachable;
- }
- if (self.enable_qemu) {
- zig_args.append("-fqemu") catch unreachable;
- }
- if (self.enable_rosetta) {
- zig_args.append("-frosetta") catch unreachable;
- }
- if (self.enable_wasmtime) {
- zig_args.append("-fwasmtime") catch unreachable;
- }
- if (self.enable_wine) {
- zig_args.append("-fwine") catch unreachable;
- }
- }
-
- const optimize_modes = if (features.build_modes) self.optimize_modes else &[1]OptimizeMode{.Debug};
- for (optimize_modes) |optimize_mode| {
- const arg = switch (optimize_mode) {
- .Debug => "",
- .ReleaseFast => "-Doptimize=ReleaseFast",
- .ReleaseSafe => "-Doptimize=ReleaseSafe",
- .ReleaseSmall => "-Doptimize=ReleaseSmall",
- };
- const zig_args_base_len = zig_args.items.len;
- if (arg.len > 0)
- zig_args.append(arg) catch unreachable;
- defer zig_args.resize(zig_args_base_len) catch unreachable;
-
- const run_cmd = b.addSystemCommand(zig_args.items);
- const log_step = b.addLog("PASS {s} ({s})", .{ annotated_case_name, @tagName(optimize_mode) });
- log_step.step.dependOn(&run_cmd.step);
-
- self.step.dependOn(&log_step.step);
- }
- }
-
- pub fn addAllArgs(self: *StandaloneContext, root_src: []const u8, link_libc: bool) void {
- const b = self.b;
-
- for (self.optimize_modes) |optimize| {
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "build {s} ({s})", .{
- root_src,
- @tagName(optimize),
- }) catch unreachable;
- if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
- }
-
- const exe = b.addExecutable(.{
- .name = "test",
- .root_source_file = .{ .path = root_src },
- .optimize = optimize,
- .target = .{},
- });
- if (link_libc) {
- exe.linkSystemLibrary("c");
- }
-
- const log_step = b.addLog("PASS {s}", .{annotated_case_name});
- log_step.step.dependOn(&exe.step);
-
- self.step.dependOn(&log_step.step);
- }
- }
-};
-
-pub const GenHContext = struct {
- b: *std.Build,
- step: *Step,
- test_index: usize,
- test_filter: ?[]const u8,
-
- const TestCase = struct {
- name: []const u8,
- sources: ArrayList(SourceFile),
- expected_lines: ArrayList([]const u8),
-
- const SourceFile = struct {
- filename: []const u8,
- source: []const u8,
- };
-
- pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
- self.sources.append(SourceFile{
- .filename = filename,
- .source = source,
- }) catch unreachable;
- }
-
- pub fn addExpectedLine(self: *TestCase, text: []const u8) void {
- self.expected_lines.append(text) catch unreachable;
- }
- };
-
- const GenHCmpOutputStep = struct {
- step: Step,
- context: *GenHContext,
- obj: *CompileStep,
- name: []const u8,
- test_index: usize,
- case: *const TestCase,
-
- pub fn create(
- context: *GenHContext,
- obj: *CompileStep,
- name: []const u8,
- case: *const TestCase,
- ) *GenHCmpOutputStep {
- const allocator = context.b.allocator;
- const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
- ptr.* = GenHCmpOutputStep{
- .step = Step.init(.Custom, "ParseCCmpOutput", allocator, make),
- .context = context,
- .obj = obj,
- .name = name,
- .test_index = context.test_index,
- .case = case,
- };
- ptr.step.dependOn(&obj.step);
- context.test_index += 1;
- return ptr;
- }
-
- fn make(step: *Step) !void {
- const self = @fieldParentPtr(GenHCmpOutputStep, "step", step);
- const b = self.context.b;
-
- std.debug.print("Test {d}/{d} {s}...", .{ self.test_index + 1, self.context.test_index, self.name });
-
- const full_h_path = self.obj.getOutputHPath();
- const actual_h = try io.readFileAlloc(b.allocator, full_h_path);
-
- for (self.case.expected_lines.items) |expected_line| {
- if (mem.indexOf(u8, actual_h, expected_line) == null) {
- std.debug.print(
- \\
- \\========= Expected this output: ================
- \\{s}
- \\========= But found: ===========================
- \\{s}
- \\
- , .{ expected_line, actual_h });
- return error.TestFailed;
- }
- }
- std.debug.print("OK\n", .{});
- }
- };
-
- pub fn create(
- self: *GenHContext,
- filename: []const u8,
- name: []const u8,
- source: []const u8,
- expected_lines: []const []const u8,
- ) *TestCase {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- tc.* = TestCase{
- .name = name,
- .sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
- .expected_lines = ArrayList([]const u8).init(self.b.allocator),
- };
-
- tc.addSourceFile(filename, source);
- var arg_i: usize = 0;
- while (arg_i < expected_lines.len) : (arg_i += 1) {
- tc.addExpectedLine(expected_lines[arg_i]);
- }
- return tc;
- }
-
- pub fn add(self: *GenHContext, name: []const u8, source: []const u8, expected_lines: []const []const u8) void {
- const tc = self.create("test.zig", name, source, expected_lines);
- self.addCase(tc);
- }
-
- pub fn addCase(self: *GenHContext, case: *const TestCase) void {
- const b = self.b;
-
- const optimize_mode = std.builtin.OptimizeMode.Debug;
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "gen-h {s} ({s})", .{ case.name, @tagName(optimize_mode) }) catch unreachable;
- if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
- }
-
- const write_src = b.addWriteFiles();
- for (case.sources.items) |src_file| {
- write_src.add(src_file.filename, src_file.source);
- }
-
- const obj = b.addObjectFromWriteFileStep("test", write_src, case.sources.items[0].filename);
- obj.setBuildMode(optimize_mode);
-
- const cmp_h = GenHCmpOutputStep.create(self, obj, annotated_case_name, case);
-
- self.step.dependOn(&cmp_h.step);
- }
-};
-
-fn printInvocation(args: []const []const u8) void {
- for (args) |arg| {
- std.debug.print("{s} ", .{arg});
- }
- std.debug.print("\n", .{});
-}
-
const c_abi_targets = [_]CrossTarget{
.{},
.{
@@ -1348,42 +512,582 @@ const c_abi_targets = [_]CrossTarget{
},
};
+pub fn addCompareOutputTests(
+ b: *std.Build,
+ test_filter: ?[]const u8,
+ optimize_modes: []const OptimizeMode,
+) *Step {
+ const cases = b.allocator.create(CompareOutputContext) catch @panic("OOM");
+ cases.* = CompareOutputContext{
+ .b = b,
+ .step = b.step("test-compare-output", "Run the compare output tests"),
+ .test_index = 0,
+ .test_filter = test_filter,
+ .optimize_modes = optimize_modes,
+ };
+
+ compare_output.addCases(cases);
+
+ return cases.step;
+}
+
+pub fn addStackTraceTests(
+ b: *std.Build,
+ test_filter: ?[]const u8,
+ optimize_modes: []const OptimizeMode,
+) *Step {
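+    // Helper program used by the stack-trace cases to verify their output.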
+ const check_exe = b.addExecutable(.{
+ .name = "check-stack-trace",
+ .root_source_file = .{ .path = "test/src/check-stack-trace.zig" },
+ .target = .{},
+ .optimize = .Debug,
+ });
+
+ const cases = b.allocator.create(StackTracesContext) catch @panic("OOM");
+ cases.* = .{
+ .b = b,
+ .step = b.step("test-stack-traces", "Run the stack trace tests"),
+ .test_index = 0,
+ .test_filter = test_filter,
+ .optimize_modes = optimize_modes,
+ .check_exe = check_exe,
+ };
+
+ stack_traces.addCases(cases);
+
+ return cases.step;
+}
+
+pub fn addStandaloneTests(
+ b: *std.Build,
+ optimize_modes: []const OptimizeMode,
+ enable_macos_sdk: bool,
+ omit_stage2: bool,
+ enable_symlinks_windows: bool,
+) *Step {
+ const step = b.step("test-standalone", "Run the standalone tests");
+ const omit_symlinks = builtin.os.tag == .windows and !enable_symlinks_windows;
+
+ for (standalone.simple_cases) |case| {
+ for (optimize_modes) |optimize| {
+ if (!case.all_modes and optimize != .Debug) continue;
+ if (case.os_filter) |os_tag| {
+ if (os_tag != builtin.os.tag) continue;
+ }
+
+ if (case.is_exe) {
+ const exe = b.addExecutable(.{
+ .name = std.fs.path.stem(case.src_path),
+ .root_source_file = .{ .path = case.src_path },
+ .optimize = optimize,
+ .target = case.target,
+ });
+ if (case.link_libc) exe.linkLibC();
+
+ step.dependOn(&exe.step);
+ }
+
+ if (case.is_test) {
+ const exe = b.addTest(.{
+ .name = std.fs.path.stem(case.src_path),
+ .root_source_file = .{ .path = case.src_path },
+ .optimize = optimize,
+ .target = case.target,
+ });
+ if (case.link_libc) exe.linkLibC();
+
+ step.dependOn(&exe.run().step);
+ }
+ }
+ }
+
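+    // Each build-file case is imported as an anonymous dependency; its default
+    // build step is added to this step unless its requirements are unmet.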
+ inline for (standalone.build_cases) |case| {
+ const requires_stage2 = @hasDecl(case.import, "requires_stage2") and
+ case.import.requires_stage2;
+ const requires_symlinks = @hasDecl(case.import, "requires_symlinks") and
+ case.import.requires_symlinks;
+ const requires_macos_sdk = @hasDecl(case.import, "requires_macos_sdk") and
+ case.import.requires_macos_sdk;
+ const bad =
+ (requires_stage2 and omit_stage2) or
+ (requires_symlinks and omit_symlinks) or
+ (requires_macos_sdk and !enable_macos_sdk);
+ if (!bad) {
+ const dep = b.anonymousDependency(case.build_root, case.import, .{});
+ const dep_step = dep.builder.default_step;
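+            // Strip the leading "test." from the dependency prefix before
+            // folding the remainder into the step name.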
+ assert(mem.startsWith(u8, dep.builder.dep_prefix, "test."));
+ const dep_prefix_adjusted = dep.builder.dep_prefix["test.".len..];
+ dep_step.name = b.fmt("{s}{s}", .{ dep_prefix_adjusted, dep_step.name });
+ step.dependOn(dep_step);
+ }
+ }
+
+ return step;
+}
+
+pub fn addLinkTests(
+ b: *std.Build,
+ enable_macos_sdk: bool,
+ omit_stage2: bool,
+ enable_symlinks_windows: bool,
+) *Step {
+ const step = b.step("test-link", "Run the linker tests");
+ const omit_symlinks = builtin.os.tag == .windows and !enable_symlinks_windows;
+
+ inline for (link.cases) |case| {
+ const requires_stage2 = @hasDecl(case.import, "requires_stage2") and
+ case.import.requires_stage2;
+ const requires_symlinks = @hasDecl(case.import, "requires_symlinks") and
+ case.import.requires_symlinks;
+ const requires_macos_sdk = @hasDecl(case.import, "requires_macos_sdk") and
+ case.import.requires_macos_sdk;
+ const bad =
+ (requires_stage2 and omit_stage2) or
+ (requires_symlinks and omit_symlinks) or
+ (requires_macos_sdk and !enable_macos_sdk);
+ if (!bad) {
+ const dep = b.anonymousDependency(case.build_root, case.import, .{});
+ const dep_step = dep.builder.default_step;
+ assert(mem.startsWith(u8, dep.builder.dep_prefix, "test."));
+ const dep_prefix_adjusted = dep.builder.dep_prefix["test.".len..];
+ dep_step.name = b.fmt("{s}{s}", .{ dep_prefix_adjusted, dep_step.name });
+ step.dependOn(dep_step);
+ }
+ }
+
+ return step;
+}
+
+pub fn addCliTests(b: *std.Build) *Step {
+ const step = b.step("test-cli", "Test the command line interface");
+ const s = std.fs.path.sep_str;
+
+ {
+ // Test `zig init-lib`.
+ const tmp_path = b.makeTempPath();
+ const init_lib = b.addSystemCommand(&.{ b.zig_exe, "init-lib" });
+ init_lib.cwd = tmp_path;
+ init_lib.setName("zig init-lib");
+ init_lib.expectStdOutEqual("");
+ init_lib.expectStdErrEqual("info: Created build.zig\n" ++
+ "info: Created src" ++ s ++ "main.zig\n" ++
+ "info: Next, try `zig build --help` or `zig build test`\n");
+
+ const run_test = b.addSystemCommand(&.{ b.zig_exe, "build", "test" });
+ run_test.cwd = tmp_path;
+ run_test.setName("zig build test");
+ run_test.expectStdOutEqual("");
+ run_test.step.dependOn(&init_lib.step);
+
+ const cleanup = b.addRemoveDirTree(tmp_path);
+ cleanup.step.dependOn(&run_test.step);
+
+ step.dependOn(&cleanup.step);
+ }
+
+ {
+ // Test `zig init-exe`.
+ const tmp_path = b.makeTempPath();
+ const init_exe = b.addSystemCommand(&.{ b.zig_exe, "init-exe" });
+ init_exe.cwd = tmp_path;
+ init_exe.setName("zig init-exe");
+ init_exe.expectStdOutEqual("");
+ init_exe.expectStdErrEqual("info: Created build.zig\n" ++
+ "info: Created src" ++ s ++ "main.zig\n" ++
+ "info: Next, try `zig build --help` or `zig build run`\n");
+
+ // Test missing output path.
+ const bad_out_arg = "-femit-bin=does" ++ s ++ "not" ++ s ++ "exist" ++ s ++ "foo.exe";
+ const ok_src_arg = "src" ++ s ++ "main.zig";
+ const expected = "error: unable to open output directory 'does" ++ s ++ "not" ++ s ++ "exist': FileNotFound\n";
+ const run_bad = b.addSystemCommand(&.{ b.zig_exe, "build-exe", ok_src_arg, bad_out_arg });
+ run_bad.setName("zig build-exe error message for bad -femit-bin arg");
+ run_bad.expectExitCode(1);
+ run_bad.expectStdErrEqual(expected);
+ run_bad.expectStdOutEqual("");
+ run_bad.step.dependOn(&init_exe.step);
+
+ const run_test = b.addSystemCommand(&.{ b.zig_exe, "build", "test" });
+ run_test.cwd = tmp_path;
+ run_test.setName("zig build test");
+ run_test.expectStdOutEqual("");
+ run_test.step.dependOn(&init_exe.step);
+
+ const run_run = b.addSystemCommand(&.{ b.zig_exe, "build", "run" });
+ run_run.cwd = tmp_path;
+ run_run.setName("zig build run");
+ run_run.expectStdOutEqual("Run `zig build test` to run the tests.\n");
+ run_run.expectStdErrEqual("All your codebase are belong to us.\n");
+ run_run.step.dependOn(&init_exe.step);
+
+ const cleanup = b.addRemoveDirTree(tmp_path);
+ cleanup.step.dependOn(&run_test.step);
+ cleanup.step.dependOn(&run_run.step);
+ cleanup.step.dependOn(&run_bad.step);
+
+ step.dependOn(&cleanup.step);
+ }
+
+ // Test Godbolt API
+ if (builtin.os.tag == .linux and builtin.cpu.arch == .x86_64) {
+ const tmp_path = b.makeTempPath();
+
+ const writefile = b.addWriteFile("example.zig",
+ \\// Type your code here, or load an example.
+ \\export fn square(num: i32) i32 {
+ \\ return num * num;
+ \\}
+ \\extern fn zig_panic() noreturn;
+ \\pub fn panic(msg: []const u8, error_return_trace: ?*@import("std").builtin.StackTrace, _: ?usize) noreturn {
+ \\ _ = msg;
+ \\ _ = error_return_trace;
+ \\ zig_panic();
+ \\}
+ );
+
+ // This is intended to be the exact CLI usage used by godbolt.org.
+ const run = b.addSystemCommand(&.{
+ b.zig_exe, "build-obj",
+ "--cache-dir", tmp_path,
+ "--name", "example",
+ "-fno-emit-bin", "-fno-emit-h",
+ "-fstrip", "-OReleaseFast",
+ });
+ run.addFileSourceArg(writefile.getFileSource("example.zig").?);
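+        // Request the emitted assembly as a build artifact so it can be checked below.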
+ const example_s = run.addPrefixedOutputFileArg("-femit-asm=", "example.s");
+
+ const checkfile = b.addCheckFile(example_s, .{
+ .expected_matches = &.{
+ "square:",
+ "mov\teax, edi",
+ "imul\teax, edi",
+ },
+ });
+ checkfile.setName("check godbolt.org CLI usage generating valid asm");
+
+ const cleanup = b.addRemoveDirTree(tmp_path);
+ cleanup.step.dependOn(&checkfile.step);
+
+ step.dependOn(&cleanup.step);
+ }
+
+ {
+ // Test `zig fmt`.
+ // This test must use a temporary directory rather than a cache
+ // directory because this test will be mutating the files. The cache
+ // system relies on cache directories being mutated only by their
+ // owners.
+ const tmp_path = b.makeTempPath();
+ const unformatted_code = " // no reason for indent";
+
+ var dir = std.fs.cwd().openDir(tmp_path, .{}) catch @panic("unhandled");
+ defer dir.close();
+ dir.writeFile("fmt1.zig", unformatted_code) catch @panic("unhandled");
+ dir.writeFile("fmt2.zig", unformatted_code) catch @panic("unhandled");
+
+ // Test zig fmt affecting only the appropriate files.
+ const run1 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "fmt1.zig" });
+ run1.setName("run zig fmt one file");
+ run1.cwd = tmp_path;
+ run1.has_side_effects = true;
+ // stdout should be file path + \n
+ run1.expectStdOutEqual("fmt1.zig\n");
+
+        // Running zig fmt on the directory should only affect the remaining unformatted file.
+ const run2 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "." });
+ run2.setName("run zig fmt the directory");
+ run2.cwd = tmp_path;
+ run2.has_side_effects = true;
+ run2.expectStdOutEqual("." ++ s ++ "fmt2.zig\n");
+ run2.step.dependOn(&run1.step);
+
+        // Both files have been formatted; nothing should change now.
+ const run3 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "." });
+ run3.setName("run zig fmt with nothing to do");
+ run3.cwd = tmp_path;
+ run3.has_side_effects = true;
+ run3.expectStdOutEqual("");
+ run3.step.dependOn(&run2.step);
+
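+        // UTF-16 LE with BOM; decodes to "    // no reason".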
+ const unformatted_code_utf16 = "\xff\xfe \x00 \x00 \x00 \x00/\x00/\x00 \x00n\x00o\x00 \x00r\x00e\x00a\x00s\x00o\x00n\x00";
+ const fmt4_path = std.fs.path.join(b.allocator, &.{ tmp_path, "fmt4.zig" }) catch @panic("OOM");
+ const write4 = b.addWriteFiles();
+ write4.addBytesToSource(unformatted_code_utf16, fmt4_path);
+ write4.step.dependOn(&run3.step);
+
+ // Test `zig fmt` handling UTF-16 decoding.
+ const run4 = b.addSystemCommand(&.{ b.zig_exe, "fmt", "." });
+ run4.setName("run zig fmt convert UTF-16 to UTF-8");
+ run4.cwd = tmp_path;
+ run4.has_side_effects = true;
+ run4.expectStdOutEqual("." ++ s ++ "fmt4.zig\n");
+ run4.step.dependOn(&write4.step);
+
+ // TODO change this to an exact match
+ const check4 = b.addCheckFile(.{ .path = fmt4_path }, .{
+ .expected_matches = &.{
+ "// no reason",
+ },
+ });
+ check4.step.dependOn(&run4.step);
+
+ const cleanup = b.addRemoveDirTree(tmp_path);
+ cleanup.step.dependOn(&check4.step);
+
+ step.dependOn(&cleanup.step);
+ }
+
+ {
+ // TODO this should move to become a CLI test rather than standalone
+ // cases.addBuildFile("test/standalone/options/build.zig", .{
+ // .extra_argv = &.{
+ // "-Dbool_true",
+ // "-Dbool_false=false",
+ // "-Dint=1234",
+ // "-De=two",
+ // "-Dstring=hello",
+ // },
+ // });
+ }
+
+ return step;
+}
+
+pub fn addAssembleAndLinkTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
+ const cases = b.allocator.create(CompareOutputContext) catch @panic("OOM");
+ cases.* = CompareOutputContext{
+ .b = b,
+ .step = b.step("test-asm-link", "Run the assemble and link tests"),
+ .test_index = 0,
+ .test_filter = test_filter,
+ .optimize_modes = optimize_modes,
+ };
+
+ assemble_and_link.addCases(cases);
+
+ return cases.step;
+}
+
+pub fn addTranslateCTests(b: *std.Build, test_filter: ?[]const u8) *Step {
+ const cases = b.allocator.create(TranslateCContext) catch @panic("OOM");
+ cases.* = TranslateCContext{
+ .b = b,
+ .step = b.step("test-translate-c", "Run the C translation tests"),
+ .test_index = 0,
+ .test_filter = test_filter,
+ };
+
+ translate_c.addCases(cases);
+
+ return cases.step;
+}
+
+pub fn addRunTranslatedCTests(
+ b: *std.Build,
+ test_filter: ?[]const u8,
+ target: std.zig.CrossTarget,
+) *Step {
+ const cases = b.allocator.create(RunTranslatedCContext) catch @panic("OOM");
+ cases.* = .{
+ .b = b,
+ .step = b.step("test-run-translated-c", "Run the Run-Translated-C tests"),
+ .test_index = 0,
+ .test_filter = test_filter,
+ .target = target,
+ };
+
+ run_translated_c.addCases(cases);
+
+ return cases.step;
+}
+
+const ModuleTestOptions = struct {
+ test_filter: ?[]const u8,
+ root_src: []const u8,
+ name: []const u8,
+ desc: []const u8,
+ optimize_modes: []const OptimizeMode,
+ skip_single_threaded: bool,
+ skip_non_native: bool,
+ skip_libc: bool,
+ skip_stage1: bool,
+ skip_stage2: bool,
+ max_rss: usize = 0,
+};
+
+pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
+ const step = b.step(b.fmt("test-{s}", .{options.name}), options.desc);
+
+ for (test_targets) |test_target| {
+ if (options.skip_non_native and !test_target.target.isNative())
+ continue;
+
+ if (options.skip_libc and test_target.link_libc)
+ continue;
+
+ if (test_target.link_libc and test_target.target.getOs().requiresLibC()) {
+ // This would be a redundant test.
+ continue;
+ }
+
+ if (options.skip_single_threaded and test_target.single_threaded)
+ continue;
+
+ if (test_target.disable_native and
+ test_target.target.getOsTag() == builtin.os.tag and
+ test_target.target.getCpuArch() == builtin.cpu.arch)
+ {
+ continue;
+ }
+
+ if (test_target.backend) |backend| switch (backend) {
+ .stage1 => if (options.skip_stage1) continue,
+ .stage2_llvm => {},
+ else => if (options.skip_stage2) continue,
+ };
+
+ const want_this_mode = for (options.optimize_modes) |m| {
+ if (m == test_target.optimize_mode) break true;
+ } else false;
+ if (!want_this_mode) continue;
+
+ const libc_prefix = if (test_target.target.getOs().requiresLibC())
+ ""
+ else if (test_target.link_libc)
+ "c"
+ else
+ "bare";
+
+ const triple_prefix = test_target.target.zigTriple(b.allocator) catch @panic("OOM");
+
+        // wasm32-wasi builds need more RAM; the reason is not yet understood.
+ const max_rss = if (test_target.target.getOs().tag == .wasi)
+ options.max_rss * 2
+ else
+ options.max_rss;
+
+ const these_tests = b.addTest(.{
+ .root_source_file = .{ .path = options.root_src },
+ .optimize = test_target.optimize_mode,
+ .target = test_target.target,
+ .max_rss = max_rss,
+ });
+ const single_threaded_txt = if (test_target.single_threaded) "single" else "multi";
+ const backend_txt = if (test_target.backend) |backend| @tagName(backend) else "default";
+ these_tests.single_threaded = test_target.single_threaded;
+ these_tests.setFilter(options.test_filter);
+ if (test_target.link_libc) {
+ these_tests.linkSystemLibrary("c");
+ }
+ these_tests.overrideZigLibDir("lib");
+ these_tests.addIncludePath("test");
+ if (test_target.backend) |backend| switch (backend) {
+ .stage1 => {
+ @panic("stage1 testing requested");
+ },
+ .stage2_llvm => {
+ these_tests.use_llvm = true;
+ },
+ .stage2_c => {
+ these_tests.use_llvm = false;
+ },
+ else => {
+ these_tests.use_llvm = false;
+ // TODO: force self-hosted linkers to avoid LLD creeping in
+ // until the auto-select mechanism deems them worthy
+ these_tests.use_lld = false;
+ },
+ };
+
+ const run = these_tests.run();
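+        // If the test binary cannot execute on the host, skip the run step's
+        // checks instead of failing.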
+ run.skip_foreign_checks = true;
+ run.setName(b.fmt("run test {s}-{s}-{s}-{s}-{s}-{s}", .{
+ options.name,
+ triple_prefix,
+ @tagName(test_target.optimize_mode),
+ libc_prefix,
+ single_threaded_txt,
+ backend_txt,
+ }));
+
+ step.dependOn(&run.step);
+ }
+ return step;
+}
+
pub fn addCAbiTests(b: *std.Build, skip_non_native: bool, skip_release: bool) *Step {
const step = b.step("test-c-abi", "Run the C ABI tests");
const optimize_modes: [2]OptimizeMode = .{ .Debug, .ReleaseFast };
- for (optimize_modes[0 .. @as(u8, 1) + @boolToInt(!skip_release)]) |optimize_mode| for (c_abi_targets) |c_abi_target| {
- if (skip_non_native and !c_abi_target.isNative())
- continue;
+ for (optimize_modes) |optimize_mode| {
+ if (optimize_mode != .Debug and skip_release) continue;
- const test_step = b.addTest(.{
- .root_source_file = .{ .path = "test/c_abi/main.zig" },
- .optimize = optimize_mode,
- .target = c_abi_target,
- });
- if (c_abi_target.abi != null and c_abi_target.abi.?.isMusl()) {
- // TODO NativeTargetInfo insists on dynamically linking musl
- // for some reason?
- test_step.target_info.dynamic_linker.max_byte = null;
+ for (c_abi_targets) |c_abi_target| {
+ if (skip_non_native and !c_abi_target.isNative()) continue;
+
+ if (c_abi_target.isWindows() and c_abi_target.getCpuArch() == .aarch64) {
+ // https://github.com/ziglang/zig/issues/14908
+ continue;
+ }
+
+ const test_step = b.addTest(.{
+ .root_source_file = .{ .path = "test/c_abi/main.zig" },
+ .optimize = optimize_mode,
+ .target = c_abi_target,
+ });
+ if (c_abi_target.abi != null and c_abi_target.abi.?.isMusl()) {
+ // TODO NativeTargetInfo insists on dynamically linking musl
+ // for some reason?
+ test_step.target_info.dynamic_linker.max_byte = null;
+ }
+ test_step.linkLibC();
+ test_step.addCSourceFile("test/c_abi/cfuncs.c", &.{"-std=c99"});
+
+ // test-c-abi should test both with LTO on and with LTO off. Only
+ // some combinations are passing currently:
+ // https://github.com/ziglang/zig/issues/14908
+ if (c_abi_target.isWindows()) {
+ test_step.want_lto = false;
+ }
+
+ const triple_prefix = c_abi_target.zigTriple(b.allocator) catch @panic("OOM");
+            test_step.setName(b.fmt("test-c-abi-{s}-{s}", .{
+ triple_prefix, @tagName(optimize_mode),
+ }));
+
+ const run = test_step.run();
+ run.skip_foreign_checks = true;
+ step.dependOn(&run.step);
}
- test_step.linkLibC();
- test_step.addCSourceFile("test/c_abi/cfuncs.c", &.{"-std=c99"});
-
- if (c_abi_target.isWindows() and (c_abi_target.getCpuArch() == .x86 or builtin.target.os.tag == .linux)) {
- // LTO currently incorrectly strips stdcall name-mangled functions
- // LLD crashes in LTO here when cross compiling for windows on linux
- test_step.want_lto = false;
- }
-
- const triple_prefix = c_abi_target.zigTriple(b.allocator) catch unreachable;
- test_step.setNamePrefix(b.fmt("{s}-{s}-{s} ", .{
- "test-c-abi",
- triple_prefix,
- @tagName(optimize_mode),
- }));
-
- step.dependOn(&test_step.step);
- };
+ }
return step;
}
+
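+/// Collects the test cases under test/cases (plus those registered in
+/// cases.zig) and lowers them into build steps attached to parent_step.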
+pub fn addCases(
+ b: *std.Build,
+ parent_step: *Step,
+ opt_test_filter: ?[]const u8,
+ check_case_exe: *std.Build.CompileStep,
+) !void {
+ const arena = b.allocator;
+ const gpa = b.allocator;
+
+ var cases = @import("src/Cases.zig").init(gpa, arena);
+
+ var dir = try b.build_root.handle.openIterableDir("test/cases", .{});
+ defer dir.close();
+
+ cases.addFromDir(dir);
+ try @import("cases.zig").addCases(&cases);
+
+ const cases_dir_path = try b.build_root.join(b.allocator, &.{ "test", "cases" });
+ cases.lowerToBuildSteps(
+ b,
+ parent_step,
+ opt_test_filter,
+ cases_dir_path,
+ check_case_exe,
+ );
+}
diff --git a/test/translate_c.zig b/test/translate_c.zig
index a2cad37c08..f4cd374a06 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -3948,4 +3948,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
});
}
+
+ cases.add("extern array of unknown length",
+ \\extern int foo[];
+ , &[_][]const u8{
+ \\const foo: [*c]c_int = @extern([*c]c_int, .{
+ \\ .name = "foo",
+ \\});
+ });
}
diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py
index 3013fbb43e..b72b6f9760 100644
--- a/tools/lldb_pretty_printers.py
+++ b/tools/lldb_pretty_printers.py
@@ -1,6 +1,6 @@
# pretty printing for the zig language, zig standard library, and zig stage 2 compiler.
# put commands in ~/.lldbinit to run them automatically when starting lldb
-# `command script /path/to/stage2_lldb_pretty_printers.py` to import this file
+# `command script import /path/to/zig/tools/lldb_pretty_printers.py` to import this file
# `type category enable zig` to enable pretty printing for the zig language
# `type category enable zig.std` to enable pretty printing for the zig standard library
# `type category enable zig.stage2` to enable pretty printing for the zig stage 2 compiler
@@ -201,7 +201,7 @@ class std_MultiArrayList_SynthProvider:
value_type = self.value.type
for helper in self.value.target.FindFunctions('%s.dbHelper' % value_type.name, lldb.eFunctionNameTypeFull):
- ptr_self_type, ptr_child_type, ptr_entry_type = helper.function.type.GetFunctionArgumentTypes()
+ ptr_self_type, ptr_child_type, ptr_field_type, ptr_entry_type = helper.function.type.GetFunctionArgumentTypes()
if ptr_self_type.GetPointeeType() == value_type: break
else: return
@@ -221,12 +221,44 @@ class std_MultiArrayList_SynthProvider:
offset = 0
data = lldb.SBData()
for field in self.entry_type.fields:
- ptr_field_type = field.type
- field_size = ptr_field_type.GetPointeeType().size
- data.Append(self.bytes.CreateChildAtOffset(field.name, offset + index * field_size, ptr_field_type).address_of.data)
+ field_type = field.type.GetPointeeType()
+ field_size = field_type.size
+ data.Append(self.bytes.CreateChildAtOffset(field.name, offset + index * field_size, field_type).address_of.data)
offset += self.capacity * field_size
return self.bytes.CreateValueFromData('[%d]' % index, data, self.entry_type)
except: return None
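+# Synthetic provider for MultiArrayList(...).Slice: rebuilds each element from
+# the per-field pointers stored in `ptrs`.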
+class std_MultiArrayList_Slice_SynthProvider:
+ def __init__(self, value, _=None): self.value = value
+ def update(self):
+ try:
+ self.len = 0
+
+ value_type = self.value.type
+ for helper in self.value.target.FindFunctions('%s.dbHelper' % value_type.name, lldb.eFunctionNameTypeFull):
+ ptr_self_type, ptr_child_type, ptr_field_type, ptr_entry_type = helper.function.type.GetFunctionArgumentTypes()
+ if ptr_self_type.GetPointeeType() == value_type: break
+ else: return
+
+ self.fields = {member.name: index for index, member in enumerate(ptr_field_type.GetPointeeType().enum_members)}
+ self.entry_type = ptr_entry_type.GetPointeeType()
+ self.ptrs = self.value.GetChildMemberWithName('ptrs')
+ self.len = self.value.GetChildMemberWithName('len').unsigned
+ self.capacity = self.value.GetChildMemberWithName('capacity').unsigned
+ except: pass
+ def has_children(self): return True
+ def num_children(self): return self.len
+ def get_child_index(self, name):
+ try: return int(name.removeprefix('[').removesuffix(']'))
+ except: return -1
+ def get_child_at_index(self, index):
+ try:
+ if index < 0 or index >= self.len: return None
+ data = lldb.SBData()
+ for field in self.entry_type.fields:
+ field_type = field.type.GetPointeeType()
+ data.Append(self.ptrs.child[self.fields[field.name.removesuffix('_ptr')]].CreateChildAtOffset(field.name, index * field_type.size, field_type).address_of.data)
+ return self.ptrs.CreateValueFromData('[%d]' % index, data, self.entry_type)
+ except: return None
class std_HashMapUnmanaged_SynthProvider:
def __init__(self, value, _=None): self.value = value
@@ -556,6 +588,7 @@ def __lldb_init_module(debugger, _=None):
add(debugger, category='zig.std', type='mem.Allocator', summary='${var.ptr}')
add(debugger, category='zig.std', regex=True, type='^segmented_list\\.SegmentedList\\(.*\\)$', identifier='std_SegmentedList', synth=True, expand=True, summary='len=${var.len}')
add(debugger, category='zig.std', regex=True, type='^multi_array_list\\.MultiArrayList\\(.*\\)$', identifier='std_MultiArrayList', synth=True, expand=True, summary='len=${var.len} capacity=${var.capacity}')
+ add(debugger, category='zig.std', regex=True, type='^multi_array_list\\.MultiArrayList\\(.*\\)\\.Slice$', identifier='std_MultiArrayList_Slice', synth=True, expand=True, summary='len=${var.len} capacity=${var.capacity}')
add(debugger, category='zig.std', regex=True, type=MultiArrayList_Entry('.*'), identifier='std_Entry', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.std', regex=True, type='^hash_map\\.HashMapUnmanaged\\(.*\\)$', identifier='std_HashMapUnmanaged', synth=True, expand=True, summary=True)
add(debugger, category='zig.std', regex=True, type='^hash_map\\.HashMapUnmanaged\\(.*\\)\\.Entry$', identifier = 'std_Entry', synth=True, inline_children=True, summary=True)