Merge remote-tracking branch 'origin/master' into llvm13

This commit is contained in:
Andrew Kelley 2021-09-30 21:38:04 -07:00
commit 3eb729b442
195 changed files with 30719 additions and 10501 deletions

View file

@ -1,7 +1,7 @@
image: freebsd/latest
secrets:
- 51bfddf5-86a6-4e01-8576-358c72a4a0a4
- 5cfede76-914e-4071-893e-e5e2e6ae3cea
- 512ed797-0927-475a-83fd-bc997792860c
sources:
- https://github.com/ziglang/zig
tasks:

View file

@ -1,7 +1,7 @@
image: netbsd/latest
secrets:
- 51bfddf5-86a6-4e01-8576-358c72a4a0a4
- 5cfede76-914e-4071-893e-e5e2e6ae3cea
- 512ed797-0927-475a-83fd-bc997792860c
sources:
- https://github.com/ziglang/zig
tasks:

View file

@ -537,41 +537,43 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/zig/system.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/system/x86.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/tokenizer.zig"
"${CMAKE_SOURCE_DIR}/src/Air.zig"
"${CMAKE_SOURCE_DIR}/src/AstGen.zig"
"${CMAKE_SOURCE_DIR}/src/Cache.zig"
"${CMAKE_SOURCE_DIR}/src/Compilation.zig"
"${CMAKE_SOURCE_DIR}/src/DepTokenizer.zig"
"${CMAKE_SOURCE_DIR}/src/Liveness.zig"
"${CMAKE_SOURCE_DIR}/src/Module.zig"
"${CMAKE_SOURCE_DIR}/src/Package.zig"
"${CMAKE_SOURCE_DIR}/src/RangeSet.zig"
"${CMAKE_SOURCE_DIR}/src/Sema.zig"
"${CMAKE_SOURCE_DIR}/src/ThreadPool.zig"
"${CMAKE_SOURCE_DIR}/src/TypedValue.zig"
"${CMAKE_SOURCE_DIR}/src/WaitGroup.zig"
"${CMAKE_SOURCE_DIR}/src/AstGen.zig"
"${CMAKE_SOURCE_DIR}/src/Zir.zig"
"${CMAKE_SOURCE_DIR}/src/arch/aarch64/bits.zig"
"${CMAKE_SOURCE_DIR}/src/arch/arm/bits.zig"
"${CMAKE_SOURCE_DIR}/src/arch/riscv64/bits.zig"
"${CMAKE_SOURCE_DIR}/src/arch/x86_64/bits.zig"
"${CMAKE_SOURCE_DIR}/src/clang.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
"${CMAKE_SOURCE_DIR}/src/codegen.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/aarch64.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/arm.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/c.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/riscv64.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/wasm.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/x86_64.zig"
"${CMAKE_SOURCE_DIR}/src/glibc.zig"
"${CMAKE_SOURCE_DIR}/src/introspect.zig"
"${CMAKE_SOURCE_DIR}/src/Air.zig"
"${CMAKE_SOURCE_DIR}/src/libc_installation.zig"
"${CMAKE_SOURCE_DIR}/src/libcxx.zig"
"${CMAKE_SOURCE_DIR}/src/libtsan.zig"
"${CMAKE_SOURCE_DIR}/src/libunwind.zig"
"${CMAKE_SOURCE_DIR}/src/link.zig"
"${CMAKE_SOURCE_DIR}/src/link/C.zig"
"${CMAKE_SOURCE_DIR}/src/link/C/zig.h"
"${CMAKE_SOURCE_DIR}/src/link/Coff.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf.zig"
"${CMAKE_SOURCE_DIR}/src/link/Plan9.zig"
"${CMAKE_SOURCE_DIR}/src/link/Plan9/aout.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Archive.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Atom.zig"
@ -582,20 +584,22 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link/MachO/Trie.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/bind.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/commands.zig"
"${CMAKE_SOURCE_DIR}/src/link/Plan9.zig"
"${CMAKE_SOURCE_DIR}/src/link/Plan9/aout.zig"
"${CMAKE_SOURCE_DIR}/src/link/Wasm.zig"
"${CMAKE_SOURCE_DIR}/src/link/msdos-stub.bin"
"${CMAKE_SOURCE_DIR}/src/link/tapi.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/Tokenizer.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/parse.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/parse/test.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/Tokenizer.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/yaml.zig"
"${CMAKE_SOURCE_DIR}/src/link/C/zig.h"
"${CMAKE_SOURCE_DIR}/src/link/msdos-stub.bin"
"${CMAKE_SOURCE_DIR}/src/Liveness.zig"
"${CMAKE_SOURCE_DIR}/src/main.zig"
"${CMAKE_SOURCE_DIR}/src/mingw.zig"
"${CMAKE_SOURCE_DIR}/src/musl.zig"
"${CMAKE_SOURCE_DIR}/src/print_air.zig"
"${CMAKE_SOURCE_DIR}/src/print_env.zig"
"${CMAKE_SOURCE_DIR}/src/print_targets.zig"
"${CMAKE_SOURCE_DIR}/src/print_zir.zig"
"${CMAKE_SOURCE_DIR}/src/stage1.zig"
"${CMAKE_SOURCE_DIR}/src/target.zig"
"${CMAKE_SOURCE_DIR}/src/tracy.zig"
@ -605,8 +609,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/value.zig"
"${CMAKE_SOURCE_DIR}/src/wasi_libc.zig"
"${CMAKE_SOURCE_DIR}/src/windows_sdk.zig"
"${CMAKE_SOURCE_DIR}/src/Zir.zig"
"${CMAKE_SOURCE_DIR}/src/Sema.zig"
)
if(MSVC)

View file

@ -18,6 +18,7 @@ pub fn build(b: *Builder) !void {
const mode = b.standardReleaseOptions();
const target = b.standardTargetOptions(.{});
const single_threaded = b.option(bool, "single-threaded", "Build artifacts that run in single threaded mode") orelse false;
const use_zig_libcxx = b.option(bool, "use-zig-libcxx", "If libc++ is needed, use zig's bundled version, don't try to integrate with the system") orelse false;
var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
docgen_exe.single_threaded = single_threaded;
@ -125,10 +126,18 @@ pub fn build(b: *Builder) !void {
exe.install();
exe.setBuildMode(mode);
exe.setTarget(target);
toolchain_step.dependOn(&exe.step);
if (!skip_stage2_tests) {
toolchain_step.dependOn(&exe.step);
}
b.default_step.dependOn(&exe.step);
exe.single_threaded = single_threaded;
if (target.isWindows() and target.getAbi() == .gnu) {
// LTO is currently broken on mingw, this can be removed when it's fixed.
exe.want_lto = false;
test_stage2.want_lto = false;
}
const exe_options = b.addOptions();
exe.addOptions("build_options", exe_options);
@ -182,8 +191,8 @@ pub fn build(b: *Builder) !void {
b.addSearchPrefix(cfg.cmake_prefix_path);
}
try addCmakeCfgOptionsToExe(b, cfg, exe);
try addCmakeCfgOptionsToExe(b, cfg, test_stage2);
try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
try addCmakeCfgOptionsToExe(b, cfg, test_stage2, use_zig_libcxx);
} else {
// Here we are -Denable-llvm but no cmake integration.
try addStaticLlvmOptionsToExe(exe);
@ -260,12 +269,24 @@ pub fn build(b: *Builder) !void {
b.allocator,
&[_][]const u8{ tracy_path, "TracyClient.cpp" },
) catch unreachable;
// On mingw, we need to opt into windows 7+ to get some features required by tracy.
const tracy_c_flags: []const []const u8 = if (target.isWindows() and target.getAbi() == .gnu)
&[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" }
else
&[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" };
exe.addIncludeDir(tracy_path);
exe.addCSourceFile(client_cpp, &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });
exe.addCSourceFile(client_cpp, tracy_c_flags);
if (!enable_llvm) {
exe.linkSystemLibraryName("c++");
}
exe.linkLibC();
if (target.isWindows()) {
exe.linkSystemLibrary("dbghelp");
exe.linkSystemLibrary("ws2_32");
}
}
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
@ -434,6 +455,7 @@ fn addCmakeCfgOptionsToExe(
b: *Builder,
cfg: CMakeConfig,
exe: *std.build.LibExeObjStep,
use_zig_libcxx: bool,
) !void {
exe.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{
cfg.cmake_binary_dir,
@ -446,28 +468,32 @@ fn addCmakeCfgOptionsToExe(
addCMakeLibraryList(exe, cfg.lld_libraries);
addCMakeLibraryList(exe, cfg.llvm_libraries);
const need_cpp_includes = true;
if (use_zig_libcxx) {
exe.linkLibCpp();
} else {
const need_cpp_includes = true;
// System -lc++ must be used because in this code path we are attempting to link
// against system-provided LLVM, Clang, LLD.
if (exe.target.getOsTag() == .linux) {
// First we try to static link against gcc libstdc++. If that doesn't work,
// we fall back to -lc++ and cross our fingers.
addCxxKnownPath(b, cfg, exe, "libstdc++.a", "", need_cpp_includes) catch |err| switch (err) {
error.RequiredLibraryNotFound => {
exe.linkSystemLibrary("c++");
},
else => |e| return e,
};
exe.linkSystemLibrary("unwind");
} else if (exe.target.isFreeBSD()) {
try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
exe.linkSystemLibrary("pthread");
} else if (exe.target.getOsTag() == .openbsd) {
try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
try addCxxKnownPath(b, cfg, exe, "libc++abi.a", null, need_cpp_includes);
} else if (exe.target.isDarwin()) {
exe.linkSystemLibrary("c++");
// System -lc++ must be used because in this code path we are attempting to link
// against system-provided LLVM, Clang, LLD.
if (exe.target.getOsTag() == .linux) {
// First we try to static link against gcc libstdc++. If that doesn't work,
// we fall back to -lc++ and cross our fingers.
addCxxKnownPath(b, cfg, exe, "libstdc++.a", "", need_cpp_includes) catch |err| switch (err) {
error.RequiredLibraryNotFound => {
exe.linkSystemLibrary("c++");
},
else => |e| return e,
};
exe.linkSystemLibrary("unwind");
} else if (exe.target.isFreeBSD()) {
try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
exe.linkSystemLibrary("pthread");
} else if (exe.target.getOsTag() == .openbsd) {
try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
try addCxxKnownPath(b, cfg, exe, "libc++abi.a", null, need_cpp_includes);
} else if (exe.target.isDarwin()) {
exe.linkSystemLibrary("c++");
}
}
if (cfg.dia_guids_lib.len != 0) {
@ -504,6 +530,7 @@ fn addStaticLlvmOptionsToExe(
if (exe.target.getOs().tag == .windows) {
exe.linkSystemLibrary("version");
exe.linkSystemLibrary("uuid");
exe.linkSystemLibrary("ole32");
}
}

View file

@ -20,7 +20,7 @@ cd $HOME
wget -nv "https://ziglang.org/deps/$CACHE_BASENAME.tar.xz"
tar xf "$CACHE_BASENAME.tar.xz"
QEMUBASE="qemu-linux-x86_64-5.2.0.1"
QEMUBASE="qemu-linux-x86_64-6.1.0.1"
wget -nv "https://ziglang.org/deps/$QEMUBASE.tar.xz"
tar xf "$QEMUBASE.tar.xz"
export PATH="$(pwd)/$QEMUBASE/bin:$PATH"
@ -71,9 +71,22 @@ make $JOBS install
release/bin/zig test ../test/behavior.zig -fno-stage1 -fLLVM -I ../test
release/bin/zig build test-toolchain -Denable-qemu -Denable-wasmtime
release/bin/zig build test-std -Denable-qemu -Denable-wasmtime
release/bin/zig build docs -Denable-qemu -Denable-wasmtime
release/bin/zig build test-behavior -Denable-qemu -Denable-wasmtime
release/bin/zig build test-compiler-rt -Denable-qemu -Denable-wasmtime
release/bin/zig build test-std -Denable-qemu -Denable-wasmtime
release/bin/zig build test-minilibc -Denable-qemu -Denable-wasmtime
release/bin/zig build test-compare-output -Denable-qemu -Denable-wasmtime
release/bin/zig build test-standalone -Denable-qemu -Denable-wasmtime
release/bin/zig build test-stack-traces -Denable-qemu -Denable-wasmtime
release/bin/zig build test-cli -Denable-qemu -Denable-wasmtime
release/bin/zig build test-asm-link -Denable-qemu -Denable-wasmtime
release/bin/zig build test-runtime-safety -Denable-qemu -Denable-wasmtime
release/bin/zig build test-translate-c -Denable-qemu -Denable-wasmtime
release/bin/zig build test-run-translated-c -Denable-qemu -Denable-wasmtime
release/bin/zig build docs -Denable-qemu -Denable-wasmtime
release/bin/zig build # test building self-hosted without LLVM
release/bin/zig build test-fmt -Denable-qemu -Denable-wasmtime
release/bin/zig build test-stage2 -Denable-qemu -Denable-wasmtime
# Look for HTML errors.
tidy -qe ../zig-cache/langref.html

View file

@ -38,9 +38,8 @@ jobs:
timeoutInMinutes: 360
steps:
- powershell: |
(New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2021-06-04/msys2-base-x86_64-20210604.sfx.exe", "sfx.exe")
(New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2021-07-25/msys2-base-x86_64-20210725.sfx.exe", "sfx.exe")
.\sfx.exe -y -o\
del sfx.exe
displayName: Download/Extract/Install MSYS2
- script: |
@REM install updated filesystem package first without dependency checking

View file

@ -1,7 +1,7 @@
@echo on
SET "SRCROOT=%cd%"
SET "PREVPATH=%PATH%"
SET "PREVMSYSEM=%MSYSTEM%"
SET "PREVMSYSTEM=%MSYSTEM%"
set "PATH=%CD:~0,2%\msys64\usr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem"
SET "MSYSTEM=MINGW64"

View file

@ -901,6 +901,7 @@ fn tokenizeAndPrintRaw(
switch (token.tag) {
.eof => break,
.keyword_addrspace,
.keyword_align,
.keyword_and,
.keyword_asm,
@ -1057,15 +1058,21 @@ fn tokenizeAndPrintRaw(
.plus_equal,
.plus_percent,
.plus_percent_equal,
.plus_pipe,
.plus_pipe_equal,
.minus,
.minus_equal,
.minus_percent,
.minus_percent_equal,
.minus_pipe,
.minus_pipe_equal,
.asterisk,
.asterisk_equal,
.asterisk_asterisk,
.asterisk_percent,
.asterisk_percent_equal,
.asterisk_pipe,
.asterisk_pipe_equal,
.arrow,
.colon,
.slash,
@ -1078,6 +1085,8 @@ fn tokenizeAndPrintRaw(
.angle_bracket_left_equal,
.angle_bracket_angle_bracket_left,
.angle_bracket_angle_bracket_left_equal,
.angle_bracket_angle_bracket_left_pipe,
.angle_bracket_angle_bracket_left_pipe_equal,
.angle_bracket_right,
.angle_bracket_right_equal,
.angle_bracket_angle_bracket_right,
@ -1222,9 +1231,7 @@ fn genHtml(
try printSourceBlock(allocator, tokenizer, out, syntax_block);
// TODO: remove code.just_check_syntax after updating code samples
// that have stopped working due to a change in the compiler.
if (!do_code_tests or code.just_check_syntax) {
if (!do_code_tests) {
continue;
}

View file

@ -1244,8 +1244,9 @@ fn divide(a: i32, b: i32) i32 {
</p>
<p>
Operators such as {#syntax#}+{#endsyntax#} and {#syntax#}-{#endsyntax#} cause undefined behavior on
integer overflow. Also available are operations such as {#syntax#}+%{#endsyntax#} and
{#syntax#}-%{#endsyntax#} which are defined to have wrapping arithmetic on all targets.
integer overflow. Alternative operators are provided for wrapping and saturating arithmetic on all targets.
{#syntax#}+%{#endsyntax#} and {#syntax#}-%{#endsyntax#} perform wrapping arithmetic
while {#syntax#}+|{#endsyntax#} and {#syntax#}-|{#endsyntax#} perform saturating arithmetic.
</p>
<p>
Zig supports arbitrary bit-width integers, referenced by using
@ -1395,6 +1396,23 @@ a +%= b{#endsyntax#}</pre></th>
<pre>{#syntax#}@as(u32, std.math.maxInt(u32)) +% 1 == 0{#endsyntax#}</pre>
</td>
</tr>
<tr>
<th scope="row"><pre>{#syntax#}a +| b
a +|= b{#endsyntax#}</pre></th>
<td>
<ul>
<li>{#link|Integers#}</li>
</ul>
</td>
<td>Saturating Addition.
<ul>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
</ul>
</td>
<td>
<pre>{#syntax#}@as(u32, std.math.maxInt(u32)) +| 1 == @as(u32, std.math.maxInt(u32)){#endsyntax#}</pre>
</td>
</tr>
<tr>
<th scope="row"><pre>{#syntax#}a - b
a -= b{#endsyntax#}</pre></th>
@ -1434,6 +1452,23 @@ a -%= b{#endsyntax#}</pre></th>
<pre>{#syntax#}@as(u32, 0) -% 1 == std.math.maxInt(u32){#endsyntax#}</pre>
</td>
</tr>
<tr>
<th scope="row"><pre>{#syntax#}a -| b
a -|= b{#endsyntax#}</pre></th>
<td>
<ul>
<li>{#link|Integers#}</li>
</ul>
</td>
<td>Saturating Subtraction.
<ul>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
</ul>
</td>
<td>
<pre>{#syntax#}@as(u32, 0) -| 1 == 0{#endsyntax#}</pre>
</td>
</tr>
<tr>
<th scope="row"><pre>{#syntax#}-a{#endsyntax#}</pre></th>
<td>
@ -1508,6 +1543,23 @@ a *%= b{#endsyntax#}</pre></th>
<pre>{#syntax#}@as(u8, 200) *% 2 == 144{#endsyntax#}</pre>
</td>
</tr>
<tr>
<th scope="row"><pre>{#syntax#}a *| b
a *|= b{#endsyntax#}</pre></th>
<td>
<ul>
<li>{#link|Integers#}</li>
</ul>
</td>
<td>Saturating Multiplication.
<ul>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
</ul>
</td>
<td>
<pre>{#syntax#}@as(u8, 200) *| 2 == 255{#endsyntax#}</pre>
</td>
</tr>
<tr>
<th scope="row"><pre>{#syntax#}a / b
a /= b{#endsyntax#}</pre></th>
@ -1577,6 +1629,24 @@ a <<= b{#endsyntax#}</pre></th>
<pre>{#syntax#}1 << 8 == 256{#endsyntax#}</pre>
</td>
</tr>
<tr>
<th scope="row"><pre>{#syntax#}a <<| b
a <<|= b{#endsyntax#}</pre></th>
<td>
<ul>
<li>{#link|Integers#}</li>
</ul>
</td>
<td>Saturating Bit Shift Left.
<ul>
<li>See also {#link|@shlExact#}.</li>
<li>See also {#link|@shlWithOverflow#}.</li>
</ul>
</td>
<td>
<pre>{#syntax#}@as(u8, 1) <<| 8 == 255{#endsyntax#}</pre>
</td>
</tr>
<tr>
<th scope="row"><pre>{#syntax#}a >> b
a >>= b{#endsyntax#}</pre></th>
@ -1968,14 +2038,14 @@ const B = error{Two};
a!b
x{}
!x -x -%x ~x &x ?x
* / % ** *% ||
+ - ++ +% -%
<< >>
* / % ** *% *| ||
+ - ++ +% -% +| -|
<< >> <<|
& ^ | orelse catch
== != < > <= >=
and
or
= *= /= %= += -= <<= >>= &= ^= |={#endsyntax#}</pre>
= *= *%= *|= /= %= += +%= +|= -= -%= -|= <<= <<|= >>= &= ^= |={#endsyntax#}</pre>
{#header_close#}
{#header_close#}
{#header_open|Arrays#}
@ -2125,7 +2195,7 @@ fn dump(args: anytype) !void {
{#header_open|Multidimensional Arrays#}
<p>
Mutlidimensional arrays can be created by nesting arrays:
Multidimensional arrays can be created by nesting arrays:
</p>
{#code_begin|test|multidimensional#}
const std = @import("std");
@ -2898,7 +2968,7 @@ fn bar(x: *const u3) u3 {
}
{#code_end#}
<p>
In this case, the function {#syntax#}bar{#endsyntax#} cannot be called becuse the pointer
In this case, the function {#syntax#}bar{#endsyntax#} cannot be called because the pointer
to the non-ABI-aligned field mentions the bit offset, but the function expects an ABI-aligned pointer.
</p>
<p>
@ -4771,6 +4841,8 @@ test "parse u64" {
{#header_open|catch#}
<p>If you want to provide a default value, you can use the {#syntax#}catch{#endsyntax#} binary operator:</p>
{#code_begin|syntax#}
const parseU64 = @import("error_union_parsing_u64.zig").parseU64;
fn doAThing(str: []u8) void {
const number = parseU64(str, 10) catch 13;
_ = number; // ...
@ -4786,6 +4858,8 @@ fn doAThing(str: []u8) void {
<p>Let's say you wanted to return the error if you got one, otherwise continue with the
function logic:</p>
{#code_begin|syntax#}
const parseU64 = @import("error_union_parsing_u64.zig").parseU64;
fn doAThing(str: []u8) !void {
const number = parseU64(str, 10) catch |err| return err;
_ = number; // ...
@ -4795,6 +4869,8 @@ fn doAThing(str: []u8) !void {
There is a shortcut for this. The {#syntax#}try{#endsyntax#} expression:
</p>
{#code_begin|syntax#}
const parseU64 = @import("error_union_parsing_u64.zig").parseU64;
fn doAThing(str: []u8) !void {
const number = try parseU64(str, 10);
_ = number; // ...
@ -4810,7 +4886,7 @@ fn doAThing(str: []u8) !void {
Maybe you know with complete certainty that an expression will never be an error.
In this case you can do this:
</p>
{#code_begin|syntax#}const number = parseU64("1234", 10) catch unreachable;{#code_end#}
{#syntax#}const number = parseU64("1234", 10) catch unreachable;{#endsyntax#}
<p>
Here we know for sure that "1234" will parse successfully. So we put the
{#syntax#}unreachable{#endsyntax#} value on the right hand side. {#syntax#}unreachable{#endsyntax#} generates
@ -4822,7 +4898,7 @@ fn doAThing(str: []u8) !void {
Finally, you may want to take a different action for every situation. For that, we combine
the {#link|if#} and {#link|switch#} expression:
</p>
{#code_begin|syntax#}
{#syntax_block|zig|handle_all_error_scenarios.zig#}
fn doAThing(str: []u8) void {
if (parseU64(str, 10)) |number| {
doSomethingWithNumber(number);
@ -4834,7 +4910,7 @@ fn doAThing(str: []u8) void {
error.InvalidChar => unreachable,
}
}
{#code_end#}
{#end_syntax_block#}
{#header_open|errdefer#}
<p>
The other component to error handling is defer statements.
@ -4845,7 +4921,7 @@ fn doAThing(str: []u8) void {
<p>
Example:
</p>
{#code_begin|syntax#}
{#syntax_block|zig|errdefer_example.zig#}
fn createFoo(param: i32) !Foo {
const foo = try tryToAllocateFoo();
// now we have allocated foo. we need to free it if the function fails.
@ -4863,7 +4939,7 @@ fn createFoo(param: i32) !Foo {
// but the defer will run!
return foo;
}
{#code_end#}
{#end_syntax_block#}
<p>
The neat thing about this is that you get robust error handling without
the verbosity and cognitive overhead of trying to make sure every exit path
@ -4955,7 +5031,7 @@ test "merge error sets" {
{#header_open|Inferred Error Sets#}
<p>
Because many functions in Zig return a possible error, Zig supports inferring the error set.
To infer the error set for a function, use this syntax:
To infer the error set for a function, prepend the {#syntax#}!{#endsyntax#} operator to the functions return type, like {#syntax#}!T{#endsyntax#}:
</p>
{#code_begin|test|inferred_error_sets#}
// With an inferred error set
@ -5132,12 +5208,12 @@ fn bang2() void {
For the case when no errors are returned, the cost is a single memory write operation, only in the first non-failable function in the call graph that calls a failable function, i.e. when a function returning {#syntax#}void{#endsyntax#} calls a function returning {#syntax#}error{#endsyntax#}.
This is to initialize this struct in the stack memory:
</p>
{#code_begin|syntax#}
{#syntax_block|zig|stack_trace_struct.zig#}
pub const StackTrace = struct {
index: usize,
instruction_addresses: [N]usize,
};
{#code_end#}
{#end_syntax_block#}
<p>
Here, N is the maximum function call depth as determined by call graph analysis. Recursion is ignored and counts for 2.
</p>
@ -5150,13 +5226,13 @@ pub const StackTrace = struct {
<p>
When generating the code for a function that returns an error, just before the {#syntax#}return{#endsyntax#} statement (only for the {#syntax#}return{#endsyntax#} statements that return errors), Zig generates a call to this function:
</p>
{#code_begin|syntax#}
{#syntax_block|zig|zig_return_error_fn.zig#}
// marked as "no-inline" in LLVM IR
fn __zig_return_error(stack_trace: *StackTrace) void {
stack_trace.instruction_addresses[stack_trace.index] = @returnAddress();
stack_trace.index = (stack_trace.index + 1) % N;
}
{#code_end#}
{#end_syntax_block#}
<p>
The cost is 2 math operations plus some memory reads and writes. The memory accessed is constrained and should remain cached for the duration of the error return bubbling.
</p>
@ -5206,16 +5282,16 @@ const optional_int: ?i32 = 5678;
Task: call malloc, if the result is null, return null.
</p>
<p>C code</p>
<pre><code class="cpp">// malloc prototype included for reference
{#syntax_block|c|call_malloc_in_c.c#}// malloc prototype included for reference
void *malloc(size_t size);
struct Foo *do_a_thing(void) {
char *ptr = malloc(1234);
if (!ptr) return NULL;
// ...
}</code></pre>
}{#end_syntax_block#}
<p>Zig code</p>
{#code_begin|syntax#}
{#syntax_block|zig|call_malloc_from_zig.zig#}
// malloc prototype included for reference
extern fn malloc(size: size_t) ?*u8;
@ -5223,7 +5299,7 @@ fn doAThing() ?*Foo {
const ptr = malloc(1234) orelse return null;
_ = ptr; // ...
}
{#code_end#}
{#end_syntax_block#}
<p>
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
is {#syntax#}*u8{#endsyntax#} <em>not</em> {#syntax#}?*u8{#endsyntax#}. The {#syntax#}orelse{#endsyntax#} keyword
@ -5233,7 +5309,7 @@ fn doAThing() ?*Foo {
<p>
The other form of checking against NULL you might see looks like this:
</p>
<pre><code class="cpp">void do_a_thing(struct Foo *foo) {
{#syntax_block|c|checking_null_in_c.c#}void do_a_thing(struct Foo *foo) {
// do some stuff
if (foo) {
@ -5241,11 +5317,14 @@ fn doAThing() ?*Foo {
}
// do some stuff
}</code></pre>
}{#end_syntax_block#}
<p>
In Zig you can accomplish the same thing:
</p>
{#code_begin|syntax#}
{#code_begin|syntax|checking_null_in_zig#}
const Foo = struct{};
fn doSomethingWithFoo(foo: *Foo) void { _ = foo; }
fn doAThing(optional_foo: ?*Foo) void {
// do some stuff
@ -5540,7 +5619,7 @@ test "coerce to optionals" {
}
{#code_end#}
<p>It works nested inside the {#link|Error Union Type#}, too:</p>
{#code_begin|test|test_corerce_optional_wrapped_error_union#}
{#code_begin|test|test_coerce_optional_wrapped_error_union#}
const std = @import("std");
const expect = std.testing.expect;
@ -6111,7 +6190,7 @@ test "perform fn" {
different code. In this example, the function {#syntax#}performFn{#endsyntax#} is generated three different times,
for the different values of {#syntax#}prefix_char{#endsyntax#} provided:
</p>
{#code_begin|syntax#}
{#syntax_block|zig|performFn_1#}
// From the line:
// expect(performFn('t', 1) == 6);
fn performFn(start_value: i32) i32 {
@ -6120,8 +6199,8 @@ fn performFn(start_value: i32) i32 {
result = three(result);
return result;
}
{#code_end#}
{#code_begin|syntax#}
{#end_syntax_block#}
{#syntax_block|zig|performFn_2#}
// From the line:
// expect(performFn('o', 0) == 1);
fn performFn(start_value: i32) i32 {
@ -6129,15 +6208,15 @@ fn performFn(start_value: i32) i32 {
result = one(result);
return result;
}
{#code_end#}
{#code_begin|syntax#}
{#end_syntax_block#}
{#syntax_block|zig|performFn_3#}
// From the line:
// expect(performFn('w', 99) == 99);
fn performFn(start_value: i32) i32 {
var result: i32 = start_value;
return result;
}
{#code_end#}
{#end_syntax_block#}
<p>
Note that this happens even in a debug build; in a release build these generated functions still
pass through rigorous LLVM optimizations. The important thing to note, however, is not that this
@ -6367,11 +6446,11 @@ const Node = struct {
it works fine.
</p>
{#header_close#}
{#header_open|Case Study: printf in Zig#}
{#header_open|Case Study: print in Zig#}
<p>
Putting all of this together, let's see how {#syntax#}printf{#endsyntax#} works in Zig.
Putting all of this together, let's see how {#syntax#}print{#endsyntax#} works in Zig.
</p>
{#code_begin|exe|printf#}
{#code_begin|exe|print#}
const print = @import("std").debug.print;
const a_number: i32 = 1234;
@ -6386,67 +6465,84 @@ pub fn main() void {
Let's crack open the implementation of this and see how it works:
</p>
{#code_begin|syntax#}
/// Calls print and then flushes the buffer.
pub fn printf(self: *Writer, comptime format: []const u8, args: anytype) anyerror!void {
const State = enum {
start,
open_brace,
close_brace,
};
{#code_begin|syntax|poc_print_fn#}
const Writer = struct {
/// Calls print and then flushes the buffer.
pub fn print(self: *Writer, comptime format: []const u8, args: anytype) anyerror!void {
const State = enum {
start,
open_brace,
close_brace,
};
comptime var start_index: usize = 0;
comptime var state = State.start;
comptime var next_arg: usize = 0;
comptime var start_index: usize = 0;
comptime var state = State.start;
comptime var next_arg: usize = 0;
inline for (format) |c, i| {
switch (state) {
State.start => switch (c) {
'{' => {
if (start_index < i) try self.write(format[start_index..i]);
state = State.open_brace;
inline for (format) |c, i| {
switch (state) {
State.start => switch (c) {
'{' => {
if (start_index < i) try self.write(format[start_index..i]);
state = State.open_brace;
},
'}' => {
if (start_index < i) try self.write(format[start_index..i]);
state = State.close_brace;
},
else => {},
},
'}' => {
if (start_index < i) try self.write(format[start_index..i]);
state = State.close_brace;
State.open_brace => switch (c) {
'{' => {
state = State.start;
start_index = i;
},
'}' => {
try self.printValue(args[next_arg]);
next_arg += 1;
state = State.start;
start_index = i + 1;
},
's' => {
continue;
},
else => @compileError("Unknown format character: " ++ [1]u8{c}),
},
else => {},
},
State.open_brace => switch (c) {
'{' => {
state = State.start;
start_index = i;
State.close_brace => switch (c) {
'}' => {
state = State.start;
start_index = i;
},
else => @compileError("Single '}' encountered in format string"),
},
'}' => {
try self.printValue(args[next_arg]);
next_arg += 1;
state = State.start;
start_index = i + 1;
},
else => @compileError("Unknown format character: " ++ c),
},
State.close_brace => switch (c) {
'}' => {
state = State.start;
start_index = i;
},
else => @compileError("Single '}' encountered in format string"),
},
}
}
}
comptime {
if (args.len != next_arg) {
@compileError("Unused arguments");
comptime {
if (args.len != next_arg) {
@compileError("Unused arguments");
}
if (state != State.start) {
@compileError("Incomplete format string: " ++ format);
}
}
if (state != State.Start) {
@compileError("Incomplete format string: " ++ format);
if (start_index < format.len) {
try self.write(format[start_index..format.len]);
}
try self.flush();
}
if (start_index < format.len) {
try self.write(format[start_index..format.len]);
fn write(self: *Writer, value: []const u8) !void {
_ = self;
_ = value;
}
try self.flush();
}
pub fn printValue(self: *Writer, value: anytype) !void {
_ = self;
_ = value;
}
fn flush(self: *Writer) !void {
_ = self;
}
};
{#code_end#}
<p>
This is a proof of concept implementation; the actual function in the standard library has more
@ -6459,8 +6555,8 @@ pub fn printf(self: *Writer, comptime format: []const u8, args: anytype) anyerro
When this function is analyzed from our example code above, Zig partially evaluates the function
and emits a function that actually looks like this:
</p>
{#code_begin|syntax#}
pub fn printf(self: *Writer, arg0: i32, arg1: []const u8) !void {
{#syntax_block|zig|Emitted print Function#}
pub fn print(self: *Writer, arg0: []const u8, arg1: i32) !void {
try self.write("here is a string: '");
try self.printValue(arg0);
try self.write("' here is a number: ");
@ -6468,28 +6564,46 @@ pub fn printf(self: *Writer, arg0: i32, arg1: []const u8) !void {
try self.write("\n");
try self.flush();
}
{#code_end#}
{#end_syntax_block#}
<p>
{#syntax#}printValue{#endsyntax#} is a function that takes a parameter of any type, and does different things depending
on the type:
</p>
{#code_begin|syntax#}
pub fn printValue(self: *Writer, value: anytype) !void {
switch (@typeInfo(@TypeOf(value))) {
.Int => {
return self.printInt(T, value);
},
.Float => {
return self.printFloat(T, value);
},
else => {
@compileError("Unable to print type '" ++ @typeName(T) ++ "'");
},
{#code_begin|syntax|poc_printValue_fn#}
const Writer = struct {
pub fn printValue(self: *Writer, value: anytype) !void {
switch (@typeInfo(@TypeOf(value))) {
.Int => {
return self.writeInt(value);
},
.Float => {
return self.writeFloat(value);
},
.Pointer => {
return self.write(value);
},
else => {
@compileError("Unable to print type '" ++ @typeName(@TypeOf(value)) ++ "'");
},
}
}
}
fn write(self: *Writer, value: []const u8) !void {
_ = self;
_ = value;
}
fn writeInt(self: *Writer, value: anytype) !void {
_ = self;
_ = value;
}
fn writeFloat(self: *Writer, value: anytype) !void {
_ = self;
_ = value;
}
};
{#code_end#}
<p>
And now, what happens if we give too many arguments to {#syntax#}printf{#endsyntax#}?
And now, what happens if we give too many arguments to {#syntax#}print{#endsyntax#}?
</p>
{#code_begin|test_err|Unused argument in 'here is a string: '{s}' here is a number: {}#}
const print = @import("std").debug.print;
@ -6497,7 +6611,7 @@ const print = @import("std").debug.print;
const a_number: i32 = 1234;
const a_string = "foobar";
test "printf too many arguments" {
test "print too many arguments" {
print("here is a string: '{s}' here is a number: {}\n", .{
a_string,
a_number,
@ -6512,7 +6626,7 @@ test "printf too many arguments" {
Zig doesn't care whether the format argument is a string literal,
only that it is a compile-time known value that can be coerced to a {#syntax#}[]const u8{#endsyntax#}:
</p>
{#code_begin|exe|printf#}
{#code_begin|exe|print#}
const print = @import("std").debug.print;
const a_number: i32 = 1234;
@ -7118,16 +7232,6 @@ fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 {
If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
</p>
{#header_close#}
{#header_open|@addWithSaturation#}
<pre>{#syntax#}@addWithSaturation(a: T, b: T) T{#endsyntax#}</pre>
<p>
Returns {#syntax#}a + b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
</p>
<p>
Once <a href="https://github.com/ziglang/zig/issues/1284">Saturating arithmetic</a>.
is completed, the syntax {#syntax#}a +| b{#endsyntax#} will be equivalent to calling {#syntax#}@addWithSaturation(a, b){#endsyntax#}.
</p>
{#header_close#}
{#header_open|@alignCast#}
<pre>{#syntax#}@alignCast(comptime alignment: u29, ptr: anytype) anytype{#endsyntax#}</pre>
<p>
@ -7216,7 +7320,9 @@ fn func(y: *i32) void {
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
{#see_also|@atomicStore|@atomicRmw|@fence|@cmpxchgWeak|@cmpxchgStrong#}
{#header_close#}
{#header_open|@atomicRmw#}
<pre>{#syntax#}@atomicRmw(comptime T: type, ptr: *T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T{#endsyntax#}</pre>
<p>
@ -7242,7 +7348,9 @@ fn func(y: *i32) void {
<li>{#syntax#}.Max{#endsyntax#} - stores the operand if it is larger. Supports integers and floats.</li>
<li>{#syntax#}.Min{#endsyntax#} - stores the operand if it is smaller. Supports integers and floats.</li>
</ul>
{#see_also|@atomicStore|@atomicLoad|@fence|@cmpxchgWeak|@cmpxchgStrong#}
{#header_close#}
{#header_open|@atomicStore#}
<pre>{#syntax#}@atomicStore(comptime T: type, ptr: *T, value: T, comptime ordering: builtin.AtomicOrder) void{#endsyntax#}</pre>
<p>
@ -7252,6 +7360,7 @@ fn func(y: *i32) void {
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
{#see_also|@atomicLoad|@atomicRmw|@fence|@cmpxchgWeak|@cmpxchgStrong#}
{#header_close#}
{#header_open|@bitCast#}
@ -7396,9 +7505,11 @@ fn add(a: i32, b: i32) i32 {
{#syntax#}@call{#endsyntax#} allows more flexibility than normal function call syntax does. The
{#syntax#}CallOptions{#endsyntax#} struct is reproduced here:
</p>
{#code_begin|syntax#}
{#syntax_block|zig|builtin.CallOptions struct#}
pub const CallOptions = struct {
modifier: Modifier = .auto,
/// Only valid when `Modifier` is `Modifier.async_kw`.
stack: ?[]align(std.Target.stack_align) u8 = null,
pub const Modifier = enum {
@ -7435,7 +7546,7 @@ pub const CallOptions = struct {
compile_time,
};
};
{#code_end#}
{#end_syntax_block#}
{#header_close#}
{#header_open|@cDefine#}
@ -7540,15 +7651,16 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
an integer or an enum.
</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgWeak#}
{#see_also|@atomicStore|@atomicLoad|@atomicRmw|@fence|@cmpxchgWeak#}
{#header_close#}
{#header_open|@cmpxchgWeak#}
<pre>{#syntax#}@cmpxchgWeak(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T{#endsyntax#}</pre>
<p>
This function performs a weak atomic compare exchange operation. It's the equivalent of this code,
except atomic:
</p>
{#code_begin|syntax#}
{#syntax_block|zig|cmpxchgWeakButNotAtomic#}
fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T {
const old_value = ptr.*;
if (old_value == expected_value and usuallyTrueButSometimesFalse()) {
@ -7558,7 +7670,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
return old_value;
}
}
{#code_end#}
{#end_syntax_block#}
<p>
If you are using cmpxchg in a loop, the sporadic failure will be no problem, and {#syntax#}cmpxchgWeak{#endsyntax#}
is the better choice, because it can be implemented more efficiently in machine instructions.
@ -7569,7 +7681,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
an integer or an enum.
</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgStrong#}
{#see_also|@atomicStore|@atomicLoad|@atomicRmw|@fence|@cmpxchgStrong#}
{#header_close#}
{#header_open|@compileError#}
@ -7617,7 +7729,7 @@ test "main" {
}
{#code_end#}
<p>
will ouput:
will output:
</p>
<p>
If all {#syntax#}@compileLog{#endsyntax#} calls are removed or
@ -7734,7 +7846,7 @@ test "main" {
the tag value is used as the enumeration value.
</p>
<p>
If there is only one possible enum value, the resut is a {#syntax#}comptime_int{#endsyntax#}
If there is only one possible enum value, the result is a {#syntax#}comptime_int{#endsyntax#}
known at {#link|comptime#}.
</p>
{#see_also|@intToEnum#}
@ -7849,7 +7961,7 @@ export fn @"A function name that is a complete sentence."() void {}
<p>
{#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("std").builtin.AtomicOrder{#endsyntax#}.
</p>
{#see_also|Compile Variables#}
{#see_also|@atomicStore|@atomicLoad|@atomicRmw|@cmpxchgWeak|@cmpxchgStrong#}
{#header_close#}
{#header_open|@field#}
@ -8241,22 +8353,6 @@ test "@wasmMemoryGrow" {
</p>
{#header_close#}
{#header_open|@mulWithSaturation#}
<pre>{#syntax#}@mulWithSaturation(a: T, b: T) T{#endsyntax#}</pre>
<p>
Returns {#syntax#}a * b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
</p>
<p>
Once <a href="https://github.com/ziglang/zig/issues/1284">Saturating arithmetic</a>.
is completed, the syntax {#syntax#}a *| b{#endsyntax#} will be equivalent to calling {#syntax#}@mulWithSaturation(a, b){#endsyntax#}.
</p>
<p>
NOTE: Currently there is a bug in the llvm.smul.fix.sat intrinsic which affects {#syntax#}@mulWithSaturation{#endsyntax#} of signed integers.
This may result in an incorrect sign bit when there is overflow. This will be fixed in zig's 0.9.0 release.
Check <a href="https://github.com/ziglang/zig/issues/9643">this issue</a> for more information.
</p>
{#header_close#}
{#header_open|@panic#}
<pre>{#syntax#}@panic(message: []const u8) noreturn{#endsyntax#}</pre>
<p>
@ -8474,14 +8570,16 @@ test "@setRuntimeSafety" {
{#header_open|@shlExact#}
<pre>{#syntax#}@shlExact(value: T, shift_amt: Log2T) T{#endsyntax#}</pre>
<p>
Performs the left shift operation ({#syntax#}<<{#endsyntax#}). Caller guarantees
that the shift will not shift any 1 bits out.
Performs the left shift operation ({#syntax#}<<{#endsyntax#}).
For unsigned integers, the result is {#link|undefined#} if any 1 bits
are shifted out. For signed integers, the result is {#link|undefined#} if
any bits that disagree with the resultant sign bit are shifted out.
</p>
<p>
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
</p>
{#see_also|@shrExact|@shlWithOverflow|@shlWithSaturation#}
{#see_also|@shrExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shlWithOverflow#}
@ -8495,24 +8593,9 @@ test "@setRuntimeSafety" {
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
</p>
{#see_also|@shlExact|@shrExact|@shlWithSaturation#}
{#see_also|@shlExact|@shrExact#}
{#header_close#}
{#header_open|@shlWithSaturation#}
<pre>{#syntax#}@shlWithSaturation(a: T, shift_amt: T) T{#endsyntax#}</pre>
<p>
Returns {#syntax#}a << b{#endsyntax#}. The result will be clamped between type minimum and maximum.
</p>
<p>
Once <a href="https://github.com/ziglang/zig/issues/1284">Saturating arithmetic</a>.
is completed, the syntax {#syntax#}a <<| b{#endsyntax#} will be equivalent to calling {#syntax#}@shlWithSaturation(a, b){#endsyntax#}.
</p>
<p>
Unlike other @shl builtins, shift_amt doesn't need to be a Log2T as saturated overshifting is well defined.
</p>
{#see_also|@shlExact|@shrExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shrExact#}
<pre>{#syntax#}@shrExact(value: T, shift_amt: Log2T) T{#endsyntax#}</pre>
<p>
@ -8523,7 +8606,7 @@ test "@setRuntimeSafety" {
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
</p>
{#see_also|@shlExact|@shlWithOverflow|@shlWithSaturation#}
{#see_also|@shlExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shuffle#}
@ -8684,7 +8767,7 @@ fn doTheTest() !void {
{#header_open|@sin#}
<pre>{#syntax#}@sin(value: anytype) @TypeOf(value){#endsyntax#}</pre>
<p>
Sine trigometric function on a floating point number. Uses a dedicated hardware instruction
Sine trigonometric function on a floating point number. Uses a dedicated hardware instruction
when available.
</p>
<p>
@ -8695,7 +8778,7 @@ fn doTheTest() !void {
{#header_open|@cos#}
<pre>{#syntax#}@cos(value: anytype) @TypeOf(value){#endsyntax#}</pre>
<p>
Cosine trigometric function on a floating point number. Uses a dedicated hardware instruction
Cosine trigonometric function on a floating point number. Uses a dedicated hardware instruction
when available.
</p>
<p>
@ -8783,7 +8866,7 @@ fn doTheTest() !void {
{#header_open|@ceil#}
<pre>{#syntax#}@ceil(value: anytype) @TypeOf(value){#endsyntax#}</pre>
<p>
Returns the largest integral value not less than the given floating point number.
Returns the smallest integral value not less than the given floating point number.
Uses a dedicated hardware instruction when available.
</p>
<p>
@ -8823,17 +8906,6 @@ fn doTheTest() !void {
</p>
{#header_close#}
{#header_open|@subWithSaturation#}
<pre>{#syntax#}@subWithSaturation(a: T, b: T) T{#endsyntax#}</pre>
<p>
Returns {#syntax#}a - b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
</p>
<p>
Once <a href="https://github.com/ziglang/zig/issues/1284">Saturating arithmetic</a>.
is completed, the syntax {#syntax#}a -| b{#endsyntax#} will be equivalent to calling {#syntax#}@subWithSaturation(a, b){#endsyntax#}.
</p>
{#header_close#}
{#header_open|@tagName#}
<pre>{#syntax#}@tagName(value: anytype) [:0]const u8{#endsyntax#}</pre>
<p>
@ -10153,7 +10225,7 @@ pub fn main() void {
This expression is evaluated at compile-time and is used to control
preprocessor directives and include multiple <code class="file">.h</code> files:
</p>
{#code_begin|syntax#}
{#syntax_block|zig|@cImport Expression#}
const builtin = @import("builtin");
const c = @cImport({
@ -10167,7 +10239,7 @@ const c = @cImport({
}
@cInclude("soundio.h");
});
{#code_end#}
{#end_syntax_block#}
{#see_also|@cImport|@cInclude|@cDefine|@cUndef|@import#}
{#header_close#}
@ -10273,7 +10345,7 @@ pub fn main() void {
<p>
Some C constructs cannot be translated to Zig - for example, <em>goto</em>,
structs with bitfields, and token-pasting macros. Zig employs <em>demotion</em> to allow translation
to continue in the face of non-translateable entities.
to continue in the face of non-translatable entities.
</p>
<p>
Demotion comes in three varieties - {#link|opaque#}, <em>extern</em>, and
@ -10283,13 +10355,13 @@ pub fn main() void {
Functions that contain opaque types or code constructs that cannot be translated will be demoted
to {#syntax#}extern{#endsyntax#} declarations.
Thus, non-translateable types can still be used as pointers, and non-translateable functions
Thus, non-translatable types can still be used as pointers, and non-translatable functions
can be called so long as the linker is aware of the compiled function.
</p>
<p>
{#syntax#}@compileError{#endsyntax#} is used when top-level definitions (global variables,
function prototypes, macros) cannot be translated or demoted. Since Zig uses lazy analysis for
top-level declarations, untranslateable entities will not cause a compile error in your code unless
top-level declarations, untranslatable entities will not cause a compile error in your code unless
you actually use them.
</p>
{#see_also|opaque|extern|@compileError#}
@ -10301,7 +10373,7 @@ pub fn main() void {
can be translated to Zig. Macros that cannot be translated will be be demoted to
{#syntax#}@compileError{#endsyntax#}. Note that C code which <em>uses</em> macros will be
translated without any additional issues (since Zig operates on the pre-processed source
with macros expanded). It is merely the macros themselves which may not be translateable to
with macros expanded). It is merely the macros themselves which may not be translatable to
Zig.
</p>
<p>Consider the following example:</p>
@ -10321,7 +10393,7 @@ pub export fn foo() c_int {
}
pub const MAKELOCAL = @compileError("unable to translate C expr: unexpected token .Equal"); // macro.c:1:9
{#code_end#}
<p>Note that {#syntax#}foo{#endsyntax#} was translated correctly despite using a non-translateable
<p>Note that {#syntax#}foo{#endsyntax#} was translated correctly despite using a non-translatable
macro. {#syntax#}MAKELOCAL{#endsyntax#} was demoted to {#syntax#}@compileError{#endsyntax#} since
it cannot be expressed as a Zig function; this simply means that you cannot directly use
{#syntax#}MAKELOCAL{#endsyntax#} from Zig.
@ -10491,8 +10563,8 @@ const typedArray = new Uint8Array(source);
WebAssembly.instantiate(typedArray, {
env: {
print: (result) =&gt; { console.log(`The result is ${result}`); }
}}).then(result =&gt; {
print: (result) => { console.log(`The result is ${result}`); }
}}).then(result => {
const add = result.instance.exports.add;
add(1, 2);
});{#end_syntax_block#}
@ -11787,6 +11859,7 @@ AssignOp
/ PLUSEQUAL
/ MINUSEQUAL
/ LARROW2EQUAL
/ LARROW2PIPEEQUAL
/ RARROW2EQUAL
/ AMPERSANDEQUAL
/ CARETEQUAL
@ -11821,6 +11894,8 @@ AdditionOp
/ PLUS2
/ PLUSPERCENT
/ MINUSPERCENT
/ PLUSPIPE
/ MINUSPIPE
MultiplyOp
&lt;- PIPE2
@ -11829,6 +11904,7 @@ MultiplyOp
/ PERCENT
/ ASTERISK2
/ ASTERISKPERCENT
/ ASTERISKPIPE
PrefixOp
&lt;- EXCLAMATIONMARK
@ -11992,6 +12068,8 @@ ASTERISK2 &lt;- '**' skip
ASTERISKEQUAL &lt;- '*=' skip
ASTERISKPERCENT &lt;- '*%' ![=] skip
ASTERISKPERCENTEQUAL &lt;- '*%=' skip
ASTERISKPIPE &lt;- '*|' ![=] skip
ASTERISKPIPEEQUAL &lt;- '*|=' skip
CARET &lt;- '^' ![=] skip
CARETEQUAL &lt;- '^=' skip
COLON &lt;- ':' skip
@ -12008,6 +12086,8 @@ EXCLAMATIONMARK &lt;- '!' ![=] skip
EXCLAMATIONMARKEQUAL &lt;- '!=' skip
LARROW &lt;- '&lt;' ![&lt;=] skip
LARROW2 &lt;- '&lt;&lt;' ![=] skip
LARROW2PIPE &lt;- '&lt;&lt;|' ![=] skip
LARROW2PIPEEQUAL &lt;- '&lt;&lt;|=' ![=] skip
LARROW2EQUAL &lt;- '&lt;&lt;=' skip
LARROWEQUAL &lt;- '&lt;=' skip
LBRACE &lt;- '{' skip
@ -12017,6 +12097,8 @@ MINUS &lt;- '-' ![%=&gt;] skip
MINUSEQUAL &lt;- '-=' skip
MINUSPERCENT &lt;- '-%' ![=] skip
MINUSPERCENTEQUAL &lt;- '-%=' skip
MINUSPIPE &lt;- '-|' ![=] skip
MINUSPIPEEQUAL &lt;- '-|=' skip
MINUSRARROW &lt;- '-&gt;' skip
PERCENT &lt;- '%' ![=] skip
PERCENTEQUAL &lt;- '%=' skip
@ -12028,6 +12110,8 @@ PLUS2 &lt;- '++' skip
PLUSEQUAL &lt;- '+=' skip
PLUSPERCENT &lt;- '+%' ![=] skip
PLUSPERCENTEQUAL &lt;- '+%=' skip
PLUSPIPE &lt;- '+|' ![=] skip
PLUSPIPEEQUAL &lt;- '+|=' skip
LETTERC &lt;- 'c' skip
QUESTIONMARK &lt;- '?' skip
RARROW &lt;- '&gt;' ![&gt;=] skip

50
lib/libc/mingw/stdio/fseeki64.c vendored Normal file
View file

@ -0,0 +1,50 @@
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <stdio.h>
#include <io.h>
#include <errno.h>
#if !defined(__arm__) && !defined(__aarch64__) /* we have F_ARM_ANY(_fseeki64) in msvcrt.def.in */
int __cdecl _fseeki64(FILE* stream, __int64 offset, int whence)
{
fpos_t pos;
if (whence == SEEK_CUR)
{
/* If stream is invalid, fgetpos sets errno. */
if (fgetpos (stream, &pos))
return (-1);
pos += (fpos_t) offset;
}
else if (whence == SEEK_END)
{
/* If writing, we need to flush before getting file length. */
fflush (stream);
pos = (fpos_t) (_filelengthi64 (_fileno (stream)) + offset);
}
else if (whence == SEEK_SET)
pos = (fpos_t) offset;
else
{
errno = EINVAL;
return (-1);
}
return fsetpos (stream, &pos);
}
int __cdecl (*__MINGW_IMP_SYMBOL(_fseeki64))(FILE*, __int64, int) = _fseeki64;
#endif /* !defined(__arm__) && !defined(__aarch64__) */
__int64 __cdecl _ftelli64(FILE* stream)
{
fpos_t pos;
if (fgetpos (stream, &pos))
return -1LL;
else
return (__int64) pos;
}
__int64 __cdecl (*__MINGW_IMP_SYMBOL(_ftelli64))(FILE*) = _ftelli64;

View file

@ -41,6 +41,7 @@ pub const max_name_len = switch (target.os.tag) {
.netbsd => 31,
.freebsd => 15,
.openbsd => 31,
.solaris => 31,
else => 0,
};
@ -112,7 +113,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
else => |e| return os.unexpectedErrno(e),
}
},
.netbsd => if (use_pthreads) {
.netbsd, .solaris => if (use_pthreads) {
const err = std.c.pthread_setname_np(self.getHandle(), name_with_terminator.ptr, null);
switch (err) {
.SUCCESS => return,
@ -202,7 +203,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
else => |e| return os.unexpectedErrno(e),
}
},
.netbsd => if (use_pthreads) {
.netbsd, .solaris => if (use_pthreads) {
const err = std.c.pthread_getname_np(self.getHandle(), buffer.ptr, max_name_len + 1);
switch (err) {
.SUCCESS => return std.mem.sliceTo(buffer, 0),
@ -565,6 +566,16 @@ const PosixThreadImpl = struct {
};
return @intCast(usize, count);
},
.solaris => {
// The "proper" way to get the cpu count would be to query
// /dev/kstat via ioctls, and traverse a linked list for each
// cpu.
const rc = c.sysconf(os._SC.NPROCESSORS_ONLN);
return switch (os.errno(rc)) {
.SUCCESS => @intCast(usize, rc),
else => |err| os.unexpectedErrno(err),
};
},
.haiku => {
var count: u32 = undefined;
var system_info: os.system_info = undefined;

View file

@ -54,7 +54,7 @@ pub const WindowsCondition = struct {
pub fn wait(cond: *WindowsCondition, mutex: *Mutex) void {
const rc = windows.kernel32.SleepConditionVariableSRW(
&cond.cond,
&mutex.srwlock,
&mutex.impl.srwlock,
windows.INFINITE,
@as(windows.ULONG, 0),
);

View file

@ -33,17 +33,29 @@ const testing = std.testing;
const StaticResetEvent = std.thread.StaticResetEvent;
/// Try to acquire the mutex without blocking. Returns `null` if the mutex is
/// unavailable. Otherwise returns `Held`. Call `release` on `Held`.
pub fn tryAcquire(m: *Mutex) ?Impl.Held {
/// unavailable. Otherwise returns `Held`. Call `release` on `Held`, or use
/// releaseDirect().
pub fn tryAcquire(m: *Mutex) ?Held {
return m.impl.tryAcquire();
}
/// Acquire the mutex. Deadlocks if the mutex is already
/// held by the calling thread.
pub fn acquire(m: *Mutex) Impl.Held {
pub fn acquire(m: *Mutex) Held {
return m.impl.acquire();
}
/// Release the mutex. Prefer Held.release() if available.
pub fn releaseDirect(m: *Mutex) void {
return m.impl.releaseDirect();
}
/// A held mutex handle. Call release to allow other threads to
/// take the mutex. Do not call release() more than once.
/// For more complex scenarios, this handle can be discarded
/// and Mutex.releaseDirect can be called instead.
pub const Held = Impl.Held;
const Impl = if (builtin.single_threaded)
Dummy
else if (builtin.os.tag == .windows)
@ -53,6 +65,32 @@ else if (std.Thread.use_pthreads)
else
AtomicMutex;
fn HeldInterface(comptime MutexType: type) type {
return struct {
const Mixin = @This();
pub const Held = struct {
mutex: *MutexType,
pub fn release(held: Mixin.Held) void {
held.mutex.releaseDirect();
}
};
pub fn tryAcquire(m: *MutexType) ?Mixin.Held {
if (m.tryAcquireDirect()) {
return Mixin.Held{ .mutex = m };
} else {
return null;
}
}
pub fn acquire(m: *MutexType) Mixin.Held {
m.acquireDirect();
return Mixin.Held{ .mutex = m };
}
};
}
pub const AtomicMutex = struct {
state: State = .unlocked,
@ -62,39 +100,32 @@ pub const AtomicMutex = struct {
waiting,
};
pub const Held = struct {
mutex: *AtomicMutex,
pub usingnamespace HeldInterface(@This());
pub fn release(held: Held) void {
switch (@atomicRmw(State, &held.mutex.state, .Xchg, .unlocked, .Release)) {
.unlocked => unreachable,
.locked => {},
.waiting => held.mutex.unlockSlow(),
}
}
};
pub fn tryAcquire(m: *AtomicMutex) ?Held {
if (@cmpxchgStrong(
fn tryAcquireDirect(m: *AtomicMutex) bool {
return @cmpxchgStrong(
State,
&m.state,
.unlocked,
.locked,
.Acquire,
.Monotonic,
) == null) {
return Held{ .mutex = m };
} else {
return null;
}
) == null;
}
pub fn acquire(m: *AtomicMutex) Held {
fn acquireDirect(m: *AtomicMutex) void {
switch (@atomicRmw(State, &m.state, .Xchg, .locked, .Acquire)) {
.unlocked => {},
else => |s| m.lockSlow(s),
}
return Held{ .mutex = m };
}
fn releaseDirect(m: *AtomicMutex) void {
switch (@atomicRmw(State, &m.state, .Xchg, .unlocked, .Release)) {
.unlocked => unreachable,
.locked => {},
.waiting => m.unlockSlow(),
}
}
fn lockSlow(m: *AtomicMutex, current_state: State) void {
@ -171,36 +202,20 @@ pub const AtomicMutex = struct {
pub const PthreadMutex = struct {
pthread_mutex: std.c.pthread_mutex_t = .{},
pub const Held = struct {
mutex: *PthreadMutex,
pub usingnamespace HeldInterface(@This());
pub fn release(held: Held) void {
switch (std.c.pthread_mutex_unlock(&held.mutex.pthread_mutex)) {
.SUCCESS => return,
.INVAL => unreachable,
.AGAIN => unreachable,
.PERM => unreachable,
else => unreachable,
}
}
};
/// Try to acquire the mutex without blocking. Returns null if
/// the mutex is unavailable. Otherwise returns Held. Call
/// release on Held.
pub fn tryAcquire(m: *PthreadMutex) ?Held {
if (std.c.pthread_mutex_trylock(&m.pthread_mutex) == .SUCCESS) {
return Held{ .mutex = m };
} else {
return null;
}
/// Try to acquire the mutex without blocking. Returns true if
/// the mutex is unavailable. Otherwise returns false. Call
/// release when done.
fn tryAcquireDirect(m: *PthreadMutex) bool {
return std.c.pthread_mutex_trylock(&m.pthread_mutex) == .SUCCESS;
}
/// Acquire the mutex. Will deadlock if the mutex is already
/// held by the calling thread.
pub fn acquire(m: *PthreadMutex) Held {
fn acquireDirect(m: *PthreadMutex) void {
switch (std.c.pthread_mutex_lock(&m.pthread_mutex)) {
.SUCCESS => return Held{ .mutex = m },
.SUCCESS => {},
.INVAL => unreachable,
.BUSY => unreachable,
.AGAIN => unreachable,
@ -209,6 +224,16 @@ pub const PthreadMutex = struct {
else => unreachable,
}
}
fn releaseDirect(m: *PthreadMutex) void {
switch (std.c.pthread_mutex_unlock(&m.pthread_mutex)) {
.SUCCESS => return,
.INVAL => unreachable,
.AGAIN => unreachable,
.PERM => unreachable,
else => unreachable,
}
}
};
/// This has the sematics as `Mutex`, however it does not actually do any
@ -216,58 +241,50 @@ pub const PthreadMutex = struct {
pub const Dummy = struct {
lock: @TypeOf(lock_init) = lock_init,
pub usingnamespace HeldInterface(@This());
const lock_init = if (std.debug.runtime_safety) false else {};
pub const Held = struct {
mutex: *Dummy,
pub fn release(held: Held) void {
if (std.debug.runtime_safety) {
held.mutex.lock = false;
}
}
};
/// Try to acquire the mutex without blocking. Returns null if
/// the mutex is unavailable. Otherwise returns Held. Call
/// release on Held.
pub fn tryAcquire(m: *Dummy) ?Held {
/// Try to acquire the mutex without blocking. Returns false if
/// the mutex is unavailable. Otherwise returns true.
fn tryAcquireDirect(m: *Dummy) bool {
if (std.debug.runtime_safety) {
if (m.lock) return null;
if (m.lock) return false;
m.lock = true;
}
return Held{ .mutex = m };
return true;
}
/// Acquire the mutex. Will deadlock if the mutex is already
/// held by the calling thread.
pub fn acquire(m: *Dummy) Held {
return m.tryAcquire() orelse @panic("deadlock detected");
fn acquireDirect(m: *Dummy) void {
if (!m.tryAcquireDirect()) {
@panic("deadlock detected");
}
}
fn releaseDirect(m: *Dummy) void {
if (std.debug.runtime_safety) {
m.lock = false;
}
}
};
const WindowsMutex = struct {
srwlock: windows.SRWLOCK = windows.SRWLOCK_INIT,
pub const Held = struct {
mutex: *WindowsMutex,
pub usingnamespace HeldInterface(@This());
pub fn release(held: Held) void {
windows.kernel32.ReleaseSRWLockExclusive(&held.mutex.srwlock);
}
};
pub fn tryAcquire(m: *WindowsMutex) ?Held {
if (windows.kernel32.TryAcquireSRWLockExclusive(&m.srwlock) != windows.FALSE) {
return Held{ .mutex = m };
} else {
return null;
}
fn tryAcquireDirect(m: *WindowsMutex) bool {
return windows.kernel32.TryAcquireSRWLockExclusive(&m.srwlock) != windows.FALSE;
}
pub fn acquire(m: *WindowsMutex) Held {
fn acquireDirect(m: *WindowsMutex) void {
windows.kernel32.AcquireSRWLockExclusive(&m.srwlock);
return Held{ .mutex = m };
}
fn releaseDirect(m: *WindowsMutex) void {
windows.kernel32.ReleaseSRWLockExclusive(&m.srwlock);
}
};

View file

@ -90,7 +90,7 @@ pub fn ArrayHashMap(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureCapacity` was previously used.
/// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const Entry = Unmanaged.Entry;
/// A KV pair which has been copied out of the backing store
@ -110,7 +110,7 @@ pub fn ArrayHashMap(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureCapacity` was previously used.
/// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const GetOrPutResult = Unmanaged.GetOrPutResult;
/// An Iterator over Entry pointers.
@ -478,7 +478,7 @@ pub fn ArrayHashMapUnmanaged(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureCapacity` was previously used.
/// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const Entry = struct {
key_ptr: *K,
value_ptr: *V,
@ -509,7 +509,7 @@ pub fn ArrayHashMapUnmanaged(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureCapacity` was previously used.
/// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const GetOrPutResult = struct {
key_ptr: *K,
value_ptr: *V,
@ -759,20 +759,20 @@ pub fn ArrayHashMapUnmanaged(
}
pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
if (new_capacity <= linear_scan_max) {
try self.entries.ensureCapacity(allocator, new_capacity);
try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;
}
if (self.index_header) |header| {
if (new_capacity <= header.capacity()) {
try self.entries.ensureCapacity(allocator, new_capacity);
try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;
}
}
const new_bit_index = try IndexHeader.findBitIndex(new_capacity);
const new_header = try IndexHeader.alloc(allocator, new_bit_index);
try self.entries.ensureCapacity(allocator, new_capacity);
try self.entries.ensureTotalCapacity(allocator, new_capacity);
if (self.index_header) |old_header| old_header.free(allocator);
self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
@ -1441,7 +1441,7 @@ pub fn ArrayHashMapUnmanaged(
unreachable;
}
/// Must ensureCapacity before calling this.
/// Must `ensureTotalCapacity`/`ensureUnusedCapacity` before calling this.
fn getOrPutInternal(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) GetOrPutResult {
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
@ -1485,7 +1485,7 @@ pub fn ArrayHashMapUnmanaged(
}
// This pointer survives the following append because we call
// entries.ensureCapacity before getOrPutInternal.
// entries.ensureTotalCapacity before getOrPutInternal.
const hash_match = if (store_hash) h == hashes_array[slot_data.entry_index] else true;
if (hash_match and checkedEql(ctx, key, keys_array[slot_data.entry_index])) {
return .{
@ -1946,7 +1946,7 @@ test "iterator hash map" {
var reset_map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer reset_map.deinit();
// test ensureCapacity with a 0 parameter
// test ensureTotalCapacity with a 0 parameter
try reset_map.ensureTotalCapacity(0);
try reset_map.putNoClobber(0, 11);

View file

@ -684,7 +684,11 @@ pub const Builder = struct {
);
const mcpu = self.option([]const u8, "cpu", "Target CPU features to add or subtract");
const triple = maybe_triple orelse return args.default_target;
if (maybe_triple == null and mcpu == null) {
return args.default_target;
}
const triple = maybe_triple orelse "native";
var diags: CrossTarget.ParseOptions.Diagnostics = .{};
const selected_target = CrossTarget.parse(.{
@ -2432,11 +2436,8 @@ pub const LibExeObjStep = struct {
if (populated_cpu_features.eql(cross.cpu.features)) {
// The CPU name alone is sufficient.
// If it is the baseline CPU, no command line args are required.
if (cross.cpu.model != std.Target.Cpu.baseline(cross.cpu.arch).model) {
try zig_args.append("-mcpu");
try zig_args.append(cross.cpu.model.name);
}
try zig_args.append("-mcpu");
try zig_args.append(cross.cpu.model.name);
} else {
var mcpu_buffer = std.ArrayList(u8).init(builder.allocator);

View file

@ -166,6 +166,15 @@ pub const CallingConvention = enum {
SysV,
};
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const AddressSpace = enum {
generic,
gs,
fs,
ss,
};
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const SourceLocation = struct {
@ -226,6 +235,7 @@ pub const TypeInfo = union(enum) {
is_const: bool,
is_volatile: bool,
alignment: comptime_int,
address_space: AddressSpace,
child: type,
is_allowzero: bool,
@ -700,7 +710,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace) noreturn
},
else => {
const first_trace_addr = @returnAddress();
std.debug.panicExtra(error_return_trace, first_trace_addr, "{s}", .{msg});
std.debug.panicImpl(error_return_trace, first_trace_addr, msg);
},
}
}

View file

@ -252,6 +252,33 @@ pub extern "c" fn kevent(
timeout: ?*const c.timespec,
) c_int;
pub extern "c" fn port_create() c.port_t;
pub extern "c" fn port_associate(
port: c.port_t,
source: u32,
object: usize,
events: u32,
user_var: ?*c_void,
) c_int;
pub extern "c" fn port_dissociate(port: c.port_t, source: u32, object: usize) c_int;
pub extern "c" fn port_send(port: c.port_t, events: u32, user_var: ?*c_void) c_int;
pub extern "c" fn port_sendn(
ports: [*]c.port_t,
errors: []u32,
num_ports: u32,
events: u32,
user_var: ?*c_void,
) c_int;
pub extern "c" fn port_get(port: c.port_t, event: *c.port_event, timeout: ?*c.timespec) c_int;
pub extern "c" fn port_getn(
port: c.port_t,
event_list: []c.port_event,
max_events: u32,
events_retrieved: *u32,
timeout: ?*c.timespec,
) c_int;
pub extern "c" fn port_alert(port: c.port_t, flags: u32, events: u32, user_var: ?*c_void) c_int;
pub extern "c" fn getaddrinfo(
noalias node: ?[*:0]const u8,
noalias service: ?[*:0]const u8,

View file

@ -291,6 +291,7 @@ pub const sockaddr = extern struct {
family: sa_family_t,
data: [14]u8,
pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {
len: u8 = @sizeOf(in),

View file

@ -465,6 +465,7 @@ pub const sockaddr = extern struct {
family: u8,
data: [14]u8,
pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {

View file

@ -323,6 +323,7 @@ pub const sockaddr = extern struct {
/// actually longer; address value
data: [14]u8,
pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {

View file

@ -339,6 +339,7 @@ pub const sockaddr = extern struct {
/// actually longer; address value
data: [14]u8,
pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {

View file

@ -476,6 +476,7 @@ pub const sockaddr = extern struct {
/// actually longer; address value
data: [14]u8,
pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {

View file

@ -279,6 +279,7 @@ pub const sockaddr = extern struct {
/// actually longer; address value
data: [14]u8,
pub const SS_MAXSIZE = 256;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {

File diff suppressed because it is too large Load diff

View file

@ -195,7 +195,7 @@ pub const ChildProcess = struct {
};
var dead_fds: usize = 0;
// We ask for ensureCapacity with this much extra space. This has more of an
// We ask for ensureTotalCapacity with this much extra space. This has more of an
// effect on small reads because once the reads start to get larger the amount
// of space an ArrayList will allocate grows exponentially.
const bump_amt = 512;
@ -215,7 +215,7 @@ pub const ChildProcess = struct {
if (poll_fds[0].revents & os.POLL.IN != 0) {
// stdout is ready.
const new_capacity = std.math.min(stdout.items.len + bump_amt, max_output_bytes);
try stdout.ensureCapacity(new_capacity);
try stdout.ensureTotalCapacity(new_capacity);
const buf = stdout.unusedCapacitySlice();
if (buf.len == 0) return error.StdoutStreamTooLong;
const nread = try os.read(poll_fds[0].fd, buf);
@ -230,7 +230,7 @@ pub const ChildProcess = struct {
if (poll_fds[1].revents & os.POLL.IN != 0) {
// stderr is ready.
const new_capacity = std.math.min(stderr.items.len + bump_amt, max_output_bytes);
try stderr.ensureCapacity(new_capacity);
try stderr.ensureTotalCapacity(new_capacity);
const buf = stderr.unusedCapacitySlice();
if (buf.len == 0) return error.StderrStreamTooLong;
const nread = try os.read(poll_fds[1].fd, buf);
@ -276,7 +276,8 @@ pub const ChildProcess = struct {
// Windows Async IO requires an initial call to ReadFile before waiting on the handle
for ([_]u1{ 0, 1 }) |i| {
try outs[i].ensureCapacity(bump_amt);
const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
try outs[i].ensureTotalCapacity(new_capacity);
const buf = outs[i].unusedCapacitySlice();
_ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
wait_objects[wait_object_count] = handles[i];
@ -318,7 +319,7 @@ pub const ChildProcess = struct {
outs[i].items.len += read_bytes;
const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
try outs[i].ensureCapacity(new_capacity);
try outs[i].ensureTotalCapacity(new_capacity);
const buf = outs[i].unusedCapacitySlice();
if (buf.len == 0) return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong;
_ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);

View file

@ -277,7 +277,7 @@ pub const Coff = struct {
if (self.sections.items.len == self.coff_header.number_of_sections)
return;
try self.sections.ensureCapacity(self.coff_header.number_of_sections);
try self.sections.ensureTotalCapacity(self.coff_header.number_of_sections);
const in = self.in_file.reader();

View file

@ -58,8 +58,10 @@ const Huffman = struct {
}
// All zero.
if (self.count[0] == code_length.len)
if (self.count[0] == code_length.len) {
self.min_code_len = 0;
return;
}
var left: isize = 1;
for (self.count[1..]) |val| {
@ -280,7 +282,7 @@ pub fn InflateStream(comptime ReaderType: type) type {
return self.bits & mask;
}
fn readBits(self: *Self, bits: usize) !u32 {
const val = self.peekBits(bits);
const val = try self.peekBits(bits);
self.discardBits(bits);
return val;
}
@ -487,6 +489,8 @@ pub fn InflateStream(comptime ReaderType: type) type {
// We can't read PREFIX_LUT_BITS as we don't want to read past the
// deflate stream end, use an incremental approach instead.
var code_len = h.min_code_len;
if (code_len == 0)
return error.OutOfCodes;
while (true) {
_ = try self.peekBits(code_len);
// Small optimization win, use as many bits as possible in the
@ -658,11 +662,27 @@ test "lengths overflow" {
// f dy hlit hdist hclen 16 17 18 0 (18) x138 (18) x138 (18) x39 (16) x6
// 1 10 11101 11101 0000 010 010 010 010 (11) 1111111 (11) 1111111 (11) 0011100 (01) 11
const stream = [_]u8{ 0b11101101, 0b00011101, 0b00100100, 0b11101001, 0b11111111, 0b11111111, 0b00111001, 0b00001110 };
const reader = std.io.fixedBufferStream(&stream).reader();
var window: [0x8000]u8 = undefined;
var inflate = inflateStream(reader, &window);
var buf: [1]u8 = undefined;
try std.testing.expectError(error.InvalidLength, inflate.read(&buf));
try std.testing.expectError(error.InvalidLength, testInflate(stream[0..]));
}
test "empty distance alphabet" {
// dynamic block with empty distance alphabet is valid if end of data symbol is used immediately
// f dy hlit hdist hclen 16 17 18 0 8 7 9 6 10 5 11 4 12 3 13 2 14 1 15 (18) x128 (18) x128 (1) ( 0) (256)
// 1 10 00000 00000 1111 000 000 010 010 000 000 000 000 000 000 000 000 000 000 000 000 000 001 000 (11) 1110101 (11) 1110101 (0) (10) (0)
const stream = [_]u8{ 0b00000101, 0b11100000, 0b00000001, 0b00001001, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00010000, 0b01011100, 0b10111111, 0b00101110 };
try testInflate(stream[0..]);
}
test "inflateStream fuzzing" {
// see https://github.com/ziglang/zig/issues/9842
try std.testing.expectError(error.EndOfStream, testInflate("\x950000"));
try std.testing.expectError(error.OutOfCodes, testInflate("\x950\x00\x0000000"));
}
fn testInflate(data: []const u8) !void {
var window: [0x8000]u8 = undefined;
const reader = std.io.fixedBufferStream(data).reader();
var inflate = inflateStream(reader, &window);
var inflated = try inflate.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
defer std.testing.allocator.free(inflated);
}

View file

@ -1,4 +1,4 @@
// zig run benchmark.zig --release-fast --zig-lib-dir ..
// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
const std = @import("../std.zig");
const builtin = std.builtin;

View file

@ -398,10 +398,10 @@ pub const Blake3 = struct {
return Blake3.init_internal(context_key_words, DERIVE_KEY_MATERIAL);
}
pub fn hash(in: []const u8, out: []u8, options: Options) void {
var hasher = Blake3.init(options);
hasher.update(in);
hasher.final(out);
pub fn hash(b: []const u8, out: []u8, options: Options) void {
var d = Blake3.init(options);
d.update(b);
d.final(out);
}
fn pushCv(self: *Blake3, cv: [8]u32) void {

View file

@ -228,9 +228,32 @@ pub fn assert(ok: bool) void {
pub fn panic(comptime format: []const u8, args: anytype) noreturn {
@setCold(true);
// TODO: remove conditional once wasi / LLVM defines __builtin_return_address
const first_trace_addr = if (native_os == .wasi) null else @returnAddress();
panicExtra(null, first_trace_addr, format, args);
panicExtra(null, format, args);
}
/// `panicExtra` is useful when you want to print out an `@errorReturnTrace`
/// and also print out some values.
pub fn panicExtra(
trace: ?*builtin.StackTrace,
comptime format: []const u8,
args: anytype,
) noreturn {
@setCold(true);
const size = 0x1000;
const trunc_msg = "(msg truncated)";
var buf: [size + trunc_msg.len]u8 = undefined;
// a minor annoyance with this is that it will result in the NoSpaceLeft
// error being part of the @panic stack trace (but that error should
// only happen rarely)
const msg = std.fmt.bufPrint(buf[0..size], format, args) catch |err| switch (err) {
std.fmt.BufPrintError.NoSpaceLeft => blk: {
std.mem.copy(u8, buf[size..], trunc_msg);
break :blk &buf;
},
};
builtin.panic(msg, trace);
}
/// Non-zero whenever the program triggered a panic.
@ -244,7 +267,9 @@ var panic_mutex = std.Thread.Mutex{};
/// This is used to catch and handle panics triggered by the panic handler.
threadlocal var panic_stage: usize = 0;
pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: anytype) noreturn {
// `panicImpl` could be useful in implementing a custom panic handler which
// calls the default handler (on supported platforms)
pub fn panicImpl(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, msg: []const u8) noreturn {
@setCold(true);
if (enable_segfault_handler) {
@ -271,7 +296,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
const current_thread_id = std.Thread.getCurrentId();
stderr.print("thread {} panic: ", .{current_thread_id}) catch os.abort();
}
stderr.print(format ++ "\n", args) catch os.abort();
stderr.print("{s}\n", .{msg}) catch os.abort();
if (trace) |t| {
dumpStackTrace(t.*);
}
@ -654,6 +679,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
.openbsd,
.macos,
.windows,
.solaris,
=> return DebugInfo.init(allocator),
else => return error.UnsupportedDebugInfo,
}
@ -1420,7 +1446,7 @@ pub const ModuleDebugInfo = switch (native_os) {
};
}
},
.linux, .netbsd, .freebsd, .dragonfly, .openbsd, .haiku => struct {
.linux, .netbsd, .freebsd, .dragonfly, .openbsd, .haiku, .solaris => struct {
base_address: usize,
dwarf: DW.DwarfInfo,
mapped_memory: []const u8,
@ -1468,7 +1494,7 @@ fn getDebugInfoAllocator() *mem.Allocator {
/// Whether or not the current target can print useful debug information when a segfault occurs.
pub const have_segfault_handling_support = switch (native_os) {
.linux, .netbsd => true,
.linux, .netbsd, .solaris => true,
.windows => true,
.freebsd, .openbsd => @hasDecl(os.system, "ucontext_t"),
else => false,
@ -1535,6 +1561,7 @@ fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_v
.freebsd => @ptrToInt(info.addr),
.netbsd => @ptrToInt(info.info.reason.fault.addr),
.openbsd => @ptrToInt(info.data.fault.addr),
.solaris => @ptrToInt(info.reason.fault.addr),
else => unreachable,
};
@ -1559,13 +1586,13 @@ fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_v
.x86_64 => {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
const ip = switch (native_os) {
.linux, .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
.linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
.freebsd => @intCast(usize, ctx.mcontext.rip),
.openbsd => @intCast(usize, ctx.sc_rip),
else => unreachable,
};
const bp = switch (native_os) {
.linux, .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
.linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
.openbsd => @intCast(usize, ctx.sc_rbp),
.freebsd => @intCast(usize, ctx.mcontext.rbp),
else => unreachable,
@ -1624,9 +1651,14 @@ fn handleSegfaultWindowsExtra(info: *windows.EXCEPTION_POINTERS, comptime msg: u
os.abort();
} else {
switch (msg) {
0 => panicExtra(null, exception_address, format.?, .{}),
1 => panicExtra(null, exception_address, "Segmentation fault at address 0x{x}", .{info.ExceptionRecord.ExceptionInformation[1]}),
2 => panicExtra(null, exception_address, "Illegal Instruction", .{}),
0 => panicImpl(null, exception_address, format.?),
1 => {
const format_item = "Segmentation fault at address 0x{x}";
var buf: [format_item.len + 64]u8 = undefined; // 64 is arbitrary, but sufficiently large
const to_print = std.fmt.bufPrint(buf[0..buf.len], format_item, .{info.ExceptionRecord.ExceptionInformation[1]}) catch unreachable;
panicImpl(null, exception_address, to_print);
},
2 => panicImpl(null, exception_address, "Illegal Instruction"),
else => unreachable,
}
}

View file

@ -14,7 +14,7 @@ const max = std.math.max;
pub const DynLib = switch (builtin.os.tag) {
.linux => if (builtin.link_libc) DlDynlib else ElfDynLib,
.windows => WindowsDynLib,
.macos, .tvos, .watchos, .ios, .freebsd, .netbsd, .openbsd, .dragonfly => DlDynlib,
.macos, .tvos, .watchos, .ios, .freebsd, .netbsd, .openbsd, .dragonfly, .solaris => DlDynlib,
else => void,
};

View file

@ -1591,3 +1591,100 @@ pub const PF_MASKOS = 0x0ff00000;
/// Bits for processor-specific semantics.
pub const PF_MASKPROC = 0xf0000000;
// Special section indexes used in Elf{32,64}_Sym.
pub const SHN_UNDEF = 0;
pub const SHN_LORESERVE = 0xff00;
pub const SHN_LOPROC = 0xff00;
pub const SHN_HIPROC = 0xff1f;
pub const SHN_LIVEPATCH = 0xff20;
pub const SHN_ABS = 0xfff1;
pub const SHN_COMMON = 0xfff2;
pub const SHN_HIRESERVE = 0xffff;
/// AMD x86-64 relocations.
/// No reloc
pub const R_X86_64_NONE = 0;
/// Direct 64 bit
pub const R_X86_64_64 = 1;
/// PC relative 32 bit signed
pub const R_X86_64_PC32 = 2;
/// 32 bit GOT entry
pub const R_X86_64_GOT32 = 3;
/// 32 bit PLT address
pub const R_X86_64_PLT32 = 4;
/// Copy symbol at runtime
pub const R_X86_64_COPY = 5;
/// Create GOT entry
pub const R_X86_64_GLOB_DAT = 6;
/// Create PLT entry
pub const R_X86_64_JUMP_SLOT = 7;
/// Adjust by program base
pub const R_X86_64_RELATIVE = 8;
/// 32 bit signed PC relative offset to GOT
pub const R_X86_64_GOTPCREL = 9;
/// Direct 32 bit zero extended
pub const R_X86_64_32 = 10;
/// Direct 32 bit sign extended
pub const R_X86_64_32S = 11;
/// Direct 16 bit zero extended
pub const R_X86_64_16 = 12;
/// 16 bit sign extended pc relative
pub const R_X86_64_PC16 = 13;
/// Direct 8 bit sign extended
pub const R_X86_64_8 = 14;
/// 8 bit sign extended pc relative
pub const R_X86_64_PC8 = 15;
/// ID of module containing symbol
pub const R_X86_64_DTPMOD64 = 16;
/// Offset in module's TLS block
pub const R_X86_64_DTPOFF64 = 17;
/// Offset in initial TLS block
pub const R_X86_64_TPOFF64 = 18;
/// 32 bit signed PC relative offset to two GOT entries for GD symbol
pub const R_X86_64_TLSGD = 19;
/// 32 bit signed PC relative offset to two GOT entries for LD symbol
pub const R_X86_64_TLSLD = 20;
/// Offset in TLS block
pub const R_X86_64_DTPOFF32 = 21;
/// 32 bit signed PC relative offset to GOT entry for IE symbol
pub const R_X86_64_GOTTPOFF = 22;
/// Offset in initial TLS block
pub const R_X86_64_TPOFF32 = 23;
/// PC relative 64 bit
pub const R_X86_64_PC64 = 24;
/// 64 bit offset to GOT
pub const R_X86_64_GOTOFF64 = 25;
/// 32 bit signed pc relative offset to GOT
pub const R_X86_64_GOTPC32 = 26;
/// 64 bit GOT entry offset
pub const R_X86_64_GOT64 = 27;
/// 64 bit PC relative offset to GOT entry
pub const R_X86_64_GOTPCREL64 = 28;
/// 64 bit PC relative offset to GOT
pub const R_X86_64_GOTPC64 = 29;
/// Like GOT64, says PLT entry needed
pub const R_X86_64_GOTPLT64 = 30;
/// 64-bit GOT relative offset to PLT entry
pub const R_X86_64_PLTOFF64 = 31;
/// Size of symbol plus 32-bit addend
pub const R_X86_64_SIZE32 = 32;
/// Size of symbol plus 64-bit addend
pub const R_X86_64_SIZE64 = 33;
/// GOT offset for TLS descriptor
pub const R_X86_64_GOTPC32_TLSDESC = 34;
/// Marker for call through TLS descriptor
pub const R_X86_64_TLSDESC_CALL = 35;
/// TLS descriptor
pub const R_X86_64_TLSDESC = 36;
/// Adjust indirectly by program base
pub const R_X86_64_IRELATIVE = 37;
/// 64-bit adjust by program base
pub const R_X86_64_RELATIVE64 = 38;
/// 39 Reserved was R_X86_64_PC32_BND
/// 40 Reserved was R_X86_64_PLT32_BND
/// Load from 32 bit signed pc relative offset to GOT entry without REX prefix, relaxable
pub const R_X86_64_GOTPCRELX = 41;
/// Load from 32 bit signed PC relative offset to GOT entry with REX prefix, relaxable
pub const R_X86_64_REX_GOTPCRELX = 42;
pub const R_X86_64_NUM = 43;

View file

@ -119,8 +119,11 @@ pub fn LinearFifo(
}
}
/// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`.
pub const ensureCapacity = ensureTotalCapacity;
/// Ensure that the buffer can fit at least `size` items
pub fn ensureCapacity(self: *Self, size: usize) !void {
pub fn ensureTotalCapacity(self: *Self, size: usize) !void {
if (self.buf.len >= size) return;
if (buffer_type == .Dynamic) {
self.realign();
@ -135,7 +138,7 @@ pub fn LinearFifo(
pub fn ensureUnusedCapacity(self: *Self, size: usize) error{OutOfMemory}!void {
if (self.writableLength() >= size) return;
return try self.ensureCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
return try self.ensureTotalCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
}
/// Returns number of items currently in fifo
@ -471,7 +474,7 @@ test "LinearFifo(u8, .Dynamic)" {
}
{
try fifo.ensureCapacity(1);
try fifo.ensureTotalCapacity(1);
var in_fbs = std.io.fixedBufferStream("pump test");
var out_buf: [50]u8 = undefined;
var out_fbs = std.io.fixedBufferStream(&out_buf);

View file

@ -35,7 +35,7 @@ pub const Watch = @import("fs/watch.zig").Watch;
/// fit into a UTF-8 encoded array of this length.
/// The byte count includes room for a null sentinel byte.
pub const MAX_PATH_BYTES = switch (builtin.os.tag) {
.linux, .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .haiku => os.PATH_MAX,
.linux, .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .haiku, .solaris => os.PATH_MAX,
// Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
// If it would require 4 UTF-8 bytes, then there would be a surrogate
// pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
@ -298,10 +298,10 @@ pub const Dir = struct {
pub const Kind = File.Kind;
};
const IteratorError = error{AccessDenied} || os.UnexpectedError;
const IteratorError = error{ AccessDenied, SystemResources } || os.UnexpectedError;
pub const Iterator = switch (builtin.os.tag) {
.macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd => struct {
.macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => struct {
dir: Dir,
seek: i64,
buf: [8192]u8, // TODO align(@alignOf(os.system.dirent)),
@ -318,6 +318,7 @@ pub const Dir = struct {
switch (builtin.os.tag) {
.macos, .ios => return self.nextDarwin(),
.freebsd, .netbsd, .dragonfly, .openbsd => return self.nextBsd(),
.solaris => return self.nextSolaris(),
else => @compileError("unimplemented"),
}
}
@ -372,6 +373,60 @@ pub const Dir = struct {
}
}
fn nextSolaris(self: *Self) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
const rc = os.system.getdents(self.dir.fd, &self.buf, self.buf.len);
switch (os.errno(rc)) {
.SUCCESS => {},
.BADF => unreachable, // Dir is invalid or was opened without iteration ability
.FAULT => unreachable,
.NOTDIR => unreachable,
.INVAL => unreachable,
else => |err| return os.unexpectedErrno(err),
}
if (rc == 0) return null;
self.index = 0;
self.end_index = @intCast(usize, rc);
}
const entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
const next_index = self.index + entry.reclen();
self.index = next_index;
const name = mem.spanZ(@ptrCast([*:0]u8, &entry.d_name));
if (mem.eql(u8, name, ".") or mem.eql(u8, name, ".."))
continue :start_over;
// Solaris dirent doesn't expose d_type, so we have to call stat to get it.
const stat_info = os.fstatat(
self.dir.fd,
name,
os.AT.SYMLINK_NOFOLLOW,
) catch |err| switch (err) {
error.NameTooLong => unreachable,
error.SymLinkLoop => unreachable,
error.FileNotFound => unreachable, // lost the race
else => |e| return e,
};
const entry_kind = switch (stat_info.mode & os.S.IFMT) {
os.S.IFIFO => Entry.Kind.NamedPipe,
os.S.IFCHR => Entry.Kind.CharacterDevice,
os.S.IFDIR => Entry.Kind.Directory,
os.S.IFBLK => Entry.Kind.BlockDevice,
os.S.IFREG => Entry.Kind.File,
os.S.IFLNK => Entry.Kind.SymLink,
os.S.IFSOCK => Entry.Kind.UnixDomainSocket,
os.S.IFDOOR => Entry.Kind.Door,
os.S.IFPORT => Entry.Kind.EventPort,
else => Entry.Kind.Unknown,
};
return Entry{
.name = name,
.kind = entry_kind,
};
}
}
fn nextBsd(self: *Self) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
@ -704,6 +759,7 @@ pub const Dir = struct {
.netbsd,
.dragonfly,
.openbsd,
.solaris,
=> return Iterator{
.dir = self,
.seek = 0,
@ -1556,7 +1612,7 @@ pub const Dir = struct {
error.AccessDenied => |e| switch (builtin.os.tag) {
// non-Linux POSIX systems return EPERM when trying to delete a directory, so
// we need to handle that case specifically and translate the error
.macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd => {
.macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => {
// Don't follow symlinks to match unlinkat (which acts on symlinks rather than follows them)
const fstat = os.fstatatZ(self.fd, sub_path_c, os.AT.SYMLINK_NOFOLLOW) catch return e;
const is_dir = fstat.mode & os.S.IFMT == os.S.IFDIR;
@ -2441,6 +2497,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
}
switch (builtin.os.tag) {
.linux => return os.readlinkZ("/proc/self/exe", out_buffer),
.solaris => return os.readlinkZ("/proc/self/path/a.out", out_buffer),
.freebsd, .dragonfly => {
var mib = [4]c_int{ os.CTL.KERN, os.KERN.PROC, os.KERN.PROC_PATHNAME, -1 };
var out_len: usize = out_buffer.len;

View file

@ -41,6 +41,8 @@ pub const File = struct {
File,
UnixDomainSocket,
Whiteout,
Door,
EventPort,
Unknown,
};
@ -320,28 +322,40 @@ pub const File = struct {
const atime = st.atime();
const mtime = st.mtime();
const ctime = st.ctime();
const kind: Kind = if (builtin.os.tag == .wasi and !builtin.link_libc) switch (st.filetype) {
.BLOCK_DEVICE => Kind.BlockDevice,
.CHARACTER_DEVICE => Kind.CharacterDevice,
.DIRECTORY => Kind.Directory,
.SYMBOLIC_LINK => Kind.SymLink,
.REGULAR_FILE => Kind.File,
.SOCKET_STREAM, .SOCKET_DGRAM => Kind.UnixDomainSocket,
else => Kind.Unknown,
} else blk: {
const m = st.mode & os.S.IFMT;
switch (m) {
os.S.IFBLK => break :blk Kind.BlockDevice,
os.S.IFCHR => break :blk Kind.CharacterDevice,
os.S.IFDIR => break :blk Kind.Directory,
os.S.IFIFO => break :blk Kind.NamedPipe,
os.S.IFLNK => break :blk Kind.SymLink,
os.S.IFREG => break :blk Kind.File,
os.S.IFSOCK => break :blk Kind.UnixDomainSocket,
else => {},
}
if (builtin.os.tag == .solaris) switch (m) {
os.S.IFDOOR => break :blk Kind.Door,
os.S.IFPORT => break :blk Kind.EventPort,
else => {},
};
break :blk .Unknown;
};
return Stat{
.inode = st.ino,
.size = @bitCast(u64, st.size),
.mode = st.mode,
.kind = if (builtin.os.tag == .wasi and !builtin.link_libc) switch (st.filetype) {
.BLOCK_DEVICE => Kind.BlockDevice,
.CHARACTER_DEVICE => Kind.CharacterDevice,
.DIRECTORY => Kind.Directory,
.SYMBOLIC_LINK => Kind.SymLink,
.REGULAR_FILE => Kind.File,
.SOCKET_STREAM, .SOCKET_DGRAM => Kind.UnixDomainSocket,
else => Kind.Unknown,
} else switch (st.mode & os.S.IFMT) {
os.S.IFBLK => Kind.BlockDevice,
os.S.IFCHR => Kind.CharacterDevice,
os.S.IFDIR => Kind.Directory,
os.S.IFIFO => Kind.NamedPipe,
os.S.IFLNK => Kind.SymLink,
os.S.IFREG => Kind.File,
os.S.IFSOCK => Kind.UnixDomainSocket,
else => Kind.Unknown,
},
.kind = kind,
.atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
.mtime = @as(i128, mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec,
.ctime = @as(i128, ctime.tv_sec) * std.time.ns_per_s + ctime.tv_nsec,
@ -852,6 +866,7 @@ pub const File = struct {
pub const LockError = error{
SystemResources,
FileLocksNotSupported,
} || os.UnexpectedError;
/// Blocks when an incompatible lock is held by another process.
@ -914,6 +929,7 @@ pub const File = struct {
return os.flock(file.handle, os.LOCK.UN) catch |err| switch (err) {
error.WouldBlock => unreachable, // unlocking can't block
error.SystemResources => unreachable, // We are deallocating resources.
error.FileLocksNotSupported => unreachable, // We already got the lock.
error.Unexpected => unreachable, // Resource deallocation must succeed.
};
}

View file

@ -44,7 +44,7 @@ pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataD
};
return fs.path.join(allocator, &[_][]const u8{ home_dir, "Library", "Application Support", appname });
},
.linux, .freebsd, .netbsd, .dragonfly, .openbsd => {
.linux, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => {
const home_dir = os.getenv("HOME") orelse {
// TODO look in /etc/passwd
return error.AppDataDirUnavailable;

View file

@ -188,7 +188,7 @@ fn contains(entries: *const std.ArrayList(Dir.Entry), el: Dir.Entry) bool {
test "Dir.realpath smoke test" {
switch (builtin.os.tag) {
.linux, .windows, .macos, .ios, .watchos, .tvos => {},
.linux, .windows, .macos, .ios, .watchos, .tvos, .solaris => {},
else => return error.SkipZigTest,
}

View file

@ -1,4 +1,4 @@
// zig run benchmark.zig --release-fast --zig-lib-dir ..
// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
const builtin = std.builtin;
const std = @import("std");

View file

@ -1568,11 +1568,11 @@ test "std.hash_map basic usage" {
try expectEqual(total, sum);
}
test "std.hash_map ensureCapacity" {
test "std.hash_map ensureTotalCapacity" {
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
try map.ensureCapacity(20);
try map.ensureTotalCapacity(20);
const initial_capacity = map.capacity();
try testing.expect(initial_capacity >= 20);
var i: i32 = 0;
@ -1583,13 +1583,13 @@ test "std.hash_map ensureCapacity" {
try testing.expect(initial_capacity == map.capacity());
}
test "std.hash_map ensureCapacity with tombstones" {
test "std.hash_map ensureUnusedCapacity with tombstones" {
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
var i: i32 = 0;
while (i < 100) : (i += 1) {
try map.ensureCapacity(@intCast(u32, map.count() + 1));
try map.ensureUnusedCapacity(1);
map.putAssumeCapacity(i, i);
// Remove to create tombstones that still count as load in the hashmap.
_ = map.remove(i);
@ -1669,7 +1669,7 @@ test "std.hash_map clone" {
try expectEqual(b.get(3).?, 3);
}
test "std.hash_map ensureCapacity with existing elements" {
test "std.hash_map ensureTotalCapacity with existing elements" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();
@ -1677,16 +1677,16 @@ test "std.hash_map ensureCapacity with existing elements" {
try expectEqual(map.count(), 1);
try expectEqual(map.capacity(), @TypeOf(map).Unmanaged.minimal_capacity);
try map.ensureCapacity(65);
try map.ensureTotalCapacity(65);
try expectEqual(map.count(), 1);
try expectEqual(map.capacity(), 128);
}
test "std.hash_map ensureCapacity satisfies max load factor" {
test "std.hash_map ensureTotalCapacity satisfies max load factor" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();
try map.ensureCapacity(127);
try map.ensureTotalCapacity(127);
try expectEqual(map.capacity(), 256);
}
@ -1870,7 +1870,7 @@ test "std.hash_map putAssumeCapacity" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();
try map.ensureCapacity(20);
try map.ensureTotalCapacity(20);
var i: u32 = 0;
while (i < 20) : (i += 1) {
map.putAssumeCapacityNoClobber(i, i);

View file

@ -746,10 +746,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureCapacity(
self.backing_allocator,
self.large_allocations.count() + 1,
);
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);

View file

@ -61,7 +61,7 @@ pub fn Reader(
array_list: *std.ArrayListAligned(u8, alignment),
max_append_size: usize,
) !void {
try array_list.ensureCapacity(math.min(max_append_size, 4096));
try array_list.ensureTotalCapacity(math.min(max_append_size, 4096));
const original_len = array_list.items.len;
var start_index: usize = original_len;
while (true) {
@ -81,7 +81,7 @@ pub fn Reader(
}
// This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
try array_list.ensureCapacity(start_index + 1);
try array_list.ensureTotalCapacity(start_index + 1);
}
}

View file

@ -1838,7 +1838,7 @@ fn parseInternal(
else => {},
}
try arraylist.ensureCapacity(arraylist.items.len + 1);
try arraylist.ensureUnusedCapacity(1);
const v = try parseInternal(ptrInfo.child, tok, tokens, options);
arraylist.appendAssumeCapacity(v);
}

View file

@ -76,6 +76,14 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
} else {
// If we don't overflow and this is the last byte and the number being decoded
// is negative, check that the remaining bits are 1
if ((byte & 0x80 == 0) and (@bitCast(S, temp) < 0)) {
const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}
}
value |= temp;
@ -215,6 +223,8 @@ test "deserialize signed LEB128" {
try testing.expectError(error.Overflow, test_read_ileb128(i32, "\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_ileb128(i8, "\xff\x7e"));
try testing.expectError(error.Overflow, test_read_ileb128(i32, "\x80\x80\x80\x80\x08"));
try testing.expectError(error.Overflow, test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01"));
// Decode SLEB128
try testing.expect((try test_read_ileb128(i64, "\x00")) == 0);
@ -233,8 +243,8 @@ test "deserialize signed LEB128" {
try testing.expect((try test_read_ileb128(i8, "\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i16, "\xff\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i32, "\xff\xff\xff\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x08")) == -0x80000000);
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01")) == @bitCast(i64, @intCast(u64, 0x8000000000000000)));
try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000);
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @bitCast(i64, @intCast(u64, 0x8000000000000000)));
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000);
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000);

View file

@ -552,47 +552,78 @@ pub const Mutable = struct {
r.positive = a.positive;
}
/// r = a | b
/// r = a | b under 2s complement semantics.
/// r may alias with a or b.
///
/// a and b are zero-extended to the longer of a or b.
///
/// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`.
pub fn bitOr(r: *Mutable, a: Const, b: Const) void {
if (a.limbs.len > b.limbs.len) {
llor(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]);
r.len = a.limbs.len;
// Trivial cases, llsignedor does not support zero.
if (a.eqZero()) {
r.copy(b);
return;
} else if (b.eqZero()) {
r.copy(a);
return;
}
if (a.limbs.len >= b.limbs.len) {
r.positive = llsignedor(r.limbs, a.limbs, a.positive, b.limbs, b.positive);
r.normalize(if (b.positive) a.limbs.len else b.limbs.len);
} else {
llor(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]);
r.len = b.limbs.len;
r.positive = llsignedor(r.limbs, b.limbs, b.positive, a.limbs, a.positive);
r.normalize(if (a.positive) b.limbs.len else a.limbs.len);
}
}
/// r = a & b
/// r = a & b under 2s complement semantics.
/// r may alias with a or b.
///
/// Asserts that r has enough limbs to store the result. Upper bound is `math.min(a.limbs.len, b.limbs.len)`.
/// Asserts that r has enough limbs to store the result.
/// If a or b is positive, the upper bound is `math.min(a.limbs.len, b.limbs.len)`.
/// If a and b are negative, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`.
pub fn bitAnd(r: *Mutable, a: Const, b: Const) void {
if (a.limbs.len > b.limbs.len) {
lland(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]);
r.normalize(b.limbs.len);
// Trivial cases, llsignedand does not support zero.
if (a.eqZero()) {
r.copy(a);
return;
} else if (b.eqZero()) {
r.copy(b);
return;
}
if (a.limbs.len >= b.limbs.len) {
r.positive = llsignedand(r.limbs, a.limbs, a.positive, b.limbs, b.positive);
r.normalize(if (a.positive or b.positive) b.limbs.len else a.limbs.len + 1);
} else {
lland(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]);
r.normalize(a.limbs.len);
r.positive = llsignedand(r.limbs, b.limbs, b.positive, a.limbs, a.positive);
r.normalize(if (a.positive or b.positive) a.limbs.len else b.limbs.len + 1);
}
}
/// r = a ^ b
/// r = a ^ b under 2s complement semantics.
/// r may alias with a or b.
///
/// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`.
/// Asserts that r has enough limbs to store the result. If a and b share the same signedness, the
/// upper bound is `math.max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative
/// but not both, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`.
pub fn bitXor(r: *Mutable, a: Const, b: Const) void {
// Trivial cases, because llsignedxor does not support negative zero.
if (a.eqZero()) {
r.copy(b);
return;
} else if (b.eqZero()) {
r.copy(a);
return;
}
if (a.limbs.len > b.limbs.len) {
llxor(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]);
r.normalize(a.limbs.len);
r.positive = llsignedxor(r.limbs, a.limbs, a.positive, b.limbs, b.positive);
r.normalize(a.limbs.len + @boolToInt(a.positive != b.positive));
} else {
llxor(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]);
r.normalize(b.limbs.len);
r.positive = llsignedxor(r.limbs, b.limbs, b.positive, a.limbs, a.positive);
r.normalize(b.limbs.len + @boolToInt(a.positive != b.positive));
}
}
@ -1834,7 +1865,11 @@ pub const Managed = struct {
/// r = a & b
pub fn bitAnd(r: *Managed, a: Managed, b: Managed) !void {
try r.ensureCapacity(math.min(a.len(), b.len()));
const cap = if (a.isPositive() or b.isPositive())
math.min(a.len(), b.len())
else
math.max(a.len(), b.len()) + 1;
try r.ensureCapacity(cap);
var m = r.toMutable();
m.bitAnd(a.toConst(), b.toConst());
r.setMetadata(m.positive, m.len);
@ -1842,7 +1877,9 @@ pub const Managed = struct {
/// r = a ^ b
pub fn bitXor(r: *Managed, a: Managed, b: Managed) !void {
try r.ensureCapacity(math.max(a.len(), b.len()));
var cap = math.max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive());
try r.ensureCapacity(cap);
var m = r.toMutable();
m.bitXor(a.toConst(), b.toConst());
r.setMetadata(m.positive, m.len);
@ -2221,42 +2258,299 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
}
}
fn llor(r: []Limb, a: []const Limb, b: []const Limb) void {
// r = a | b with 2s complement semantics.
// r may alias.
// a and b must not be 0.
// Returns `true` when the result is positive.
// When b is positive, r requires at least `a.len` limbs of storage.
// When b is negative, r requires at least `b.len` limbs of storage.
fn llsignedor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool {
@setRuntimeSafety(debug_safety);
assert(r.len >= a.len);
assert(a.len >= b.len);
var i: usize = 0;
while (i < b.len) : (i += 1) {
r[i] = a[i] | b[i];
}
while (i < a.len) : (i += 1) {
r[i] = a[i];
if (a_positive and b_positive) {
// Trivial case, result is positive.
var i: usize = 0;
while (i < b.len) : (i += 1) {
r[i] = a[i] | b[i];
}
while (i < a.len) : (i += 1) {
r[i] = a[i];
}
return true;
} else if (!a_positive and b_positive) {
// Result is negative.
// r = (--a) | b
// = ~(-a - 1) | b
// = ~(-a - 1) | ~~b
// = ~((-a - 1) & ~b)
// = -(((-a - 1) & ~b) + 1)
var i: usize = 0;
var a_borrow: u1 = 1;
var r_carry: u1 = 1;
while (i < b.len) : (i += 1) {
var a_limb: Limb = undefined;
a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
r[i] = a_limb & ~b[i];
r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
// In order for r_carry to be nonzero at this point, ~b[i] would need to be
// all ones, which would require b[i] to be zero. This cannot be when
// b is normalized, so there cannot be a carry here.
// Also, x & ~b can only clear bits, so (x & ~b) <= x, meaning (-a - 1) + 1 never overflows.
assert(r_carry == 0);
// With b = 0, we get (-a - 1) & ~0 = -a - 1.
// Note, if a_borrow is zero we do not need to compute anything for
// the higher limbs so we can early return here.
while (i < a.len and a_borrow == 1) : (i += 1) {
a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
}
assert(a_borrow == 0); // a was 0.
return false;
} else if (a_positive and !b_positive) {
// Result is negative.
// r = a | (--b)
// = a | ~(-b - 1)
// = ~~a | ~(-b - 1)
// = ~(~a & (-b - 1))
// = -((~a & (-b - 1)) + 1)
var i: usize = 0;
var b_borrow: u1 = 1;
var r_carry: u1 = 1;
while (i < b.len) : (i += 1) {
var b_limb: Limb = undefined;
b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
r[i] = ~a[i] & b_limb;
r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
// b is at least 1, so this should never underflow.
assert(b_borrow == 0); // b was 0
// x & ~a can only clear bits, so (x & ~a) <= x, meaning (-b - 1) + 1 never overflows.
assert(r_carry == 0);
// With b = 0 and b_borrow = 0, we get ~a & (-0 - 0) = ~a & 0 = 0.
// Omit setting the upper bytes, just deal with those when calling llsignedor.
return false;
} else {
// Result is negative.
// r = (--a) | (--b)
// = ~(-a - 1) | ~(-b - 1)
// = ~((-a - 1) & (-b - 1))
// = -(~(~((-a - 1) & (-b - 1))) + 1)
// = -((-a - 1) & (-b - 1) + 1)
var i: usize = 0;
var a_borrow: u1 = 1;
var b_borrow: u1 = 1;
var r_carry: u1 = 1;
while (i < b.len) : (i += 1) {
var a_limb: Limb = undefined;
a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
var b_limb: Limb = undefined;
b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
r[i] = a_limb & b_limb;
r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
// b is at least 1, so this should never underflow.
assert(b_borrow == 0); // b was 0
// Can never overflow because in order for b_limb to be maxInt(Limb),
// b_borrow would need to equal 1.
// x & y can only clear bits, meaning x & y <= x and x & y <= y. This implies that
// for x = a - 1 and y = b - 1, the +1 term would never cause an overflow.
assert(r_carry == 0);
// With b = 0 and b_borrow = 0 we get (-a - 1) & (-0 - 0) = (-a - 1) & 0 = 0.
// Omit setting the upper bytes, just deal with those when calling llsignedor.
return false;
}
}
fn lland(r: []Limb, a: []const Limb, b: []const Limb) void {
// r = a & b with 2s complement semantics.
// r may alias.
// a and b must not be 0.
// Returns `true` when the result is positive.
// When either or both of a and b are positive, r requires at least `b.len` limbs of storage.
// When both a and b are negative, r requires at least `a.limbs.len + 1` limbs of storage.
fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool {
@setRuntimeSafety(debug_safety);
assert(r.len >= b.len);
assert(a.len >= b.len);
var i: usize = 0;
while (i < b.len) : (i += 1) {
r[i] = a[i] & b[i];
}
}
fn llxor(r: []Limb, a: []const Limb, b: []const Limb) void {
assert(a.len != 0 and b.len != 0);
assert(r.len >= a.len);
assert(a.len >= b.len);
if (a_positive and b_positive) {
// Trivial case, result is positive.
var i: usize = 0;
while (i < b.len) : (i += 1) {
r[i] = a[i] & b[i];
}
// With b = 0 we have a & 0 = 0, so the upper bytes are zero.
// Omit setting them here and simply discard them whenever
// llsignedand is called.
return true;
} else if (!a_positive and b_positive) {
// Result is positive.
// r = (--a) & b
// = ~(-a - 1) & b
var i: usize = 0;
var a_borrow: u1 = 1;
while (i < b.len) : (i += 1) {
var a_limb: Limb = undefined;
a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
r[i] = ~a_limb & b[i];
}
// With b = 0 we have ~(a - 1) & 0 = 0, so the upper bytes are zero.
// Omit setting them here and simply discard them whenever
// llsignedand is called.
return true;
} else if (a_positive and !b_positive) {
// Result is positive.
// r = a & (--b)
// = a & ~(-b - 1)
var i: usize = 0;
var b_borrow: u1 = 1;
while (i < b.len) : (i += 1) {
var a_limb: Limb = undefined;
b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &a_limb));
r[i] = a[i] & ~a_limb;
}
assert(b_borrow == 0); // b was 0
// With b = 0 and b_borrow = 0 we have a & ~(-0 - 0) = a & 0 = 0, so
// the upper bytes are zero. Omit setting them here and simply discard
// them whenever llsignedand is called.
return true;
} else {
// Result is negative.
// r = (--a) & (--b)
// = ~(-a - 1) & ~(-b - 1)
// = ~((-a - 1) | (-b - 1))
// = -(((-a - 1) | (-b - 1)) + 1)
var i: usize = 0;
var a_borrow: u1 = 1;
var b_borrow: u1 = 1;
var r_carry: u1 = 1;
while (i < b.len) : (i += 1) {
var a_limb: Limb = undefined;
a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
var b_limb: Limb = undefined;
b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
r[i] = a_limb | b_limb;
r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
// b is at least 1, so this should never underflow.
assert(b_borrow == 0); // b was 0
// With b = 0 and b_borrow = 0 we get (-a - 1) | (-0 - 0) = (-a - 1) | 0 = -a - 1.
while (i < a.len) : (i += 1) {
a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
assert(a_borrow == 0); // a was 0.
// The final addition can overflow here, so we need to keep that in mind.
r[i] = r_carry;
return false;
}
}
// r = a ^ b with 2s complement semantics.
// r may alias.
// a and b must not be -0.
// Returns `true` when the result is positive.
// If the sign of a and b is equal, then r requires at least `max(a.len, b.len)` limbs are required.
// Otherwise, r requires at least `max(a.len, b.len) + 1` limbs.
fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool {
@setRuntimeSafety(debug_safety);
assert(a.len != 0 and b.len != 0);
assert(r.len >= a.len);
assert(a.len >= b.len);
// If a and b are positive, the result is positive and r = a ^ b.
// If a negative, b positive, result is negative and we have
// r = --(--a ^ b)
// = --(~(-a - 1) ^ b)
// = -(~(~(-a - 1) ^ b) + 1)
// = -(((-a - 1) ^ b) + 1)
// Same if a is positive and b is negative, sides switched.
// If both a and b are negative, the result is positive and we have
// r = (--a) ^ (--b)
// = ~(-a - 1) ^ ~(-b - 1)
// = (-a - 1) ^ (-b - 1)
// These operations can be made more generic as follows:
// - If a is negative, subtract 1 from |a| before the xor.
// - If b is negative, subtract 1 from |b| before the xor.
// - if the result is supposed to be negative, add 1.
var i: usize = 0;
var a_borrow = @boolToInt(!a_positive);
var b_borrow = @boolToInt(!b_positive);
var r_carry = @boolToInt(a_positive != b_positive);
while (i < b.len) : (i += 1) {
r[i] = a[i] ^ b[i];
var a_limb: Limb = undefined;
a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
var b_limb: Limb = undefined;
b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
r[i] = a_limb ^ b_limb;
r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
while (i < a.len) : (i += 1) {
r[i] = a[i];
a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
// If both inputs don't share the same sign, an extra limb is required.
if (a_positive != b_positive) {
r[i] = r_carry;
} else {
assert(r_carry == 0);
}
assert(a_borrow == 0);
assert(b_borrow == 0);
return a_positive == b_positive;
}
/// r MUST NOT alias x.

View file

@ -5,6 +5,7 @@ const Managed = std.math.big.int.Managed;
const Mutable = std.math.big.int.Mutable;
const Limb = std.math.big.Limb;
const DoubleLimb = std.math.big.DoubleLimb;
const SignedDoubleLimb = std.math.big.SignedDoubleLimb;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
@ -1364,6 +1365,83 @@ test "big.int bitwise and multi-limb" {
try testing.expect((try a.to(u128)) == 0);
}
test "big.int bitwise and negative-positive simple" {
var a = try Managed.initSet(testing.allocator, -0xffffffff11111111);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 0xeeeeeeee22222222);
defer b.deinit();
try a.bitAnd(a, b);
try testing.expect((try a.to(u64)) == 0x22222222);
}
test "big.int bitwise and negative-positive multi-limb" {
var a = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, maxInt(Limb));
defer b.deinit();
try a.bitAnd(a, b);
try testing.expect(a.eqZero());
}
test "big.int bitwise and positive-negative simple" {
var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -0xeeeeeeee22222222);
defer b.deinit();
try a.bitAnd(a, b);
try testing.expect((try a.to(u64)) == 0x1111111111111110);
}
test "big.int bitwise and positive-negative multi-limb" {
var a = try Managed.initSet(testing.allocator, maxInt(Limb));
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
defer b.deinit();
try a.bitAnd(a, b);
try testing.expect(a.eqZero());
}
test "big.int bitwise and negative-negative simple" {
var a = try Managed.initSet(testing.allocator, -0xffffffff11111111);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -0xeeeeeeee22222222);
defer b.deinit();
try a.bitAnd(a, b);
try testing.expect((try a.to(i128)) == -0xffffffff33333332);
}
test "big.int bitwise and negative-negative multi-limb" {
var a = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -maxInt(Limb) - 2);
defer b.deinit();
try a.bitAnd(a, b);
try testing.expect((try a.to(i128)) == -maxInt(Limb) * 2 - 2);
}
test "big.int bitwise and negative overflow" {
var a = try Managed.initSet(testing.allocator, -maxInt(Limb));
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -2);
defer b.deinit();
try a.bitAnd(a, b);
try testing.expect((try a.to(SignedDoubleLimb)) == -maxInt(Limb) - 1);
}
test "big.int bitwise xor simple" {
var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
defer a.deinit();
@ -1386,6 +1464,72 @@ test "big.int bitwise xor multi-limb" {
try testing.expect((try a.to(DoubleLimb)) == (maxInt(Limb) + 1) ^ maxInt(Limb));
}
test "big.int bitwise xor single negative simple" {
var a = try Managed.initSet(testing.allocator, 0x6b03e381328a3154);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -0x45fd3acef9191fad);
defer b.deinit();
try a.bitXor(a, b);
try testing.expect((try a.to(i64)) == -0x2efed94fcb932ef9);
}
test "big.int bitwise xor single negative zero" {
var a = try Managed.initSet(testing.allocator, 0);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -0);
defer b.deinit();
try a.bitXor(a, b);
try testing.expect(a.eqZero());
}
test "big.int bitwise xor single negative multi-limb" {
var a = try Managed.initSet(testing.allocator, -0x9849c6e7a10d66d0e4260d4846254c32);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 0xf2194e7d1c855272a997fcde16f6d5a8);
defer b.deinit();
try a.bitXor(a, b);
try testing.expect((try a.to(i128)) == -0x6a50889abd8834a24db1f19650d3999a);
}
test "big.int bitwise xor single negative overflow" {
var a = try Managed.initSet(testing.allocator, maxInt(Limb));
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -1);
defer b.deinit();
try a.bitXor(a, b);
try testing.expect((try a.to(SignedDoubleLimb)) == -(maxInt(Limb) + 1));
}
test "big.int bitwise xor double negative simple" {
var a = try Managed.initSet(testing.allocator, -0x8e48bd5f755ef1f3);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -0x4dd4fa576f3046ac);
defer b.deinit();
try a.bitXor(a, b);
try testing.expect((try a.to(u64)) == 0xc39c47081a6eb759);
}
test "big.int bitwise xor double negative multi-limb" {
var a = try Managed.initSet(testing.allocator, -0x684e5da8f500ec8ca7204c33ccc51c9c);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -0xcb07736a7b62289c78d967c3985eebeb);
defer b.deinit();
try a.bitXor(a, b);
try testing.expect((try a.to(u128)) == 0xa3492ec28e62c410dff92bf0549bf771);
}
test "big.int bitwise or simple" {
var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
defer a.deinit();
@ -1409,6 +1553,72 @@ test "big.int bitwise or multi-limb" {
try testing.expect((try a.to(DoubleLimb)) == (maxInt(Limb) + 1) + maxInt(Limb));
}
test "big.int bitwise or negative-positive simple" {
var a = try Managed.initSet(testing.allocator, -0xffffffff11111111);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 0xeeeeeeee22222222);
defer b.deinit();
try a.bitOr(a, b);
try testing.expect((try a.to(i64)) == -0x1111111111111111);
}
test "big.int bitwise or negative-positive multi-limb" {
var a = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 1);
defer b.deinit();
try a.bitOr(a, b);
try testing.expect((try a.to(SignedDoubleLimb)) == -maxInt(Limb));
}
test "big.int bitwise or positive-negative simple" {
var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -0xeeeeeeee22222222);
defer b.deinit();
try a.bitOr(a, b);
try testing.expect((try a.to(i64)) == -0x22222221);
}
test "big.int bitwise or positive-negative multi-limb" {
var a = try Managed.initSet(testing.allocator, maxInt(Limb) + 1);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -1);
defer b.deinit();
try a.bitOr(a, b);
try testing.expect((try a.to(SignedDoubleLimb)) == -1);
}
test "big.int bitwise or negative-negative simple" {
var a = try Managed.initSet(testing.allocator, -0xffffffff11111111);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -0xeeeeeeee22222222);
defer b.deinit();
try a.bitOr(a, b);
try testing.expect((try a.to(i128)) == -0xeeeeeeee00000001);
}
test "big.int bitwise or negative-negative multi-limb" {
var a = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, -maxInt(Limb));
defer b.deinit();
try a.bitOr(a, b);
try testing.expect((try a.to(SignedDoubleLimb)) == -maxInt(Limb));
}
test "big.int var args" {
var a = try Managed.initSet(testing.allocator, 5);
defer a.deinit();

View file

@ -147,6 +147,46 @@ test "mem.Allocator basics" {
try testing.expectError(error.OutOfMemory, failAllocator.allocSentinel(u8, 1, 0));
}
test "Allocator.resize" {
const primitiveIntTypes = .{
i8,
u8,
i16,
u16,
i32,
u32,
i64,
u64,
i128,
u128,
isize,
usize,
};
inline for (primitiveIntTypes) |T| {
var values = try testing.allocator.alloc(T, 100);
defer testing.allocator.free(values);
for (values) |*v, i| v.* = @intCast(T, i);
values = try testing.allocator.resize(values, values.len + 10);
try testing.expect(values.len == 110);
}
const primitiveFloatTypes = .{
f16,
f32,
f64,
f128,
};
inline for (primitiveFloatTypes) |T| {
var values = try testing.allocator.alloc(T, 100);
defer testing.allocator.free(values);
for (values) |*v, i| v.* = @intToFloat(T, i);
values = try testing.allocator.resize(values, values.len + 10);
try testing.expect(values.len == 110);
}
}
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
/// If the slices overlap, dest.ptr must be <= src.ptr.
@ -2472,6 +2512,7 @@ fn CopyPtrAttrs(comptime source: type, comptime size: std.builtin.TypeInfo.Point
.is_volatile = info.is_volatile,
.is_allowzero = info.is_allowzero,
.alignment = info.alignment,
.address_space = info.address_space,
.child = child,
.sentinel = null,
},
@ -2960,6 +3001,7 @@ fn AlignedSlice(comptime AttributeSource: type, comptime new_alignment: u29) typ
.is_volatile = info.is_volatile,
.is_allowzero = info.is_allowzero,
.alignment = new_alignment,
.address_space = info.address_space,
.child = info.child,
.sentinel = null,
},

View file

@ -313,7 +313,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
assert(rc == new_byte_count);
const new_byte_slice = old_mem.ptr[0..new_byte_count];
const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
}

View file

@ -235,6 +235,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.is_const = info.is_const,
.is_volatile = info.is_volatile,
.alignment = info.alignment,
.address_space = info.address_space,
.child = @Type(.{
.Array = .{
.len = array_info.len,
@ -254,6 +255,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.is_const = info.is_const,
.is_volatile = info.is_volatile,
.alignment = info.alignment,
.address_space = info.address_space,
.child = info.child,
.is_allowzero = info.is_allowzero,
.sentinel = sentinel_val,
@ -271,6 +273,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.is_const = ptr_info.is_const,
.is_volatile = ptr_info.is_volatile,
.alignment = ptr_info.alignment,
.address_space = ptr_info.address_space,
.child = ptr_info.child,
.is_allowzero = ptr_info.is_allowzero,
.sentinel = sentinel_val,

View file

@ -189,7 +189,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// sets the given index to the specified element. May reallocate
/// and invalidate iterators.
pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) void {
try self.ensureCapacity(gpa, self.len + 1);
try self.ensureUnusedCapacity(gpa, 1);
self.insertAssumeCapacity(index, elem);
}
@ -376,7 +376,7 @@ pub fn MultiArrayList(comptime S: type) type {
pub fn clone(self: Self, gpa: *Allocator) !Self {
var result = Self{};
errdefer result.deinit(gpa);
try result.ensureCapacity(gpa, self.len);
try result.ensureTotalCapacity(gpa, self.len);
result.len = self.len;
const self_slice = self.slice();
const result_slice = result.slice();

View file

@ -157,7 +157,7 @@ pub const Address = extern union {
unreachable;
}
try std.fmt.format(out_stream, "{s}", .{&self.un.path});
try std.fmt.format(out_stream, "{s}", .{std.mem.sliceTo(&self.un.path, 0)});
},
else => unreachable,
}

View file

@ -90,6 +90,19 @@ test "parse and render IPv4 addresses" {
try testing.expectError(error.NonCanonical, net.Address.parseIp4("127.01.0.1", 0));
}
test "parse and render UNIX addresses" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (!net.has_unix_sockets) return error.SkipZigTest;
var buffer: [14]u8 = undefined;
const addr = net.Address.initUnix("/tmp/testpath") catch unreachable;
const fmt_addr = std.fmt.bufPrint(buffer[0..], "{}", .{addr}) catch unreachable;
try std.testing.expectEqualSlices(u8, "/tmp/testpath", fmt_addr);
const too_long = [_]u8{'a'} ** (addr.un.path.len + 1);
try testing.expectError(error.NameTooLong, net.Address.initUnix(too_long[0..]));
}
test "resolve DNS" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;

View file

@ -31,6 +31,7 @@ pub const freebsd = std.c;
pub const haiku = std.c;
pub const netbsd = std.c;
pub const openbsd = std.c;
pub const solaris = std.c;
pub const linux = @import("os/linux.zig");
pub const uefi = @import("os/uefi.zig");
pub const wasi = @import("os/wasi.zig");
@ -64,8 +65,10 @@ else switch (builtin.os.tag) {
};
pub const AF = system.AF;
pub const AF_SUN = system.AF_SUN;
pub const ARCH = system.ARCH;
pub const AT = system.AT;
pub const AT_SUN = system.AT_SUN;
pub const CLOCK = system.CLOCK;
pub const CPU_COUNT = system.CPU_COUNT;
pub const CTL = system.CTL;
@ -101,6 +104,7 @@ pub const RR = system.RR;
pub const S = system.S;
pub const SA = system.SA;
pub const SC = system.SC;
pub const _SC = system._SC;
pub const SEEK = system.SEEK;
pub const SHUT = system.SHUT;
pub const SIG = system.SIG;
@ -143,6 +147,10 @@ pub const off_t = system.off_t;
pub const oflags_t = system.oflags_t;
pub const pid_t = system.pid_t;
pub const pollfd = system.pollfd;
pub const port_t = system.port_t;
pub const port_event = system.port_event;
pub const port_notify = system.port_notify;
pub const file_obj = system.file_obj;
pub const rights_t = system.rights_t;
pub const rlim_t = system.rlim_t;
pub const rlimit = system.rlimit;
@ -2038,6 +2046,7 @@ pub fn unlinkatZ(dirfd: fd_t, file_path_c: [*:0]const u8, flags: u32) UnlinkatEr
.NOTDIR => return error.NotDir,
.NOMEM => return error.SystemResources,
.ROFS => return error.ReadOnlyFileSystem,
.EXIST => return error.DirNotEmpty,
.NOTEMPTY => return error.DirNotEmpty,
.INVAL => unreachable, // invalid flags, or pathname has . as last component
@ -4492,8 +4501,12 @@ pub const FlockError = error{
/// The kernel ran out of memory for allocating file locks
SystemResources,
/// The underlying filesystem does not support file locks
FileLocksNotSupported,
} || UnexpectedError;
/// Depending on the operating system `flock` may or may not interact with `fcntl` locks made by other processes.
pub fn flock(fd: fd_t, operation: i32) FlockError!void {
while (true) {
const rc = system.flock(fd, operation);
@ -4504,6 +4517,7 @@ pub fn flock(fd: fd_t, operation: i32) FlockError!void {
.INVAL => unreachable, // invalid parameters
.NOLCK => return error.SystemResources,
.AGAIN => return error.WouldBlock, // TODO: integrate with async instead of just returning an error
.OPNOTSUPP => return error.FileLocksNotSupported,
else => |err| return unexpectedErrno(err),
}
}
@ -4667,6 +4681,16 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
};
return target;
},
.solaris => {
var procfs_buf: ["/proc/self/path/-2147483648".len:0]u8 = undefined;
const proc_path = std.fmt.bufPrintZ(procfs_buf[0..], "/proc/self/path/{d}", .{fd}) catch unreachable;
const target = readlinkZ(proc_path, out_buffer) catch |err| switch (err) {
error.UnsupportedReparsePointType => unreachable,
else => |e| return e,
};
return target;
},
else => @compileError("querying for canonical path of a handle is unsupported on this host"),
}
}

View file

@ -89,6 +89,7 @@ pub const user_desc = arch_bits.user_desc;
pub const tls = @import("linux/tls.zig");
pub const pie = @import("linux/start_pie.zig");
pub const BPF = @import("linux/bpf.zig");
pub const IOCTL = @import("linux/ioctl.zig");
pub const MAP = struct {
pub usingnamespace arch_bits.MAP;
@ -2585,18 +2586,18 @@ pub const T = struct {
pub const IOCGSID = 0x5429;
pub const IOCGRS485 = 0x542E;
pub const IOCSRS485 = 0x542F;
pub const IOCGPTN = 0x80045430;
pub const IOCSPTLCK = 0x40045431;
pub const IOCGDEV = 0x80045432;
pub const IOCGPTN = IOCTL.IOR('T', 0x30, c_uint);
pub const IOCSPTLCK = IOCTL.IOW('T', 0x31, c_int);
pub const IOCGDEV = IOCTL.IOR('T', 0x32, c_uint);
pub const CGETX = 0x5432;
pub const CSETX = 0x5433;
pub const CSETXF = 0x5434;
pub const CSETXW = 0x5435;
pub const IOCSIG = 0x40045436;
pub const IOCSIG = IOCTL.IOW('T', 0x36, c_int);
pub const IOCVHANGUP = 0x5437;
pub const IOCGPKT = 0x80045438;
pub const IOCGPTLCK = 0x80045439;
pub const IOCGEXCL = 0x80045440;
pub const IOCGPKT = IOCTL.IOR('T', 0x38, c_int);
pub const IOCGPTLCK = IOCTL.IOR('T', 0x39, c_int);
pub const IOCGEXCL = IOCTL.IOR('T', 0x40, c_int);
};
pub const EPOLL = struct {
@ -2923,6 +2924,7 @@ pub const sockaddr = extern struct {
family: sa_family_t,
data: [14]u8,
pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
/// IPv4 socket address

View file

@ -404,6 +404,25 @@ pub const IO_Uring = struct {
return sqe;
}
/// Queues (but does not submit) an SQE to perform a IORING_OP_READ_FIXED.
/// The `buffer` provided must be registered with the kernel by calling `register_buffers` first.
/// The `buffer_index` must be the same as its index in the array provided to `register_buffers`.
///
/// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
pub fn read_fixed(
self: *IO_Uring,
user_data: u64,
fd: os.fd_t,
buffer: *os.iovec,
offset: u64,
buffer_index: u16,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_read_fixed(sqe, fd, buffer, offset, buffer_index);
sqe.user_data = user_data;
return sqe;
}
/// Queues (but does not submit) an SQE to perform a `pwritev()`.
/// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
/// For example, if you want to do a `pwritev2()` then set `rw_flags` on the returned SQE.
@ -421,6 +440,25 @@ pub const IO_Uring = struct {
return sqe;
}
/// Queues (but does not submit) an SQE to perform a IORING_OP_WRITE_FIXED.
/// The `buffer` provided must be registered with the kernel by calling `register_buffers` first.
/// The `buffer_index` must be the same as its index in the array provided to `register_buffers`.
///
/// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
pub fn write_fixed(
self: *IO_Uring,
user_data: u64,
fd: os.fd_t,
buffer: *os.iovec,
offset: u64,
buffer_index: u16,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_write_fixed(sqe, fd, buffer, offset, buffer_index);
sqe.user_data = user_data;
return sqe;
}
/// Queues (but does not submit) an SQE to perform an `accept4(2)` on a socket.
/// Returns a pointer to the SQE.
pub fn accept(
@ -674,6 +712,29 @@ pub const IO_Uring = struct {
try handle_registration_result(res);
}
/// Registers an array of buffers for use with `read_fixed` and `write_fixed`.
pub fn register_buffers(self: *IO_Uring, buffers: []const os.iovec) !void {
assert(self.fd >= 0);
const res = linux.io_uring_register(
self.fd,
.REGISTER_BUFFERS,
buffers.ptr,
@intCast(u32, buffers.len),
);
try handle_registration_result(res);
}
/// Unregister the registered buffers.
pub fn unregister_buffers(self: *IO_Uring) !void {
assert(self.fd >= 0);
const res = linux.io_uring_register(self.fd, .UNREGISTER_BUFFERS, null, 0);
switch (linux.getErrno(res)) {
.SUCCESS => {},
.NXIO => return error.BuffersNotRegistered,
else => |errno| return os.unexpectedErrno(errno),
}
}
fn handle_registration_result(res: usize) !void {
switch (linux.getErrno(res)) {
.SUCCESS => {},
@ -905,6 +966,16 @@ pub fn io_uring_prep_writev(
io_uring_prep_rw(.WRITEV, sqe, fd, @ptrToInt(iovecs.ptr), iovecs.len, offset);
}
pub fn io_uring_prep_read_fixed(sqe: *io_uring_sqe, fd: os.fd_t, buffer: *os.iovec, offset: u64, buffer_index: u16) void {
io_uring_prep_rw(.READ_FIXED, sqe, fd, @ptrToInt(buffer.iov_base), buffer.iov_len, offset);
sqe.buf_index = buffer_index;
}
pub fn io_uring_prep_write_fixed(sqe: *io_uring_sqe, fd: os.fd_t, buffer: *os.iovec, offset: u64, buffer_index: u16) void {
io_uring_prep_rw(.WRITE_FIXED, sqe, fd, @ptrToInt(buffer.iov_base), buffer.iov_len, offset);
sqe.buf_index = buffer_index;
}
pub fn io_uring_prep_accept(
sqe: *io_uring_sqe,
fd: os.fd_t,
@ -1282,6 +1353,63 @@ test "write/read" {
try testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
}
test "write_fixed/read_fixed" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
var ring = IO_Uring.init(2, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
else => return err,
};
defer ring.deinit();
const path = "test_io_uring_write_read_fixed";
const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
defer file.close();
defer std.fs.cwd().deleteFile(path) catch {};
const fd = file.handle;
var raw_buffers: [2][11]u8 = undefined;
// First buffer will be written to the file.
std.mem.set(u8, &raw_buffers[0], 'z');
std.mem.copy(u8, &raw_buffers[0], "foobar");
var buffers = [2]os.iovec{
.{ .iov_base = &raw_buffers[0], .iov_len = raw_buffers[0].len },
.{ .iov_base = &raw_buffers[1], .iov_len = raw_buffers[1].len },
};
try ring.register_buffers(&buffers);
const sqe_write = try ring.write_fixed(0x45454545, fd, &buffers[0], 3, 0);
try testing.expectEqual(linux.IORING_OP.WRITE_FIXED, sqe_write.opcode);
try testing.expectEqual(@as(u64, 3), sqe_write.off);
sqe_write.flags |= linux.IOSQE_IO_LINK;
const sqe_read = try ring.read_fixed(0x12121212, fd, &buffers[1], 0, 1);
try testing.expectEqual(linux.IORING_OP.READ_FIXED, sqe_read.opcode);
try testing.expectEqual(@as(u64, 0), sqe_read.off);
try testing.expectEqual(@as(u32, 2), try ring.submit());
const cqe_write = try ring.copy_cqe();
const cqe_read = try ring.copy_cqe();
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x45454545,
.res = @intCast(i32, buffers[0].iov_len),
.flags = 0,
}, cqe_write);
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x12121212,
.res = @intCast(i32, buffers[1].iov_len),
.flags = 0,
}, cqe_read);
try testing.expectEqualSlices(u8, "\x00\x00\x00", buffers[1].iov_base[0..3]);
try testing.expectEqualSlices(u8, "foobar", buffers[1].iov_base[3..9]);
try testing.expectEqualSlices(u8, "zz", buffers[1].iov_base[9..11]);
}
test "openat" {
if (builtin.os.tag != .linux) return error.SkipZigTest;

View file

@ -0,0 +1,56 @@
//! Encoding of ioctl request numbers (IO / IOR / IOW / IOWR helpers).
const std = @import("../../std.zig");
// Bit-layout parameters for the request number, selected per CPU architecture.
// mips/powerpc/sparc use a 13-bit size field, a 3-bit direction field, and
// different direction codes (none=1, read=2, write=4) than the generic layout
// (14-bit size, 2-bit direction, none=0, read=2, write=1).
// NOTE(review): these values appear to mirror Linux's asm-generic/arch-specific
// _IOC_* constants — confirm against the kernel headers when updating.
const bits = switch (@import("builtin").cpu.arch) {
    .mips,
    .mipsel,
    .mips64,
    .mips64el,
    .powerpc,
    .powerpcle,
    .powerpc64,
    .powerpc64le,
    .sparc,
    .sparcv9,
    .sparcel,
    => .{ .size = 13, .dir = 3, .none = 1, .read = 2, .write = 4 },
    else => .{ .size = 14, .dir = 2, .none = 0, .read = 2, .write = 1 },
};
// Unsigned integer type just wide enough for the direction field (2 or 3 bits).
const Direction = std.meta.Int(.unsigned, bits.dir);
// A decoded ioctl request number. Packed struct: the first field occupies the
// least significant bits, so the 32-bit value is laid out (LSB to MSB):
// nr (8) | io_type (8) | size (13 or 14) | dir (3 or 2).
pub const Request = packed struct {
    nr: u8,
    io_type: u8,
    size: std.meta.Int(.unsigned, bits.size),
    dir: Direction,
};
// Assemble a request number from direction, ioctl type, command number, and
// the size of the payload type T (@sizeOf(T) must fit in the size field).
fn io_impl(dir: Direction, io_type: u8, nr: u8, comptime T: type) u32 {
    const request = Request{
        .dir = dir,
        .size = @sizeOf(T),
        .io_type = io_type,
        .nr = nr,
    };
    // Reinterpret the packed struct as its 32-bit representation.
    return @bitCast(u32, request);
}
/// Request number for an ioctl that transfers no data.
pub fn IO(io_type: u8, nr: u8) u32 {
    return io_impl(bits.none, io_type, nr, void);
}
/// Request number for an ioctl that reads a `T` from the kernel.
pub fn IOR(io_type: u8, nr: u8, comptime T: type) u32 {
    return io_impl(bits.read, io_type, nr, T);
}
/// Request number for an ioctl that writes a `T` to the kernel.
pub fn IOW(io_type: u8, nr: u8, comptime T: type) u32 {
    return io_impl(bits.write, io_type, nr, T);
}
/// Request number for an ioctl that both writes and reads a `T`.
pub fn IOWR(io_type: u8, nr: u8, comptime T: type) u32 {
    return io_impl(bits.read | bits.write, io_type, nr, T);
}
comptime {
    // The encoded request number must be exactly 32 bits on every
    // architecture; the switch above is required to keep size+dir summing
    // to 16 bits.
    std.debug.assert(@bitSizeOf(Request) == 32);
}

View file

@ -674,6 +674,10 @@ pub const msghdr_const = extern struct {
pub const off_t = i64;
pub const ino_t = u64;
pub const mode_t = u32;
pub const dev_t = usize;
pub const nlink_t = u32;
pub const blksize_t = isize;
pub const blkcnt_t = isize;
// The `stat64` definition used by the kernel.
pub const Stat = extern struct {

View file

@ -188,7 +188,10 @@ fn testReadlink(target_path: []const u8, symlink_path: []const u8) !void {
}
test "link with relative paths" {
if (native_os != .linux) return error.SkipZigTest;
switch (native_os) {
.linux, .solaris => {},
else => return error.SkipZigTest,
}
var cwd = fs.cwd();
cwd.deleteFile("example.txt") catch {};
@ -222,7 +225,10 @@ test "link with relative paths" {
}
test "linkat with different directories" {
if (native_os != .linux) return error.SkipZigTest;
switch (native_os) {
.linux, .solaris => {},
else => return error.SkipZigTest,
}
var cwd = fs.cwd();
var tmp = tmpDir(.{});
@ -634,8 +640,10 @@ test "fcntl" {
}
test "signalfd" {
if (native_os != .linux)
return error.SkipZigTest;
switch (native_os) {
.linux, .solaris => {},
else => return error.SkipZigTest,
}
_ = std.os.signalfd;
}
@ -658,8 +666,10 @@ test "sync" {
}
test "fsync" {
if (native_os != .linux and native_os != .windows)
return error.SkipZigTest;
switch (native_os) {
.linux, .windows, .solaris => {},
else => return error.SkipZigTest,
}
var tmp = tmpDir(.{});
defer tmp.cleanup();
@ -754,7 +764,10 @@ test "sigaction" {
}
test "dup & dup2" {
if (native_os != .linux) return error.SkipZigTest;
switch (native_os) {
.linux, .solaris => {},
else => return error.SkipZigTest,
}
var tmp = tmpDir(.{});
defer tmp.cleanup();

View file

@ -152,6 +152,8 @@ pub extern "kernel32" fn GetCommandLineW() callconv(WINAPI) LPWSTR;
pub extern "kernel32" fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) callconv(WINAPI) BOOL;
pub extern "kernel32" fn GetConsoleOutputCP() callconv(WINAPI) UINT;
pub extern "kernel32" fn GetConsoleScreenBufferInfo(hConsoleOutput: HANDLE, lpConsoleScreenBufferInfo: *CONSOLE_SCREEN_BUFFER_INFO) callconv(WINAPI) BOOL;
pub extern "kernel32" fn FillConsoleOutputCharacterA(hConsoleOutput: HANDLE, cCharacter: CHAR, nLength: DWORD, dwWriteCoord: COORD, lpNumberOfCharsWritten: *DWORD) callconv(WINAPI) BOOL;
pub extern "kernel32" fn FillConsoleOutputCharacterW(hConsoleOutput: HANDLE, cCharacter: WCHAR, nLength: DWORD, dwWriteCoord: COORD, lpNumberOfCharsWritten: *DWORD) callconv(WINAPI) BOOL;
@ -286,6 +288,8 @@ pub extern "kernel32" fn SetConsoleCtrlHandler(
Add: BOOL,
) callconv(WINAPI) BOOL;
pub extern "kernel32" fn SetConsoleOutputCP(wCodePageID: UINT) callconv(WINAPI) BOOL;
pub extern "kernel32" fn SetFileCompletionNotificationModes(
FileHandle: HANDLE,
Flags: UCHAR,

View file

@ -1105,6 +1105,7 @@ pub const sockaddr = extern struct {
family: ADDRESS_FAMILY,
data: [14]u8,
pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
/// IPv4 socket address

View file

@ -43,13 +43,13 @@ pub fn PriorityDequeue(comptime T: type) type {
/// Insert a new element, maintaining priority.
pub fn add(self: *Self, elem: T) !void {
try ensureCapacity(self, self.len + 1);
try self.ensureUnusedCapacity(1);
addUnchecked(self, elem);
}
/// Add each element in `items` to the dequeue.
pub fn addSlice(self: *Self, items: []const T) !void {
try self.ensureCapacity(self.len + items.len);
try self.ensureUnusedCapacity(items.len);
for (items) |e| {
self.addUnchecked(e);
}
@ -359,7 +359,11 @@ pub fn PriorityDequeue(comptime T: type) type {
return queue;
}
pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
/// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`.
pub const ensureCapacity = ensureTotalCapacity;
/// Ensure that the dequeue can fit at least `new_capacity` items.
pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void {
var better_capacity = self.capacity();
if (better_capacity >= new_capacity) return;
while (true) {
@ -369,6 +373,11 @@ pub fn PriorityDequeue(comptime T: type) type {
self.items = try self.allocator.realloc(self.items, better_capacity);
}
/// Ensure that the dequeue can fit at least `additional_count` **more** items.
pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void {
return self.ensureTotalCapacity(self.len + additional_count);
}
/// Reduce allocated capacity to `new_len`.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
@ -824,7 +833,7 @@ test "std.PriorityDequeue: shrinkAndFree" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
try queue.ensureCapacity(4);
try queue.ensureTotalCapacity(4);
try expect(queue.capacity() >= 4);
try queue.add(1);
@ -940,7 +949,7 @@ fn fuzzTestMinMax(rng: *std.rand.Random, queue_size: usize) !void {
fn generateRandomSlice(allocator: *std.mem.Allocator, rng: *std.rand.Random, size: usize) ![]u32 {
var array = std.ArrayList(u32).init(allocator);
try array.ensureCapacity(size);
try array.ensureTotalCapacity(size);
var i: usize = 0;
while (i < size) : (i += 1) {

View file

@ -42,7 +42,7 @@ pub fn PriorityQueue(comptime T: type) type {
/// Insert a new element, maintaining priority.
pub fn add(self: *Self, elem: T) !void {
try ensureCapacity(self, self.len + 1);
try self.ensureUnusedCapacity(1);
addUnchecked(self, elem);
}
@ -69,7 +69,7 @@ pub fn PriorityQueue(comptime T: type) type {
/// Add each element in `items` to the queue.
pub fn addSlice(self: *Self, items: []const T) !void {
try self.ensureCapacity(self.len + items.len);
try self.ensureUnusedCapacity(items.len);
for (items) |e| {
self.addUnchecked(e);
}
@ -175,7 +175,11 @@ pub fn PriorityQueue(comptime T: type) type {
return queue;
}
pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
/// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`.
pub const ensureCapacity = ensureTotalCapacity;
/// Ensure that the queue can fit at least `new_capacity` items.
pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void {
var better_capacity = self.capacity();
if (better_capacity >= new_capacity) return;
while (true) {
@ -185,6 +189,11 @@ pub fn PriorityQueue(comptime T: type) type {
self.items = try self.allocator.realloc(self.items, better_capacity);
}
/// Ensure that the queue can fit at least `additional_count` **more** item.
pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void {
return self.ensureTotalCapacity(self.len + additional_count);
}
/// Reduce allocated capacity to `new_len`.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
@ -483,7 +492,7 @@ test "std.PriorityQueue: shrinkAndFree" {
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
try queue.ensureCapacity(4);
try queue.ensureTotalCapacity(4);
try expect(queue.capacity() >= 4);
try queue.add(1);

View file

@ -625,7 +625,7 @@ pub const UserInfo = struct {
/// POSIX function which gets a uid from username.
pub fn getUserInfo(name: []const u8) !UserInfo {
return switch (builtin.os.tag) {
.linux, .macos, .watchos, .tvos, .ios, .freebsd, .netbsd, .openbsd, .haiku => posixGetUserInfo(name),
.linux, .macos, .watchos, .tvos, .ios, .freebsd, .netbsd, .openbsd, .haiku, .solaris => posixGetUserInfo(name),
else => @compileError("Unsupported OS"),
};
}
@ -753,6 +753,7 @@ pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]
.netbsd,
.dragonfly,
.openbsd,
.solaris,
=> {
var paths = List.init(allocator);
errdefer {

File diff suppressed because it is too large Load diff

1187
lib/std/special/c_stage1.zig Normal file

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -3,23 +3,23 @@ const builtin = @import("builtin");
const is_test = builtin.is_test;
pub fn __extendsfdf2(a: f32) callconv(.C) f64 {
return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, a) });
return extendXfYf2(f64, f32, @bitCast(u32, a));
}
pub fn __extenddftf2(a: f64) callconv(.C) f128 {
return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f64, @bitCast(u64, a) });
return extendXfYf2(f128, f64, @bitCast(u64, a));
}
pub fn __extendsftf2(a: f32) callconv(.C) f128 {
return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f32, @bitCast(u32, a) });
return extendXfYf2(f128, f32, @bitCast(u32, a));
}
pub fn __extendhfsf2(a: u16) callconv(.C) f32 {
return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, a });
return extendXfYf2(f32, f16, a);
}
pub fn __extendhftf2(a: u16) callconv(.C) f128 {
return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f16, a });
return extendXfYf2(f128, f16, a);
}
pub fn __aeabi_h2f(arg: u16) callconv(.AAPCS) f32 {
@ -34,7 +34,7 @@ pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 {
const CHAR_BIT = 8;
fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t {
inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t {
@setRuntimeSafety(builtin.is_test);
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);

View file

@ -53,19 +53,6 @@ pub fn zig_probe_stack() callconv(.Naked) void {
},
else => {},
}
if (comptime native_arch.isAARCH64()) {
asm volatile (
\\ lsl x16, x15, #4
\\ mov x17, sp
\\1:
\\ sub x17, x17, #PAGE_SIZE
\\ subs x16, x16, #PAGE_SIZE
\\ ldr xzr, [x17]
\\ b.gt 1b
\\
\\ ret
);
}
unreachable;
}
@ -118,6 +105,21 @@ fn win_probe_stack_only() void {
},
else => {},
}
if (comptime native_arch.isAARCH64()) {
// NOTE: page size hardcoded to 4096 for now
asm volatile (
\\ lsl x16, x15, #4
\\ mov x17, sp
\\1:
\\
\\ sub x17, x17, 4096
\\ subs x16, x16, 4096
\\ ldr xzr, [x17]
\\ b.gt 1b
\\
\\ ret
);
}
unreachable;
}
@ -199,7 +201,9 @@ pub fn _chkstk() callconv(.Naked) void {
}
pub fn __chkstk() callconv(.Naked) void {
@setRuntimeSafety(false);
switch (native_arch) {
if (comptime native_arch.isAARCH64()) {
@call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
} else switch (native_arch) {
.i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
.x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
else => unreachable,

View file

@ -13,6 +13,12 @@ fn processArgs() void {
const args = std.process.argsAlloc(&args_allocator.allocator) catch {
@panic("Too many bytes passed over the CLI to the test runner");
};
if (args.len != 2) {
const self_name = if (args.len >= 1) args[0] else if (builtin.os.tag == .windows) "test.exe" else "test";
const zig_ext = if (builtin.os.tag == .windows) ".exe" else "";
std.debug.print("Usage: {s} path/to/zig{s}\n", .{ self_name, zig_ext });
@panic("Wrong number of command line arguments");
}
std.testing.zig_exe_path = args[1];
}
@ -56,7 +62,7 @@ pub fn main() void {
.evented => blk: {
if (async_frame_buffer.len < size) {
std.heap.page_allocator.free(async_frame_buffer);
async_frame_buffer = try std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size);
async_frame_buffer = std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size) catch @panic("out of memory");
}
const casted_fn = @ptrCast(fn () callconv(.Async) anyerror!void, test_fn.func);
break :blk await @asyncCall(async_frame_buffer, {}, casted_fn, .{});
@ -123,8 +129,16 @@ pub fn log(
}
pub fn main2() anyerror!void {
var bad = false;
// Simpler main(), exercising fewer language features, so that stage2 can handle it.
for (builtin.test_functions) |test_fn| {
try test_fn.func();
test_fn.func() catch |err| {
if (err != error.SkipZigTest) {
bad = true;
}
};
}
if (bad) {
return error.TestsFailed;
}
}

View file

@ -28,6 +28,8 @@ comptime {
if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) {
@export(main2, .{ .name = "main" });
}
} else if (builtin.os.tag == .windows) {
@export(wWinMainCRTStartup2, .{ .name = "wWinMainCRTStartup" });
} else {
if (!@hasDecl(root, "_start")) {
@export(_start2, .{ .name = "_start" });
@ -87,6 +89,16 @@ fn main2() callconv(.C) c_int {
}
fn _start2() callconv(.Naked) noreturn {
callMain2();
}
fn callMain2() noreturn {
@setAlignStack(16);
root.main();
exit2(0);
}
fn wWinMainCRTStartup2() callconv(.C) noreturn {
root.main();
exit2(0);
}
@ -143,11 +155,16 @@ fn exit2(code: usize) noreturn {
},
else => @compileError("TODO"),
},
.windows => {
ExitProcess(@truncate(u32, code));
},
else => @compileError("TODO"),
}
unreachable;
}
extern "kernel32" fn ExitProcess(exit_code: u32) callconv(.C) noreturn;
////////////////////////////////////////////////////////////////////////////////
fn _DllMainCRTStartup(

View file

@ -235,7 +235,6 @@ pub const Target = struct {
.fuchsia,
.kfreebsd,
.lv2,
.solaris,
.zos,
.haiku,
.minix,
@ -310,6 +309,12 @@ pub const Target = struct {
.max = .{ .major = 6, .minor = 0 },
},
},
.solaris => return .{
.semver = .{
.min = .{ .major = 5, .minor = 11 },
.max = .{ .major = 5, .minor = 11 },
},
},
.linux => return .{
.linux = .{
@ -353,6 +358,7 @@ pub const Target = struct {
.netbsd,
.openbsd,
.dragonfly,
.solaris,
=> return TaggedVersionRange{ .semver = self.version_range.semver },
else => return .none,
@ -385,6 +391,7 @@ pub const Target = struct {
.dragonfly,
.openbsd,
.haiku,
.solaris,
=> true,
.linux,
@ -395,7 +402,6 @@ pub const Target = struct {
.fuchsia,
.kfreebsd,
.lv2,
.solaris,
.zos,
.minix,
.rtems,
@ -1523,6 +1529,7 @@ pub const Target = struct {
.netbsd => return copy(&result, "/libexec/ld.elf_so"),
.openbsd => return copy(&result, "/usr/libexec/ld.so"),
.dragonfly => return copy(&result, "/libexec/ld-elf.so.2"),
.solaris => return copy(&result, "/lib/64/ld.so.1"),
.linux => switch (self.cpu.arch) {
.i386,
.sparc,
@ -1642,7 +1649,6 @@ pub const Target = struct {
.fuchsia,
.kfreebsd,
.lv2,
.solaris,
.zos,
.minix,
.rtems,

View file

@ -553,8 +553,9 @@ fn testDecode(bytes: []const u8) !u21 {
/// Caller must free returned memory.
pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
var result = std.ArrayList(u8).init(allocator);
errdefer result.deinit();
// optimistically guess that it will all be ascii.
try result.ensureCapacity(utf16le.len);
try result.ensureTotalCapacity(utf16le.len);
var out_index: usize = 0;
var it = Utf16LeIterator.init(utf16le);
while (try it.nextCodepoint()) |codepoint| {
@ -569,9 +570,10 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8
/// Caller must free returned memory.
pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 {
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
var result = std.ArrayList(u8).init(allocator);
errdefer result.deinit();
// optimistically guess that it will all be ascii.
try result.ensureCapacity(utf16le.len);
try result.ensureTotalCapacity(utf16le.len);
var out_index: usize = 0;
var it = Utf16LeIterator.init(utf16le);
while (try it.nextCodepoint()) |codepoint| {
@ -653,12 +655,20 @@ test "utf16leToUtf8" {
defer std.testing.allocator.free(utf8);
try testing.expect(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80"));
}
{
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xdcdc);
mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdcdc);
const result = utf16leToUtf8Alloc(std.testing.allocator, &utf16le);
try std.testing.expectError(error.UnexpectedSecondSurrogateHalf, result);
}
}
pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 {
var result = std.ArrayList(u16).init(allocator);
errdefer result.deinit();
// optimistically guess that it will not require surrogate pairs
try result.ensureCapacity(utf8.len + 1);
try result.ensureTotalCapacity(utf8.len + 1);
const view = try Utf8View.init(utf8);
var it = view.iterator();
@ -718,6 +728,10 @@ test "utf8ToUtf16Le" {
try testing.expectEqual(@as(usize, 2), length);
try testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16le[0..]));
}
{
const result = utf8ToUtf16Le(utf16le[0..], "\xf4\x90\x80\x80");
try testing.expectError(error.InvalidUtf8, result);
}
}
test "utf8ToUtf16LeWithNull" {
@ -733,6 +747,10 @@ test "utf8ToUtf16LeWithNull" {
try testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16[0..]));
try testing.expect(utf16[2] == 0);
}
{
const result = utf8ToUtf16LeWithNull(testing.allocator, "\xf4\x90\x80\x80");
try testing.expectError(error.InvalidUtf8, result);
}
}
/// Converts a UTF-8 string literal into a UTF-16LE string literal.

View file

@ -27,7 +27,7 @@ pub fn resolveScopeId(name: []const u8) !u32 {
return rc;
}
const fd = try os.socket(os.AF.UNIX, os.SOCK.DGRAM, 0);
const fd = try os.socket(os.AF.INET, os.SOCK.DGRAM, 0);
defer os.closeSocket(fd);
var f: os.ifreq = undefined;
@ -566,21 +566,17 @@ test "ipv6: parse & format" {
test "ipv6: parse & format addresses with scope ids" {
if (!have_ifnamesize) return error.SkipZigTest;
const iface = if (native_os.tag == .linux)
"lo"
else
"lo0";
const input = "FF01::FB%" ++ iface;
const output = "ff01::fb%1";
const inputs = [_][]const u8{
"FF01::FB%lo",
const parsed = IPv6.parse(input) catch |err| switch (err) {
error.InterfaceNotFound => return,
else => return err,
};
const outputs = [_][]const u8{
"ff01::fb%1",
};
for (inputs) |input, i| {
const parsed = IPv6.parse(input) catch |err| switch (err) {
error.InterfaceNotFound => continue,
else => return err,
};
try testing.expectFmt(outputs[i], "{}", .{parsed});
}
try testing.expectFmt(output, "{}", .{parsed});
}

View file

@ -37,7 +37,7 @@ pub const Socket = struct {
/// POSIX `sockaddr.storage`. The expected size and alignment is specified in IETF RFC 2553.
pub const Storage = extern struct {
pub const expected_size = 128;
pub const expected_size = os.sockaddr.SS_MAXSIZE;
pub const expected_alignment = 8;
pub const padding_size = expected_size -

View file

@ -262,6 +262,9 @@ pub fn renderError(tree: Tree, parse_error: Error, stream: anytype) !void {
token_tags[parse_error.token].symbol(),
});
},
.extra_addrspace_qualifier => {
return stream.writeAll("extra addrspace qualifier");
},
.extra_align_qualifier => {
return stream.writeAll("extra align qualifier");
},
@ -392,14 +395,18 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex {
.assign_mod,
.assign_add,
.assign_sub,
.assign_bit_shift_left,
.assign_bit_shift_right,
.assign_shl,
.assign_shl_sat,
.assign_shr,
.assign_bit_and,
.assign_bit_xor,
.assign_bit_or,
.assign_mul_wrap,
.assign_add_wrap,
.assign_sub_wrap,
.assign_mul_sat,
.assign_add_sat,
.assign_sub_sat,
.assign,
.merge_error_sets,
.mul,
@ -407,13 +414,17 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex {
.mod,
.array_mult,
.mul_wrap,
.mul_sat,
.add,
.sub,
.array_cat,
.add_wrap,
.sub_wrap,
.bit_shift_left,
.bit_shift_right,
.add_sat,
.sub_sat,
.shl,
.shl_sat,
.shr,
.bit_and,
.bit_xor,
.bit_or,
@ -648,14 +659,18 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
.assign_mod,
.assign_add,
.assign_sub,
.assign_bit_shift_left,
.assign_bit_shift_right,
.assign_shl,
.assign_shl_sat,
.assign_shr,
.assign_bit_and,
.assign_bit_xor,
.assign_bit_or,
.assign_mul_wrap,
.assign_add_wrap,
.assign_sub_wrap,
.assign_mul_sat,
.assign_add_sat,
.assign_sub_sat,
.assign,
.merge_error_sets,
.mul,
@ -663,13 +678,17 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
.mod,
.array_mult,
.mul_wrap,
.mul_sat,
.add,
.sub,
.array_cat,
.add_wrap,
.sub_wrap,
.bit_shift_left,
.bit_shift_right,
.add_sat,
.sub_sat,
.shl,
.shl_sat,
.shr,
.bit_and,
.bit_xor,
.bit_or,
@ -1021,7 +1040,7 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
},
.fn_proto_one => {
const extra = tree.extraData(datas[n].lhs, Node.FnProtoOne);
// linksection, callconv, align can appear in any order, so we
// addrspace, linksection, callconv, align can appear in any order, so we
// find the last one here.
var max_node: Node.Index = datas[n].rhs;
var max_start = token_starts[main_tokens[max_node]];
@ -1034,6 +1053,14 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
max_offset = 1; // for the rparen
}
}
if (extra.addrspace_expr != 0) {
const start = token_starts[main_tokens[extra.addrspace_expr]];
if (start > max_start) {
max_node = extra.addrspace_expr;
max_start = start;
max_offset = 1; // for the rparen
}
}
if (extra.section_expr != 0) {
const start = token_starts[main_tokens[extra.section_expr]];
if (start > max_start) {
@ -1055,7 +1082,7 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
},
.fn_proto => {
const extra = tree.extraData(datas[n].lhs, Node.FnProto);
// linksection, callconv, align can appear in any order, so we
// addrspace, linksection, callconv, align can appear in any order, so we
// find the last one here.
var max_node: Node.Index = datas[n].rhs;
var max_start = token_starts[main_tokens[max_node]];
@ -1068,6 +1095,14 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
max_offset = 1; // for the rparen
}
}
if (extra.addrspace_expr != 0) {
const start = token_starts[main_tokens[extra.addrspace_expr]];
if (start > max_start) {
max_node = extra.addrspace_expr;
max_start = start;
max_offset = 1; // for the rparen
}
}
if (extra.section_expr != 0) {
const start = token_starts[main_tokens[extra.section_expr]];
if (start > max_start) {
@ -1138,6 +1173,7 @@ pub fn globalVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
return tree.fullVarDecl(.{
.type_node = extra.type_node,
.align_node = extra.align_node,
.addrspace_node = extra.addrspace_node,
.section_node = extra.section_node,
.init_node = data.rhs,
.mut_token = tree.nodes.items(.main_token)[node],
@ -1151,6 +1187,7 @@ pub fn localVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
return tree.fullVarDecl(.{
.type_node = extra.type_node,
.align_node = extra.align_node,
.addrspace_node = 0,
.section_node = 0,
.init_node = data.rhs,
.mut_token = tree.nodes.items(.main_token)[node],
@ -1163,6 +1200,7 @@ pub fn simpleVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
return tree.fullVarDecl(.{
.type_node = data.lhs,
.align_node = 0,
.addrspace_node = 0,
.section_node = 0,
.init_node = data.rhs,
.mut_token = tree.nodes.items(.main_token)[node],
@ -1175,6 +1213,7 @@ pub fn alignedVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
return tree.fullVarDecl(.{
.type_node = 0,
.align_node = data.lhs,
.addrspace_node = 0,
.section_node = 0,
.init_node = data.rhs,
.mut_token = tree.nodes.items(.main_token)[node],
@ -1249,6 +1288,7 @@ pub fn fnProtoSimple(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.
.return_type = data.rhs,
.params = params,
.align_expr = 0,
.addrspace_expr = 0,
.section_expr = 0,
.callconv_expr = 0,
});
@ -1265,6 +1305,7 @@ pub fn fnProtoMulti(tree: Tree, node: Node.Index) full.FnProto {
.return_type = data.rhs,
.params = params,
.align_expr = 0,
.addrspace_expr = 0,
.section_expr = 0,
.callconv_expr = 0,
});
@ -1282,6 +1323,7 @@ pub fn fnProtoOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.FnP
.return_type = data.rhs,
.params = params,
.align_expr = extra.align_expr,
.addrspace_expr = extra.addrspace_expr,
.section_expr = extra.section_expr,
.callconv_expr = extra.callconv_expr,
});
@ -1298,6 +1340,7 @@ pub fn fnProto(tree: Tree, node: Node.Index) full.FnProto {
.return_type = data.rhs,
.params = params,
.align_expr = extra.align_expr,
.addrspace_expr = extra.addrspace_expr,
.section_expr = extra.section_expr,
.callconv_expr = extra.callconv_expr,
});
@ -1453,6 +1496,7 @@ pub fn ptrTypeAligned(tree: Tree, node: Node.Index) full.PtrType {
return tree.fullPtrType(.{
.main_token = tree.nodes.items(.main_token)[node],
.align_node = data.lhs,
.addrspace_node = 0,
.sentinel = 0,
.bit_range_start = 0,
.bit_range_end = 0,
@ -1466,6 +1510,7 @@ pub fn ptrTypeSentinel(tree: Tree, node: Node.Index) full.PtrType {
return tree.fullPtrType(.{
.main_token = tree.nodes.items(.main_token)[node],
.align_node = 0,
.addrspace_node = 0,
.sentinel = data.lhs,
.bit_range_start = 0,
.bit_range_end = 0,
@ -1480,6 +1525,7 @@ pub fn ptrType(tree: Tree, node: Node.Index) full.PtrType {
return tree.fullPtrType(.{
.main_token = tree.nodes.items(.main_token)[node],
.align_node = extra.align_node,
.addrspace_node = extra.addrspace_node,
.sentinel = extra.sentinel,
.bit_range_start = 0,
.bit_range_end = 0,
@ -1494,6 +1540,7 @@ pub fn ptrTypeBitRange(tree: Tree, node: Node.Index) full.PtrType {
return tree.fullPtrType(.{
.main_token = tree.nodes.items(.main_token)[node],
.align_node = extra.align_node,
.addrspace_node = extra.addrspace_node,
.sentinel = extra.sentinel,
.bit_range_start = extra.bit_range_start,
.bit_range_end = extra.bit_range_end,
@ -2063,6 +2110,7 @@ pub const full = struct {
mut_token: TokenIndex,
type_node: Node.Index,
align_node: Node.Index,
addrspace_node: Node.Index,
section_node: Node.Index,
init_node: Node.Index,
};
@ -2130,6 +2178,7 @@ pub const full = struct {
return_type: Node.Index,
params: []const Node.Index,
align_expr: Node.Index,
addrspace_expr: Node.Index,
section_expr: Node.Index,
callconv_expr: Node.Index,
};
@ -2288,6 +2337,7 @@ pub const full = struct {
pub const Components = struct {
main_token: TokenIndex,
align_node: Node.Index,
addrspace_node: Node.Index,
sentinel: Node.Index,
bit_range_start: Node.Index,
bit_range_end: Node.Index,
@ -2397,6 +2447,7 @@ pub const Error = struct {
expected_var_decl_or_fn,
expected_loop_payload,
expected_container,
extra_addrspace_qualifier,
extra_align_qualifier,
extra_allowzero_qualifier,
extra_const_qualifier,
@ -2489,9 +2540,11 @@ pub const Node = struct {
/// `lhs -= rhs`. main_token is op.
assign_sub,
/// `lhs <<= rhs`. main_token is op.
assign_bit_shift_left,
assign_shl,
/// `lhs <<|= rhs`. main_token is op.
assign_shl_sat,
/// `lhs >>= rhs`. main_token is op.
assign_bit_shift_right,
assign_shr,
/// `lhs &= rhs`. main_token is op.
assign_bit_and,
/// `lhs ^= rhs`. main_token is op.
@ -2504,6 +2557,12 @@ pub const Node = struct {
assign_add_wrap,
/// `lhs -%= rhs`. main_token is op.
assign_sub_wrap,
/// `lhs *|= rhs`. main_token is op.
assign_mul_sat,
/// `lhs +|= rhs`. main_token is op.
assign_add_sat,
/// `lhs -|= rhs`. main_token is op.
assign_sub_sat,
/// `lhs = rhs`. main_token is op.
assign,
/// `lhs || rhs`. main_token is the `||`.
@ -2518,6 +2577,8 @@ pub const Node = struct {
array_mult,
/// `lhs *% rhs`. main_token is the `*%`.
mul_wrap,
/// `lhs *| rhs`. main_token is the `*|`.
mul_sat,
/// `lhs + rhs`. main_token is the `+`.
add,
/// `lhs - rhs`. main_token is the `-`.
@ -2528,10 +2589,16 @@ pub const Node = struct {
add_wrap,
/// `lhs -% rhs`. main_token is the `-%`.
sub_wrap,
/// `lhs +| rhs`. main_token is the `+|`.
add_sat,
/// `lhs -| rhs`. main_token is the `-|`.
sub_sat,
/// `lhs << rhs`. main_token is the `<<`.
bit_shift_left,
shl,
/// `lhs <<| rhs`. main_token is the `<<|`.
shl_sat,
/// `lhs >> rhs`. main_token is the `>>`.
bit_shift_right,
shr,
/// `lhs & rhs`. main_token is the `&`.
bit_and,
/// `lhs ^ rhs`. main_token is the `^`.
@ -2723,13 +2790,13 @@ pub const Node = struct {
/// main_token is the `fn` keyword.
/// extern function declarations use this tag.
fn_proto_multi,
/// `fn(a: b) rhs linksection(e) callconv(f)`. `FnProtoOne[lhs]`.
/// `fn(a: b) rhs addrspace(e) linksection(f) callconv(g)`. `FnProtoOne[lhs]`.
/// zero or one parameters.
/// anytype and ... parameters are omitted from the AST tree.
/// main_token is the `fn` keyword.
/// extern function declarations use this tag.
fn_proto_one,
/// `fn(a: b, c: d) rhs linksection(e) callconv(f)`. `FnProto[lhs]`.
/// `fn(a: b, c: d) rhs addrspace(e) linksection(f) callconv(g)`. `FnProto[lhs]`.
/// anytype and ... parameters are omitted from the AST tree.
/// main_token is the `fn` keyword.
/// extern function declarations use this tag.
@ -2893,11 +2960,13 @@ pub const Node = struct {
pub const PtrType = struct {
sentinel: Index,
align_node: Index,
addrspace_node: Index,
};
pub const PtrTypeBitRange = struct {
sentinel: Index,
align_node: Index,
addrspace_node: Index,
bit_range_start: Index,
bit_range_end: Index,
};
@ -2920,8 +2989,13 @@ pub const Node = struct {
};
pub const GlobalVarDecl = struct {
/// Populated if there is an explicit type ascription.
type_node: Index,
/// Populated if align(A) is present.
align_node: Index,
/// Populated if addrspace(A) is present.
addrspace_node: Index,
/// Populated if linksection(A) is present.
section_node: Index,
};
@ -2953,6 +3027,8 @@ pub const Node = struct {
param: Index,
/// Populated if align(A) is present.
align_expr: Index,
/// Populated if addrspace(A) is present.
addrspace_expr: Index,
/// Populated if linksection(A) is present.
section_expr: Index,
/// Populated if callconv(A) is present.
@ -2964,6 +3040,8 @@ pub const Node = struct {
params_end: Index,
/// Populated if align(A) is present.
align_expr: Index,
/// Populated if addrspace(A) is present.
addrspace_expr: Index,
/// Populated if linksection(A) is present.
section_expr: Index,
/// Populated if callconv(A) is present.

View file

@ -325,6 +325,7 @@ pub fn FlexibleArrayType(comptime SelfType: type, ElementType: type) type {
.is_const = ptr.is_const,
.is_volatile = ptr.is_volatile,
.alignment = @alignOf(ElementType),
.address_space = .generic,
.child = ElementType,
.is_allowzero = true,
.sentinel = null,

View file

@ -17,7 +17,7 @@ pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
// Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
const estimated_token_count = source.len / 8;
try tokens.ensureCapacity(gpa, estimated_token_count);
try tokens.ensureTotalCapacity(gpa, estimated_token_count);
var tokenizer = std.zig.Tokenizer.init(source);
while (true) {
@ -48,7 +48,7 @@ pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
// Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
// Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
const estimated_node_count = (tokens.len + 2) / 2;
try parser.nodes.ensureCapacity(gpa, estimated_node_count);
try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
// Root node must be index 0.
// Root <- skip ContainerMembers eof
@ -138,7 +138,7 @@ const Parser = struct {
fn addExtra(p: *Parser, extra: anytype) Allocator.Error!Node.Index {
const fields = std.meta.fields(@TypeOf(extra));
try p.extra_data.ensureCapacity(p.gpa, p.extra_data.items.len + fields.len);
try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
const result = @intCast(u32, p.extra_data.items.len);
inline for (fields) |field| {
comptime assert(field.field_type == Node.Index);
@ -629,7 +629,7 @@ const Parser = struct {
};
}
/// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
/// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? AddrSpace? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
fn parseFnProto(p: *Parser) !Node.Index {
const fn_token = p.eatToken(.keyword_fn) orelse return null_node;
@ -639,6 +639,7 @@ const Parser = struct {
_ = p.eatToken(.identifier);
const params = try p.parseParamDeclList();
const align_expr = try p.parseByteAlign();
const addrspace_expr = try p.parseAddrSpace();
const section_expr = try p.parseLinkSection();
const callconv_expr = try p.parseCallconv();
_ = p.eatToken(.bang);
@ -650,7 +651,7 @@ const Parser = struct {
try p.warn(.expected_return_type);
}
if (align_expr == 0 and section_expr == 0 and callconv_expr == 0) {
if (align_expr == 0 and section_expr == 0 and callconv_expr == 0 and addrspace_expr == 0) {
switch (params) {
.zero_or_one => |param| return p.setNode(fn_proto_index, .{
.tag = .fn_proto_simple,
@ -683,6 +684,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.FnProtoOne{
.param = param,
.align_expr = align_expr,
.addrspace_expr = addrspace_expr,
.section_expr = section_expr,
.callconv_expr = callconv_expr,
}),
@ -698,6 +700,7 @@ const Parser = struct {
.params_start = span.start,
.params_end = span.end,
.align_expr = align_expr,
.addrspace_expr = addrspace_expr,
.section_expr = section_expr,
.callconv_expr = callconv_expr,
}),
@ -708,7 +711,7 @@ const Parser = struct {
}
}
/// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? LinkSection? (EQUAL Expr)? SEMICOLON
/// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? AddrSpace? LinkSection? (EQUAL Expr)? SEMICOLON
fn parseVarDecl(p: *Parser) !Node.Index {
const mut_token = p.eatToken(.keyword_const) orelse
p.eatToken(.keyword_var) orelse
@ -717,9 +720,10 @@ const Parser = struct {
_ = try p.expectToken(.identifier);
const type_node: Node.Index = if (p.eatToken(.colon) == null) 0 else try p.expectTypeExpr();
const align_node = try p.parseByteAlign();
const addrspace_node = try p.parseAddrSpace();
const section_node = try p.parseLinkSection();
const init_node: Node.Index = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
if (section_node == 0) {
if (section_node == 0 and addrspace_node == 0) {
if (align_node == 0) {
return p.addNode(.{
.tag = .simple_var_decl,
@ -759,6 +763,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.GlobalVarDecl{
.type_node = type_node,
.align_node = align_node,
.addrspace_node = addrspace_node,
.section_node = section_node,
}),
.rhs = init_node,
@ -1263,14 +1268,18 @@ const Parser = struct {
.percent_equal => .assign_mod,
.plus_equal => .assign_add,
.minus_equal => .assign_sub,
.angle_bracket_angle_bracket_left_equal => .assign_bit_shift_left,
.angle_bracket_angle_bracket_right_equal => .assign_bit_shift_right,
.angle_bracket_angle_bracket_left_equal => .assign_shl,
.angle_bracket_angle_bracket_left_pipe_equal => .assign_shl_sat,
.angle_bracket_angle_bracket_right_equal => .assign_shr,
.ampersand_equal => .assign_bit_and,
.caret_equal => .assign_bit_xor,
.pipe_equal => .assign_bit_or,
.asterisk_percent_equal => .assign_mul_wrap,
.plus_percent_equal => .assign_add_wrap,
.minus_percent_equal => .assign_sub_wrap,
.asterisk_pipe_equal => .assign_mul_sat,
.plus_pipe_equal => .assign_add_sat,
.minus_pipe_equal => .assign_sub_sat,
.equal => .assign,
else => return expr,
};
@ -1337,14 +1346,17 @@ const Parser = struct {
.keyword_orelse = .{ .prec = 40, .tag = .@"orelse" },
.keyword_catch = .{ .prec = 40, .tag = .@"catch" },
.angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .bit_shift_left },
.angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .bit_shift_right },
.angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .shl },
.angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .shl_sat },
.angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .shr },
.plus = .{ .prec = 60, .tag = .add },
.minus = .{ .prec = 60, .tag = .sub },
.plus_plus = .{ .prec = 60, .tag = .array_cat },
.plus_percent = .{ .prec = 60, .tag = .add_wrap },
.minus_percent = .{ .prec = 60, .tag = .sub_wrap },
.plus_pipe = .{ .prec = 60, .tag = .add_sat },
.minus_pipe = .{ .prec = 60, .tag = .sub_sat },
.pipe_pipe = .{ .prec = 70, .tag = .merge_error_sets },
.asterisk = .{ .prec = 70, .tag = .mul },
@ -1352,6 +1364,7 @@ const Parser = struct {
.percent = .{ .prec = 70, .tag = .mod },
.asterisk_asterisk = .{ .prec = 70, .tag = .array_mult },
.asterisk_percent = .{ .prec = 70, .tag = .mul_wrap },
.asterisk_pipe = .{ .prec = 70, .tag = .mul_sat },
});
fn parseExprPrecedence(p: *Parser, min_prec: i32) Error!Node.Index {
@ -1440,8 +1453,8 @@ const Parser = struct {
/// PrefixTypeOp
/// <- QUESTIONMARK
/// / KEYWORD_anyframe MINUSRARROW
/// / SliceTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / SliceTypeStart (ByteAlign / AddrSpace / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / ArrayTypeStart
/// SliceTypeStart <- LBRACKET (COLON Expr)? RBRACKET
/// PtrTypeStart
@ -1474,16 +1487,7 @@ const Parser = struct {
const asterisk = p.nextToken();
const mods = try p.parsePtrModifiers();
const elem_type = try p.expectTypeExpr();
if (mods.bit_range_start == 0) {
return p.addNode(.{
.tag = .ptr_type_aligned,
.main_token = asterisk,
.data = .{
.lhs = mods.align_node,
.rhs = elem_type,
},
});
} else {
if (mods.bit_range_start != 0) {
return p.addNode(.{
.tag = .ptr_type_bit_range,
.main_token = asterisk,
@ -1491,12 +1495,35 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrTypeBitRange{
.sentinel = 0,
.align_node = mods.align_node,
.addrspace_node = mods.addrspace_node,
.bit_range_start = mods.bit_range_start,
.bit_range_end = mods.bit_range_end,
}),
.rhs = elem_type,
},
});
} else if (mods.addrspace_node != 0) {
return p.addNode(.{
.tag = .ptr_type,
.main_token = asterisk,
.data = .{
.lhs = try p.addExtra(Node.PtrType{
.sentinel = 0,
.align_node = mods.align_node,
.addrspace_node = mods.addrspace_node,
}),
.rhs = elem_type,
},
});
} else {
return p.addNode(.{
.tag = .ptr_type_aligned,
.main_token = asterisk,
.data = .{
.lhs = mods.align_node,
.rhs = elem_type,
},
});
}
},
.asterisk_asterisk => {
@ -1504,16 +1531,7 @@ const Parser = struct {
const mods = try p.parsePtrModifiers();
const elem_type = try p.expectTypeExpr();
const inner: Node.Index = inner: {
if (mods.bit_range_start == 0) {
break :inner try p.addNode(.{
.tag = .ptr_type_aligned,
.main_token = asterisk,
.data = .{
.lhs = mods.align_node,
.rhs = elem_type,
},
});
} else {
if (mods.bit_range_start != 0) {
break :inner try p.addNode(.{
.tag = .ptr_type_bit_range,
.main_token = asterisk,
@ -1521,12 +1539,35 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrTypeBitRange{
.sentinel = 0,
.align_node = mods.align_node,
.addrspace_node = mods.addrspace_node,
.bit_range_start = mods.bit_range_start,
.bit_range_end = mods.bit_range_end,
}),
.rhs = elem_type,
},
});
} else if (mods.addrspace_node != 0) {
break :inner try p.addNode(.{
.tag = .ptr_type,
.main_token = asterisk,
.data = .{
.lhs = try p.addExtra(Node.PtrType{
.sentinel = 0,
.align_node = mods.align_node,
.addrspace_node = mods.addrspace_node,
}),
.rhs = elem_type,
},
});
} else {
break :inner try p.addNode(.{
.tag = .ptr_type_aligned,
.main_token = asterisk,
.data = .{
.lhs = mods.align_node,
.rhs = elem_type,
},
});
}
};
return p.addNode(.{
@ -1543,24 +1584,19 @@ const Parser = struct {
_ = p.nextToken();
const asterisk = p.nextToken();
var sentinel: Node.Index = 0;
prefix: {
if (p.eatToken(.identifier)) |ident| {
const token_slice = p.source[p.token_starts[ident]..][0..2];
if (!std.mem.eql(u8, token_slice, "c]")) {
p.tok_i -= 1;
} else {
break :prefix;
}
}
if (p.eatToken(.colon)) |_| {
sentinel = try p.expectExpr();
if (p.eatToken(.identifier)) |ident| {
const ident_slice = p.source[p.token_starts[ident]..p.token_starts[ident + 1]];
if (!std.mem.eql(u8, std.mem.trimRight(u8, ident_slice, &std.ascii.spaces), "c")) {
p.tok_i -= 1;
}
} else if (p.eatToken(.colon)) |_| {
sentinel = try p.expectExpr();
}
_ = try p.expectToken(.r_bracket);
const mods = try p.parsePtrModifiers();
const elem_type = try p.expectTypeExpr();
if (mods.bit_range_start == 0) {
if (sentinel == 0) {
if (sentinel == 0 and mods.addrspace_node == 0) {
return p.addNode(.{
.tag = .ptr_type_aligned,
.main_token = asterisk,
@ -1569,7 +1605,7 @@ const Parser = struct {
.rhs = elem_type,
},
});
} else if (mods.align_node == 0) {
} else if (mods.align_node == 0 and mods.addrspace_node == 0) {
return p.addNode(.{
.tag = .ptr_type_sentinel,
.main_token = asterisk,
@ -1586,6 +1622,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrType{
.sentinel = sentinel,
.align_node = mods.align_node,
.addrspace_node = mods.addrspace_node,
}),
.rhs = elem_type,
},
@ -1599,6 +1636,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrTypeBitRange{
.sentinel = sentinel,
.align_node = mods.align_node,
.addrspace_node = mods.addrspace_node,
.bit_range_start = mods.bit_range_start,
.bit_range_end = mods.bit_range_end,
}),
@ -1624,7 +1662,7 @@ const Parser = struct {
.token = p.nodes.items(.main_token)[mods.bit_range_start],
});
}
if (sentinel == 0) {
if (sentinel == 0 and mods.addrspace_node == 0) {
return p.addNode(.{
.tag = .ptr_type_aligned,
.main_token = lbracket,
@ -1633,7 +1671,7 @@ const Parser = struct {
.rhs = elem_type,
},
});
} else if (mods.align_node == 0) {
} else if (mods.align_node == 0 and mods.addrspace_node == 0) {
return p.addNode(.{
.tag = .ptr_type_sentinel,
.main_token = lbracket,
@ -1650,6 +1688,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrType{
.sentinel = sentinel,
.align_node = mods.align_node,
.addrspace_node = mods.addrspace_node,
}),
.rhs = elem_type,
},
@ -1661,6 +1700,7 @@ const Parser = struct {
.keyword_const,
.keyword_volatile,
.keyword_allowzero,
.keyword_addrspace,
=> return p.fail(.ptr_mod_on_array_child_type),
else => {},
}
@ -2879,6 +2919,15 @@ const Parser = struct {
return expr_node;
}
/// AddrSpace <- KEYWORD_addrspace LPAREN Expr RPAREN
fn parseAddrSpace(p: *Parser) !Node.Index {
_ = p.eatToken(.keyword_addrspace) orelse return null_node;
_ = try p.expectToken(.l_paren);
const expr_node = try p.expectExpr();
_ = try p.expectToken(.r_paren);
return expr_node;
}
/// ParamDecl
/// <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
/// / DOT3
@ -3011,6 +3060,7 @@ const Parser = struct {
const PtrModifiers = struct {
align_node: Node.Index,
addrspace_node: Node.Index,
bit_range_start: Node.Index,
bit_range_end: Node.Index,
};
@ -3018,12 +3068,14 @@ const Parser = struct {
fn parsePtrModifiers(p: *Parser) !PtrModifiers {
var result: PtrModifiers = .{
.align_node = 0,
.addrspace_node = 0,
.bit_range_start = 0,
.bit_range_end = 0,
};
var saw_const = false;
var saw_volatile = false;
var saw_allowzero = false;
var saw_addrspace = false;
while (true) {
switch (p.token_tags[p.tok_i]) {
.keyword_align => {
@ -3063,6 +3115,12 @@ const Parser = struct {
p.tok_i += 1;
saw_allowzero = true;
},
.keyword_addrspace => {
if (saw_addrspace) {
try p.warn(.extra_addrspace_qualifier);
}
result.addrspace_node = try p.parseAddrSpace();
},
else => return result,
}
}

View file

@ -404,6 +404,10 @@ test "zig fmt: trailing comma in fn parameter list" {
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) addrspace(.generic) i32 {}
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) linksection(".text") i32 {}
\\pub fn f(
\\ a: i32,
@ -553,8 +557,8 @@ test "zig fmt: sentinel-terminated slice type" {
test "zig fmt: pointer-to-one with modifiers" {
try testCanonical(
\\const x: *u32 = undefined;
\\const y: *allowzero align(8) const volatile u32 = undefined;
\\const z: *allowzero align(8:4:2) const volatile u32 = undefined;
\\const y: *allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
\\const z: *allowzero align(8:4:2) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@ -562,8 +566,8 @@ test "zig fmt: pointer-to-one with modifiers" {
test "zig fmt: pointer-to-many with modifiers" {
try testCanonical(
\\const x: [*]u32 = undefined;
\\const y: [*]allowzero align(8) const volatile u32 = undefined;
\\const z: [*]allowzero align(8:4:2) const volatile u32 = undefined;
\\const y: [*]allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
\\const z: [*]allowzero align(8:4:2) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@ -571,8 +575,8 @@ test "zig fmt: pointer-to-many with modifiers" {
test "zig fmt: sentinel pointer with modifiers" {
try testCanonical(
\\const x: [*:42]u32 = undefined;
\\const y: [*:42]allowzero align(8) const volatile u32 = undefined;
\\const y: [*:42]allowzero align(8:4:2) const volatile u32 = undefined;
\\const y: [*:42]allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
\\const y: [*:42]allowzero align(8:4:2) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@ -580,8 +584,8 @@ test "zig fmt: sentinel pointer with modifiers" {
test "zig fmt: c pointer with modifiers" {
try testCanonical(
\\const x: [*c]u32 = undefined;
\\const y: [*c]allowzero align(8) const volatile u32 = undefined;
\\const z: [*c]allowzero align(8:4:2) const volatile u32 = undefined;
\\const y: [*c]allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
\\const z: [*c]allowzero align(8:4:2) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@ -589,7 +593,7 @@ test "zig fmt: c pointer with modifiers" {
test "zig fmt: slice with modifiers" {
try testCanonical(
\\const x: []u32 = undefined;
\\const y: []allowzero align(8) const volatile u32 = undefined;
\\const y: []allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@ -597,7 +601,7 @@ test "zig fmt: slice with modifiers" {
test "zig fmt: sentinel slice with modifiers" {
try testCanonical(
\\const x: [:42]u32 = undefined;
\\const y: [:42]allowzero align(8) const volatile u32 = undefined;
\\const y: [:42]allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@ -1129,6 +1133,16 @@ test "zig fmt: linksection" {
);
}
test "zig fmt: addrspace" {
try testCanonical(
\\export var python_length: u64 align(1) addrspace(.generic);
\\export var python_color: Color addrspace(.generic) = .green;
\\export var python_legs: u0 align(8) addrspace(.generic) linksection(".python") = 0;
\\export fn python_hiss() align(8) addrspace(.generic) linksection(".python") void;
\\
);
}
test "zig fmt: correctly space struct fields with doc comments" {
try testTransform(
\\pub const S = struct {
@ -4725,6 +4739,26 @@ test "zig fmt: assignment with inline for and inline while" {
);
}
test "zig fmt: saturating arithmetic" {
try testCanonical(
\\test {
\\ const actual = switch (op) {
\\ .add => a +| b,
\\ .sub => a -| b,
\\ .mul => a *| b,
\\ .shl => a <<| b,
\\ };
\\ switch (op) {
\\ .add => actual +|= b,
\\ .sub => actual -|= b,
\\ .mul => actual *|= b,
\\ .shl => actual <<|= b,
\\ }
\\}
\\
);
}
test "zig fmt: insert trailing comma if there are comments between switch values" {
try testTransform(
\\const a = switch (b) {
@ -5225,6 +5259,14 @@ test "recovery: nonfinal varargs" {
});
}
test "recovery: eof in c pointer" {
try testError(
\\const Ptr = [*c
, &[_]Error{
.expected_token,
});
}
const std = @import("std");
const mem = std.mem;
const print = std.debug.print;

View file

@ -333,27 +333,33 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
.add,
.add_wrap,
.add_sat,
.array_cat,
.array_mult,
.assign,
.assign_bit_and,
.assign_bit_or,
.assign_bit_shift_left,
.assign_bit_shift_right,
.assign_shl,
.assign_shl_sat,
.assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
.assign_sub_sat,
.assign_mod,
.assign_add,
.assign_add_wrap,
.assign_add_sat,
.assign_mul,
.assign_mul_wrap,
.assign_mul_sat,
.bang_equal,
.bit_and,
.bit_or,
.bit_shift_left,
.bit_shift_right,
.shl,
.shl_sat,
.shr,
.bit_xor,
.bool_and,
.bool_or,
@ -367,8 +373,10 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
.mod,
.mul,
.mul_wrap,
.mul_sat,
.sub,
.sub_wrap,
.sub_sat,
.@"orelse",
=> {
const infix = datas[node];
@ -797,6 +805,14 @@ fn renderPtrType(
}
}
if (ptr_type.ast.addrspace_node != 0) {
const addrspace_first = tree.firstToken(ptr_type.ast.addrspace_node);
try renderToken(ais, tree, addrspace_first - 2, .none); // addrspace
try renderToken(ais, tree, addrspace_first - 1, .none); // lparen
try renderExpression(gpa, ais, tree, ptr_type.ast.addrspace_node, .none);
try renderToken(ais, tree, tree.lastToken(ptr_type.ast.addrspace_node) + 1, .space); // rparen
}
if (ptr_type.const_token) |const_token| {
try renderToken(ais, tree, const_token, .space);
}
@ -921,6 +937,7 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
const name_space = if (var_decl.ast.type_node == 0 and
(var_decl.ast.align_node != 0 or
var_decl.ast.addrspace_node != 0 or
var_decl.ast.section_node != 0 or
var_decl.ast.init_node != 0))
Space.space
@ -930,8 +947,8 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
if (var_decl.ast.type_node != 0) {
try renderToken(ais, tree, var_decl.ast.mut_token + 2, Space.space); // :
if (var_decl.ast.align_node != 0 or var_decl.ast.section_node != 0 or
var_decl.ast.init_node != 0)
if (var_decl.ast.align_node != 0 or var_decl.ast.addrspace_node != 0 or
var_decl.ast.section_node != 0 or var_decl.ast.init_node != 0)
{
try renderExpression(gpa, ais, tree, var_decl.ast.type_node, .space);
} else {
@ -948,6 +965,23 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
try renderToken(ais, tree, align_kw, Space.none); // align
try renderToken(ais, tree, lparen, Space.none); // (
try renderExpression(gpa, ais, tree, var_decl.ast.align_node, Space.none);
if (var_decl.ast.addrspace_node != 0 or var_decl.ast.section_node != 0 or
var_decl.ast.init_node != 0)
{
try renderToken(ais, tree, rparen, .space); // )
} else {
try renderToken(ais, tree, rparen, .none); // )
return renderToken(ais, tree, rparen + 1, Space.newline); // ;
}
}
if (var_decl.ast.addrspace_node != 0) {
const lparen = tree.firstToken(var_decl.ast.addrspace_node) - 1;
const addrspace_kw = lparen - 1;
const rparen = tree.lastToken(var_decl.ast.addrspace_node) + 1;
try renderToken(ais, tree, addrspace_kw, Space.none); // addrspace
try renderToken(ais, tree, lparen, Space.none); // (
try renderExpression(gpa, ais, tree, var_decl.ast.addrspace_node, Space.none);
if (var_decl.ast.section_node != 0 or var_decl.ast.init_node != 0) {
try renderToken(ais, tree, rparen, .space); // )
} else {
@ -1267,6 +1301,14 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
smallest_start = start;
}
}
if (fn_proto.ast.addrspace_expr != 0) {
const tok = tree.firstToken(fn_proto.ast.addrspace_expr) - 3;
const start = token_starts[tok];
if (start < smallest_start) {
rparen = tok;
smallest_start = start;
}
}
if (fn_proto.ast.section_expr != 0) {
const tok = tree.firstToken(fn_proto.ast.section_expr) - 3;
const start = token_starts[tok];
@ -1407,6 +1449,16 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
try renderToken(ais, tree, align_rparen, .space); // )
}
if (fn_proto.ast.addrspace_expr != 0) {
const align_lparen = tree.firstToken(fn_proto.ast.addrspace_expr) - 1;
const align_rparen = tree.lastToken(fn_proto.ast.addrspace_expr) + 1;
try renderToken(ais, tree, align_lparen - 1, .none); // addrspace
try renderToken(ais, tree, align_lparen, .none); // (
try renderExpression(gpa, ais, tree, fn_proto.ast.addrspace_expr, .none);
try renderToken(ais, tree, align_rparen, .space); // )
}
if (fn_proto.ast.section_expr != 0) {
const section_lparen = tree.firstToken(fn_proto.ast.section_expr) - 1;
const section_rparen = tree.lastToken(fn_proto.ast.section_expr) + 1;
@ -2476,8 +2528,8 @@ fn nodeCausesSliceOpSpace(tag: Ast.Node.Tag) bool {
.assign,
.assign_bit_and,
.assign_bit_or,
.assign_bit_shift_left,
.assign_bit_shift_right,
.assign_shl,
.assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
@ -2490,8 +2542,8 @@ fn nodeCausesSliceOpSpace(tag: Ast.Node.Tag) bool {
.bang_equal,
.bit_and,
.bit_or,
.bit_shift_left,
.bit_shift_right,
.shl,
.shr,
.bit_xor,
.bool_and,
.bool_or,

View file

@ -29,7 +29,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory
const slice = bytes[1..];
const prev_len = buf.items.len;
try buf.ensureCapacity(prev_len + slice.len - 1);
try buf.ensureUnusedCapacity(slice.len - 1);
errdefer buf.shrinkRetainingCapacity(prev_len);
const State = enum {

View file

@ -101,6 +101,17 @@ pub const NativePaths = struct {
return self;
}
if (comptime native_target.os.tag == .solaris) {
try self.addLibDir("/usr/lib/64");
try self.addLibDir("/usr/local/lib/64");
try self.addLibDir("/lib/64");
try self.addIncludeDir("/usr/include");
try self.addIncludeDir("/usr/local/include");
return self;
}
if (native_target.os.tag != .windows) {
const triple = try native_target.linuxTriple(allocator);
const qual = native_target.cpu.arch.ptrBitWidth();
@ -243,6 +254,18 @@ pub const NativeTargetInfo = struct {
error.InvalidVersion => {},
}
},
.solaris => {
const uts = std.os.uname();
const release = mem.spanZ(&uts.release);
if (std.builtin.Version.parse(release)) |ver| {
os.version_range.semver.min = ver;
os.version_range.semver.max = ver;
} else |err| switch (err) {
error.Overflow => {},
error.InvalidCharacter => {},
error.InvalidVersion => {},
}
},
.windows => {
const detected_version = windows.detectRuntimeVersion();
os.version_range.windows.min = detected_version;

View file

@ -11,6 +11,7 @@ pub const Token = struct {
};
pub const keywords = std.ComptimeStringMap(Tag, .{
.{ "addrspace", .keyword_addrspace },
.{ "align", .keyword_align },
.{ "allowzero", .keyword_allowzero },
.{ "and", .keyword_and },
@ -102,15 +103,21 @@ pub const Token = struct {
plus_equal,
plus_percent,
plus_percent_equal,
plus_pipe,
plus_pipe_equal,
minus,
minus_equal,
minus_percent,
minus_percent_equal,
minus_pipe,
minus_pipe_equal,
asterisk,
asterisk_equal,
asterisk_asterisk,
asterisk_percent,
asterisk_percent_equal,
asterisk_pipe,
asterisk_pipe_equal,
arrow,
colon,
slash,
@ -123,6 +130,8 @@ pub const Token = struct {
angle_bracket_left_equal,
angle_bracket_angle_bracket_left,
angle_bracket_angle_bracket_left_equal,
angle_bracket_angle_bracket_left_pipe,
angle_bracket_angle_bracket_left_pipe_equal,
angle_bracket_right,
angle_bracket_right_equal,
angle_bracket_angle_bracket_right,
@ -132,6 +141,7 @@ pub const Token = struct {
float_literal,
doc_comment,
container_doc_comment,
keyword_addrspace,
keyword_align,
keyword_allowzero,
keyword_and,
@ -225,15 +235,21 @@ pub const Token = struct {
.plus_equal => "+=",
.plus_percent => "+%",
.plus_percent_equal => "+%=",
.plus_pipe => "+|",
.plus_pipe_equal => "+|=",
.minus => "-",
.minus_equal => "-=",
.minus_percent => "-%",
.minus_percent_equal => "-%=",
.minus_pipe => "-|",
.minus_pipe_equal => "-|=",
.asterisk => "*",
.asterisk_equal => "*=",
.asterisk_asterisk => "**",
.asterisk_percent => "*%",
.asterisk_percent_equal => "*%=",
.asterisk_pipe => "*|",
.asterisk_pipe_equal => "*|=",
.arrow => "->",
.colon => ":",
.slash => "/",
@ -246,11 +262,14 @@ pub const Token = struct {
.angle_bracket_left_equal => "<=",
.angle_bracket_angle_bracket_left => "<<",
.angle_bracket_angle_bracket_left_equal => "<<=",
.angle_bracket_angle_bracket_left_pipe => "<<|",
.angle_bracket_angle_bracket_left_pipe_equal => "<<|=",
.angle_bracket_right => ">",
.angle_bracket_right_equal => ">=",
.angle_bracket_angle_bracket_right => ">>",
.angle_bracket_angle_bracket_right_equal => ">>=",
.tilde => "~",
.keyword_addrspace => "addrspace",
.keyword_align => "align",
.keyword_allowzero => "allowzero",
.keyword_and => "and",
@ -349,8 +368,10 @@ pub const Tokenizer = struct {
pipe,
minus,
minus_percent,
minus_pipe,
asterisk,
asterisk_percent,
asterisk_pipe,
slash,
line_comment_start,
line_comment,
@ -379,8 +400,10 @@ pub const Tokenizer = struct {
percent,
plus,
plus_percent,
plus_pipe,
angle_bracket_left,
angle_bracket_angle_bracket_left,
angle_bracket_angle_bracket_left_pipe,
angle_bracket_right,
angle_bracket_angle_bracket_right,
period,
@ -581,6 +604,9 @@ pub const Tokenizer = struct {
'%' => {
state = .asterisk_percent;
},
'|' => {
state = .asterisk_pipe;
},
else => {
result.tag = .asterisk;
break;
@ -599,6 +625,18 @@ pub const Tokenizer = struct {
},
},
.asterisk_pipe => switch (c) {
'=' => {
result.tag = .asterisk_pipe_equal;
self.index += 1;
break;
},
else => {
result.tag = .asterisk_pipe;
break;
},
},
.percent => switch (c) {
'=' => {
result.tag = .percent_equal;
@ -625,6 +663,9 @@ pub const Tokenizer = struct {
'%' => {
state = .plus_percent;
},
'|' => {
state = .plus_pipe;
},
else => {
result.tag = .plus;
break;
@ -643,6 +684,18 @@ pub const Tokenizer = struct {
},
},
.plus_pipe => switch (c) {
'=' => {
result.tag = .plus_pipe_equal;
self.index += 1;
break;
},
else => {
result.tag = .plus_pipe;
break;
},
},
.caret => switch (c) {
'=' => {
result.tag = .caret_equal;
@ -700,7 +753,7 @@ pub const Tokenizer = struct {
},
.string_literal_backslash => switch (c) {
'\n' => {
0, '\n' => {
result.tag = .invalid;
break;
},
@ -769,6 +822,10 @@ pub const Tokenizer = struct {
},
.char_literal_unicode_escape_saw_u => switch (c) {
0 => {
result.tag = .invalid;
break;
},
'{' => {
state = .char_literal_unicode_escape;
},
@ -779,6 +836,10 @@ pub const Tokenizer = struct {
},
.char_literal_unicode_escape => switch (c) {
0 => {
result.tag = .invalid;
break;
},
'0'...'9', 'a'...'f', 'A'...'F' => {},
'}' => {
state = .char_literal_end; // too many/few digits handled later
@ -892,6 +953,9 @@ pub const Tokenizer = struct {
'%' => {
state = .minus_percent;
},
'|' => {
state = .minus_pipe;
},
else => {
result.tag = .minus;
break;
@ -909,6 +973,17 @@ pub const Tokenizer = struct {
break;
},
},
.minus_pipe => switch (c) {
'=' => {
result.tag = .minus_pipe_equal;
self.index += 1;
break;
},
else => {
result.tag = .minus_pipe;
break;
},
},
.angle_bracket_left => switch (c) {
'<' => {
@ -931,12 +1006,27 @@ pub const Tokenizer = struct {
self.index += 1;
break;
},
'|' => {
state = .angle_bracket_angle_bracket_left_pipe;
},
else => {
result.tag = .angle_bracket_angle_bracket_left;
break;
},
},
.angle_bracket_angle_bracket_left_pipe => switch (c) {
'=' => {
result.tag = .angle_bracket_angle_bracket_left_pipe_equal;
self.index += 1;
break;
},
else => {
result.tag = .angle_bracket_angle_bracket_left_pipe;
break;
},
},
.angle_bracket_right => switch (c) {
'>' => {
state = .angle_bracket_angle_bracket_right;
@ -1919,6 +2009,30 @@ test "tokenizer - invalid builtin identifiers" {
try testTokenize("@0()", &.{ .invalid, .integer_literal, .l_paren, .r_paren });
}
test "tokenizer - invalid token with unfinished escape right before eof" {
try testTokenize("\"\\", &.{.invalid});
try testTokenize("'\\", &.{.invalid});
try testTokenize("'\\u", &.{.invalid});
}
test "tokenizer - saturating" {
try testTokenize("<<", &.{.angle_bracket_angle_bracket_left});
try testTokenize("<<|", &.{.angle_bracket_angle_bracket_left_pipe});
try testTokenize("<<|=", &.{.angle_bracket_angle_bracket_left_pipe_equal});
try testTokenize("*", &.{.asterisk});
try testTokenize("*|", &.{.asterisk_pipe});
try testTokenize("*|=", &.{.asterisk_pipe_equal});
try testTokenize("+", &.{.plus});
try testTokenize("+|", &.{.plus_pipe});
try testTokenize("+|=", &.{.plus_pipe_equal});
try testTokenize("-", &.{.minus});
try testTokenize("-|", &.{.minus_pipe});
try testTokenize("-|=", &.{.minus_pipe_equal});
}
fn testTokenize(source: [:0]const u8, expected_tokens: []const Token.Tag) !void {
var tokenizer = Tokenizer.init(source);
for (expected_tokens) |expected_token_id| {

View file

@ -44,6 +44,11 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
addwrap,
/// Saturating integer addition.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
add_sat,
/// Float or integer subtraction. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@ -54,6 +59,11 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
subwrap,
/// Saturating integer subtraction.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
sub_sat,
/// Float or integer multiplication. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@ -64,15 +74,26 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
mulwrap,
/// Saturating integer multiplication.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
mul_sat,
/// Integer or float division. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
div,
/// Integer or float remainder.
/// Both operands are guaranteed to be the same type, and the result type is the same as both operands.
/// Integer or float remainder division.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
rem,
/// Integer or float modulus division.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
mod,
/// Add an offset to a pointer, returning a new pointer.
/// The offset is in element type units, not bytes.
/// Wrapping is undefined behavior.
@ -104,6 +125,14 @@ pub const Inst = struct {
/// Shift left. `<<`
/// Uses the `bin_op` field.
shl,
/// Shift left; For unsigned integers, the shift produces a poison value if it shifts
/// out any non-zero bits. For signed integers, the shift produces a poison value if
/// it shifts out any bits that disagree with the resultant sign bit.
/// Uses the `bin_op` field.
shl_exact,
/// Saturating integer shift left. `<<|`
/// Uses the `bin_op` field.
shl_sat,
/// Bitwise XOR. `^`
/// Uses the `bin_op` field.
xor,
@ -131,6 +160,15 @@ pub const Inst = struct {
/// Result type is the return type of the function being called.
/// Uses the `pl_op` field with the `Call` payload. operand is the callee.
call,
/// Count leading zeroes of an integer according to its representation in twos complement.
/// Result type will always be an unsigned integer big enough to fit the answer.
/// Uses the `ty_op` field.
clz,
/// Count trailing zeroes of an integer according to its representation in twos complement.
/// Result type will always be an unsigned integer big enough to fit the answer.
/// Uses the `ty_op` field.
ctz,
/// `<`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_lt,
@ -149,6 +187,7 @@ pub const Inst = struct {
/// `!=`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_neq,
/// Conditional branch.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`.
@ -225,9 +264,12 @@ pub const Inst = struct {
/// Indicates the program counter will never get to this instruction.
/// Result type is always noreturn; no instructions in a block follow this one.
unreach,
/// Convert from one float type to another.
/// Convert from a float type to a smaller one.
/// Uses the `ty_op` field.
floatcast,
fptrunc,
/// Convert from a float type to a wider one.
/// Uses the `ty_op` field.
fpext,
/// Returns an integer with a different type than the operand. The new type may have
/// fewer, the same, or more bits than the operand type. However, the instruction
/// guarantees that the same integer value fits in both types.
@ -265,19 +307,29 @@ pub const Inst = struct {
/// wrap from E to E!T
/// Uses the `ty_op` field.
wrap_errunion_err,
/// Given a pointer to a struct and a field index, returns a pointer to the field.
/// Given a pointer to a struct or union and a field index, returns a pointer to the field.
/// Uses the `ty_pl` field, payload is `StructField`.
/// TODO rename to `agg_field_ptr`.
struct_field_ptr,
/// Given a pointer to a struct, returns a pointer to the field.
/// Given a pointer to a struct or union, returns a pointer to the field.
/// The field index is the number at the end of the name.
/// Uses `ty_op` field.
/// TODO rename to `agg_field_ptr_index_X`
struct_field_ptr_index_0,
struct_field_ptr_index_1,
struct_field_ptr_index_2,
struct_field_ptr_index_3,
/// Given a byval struct and a field index, returns the field byval.
/// Given a byval struct or union and a field index, returns the field byval.
/// Uses the `ty_pl` field, payload is `StructField`.
/// TODO rename to `agg_field_val`
struct_field_val,
/// Given a pointer to a tagged union, set its tag to the provided value.
/// Result type is always void.
/// Uses the `bin_op` field. LHS is union pointer, RHS is new tag value.
set_union_tag,
/// Given a tagged union value, get its tag value.
/// Uses the `ty_op` field.
get_union_tag,
/// Given a slice value, return the length.
/// Result type is always usize.
/// Uses the `ty_op` field.
@ -309,10 +361,52 @@ pub const Inst = struct {
/// Given a pointer to an array, return a slice.
/// Uses the `ty_op` field.
array_to_slice,
/// Given a float operand, return the integer with the closest mathematical meaning.
/// Uses the `ty_op` field.
float_to_int,
/// Given an integer operand, return the float with the closest mathematical meaning.
/// Uses the `ty_op` field.
int_to_float,
/// Given dest ptr, value, and len, set all elements at dest to value.
/// Result type is always void.
/// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
/// value, `rhs` is the length.
/// The element type may be any type, not just u8.
memset,
/// Given dest ptr, src ptr, and len, copy len elements from src to dest.
/// Result type is always void.
/// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
/// src ptr, `rhs` is the length.
/// The element type may be any type, not just u8.
memcpy,
/// Uses the `ty_pl` field with payload `Cmpxchg`.
cmpxchg_weak,
/// Uses the `ty_pl` field with payload `Cmpxchg`.
cmpxchg_strong,
/// Lowers to a memory fence instruction.
/// Result type is always void.
/// Uses the `fence` field.
fence,
/// Atomically load from a pointer.
/// Result type is the element type of the pointer.
/// Uses the `atomic_load` field.
atomic_load,
/// Atomically store through a pointer.
/// Result type is always `void`.
/// Uses the `bin_op` field. LHS is pointer, RHS is element.
atomic_store_unordered,
/// Same as `atomic_store_unordered` but with `AtomicOrder.Monotonic`.
atomic_store_monotonic,
/// Same as `atomic_store_unordered` but with `AtomicOrder.Release`.
atomic_store_release,
/// Same as `atomic_store_unordered` but with `AtomicOrder.SeqCst`.
atomic_store_seq_cst,
/// Atomically read-modify-write via a pointer.
/// Result type is the element type of the pointer.
/// Uses the `pl_op` field with payload `AtomicRmw`. Operand is `ptr`.
atomic_rmw,
pub fn fromCmpOp(op: std.math.CompareOperator) Tag {
return switch (op) {
@ -380,6 +474,11 @@ pub const Inst = struct {
line: u32,
column: u32,
},
fence: std.builtin.AtomicOrder,
atomic_load: struct {
ptr: Ref,
order: std.builtin.AtomicOrder,
},
// Make sure we don't accidentally add a field to make this union
// bigger than expected. Note that in Debug builds, Zig is allowed
@ -464,6 +563,21 @@ pub const Cmpxchg = struct {
}
};
pub const AtomicRmw = struct {
    operand: Inst.Ref,
    /// Bit-packed operation descriptor:
    /// 0b00000000000000000000000000000XXX - ordering (AtomicOrder)
    /// 0b0000000000000000000000000XXXX000 - op (AtomicRmwOp)
    flags: u32,

    /// Decodes the atomic memory ordering from the low 3 bits of `flags`.
    pub fn ordering(self: AtomicRmw) std.builtin.AtomicOrder {
        const order_bits = @truncate(u3, self.flags);
        return @intToEnum(std.builtin.AtomicOrder, order_bits);
    }

    /// Decodes the read-modify-write operation from bits 3..7 of `flags`.
    pub fn op(self: AtomicRmw) std.builtin.AtomicRmwOp {
        const op_bits = @truncate(u4, self.flags >> 3);
        return @intToEnum(std.builtin.AtomicRmwOp, op_bits);
    }
};
pub fn getMainBody(air: Air) []const Air.Inst.Index {
const body_index = air.extra[@enumToInt(ExtraIndex.main_block)];
const extra = air.extraData(Block, body_index);
@ -485,12 +599,16 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.add,
.addwrap,
.add_sat,
.sub,
.subwrap,
.sub_sat,
.mul,
.mulwrap,
.mul_sat,
.div,
.rem,
.mod,
.bit_and,
.bit_or,
.xor,
@ -498,6 +616,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ptr_sub,
.shr,
.shl,
.shl_exact,
.shl_sat,
=> return air.typeOf(datas[inst].bin_op.lhs),
.cmp_lt,
@ -535,7 +655,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.not,
.bitcast,
.load,
.floatcast,
.fpext,
.fptrunc,
.intcast,
.trunc,
.optional_payload,
@ -553,6 +674,11 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
.float_to_int,
.int_to_float,
.get_union_tag,
.clz,
.ctz,
=> return air.getRefType(datas[inst].ty_op.ty),
.loop,
@ -566,6 +692,14 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.breakpoint,
.dbg_stmt,
.store,
.fence,
.atomic_store_unordered,
.atomic_store_monotonic,
.atomic_store_release,
.atomic_store_seq_cst,
.memset,
.memcpy,
.set_union_tag,
=> return Type.initTag(.void),
.ptrtoint,
@ -588,6 +722,14 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
const inner_ptr_ty = outer_ptr_ty.elemType();
return inner_ptr_ty.elemType();
},
.atomic_load => {
const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr);
return ptr_ty.elemType();
},
.atomic_rmw => {
const ptr_ty = air.typeOf(datas[inst].pl_op.operand);
return ptr_ty.elemType();
},
}
}

File diff suppressed because it is too large Load diff

View file

@ -2,7 +2,6 @@ const std = @import("std");
pub const Tag = enum {
add_with_overflow,
add_with_saturation,
align_cast,
align_of,
as,
@ -66,7 +65,6 @@ pub const Tag = enum {
wasm_memory_grow,
mod,
mul_with_overflow,
mul_with_saturation,
panic,
pop_count,
ptr_cast,
@ -81,12 +79,10 @@ pub const Tag = enum {
set_runtime_safety,
shl_exact,
shl_with_overflow,
shl_with_saturation,
shr_exact,
shuffle,
size_of,
splat,
sub_with_saturation,
reduce,
src,
sqrt,
@ -114,10 +110,19 @@ pub const Tag = enum {
Vector,
};
pub const MemLocRequirement = enum {
/// The builtin never needs a memory location.
never,
/// The builtin always needs a memory location.
always,
/// The builtin forwards the question to argument at index 1.
forward1,
};
tag: Tag,
/// `true` if the builtin call can take advantage of a result location pointer.
needs_mem_loc: bool = false,
/// Info about the builtin call's ability to take advantage of a result location pointer.
needs_mem_loc: MemLocRequirement = .never,
/// `true` if the builtin call can be the left-hand side of an expression (assigned to).
allows_lvalue: bool = false,
/// The number of parameters to this builtin function. `null` means variable number
@ -152,7 +157,7 @@ pub const list = list: {
"@as",
.{
.tag = .as,
.needs_mem_loc = true,
.needs_mem_loc = .forward1,
.param_count = 2,
},
},
@ -188,7 +193,7 @@ pub const list = list: {
"@bitCast",
.{
.tag = .bit_cast,
.needs_mem_loc = true,
.needs_mem_loc = .forward1,
.param_count = 2,
},
},
@ -252,7 +257,7 @@ pub const list = list: {
"@call",
.{
.tag = .call,
.needs_mem_loc = true,
.needs_mem_loc = .always,
.param_count = 3,
},
},
@ -414,7 +419,7 @@ pub const list = list: {
"@field",
.{
.tag = .field,
.needs_mem_loc = true,
.needs_mem_loc = .always,
.param_count = 2,
.allows_lvalue = true,
},
@ -531,34 +536,6 @@ pub const list = list: {
.param_count = 2,
},
},
.{
"@addWithSaturation",
.{
.tag = .add_with_saturation,
.param_count = 2,
},
},
.{
"@subWithSaturation",
.{
.tag = .sub_with_saturation,
.param_count = 2,
},
},
.{
"@mulWithSaturation",
.{
.tag = .mul_with_saturation,
.param_count = 2,
},
},
.{
"@shlWithSaturation",
.{
.tag = .shl_with_saturation,
.param_count = 2,
},
},
.{
"@memcpy",
.{
@ -731,7 +708,6 @@ pub const list = list: {
"@splat",
.{
.tag = .splat,
.needs_mem_loc = true,
.param_count = 2,
},
},
@ -746,7 +722,7 @@ pub const list = list: {
"@src",
.{
.tag = .src,
.needs_mem_loc = true,
.needs_mem_loc = .always,
.param_count = 0,
},
},
@ -901,7 +877,7 @@ pub const list = list: {
"@unionInit",
.{
.tag = .union_init,
.needs_mem_loc = true,
.needs_mem_loc = .always,
.param_count = 3,
},
},

View file

@ -812,7 +812,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
const needs_c_symbols = !options.skip_linker_dependencies and is_exe_or_dyn_lib;
// WASI-only. Resolve the optinal exec-model option, defaults to command.
// WASI-only. Resolve the optional exec-model option, defaults to command.
const wasi_exec_model = if (options.target.os.tag != .wasi) undefined else options.wasi_exec_model orelse .command;
const comp: *Compilation = comp: {
@ -849,10 +849,6 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (options.use_llvm) |explicit|
break :blk explicit;
// If we have no zig code to compile, no need for LLVM.
if (options.main_pkg == null)
break :blk false;
// If we are outputting .c code we must use Zig backend.
if (ofmt == .c)
break :blk false;
@ -861,6 +857,10 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (options.emit_llvm_ir != null or options.emit_llvm_bc != null)
break :blk true;
// If we have no zig code to compile, no need for LLVM.
if (options.main_pkg == null)
break :blk false;
// The stage1 compiler depends on the stage1 C++ LLVM backend
// to compile zig code.
if (use_stage1)
@ -876,9 +876,6 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (options.use_llvm == true) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
if (options.machine_code_model != .default) {
return error.MachineCodeModelNotSupportedWithoutLlvm;
}
if (options.emit_llvm_ir != null or options.emit_llvm_bc != null) {
return error.EmittingLlvmModuleRequiresUsingLlvmBackend;
}
@ -1100,7 +1097,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (feature.llvm_name) |llvm_name| {
const plus_or_minus = "-+"[@boolToInt(is_enabled)];
try buf.ensureCapacity(buf.items.len + 2 + llvm_name.len);
try buf.ensureUnusedCapacity(2 + llvm_name.len);
buf.appendAssumeCapacity(plus_or_minus);
buf.appendSliceAssumeCapacity(llvm_name);
buf.appendSliceAssumeCapacity(",");
@ -1180,6 +1177,8 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
}
hash.add(valgrind);
hash.add(single_threaded);
hash.add(use_stage1);
hash.add(use_llvm);
hash.add(dll_export_fns);
hash.add(options.is_test);
hash.add(options.skip_linker_dependencies);
@ -1350,7 +1349,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
var system_libs: std.StringArrayHashMapUnmanaged(void) = .{};
errdefer system_libs.deinit(gpa);
try system_libs.ensureCapacity(gpa, options.system_libs.len);
try system_libs.ensureTotalCapacity(gpa, options.system_libs.len);
for (options.system_libs) |lib_name| {
system_libs.putAssumeCapacity(lib_name, {});
}
@ -1486,7 +1485,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
errdefer comp.astgen_wait_group.deinit();
// Add a `CObject` for each `c_source_files`.
try comp.c_object_table.ensureCapacity(gpa, options.c_source_files.len);
try comp.c_object_table.ensureTotalCapacity(gpa, options.c_source_files.len);
for (options.c_source_files) |c_source_file| {
const c_object = try gpa.create(CObject);
errdefer gpa.destroy(c_object);
@ -1577,25 +1576,30 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
// also test the use case of `build-obj -fcompiler-rt` with the self-hosted compiler
// and make sure the compiler-rt symbols are emitted. Currently this is hooked up for
// stage1 but not stage2.
if (comp.bin_file.options.use_stage1) {
if (comp.bin_file.options.include_compiler_rt) {
if (is_exe_or_dyn_lib) {
try comp.work_queue.writeItem(.{ .compiler_rt_lib = {} });
} else if (options.output_mode != .Obj) {
// If build-obj with -fcompiler-rt is requested, that is handled specially
// elsewhere. In this case we are making a static library, so we ask
// for a compiler-rt object to put in it.
try comp.work_queue.writeItem(.{ .compiler_rt_obj = {} });
}
const capable_of_building_compiler_rt = comp.bin_file.options.use_stage1 or
comp.bin_file.options.use_llvm;
const capable_of_building_zig_libc = comp.bin_file.options.use_stage1 or
comp.bin_file.options.use_llvm;
const capable_of_building_ssp = comp.bin_file.options.use_stage1;
if (comp.bin_file.options.include_compiler_rt and capable_of_building_compiler_rt) {
if (is_exe_or_dyn_lib) {
try comp.work_queue.writeItem(.{ .compiler_rt_lib = {} });
} else if (options.output_mode != .Obj) {
// If build-obj with -fcompiler-rt is requested, that is handled specially
// elsewhere. In this case we are making a static library, so we ask
// for a compiler-rt object to put in it.
try comp.work_queue.writeItem(.{ .compiler_rt_obj = {} });
}
if (needs_c_symbols) {
// MinGW provides no libssp, use our own implementation.
if (comp.getTarget().isMinGW()) {
try comp.work_queue.writeItem(.{ .libssp = {} });
}
if (!comp.bin_file.options.link_libc) {
try comp.work_queue.writeItem(.{ .zig_libc = {} });
}
}
if (needs_c_symbols) {
// MinGW provides no libssp, use our own implementation.
if (comp.getTarget().isMinGW() and capable_of_building_ssp) {
try comp.work_queue.writeItem(.{ .libssp = {} });
}
if (!comp.bin_file.options.link_libc and capable_of_building_zig_libc) {
try comp.work_queue.writeItem(.{ .zig_libc = {} });
}
}
}
@ -1647,6 +1651,9 @@ pub fn destroy(self: *Compilation) void {
if (self.compiler_rt_static_lib) |*crt_file| {
crt_file.deinit(gpa);
}
if (self.compiler_rt_obj) |*crt_file| {
crt_file.deinit(gpa);
}
if (self.libssp_static_lib) |*crt_file| {
crt_file.deinit(gpa);
}
@ -1793,6 +1800,10 @@ pub fn update(self: *Compilation) !void {
}
}
// Flush takes care of -femit-bin, but we still have -femit-llvm-ir, -femit-llvm-bc, and
// -femit-asm to handle, in the case of C objects.
try self.emitOthers();
// If there are any errors, we anticipate the source files being loaded
// to report error messages. Otherwise we unload all source files to save memory.
// The ZIR needs to stay loaded in memory because (1) Decl objects contain references
@ -1808,6 +1819,37 @@ pub fn update(self: *Compilation) !void {
}
}
/// Copies the auxiliary outputs derived from a lone C object (-femit-asm,
/// -femit-llvm-ir, -femit-llvm-bc) from the cache to their requested
/// destinations. `flush` already handled -femit-bin; this covers the rest
/// in the C-objects-only, no-Zig-module, build-obj case. No-op otherwise.
fn emitOthers(comp: *Compilation) !void {
    // Only relevant when producing an object directly from C sources.
    if (comp.bin_file.options.output_mode != .Obj) return;
    if (comp.bin_file.options.module != null) return;
    if (comp.c_object_table.count() == 0) return;

    const obj_path = comp.c_object_table.keys()[0].status.success.object_path;
    const obj_ext = std.fs.path.extension(obj_path);
    // The object path always ends with the object file extension; swapping
    // that extension for .s, .ll, or .bc yields the sibling artifact paths
    // the C compiler invocation produced alongside the object.
    const stem = obj_path[0 .. obj_path.len - obj_ext.len];
    const cwd = std.fs.cwd();

    const emit_targets = [_]struct {
        emit: ?EmitLoc,
        ext: []const u8,
    }{
        .{ .emit = comp.emit_asm, .ext = ".s" },
        .{ .emit = comp.emit_llvm_ir, .ext = ".ll" },
        .{ .emit = comp.emit_llvm_bc, .ext = ".bc" },
    };
    for (emit_targets) |target| {
        const loc = target.emit orelse continue;
        const directory = loc.directory orelse continue;
        const src_path = try std.fmt.allocPrint(comp.gpa, "{s}{s}", .{ stem, target.ext });
        defer comp.gpa.free(src_path);
        try cwd.copyFile(src_path, directory.handle, loc.basename, .{});
    }
}
/// Having the file open for writing is problematic as far as executing the
/// binary is concerned. This will remove the write flag, or close the file,
/// or whatever is needed so that it can be executed.
@ -2113,7 +2155,11 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
const module = self.bin_file.options.module.?;
const decl = func.owner_decl;
var air = module.analyzeFnBody(decl, func) catch |err| switch (err) {
var tmp_arena = std.heap.ArenaAllocator.init(gpa);
defer tmp_arena.deinit();
const sema_arena = &tmp_arena.allocator;
var air = module.analyzeFnBody(decl, func, sema_arena) catch |err| switch (err) {
error.AnalysisFail => {
assert(func.state != .in_progress);
continue;
@ -2175,16 +2221,20 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
const decl_emit_h = decl.getEmitH(module);
const fwd_decl = &decl_emit_h.fwd_decl;
fwd_decl.shrinkRetainingCapacity(0);
var typedefs_arena = std.heap.ArenaAllocator.init(gpa);
defer typedefs_arena.deinit();
var dg: c_codegen.DeclGen = .{
.gpa = gpa,
.module = module,
.error_msg = null,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa),
// we don't want to emit optionals and error unions to headers since they have no ABI
.typedefs = undefined,
.typedefs = c_codegen.TypedefMap.init(gpa),
.typedefs_arena = &typedefs_arena.allocator,
};
defer dg.fwd_decl.deinit();
defer dg.typedefs.deinit();
c_codegen.genHeader(&dg) catch |err| switch (err) {
error.AnalysisFail => {
@ -2612,7 +2662,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const dep_basename = std.fs.path.basename(out_dep_path);
try man.addDepFilePost(zig_cache_tmp_dir, dep_basename);
try comp.stage1_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
if (build_options.is_stage1 and comp.bin_file.options.use_stage1) try comp.stage1_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
@ -2721,16 +2771,12 @@ fn reportRetryableAstGenError(
try Module.ErrorMsg.create(
gpa,
src_loc,
"unable to load '{'}" ++ std.fs.path.sep_str ++ "{'}': {s}",
.{
std.zig.fmtEscapes(dir_path),
std.zig.fmtEscapes(file.sub_file_path),
@errorName(err),
},
"unable to load '{s}" ++ std.fs.path.sep_str ++ "{s}': {s}",
.{ dir_path, file.sub_file_path, @errorName(err) },
)
else
try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{'}': {s}", .{
std.zig.fmtEscapes(file.sub_file_path), @errorName(err),
try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{s}': {s}", .{
file.sub_file_path, @errorName(err),
});
errdefer err_msg.destroy(gpa);
@ -2766,6 +2812,9 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
defer man.deinit();
man.hash.add(comp.clang_preprocessor_mode);
man.hash.addOptionalEmitLoc(comp.emit_asm);
man.hash.addOptionalEmitLoc(comp.emit_llvm_ir);
man.hash.addOptionalEmitLoc(comp.emit_llvm_bc);
try man.hashCSource(c_object.src);
@ -2789,16 +2838,29 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
comp.bin_file.options.root_name
else
c_source_basename[0 .. c_source_basename.len - std.fs.path.extension(c_source_basename).len];
const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{
o_basename_noext,
comp.bin_file.options.object_format.fileExt(comp.bin_file.options.target.cpu.arch),
});
const o_ext = comp.bin_file.options.object_format.fileExt(comp.bin_file.options.target.cpu.arch);
const digest = if (!comp.disable_c_depfile and try man.hit()) man.final() else blk: {
var argv = std.ArrayList([]const u8).init(comp.gpa);
defer argv.deinit();
// We can't know the digest until we do the C compiler invocation, so we need a temporary filename.
// In case we are doing passthrough mode, we need to detect -S and -emit-llvm.
const out_ext = e: {
if (!comp.clang_passthrough_mode)
break :e o_ext;
if (comp.emit_asm != null)
break :e ".s";
if (comp.emit_llvm_ir != null)
break :e ".ll";
if (comp.emit_llvm_bc != null)
break :e ".bc";
break :e o_ext;
};
const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{ o_basename_noext, out_ext });
// We can't know the digest until we do the C compiler invocation,
// so we need a temporary filename.
const out_obj_path = try comp.tmpFilePath(arena, o_basename);
var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{});
defer zig_cache_tmp_dir.close();
@ -2812,15 +2874,23 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
try std.fmt.allocPrint(arena, "{s}.d", .{out_obj_path});
try comp.addCCArgs(arena, &argv, ext, out_dep_path);
try argv.ensureCapacity(argv.items.len + 3);
try argv.ensureUnusedCapacity(6 + c_object.src.extra_flags.len);
switch (comp.clang_preprocessor_mode) {
.no => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-c", "-o", out_obj_path }),
.yes => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-E", "-o", out_obj_path }),
.stdout => argv.appendAssumeCapacity("-E"),
}
try argv.append(c_object.src.src_path);
try argv.appendSlice(c_object.src.extra_flags);
if (comp.clang_passthrough_mode) {
if (comp.emit_asm != null) {
argv.appendAssumeCapacity("-S");
} else if (comp.emit_llvm_ir != null) {
argv.appendSliceAssumeCapacity(&[_][]const u8{ "-emit-llvm", "-S" });
} else if (comp.emit_llvm_bc != null) {
argv.appendAssumeCapacity("-emit-llvm");
}
}
argv.appendAssumeCapacity(c_object.src.src_path);
argv.appendSliceAssumeCapacity(c_object.src.extra_flags);
if (comp.verbose_cc) {
dump_argv(argv.items);
@ -2840,8 +2910,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
switch (term) {
.Exited => |code| {
if (code != 0) {
// TODO https://github.com/ziglang/zig/issues/6342
std.process.exit(1);
std.process.exit(code);
}
if (comp.clang_preprocessor_mode == .stdout)
std.process.exit(0);
@ -2857,9 +2926,6 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
const stderr_reader = child.stderr.?.reader();
// TODO https://github.com/ziglang/zig/issues/6343
// Please uncomment and use stdout once this issue is fixed
// const stdout = try stdout_reader.readAllAlloc(arena, std.math.maxInt(u32));
const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
const term = child.wait() catch |err| {
@ -2909,6 +2975,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
break :blk digest;
};
const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{ o_basename_noext, o_ext });
c_object.status = .{
.success = .{
.object_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{
@ -3032,7 +3100,7 @@ pub fn addCCArgs(
// It would be really nice if there was a more compact way to communicate this info to Clang.
const all_features_list = target.cpu.arch.allFeaturesList();
try argv.ensureCapacity(argv.items.len + all_features_list.len * 4);
try argv.ensureUnusedCapacity(all_features_list.len * 4);
for (all_features_list) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
@ -3533,7 +3601,7 @@ fn detectLibCIncludeDirs(
fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
var list = std.ArrayList([]const u8).init(arena);
try list.ensureCapacity(4);
try list.ensureTotalCapacity(4);
list.appendAssumeCapacity(lci.include_dir.?);
@ -3640,7 +3708,7 @@ fn setMiscFailure(
comptime format: []const u8,
args: anytype,
) Allocator.Error!void {
try comp.misc_failures.ensureCapacity(comp.gpa, comp.misc_failures.count() + 1);
try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
const msg = try std.fmt.allocPrint(comp.gpa, format, args);
comp.misc_failures.putAssumeCapacityNoClobber(tag, .{ .msg = msg });
}
@ -3662,6 +3730,8 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc
const target = comp.getTarget();
const generic_arch_name = target.cpu.arch.genericName();
const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
const stage2_x86_cx16 = target.cpu.arch == .x86_64 and
std.Target.x86.featureSetHas(target.cpu.features, .cx16);
@setEvalBranchQuota(4000);
try buffer.writer().print(
@ -3673,6 +3743,8 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc
\\pub const zig_is_stage2 = {};
\\/// Temporary until self-hosted supports the `cpu.arch` value.
\\pub const stage2_arch: std.Target.Cpu.Arch = .{};
\\/// Temporary until self-hosted can call `std.Target.x86.featureSetHas` at comptime.
\\pub const stage2_x86_cx16 = {};
\\
\\pub const output_mode = std.builtin.OutputMode.{};
\\pub const link_mode = std.builtin.LinkMode.{};
@ -3688,6 +3760,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc
build_options.version,
!use_stage1,
std.zig.fmtId(@tagName(target.cpu.arch)),
stage2_x86_cx16,
std.zig.fmtId(@tagName(comp.bin_file.options.output_mode)),
std.zig.fmtId(@tagName(comp.bin_file.options.link_mode)),
comp.bin_file.options.is_test,
@ -3912,6 +3985,7 @@ fn buildOutputFromZig(
},
.root_src_path = src_basename,
};
defer main_pkg.deinitTable(comp.gpa);
const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len];
const target = comp.getTarget();
const bin_basename = try std.zig.binNameAlloc(comp.gpa, .{
@ -3970,7 +4044,7 @@ fn buildOutputFromZig(
defer if (!keep_errors) errors.deinit(sub_compilation.gpa);
if (errors.list.len != 0) {
try comp.misc_failures.ensureCapacity(comp.gpa, comp.misc_failures.count() + 1);
try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
comp.misc_failures.putAssumeCapacityNoClobber(misc_task_tag, .{
.msg = try std.fmt.allocPrint(comp.gpa, "sub-compilation of {s} failed", .{
@tagName(misc_task_tag),
@ -4402,7 +4476,7 @@ pub fn build_crt_file(
try sub_compilation.updateSubCompilation();
try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(basename, .{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{

View file

@ -30,7 +30,7 @@ tomb_bits: []usize,
/// The main tomb bits are still used and the extra ones are starting with the lsb of the
/// value here.
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
/// Auxilliary data. The way this data is interpreted is determined contextually.
/// Auxiliary data. The way this data is interpreted is determined contextually.
extra: []const u32,
/// Trailing is the set of instructions whose lifetimes end at the start of the then branch,
@ -226,12 +226,16 @@ fn analyzeInst(
switch (inst_tags[inst]) {
.add,
.addwrap,
.add_sat,
.sub,
.subwrap,
.sub_sat,
.mul,
.mulwrap,
.mul_sat,
.div,
.rem,
.mod,
.ptr_add,
.ptr_sub,
.bit_and,
@ -251,7 +255,14 @@ fn analyzeInst(
.ptr_elem_val,
.ptr_ptr_elem_val,
.shl,
.shl_exact,
.shl_sat,
.shr,
.atomic_store_unordered,
.atomic_store_monotonic,
.atomic_store_release,
.atomic_store_seq_cst,
.set_union_tag,
=> {
const o = inst_datas[inst].bin_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none });
@ -264,12 +275,14 @@ fn analyzeInst(
.breakpoint,
.dbg_stmt,
.unreach,
.fence,
=> return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
.not,
.bitcast,
.load,
.floatcast,
.fpext,
.fptrunc,
.intcast,
.trunc,
.optional_payload,
@ -288,6 +301,11 @@ fn analyzeInst(
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
.float_to_int,
.int_to_float,
.get_union_tag,
.clz,
.ctz,
=> {
const o = inst_datas[inst].ty_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
@ -344,6 +362,20 @@ fn analyzeInst(
const extra = a.air.extraData(Air.Cmpxchg, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.ptr, extra.expected_value, extra.new_value });
},
.atomic_load => {
const ptr = inst_datas[inst].atomic_load.ptr;
return trackOperands(a, new_set, inst, main_tomb, .{ ptr, .none, .none });
},
.atomic_rmw => {
const pl_op = inst_datas[inst].pl_op;
const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none });
},
.memset, .memcpy => {
const pl_op = inst_datas[inst].pl_op;
const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs });
},
.br => {
const br = inst_datas[inst].br;
return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none });
@ -440,7 +472,7 @@ fn analyzeInst(
}
// Now we have to correctly populate new_set.
if (new_set) |ns| {
try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count()));
try ns.ensureUnusedCapacity(gpa, @intCast(u32, then_table.count() + else_table.count()));
var it = then_table.keyIterator();
while (it.next()) |key| {
_ = ns.putAssumeCapacity(key.*, {});

File diff suppressed because it is too large Load diff

View file

@ -99,19 +99,22 @@ pub fn destroy(pkg: *Package, gpa: *Allocator) void {
}
}
{
var it = pkg.table.keyIterator();
while (it.next()) |key| {
gpa.free(key.*);
}
}
pkg.table.deinit(gpa);
pkg.deinitTable(gpa);
gpa.destroy(pkg);
}
/// Frees only the memory owned by the package table: each duplicated key
/// string (see `add`), then the table's own backing storage. The `Package`
/// itself is not freed.
pub fn deinitTable(pkg: *Package, gpa: *Allocator) void {
    var key_it = pkg.table.keyIterator();
    while (key_it.next()) |key_ptr| gpa.free(key_ptr.*);
    pkg.table.deinit(gpa);
}
/// Registers `package` under `name` in `pkg`'s table. The name is
/// duplicated and the copy is owned by the table (freed by `deinitTable`).
/// Asserts that `name` is not already present.
pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
    // Reserve capacity before duplicating so the put below cannot fail
    // and leak the freshly allocated key.
    try pkg.table.ensureUnusedCapacity(gpa, 1);
    const owned_name = try mem.dupe(gpa, u8, name);
    pkg.table.putAssumeCapacityNoClobber(owned_name, package);
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -2,7 +2,7 @@ const std = @import("std");
const DW = std.dwarf;
const testing = std.testing;
/// The condition field specifies the flags neccessary for an
/// The condition field specifies the flags necessary for an
/// Instruction to be executed
pub const Condition = enum(u4) {
/// equal

View file

@ -2434,7 +2434,14 @@ flagpd1("emit-codegen-only"),
flagpd1("emit-header-module"),
flagpd1("emit-html"),
flagpd1("emit-interface-stubs"),
flagpd1("emit-llvm"),
.{
.name = "emit-llvm",
.syntax = .flag,
.zig_equivalent = .emit_llvm,
.pd1 = true,
.pd2 = false,
.psl = false,
},
flagpd1("emit-llvm-bc"),
flagpd1("emit-llvm-only"),
flagpd1("emit-llvm-uselists"),

View file

@ -21,7 +21,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const RegisterManager = @import("register_manager.zig").RegisterManager;
const X8664Encoder = @import("codegen/x86_64.zig").Encoder;
const X8664Encoder = @import("arch/x86_64/bits.zig").Encoder;
pub const FnResult = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value appended.
@ -48,6 +48,28 @@ pub const DebugInfoOutput = union(enum) {
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
},
/// the plan9 debuginfo output is a bytecode with 4 opcodes
/// assume all numbers/variables are bytes
/// 0 w x y z -> interpret w x y z as a big-endian i32, and add it to the line offset
/// x when x < 65 -> add x to line offset
/// x when x < 129 -> subtract 64 from x and subtract it from the line offset
/// x -> subtract 129 from x, multiply it by the quanta of the instruction size
/// (1 on x86_64), and add it to the pc
/// after every opcode, add the quanta of the instruction size to the pc
plan9: struct {
/// the actual opcodes
dbg_line: *std.ArrayList(u8),
/// what line the debuginfo starts on
/// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
start_line: *?u32,
/// what the line count ends on after codegen
/// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
end_line: *u32,
/// the last pc change op
/// This is very useful for adding quanta
/// to it if its not actually the last one.
pcop_change_index: *?u32,
},
none,
};
@ -141,7 +163,7 @@ pub fn generateSymbol(
// TODO populate .debug_info for the array
if (typed_value.val.castTag(.bytes)) |payload| {
if (typed_value.ty.sentinel()) |sentinel| {
try code.ensureCapacity(code.items.len + payload.data.len + 1);
try code.ensureUnusedCapacity(payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.elemType(),
@ -448,7 +470,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// A branch in the ARM instruction set
arm_branch: struct {
pos: usize,
cond: @import("codegen/arm.zig").Condition,
cond: @import("arch/arm/bits.zig").Condition,
},
};
@ -568,7 +590,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn gen(self: *Self) !void {
switch (arch) {
.x86_64 => {
try self.code.ensureCapacity(self.code.items.len + 11);
try self.code.ensureUnusedCapacity(11);
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
@ -607,7 +629,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Important to be after the possible self.code.items.len -= 5 above.
try self.dbgSetEpilogueBegin();
try self.code.ensureCapacity(self.code.items.len + 9);
try self.code.ensureUnusedCapacity(9);
// add rsp, x
if (aligned_stack_end > math.maxInt(i8)) {
// example: 48 81 c4 ff ff ff 7f add rsp,0x7fffffff
@ -802,14 +824,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airAdd(inst),
.addwrap => try self.airAddWrap(inst),
.sub, .ptr_sub => try self.airSub(inst),
.subwrap => try self.airSubWrap(inst),
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.div => try self.airDiv(inst),
.rem => try self.airRem(inst),
.add, .ptr_add => try self.airAdd(inst),
.addwrap => try self.airAddWrap(inst),
.add_sat => try self.airAddSat(inst),
.sub, .ptr_sub => try self.airSub(inst),
.subwrap => try self.airSubWrap(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
.div => try self.airDiv(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airShl(inst),
.shl_sat => try self.airShlSat(inst),
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
@ -824,7 +852,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.bit_or => try self.airBitOr(inst),
.xor => try self.airXor(inst),
.shr => try self.airShr(inst),
.shl => try self.airShl(inst),
.alloc => try self.airAlloc(inst),
.arg => try self.airArg(inst),
@ -833,10 +860,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.dbg_stmt => try self.airDbgStmt(inst),
.floatcast => try self.airFloatCast(inst),
.fptrunc => try self.airFptrunc(inst),
.fpext => try self.airFpext(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
.bool_to_int => try self.airBoolToInt(inst),
@ -857,8 +886,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.struct_field_ptr=> try self.airStructFieldPtr(inst),
.struct_field_val=> try self.airStructFieldVal(inst),
.array_to_slice => try self.airArrayToSlice(inst),
.int_to_float => try self.airIntToFloat(inst),
.float_to_int => try self.airFloatToInt(inst),
.cmpxchg_strong => try self.airCmpxchg(inst),
.cmpxchg_weak => try self.airCmpxchg(inst),
.atomic_rmw => try self.airAtomicRmw(inst),
.atomic_load => try self.airAtomicLoad(inst),
.memcpy => try self.airMemcpy(inst),
.memset => try self.airMemset(inst),
.set_union_tag => try self.airSetUnionTag(inst),
.get_union_tag => try self.airGetUnionTag(inst),
.clz => try self.airClz(inst),
.ctz => try self.airCtz(inst),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
.atomic_store_release => try self.airAtomicStore(inst, .Release),
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
@ -905,6 +949,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try dbg_out.dbg_line.append(DW.LNS.set_prologue_end);
try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
@ -915,15 +960,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try dbg_out.dbg_line.append(DW.LNS.set_epilogue_begin);
try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
fn dbgAdvancePCAndLine(self: *Self, line: u32, column: u32) InnerError!void {
const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
const delta_pc: usize = self.code.items.len - self.prev_di_pc;
switch (self.debug_output) {
.dwarf => |dbg_out| {
const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
const delta_pc = self.code.items.len - self.prev_di_pc;
// TODO Look into using the DWARF special opcodes to compress this data.
// It lets you emit single-byte opcodes that add different numbers to
// both the PC and the line number at the same time.
@ -935,12 +981,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable;
}
dbg_out.dbg_line.appendAssumeCapacity(DW.LNS.copy);
self.prev_di_pc = self.code.items.len;
self.prev_di_line = line;
self.prev_di_column = column;
self.prev_di_pc = self.code.items.len;
},
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
// we have already checked the target in the linker to make sure it is compatible
const quant = @import("link/Plan9/aout.zig").getPCQuant(self.target.cpu.arch) catch unreachable;
// increasing the line number
try @import("link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
// increasing the pc
const d_pc_p9 = @intCast(i64, delta_pc) - quant;
if (d_pc_p9 > 0) {
// minus one because if it's the last one, we want to leave space to change the line, which is one quantum
try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
if (dbg_out.pcop_change_index.*) |pci|
dbg_out.dbg_line.items[pci] += 1;
dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the quant does it for us
} else unreachable;
if (dbg_out.start_line.* == null)
dbg_out.start_line.* = self.prev_di_line;
dbg_out.end_line.* = line;
// only do this if the pc changed
self.prev_di_line = line;
self.prev_di_column = column;
self.prev_di_pc = self.code.items.len;
},
.none => {},
}
self.prev_di_line = line;
self.prev_di_column = column;
self.prev_di_pc = self.code.items.len;
}
/// Asserts there is already capacity to insert into top branch inst_table.
@ -1024,6 +1097,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
try gop.value_ptr.relocs.append(self.gpa, @intCast(u32, index));
},
.plan9 => {},
.none => {},
}
}
@ -1110,10 +1184,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
}
fn airFloatCast(self: *Self, inst: Air.Inst.Index) !void {
fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}),
else => return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch}),
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
/// Lower a floating-point extension (`fpext`) AIR instruction.
/// No backend implements this yet: unless the result is unused (in which
/// case the instruction is simply marked dead), this fails with a TODO.
fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) {
        // Nothing consumes the result; no machine code needs to be emitted.
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch}),
    }
}
@ -1226,6 +1308,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lower a saturating addition (`add_sat`) AIR instruction.
/// Not yet implemented on any architecture; reports a TODO failure
/// unless the result is unused, in which case it is marked dead.
fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        // Dead result: skip code generation entirely.
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch}),
    }
}
fn airSub(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@ -1244,6 +1334,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lower a saturating subtraction (`sub_sat`) AIR instruction.
/// Not yet implemented on any architecture; reports a TODO failure
/// unless the result is unused, in which case it is marked dead.
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        // Dead result: skip code generation entirely.
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch}),
    }
}
fn airMul(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@ -1262,6 +1360,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lower a saturating multiplication (`mul_sat`) AIR instruction.
/// Not yet implemented on any architecture; reports a TODO failure
/// unless the result is unused, in which case it is marked dead.
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        // Dead result: skip code generation entirely.
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch}),
    }
}
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@ -1278,6 +1384,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lower a modulus (`mod`) AIR instruction.
/// Not yet implemented on any architecture; reports a TODO failure
/// unless the result is unused, in which case it is marked dead.
fn airMod(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        // Dead result: skip code generation entirely.
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement mod for {}", .{self.target.cpu.arch}),
    }
}
fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@ -1316,6 +1430,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lower a saturating left shift (`shl_sat`) AIR instruction.
/// Not yet implemented on any architecture; reports a TODO failure
/// unless the result is unused, in which case it is marked dead.
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        // Dead result: skip code generation entirely.
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}),
    }
}
fn airShr(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@ -1470,6 +1592,38 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lower a `set_union_tag` AIR instruction (write the active tag of a
/// tagged union). Not yet implemented for any architecture, so this
/// always reports a TODO failure. Note: unlike most handlers there is
/// no liveness dead-code shortcut here, matching the original behavior.
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
    const operands = self.air.instructions.items(.data)[inst].bin_op;
    const result: MCValue = switch (arch) {
        else => return self.fail("TODO implement airSetUnionTag for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ operands.lhs, operands.rhs, .none });
}
/// Lower a `get_union_tag` AIR instruction (read the active tag of a
/// tagged union). Not yet implemented; fails with a TODO unless the
/// result is unused, in which case the instruction is marked dead.
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) {
        // Nothing consumes the result; no machine code needs to be emitted.
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch}),
    }
}
/// Lower a count-leading-zeros (`clz`) AIR instruction.
/// Not yet implemented; fails with a TODO unless the result is unused,
/// in which case the instruction is marked dead.
fn airClz(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) {
        // Nothing consumes the result; no machine code needs to be emitted.
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch}),
    }
}
/// Lower a count-trailing-zeros (`ctz`) AIR instruction.
/// Not yet implemented; fails with a TODO unless the result is unused,
/// in which case the instruction is marked dead.
fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) {
        // Nothing consumes the result; no machine code needs to be emitted.
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement airCtz for {}", .{self.target.cpu.arch}),
    }
}
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
@ -1848,15 +2002,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
.shl => {
assert(!swap_lhs_and_rhs);
const shift_amout = switch (operand) {
const shift_amount = switch (operand) {
.Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
.Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
};
writeInt(u32, try self.code.addManyAsArray(4), Instruction.lsl(.al, dst_reg, op1, shift_amout).toU32());
writeInt(u32, try self.code.addManyAsArray(4), Instruction.lsl(.al, dst_reg, op1, shift_amount).toU32());
},
.shr => {
assert(!swap_lhs_and_rhs);
const shift_amout = switch (operand) {
const shift_amount = switch (operand) {
.Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
.Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
};
@ -1865,7 +2019,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.signed => Instruction.asr,
.unsigned => Instruction.lsr,
};
writeInt(u32, try self.code.addManyAsArray(4), shr(.al, dst_reg, op1, shift_amout).toU32());
writeInt(u32, try self.code.addManyAsArray(4), shr(.al, dst_reg, op1, shift_amount).toU32());
},
else => unreachable, // not a binary instruction
}
@ -1952,7 +2106,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
//
// TODO: make this algorithm less bad
try self.code.ensureCapacity(self.code.items.len + 8);
try self.code.ensureUnusedCapacity(8);
const lhs = try self.resolveInst(op_lhs);
const rhs = try self.resolveInst(op_rhs);
@ -2439,16 +2593,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.register => |reg| {
switch (self.debug_output) {
.dwarf => |dbg_out| {
try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 3);
try dbg_out.dbg_info.ensureUnusedCapacity(3);
dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1, // ULEB128 dwarf expression length
reg.dwarfLocOp(),
});
try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
try dbg_out.dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
},
.plan9 => {},
.none => {},
}
},
@ -2476,13 +2631,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try dbg_out.dbg_info.append(DW.OP.breg11);
try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset);
try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
try dbg_out.dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
},
else => {},
}
},
.plan9 => {},
.none => {},
}
},
@ -2549,6 +2705,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAirBookkeeping();
}
/// Lower an AIR `fence` (memory barrier) instruction.
/// Memory fences are not yet supported by this backend, so this always
/// reports a TODO failure.
fn airFence(self: *Self) !void {
    // Once implemented, this handler should end with
    // `return self.finishAirBookkeeping();`.
    return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch});
}
fn airCall(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const fn_ty = self.air.typeOf(pl_op.operand);
@ -2613,7 +2774,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
unreachable;
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
try self.code.ensureUnusedCapacity(7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
} else if (func_value.castTag(.extern_fn)) |_| {
@ -2826,7 +2987,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.memory = func.owner_decl.link.macho.local_sym_index,
});
// callq *%rax
try self.code.ensureCapacity(self.code.items.len + 2);
try self.code.ensureUnusedCapacity(2);
self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 });
},
.aarch64 => {
@ -2840,12 +3001,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const decl = func_payload.data;
const where_index = try macho_file.addExternFn(mem.spanZ(decl.name));
const resolv = try macho_file.addExternFn(mem.spanZ(decl.name));
const offset = blk: {
switch (arch) {
.x86_64 => {
// callq
try self.code.ensureCapacity(self.code.items.len + 5);
try self.code.ensureUnusedCapacity(5);
self.code.appendSliceAssumeCapacity(&[5]u8{ 0xe8, 0x0, 0x0, 0x0, 0x0 });
break :blk @intCast(u32, self.code.items.len) - 4;
},
@ -2861,8 +3022,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Add relocation to the decl.
try macho_file.active_decl.?.link.macho.relocs.append(self.bin_file.allocator, .{
.offset = offset,
.where = .undef,
.where_index = where_index,
.where = switch (resolv.where) {
.local => .local,
.undef => .undef,
},
.where_index = resolv.where_index,
.payload = .{ .branch = .{
.arch = arch,
} },
@ -2911,12 +3075,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = func_payload.data.owner_decl.link.plan9.got_index.?;
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
try self.code.ensureUnusedCapacity(7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
const fn_got_addr = got_addr + got_index * ptr_bytes;
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, fn_got_addr));
@ -2958,6 +3123,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
@ -3059,7 +3225,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const rhs = try self.resolveInst(bin_op.rhs);
const result: MCValue = switch (arch) {
.x86_64 => result: {
try self.code.ensureCapacity(self.code.items.len + 8);
try self.code.ensureUnusedCapacity(8);
// There are 2 operands, destination and source.
// Either one, but not both, can be a memory operand.
@ -3143,7 +3309,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const reloc: Reloc = switch (arch) {
.i386, .x86_64 => reloc: {
try self.code.ensureCapacity(self.code.items.len + 6);
try self.code.ensureUnusedCapacity(6);
const opcode: u8 = switch (cond) {
.compare_flags_signed => |cmp_op| blk: {
@ -3503,7 +3669,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn jump(self: *Self, index: usize) !void {
switch (arch) {
.i386, .x86_64 => {
try self.code.ensureCapacity(self.code.items.len + 5);
try self.code.ensureUnusedCapacity(5);
if (math.cast(i8, @intCast(i32, index) - (@intCast(i32, self.code.items.len + 2)))) |delta| {
self.code.appendAssumeCapacity(0xeb); // jmp rel8
self.code.appendAssumeCapacity(@bitCast(u8, delta));
@ -3535,7 +3701,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try self.blocks.putNoClobber(self.gpa, inst, .{
// A block is a setup to be able to jump to the end.
.relocs = .{},
// It also acts as a receptical for break operands.
// It also acts as a receptacle for break operands.
// Here we use `MCValue.none` to represent a null value so that the first
// break instruction will choose a MCValue for the block result and overwrite
// this field. Following break instructions will use that MCValue to put their
@ -3589,7 +3755,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail("TODO: enable larger branch offset", .{});
}
},
else => unreachable, // attempting to perfrom an ARM relocation on a non-ARM target arch
else => unreachable, // attempting to perform an ARM relocation on a non-ARM target arch
}
},
}
@ -3641,7 +3807,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const block_data = self.blocks.getPtr(block).?;
// Emit a jump with a relocation. It will be patched up after the block ends.
try block_data.relocs.ensureCapacity(self.gpa, block_data.relocs.items.len + 1);
try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
switch (arch) {
.i386, .x86_64 => {
@ -4025,7 +4191,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (adj_off > 128) {
return self.fail("TODO implement set stack variable with large stack offset", .{});
}
try self.code.ensureCapacity(self.code.items.len + 8);
try self.code.ensureUnusedCapacity(8);
switch (abi_size) {
1 => {
return self.fail("TODO implement set abi_size=1 stack variable with immediate", .{});
@ -4051,7 +4217,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// 64 bit write to memory would take two mov's anyways so we
// insted just use two 32 bit writes to avoid register allocation
try self.code.ensureCapacity(self.code.items.len + 14);
try self.code.ensureUnusedCapacity(14);
var buf: [8]u8 = undefined;
mem.writeIntLittle(u64, &buf, x_big);
@ -4753,6 +4919,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
/// Lower an integer-to-float conversion (`int_to_float`) AIR instruction.
/// Not yet implemented; fails with a TODO unless the result is unused,
/// in which case the instruction is marked dead.
fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) {
        // Nothing consumes the result; no machine code needs to be emitted.
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement airIntToFloat for {}", .{self.target.cpu.arch}),
    }
}
/// Lower a float-to-integer conversion (`float_to_int`) AIR instruction.
/// Not yet implemented; fails with a TODO unless the result is unused,
/// in which case the instruction is marked dead.
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) {
        // Nothing consumes the result; no machine code needs to be emitted.
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement airFloatToInt for {}", .{self.target.cpu.arch}),
    }
}
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
@ -4764,6 +4950,32 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
}
/// Lower an atomic read-modify-write (`atomic_rmw`) AIR instruction.
/// Not yet implemented for any architecture; always reports a TODO failure.
fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
    _ = inst;
    // Fixed copy-paste defect: the failure message previously said
    // "airCmpxchg", which misattributed the missing feature in diagnostics.
    return self.fail("TODO implement airAtomicRmw for {}", .{self.target.cpu.arch});
}
/// Lower an atomic load AIR instruction.
/// Not yet implemented for any architecture; always reports a TODO failure.
fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
    // Instruction index accepted for handler-signature uniformity; unused until implemented.
    _ = inst;
    return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch});
}
/// Lower an atomic store AIR instruction with the given memory ordering
/// (callers pass .Unordered/.Monotonic/.Release/.SeqCst from the AIR tag).
/// Not yet implemented for any architecture; always reports a TODO failure.
fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
    // Parameters accepted for handler-signature uniformity; unused until implemented.
    _ = inst;
    _ = order;
    return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
}
/// Lower a `memset` AIR instruction.
/// Not yet implemented for any architecture; always reports a TODO failure.
fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
    // Instruction index accepted for handler-signature uniformity; unused until implemented.
    _ = inst;
    return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch});
}
/// Lower a `memcpy` AIR instruction.
/// Not yet implemented for any architecture; always reports a TODO failure.
fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
    // Instruction index accepted for handler-signature uniformity; unused until implemented.
    _ = inst;
    return self.fail("TODO implement airMemcpy for {}", .{self.target.cpu.arch});
}
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
@ -4841,7 +5053,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
var buf: Type.Payload.ElemType = undefined;
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
const slice_len = typed_value.val.sliceLen();
@ -4869,6 +5081,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
@ -5216,11 +5429,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
const Register = switch (arch) {
.i386 => @import("codegen/x86.zig").Register,
.x86_64 => @import("codegen/x86_64.zig").Register,
.riscv64 => @import("codegen/riscv64.zig").Register,
.arm, .armeb => @import("codegen/arm.zig").Register,
.aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").Register,
.i386 => @import("arch/x86/bits.zig").Register,
.x86_64 => @import("arch/x86_64/bits.zig").Register,
.riscv64 => @import("arch/riscv64/bits.zig").Register,
.arm, .armeb => @import("arch/arm/bits.zig").Register,
.aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").Register,
else => enum {
dummy,
@ -5232,39 +5445,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
const Instruction = switch (arch) {
.riscv64 => @import("codegen/riscv64.zig").Instruction,
.arm, .armeb => @import("codegen/arm.zig").Instruction,
.aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").Instruction,
.riscv64 => @import("arch/riscv64/bits.zig").Instruction,
.arm, .armeb => @import("arch/arm/bits.zig").Instruction,
.aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").Instruction,
else => void,
};
const Condition = switch (arch) {
.arm, .armeb => @import("codegen/arm.zig").Condition,
.arm, .armeb => @import("arch/arm/bits.zig").Condition,
else => void,
};
const callee_preserved_regs = switch (arch) {
.i386 => @import("codegen/x86.zig").callee_preserved_regs,
.x86_64 => @import("codegen/x86_64.zig").callee_preserved_regs,
.riscv64 => @import("codegen/riscv64.zig").callee_preserved_regs,
.arm, .armeb => @import("codegen/arm.zig").callee_preserved_regs,
.aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").callee_preserved_regs,
.i386 => @import("arch/x86/bits.zig").callee_preserved_regs,
.x86_64 => @import("arch/x86_64/bits.zig").callee_preserved_regs,
.riscv64 => @import("arch/riscv64/bits.zig").callee_preserved_regs,
.arm, .armeb => @import("arch/arm/bits.zig").callee_preserved_regs,
.aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").callee_preserved_regs,
else => [_]Register{},
};
const c_abi_int_param_regs = switch (arch) {
.i386 => @import("codegen/x86.zig").c_abi_int_param_regs,
.x86_64 => @import("codegen/x86_64.zig").c_abi_int_param_regs,
.arm, .armeb => @import("codegen/arm.zig").c_abi_int_param_regs,
.aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").c_abi_int_param_regs,
.i386 => @import("arch/x86/bits.zig").c_abi_int_param_regs,
.x86_64 => @import("arch/x86_64/bits.zig").c_abi_int_param_regs,
.arm, .armeb => @import("arch/arm/bits.zig").c_abi_int_param_regs,
.aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").c_abi_int_param_regs,
else => [_]Register{},
};
const c_abi_int_return_regs = switch (arch) {
.i386 => @import("codegen/x86.zig").c_abi_int_return_regs,
.x86_64 => @import("codegen/x86_64.zig").c_abi_int_return_regs,
.arm, .armeb => @import("codegen/arm.zig").c_abi_int_return_regs,
.aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").c_abi_int_return_regs,
.i386 => @import("arch/x86/bits.zig").c_abi_int_return_regs,
.x86_64 => @import("arch/x86_64/bits.zig").c_abi_int_return_regs,
.arm, .armeb => @import("arch/arm/bits.zig").c_abi_int_return_regs,
.aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").c_abi_int_return_regs,
else => [_]Register{},
};

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -85,6 +85,9 @@ pub const Value = opaque {
pub const addAttributeAtIndex = LLVMAddAttributeAtIndex;
extern fn LLVMAddAttributeAtIndex(*const Value, Idx: AttributeIndex, A: *const Attribute) void;
pub const removeEnumAttributeAtIndex = LLVMRemoveEnumAttributeAtIndex;
extern fn LLVMRemoveEnumAttributeAtIndex(F: *const Value, Idx: AttributeIndex, KindID: c_uint) void;
pub const getFirstBasicBlock = LLVMGetFirstBasicBlock;
extern fn LLVMGetFirstBasicBlock(Fn: *const Value) ?*const BasicBlock;
@ -136,6 +139,30 @@ pub const Value = opaque {
pub const setWeak = LLVMSetWeak;
extern fn LLVMSetWeak(CmpXchgInst: *const Value, IsWeak: Bool) void;
pub const setOrdering = LLVMSetOrdering;
extern fn LLVMSetOrdering(MemoryAccessInst: *const Value, Ordering: AtomicOrdering) void;
pub const setVolatile = LLVMSetVolatile;
extern fn LLVMSetVolatile(MemoryAccessInst: *const Value, IsVolatile: Bool) void;
pub const setAlignment = LLVMSetAlignment;
extern fn LLVMSetAlignment(V: *const Value, Bytes: c_uint) void;
pub const getFunctionCallConv = LLVMGetFunctionCallConv;
extern fn LLVMGetFunctionCallConv(Fn: *const Value) CallConv;
pub const setFunctionCallConv = LLVMSetFunctionCallConv;
extern fn LLVMSetFunctionCallConv(Fn: *const Value, CC: CallConv) void;
pub const setValueName = LLVMSetValueName;
extern fn LLVMSetValueName(Val: *const Value, Name: [*:0]const u8) void;
pub const setValueName2 = LLVMSetValueName2;
extern fn LLVMSetValueName2(Val: *const Value, Name: [*]const u8, NameLen: usize) void;
pub const deleteFunction = LLVMDeleteFunction;
extern fn LLVMDeleteFunction(Fn: *const Value) void;
};
pub const Type = opaque {
@ -148,12 +175,22 @@ pub const Type = opaque {
pub const constInt = LLVMConstInt;
extern fn LLVMConstInt(IntTy: *const Type, N: c_ulonglong, SignExtend: Bool) *const Value;
pub const constIntOfArbitraryPrecision = LLVMConstIntOfArbitraryPrecision;
extern fn LLVMConstIntOfArbitraryPrecision(IntTy: *const Type, NumWords: c_uint, Words: [*]const u64) *const Value;
pub const constReal = LLVMConstReal;
extern fn LLVMConstReal(RealTy: *const Type, N: f64) *const Value;
pub const constArray = LLVMConstArray;
extern fn LLVMConstArray(ElementTy: *const Type, ConstantVals: [*]*const Value, Length: c_uint) *const Value;
pub const constNamedStruct = LLVMConstNamedStruct;
extern fn LLVMConstNamedStruct(
StructTy: *const Type,
ConstantVals: [*]const *const Value,
Count: c_uint,
) *const Value;
pub const getUndef = LLVMGetUndef;
extern fn LLVMGetUndef(Ty: *const Type) *const Value;
@ -170,6 +207,9 @@ pub const Type = opaque {
ElementCount: c_uint,
Packed: Bool,
) void;
pub const getTypeKind = LLVMGetTypeKind;
extern fn LLVMGetTypeKind(Ty: *const Type) TypeKind;
};
pub const Module = opaque {
@ -182,9 +222,15 @@ pub const Module = opaque {
pub const verify = LLVMVerifyModule;
extern fn LLVMVerifyModule(*const Module, Action: VerifierFailureAction, OutMessage: *[*:0]const u8) Bool;
pub const setModuleDataLayout = LLVMSetModuleDataLayout;
extern fn LLVMSetModuleDataLayout(*const Module, *const TargetData) void;
pub const addFunction = LLVMAddFunction;
extern fn LLVMAddFunction(*const Module, Name: [*:0]const u8, FunctionTy: *const Type) *const Value;
pub const addFunctionInAddressSpace = ZigLLVMAddFunctionInAddressSpace;
extern fn ZigLLVMAddFunctionInAddressSpace(*const Module, Name: [*:0]const u8, FunctionTy: *const Type, AddressSpace: c_uint) *const Value;
pub const getNamedFunction = LLVMGetNamedFunction;
extern fn LLVMGetNamedFunction(*const Module, Name: [*:0]const u8) ?*const Value;
@ -197,6 +243,9 @@ pub const Module = opaque {
pub const addGlobal = LLVMAddGlobal;
extern fn LLVMAddGlobal(M: *const Module, Ty: *const Type, Name: [*:0]const u8) *const Value;
pub const addGlobalInAddressSpace = LLVMAddGlobalInAddressSpace;
extern fn LLVMAddGlobalInAddressSpace(M: *const Module, Ty: *const Type, Name: [*:0]const u8, AddressSpace: c_uint) *const Value;
pub const getNamedGlobal = LLVMGetNamedGlobal;
extern fn LLVMGetNamedGlobal(M: *const Module, Name: [*:0]const u8) ?*const Value;
@ -268,7 +317,7 @@ extern fn LLVMGetInlineAsm(
pub const functionType = LLVMFunctionType;
extern fn LLVMFunctionType(
ReturnType: *const Type,
ParamTypes: [*]*const Type,
ParamTypes: [*]const *const Type,
ParamCount: c_uint,
IsVarArg: Bool,
) *const Type;
@ -314,7 +363,7 @@ pub const Builder = opaque {
extern fn LLVMBuildCall(
*const Builder,
Fn: *const Value,
Args: [*]*const Value,
Args: [*]const *const Value,
NumArgs: c_uint,
Name: [*:0]const u8,
) *const Value;
@ -365,6 +414,12 @@ pub const Builder = opaque {
pub const buildNUWAdd = LLVMBuildNUWAdd;
extern fn LLVMBuildNUWAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildSAddSat = ZigLLVMBuildSAddSat;
extern fn ZigLLVMBuildSAddSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildUAddSat = ZigLLVMBuildUAddSat;
extern fn ZigLLVMBuildUAddSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildFSub = LLVMBuildFSub;
extern fn LLVMBuildFSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@ -377,6 +432,12 @@ pub const Builder = opaque {
pub const buildNUWSub = LLVMBuildNUWSub;
extern fn LLVMBuildNUWSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildSSubSat = ZigLLVMBuildSSubSat;
extern fn ZigLLVMBuildSSubSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildUSubSat = ZigLLVMBuildUSubSat;
extern fn ZigLLVMBuildUSubSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildFMul = LLVMBuildFMul;
extern fn LLVMBuildFMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@ -389,6 +450,12 @@ pub const Builder = opaque {
pub const buildNUWMul = LLVMBuildNUWMul;
extern fn LLVMBuildNUWMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildSMulFixSat = ZigLLVMBuildSMulFixSat;
extern fn ZigLLVMBuildSMulFixSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildUMulFixSat = ZigLLVMBuildUMulFixSat;
extern fn ZigLLVMBuildUMulFixSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildUDiv = LLVMBuildUDiv;
extern fn LLVMBuildUDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@ -419,6 +486,18 @@ pub const Builder = opaque {
pub const buildShl = LLVMBuildShl;
extern fn LLVMBuildShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildNUWShl = ZigLLVMBuildNUWShl;
extern fn ZigLLVMBuildNUWShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildNSWShl = ZigLLVMBuildNSWShl;
extern fn ZigLLVMBuildNSWShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildSShlSat = ZigLLVMBuildSShlSat;
extern fn ZigLLVMBuildSShlSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildUShlSat = ZigLLVMBuildUShlSat;
extern fn ZigLLVMBuildUShlSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
pub const buildOr = LLVMBuildOr;
extern fn LLVMBuildOr(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@ -481,6 +560,14 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *const Value;
pub const buildIntToPtr = LLVMBuildIntToPtr;
extern fn LLVMBuildIntToPtr(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildStructGEP = LLVMBuildStructGEP;
extern fn LLVMBuildStructGEP(
B: *const Builder,
@ -525,6 +612,93 @@ pub const Builder = opaque {
Else: *const Value,
Name: [*:0]const u8,
) *const Value;
pub const buildFence = LLVMBuildFence;
extern fn LLVMBuildFence(
B: *const Builder,
ordering: AtomicOrdering,
singleThread: Bool,
Name: [*:0]const u8,
) *const Value;
pub const buildAtomicRmw = LLVMBuildAtomicRMW;
extern fn LLVMBuildAtomicRMW(
B: *const Builder,
op: AtomicRMWBinOp,
PTR: *const Value,
Val: *const Value,
ordering: AtomicOrdering,
singleThread: Bool,
) *const Value;
pub const buildFPToUI = LLVMBuildFPToUI;
extern fn LLVMBuildFPToUI(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildFPToSI = LLVMBuildFPToSI;
extern fn LLVMBuildFPToSI(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildUIToFP = LLVMBuildUIToFP;
extern fn LLVMBuildUIToFP(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildSIToFP = LLVMBuildSIToFP;
extern fn LLVMBuildSIToFP(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildFPTrunc = LLVMBuildFPTrunc;
extern fn LLVMBuildFPTrunc(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildFPExt = LLVMBuildFPExt;
extern fn LLVMBuildFPExt(
*const Builder,
Val: *const Value,
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
pub const buildMemSet = ZigLLVMBuildMemSet;
extern fn ZigLLVMBuildMemSet(
B: *const Builder,
Ptr: *const Value,
Val: *const Value,
Len: *const Value,
Align: c_uint,
is_volatile: bool,
) *const Value;
pub const buildMemCpy = ZigLLVMBuildMemCpy;
extern fn ZigLLVMBuildMemCpy(
B: *const Builder,
Dst: *const Value,
DstAlign: c_uint,
Src: *const Value,
SrcAlign: c_uint,
Size: *const Value,
is_volatile: bool,
) *const Value;
};
pub const IntPredicate = enum(c_uint) {
@ -598,6 +772,14 @@ pub const TargetMachine = opaque {
llvm_ir_filename: ?[*:0]const u8,
bitcode_filename: ?[*:0]const u8,
) bool;
pub const createTargetDataLayout = LLVMCreateTargetDataLayout;
extern fn LLVMCreateTargetDataLayout(*const TargetMachine) *const TargetData;
};
/// Opaque handle corresponding to LLVM-C's `LLVMTargetDataRef`
/// (target data-layout information). Obtained from
/// `TargetMachine.createTargetDataLayout`; caller must `dispose` it.
pub const TargetData = opaque {
    pub const dispose = LLVMDisposeTargetData;
    extern fn LLVMDisposeTargetData(*const TargetData) void;
};
pub const CodeModel = enum(c_int) {
@ -915,3 +1097,151 @@ pub const AtomicOrdering = enum(c_uint) {
AcquireRelease = 6,
SequentiallyConsistent = 7,
};
/// Binary operation selector for `Builder.buildAtomicRmw`.
/// Must match LLVM-C's `LLVMAtomicRMWBinOp` (llvm-c/Core.h) exactly.
/// Tag values are written out explicitly — consistent with `AtomicOrdering`
/// and `CallConv` in this file — so an accidental reorder of the fields
/// cannot silently desynchronize this binding from the LLVM ABI.
pub const AtomicRMWBinOp = enum(c_int) {
    Xchg = 0,
    Add = 1,
    Sub = 2,
    And = 3,
    Nand = 4,
    Or = 5,
    Xor = 6,
    Max = 7,
    Min = 8,
    UMax = 9,
    UMin = 10,
    FAdd = 11,
    FSub = 12,
};
/// Result of `Type.getTypeKind`; must match LLVM-C's `LLVMTypeKind`
/// (llvm-c/Core.h) exactly. Tag values are written out explicitly —
/// consistent with the other ABI enums in this file — so the binding
/// cannot silently drift if a field is inserted or reordered.
/// (`X86_AMX` is the last kind as of LLVM 12/13.)
pub const TypeKind = enum(c_int) {
    Void = 0,
    Half = 1,
    Float = 2,
    Double = 3,
    X86_FP80 = 4,
    FP128 = 5,
    PPC_FP128 = 6,
    Label = 7,
    Integer = 8,
    Function = 9,
    Struct = 10,
    Array = 11,
    Pointer = 12,
    Vector = 13,
    Metadata = 14,
    X86_MMX = 15,
    Token = 16,
    ScalableVector = 17,
    BFloat = 18,
    X86_AMX = 19,
};
/// Calling-convention IDs passed to LLVM when declaring/calling functions.
/// Values mirror `llvm::CallingConv::ID` (llvm/IR/CallingConv.h); the gaps
/// (e.g. 1-7, 18-63, 73-74) are IDs LLVM reserves or does not expose, so
/// every tag carries its explicit numeric value and must not be changed.
pub const CallConv = enum(c_uint) {
    // Target-independent conventions.
    C = 0,
    Fast = 8,
    Cold = 9,
    GHC = 10,
    HiPE = 11,
    WebKit_JS = 12,
    AnyReg = 13,
    PreserveMost = 14,
    PreserveAll = 15,
    Swift = 16,
    CXX_FAST_TLS = 17,
    // Target-specific conventions start at 64.
    X86_StdCall = 64,
    X86_FastCall = 65,
    ARM_APCS = 66,
    ARM_AAPCS = 67,
    ARM_AAPCS_VFP = 68,
    MSP430_INTR = 69,
    X86_ThisCall = 70,
    PTX_Kernel = 71,
    PTX_Device = 72,
    SPIR_FUNC = 75,
    SPIR_KERNEL = 76,
    Intel_OCL_BI = 77,
    X86_64_SysV = 78,
    Win64 = 79,
    X86_VectorCall = 80,
    HHVM = 81,
    HHVM_C = 82,
    X86_INTR = 83,
    AVR_INTR = 84,
    AVR_SIGNAL = 85,
    AVR_BUILTIN = 86,
    AMDGPU_VS = 87,
    AMDGPU_GS = 88,
    AMDGPU_PS = 89,
    AMDGPU_CS = 90,
    AMDGPU_KERNEL = 91,
    X86_RegCall = 92,
    AMDGPU_HS = 93,
    MSP430_BUILTIN = 94,
    AMDGPU_LS = 95,
    AMDGPU_ES = 96,
    AArch64_VectorCall = 97,
};
/// Per-target LLVM address-space numbers, used with
/// `addGlobalInAddressSpace` / `addFunctionInAddressSpace`. Each group is
/// transcribed from the LLVM target header cited above it; the numeric
/// values are part of the LLVM ABI and must not be changed.
pub const address_space = struct {
    /// Address space 0 is the generic/default space on every target.
    pub const default: c_uint = 0;

    // See llvm/lib/Target/X86/X86.h
    // x86-64 uses the same segment/pointer address spaces as 32-bit x86.
    pub const x86_64 = x86;
    pub const x86 = struct {
        // Segment-relative address spaces.
        pub const gs: c_uint = 256;
        pub const fs: c_uint = 257;
        pub const ss: c_uint = 258;

        // Explicit pointer-size address spaces (signed/unsigned 32-bit, 64-bit).
        pub const ptr32_sptr: c_uint = 270;
        pub const ptr32_uptr: c_uint = 271;
        pub const ptr64: c_uint = 272;
    };

    // See llvm/lib/Target/AVR/AVR.h
    pub const avr = struct {
        pub const data_memory: c_uint = 0;
        pub const program_memory: c_uint = 1;
    };

    // See llvm/lib/Target/NVPTX/NVPTX.h
    pub const nvptx = struct {
        pub const generic: c_uint = 0;
        pub const global: c_uint = 1;
        pub const constant: c_uint = 2;
        pub const shared: c_uint = 3;
        pub const param: c_uint = 4;
        pub const local: c_uint = 5;
    };

    // See llvm/lib/Target/AMDGPU/AMDGPU.h
    pub const amdgpu = struct {
        pub const flat: c_uint = 0;
        pub const global: c_uint = 1;
        pub const region: c_uint = 2;
        pub const local: c_uint = 3;
        pub const constant: c_uint = 4;
        pub const private: c_uint = 5;
        pub const constant_32bit: c_uint = 6;
        pub const buffer_fat_pointer: c_uint = 7;

        // NOTE(review): param_d/param_i deliberately reuse values 6/7 —
        // they appear to be the older R600-family spaces that overlap the
        // GCN constant_32bit/buffer_fat_pointer spaces in AMDGPU.h; confirm
        // against the targeted LLVM version before relying on both sets.
        pub const param_d: c_uint = 6;
        pub const param_i: c_uint = 7;
        pub const constant_buffer_0: c_uint = 8;
        pub const constant_buffer_1: c_uint = 9;
        pub const constant_buffer_2: c_uint = 10;
        pub const constant_buffer_3: c_uint = 11;
        pub const constant_buffer_4: c_uint = 12;
        pub const constant_buffer_5: c_uint = 13;
        pub const constant_buffer_6: c_uint = 14;
        pub const constant_buffer_7: c_uint = 15;
        pub const constant_buffer_8: c_uint = 16;
        pub const constant_buffer_9: c_uint = 17;
        pub const constant_buffer_10: c_uint = 18;
        pub const constant_buffer_11: c_uint = 19;
        pub const constant_buffer_12: c_uint = 20;
        pub const constant_buffer_13: c_uint = 21;
        pub const constant_buffer_14: c_uint = 22;
        pub const constant_buffer_15: c_uint = 23;
    };
};

Some files were not shown because too many files have changed in this diff Show more