mirror of
https://codeberg.org/ziglang/zig.git
synced 2025-12-06 05:44:20 +00:00
Merge branch 'master' into kt128r
* master: (35 commits) std.Io.Threaded: fix QueryPerformanceCounter usage build runner: fix recursive locking of max_rss_mutex frontend: introduce error.Canceled build runner: update Mutex and Condition usage to std.Io build runner: update from std.Thread.Pool to std.Io std.Io: add Group.concurrent std.Io.File.readPositional fixed buffer type crypto.ml_kem: avoid redundant assignment & fix K-PKE.KeyGen (#26031) README: update some links Move Windows rename implementation from std.posix to windows.RenameFile os.windows: Delete unused functions and kernel32 bindings ci: bump riscv64-linux-release timeout to 8 hours on Forgejo Actions windows.GetFinalPathNameByHandle: add links to bugs tracking the Wine workaround README: use HTTPS for `releases.llvm.org` langref: convert to unmanaged `ArrayList` in example std.math.big.int: fix format functions std.Target: remove Abi.code16 cbe: translate sparc ccr/icc/xcc registers to icc Revert "std.os.linux.sparc64: use icc instead of xcc in asm clobbers" flate.Compress: simplify huffman node comparisons ...
This commit is contained in:
commit
8bb0127da5
173 changed files with 5790 additions and 4746 deletions
|
|
@ -72,7 +72,7 @@ jobs:
|
|||
fetch-depth: 0
|
||||
- name: Build and Test
|
||||
run: sh ci/riscv64-linux-release.sh
|
||||
timeout-minutes: 420
|
||||
timeout-minutes: 480
|
||||
s390x-linux-debug:
|
||||
runs-on: [self-hosted, s390x-linux]
|
||||
steps:
|
||||
|
|
|
|||
2
.gitattributes
vendored
2
.gitattributes
vendored
|
|
@ -15,6 +15,8 @@ src/Package/Fetch/testdata/** binary
|
|||
src/Package/Fetch/git/testdata/** binary
|
||||
|
||||
lib/compiler/aro/** linguist-vendored
|
||||
lib/compiler/resinator/** linguist-vendored
|
||||
lib/compiler/translate-c/** linguist-vendored
|
||||
lib/include/** linguist-vendored
|
||||
lib/libc/** linguist-vendored
|
||||
lib/libcxx/** linguist-vendored
|
||||
|
|
|
|||
12
.github/ISSUE_TEMPLATE/config.yml
vendored
12
.github/ISSUE_TEMPLATE/config.yml
vendored
|
|
@ -1,13 +1,13 @@
|
|||
contact_links:
|
||||
- name: Language Proposal
|
||||
about: Propose to improve the Zig language
|
||||
url: https://github.com/ziglang/zig/wiki/Language-Proposals
|
||||
about: "Please do not submit a proposal to change the language"
|
||||
url: https://ziglang.org/code-of-conduct
|
||||
- name: Question
|
||||
about: Please use one of the community spaces for questions or general discussions.
|
||||
url: https://github.com/ziglang/zig/wiki/Community
|
||||
about: "Please use one of the community spaces instead for questions or general discussions."
|
||||
url: https://ziglang.org/community
|
||||
- name: C Translation
|
||||
about: "Issues related to `zig translate-c` and `@cImport` are tracked separately."
|
||||
url: https://github.com/ziglang/translate-c/
|
||||
- name: Copilot and Other LLMs
|
||||
about: Please do not use GitHub Copilot or any other LLM to write an issue.
|
||||
url: https://github.com/ziglang/zig/wiki/Writing-Issues-with-Copilot-and-Other-LLMs
|
||||
about: "Please do not use GitHub Copilot or any other LLM to write an issue."
|
||||
url: https://ziglang.org/code-of-conduct
|
||||
|
|
|
|||
686
README.md
686
README.md
|
|
@ -20,7 +20,7 @@ running `zig std`, which will open a browser tab.
|
|||
## Installation
|
||||
|
||||
* [download a pre-built binary](https://ziglang.org/download/)
|
||||
* [install from a package manager](https://github.com/ziglang/zig/wiki/Install-Zig-from-a-Package-Manager)
|
||||
* [install from a package manager](https://ziglang.org/learn/getting-started/#managers)
|
||||
* [bootstrap zig for any target](https://github.com/ziglang/zig-bootstrap)
|
||||
|
||||
A Zig installation is composed of two things:
|
||||
|
|
@ -47,7 +47,10 @@ Ensure you have the required dependencies:
|
|||
|
||||
* CMake >= 3.15
|
||||
* System C/C++ Toolchain
|
||||
* LLVM, Clang, LLD development libraries == 21.x
|
||||
* LLVM, Clang, LLD development libraries, version 21.x, compiled with the
|
||||
same system C/C++ toolchain.
|
||||
- If the system package manager lacks these libraries, or has them misconfigured,
|
||||
see below for how to build them from source.
|
||||
|
||||
Then it is the standard CMake build process:
|
||||
|
||||
|
|
@ -58,9 +61,9 @@ cmake ..
|
|||
make install
|
||||
```
|
||||
|
||||
For more options, tips, and troubleshooting, please see the
|
||||
[Building Zig From Source](https://github.com/ziglang/zig/wiki/Building-Zig-From-Source)
|
||||
page on the wiki.
|
||||
Use `CMAKE_PREFIX_PATH` if needed to help CMake find LLVM.
|
||||
|
||||
This produces `stage3/bin/zig` which is the Zig compiler built by itself.
|
||||
|
||||
## Building from Source without LLVM
|
||||
|
||||
|
|
@ -88,15 +91,363 @@ files, which may be optimized and compiled into object files via a system Clang
|
|||
package. This can be used to produce system packages of Zig applications
|
||||
without the Zig package dependency on LLVM.
|
||||
|
||||
## Building from Source Using Prebuilt Zig
|
||||
|
||||
Dependencies:
|
||||
|
||||
* A recent prior build of Zig. The exact version required depends on how
|
||||
recently breaking changes occurred. If the language or std lib changed too
|
||||
much since this version, then this method of building from source will fail.
|
||||
* LLVM, Clang, and LLD libraries built using Zig.
|
||||
|
||||
The easiest way to obtain both of these artifacts is to use
|
||||
[zig-bootstrap](https://github.com/ziglang/zig-bootstrap), which creates the
|
||||
directory `out/zig-$target-$cpu` and `out/$target-$cpu`, to be used as
|
||||
`$ZIG_PREFIX` and `$LLVM_PREFIX`, respectively, in the following command:
|
||||
|
||||
```
|
||||
"$ZIG_PREFIX/zig" build \
|
||||
-p stage3 \
|
||||
--search-prefix "$LLVM_PREFIX" \
|
||||
--zig-lib-dir "lib" \
|
||||
-Dstatic-llvm
|
||||
```
|
||||
|
||||
Where `$LLVM_PREFIX` is the path that contains, for example,
|
||||
`include/llvm/Pass.h` and `lib/libLLVMCore.a`.
|
||||
|
||||
This produces `stage3/bin/zig`. See `zig build -h` to learn about the options
|
||||
that can be passed such as `-Drelease`.
|
||||
|
||||
## Building from Source on Windows
|
||||
|
||||
### Option 1: Use the Windows Zig Compiler Dev Kit
|
||||
|
||||
This one has the benefit that LLVM, LLD, and Clang are built in Release mode,
|
||||
while your Zig build has the option to be a Debug build. It also works
|
||||
completely independently from MSVC so you don't need it to be installed.
|
||||
|
||||
Determine the URL by
|
||||
[looking at the CI script](https://github.com/ziglang/zig/blob/master/ci/x86_64-windows-debug.ps1#L1-L4).
|
||||
It will look something like this (replace `$VERSION` with the one you see by
|
||||
following the above link):
|
||||
|
||||
```
|
||||
https://ziglang.org/deps/zig+llvm+lld+clang-x86_64-windows-gnu-$VERSION.zip
|
||||
```
|
||||
|
||||
This zip file contains:
|
||||
|
||||
* An older Zig installation.
|
||||
* LLVM, LLD, and Clang libraries (.lib and .h files), version 16.0.1, built in Release mode.
|
||||
* zlib (.lib and .h files), v1.2.13, built in Release mode
|
||||
* zstd (.lib and .h files), v1.5.2, built in Release mode
|
||||
|
||||
#### Option 1a: CMake + [Ninja](https://ninja-build.org/)
|
||||
|
||||
Unzip the dev kit and then in cmd.exe in your Zig source checkout:
|
||||
|
||||
```bat
|
||||
mkdir build
|
||||
cd build
|
||||
set DEVKIT=$DEVKIT
|
||||
```
|
||||
|
||||
Replace `$DEVKIT` with the path to the folder that you unzipped after
|
||||
downloading it from the link above. Make sure to use forward slashes (`/`) for
|
||||
all path separators (otherwise CMake will try to interpret backslashes as
|
||||
escapes and fail).
|
||||
|
||||
Then run:
|
||||
|
||||
```bat
|
||||
cmake .. -GNinja -DCMAKE_PREFIX_PATH="%DEVKIT%" -DCMAKE_C_COMPILER="%DEVKIT%/bin/zig.exe;cc" -DCMAKE_CXX_COMPILER="%DEVKIT%/bin/zig.exe;c++" -DCMAKE_AR="%DEVKIT%/bin/zig.exe" -DZIG_AR_WORKAROUND=ON -DZIG_STATIC=ON -DZIG_USE_LLVM_CONFIG=OFF
|
||||
```
|
||||
|
||||
* Append `-DCMAKE_BUILD_TYPE=Release` for a Release build.
|
||||
* Append `-DZIG_NO_LIB=ON` to avoid having multiple copies of the lib/ folder.
|
||||
|
||||
Finally, run:
|
||||
|
||||
```bat
|
||||
ninja install
|
||||
```
|
||||
|
||||
You now have the `zig.exe` binary at `stage3\bin\zig.exe`.
|
||||
|
||||
#### Option 1b: zig build
|
||||
|
||||
Unzip the dev kit and then in cmd.exe in your Zig source checkout:
|
||||
|
||||
```bat
|
||||
$DEVKIT\bin\zig.exe build -p stage3 --search-prefix $DEVKIT --zig-lib-dir lib -Dstatic-llvm -Duse-zig-libcxx -Dtarget=x86_64-windows-gnu
|
||||
```
|
||||
|
||||
Replace `$DEVKIT` with the path to the folder that you unzipped after
|
||||
downloading it from the link above.
|
||||
|
||||
Append `-Doptimize=ReleaseSafe` for a Release build.
|
||||
|
||||
**If you get an error building at this step**, it is most likely that the Zig
|
||||
installation inside the dev kit is too old, and the dev kit needs to be
|
||||
updated. In this case one more step is required:
|
||||
|
||||
1. [Download the latest master branch zip file](https://ziglang.org/download/#release-master).
|
||||
2. Unzip, and try the above command again, replacing the path to zig.exe with
|
||||
the path to the zig.exe you just extracted, and also replace the lib\zig
|
||||
folder with the new contents.
|
||||
|
||||
You now have the `zig.exe` binary at `stage3\bin\zig.exe`.
|
||||
|
||||
### Option 2: Using CMake and Microsoft Visual Studio
|
||||
|
||||
This one has the benefit that changes to the language or build system won't
|
||||
break your dev kit. This option can be used to upgrade a dev kit.
|
||||
|
||||
First, build LLVM, LLD, and Clang from source using CMake and Microsoft Visual
|
||||
Studio (see below for detailed instructions).
|
||||
|
||||
Install [Build Tools for Visual Studio
|
||||
2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019).
|
||||
Be sure to select "Desktop development with C++" when prompted.
|
||||
* You must additionally check the optional component labeled **C++ ATL for
|
||||
v142 build tools**.
|
||||
|
||||
Install [CMake](http://cmake.org).
|
||||
|
||||
Use [git](https://git-scm.com/) to clone the zig repository to a path with no spaces, e.g. `C:\Users\Andy\zig`.
|
||||
|
||||
Using the start menu, run **x64 Native Tools Command Prompt for VS 2019** and execute these commands, replacing `C:\Users\Andy` with the correct value.
|
||||
|
||||
```bat
|
||||
mkdir C:\Users\Andy\zig\build-release
|
||||
cd C:\Users\Andy\zig\build-release
|
||||
"c:\Program Files\CMake\bin\cmake.exe" .. -Thost=x64 -G "Visual Studio 16 2019" -A x64 -DCMAKE_PREFIX_PATH=C:\Users\Andy\llvm+clang+lld-20.0.0-x86_64-windows-msvc-release-mt -DCMAKE_BUILD_TYPE=Release
|
||||
msbuild -p:Configuration=Release INSTALL.vcxproj
|
||||
```
|
||||
|
||||
You now have the `zig.exe` binary at `bin\zig.exe` and you can run the tests:
|
||||
|
||||
```bat
|
||||
bin\zig.exe build test
|
||||
```
|
||||
|
||||
This can take a long time. For tips & tricks on using the test suite, see [Contributing](https://github.com/ziglang/zig/blob/master/.github/CONTRIBUTING.md#editing-source-code).
|
||||
|
||||
Note: In case you get the error "llvm-config not found" (or similar), make sure that you have **no** trailing slash (`/` or `\`) at the end of the `-DCMAKE_PREFIX_PATH` value.
|
||||
|
||||
## Building LLVM, LLD, and Clang from Source
|
||||
|
||||
### Windows
|
||||
|
||||
Install [CMake](https://cmake.org/), version 3.20.0 or newer.
|
||||
|
||||
[Download LLVM, Clang, and LLD sources](https://releases.llvm.org/download.html#21.0.0)
|
||||
The downloads from LLVM lead to the GitHub release pages, where the sources
|
||||
will be listed as: `llvm-21.X.X.src.tar.xz`, `clang-21.X.X.src.tar.xz`,
|
||||
`lld-21.X.X.src.tar.xz`. Unzip each to their own directory. Ensure no
|
||||
directories have spaces in them. For example:
|
||||
|
||||
* `C:\Users\Andy\llvm-21.0.0.src`
|
||||
* `C:\Users\Andy\clang-21.0.0.src`
|
||||
* `C:\Users\Andy\lld-21.0.0.src`
|
||||
|
||||
Install [Build Tools for Visual Studio
|
||||
2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019).
|
||||
Be sure to select "C++ build tools" when prompted.
|
||||
* You **must** additionally check the optional component labeled **C++ ATL for
|
||||
v142 build tools**, as this won't be supplied by a default installation of
|
||||
Visual Studio.
|
||||
* Full list of supported MSVC versions:
|
||||
- 2017 (version 15.8) (unverified)
|
||||
- 2019 (version 16.7)
|
||||
|
||||
Install [Python 3.9.4](https://www.python.org). Tick the box to add python to
|
||||
your PATH environment variable.
|
||||
|
||||
#### LLVM
|
||||
|
||||
Using the start menu, run **x64 Native Tools Command Prompt for VS 2019** and execute these commands, replacing `C:\Users\Andy` with the correct value. Below is a brief explanation of each of the CMake parameters we pass when configuring the build:
|
||||
|
||||
- `-Thost=x64` : Sets the Windows toolset to use 64-bit mode.
|
||||
- `-A x64` : Make the build target 64-bit.
|
||||
- `-G "Visual Studio 16 2019"` : Specifies to generate a 2019 Visual Studio project, the best supported version.
|
||||
- `-DCMAKE_INSTALL_PREFIX=""` : Path that llvm components will be installed into by the install project.
|
||||
- `-DCMAKE_PREFIX_PATH=""` : Path that CMake will look into first when trying to locate dependencies, should be the same place as the install prefix. This will ensure that clang and lld will use your newly built llvm libraries.
|
||||
- `-DLLVM_ENABLE_ZLIB=OFF` : Don't build llvm with ZLib support as it's not required and will disrupt the target dependencies for components linking against llvm. This only has to be passed when building llvm, as this option will be saved into the config headers.
|
||||
- `-DCMAKE_BUILD_TYPE=Release` : Build llvm and components in release mode.
|
||||
- `-DCMAKE_BUILD_TYPE=Debug` : Build llvm and components in debug mode.
|
||||
- `-DLLVM_USE_CRT_RELEASE=MT` : Which C runtime should llvm use during release builds.
|
||||
- `-DLLVM_USE_CRT_DEBUG=MTd` : Make llvm use the debug version of the runtime in debug builds.
|
||||
|
||||
##### Release Mode
|
||||
|
||||
```bat
|
||||
mkdir C:\Users\Andy\llvm-21.0.0.src\build-release
|
||||
cd C:\Users\Andy\llvm-21.0.0.src\build-release
|
||||
"c:\Program Files\CMake\bin\cmake.exe" .. -Thost=x64 -G "Visual Studio 16 2019" -A x64 -DCMAKE_INSTALL_PREFIX=C:\Users\Andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-release-mt -DCMAKE_PREFIX_PATH=C:\Users\Andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-release-mt -DLLVM_ENABLE_ZLIB=OFF -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_LIBXML2=OFF -DLLVM_USE_CRT_RELEASE=MT
|
||||
msbuild /m -p:Configuration=Release INSTALL.vcxproj
|
||||
```
|
||||
|
||||
##### Debug Mode
|
||||
|
||||
```bat
|
||||
mkdir C:\Users\Andy\llvm-21.0.0.src\build-debug
|
||||
cd C:\Users\Andy\llvm-21.0.0.src\build-debug
|
||||
"c:\Program Files\CMake\bin\cmake.exe" .. -Thost=x64 -G "Visual Studio 16 2019" -A x64 -DCMAKE_INSTALL_PREFIX=C:\Users\andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-debug -DLLVM_ENABLE_ZLIB=OFF -DCMAKE_PREFIX_PATH=C:\Users\andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-debug -DCMAKE_BUILD_TYPE=Debug -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD="AVR" -DLLVM_ENABLE_LIBXML2=OFF -DLLVM_USE_CRT_DEBUG=MTd
|
||||
msbuild /m INSTALL.vcxproj
|
||||
```
|
||||
|
||||
#### LLD
|
||||
|
||||
Using the start menu, run **x64 Native Tools Command Prompt for VS 2019** and execute these commands, replacing `C:\Users\Andy` with the correct value.
|
||||
|
||||
##### Release Mode
|
||||
|
||||
```bat
|
||||
mkdir C:\Users\Andy\lld-21.0.0.src\build-release
|
||||
cd C:\Users\Andy\lld-21.0.0.src\build-release
|
||||
"c:\Program Files\CMake\bin\cmake.exe" .. -Thost=x64 -G "Visual Studio 16 2019" -A x64 -DCMAKE_INSTALL_PREFIX=C:\Users\Andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-release-mt -DCMAKE_PREFIX_PATH=C:\Users\Andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-release-mt -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_CRT_RELEASE=MT
|
||||
msbuild /m -p:Configuration=Release INSTALL.vcxproj
|
||||
```
|
||||
|
||||
##### Debug Mode
|
||||
|
||||
```bat
|
||||
mkdir C:\Users\Andy\lld-21.0.0.src\build-debug
|
||||
cd C:\Users\Andy\lld-21.0.0.src\build-debug
|
||||
"c:\Program Files\CMake\bin\cmake.exe" .. -Thost=x64 -G "Visual Studio 16 2019" -A x64 -DCMAKE_INSTALL_PREFIX=C:\Users\andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-debug -DCMAKE_PREFIX_PATH=C:\Users\andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-debug -DCMAKE_BUILD_TYPE=Debug -DLLVM_USE_CRT_DEBUG=MTd
|
||||
msbuild /m INSTALL.vcxproj
|
||||
```
|
||||
|
||||
#### Clang
|
||||
|
||||
Using the start menu, run **x64 Native Tools Command Prompt for VS 2019** and execute these commands, replacing `C:\Users\Andy` with the correct value.
|
||||
|
||||
##### Release Mode
|
||||
|
||||
```bat
|
||||
mkdir C:\Users\Andy\clang-21.0.0.src\build-release
|
||||
cd C:\Users\Andy\clang-21.0.0.src\build-release
|
||||
"c:\Program Files\CMake\bin\cmake.exe" .. -Thost=x64 -G "Visual Studio 16 2019" -A x64 -DCMAKE_INSTALL_PREFIX=C:\Users\Andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-release-mt -DCMAKE_PREFIX_PATH=C:\Users\Andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-release-mt -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_CRT_RELEASE=MT
|
||||
msbuild /m -p:Configuration=Release INSTALL.vcxproj
|
||||
```
|
||||
|
||||
##### Debug Mode
|
||||
|
||||
```bat
|
||||
mkdir C:\Users\Andy\clang-21.0.0.src\build-debug
|
||||
cd C:\Users\Andy\clang-21.0.0.src\build-debug
|
||||
"c:\Program Files\CMake\bin\cmake.exe" .. -Thost=x64 -G "Visual Studio 16 2019" -A x64 -DCMAKE_INSTALL_PREFIX=C:\Users\andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-debug -DCMAKE_PREFIX_PATH=C:\Users\andy\llvm+clang+lld-21.0.0-x86_64-windows-msvc-debug -DCMAKE_BUILD_TYPE=Debug -DLLVM_USE_CRT_DEBUG=MTd
|
||||
msbuild /m INSTALL.vcxproj
|
||||
```
|
||||
|
||||
### POSIX Systems
|
||||
|
||||
This guide will get you both a Debug build of LLVM, and/or a Release build of LLVM.
|
||||
It intentionally does not require privileged access, using a prefix inside your home
|
||||
directory instead of a global installation.
|
||||
|
||||
#### Release
|
||||
|
||||
This is the generally recommended approach.
|
||||
|
||||
```
|
||||
cd ~/Downloads
|
||||
git clone --depth 1 --branch release/21.x https://github.com/llvm/llvm-project llvm-project-21
|
||||
cd llvm-project-21
|
||||
git checkout release/21.x
|
||||
|
||||
mkdir build-release
|
||||
cd build-release
|
||||
cmake ../llvm \
|
||||
-DCMAKE_INSTALL_PREFIX=$HOME/local/llvm21-assert \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DLLVM_ENABLE_PROJECTS="lld;clang" \
|
||||
-DLLVM_ENABLE_LIBXML2=OFF \
|
||||
-DLLVM_ENABLE_TERMINFO=OFF \
|
||||
-DLLVM_ENABLE_LIBEDIT=OFF \
|
||||
-DLLVM_ENABLE_ASSERTIONS=ON \
|
||||
-DLLVM_PARALLEL_LINK_JOBS=1 \
|
||||
-G Ninja
|
||||
ninja install
|
||||
```
|
||||
|
||||
#### Debug
|
||||
|
||||
This is occasionally needed when debugging Zig's LLVM backend. Here we build
|
||||
the three projects separately so that LLVM can be in Debug mode while the
|
||||
others are in Release mode.
|
||||
|
||||
```
|
||||
cd ~/Downloads
|
||||
git clone --depth 1 --branch release/21.x https://github.com/llvm/llvm-project llvm-project-21
|
||||
cd llvm-project-21
|
||||
git checkout release/21.x
|
||||
|
||||
# LLVM
|
||||
mkdir llvm/build-debug
|
||||
cd llvm/build-debug
|
||||
cmake .. \
|
||||
-DCMAKE_INSTALL_PREFIX=$HOME/local/llvm21-debug \
|
||||
-DCMAKE_PREFIX_PATH=$HOME/local/llvm21-debug \
|
||||
-DCMAKE_BUILD_TYPE=Debug \
|
||||
-DLLVM_ENABLE_LIBXML2=OFF \
|
||||
-DLLVM_ENABLE_TERMINFO=OFF \
|
||||
-DLLVM_ENABLE_LIBEDIT=OFF \
|
||||
-DLLVM_PARALLEL_LINK_JOBS=1 \
|
||||
-G Ninja
|
||||
ninja install
|
||||
cd ../..
|
||||
|
||||
# LLD
|
||||
mkdir lld/build-debug
|
||||
cd lld/build-debug
|
||||
cmake .. \
|
||||
-DCMAKE_INSTALL_PREFIX=$HOME/local/llvm21-debug \
|
||||
-DCMAKE_PREFIX_PATH=$HOME/local/llvm21-debug \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DLLVM_PARALLEL_LINK_JOBS=1 \
|
||||
-DCMAKE_CXX_STANDARD=17 \
|
||||
-G Ninja
|
||||
ninja install
|
||||
cd ../..
|
||||
|
||||
# Clang
|
||||
mkdir clang/build-debug
|
||||
cd clang/build-debug
|
||||
cmake .. \
|
||||
-DCMAKE_INSTALL_PREFIX=$HOME/local/llvm21-debug \
|
||||
-DCMAKE_PREFIX_PATH=$HOME/local/llvm21-debug \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DLLVM_PARALLEL_LINK_JOBS=1 \
|
||||
-DLLVM_INCLUDE_TESTS=OFF \
|
||||
-G Ninja
|
||||
ninja install
|
||||
cd ../..
|
||||
```
|
||||
|
||||
Then add to your Zig CMake line that you got from the README.md:
|
||||
`-DCMAKE_PREFIX_PATH=$HOME/local/llvm21-debug` or
|
||||
`-DCMAKE_PREFIX_PATH=$HOME/local/llvm21-assert` depending on whether you want
|
||||
Debug or Release LLVM.
|
||||
|
||||
|
||||
## Contributing
|
||||
|
||||
[Donate monthly](https://ziglang.org/zsf/).
|
||||
|
||||
[Join a community](https://ziglang.org/community/).
|
||||
|
||||
Zig is Free and Open Source Software. We welcome bug reports and patches from
|
||||
everyone. However, keep in mind that Zig governance is BDFN (Benevolent
|
||||
Dictator For Now) which means that Andrew Kelley has final say on the design
|
||||
and implementation of everything.
|
||||
|
||||
### Make Software With Zig
|
||||
|
||||
One of the best ways you can contribute to Zig is to start using it for an
|
||||
open-source personal project.
|
||||
|
||||
|
|
@ -105,13 +456,36 @@ further design iterations of Zig. Importantly, each issue found this way comes
|
|||
with real world motivations, making it straightforward to explain the reasoning
|
||||
behind proposals and feature requests.
|
||||
|
||||
You will be taken much more seriously on the issue tracker if you have a
|
||||
personal project that uses Zig.
|
||||
Ideally, such a project will help you to learn new skills and add something
|
||||
to your personal portfolio at the same time.
|
||||
|
||||
### Talk About Zig
|
||||
|
||||
Another way to contribute is to write about Zig, speak about Zig at a
|
||||
conference, or do either of those things for your project which uses Zig.
|
||||
|
||||
Programming languages live and die based on the pulse of their ecosystems. The
|
||||
more people involved, the more we can build great things upon each other's
|
||||
abstractions.
|
||||
|
||||
### Strict No LLM / No AI Policy
|
||||
|
||||
No LLMs for issues.
|
||||
|
||||
No LLMs for patches / pull requests.
|
||||
|
||||
No LLMs for comments on the bug tracker, including translation.
|
||||
|
||||
English is encouraged, but not required. You are welcome to post in your native
|
||||
language and rely on others to have their own translation tools of choice to
|
||||
interpret your words.
|
||||
|
||||
### Find a Contributor Friendly Issue
|
||||
|
||||
The issue label
|
||||
[Contributor Friendly](https://github.com/ziglang/zig/issues?q=is%3Aissue+is%3Aopen+label%3A%22contributor+friendly%22)
|
||||
exists to help you find issues that are **limited in scope and/or knowledge of
|
||||
Zig internals.**
|
||||
exists to help you find issues that are **limited in scope and/or
|
||||
knowledge of Zig internals.**
|
||||
|
||||
Please note that issues labeled
|
||||
[Proposal](https://github.com/ziglang/zig/issues?q=is%3Aissue+is%3Aopen+label%3Aproposal)
|
||||
|
|
@ -123,17 +497,289 @@ still under consideration, please express your interest in the issue tracker,
|
|||
providing extra insights and considerations that others have not yet expressed.
|
||||
The most highly regarded argument in such a discussion is a real world use case.
|
||||
|
||||
For more tips, please see the
|
||||
[Contributing](https://github.com/ziglang/zig/wiki/Contributing) page on the
|
||||
wiki.
|
||||
Language proposals are not accepted. Please do not open an issue proposing to
|
||||
change the Zig language or syntax.
|
||||
|
||||
## Community
|
||||
### Editing Source Code
|
||||
|
||||
The Zig community is decentralized. Anyone is free to start and maintain their
|
||||
own space for Zig users to gather. There is no concept of "official" or
|
||||
"unofficial". Each gathering place has its own moderators and rules. Users are
|
||||
encouraged to be aware of the social structures of the spaces they inhabit, and
|
||||
work purposefully to facilitate spaces that align with their values.
|
||||
For a smooth workflow, when building from source, it is recommended to use
|
||||
CMake with the following settings:
|
||||
|
||||
* `-DCMAKE_BUILD_TYPE=Release` - to recompile zig faster.
|
||||
* `-GNinja` - Ninja is faster and simpler to use than Make.
|
||||
* `-DZIG_NO_LIB=ON` - Prevents the build system from copying the lib/
|
||||
directory to the installation prefix, causing zig to use lib/ directly from the
|
||||
source tree instead. Effectively, this makes it so that changes to lib/ do
|
||||
not require re-running the install command to become active.
|
||||
|
||||
After configuration, there are two scenarios:
|
||||
|
||||
1. Pulling upstream changes and rebuilding.
|
||||
- In this case use `git pull` and then `ninja install`. Expected wait:
|
||||
about 10 minutes.
|
||||
2. Building from source after making local changes.
|
||||
- In this case use `stage3/bin/zig build -p stage4 -Denable-llvm -Dno-lib`.
|
||||
Expected wait: about 20 seconds.
|
||||
|
||||
This leaves you with two builds of Zig:
|
||||
|
||||
* `stage3/bin/zig` - an optimized master branch build. Useful for
|
||||
miscellaneous activities such as `zig fmt`, as well as for building the
|
||||
compiler itself after changing the source code.
|
||||
* `stage4/bin/zig` - a debug build that includes your local changes; useful
|
||||
for testing and eliminating bugs before submitting a patch.
|
||||
|
||||
To reduce time spent waiting for the compiler to build, try these techniques:
|
||||
|
||||
* Omit `-Denable-llvm` if you don't need the LLVM backend.
|
||||
* Use `-Ddev=foo` to build with a reduced feature set for development of
|
||||
specific features. See `zig build -h` for a list of options.
|
||||
* Use `--watch -fincremental` to enable incremental compilation. This offers
|
||||
**near instant rebuilds**.
|
||||
|
||||
### Testing
|
||||
|
||||
```
|
||||
stage4/bin/zig build test
|
||||
```
|
||||
|
||||
This command runs the whole test suite, which does a lot of extra testing that
|
||||
you likely won't always need, and can take upwards of 1 hour. This is what the
|
||||
CI server runs when you make a pull request.
|
||||
|
||||
To save time, you can add the `--help` option to the `zig build` command and
|
||||
see what options are available. One of the most helpful ones is
|
||||
`-Dskip-release`. Adding this option to the command above, along with
|
||||
`-Dskip-non-native`, will take the time down from around 2 hours to about 30
|
||||
minutes, and this is a good enough amount of testing before making a pull
|
||||
request.
|
||||
|
||||
Another example is choosing a different set of things to test. For example,
|
||||
`test-std` instead of `test` will only run the standard library tests, and
|
||||
not the other ones. Combining this suggestion with the previous one, you could
|
||||
do this:
|
||||
|
||||
```
|
||||
stage4/bin/zig build test-std -Dskip-release
|
||||
```
|
||||
|
||||
This will run only the standard library tests in debug mode for all targets.
|
||||
It will cross-compile the tests for non-native targets but not run them.
|
||||
|
||||
When making changes to the compiler source code, the most helpful test step to
|
||||
run is `test-behavior`. When editing documentation it is `docs`. You can find
|
||||
this information and more in the `zig build --help` menu.
|
||||
|
||||
#### Directly Testing the Standard Library with `zig test`
|
||||
|
||||
This command will run the standard library tests with only the native target
|
||||
configuration and is estimated to complete in 3 minutes:
|
||||
|
||||
```
|
||||
zig build test-std -Dno-matrix
|
||||
```
|
||||
|
||||
However, one may also use `zig test` directly. From inside the `ziglang/zig` repo root:
|
||||
|
||||
```
|
||||
zig test lib/std/std.zig --zig-lib-dir lib
|
||||
```
|
||||
|
||||
You can add `--test-filter "some test name"` to run a specific test or a subset of tests.
|
||||
(Running exactly 1 test is not reliably possible, because the test filter does not
|
||||
exclude anonymous test blocks, but that shouldn't interfere with whatever
|
||||
you're trying to test in practice.)
|
||||
|
||||
Note that `--test-filter` filters on fully qualified names, so e.g. it's possible to run only the `std.json` tests with:
|
||||
|
||||
```
|
||||
zig test lib/std/std.zig --zig-lib-dir lib --test-filter "json."
|
||||
```
|
||||
|
||||
If you used `-Dno-lib` and you are in a `build/` subdirectory, you can omit the
|
||||
`--zig-lib-dir` argument:
|
||||
|
||||
```
|
||||
stage3/bin/zig test ../lib/std/std.zig
|
||||
```
|
||||
|
||||
#### Testing Non-Native Architectures with QEMU
|
||||
|
||||
The Linux CI server additionally has qemu installed and sets `-fqemu`.
|
||||
This provides test coverage for, e.g. aarch64 even on x86_64 machines. It's
|
||||
recommended for Linux users to install qemu and enable this testing option
|
||||
when editing the standard library or anything related to a non-native
|
||||
architecture.
|
||||
|
||||
QEMU packages provided by some system package managers (such as Debian) may be
|
||||
a few releases old, or may be missing newer targets such as aarch64 and RISC-V.
|
||||
[ziglang/qemu-static](https://github.com/ziglang/qemu-static) offers static
|
||||
binaries of the latest QEMU version.
|
||||
|
||||
##### Testing Non-Native glibc Targets
|
||||
|
||||
Testing foreign architectures with dynamically linked glibc is one step trickier.
|
||||
This requires enabling `--glibc-runtimes /path/to/glibc/multi/install/glibcs`.
|
||||
This path is obtained by building glibc for multiple architectures. This
|
||||
process for me took an entire day to complete and takes up 65 GiB on my hard
|
||||
drive. The CI server does not provide this test coverage.
|
||||
|
||||
[Instructions for producing this path](https://codeberg.org/ziglang/infra/src/branch/master/building-libcs.md#linux-glibc) (just the part with `build-many-glibcs.py`).
|
||||
|
||||
It is understood that most contributors will not have these tests enabled.
|
||||
|
||||
#### Testing Windows from a Linux Machine with Wine
|
||||
|
||||
When developing on Linux, another option is available to you: `-fwine`.
|
||||
This will enable running behavior tests and std lib tests with Wine. It's
|
||||
recommended for Linux users to install Wine and enable this testing option
|
||||
when editing the standard library or anything Windows-related.
|
||||
|
||||
#### Testing WebAssembly using wasmtime
|
||||
|
||||
If you have [wasmtime](https://wasmtime.dev/) installed, take advantage of the
|
||||
`-fwasmtime` flag which will enable running WASI behavior tests and std
|
||||
lib tests. It's recommended for all users to install wasmtime and enable this
|
||||
testing option when editing the standard library and especially anything
|
||||
WebAssembly-related.
|
||||
|
||||
### Improving Translate-C
|
||||
|
||||
`translate-c` is a feature provided by Zig that converts C source code into
|
||||
Zig source code. It powers the `zig translate-c` command as well as
|
||||
[@cImport](https://ziglang.org/documentation/master/#cImport), allowing Zig
|
||||
code to not only take advantage of function prototypes defined in .h files,
|
||||
but also `static inline` functions written in C, and even some macros.
|
||||
|
||||
This feature used to work by using libclang API to parse and semantically
|
||||
analyze C/C++ files, and then based on the provided AST and type information,
|
||||
generating Zig AST, and finally using the mechanisms of `zig fmt` to render the
|
||||
Zig AST to a file.
|
||||
|
||||
However, C translation is in a transitional period right now. It used to be
|
||||
based on Clang, but is now based on Aro:
|
||||
|
||||
[Pull Request: update aro and translate-c to latest; delete clang translate-c](https://github.com/ziglang/zig/pull/24497)
|
||||
|
||||
Test coverage as well as bug reports have been moved to this repository:
|
||||
|
||||
[ziglang/translate-c](https://github.com/ziglang/translate-c/)
|
||||
|
||||
In the future, [@cImport will move to the build system](https://github.com/ziglang/zig/issues/20630),
|
||||
but for now, the translate-c logic is copy-pasted from that project into
|
||||
[ziglang/zig](https://github.com/ziglang/zig/), powering both `zig translate-c`
|
||||
and `@cImport`.
|
||||
|
||||
Please see the readme of the translate-c project for how to contribute. Once an
|
||||
issue is resolved (and test coverage added) there, the changes can be
|
||||
immediately backported to the zig compiler.
|
||||
|
||||
Once we fix the problems people are facing from this transition from Clang to
|
||||
Aro, we can move on to enhancing the translate-c package such that `@cImport`
|
||||
becomes redundant and can therefore be eliminated from the language.
|
||||
|
||||
### Autodoc
|
||||
|
||||
Autodoc is an interactive, searchable, single-page web application for browsing
|
||||
Zig codebases.
|
||||
|
||||
An autodoc deployment looks like this:
|
||||
|
||||
```
|
||||
index.html
|
||||
main.js
|
||||
main.wasm
|
||||
sources.tar
|
||||
```
|
||||
|
||||
* `main.js` and `index.html` are static files which live in a Zig installation
|
||||
at `lib/docs/`.
|
||||
* `main.wasm` is compiled from the Zig files inside `lib/docs/wasm/`.
|
||||
* `sources.tar` is all the zig source files of the project.
|
||||
|
||||
These artifacts are produced by the compiler when `-femit-docs` is passed.
|
||||
|
||||
#### Making Changes
|
||||
|
||||
The command `zig std` spawns an HTTP server that provides all the assets
|
||||
mentioned above specifically for the standard library.
|
||||
|
||||
The server creates the requested files on the fly, including rebuilding
|
||||
`main.wasm` if any of its source files changed, and constructing `sources.tar`,
|
||||
meaning that any source changes to the documented files, or to the autodoc
|
||||
system itself are immediately reflected when viewing docs.
|
||||
|
||||
This means you can test changes to Zig standard library documentation, as well
|
||||
as autodocs functionality, by pressing refresh in the browser.
|
||||
|
||||
Prefixing the URL with `/debug` results in a debug build of `main.wasm`.
|
||||
|
||||
#### Debugging the Zig Code
|
||||
|
||||
While Firefox and Safari support are obviously required, I recommend Chromium
|
||||
for development for one reason in particular:
|
||||
|
||||
[C/C++ DevTools Support (DWARF)](https://chromewebstore.google.com/detail/cc++-devtools-support-dwa/pdcpmagijalfljmkmjngeonclgbbannb)
|
||||
|
||||
This makes debugging Zig WebAssembly code a breeze.
|
||||
|
||||
#### The Sources Tarball
|
||||
|
||||
The system expects the top level of `sources.tar` to be the set of modules
|
||||
documented. So for the Zig standard library you would do this:
|
||||
`tar cf std.tar std/`. Don't compress it; the idea is to rely on HTTP
|
||||
compression.
|
||||
|
||||
Any files that are not `.zig` source files will be ignored by `main.wasm`,
|
||||
however, those files will take up wasted space in the tar file. For the
|
||||
standard library, use the set of files that zig installs to when running `zig
|
||||
build`, which is the same as the set of files that are provided on
|
||||
ziglang.org/download.
|
||||
|
||||
If the system doesn't find a file named "foo/root.zig" or "foo/foo.zig", it
|
||||
will use the first file in the tar as the module root.
|
||||
|
||||
You don't typically need to create `sources.tar` yourself, since it is lazily
|
||||
provided by the `zig std` HTTP server as well as produced by `-femit-docs`.
|
||||
|
||||
|
||||
## Testing Zig Code With LLDB
|
||||
|
||||
[@jacobly0](https://github.com/jacobly0) maintains a fork of LLDB with Zig support: https://github.com/jacobly0/llvm-project/tree/lldb-zig
|
||||
|
||||
This fork only contains changes for debugging programs compiled by Zig's self-hosted backends, i.e. `zig build-exe -fno-llvm ...`.
|
||||
|
||||
### Building
|
||||
|
||||
To build the LLDB fork, make sure you have [prerequisites](https://lldb.llvm.org/resources/build.html#preliminaries) installed, and then do something like:
|
||||
|
||||
```console
|
||||
$ cmake llvm -G Ninja -B build -DLLVM_ENABLE_PROJECTS="clang;lldb" -DCMAKE_BUILD_TYPE=RelWithDebInfo -DLLVM_ENABLE_ASSERTIONS=ON -DLLDB_ENABLE_LIBEDIT=ON -DLLDB_ENABLE_PYTHON=ON
|
||||
$ cmake --build build --target lldb --target lldb-server
|
||||
```
|
||||
|
||||
(You may need to manually [configure dependencies](https://lldb.llvm.org/resources/build.html#optional-dependencies) if CMake can't find them.)
|
||||
|
||||
Once built, you can run `./build/bin/lldb` and so on.
|
||||
|
||||
### Pretty Printers
|
||||
|
||||
If you will be debugging the Zig compiler itself, or if you will be debugging any project compiled with Zig's LLVM backend (not recommended with the LLDB fork, prefer vanilla LLDB with a version that matches the version of LLVM that Zig is using), you can get a better debugging experience by using [`lldb_pretty_printers.py`](https://github.com/ziglang/zig/blob/master/tools/lldb_pretty_printers.py).
|
||||
|
||||
Put this line in `~/.lldbinit`:
|
||||
|
||||
```
|
||||
command script import /path/to/zig/tools/lldb_pretty_printers.py
|
||||
```
|
||||
|
||||
If you will be using Zig's LLVM backend (again, not recommended with the LLDB fork), you will also want these lines:
|
||||
|
||||
```
|
||||
type category enable zig.lang
|
||||
type category enable zig.std
|
||||
```
|
||||
If you will be debugging a Zig compiler built using Zig's LLVM backend (again, not recommended with the LLDB fork), you will also want this line:
|
||||
```
|
||||
type category enable zig.stage2
|
||||
```
|
||||
|
||||
Please see the [Community](https://github.com/ziglang/zig/wiki/Community) wiki
|
||||
page for a public listing of social spaces.
|
||||
|
|
|
|||
|
|
@ -97,6 +97,7 @@ pub fn build(b: *std.Build) !void {
|
|||
const skip_darwin = b.option(bool, "skip-darwin", "Main test suite skips targets with darwin OSs") orelse false;
|
||||
const skip_linux = b.option(bool, "skip-linux", "Main test suite skips targets with linux OS") orelse false;
|
||||
const skip_llvm = b.option(bool, "skip-llvm", "Main test suite skips targets that use LLVM backend") orelse false;
|
||||
const skip_test_incremental = b.option(bool, "skip-test-incremental", "Main test step omits dependency on test-incremental step") orelse false;
|
||||
|
||||
const only_install_lib_files = b.option(bool, "lib-files-only", "Only install library files") orelse false;
|
||||
|
||||
|
|
@ -609,7 +610,7 @@ pub fn build(b: *std.Build) !void {
|
|||
|
||||
const test_incremental_step = b.step("test-incremental", "Run the incremental compilation test cases");
|
||||
try tests.addIncrementalTests(b, test_incremental_step);
|
||||
test_step.dependOn(test_incremental_step);
|
||||
if (!skip_test_incremental) test_step.dependOn(test_incremental_step);
|
||||
|
||||
if (tests.addLibcTests(b, .{
|
||||
.optimize_modes = optimization_modes,
|
||||
|
|
|
|||
|
|
@ -59,6 +59,7 @@ Write-Output "Main test suite..."
|
|||
-Dstatic-llvm `
|
||||
-Dskip-non-native `
|
||||
-Dskip-release `
|
||||
-Dskip-test-incremental `
|
||||
-Denable-symlinks-windows `
|
||||
--test-timeout 30m
|
||||
CheckLastExitCode
|
||||
|
|
|
|||
|
|
@ -58,6 +58,7 @@ Write-Output "Main test suite..."
|
|||
--search-prefix "$PREFIX_PATH" `
|
||||
-Dstatic-llvm `
|
||||
-Dskip-non-native `
|
||||
-Dskip-test-incremental `
|
||||
-Denable-symlinks-windows `
|
||||
--test-timeout 30m
|
||||
CheckLastExitCode
|
||||
|
|
|
|||
|
|
@ -638,7 +638,7 @@
|
|||
{#syntax#}i7{#endsyntax#} refers to a signed 7-bit integer. The maximum allowed bit-width of an
|
||||
integer type is {#syntax#}65535{#endsyntax#}.
|
||||
</p>
|
||||
{#see_also|Integers|Floats|void|Errors|@Type#}
|
||||
{#see_also|Integers|Floats|void|Errors|@Int#}
|
||||
{#header_close#}
|
||||
{#header_open|Primitive Values#}
|
||||
<div class="table-wrapper">
|
||||
|
|
@ -3723,9 +3723,9 @@ void do_a_thing(struct Foo *foo) {
|
|||
<td>{#syntax#}x{#endsyntax#} is a {#syntax#}@FieldType(T, "a"){#endsyntax#}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">{#syntax#}@Type(x){#endsyntax#}</th>
|
||||
<th scope="row">{#syntax#}@Int(x, y){#endsyntax#}</th>
|
||||
<td>-</td>
|
||||
<td>{#syntax#}x{#endsyntax#} is a {#syntax#}std.builtin.Type{#endsyntax#}</td>
|
||||
<td>{#syntax#}x{#endsyntax#} is a {#syntax#}std.builtin.Signedness{#endsyntax#}, {#syntax#}y{#endsyntax#} is a {#syntax#}u16{#endsyntax#}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">{#syntax#}@typeInfo(x){#endsyntax#}</th>
|
||||
|
|
@ -3839,9 +3839,9 @@ void do_a_thing(struct Foo *foo) {
|
|||
<td>{#syntax#}x{#endsyntax#} has no result location (typed initializers do not propagate result locations)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">{#syntax#}@Type(x){#endsyntax#}</th>
|
||||
<td>{#syntax#}ptr{#endsyntax#}</td>
|
||||
<td>{#syntax#}x{#endsyntax#} has no result location</td>
|
||||
<th scope="row">{#syntax#}@Int(x, y){#endsyntax#}</th>
|
||||
<td>-</td>
|
||||
<td>{#syntax#}x{#endsyntax#} and {#syntax#}y{#endsyntax#} do not have result locations</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">{#syntax#}@typeInfo(x){#endsyntax#}</th>
|
||||
|
|
@ -5755,41 +5755,75 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
|
|||
</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@Type#}
|
||||
<pre>{#syntax#}@Type(comptime info: std.builtin.Type) type{#endsyntax#}</pre>
|
||||
<p>
|
||||
This function is the inverse of {#link|@typeInfo#}. It reifies type information
|
||||
into a {#syntax#}type{#endsyntax#}.
|
||||
</p>
|
||||
<p>
|
||||
It is available for the following types:
|
||||
</p>
|
||||
<ul>
|
||||
<li>{#syntax#}type{#endsyntax#}</li>
|
||||
<li>{#syntax#}noreturn{#endsyntax#}</li>
|
||||
<li>{#syntax#}void{#endsyntax#}</li>
|
||||
<li>{#syntax#}bool{#endsyntax#}</li>
|
||||
<li>{#link|Integers#} - The maximum bit count for an integer type is {#syntax#}65535{#endsyntax#}.</li>
|
||||
<li>{#link|Floats#}</li>
|
||||
<li>{#link|Pointers#}</li>
|
||||
<li>{#syntax#}comptime_int{#endsyntax#}</li>
|
||||
<li>{#syntax#}comptime_float{#endsyntax#}</li>
|
||||
<li>{#syntax#}@TypeOf(undefined){#endsyntax#}</li>
|
||||
<li>{#syntax#}@TypeOf(null){#endsyntax#}</li>
|
||||
<li>{#link|Arrays#}</li>
|
||||
<li>{#link|Optionals#}</li>
|
||||
<li>{#link|Error Set Type#}</li>
|
||||
<li>{#link|Error Union Type#}</li>
|
||||
<li>{#link|Vectors#}</li>
|
||||
<li>{#link|opaque#}</li>
|
||||
<li>{#syntax#}anyframe{#endsyntax#}</li>
|
||||
<li>{#link|struct#}</li>
|
||||
<li>{#link|enum#}</li>
|
||||
<li>{#link|Enum Literals#}</li>
|
||||
<li>{#link|union#}</li>
|
||||
<li>{#link|Functions#}</li>
|
||||
</ul>
|
||||
{#header_open|@EnumLiteral#}
|
||||
<pre>{#syntax#}@EnumLiteral() type{#endsyntax#}</pre>
|
||||
<p>Returns the comptime-only "enum literal" type. This is the type of uncoerced {#link|Enum Literals#}. Values of this type can coerce to any {#link|enum#} with a matching field.</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@Int#}
|
||||
<pre>{#syntax#}@Int(comptime signedness: std.builtin.Signedness, comptime bits: u16) type{#endsyntax#}</pre>
|
||||
<p>Returns an integer type with the given signedness and bit width.</p>
|
||||
<p>For instance, {#syntax#}@Int(.unsigned, 18){#endsyntax#} returns the type {#syntax#}u18{#endsyntax#}.</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@Tuple#}
|
||||
<pre>{#syntax#}@Tuple(comptime field_types: []const type) type{#endsyntax#}</pre>
|
||||
<p>Returns a {#link|tuple|Tuples#} type with the given field types.</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@Pointer#}
|
||||
<pre>{#syntax#}@Pointer(
|
||||
comptime size: std.builtin.Type.Pointer.Size,
|
||||
comptime attrs: std.builtin.Type.Pointer.Attributes,
|
||||
comptime Element: type,
|
||||
comptime sentinel: ?Element,
|
||||
) type{#endsyntax#}</pre>
|
||||
<p>Returns a {#link|pointer|Pointers#} type with the properties specified by the arguments.</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@Fn#}
|
||||
<pre>{#syntax#}@Fn(
|
||||
comptime param_types: []const type,
|
||||
comptime param_attrs: *const [param_types.len]std.builtin.Type.Fn.Param.Attributes,
|
||||
comptime ReturnType: type,
|
||||
comptime attrs: std.builtin.Type.Fn.Attributes,
|
||||
) type{#endsyntax#}</pre>
|
||||
<p>Returns a {#link|function|Functions#} type with the properties specified by the arguments.</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@Struct#}
|
||||
<pre>{#syntax#}@Struct(
|
||||
comptime layout: std.builtin.Type.ContainerLayout,
|
||||
comptime BackingInt: ?type,
|
||||
comptime field_names: []const []const u8,
|
||||
comptime field_types: *const [field_names.len]type,
|
||||
comptime field_attrs: *const [field_names.len]std.builtin.Type.StructField.Attributes,
|
||||
) type{#endsyntax#}</pre>
|
||||
<p>Returns a {#link|struct#} type with the properties specified by the arguments.</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@Union#}
|
||||
<pre>{#syntax#}@Union(
|
||||
comptime layout: std.builtin.Type.ContainerLayout,
|
||||
/// Either the integer tag type, or the integer backing type, depending on `layout`.
|
||||
comptime ArgType: ?type,
|
||||
comptime field_names: []const []const u8,
|
||||
comptime field_types: *const [field_names.len]type,
|
||||
comptime field_attrs: *const [field_names.len]std.builtin.Type.UnionField.Attributes,
|
||||
) type{#endsyntax#}</pre>
|
||||
<p>Returns a {#link|union#} type with the properties specified by the arguments.</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@Enum#}
|
||||
<pre>{#syntax#}@Enum(
|
||||
comptime TagInt: type,
|
||||
comptime mode: std.builtin.Type.Enum.Mode,
|
||||
comptime field_names: []const []const u8,
|
||||
comptime field_values: *const [field_names.len]TagInt,
|
||||
) type{#endsyntax#}</pre>
|
||||
<p>Returns an {#link|enum#} type with the properties specified by the arguments.</p>
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|@typeInfo#}
|
||||
<pre>{#syntax#}@typeInfo(comptime T: type) std.builtin.Type{#endsyntax#}</pre>
|
||||
<p>
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ pub const std_options: std.Options = .{
|
|||
|
||||
fn myLogFn(
|
||||
comptime level: std.log.Level,
|
||||
comptime scope: @Type(.enum_literal),
|
||||
comptime scope: @EnumLiteral(),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void {
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ test "coercion between unions and enums" {
|
|||
try expect(u_4.tag() == 1);
|
||||
|
||||
// The following example is invalid.
|
||||
// error: coercion from enum '@TypeOf(.enum_literal)' to union 'test_coerce_unions_enum.U2' must initialize 'f32' field 'b'
|
||||
// error: coercion from enum '@EnumLiteral()' to union 'test_coerce_unions_enum.U2' must initialize 'f32' field 'b'
|
||||
//var u_5: U2 = .b;
|
||||
//try expect(u_5.tag() == 2);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,9 +1,10 @@
|
|||
const std = @import("std");
|
||||
|
||||
test "detect leak" {
|
||||
var list = std.array_list.Managed(u21).init(std.testing.allocator);
|
||||
// missing `defer list.deinit();`
|
||||
try list.append('☔');
|
||||
const gpa = std.testing.allocator;
|
||||
var list: std.ArrayList(u21) = .empty;
|
||||
// missing `defer list.deinit(gpa);`
|
||||
try list.append(gpa, '☔');
|
||||
|
||||
try std.testing.expect(list.items.len == 1);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ pub fn panic(msg: []const u8, st: ?*std.builtin.StackTrace, addr: ?usize) noretu
|
|||
|
||||
fn logFn(
|
||||
comptime message_level: log.Level,
|
||||
comptime scope: @TypeOf(.enum_literal),
|
||||
comptime scope: @EnumLiteral(),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void {
|
||||
|
|
|
|||
22
lib/compiler/aro/aro/Attribute.zig
vendored
22
lib/compiler/aro/aro/Attribute.zig
vendored
|
|
@ -717,23 +717,13 @@ pub const Tag = std.meta.DeclEnum(attributes);
|
|||
|
||||
pub const Arguments = blk: {
|
||||
const decls = @typeInfo(attributes).@"struct".decls;
|
||||
var union_fields: [decls.len]ZigType.UnionField = undefined;
|
||||
for (decls, &union_fields) |decl, *field| {
|
||||
field.* = .{
|
||||
.name = decl.name,
|
||||
.type = @field(attributes, decl.name),
|
||||
.alignment = @alignOf(@field(attributes, decl.name)),
|
||||
};
|
||||
var names: [decls.len][]const u8 = undefined;
|
||||
var types: [decls.len]type = undefined;
|
||||
for (decls, &names, &types) |decl, *name, *T| {
|
||||
name.* = decl.name;
|
||||
T.* = @field(attributes, decl.name);
|
||||
}
|
||||
|
||||
break :blk @Type(.{
|
||||
.@"union" = .{
|
||||
.layout = .auto,
|
||||
.tag_type = null,
|
||||
.fields = &union_fields,
|
||||
.decls = &.{},
|
||||
},
|
||||
});
|
||||
break :blk @Union(.auto, null, &names, &types, &@splat(.{}));
|
||||
};
|
||||
|
||||
pub fn ArgumentsForTag(comptime tag: Tag) type {
|
||||
|
|
|
|||
9
lib/compiler/aro/aro/Target.zig
vendored
9
lib/compiler/aro/aro/Target.zig
vendored
|
|
@ -1258,7 +1258,6 @@ pub fn toLLVMTriple(target: *const Target, buf: []u8) []const u8 {
|
|||
|
||||
.android => "android",
|
||||
.androideabi => "androideabi",
|
||||
.code16 => "code16",
|
||||
.eabi => "eabi",
|
||||
.eabihf => "eabihf",
|
||||
.gnu => "gnu",
|
||||
|
|
@ -1630,11 +1629,11 @@ test parseAbi {
|
|||
try testing.expect(query.abi == .android);
|
||||
try testing.expectEqual(query.android_api_level, 17);
|
||||
|
||||
try testing.expectError(error.InvalidAbiVersion, parseAbi(&query, "code162", null));
|
||||
try testing.expect(query.abi == .code16);
|
||||
try testing.expectError(error.InvalidAbiVersion, parseAbi(&query, "ilp322", null));
|
||||
try testing.expect(query.abi == .ilp32);
|
||||
|
||||
try testing.expectError(error.InvalidAbiVersion, parseAbi(&query, "code16.2", null));
|
||||
try testing.expect(query.abi == .code16);
|
||||
try testing.expectError(error.InvalidAbiVersion, parseAbi(&query, "ilp32.2", null));
|
||||
try testing.expect(query.abi == .ilp32);
|
||||
}
|
||||
|
||||
/// Parse OS string with common aliases in `<os>(.?<version>(...<version>))?` format.
|
||||
|
|
|
|||
2
lib/compiler/aro/assembly_backend/x86_64.zig
vendored
2
lib/compiler/aro/assembly_backend/x86_64.zig
vendored
|
|
@ -59,7 +59,7 @@ fn serializeFloat(comptime T: type, value: T, w: *std.Io.Writer) !void {
|
|||
else => {
|
||||
const size = @bitSizeOf(T);
|
||||
const storage_unit = std.meta.intToEnum(StorageUnit, size) catch unreachable;
|
||||
const IntTy = @Type(.{ .int = .{ .signedness = .unsigned, .bits = size } });
|
||||
const IntTy = @Int(.unsigned, size);
|
||||
const int_val: IntTy = @bitCast(value);
|
||||
return serializeInt(int_val, storage_unit, w);
|
||||
},
|
||||
|
|
|
|||
|
|
@ -107,7 +107,6 @@ pub fn main() !void {
|
|||
|
||||
var targets = std.array_list.Managed([]const u8).init(arena);
|
||||
var debug_log_scopes = std.array_list.Managed([]const u8).init(arena);
|
||||
var thread_pool_options: std.Thread.Pool.Options = .{ .allocator = arena };
|
||||
|
||||
var install_prefix: ?[]const u8 = null;
|
||||
var dir_list = std.Build.DirList{};
|
||||
|
|
@ -413,19 +412,11 @@ pub fn main() !void {
|
|||
};
|
||||
} else if (mem.eql(u8, arg, "-fno-reference-trace")) {
|
||||
builder.reference_trace = null;
|
||||
} else if (mem.startsWith(u8, arg, "-j")) {
|
||||
const num = arg["-j".len..];
|
||||
const n_jobs = std.fmt.parseUnsigned(u32, num, 10) catch |err| {
|
||||
std.debug.print("unable to parse jobs count '{s}': {s}", .{
|
||||
num, @errorName(err),
|
||||
});
|
||||
process.exit(1);
|
||||
};
|
||||
if (n_jobs < 1) {
|
||||
std.debug.print("number of jobs must be at least 1\n", .{});
|
||||
process.exit(1);
|
||||
}
|
||||
thread_pool_options.n_jobs = n_jobs;
|
||||
} else if (mem.cutPrefix(u8, arg, "-j")) |text| {
|
||||
const n = std.fmt.parseUnsigned(u32, text, 10) catch |err|
|
||||
fatal("unable to parse jobs count '{s}': {t}", .{ text, err });
|
||||
if (n < 1) fatal("number of jobs must be at least 1", .{});
|
||||
threaded.setAsyncLimit(.limited(n));
|
||||
} else if (mem.eql(u8, arg, "--")) {
|
||||
builder.args = argsRest(args, arg_idx);
|
||||
break;
|
||||
|
|
@ -503,7 +494,7 @@ pub fn main() !void {
|
|||
|
||||
.max_rss = max_rss,
|
||||
.max_rss_is_default = false,
|
||||
.max_rss_mutex = .{},
|
||||
.max_rss_mutex = .init,
|
||||
.skip_oom_steps = skip_oom_steps,
|
||||
.unit_test_timeout_ns = test_timeout_ns,
|
||||
|
||||
|
|
@ -516,7 +507,6 @@ pub fn main() !void {
|
|||
.error_style = error_style,
|
||||
.multiline_errors = multiline_errors,
|
||||
.summary = summary orelse if (watch or webui_listen != null) .line else .failures,
|
||||
.thread_pool = undefined,
|
||||
|
||||
.ttyconf = ttyconf,
|
||||
};
|
||||
|
|
@ -547,16 +537,12 @@ pub fn main() !void {
|
|||
break :w try .init();
|
||||
};
|
||||
|
||||
try run.thread_pool.init(thread_pool_options);
|
||||
defer run.thread_pool.deinit();
|
||||
|
||||
const now = Io.Clock.Timestamp.now(io, .awake) catch |err| fatal("failed to collect timestamp: {t}", .{err});
|
||||
|
||||
run.web_server = if (webui_listen) |listen_address| ws: {
|
||||
if (builtin.single_threaded) unreachable; // `fatal` above
|
||||
break :ws .init(.{
|
||||
.gpa = gpa,
|
||||
.thread_pool = &run.thread_pool,
|
||||
.ttyconf = ttyconf,
|
||||
.graph = &graph,
|
||||
.all_steps = run.step_stack.keys(),
|
||||
|
|
@ -597,7 +583,7 @@ pub fn main() !void {
|
|||
|
||||
if (run.web_server) |*ws| {
|
||||
assert(!watch); // fatal error after CLI parsing
|
||||
while (true) switch (ws.wait()) {
|
||||
while (true) switch (try ws.wait()) {
|
||||
.rebuild => {
|
||||
for (run.step_stack.keys()) |step| {
|
||||
step.state = .precheck_done;
|
||||
|
|
@ -666,7 +652,7 @@ const Run = struct {
|
|||
gpa: Allocator,
|
||||
max_rss: u64,
|
||||
max_rss_is_default: bool,
|
||||
max_rss_mutex: std.Thread.Mutex,
|
||||
max_rss_mutex: Io.Mutex,
|
||||
skip_oom_steps: bool,
|
||||
unit_test_timeout_ns: ?u64,
|
||||
watch: bool,
|
||||
|
|
@ -675,7 +661,6 @@ const Run = struct {
|
|||
memory_blocked_steps: std.ArrayList(*Step),
|
||||
/// Allocated into `gpa`.
|
||||
step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
|
||||
thread_pool: std.Thread.Pool,
|
||||
/// Similar to the `tty.Config` returned by `std.debug.lockStderrWriter`,
|
||||
/// but also respects the '--color' flag.
|
||||
ttyconf: tty.Config,
|
||||
|
|
@ -754,14 +739,13 @@ fn runStepNames(
|
|||
const gpa = run.gpa;
|
||||
const io = b.graph.io;
|
||||
const step_stack = &run.step_stack;
|
||||
const thread_pool = &run.thread_pool;
|
||||
|
||||
{
|
||||
const step_prog = parent_prog_node.start("steps", step_stack.count());
|
||||
defer step_prog.end();
|
||||
|
||||
var wait_group: std.Thread.WaitGroup = .{};
|
||||
defer wait_group.wait();
|
||||
var group: Io.Group = .init;
|
||||
defer group.wait(io);
|
||||
|
||||
// Here we spawn the initial set of tasks with a nice heuristic -
|
||||
// dependency order. Each worker when it finishes a step will then
|
||||
|
|
@ -771,9 +755,7 @@ fn runStepNames(
|
|||
const step = steps_slice[steps_slice.len - i - 1];
|
||||
if (step.state == .skipped_oom) continue;
|
||||
|
||||
thread_pool.spawnWg(&wait_group, workerMakeOneStep, .{
|
||||
&wait_group, b, step, step_prog, run,
|
||||
});
|
||||
group.async(io, workerMakeOneStep, .{ &group, b, step, step_prog, run });
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -855,7 +837,6 @@ fn runStepNames(
|
|||
var f = std.Build.Fuzz.init(
|
||||
gpa,
|
||||
io,
|
||||
thread_pool,
|
||||
run.ttyconf,
|
||||
step_stack.keys(),
|
||||
parent_prog_node,
|
||||
|
|
@ -1318,13 +1299,14 @@ fn constructGraphAndCheckForDependencyLoop(
|
|||
}
|
||||
|
||||
fn workerMakeOneStep(
|
||||
wg: *std.Thread.WaitGroup,
|
||||
group: *Io.Group,
|
||||
b: *std.Build,
|
||||
s: *Step,
|
||||
prog_node: std.Progress.Node,
|
||||
run: *Run,
|
||||
) void {
|
||||
const thread_pool = &run.thread_pool;
|
||||
const io = b.graph.io;
|
||||
const gpa = run.gpa;
|
||||
|
||||
// First, check the conditions for running this step. If they are not met,
|
||||
// then we return without doing the step, relying on another worker to
|
||||
|
|
@ -1347,8 +1329,8 @@ fn workerMakeOneStep(
|
|||
}
|
||||
|
||||
if (s.max_rss != 0) {
|
||||
run.max_rss_mutex.lock();
|
||||
defer run.max_rss_mutex.unlock();
|
||||
run.max_rss_mutex.lockUncancelable(io);
|
||||
defer run.max_rss_mutex.unlock(io);
|
||||
|
||||
// Avoid running steps twice.
|
||||
if (s.state != .precheck_done) {
|
||||
|
|
@ -1360,7 +1342,7 @@ fn workerMakeOneStep(
|
|||
if (new_claimed_rss > run.max_rss) {
|
||||
// Running this step right now could possibly exceed the allotted RSS.
|
||||
// Add this step to the queue of memory-blocked steps.
|
||||
run.memory_blocked_steps.append(run.gpa, s) catch @panic("OOM");
|
||||
run.memory_blocked_steps.append(gpa, s) catch @panic("OOM");
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -1381,12 +1363,11 @@ fn workerMakeOneStep(
|
|||
|
||||
const make_result = s.make(.{
|
||||
.progress_node = sub_prog_node,
|
||||
.thread_pool = thread_pool,
|
||||
.watch = run.watch,
|
||||
.web_server = if (run.web_server) |*ws| ws else null,
|
||||
.ttyconf = run.ttyconf,
|
||||
.unit_test_timeout_ns = run.unit_test_timeout_ns,
|
||||
.gpa = run.gpa,
|
||||
.gpa = gpa,
|
||||
});
|
||||
|
||||
// No matter the result, we want to display error/warning messages.
|
||||
|
|
@ -1397,7 +1378,7 @@ fn workerMakeOneStep(
|
|||
const bw, _ = std.debug.lockStderrWriter(&stdio_buffer_allocation);
|
||||
defer std.debug.unlockStderrWriter();
|
||||
const ttyconf = run.ttyconf;
|
||||
printErrorMessages(run.gpa, s, .{}, bw, ttyconf, run.error_style, run.multiline_errors) catch {};
|
||||
printErrorMessages(gpa, s, .{}, bw, ttyconf, run.error_style, run.multiline_errors) catch {};
|
||||
}
|
||||
|
||||
handle_result: {
|
||||
|
|
@ -1419,40 +1400,43 @@ fn workerMakeOneStep(
|
|||
|
||||
// Successful completion of a step, so we queue up its dependants as well.
|
||||
for (s.dependants.items) |dep| {
|
||||
thread_pool.spawnWg(wg, workerMakeOneStep, .{
|
||||
wg, b, dep, prog_node, run,
|
||||
});
|
||||
group.async(io, workerMakeOneStep, .{ group, b, dep, prog_node, run });
|
||||
}
|
||||
}
|
||||
|
||||
// If this is a step that claims resources, we must now queue up other
|
||||
// steps that are waiting for resources.
|
||||
if (s.max_rss != 0) {
|
||||
run.max_rss_mutex.lock();
|
||||
defer run.max_rss_mutex.unlock();
|
||||
var dispatch_deps: std.ArrayList(*Step) = .empty;
|
||||
defer dispatch_deps.deinit(gpa);
|
||||
dispatch_deps.ensureUnusedCapacity(gpa, run.memory_blocked_steps.items.len) catch @panic("OOM");
|
||||
|
||||
// Give the memory back to the scheduler.
|
||||
run.claimed_rss -= s.max_rss;
|
||||
// Avoid kicking off too many tasks that we already know will not have
|
||||
// enough resources.
|
||||
var remaining = run.max_rss - run.claimed_rss;
|
||||
var i: usize = 0;
|
||||
var j: usize = 0;
|
||||
while (j < run.memory_blocked_steps.items.len) : (j += 1) {
|
||||
const dep = run.memory_blocked_steps.items[j];
|
||||
assert(dep.max_rss != 0);
|
||||
if (dep.max_rss <= remaining) {
|
||||
remaining -= dep.max_rss;
|
||||
{
|
||||
run.max_rss_mutex.lockUncancelable(io);
|
||||
defer run.max_rss_mutex.unlock(io);
|
||||
|
||||
thread_pool.spawnWg(wg, workerMakeOneStep, .{
|
||||
wg, b, dep, prog_node, run,
|
||||
});
|
||||
} else {
|
||||
run.memory_blocked_steps.items[i] = dep;
|
||||
i += 1;
|
||||
// Give the memory back to the scheduler.
|
||||
run.claimed_rss -= s.max_rss;
|
||||
// Avoid kicking off too many tasks that we already know will not have
|
||||
// enough resources.
|
||||
var remaining = run.max_rss - run.claimed_rss;
|
||||
var i: usize = 0;
|
||||
for (run.memory_blocked_steps.items) |dep| {
|
||||
assert(dep.max_rss != 0);
|
||||
if (dep.max_rss <= remaining) {
|
||||
remaining -= dep.max_rss;
|
||||
dispatch_deps.appendAssumeCapacity(dep);
|
||||
} else {
|
||||
run.memory_blocked_steps.items[i] = dep;
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
run.memory_blocked_steps.shrinkRetainingCapacity(i);
|
||||
}
|
||||
for (dispatch_deps.items) |dep| {
|
||||
// Must be called without max_rss_mutex held in case it executes recursively.
|
||||
group.async(io, workerMakeOneStep, .{ group, b, dep, prog_node, run });
|
||||
}
|
||||
run.memory_blocked_steps.shrinkRetainingCapacity(i);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
13
lib/compiler/resinator/code_pages.zig
vendored
13
lib/compiler/resinator/code_pages.zig
vendored
|
|
@ -179,12 +179,13 @@ pub const UnsupportedCodePage = enum(u16) {
|
|||
|
||||
pub const CodePage = blk: {
|
||||
const fields = @typeInfo(SupportedCodePage).@"enum".fields ++ @typeInfo(UnsupportedCodePage).@"enum".fields;
|
||||
break :blk @Type(.{ .@"enum" = .{
|
||||
.tag_type = u16,
|
||||
.decls = &.{},
|
||||
.fields = fields,
|
||||
.is_exhaustive = true,
|
||||
} });
|
||||
var field_names: [fields.len][]const u8 = undefined;
|
||||
var field_values: [fields.len]u16 = undefined;
|
||||
for (fields, &field_names, &field_values) |field, *name, *val| {
|
||||
name.* = field.name;
|
||||
val.* = field.value;
|
||||
}
|
||||
break :blk @Enum(u16, .exhaustive, &field_names, &field_values);
|
||||
};
|
||||
|
||||
pub fn isSupported(code_page: CodePage) bool {
|
||||
|
|
|
|||
2
lib/compiler/resinator/compile.zig
vendored
2
lib/compiler/resinator/compile.zig
vendored
|
|
@ -2914,7 +2914,7 @@ fn validateSearchPath(path: []const u8) error{BadPathName}!void {
|
|||
// (e.g. the NT \??\ prefix, the device \\.\ prefix, etc).
|
||||
// Those path types are something of an unavoidable way to
|
||||
// still hit unreachable during the openDir call.
|
||||
var component_iterator = try std.fs.path.componentIterator(path);
|
||||
var component_iterator = std.fs.path.componentIterator(path);
|
||||
while (component_iterator.next()) |component| {
|
||||
// https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
if (std.mem.indexOfAny(u8, component.name, "\x00<>:\"|?*") != null) return error.BadPathName;
|
||||
|
|
|
|||
21
lib/compiler/resinator/errors.zig
vendored
21
lib/compiler/resinator/errors.zig
vendored
|
|
@ -862,20 +862,23 @@ pub const ErrorDetails = struct {
|
|||
pub const ErrorDetailsWithoutCodePage = blk: {
|
||||
const details_info = @typeInfo(ErrorDetails);
|
||||
const fields = details_info.@"struct".fields;
|
||||
var fields_without_codepage: [fields.len - 1]std.builtin.Type.StructField = undefined;
|
||||
var field_names: [fields.len - 1][]const u8 = undefined;
|
||||
var field_types: [fields.len - 1]type = undefined;
|
||||
var field_attrs: [fields.len - 1]std.builtin.Type.StructField.Attributes = undefined;
|
||||
var i: usize = 0;
|
||||
for (fields) |field| {
|
||||
if (std.mem.eql(u8, field.name, "code_page")) continue;
|
||||
fields_without_codepage[i] = field;
|
||||
field_names[i] = field.name;
|
||||
field_types[i] = field.type;
|
||||
field_attrs[i] = .{
|
||||
.@"comptime" = field.is_comptime,
|
||||
.@"align" = field.alignment,
|
||||
.default_value_ptr = field.default_value_ptr,
|
||||
};
|
||||
i += 1;
|
||||
}
|
||||
std.debug.assert(i == fields_without_codepage.len);
|
||||
break :blk @Type(.{ .@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &fields_without_codepage,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
} });
|
||||
std.debug.assert(i == fields.len - 1);
|
||||
break :blk @Struct(.auto, null, &field_names, &field_types, &field_attrs);
|
||||
};
|
||||
|
||||
fn cellCount(code_page: SupportedCodePage, source: []const u8, start_index: usize, end_index: usize) usize {
|
||||
|
|
|
|||
|
|
@ -298,7 +298,7 @@ fn mainTerminal() void {
|
|||
|
||||
pub fn log(
|
||||
comptime message_level: std.log.Level,
|
||||
comptime scope: @Type(.enum_literal),
|
||||
comptime scope: @EnumLiteral(),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void {
|
||||
|
|
|
|||
|
|
@ -290,10 +290,7 @@ pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeIn
|
|||
pub inline fn fneg(a: anytype) @TypeOf(a) {
|
||||
const F = @TypeOf(a);
|
||||
const bits = @typeInfo(F).float.bits;
|
||||
const U = @Type(.{ .int = .{
|
||||
.signedness = .unsigned,
|
||||
.bits = bits,
|
||||
} });
|
||||
const U = @Int(.unsigned, bits);
|
||||
const sign_bit_mask = @as(U, 1) << (bits - 1);
|
||||
const negated = @as(U, @bitCast(a)) ^ sign_bit_mask;
|
||||
return @bitCast(negated);
|
||||
|
|
|
|||
|
|
@ -66,17 +66,17 @@ pub inline fn floatFromBigInt(comptime T: type, comptime signedness: std.builtin
|
|||
switch (x.len) {
|
||||
0 => return 0,
|
||||
inline 1...4 => |limbs_len| return @floatFromInt(@as(
|
||||
@Type(.{ .int = .{ .signedness = signedness, .bits = 32 * limbs_len } }),
|
||||
@Int(signedness, 32 * limbs_len),
|
||||
@bitCast(x[0..limbs_len].*),
|
||||
)),
|
||||
else => {},
|
||||
}
|
||||
|
||||
// sign implicit fraction round sticky
|
||||
const I = comptime @Type(.{ .int = .{
|
||||
.signedness = signedness,
|
||||
.bits = @as(u16, @intFromBool(signedness == .signed)) + 1 + math.floatFractionalBits(T) + 1 + 1,
|
||||
} });
|
||||
const I = comptime @Int(
|
||||
signedness,
|
||||
@as(u16, @intFromBool(signedness == .signed)) + 1 + math.floatFractionalBits(T) + 1 + 1,
|
||||
);
|
||||
|
||||
const clrsb = clrsb: {
|
||||
var clsb: usize = 0;
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ pub inline fn bigIntFromFloat(comptime signedness: std.builtin.Signedness, resul
|
|||
0 => return,
|
||||
inline 1...4 => |limbs_len| {
|
||||
result[0..limbs_len].* = @bitCast(@as(
|
||||
@Type(.{ .int = .{ .signedness = signedness, .bits = 32 * limbs_len } }),
|
||||
@Int(signedness, 32 * limbs_len),
|
||||
@intFromFloat(a),
|
||||
));
|
||||
return;
|
||||
|
|
@ -66,10 +66,7 @@ pub inline fn bigIntFromFloat(comptime signedness: std.builtin.Signedness, resul
|
|||
|
||||
// sign implicit fraction
|
||||
const significand_bits = 1 + math.floatFractionalBits(@TypeOf(a));
|
||||
const I = @Type(comptime .{ .int = .{
|
||||
.signedness = signedness,
|
||||
.bits = @as(u16, @intFromBool(signedness == .signed)) + significand_bits,
|
||||
} });
|
||||
const I = @Int(signedness, @as(u16, @intFromBool(signedness == .signed)) + significand_bits);
|
||||
|
||||
const parts = math.frexp(a);
|
||||
const significand_bits_adjusted_to_handle_smin = @as(i32, significand_bits) +
|
||||
|
|
|
|||
|
|
@ -159,7 +159,7 @@ inline fn copyFixedLength(
|
|||
else if (len > @sizeOf(usize))
|
||||
@Vector(len, u8)
|
||||
else
|
||||
@Type(.{ .int = .{ .signedness = .unsigned, .bits = len * 8 } });
|
||||
@Int(.unsigned, len * 8);
|
||||
|
||||
const loop_count = @divExact(len, @sizeOf(T));
|
||||
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ pub fn panic(msg: []const u8, st: ?*std.builtin.StackTrace, addr: ?usize) noretu
|
|||
|
||||
fn logFn(
|
||||
comptime message_level: log.Level,
|
||||
comptime scope: @TypeOf(.enum_literal),
|
||||
comptime scope: @EnumLiteral(),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void {
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ pub const std_options = std.Options{
|
|||
|
||||
fn logOverride(
|
||||
comptime level: std.log.Level,
|
||||
comptime scope: @Type(.enum_literal),
|
||||
comptime scope: @EnumLiteral(),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void {
|
||||
|
|
|
|||
|
|
@ -416,7 +416,7 @@ fn createChildOnly(
|
|||
fn userInputOptionsFromArgs(arena: Allocator, args: anytype) UserInputOptionsMap {
|
||||
var map = UserInputOptionsMap.init(arena);
|
||||
inline for (@typeInfo(@TypeOf(args)).@"struct".fields) |field| {
|
||||
if (field.type == @Type(.null)) continue;
|
||||
if (field.type == @TypeOf(null)) continue;
|
||||
addUserInputOptionFromArg(arena, &map, field, field.type, @field(args, field.name));
|
||||
}
|
||||
return map;
|
||||
|
|
@ -526,16 +526,11 @@ fn addUserInputOptionFromArg(
|
|||
.pointer => |ptr_info| switch (ptr_info.size) {
|
||||
.one => switch (@typeInfo(ptr_info.child)) {
|
||||
.array => |array_info| {
|
||||
comptime var slice_info = ptr_info;
|
||||
slice_info.size = .slice;
|
||||
slice_info.is_const = true;
|
||||
slice_info.child = array_info.child;
|
||||
slice_info.sentinel_ptr = null;
|
||||
addUserInputOptionFromArg(
|
||||
arena,
|
||||
map,
|
||||
field,
|
||||
@Type(.{ .pointer = slice_info }),
|
||||
@Pointer(.slice, .{ .@"const" = true }, array_info.child, null),
|
||||
maybe_value orelse null,
|
||||
);
|
||||
return;
|
||||
|
|
@ -553,14 +548,11 @@ fn addUserInputOptionFromArg(
|
|||
}) catch @panic("OOM");
|
||||
},
|
||||
else => {
|
||||
comptime var slice_info = ptr_info;
|
||||
slice_info.is_const = true;
|
||||
slice_info.sentinel_ptr = null;
|
||||
addUserInputOptionFromArg(
|
||||
arena,
|
||||
map,
|
||||
field,
|
||||
@Type(.{ .pointer = slice_info }),
|
||||
@Pointer(ptr_info.size, .{ .@"const" = true }, ptr_info.child, null),
|
||||
maybe_value orelse null,
|
||||
);
|
||||
return;
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ manifest_dir: fs.Dir,
|
|||
hash: HashHelper = .{},
|
||||
/// This value is accessed from multiple threads, protected by mutex.
|
||||
recent_problematic_timestamp: Io.Timestamp = .zero,
|
||||
mutex: std.Thread.Mutex = .{},
|
||||
mutex: Io.Mutex = .init,
|
||||
|
||||
/// A set of strings such as the zig library directory or project source root, which
|
||||
/// are stripped from the file paths before putting into the cache. They
|
||||
|
|
@ -104,9 +104,7 @@ fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath {
|
|||
fn getPrefixSubpath(allocator: Allocator, prefix: []const u8, path: []u8) ![]u8 {
|
||||
const relative = try fs.path.relative(allocator, prefix, path);
|
||||
errdefer allocator.free(relative);
|
||||
var component_iterator = fs.path.NativeComponentIterator.init(relative) catch {
|
||||
return error.NotASubPath;
|
||||
};
|
||||
var component_iterator = fs.path.NativeComponentIterator.init(relative);
|
||||
if (component_iterator.root() != null) {
|
||||
return error.NotASubPath;
|
||||
}
|
||||
|
|
@ -474,6 +472,7 @@ pub const Manifest = struct {
|
|||
/// A cache manifest file exists however it could not be parsed.
|
||||
InvalidFormat,
|
||||
OutOfMemory,
|
||||
Canceled,
|
||||
};
|
||||
|
||||
/// Check the cache to see if the input exists in it. If it exists, returns `true`.
|
||||
|
|
@ -559,12 +558,14 @@ pub const Manifest = struct {
|
|||
self.diagnostic = .{ .manifest_create = error.FileNotFound };
|
||||
return error.CacheCheckFailed;
|
||||
},
|
||||
error.Canceled => return error.Canceled,
|
||||
else => |e| {
|
||||
self.diagnostic = .{ .manifest_create = e };
|
||||
return error.CacheCheckFailed;
|
||||
},
|
||||
}
|
||||
},
|
||||
error.Canceled => return error.Canceled,
|
||||
else => |e| {
|
||||
self.diagnostic = .{ .manifest_create = e };
|
||||
return error.CacheCheckFailed;
|
||||
|
|
@ -762,6 +763,7 @@ pub const Manifest = struct {
|
|||
// Every digest before this one has been populated successfully.
|
||||
return .{ .miss = .{ .file_digests_populated = idx } };
|
||||
},
|
||||
error.Canceled => return error.Canceled,
|
||||
else => |e| {
|
||||
self.diagnostic = .{ .file_open = .{
|
||||
.file_index = idx,
|
||||
|
|
@ -790,7 +792,7 @@ pub const Manifest = struct {
|
|||
.inode = actual_stat.inode,
|
||||
};
|
||||
|
||||
if (self.isProblematicTimestamp(cache_hash_file.stat.mtime)) {
|
||||
if (try self.isProblematicTimestamp(cache_hash_file.stat.mtime)) {
|
||||
// The actual file has an unreliable timestamp, force it to be hashed
|
||||
cache_hash_file.stat.mtime = .zero;
|
||||
cache_hash_file.stat.inode = 0;
|
||||
|
|
@ -848,7 +850,9 @@ pub const Manifest = struct {
|
|||
}
|
||||
}
|
||||
|
||||
fn isProblematicTimestamp(man: *Manifest, timestamp: Io.Timestamp) bool {
|
||||
fn isProblematicTimestamp(man: *Manifest, timestamp: Io.Timestamp) error{Canceled}!bool {
|
||||
const io = man.cache.io;
|
||||
|
||||
// If the file_time is prior to the most recent problematic timestamp
|
||||
// then we don't need to access the filesystem.
|
||||
if (timestamp.nanoseconds < man.recent_problematic_timestamp.nanoseconds)
|
||||
|
|
@ -856,8 +860,8 @@ pub const Manifest = struct {
|
|||
|
||||
// Next we will check the globally shared Cache timestamp, which is accessed
|
||||
// from multiple threads.
|
||||
man.cache.mutex.lock();
|
||||
defer man.cache.mutex.unlock();
|
||||
try man.cache.mutex.lock(io);
|
||||
defer man.cache.mutex.unlock(io);
|
||||
|
||||
// Save the global one to our local one to avoid locking next time.
|
||||
man.recent_problematic_timestamp = man.cache.recent_problematic_timestamp;
|
||||
|
|
@ -871,11 +875,18 @@ pub const Manifest = struct {
|
|||
var file = man.cache.manifest_dir.createFile("timestamp", .{
|
||||
.read = true,
|
||||
.truncate = true,
|
||||
}) catch return true;
|
||||
}) catch |err| switch (err) {
|
||||
error.Canceled => return error.Canceled,
|
||||
else => return true,
|
||||
};
|
||||
defer file.close();
|
||||
|
||||
// Save locally and also save globally (we still hold the global lock).
|
||||
man.recent_problematic_timestamp = (file.stat() catch return true).mtime;
|
||||
const stat = file.stat() catch |err| switch (err) {
|
||||
error.Canceled => return error.Canceled,
|
||||
else => return true,
|
||||
};
|
||||
man.recent_problematic_timestamp = stat.mtime;
|
||||
man.cache.recent_problematic_timestamp = man.recent_problematic_timestamp;
|
||||
}
|
||||
|
||||
|
|
@ -902,7 +913,7 @@ pub const Manifest = struct {
|
|||
.inode = actual_stat.inode,
|
||||
};
|
||||
|
||||
if (self.isProblematicTimestamp(ch_file.stat.mtime)) {
|
||||
if (try self.isProblematicTimestamp(ch_file.stat.mtime)) {
|
||||
// The actual file has an unreliable timestamp, force it to be hashed
|
||||
ch_file.stat.mtime = .zero;
|
||||
ch_file.stat.inode = 0;
|
||||
|
|
@ -1038,7 +1049,7 @@ pub const Manifest = struct {
|
|||
.contents = null,
|
||||
};
|
||||
|
||||
if (self.isProblematicTimestamp(new_file.stat.mtime)) {
|
||||
if (try self.isProblematicTimestamp(new_file.stat.mtime)) {
|
||||
// The actual file has an unreliable timestamp, force it to be hashed
|
||||
new_file.stat.mtime = .zero;
|
||||
new_file.stat.inode = 0;
|
||||
|
|
|
|||
|
|
@ -22,17 +22,16 @@ mode: Mode,
|
|||
/// Allocated into `gpa`.
|
||||
run_steps: []const *Step.Run,
|
||||
|
||||
wait_group: std.Thread.WaitGroup,
|
||||
group: Io.Group,
|
||||
root_prog_node: std.Progress.Node,
|
||||
prog_node: std.Progress.Node,
|
||||
thread_pool: *std.Thread.Pool,
|
||||
|
||||
/// Protects `coverage_files`.
|
||||
coverage_mutex: std.Thread.Mutex,
|
||||
coverage_mutex: Io.Mutex,
|
||||
coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
|
||||
|
||||
queue_mutex: std.Thread.Mutex,
|
||||
queue_cond: std.Thread.Condition,
|
||||
queue_mutex: Io.Mutex,
|
||||
queue_cond: Io.Condition,
|
||||
msg_queue: std.ArrayList(Msg),
|
||||
|
||||
pub const Mode = union(enum) {
|
||||
|
|
@ -78,7 +77,6 @@ const CoverageMap = struct {
|
|||
pub fn init(
|
||||
gpa: Allocator,
|
||||
io: Io,
|
||||
thread_pool: *std.Thread.Pool,
|
||||
ttyconf: tty.Config,
|
||||
all_steps: []const *Build.Step,
|
||||
root_prog_node: std.Progress.Node,
|
||||
|
|
@ -89,20 +87,22 @@ pub fn init(
|
|||
defer steps.deinit(gpa);
|
||||
const rebuild_node = root_prog_node.start("Rebuilding Unit Tests", 0);
|
||||
defer rebuild_node.end();
|
||||
var rebuild_wg: std.Thread.WaitGroup = .{};
|
||||
defer rebuild_wg.wait();
|
||||
var rebuild_group: Io.Group = .init;
|
||||
defer rebuild_group.cancel(io);
|
||||
|
||||
for (all_steps) |step| {
|
||||
const run = step.cast(Step.Run) orelse continue;
|
||||
if (run.producer == null) continue;
|
||||
if (run.fuzz_tests.items.len == 0) continue;
|
||||
try steps.append(gpa, run);
|
||||
thread_pool.spawnWg(&rebuild_wg, rebuildTestsWorkerRun, .{ run, gpa, ttyconf, rebuild_node });
|
||||
rebuild_group.async(io, rebuildTestsWorkerRun, .{ run, gpa, ttyconf, rebuild_node });
|
||||
}
|
||||
|
||||
if (steps.items.len == 0) fatal("no fuzz tests found", .{});
|
||||
rebuild_node.setEstimatedTotalItems(steps.items.len);
|
||||
break :steps try gpa.dupe(*Step.Run, steps.items);
|
||||
const run_steps = try gpa.dupe(*Step.Run, steps.items);
|
||||
rebuild_group.wait(io);
|
||||
break :steps run_steps;
|
||||
};
|
||||
errdefer gpa.free(run_steps);
|
||||
|
||||
|
|
@ -118,42 +118,38 @@ pub fn init(
|
|||
.ttyconf = ttyconf,
|
||||
.mode = mode,
|
||||
.run_steps = run_steps,
|
||||
.wait_group = .{},
|
||||
.thread_pool = thread_pool,
|
||||
.group = .init,
|
||||
.root_prog_node = root_prog_node,
|
||||
.prog_node = .none,
|
||||
.coverage_files = .empty,
|
||||
.coverage_mutex = .{},
|
||||
.queue_mutex = .{},
|
||||
.coverage_mutex = .init,
|
||||
.queue_mutex = .init,
|
||||
.queue_cond = .{},
|
||||
.msg_queue = .empty,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn start(fuzz: *Fuzz) void {
|
||||
const io = fuzz.io;
|
||||
fuzz.prog_node = fuzz.root_prog_node.start("Fuzzing", fuzz.run_steps.len);
|
||||
|
||||
if (fuzz.mode == .forever) {
|
||||
// For polling messages and sending updates to subscribers.
|
||||
fuzz.wait_group.start();
|
||||
_ = std.Thread.spawn(.{}, coverageRun, .{fuzz}) catch |err| {
|
||||
fuzz.wait_group.finish();
|
||||
fatal("unable to spawn coverage thread: {s}", .{@errorName(err)});
|
||||
};
|
||||
fuzz.group.concurrent(io, coverageRun, .{fuzz}) catch |err|
|
||||
fatal("unable to spawn coverage task: {t}", .{err});
|
||||
}
|
||||
|
||||
for (fuzz.run_steps) |run| {
|
||||
for (run.fuzz_tests.items) |unit_test_index| {
|
||||
assert(run.rebuilt_executable != null);
|
||||
fuzz.thread_pool.spawnWg(&fuzz.wait_group, fuzzWorkerRun, .{
|
||||
fuzz, run, unit_test_index,
|
||||
});
|
||||
fuzz.group.async(io, fuzzWorkerRun, .{ fuzz, run, unit_test_index });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(fuzz: *Fuzz) void {
|
||||
if (!fuzz.wait_group.isDone()) @panic("TODO: terminate the fuzzer processes");
|
||||
const io = fuzz.io;
|
||||
fuzz.group.cancel(io);
|
||||
fuzz.prog_node.end();
|
||||
fuzz.gpa.free(fuzz.run_steps);
|
||||
}
|
||||
|
|
@ -161,9 +157,7 @@ pub fn deinit(fuzz: *Fuzz) void {
|
|||
fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: tty.Config, parent_prog_node: std.Progress.Node) void {
|
||||
rebuildTestsWorkerRunFallible(run, gpa, ttyconf, parent_prog_node) catch |err| {
|
||||
const compile = run.producer.?;
|
||||
log.err("step '{s}': failed to rebuild in fuzz mode: {s}", .{
|
||||
compile.step.name, @errorName(err),
|
||||
});
|
||||
log.err("step '{s}': failed to rebuild in fuzz mode: {t}", .{ compile.step.name, err });
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -212,9 +206,7 @@ fn fuzzWorkerRun(
|
|||
return;
|
||||
},
|
||||
else => {
|
||||
log.err("step '{s}': failed to rerun '{s}' in fuzz mode: {s}", .{
|
||||
run.step.name, test_name, @errorName(err),
|
||||
});
|
||||
log.err("step '{s}': failed to rerun '{s}' in fuzz mode: {t}", .{ run.step.name, test_name, err });
|
||||
return;
|
||||
},
|
||||
};
|
||||
|
|
@ -273,8 +265,10 @@ pub fn sendUpdate(
|
|||
socket: *std.http.Server.WebSocket,
|
||||
prev: *Previous,
|
||||
) !void {
|
||||
fuzz.coverage_mutex.lock();
|
||||
defer fuzz.coverage_mutex.unlock();
|
||||
const io = fuzz.io;
|
||||
|
||||
try fuzz.coverage_mutex.lock(io);
|
||||
defer fuzz.coverage_mutex.unlock(io);
|
||||
|
||||
const coverage_maps = fuzz.coverage_files.values();
|
||||
if (coverage_maps.len == 0) return;
|
||||
|
|
@ -335,32 +329,41 @@ pub fn sendUpdate(
|
|||
}
|
||||
|
||||
fn coverageRun(fuzz: *Fuzz) void {
|
||||
defer fuzz.wait_group.finish();
|
||||
coverageRunCancelable(fuzz) catch |err| switch (err) {
|
||||
error.Canceled => return,
|
||||
};
|
||||
}
|
||||
|
||||
fuzz.queue_mutex.lock();
|
||||
defer fuzz.queue_mutex.unlock();
|
||||
fn coverageRunCancelable(fuzz: *Fuzz) Io.Cancelable!void {
|
||||
const io = fuzz.io;
|
||||
|
||||
try fuzz.queue_mutex.lock(io);
|
||||
defer fuzz.queue_mutex.unlock(io);
|
||||
|
||||
while (true) {
|
||||
fuzz.queue_cond.wait(&fuzz.queue_mutex);
|
||||
try fuzz.queue_cond.wait(io, &fuzz.queue_mutex);
|
||||
for (fuzz.msg_queue.items) |msg| switch (msg) {
|
||||
.coverage => |coverage| prepareTables(fuzz, coverage.run, coverage.id) catch |err| switch (err) {
|
||||
error.AlreadyReported => continue,
|
||||
else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
|
||||
error.Canceled => return,
|
||||
else => |e| log.err("failed to prepare code coverage tables: {t}", .{e}),
|
||||
},
|
||||
.entry_point => |entry_point| addEntryPoint(fuzz, entry_point.coverage_id, entry_point.addr) catch |err| switch (err) {
|
||||
error.AlreadyReported => continue,
|
||||
else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
|
||||
error.Canceled => return,
|
||||
else => |e| log.err("failed to prepare code coverage tables: {t}", .{e}),
|
||||
},
|
||||
};
|
||||
fuzz.msg_queue.clearRetainingCapacity();
|
||||
}
|
||||
}
|
||||
fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutOfMemory, AlreadyReported }!void {
|
||||
fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutOfMemory, AlreadyReported, Canceled }!void {
|
||||
assert(fuzz.mode == .forever);
|
||||
const ws = fuzz.mode.forever.ws;
|
||||
const io = fuzz.io;
|
||||
|
||||
fuzz.coverage_mutex.lock();
|
||||
defer fuzz.coverage_mutex.unlock();
|
||||
try fuzz.coverage_mutex.lock(io);
|
||||
defer fuzz.coverage_mutex.unlock(io);
|
||||
|
||||
const gop = try fuzz.coverage_files.getOrPut(fuzz.gpa, coverage_id);
|
||||
if (gop.found_existing) {
|
||||
|
|
@ -391,8 +394,8 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
|
|||
target.ofmt,
|
||||
target.cpu.arch,
|
||||
) catch |err| {
|
||||
log.err("step '{s}': failed to load debug information for '{f}': {s}", .{
|
||||
run_step.step.name, rebuilt_exe_path, @errorName(err),
|
||||
log.err("step '{s}': failed to load debug information for '{f}': {t}", .{
|
||||
run_step.step.name, rebuilt_exe_path, err,
|
||||
});
|
||||
return error.AlreadyReported;
|
||||
};
|
||||
|
|
@ -403,15 +406,15 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
|
|||
.sub_path = "v/" ++ std.fmt.hex(coverage_id),
|
||||
};
|
||||
var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
|
||||
log.err("step '{s}': failed to load coverage file '{f}': {s}", .{
|
||||
run_step.step.name, coverage_file_path, @errorName(err),
|
||||
log.err("step '{s}': failed to load coverage file '{f}': {t}", .{
|
||||
run_step.step.name, coverage_file_path, err,
|
||||
});
|
||||
return error.AlreadyReported;
|
||||
};
|
||||
defer coverage_file.close();
|
||||
|
||||
const file_size = coverage_file.getEndPos() catch |err| {
|
||||
log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
|
||||
log.err("unable to check len of coverage file '{f}': {t}", .{ coverage_file_path, err });
|
||||
return error.AlreadyReported;
|
||||
};
|
||||
|
||||
|
|
@ -423,7 +426,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
|
|||
coverage_file.handle,
|
||||
0,
|
||||
) catch |err| {
|
||||
log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
|
||||
log.err("failed to map coverage file '{f}': {t}", .{ coverage_file_path, err });
|
||||
return error.AlreadyReported;
|
||||
};
|
||||
gop.value_ptr.mapped_memory = mapped_memory;
|
||||
|
|
@ -449,7 +452,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
|
|||
}{ .addrs = sorted_pcs.items(.pc) });
|
||||
|
||||
debug_info.resolveAddresses(fuzz.gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| {
|
||||
log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)});
|
||||
log.err("failed to resolve addresses to source locations: {t}", .{err});
|
||||
return error.AlreadyReported;
|
||||
};
|
||||
|
||||
|
|
@ -459,9 +462,11 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
|
|||
ws.notifyUpdate();
|
||||
}
|
||||
|
||||
fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReported, OutOfMemory }!void {
|
||||
fuzz.coverage_mutex.lock();
|
||||
defer fuzz.coverage_mutex.unlock();
|
||||
fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReported, OutOfMemory, Canceled }!void {
|
||||
const io = fuzz.io;
|
||||
|
||||
try fuzz.coverage_mutex.lock(io);
|
||||
defer fuzz.coverage_mutex.unlock(io);
|
||||
|
||||
const coverage_map = fuzz.coverage_files.getPtr(coverage_id).?;
|
||||
const header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
|
||||
|
|
@ -511,8 +516,8 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {
|
|||
assert(fuzz.mode == .limit);
|
||||
const io = fuzz.io;
|
||||
|
||||
fuzz.wait_group.wait();
|
||||
fuzz.wait_group.reset();
|
||||
fuzz.group.wait(io);
|
||||
fuzz.group = .init;
|
||||
|
||||
std.debug.print("======= FUZZING REPORT =======\n", .{});
|
||||
for (fuzz.msg_queue.items) |msg| {
|
||||
|
|
@ -524,8 +529,8 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {
|
|||
.sub_path = "v/" ++ std.fmt.hex(cov.id),
|
||||
};
|
||||
var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
|
||||
fatal("step '{s}': failed to load coverage file '{f}': {s}", .{
|
||||
cov.run.step.name, coverage_file_path, @errorName(err),
|
||||
fatal("step '{s}': failed to load coverage file '{f}': {t}", .{
|
||||
cov.run.step.name, coverage_file_path, err,
|
||||
});
|
||||
};
|
||||
defer coverage_file.close();
|
||||
|
|
@ -536,8 +541,8 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {
|
|||
|
||||
var header: fuzz_abi.SeenPcsHeader = undefined;
|
||||
r.interface.readSliceAll(std.mem.asBytes(&header)) catch |err| {
|
||||
fatal("step '{s}': failed to read from coverage file '{f}': {s}", .{
|
||||
cov.run.step.name, coverage_file_path, @errorName(err),
|
||||
fatal("step '{s}': failed to read from coverage file '{f}': {t}", .{
|
||||
cov.run.step.name, coverage_file_path, err,
|
||||
});
|
||||
};
|
||||
|
||||
|
|
@ -551,8 +556,8 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {
|
|||
const chunk_count = fuzz_abi.SeenPcsHeader.seenElemsLen(header.pcs_len);
|
||||
for (0..chunk_count) |_| {
|
||||
const seen = r.interface.takeInt(usize, .little) catch |err| {
|
||||
fatal("step '{s}': failed to read from coverage file '{f}': {s}", .{
|
||||
cov.run.step.name, coverage_file_path, @errorName(err),
|
||||
fatal("step '{s}': failed to read from coverage file '{f}': {t}", .{
|
||||
cov.run.step.name, coverage_file_path, err,
|
||||
});
|
||||
};
|
||||
seen_count += @popCount(seen);
|
||||
|
|
|
|||
|
|
@ -110,7 +110,6 @@ pub const TestResults = struct {
|
|||
|
||||
pub const MakeOptions = struct {
|
||||
progress_node: std.Progress.Node,
|
||||
thread_pool: *std.Thread.Pool,
|
||||
watch: bool,
|
||||
web_server: switch (builtin.target.cpu.arch) {
|
||||
else => ?*Build.WebServer,
|
||||
|
|
@ -363,7 +362,7 @@ pub fn captureChildProcess(
|
|||
.allocator = arena,
|
||||
.argv = argv,
|
||||
.progress_node = progress_node,
|
||||
}) catch |err| return s.fail("failed to run {s}: {s}", .{ argv[0], @errorName(err) });
|
||||
}) catch |err| return s.fail("failed to run {s}: {t}", .{ argv[0], err });
|
||||
|
||||
if (result.stderr.len > 0) {
|
||||
try s.result_error_msgs.append(arena, result.stderr);
|
||||
|
|
@ -413,7 +412,7 @@ pub fn evalZigProcess(
|
|||
error.BrokenPipe => {
|
||||
// Process restart required.
|
||||
const term = zp.child.wait() catch |e| {
|
||||
return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(e) });
|
||||
return s.fail("unable to wait for {s}: {t}", .{ argv[0], e });
|
||||
};
|
||||
_ = term;
|
||||
s.clearZigProcess(gpa);
|
||||
|
|
@ -429,7 +428,7 @@ pub fn evalZigProcess(
|
|||
if (s.result_error_msgs.items.len > 0 and result == null) {
|
||||
// Crash detected.
|
||||
const term = zp.child.wait() catch |e| {
|
||||
return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(e) });
|
||||
return s.fail("unable to wait for {s}: {t}", .{ argv[0], e });
|
||||
};
|
||||
s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0;
|
||||
s.clearZigProcess(gpa);
|
||||
|
|
@ -454,9 +453,7 @@ pub fn evalZigProcess(
|
|||
child.request_resource_usage_statistics = true;
|
||||
child.progress_node = prog_node;
|
||||
|
||||
child.spawn() catch |err| return s.fail("failed to spawn zig compiler {s}: {s}", .{
|
||||
argv[0], @errorName(err),
|
||||
});
|
||||
child.spawn() catch |err| return s.fail("failed to spawn zig compiler {s}: {t}", .{ argv[0], err });
|
||||
|
||||
const zp = try gpa.create(ZigProcess);
|
||||
zp.* = .{
|
||||
|
|
@ -481,7 +478,7 @@ pub fn evalZigProcess(
|
|||
zp.child.stdin = null;
|
||||
|
||||
const term = zp.child.wait() catch |err| {
|
||||
return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(err) });
|
||||
return s.fail("unable to wait for {s}: {t}", .{ argv[0], err });
|
||||
};
|
||||
s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0;
|
||||
|
||||
|
|
@ -514,8 +511,8 @@ pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u
|
|||
const src_path = src_lazy_path.getPath3(b, s);
|
||||
try handleVerbose(b, null, &.{ "install", "-C", b.fmt("{f}", .{src_path}), dest_path });
|
||||
return Io.Dir.updateFile(src_path.root_dir.handle.adaptToNewApi(), io, src_path.sub_path, .cwd(), dest_path, .{}) catch |err| {
|
||||
return s.fail("unable to update file from '{f}' to '{s}': {s}", .{
|
||||
src_path, dest_path, @errorName(err),
|
||||
return s.fail("unable to update file from '{f}' to '{s}': {t}", .{
|
||||
src_path, dest_path, err,
|
||||
});
|
||||
};
|
||||
}
|
||||
|
|
@ -525,9 +522,7 @@ pub fn installDir(s: *Step, dest_path: []const u8) !std.fs.Dir.MakePathStatus {
|
|||
const b = s.owner;
|
||||
try handleVerbose(b, null, &.{ "install", "-d", dest_path });
|
||||
return std.fs.cwd().makePathStatus(dest_path) catch |err| {
|
||||
return s.fail("unable to create dir '{s}': {s}", .{
|
||||
dest_path, @errorName(err),
|
||||
});
|
||||
return s.fail("unable to create dir '{s}': {t}", .{ dest_path, err });
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -826,22 +821,27 @@ pub fn cacheHitAndWatch(s: *Step, man: *Build.Cache.Manifest) !bool {
|
|||
return is_hit;
|
||||
}
|
||||
|
||||
fn failWithCacheError(s: *Step, man: *const Build.Cache.Manifest, err: Build.Cache.Manifest.HitError) error{ OutOfMemory, MakeFailed } {
|
||||
fn failWithCacheError(
|
||||
s: *Step,
|
||||
man: *const Build.Cache.Manifest,
|
||||
err: Build.Cache.Manifest.HitError,
|
||||
) error{ OutOfMemory, Canceled, MakeFailed } {
|
||||
switch (err) {
|
||||
error.CacheCheckFailed => switch (man.diagnostic) {
|
||||
.none => unreachable,
|
||||
.manifest_create, .manifest_read, .manifest_lock => |e| return s.fail("failed to check cache: {s} {s}", .{
|
||||
@tagName(man.diagnostic), @errorName(e),
|
||||
.manifest_create, .manifest_read, .manifest_lock => |e| return s.fail("failed to check cache: {t} {t}", .{
|
||||
man.diagnostic, e,
|
||||
}),
|
||||
.file_open, .file_stat, .file_read, .file_hash => |op| {
|
||||
const pp = man.files.keys()[op.file_index].prefixed_path;
|
||||
const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
|
||||
return s.fail("failed to check cache: '{s}{c}{s}' {s} {s}", .{
|
||||
prefix, std.fs.path.sep, pp.sub_path, @tagName(man.diagnostic), @errorName(op.err),
|
||||
return s.fail("failed to check cache: '{s}{c}{s}' {t} {t}", .{
|
||||
prefix, std.fs.path.sep, pp.sub_path, man.diagnostic, op.err,
|
||||
});
|
||||
},
|
||||
},
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.Canceled => return error.Canceled,
|
||||
error.InvalidFormat => return s.fail("failed to check cache: invalid manifest file format", .{}),
|
||||
}
|
||||
}
|
||||
|
|
@ -851,7 +851,7 @@ fn failWithCacheError(s: *Step, man: *const Build.Cache.Manifest, err: Build.Cac
|
|||
pub fn writeManifest(s: *Step, man: *Build.Cache.Manifest) !void {
|
||||
if (s.test_results.isSuccess()) {
|
||||
man.writeManifest() catch |err| {
|
||||
try s.addError("unable to write cache manifest: {s}", .{@errorName(err)});
|
||||
try s.addError("unable to write cache manifest: {t}", .{err});
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1151,7 +1151,6 @@ pub fn rerunInFuzzMode(
|
|||
const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
|
||||
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, .{
|
||||
.progress_node = prog_node,
|
||||
.thread_pool = undefined, // not used by `runCommand`
|
||||
.watch = undefined, // not used by `runCommand`
|
||||
.web_server = null, // only needed for time reports
|
||||
.ttyconf = fuzz.ttyconf,
|
||||
|
|
@ -1831,6 +1830,7 @@ fn pollZigTest(
|
|||
} {
|
||||
const gpa = run.step.owner.allocator;
|
||||
const arena = run.step.owner.allocator;
|
||||
const io = run.step.owner.graph.io;
|
||||
|
||||
var sub_prog_node: ?std.Progress.Node = null;
|
||||
defer if (sub_prog_node) |n| n.end();
|
||||
|
|
@ -2036,8 +2036,8 @@ fn pollZigTest(
|
|||
|
||||
{
|
||||
const fuzz = fuzz_context.?.fuzz;
|
||||
fuzz.queue_mutex.lock();
|
||||
defer fuzz.queue_mutex.unlock();
|
||||
fuzz.queue_mutex.lockUncancelable(io);
|
||||
defer fuzz.queue_mutex.unlock(io);
|
||||
try fuzz.msg_queue.append(fuzz.gpa, .{ .coverage = .{
|
||||
.id = coverage_id.?,
|
||||
.cumulative = .{
|
||||
|
|
@ -2047,20 +2047,20 @@ fn pollZigTest(
|
|||
},
|
||||
.run = run,
|
||||
} });
|
||||
fuzz.queue_cond.signal();
|
||||
fuzz.queue_cond.signal(io);
|
||||
}
|
||||
},
|
||||
.fuzz_start_addr => {
|
||||
const fuzz = fuzz_context.?.fuzz;
|
||||
const addr = body_r.takeInt(u64, .little) catch unreachable;
|
||||
{
|
||||
fuzz.queue_mutex.lock();
|
||||
defer fuzz.queue_mutex.unlock();
|
||||
fuzz.queue_mutex.lockUncancelable(io);
|
||||
defer fuzz.queue_mutex.unlock(io);
|
||||
try fuzz.msg_queue.append(fuzz.gpa, .{ .entry_point = .{
|
||||
.addr = addr,
|
||||
.coverage_id = coverage_id.?,
|
||||
} });
|
||||
fuzz.queue_cond.signal();
|
||||
fuzz.queue_cond.signal(io);
|
||||
}
|
||||
},
|
||||
else => {}, // ignore other messages
|
||||
|
|
|
|||
|
|
@ -167,7 +167,7 @@ pub fn setPaths(fse: *FsEvents, gpa: Allocator, steps: []const *std.Build.Step)
|
|||
}.lessThan);
|
||||
need_dirs.clearRetainingCapacity();
|
||||
for (old_dirs) |dir_path| {
|
||||
var it: std.fs.path.ComponentIterator(.posix, u8) = try .init(dir_path);
|
||||
var it: std.fs.path.ComponentIterator(.posix, u8) = .init(dir_path);
|
||||
while (it.next()) |component| {
|
||||
if (need_dirs.contains(component.path)) {
|
||||
// this path is '/foo/bar/qux', but '/foo' or '/foo/bar' was already added
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
gpa: Allocator,
|
||||
thread_pool: *std.Thread.Pool,
|
||||
graph: *const Build.Graph,
|
||||
all_steps: []const *Build.Step,
|
||||
listen_address: net.IpAddress,
|
||||
|
|
@ -20,7 +19,7 @@ step_names_trailing: []u8,
|
|||
step_status_bits: []u8,
|
||||
|
||||
fuzz: ?Fuzz,
|
||||
time_report_mutex: std.Thread.Mutex,
|
||||
time_report_mutex: Io.Mutex,
|
||||
time_report_msgs: [][]u8,
|
||||
time_report_update_times: []i64,
|
||||
|
||||
|
|
@ -34,9 +33,9 @@ build_status: std.atomic.Value(abi.BuildStatus),
|
|||
/// an unreasonable number of packets.
|
||||
update_id: std.atomic.Value(u32),
|
||||
|
||||
runner_request_mutex: std.Thread.Mutex,
|
||||
runner_request_ready_cond: std.Thread.Condition,
|
||||
runner_request_empty_cond: std.Thread.Condition,
|
||||
runner_request_mutex: Io.Mutex,
|
||||
runner_request_ready_cond: Io.Condition,
|
||||
runner_request_empty_cond: Io.Condition,
|
||||
runner_request: ?RunnerRequest,
|
||||
|
||||
/// If a client is not explicitly notified of changes with `notifyUpdate`, it will be sent updates
|
||||
|
|
@ -53,7 +52,6 @@ pub fn notifyUpdate(ws: *WebServer) void {
|
|||
|
||||
pub const Options = struct {
|
||||
gpa: Allocator,
|
||||
thread_pool: *std.Thread.Pool,
|
||||
ttyconf: Io.tty.Config,
|
||||
graph: *const std.Build.Graph,
|
||||
all_steps: []const *Build.Step,
|
||||
|
|
@ -100,7 +98,6 @@ pub fn init(opts: Options) WebServer {
|
|||
|
||||
return .{
|
||||
.gpa = opts.gpa,
|
||||
.thread_pool = opts.thread_pool,
|
||||
.ttyconf = opts.ttyconf,
|
||||
.graph = opts.graph,
|
||||
.all_steps = all_steps,
|
||||
|
|
@ -117,14 +114,14 @@ pub fn init(opts: Options) WebServer {
|
|||
.step_status_bits = step_status_bits,
|
||||
|
||||
.fuzz = null,
|
||||
.time_report_mutex = .{},
|
||||
.time_report_mutex = .init,
|
||||
.time_report_msgs = time_report_msgs,
|
||||
.time_report_update_times = time_report_update_times,
|
||||
|
||||
.build_status = .init(.idle),
|
||||
.update_id = .init(0),
|
||||
|
||||
.runner_request_mutex = .{},
|
||||
.runner_request_mutex = .init,
|
||||
.runner_request_ready_cond = .{},
|
||||
.runner_request_empty_cond = .{},
|
||||
.runner_request = null,
|
||||
|
|
@ -235,7 +232,6 @@ pub fn finishBuild(ws: *WebServer, opts: struct {
|
|||
ws.fuzz = Fuzz.init(
|
||||
ws.gpa,
|
||||
ws.graph.io,
|
||||
ws.thread_pool,
|
||||
ws.ttyconf,
|
||||
ws.all_steps,
|
||||
ws.root_prog_node,
|
||||
|
|
@ -300,6 +296,8 @@ fn accept(ws: *WebServer, stream: net.Stream) void {
|
|||
}
|
||||
|
||||
fn serveWebSocket(ws: *WebServer, sock: *http.Server.WebSocket) !noreturn {
|
||||
const io = ws.graph.io;
|
||||
|
||||
var prev_build_status = ws.build_status.load(.monotonic);
|
||||
|
||||
const prev_step_status_bits = try ws.gpa.alloc(u8, ws.step_status_bits.len);
|
||||
|
|
@ -335,8 +333,8 @@ fn serveWebSocket(ws: *WebServer, sock: *http.Server.WebSocket) !noreturn {
|
|||
}
|
||||
|
||||
{
|
||||
ws.time_report_mutex.lock();
|
||||
defer ws.time_report_mutex.unlock();
|
||||
try ws.time_report_mutex.lock(io);
|
||||
defer ws.time_report_mutex.unlock(io);
|
||||
for (ws.time_report_msgs, ws.time_report_update_times) |msg, update_time| {
|
||||
if (update_time <= prev_time) continue;
|
||||
// We want to send `msg`, but shouldn't block `ws.time_report_mutex` while we do, so
|
||||
|
|
@ -344,8 +342,8 @@ fn serveWebSocket(ws: *WebServer, sock: *http.Server.WebSocket) !noreturn {
|
|||
const owned_msg = try ws.gpa.dupe(u8, msg);
|
||||
defer ws.gpa.free(owned_msg);
|
||||
// Temporarily unlock, then re-lock after the message is sent.
|
||||
ws.time_report_mutex.unlock();
|
||||
defer ws.time_report_mutex.lock();
|
||||
ws.time_report_mutex.unlock(io);
|
||||
defer ws.time_report_mutex.lockUncancelable(io);
|
||||
try sock.writeMessage(owned_msg, .binary);
|
||||
}
|
||||
}
|
||||
|
|
@ -386,6 +384,8 @@ fn serveWebSocket(ws: *WebServer, sock: *http.Server.WebSocket) !noreturn {
|
|||
}
|
||||
}
|
||||
fn recvWebSocketMessages(ws: *WebServer, sock: *http.Server.WebSocket) void {
|
||||
const io = ws.graph.io;
|
||||
|
||||
while (true) {
|
||||
const msg = sock.readSmallMessage() catch return;
|
||||
if (msg.opcode != .binary) continue;
|
||||
|
|
@ -394,14 +394,16 @@ fn recvWebSocketMessages(ws: *WebServer, sock: *http.Server.WebSocket) void {
|
|||
switch (tag) {
|
||||
_ => continue,
|
||||
.rebuild => while (true) {
|
||||
ws.runner_request_mutex.lock();
|
||||
defer ws.runner_request_mutex.unlock();
|
||||
ws.runner_request_mutex.lock(io) catch |err| switch (err) {
|
||||
error.Canceled => return,
|
||||
};
|
||||
defer ws.runner_request_mutex.unlock(io);
|
||||
if (ws.runner_request == null) {
|
||||
ws.runner_request = .rebuild;
|
||||
ws.runner_request_ready_cond.signal();
|
||||
ws.runner_request_ready_cond.signal(io);
|
||||
break;
|
||||
}
|
||||
ws.runner_request_empty_cond.wait(&ws.runner_request_mutex);
|
||||
ws.runner_request_empty_cond.wait(io, &ws.runner_request_mutex) catch return;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -695,14 +697,15 @@ pub fn updateTimeReportCompile(ws: *WebServer, opts: struct {
|
|||
trailing: []const u8,
|
||||
}) void {
|
||||
const gpa = ws.gpa;
|
||||
const io = ws.graph.io;
|
||||
|
||||
const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
|
||||
if (s == &opts.compile.step) break @intCast(i);
|
||||
} else unreachable;
|
||||
|
||||
const old_buf = old: {
|
||||
ws.time_report_mutex.lock();
|
||||
defer ws.time_report_mutex.unlock();
|
||||
ws.time_report_mutex.lock(io) catch return;
|
||||
defer ws.time_report_mutex.unlock(io);
|
||||
const old = ws.time_report_msgs[step_idx];
|
||||
ws.time_report_msgs[step_idx] = &.{};
|
||||
break :old old;
|
||||
|
|
@ -724,8 +727,8 @@ pub fn updateTimeReportCompile(ws: *WebServer, opts: struct {
|
|||
@memcpy(buf[@sizeOf(abi.time_report.CompileResult)..], opts.trailing);
|
||||
|
||||
{
|
||||
ws.time_report_mutex.lock();
|
||||
defer ws.time_report_mutex.unlock();
|
||||
ws.time_report_mutex.lock(io) catch return;
|
||||
defer ws.time_report_mutex.unlock(io);
|
||||
assert(ws.time_report_msgs[step_idx].len == 0);
|
||||
ws.time_report_msgs[step_idx] = buf;
|
||||
ws.time_report_update_times[step_idx] = ws.now();
|
||||
|
|
@ -735,14 +738,15 @@ pub fn updateTimeReportCompile(ws: *WebServer, opts: struct {
|
|||
|
||||
pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64) void {
|
||||
const gpa = ws.gpa;
|
||||
const io = ws.graph.io;
|
||||
|
||||
const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
|
||||
if (s == step) break @intCast(i);
|
||||
} else unreachable;
|
||||
|
||||
const old_buf = old: {
|
||||
ws.time_report_mutex.lock();
|
||||
defer ws.time_report_mutex.unlock();
|
||||
ws.time_report_mutex.lock(io) catch return;
|
||||
defer ws.time_report_mutex.unlock(io);
|
||||
const old = ws.time_report_msgs[step_idx];
|
||||
ws.time_report_msgs[step_idx] = &.{};
|
||||
break :old old;
|
||||
|
|
@ -754,8 +758,8 @@ pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64)
|
|||
.ns_total = ns_total,
|
||||
};
|
||||
{
|
||||
ws.time_report_mutex.lock();
|
||||
defer ws.time_report_mutex.unlock();
|
||||
ws.time_report_mutex.lock(io) catch return;
|
||||
defer ws.time_report_mutex.unlock(io);
|
||||
assert(ws.time_report_msgs[step_idx].len == 0);
|
||||
ws.time_report_msgs[step_idx] = buf;
|
||||
ws.time_report_update_times[step_idx] = ws.now();
|
||||
|
|
@ -770,6 +774,7 @@ pub fn updateTimeReportRunTest(
|
|||
ns_per_test: []const u64,
|
||||
) void {
|
||||
const gpa = ws.gpa;
|
||||
const io = ws.graph.io;
|
||||
|
||||
const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
|
||||
if (s == &run.step) break @intCast(i);
|
||||
|
|
@ -786,8 +791,8 @@ pub fn updateTimeReportRunTest(
|
|||
break :len @sizeOf(abi.time_report.RunTestResult) + names_len + 8 * tests_len;
|
||||
};
|
||||
const old_buf = old: {
|
||||
ws.time_report_mutex.lock();
|
||||
defer ws.time_report_mutex.unlock();
|
||||
ws.time_report_mutex.lock(io) catch return;
|
||||
defer ws.time_report_mutex.unlock(io);
|
||||
const old = ws.time_report_msgs[step_idx];
|
||||
ws.time_report_msgs[step_idx] = &.{};
|
||||
break :old old;
|
||||
|
|
@ -812,8 +817,8 @@ pub fn updateTimeReportRunTest(
|
|||
assert(offset == buf.len);
|
||||
|
||||
{
|
||||
ws.time_report_mutex.lock();
|
||||
defer ws.time_report_mutex.unlock();
|
||||
ws.time_report_mutex.lock(io) catch return;
|
||||
defer ws.time_report_mutex.unlock(io);
|
||||
assert(ws.time_report_msgs[step_idx].len == 0);
|
||||
ws.time_report_msgs[step_idx] = buf;
|
||||
ws.time_report_update_times[step_idx] = ws.now();
|
||||
|
|
@ -825,8 +830,9 @@ const RunnerRequest = union(enum) {
|
|||
rebuild,
|
||||
};
|
||||
pub fn getRunnerRequest(ws: *WebServer) ?RunnerRequest {
|
||||
ws.runner_request_mutex.lock();
|
||||
defer ws.runner_request_mutex.unlock();
|
||||
const io = ws.graph.io;
|
||||
ws.runner_request_mutex.lock(io) catch return;
|
||||
defer ws.runner_request_mutex.unlock(io);
|
||||
if (ws.runner_request) |req| {
|
||||
ws.runner_request = null;
|
||||
ws.runner_request_empty_cond.signal();
|
||||
|
|
@ -834,16 +840,17 @@ pub fn getRunnerRequest(ws: *WebServer) ?RunnerRequest {
|
|||
}
|
||||
return null;
|
||||
}
|
||||
pub fn wait(ws: *WebServer) RunnerRequest {
|
||||
ws.runner_request_mutex.lock();
|
||||
defer ws.runner_request_mutex.unlock();
|
||||
pub fn wait(ws: *WebServer) Io.Cancelable!RunnerRequest {
|
||||
const io = ws.graph.io;
|
||||
try ws.runner_request_mutex.lock(io);
|
||||
defer ws.runner_request_mutex.unlock(io);
|
||||
while (true) {
|
||||
if (ws.runner_request) |req| {
|
||||
ws.runner_request = null;
|
||||
ws.runner_request_empty_cond.signal();
|
||||
ws.runner_request_empty_cond.signal(io);
|
||||
return req;
|
||||
}
|
||||
ws.runner_request_ready_cond.wait(&ws.runner_request_mutex);
|
||||
try ws.runner_request_ready_cond.wait(io, &ws.runner_request_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -528,23 +528,7 @@ pub fn Poller(comptime StreamEnum: type) type {
|
|||
/// Given an enum, returns a struct with fields of that enum, each field
|
||||
/// representing an I/O stream for polling.
|
||||
pub fn PollFiles(comptime StreamEnum: type) type {
|
||||
const enum_fields = @typeInfo(StreamEnum).@"enum".fields;
|
||||
var struct_fields: [enum_fields.len]std.builtin.Type.StructField = undefined;
|
||||
for (&struct_fields, enum_fields) |*struct_field, enum_field| {
|
||||
struct_field.* = .{
|
||||
.name = enum_field.name,
|
||||
.type = std.fs.File,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf(std.fs.File),
|
||||
};
|
||||
}
|
||||
return @Type(.{ .@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &struct_fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
} });
|
||||
return @Struct(.auto, null, std.meta.fieldNames(StreamEnum), &@splat(std.fs.File), &@splat(.{}));
|
||||
}
|
||||
|
||||
test {
|
||||
|
|
@ -642,8 +626,9 @@ pub const VTable = struct {
|
|||
/// Thread-safe.
|
||||
cancelRequested: *const fn (?*anyopaque) bool,
|
||||
|
||||
/// Executes `start` asynchronously in a manner such that it cleans itself
|
||||
/// up. This mode does not support results, await, or cancel.
|
||||
/// When this function returns, implementation guarantees that `start` has
|
||||
/// either already been called, or a unit of concurrency has been assigned
|
||||
/// to the task of calling the function.
|
||||
///
|
||||
/// Thread-safe.
|
||||
groupAsync: *const fn (
|
||||
|
|
@ -656,6 +641,17 @@ pub const VTable = struct {
|
|||
context_alignment: std.mem.Alignment,
|
||||
start: *const fn (*Group, context: *const anyopaque) void,
|
||||
) void,
|
||||
/// Thread-safe.
|
||||
groupConcurrent: *const fn (
|
||||
/// Corresponds to `Io.userdata`.
|
||||
userdata: ?*anyopaque,
|
||||
/// Owner of the spawned async task.
|
||||
group: *Group,
|
||||
/// Copied and then passed to `start`.
|
||||
context: []const u8,
|
||||
context_alignment: std.mem.Alignment,
|
||||
start: *const fn (*Group, context: *const anyopaque) void,
|
||||
) ConcurrentError!void,
|
||||
groupWait: *const fn (?*anyopaque, *Group, token: *anyopaque) void,
|
||||
groupCancel: *const fn (?*anyopaque, *Group, token: *anyopaque) void,
|
||||
|
||||
|
|
@ -1037,8 +1033,8 @@ pub const Group = struct {
|
|||
/// Threadsafe.
|
||||
///
|
||||
/// See also:
|
||||
/// * `Io.async`
|
||||
/// * `concurrent`
|
||||
/// * `Io.async`
|
||||
pub fn async(g: *Group, io: Io, function: anytype, args: std.meta.ArgsTuple(@TypeOf(function))) void {
|
||||
const Args = @TypeOf(args);
|
||||
const TypeErased = struct {
|
||||
|
|
@ -1051,6 +1047,34 @@ pub const Group = struct {
|
|||
io.vtable.groupAsync(io.userdata, g, @ptrCast(&args), .of(Args), TypeErased.start);
|
||||
}
|
||||
|
||||
/// Calls `function` with `args`, such that the function is not guaranteed
|
||||
/// to have returned until `wait` is called, allowing the caller to
|
||||
/// progress while waiting for any `Io` operations.
|
||||
///
|
||||
/// The resource spawned is owned by the group; after this is called,
|
||||
/// `wait` or `cancel` must be called before the group is deinitialized.
|
||||
///
|
||||
/// This has stronger guarantee than `async`, placing restrictions on what kind
|
||||
/// of `Io` implementations are supported. By calling `async` instead, one
|
||||
/// allows, for example, stackful single-threaded blocking I/O.
|
||||
///
|
||||
/// Threadsafe.
|
||||
///
|
||||
/// See also:
|
||||
/// * `async`
|
||||
/// * `Io.concurrent`
|
||||
pub fn concurrent(g: *Group, io: Io, function: anytype, args: std.meta.ArgsTuple(@TypeOf(function))) ConcurrentError!void {
|
||||
const Args = @TypeOf(args);
|
||||
const TypeErased = struct {
|
||||
fn start(group: *Group, context: *const anyopaque) void {
|
||||
_ = group;
|
||||
const args_casted: *const Args = @ptrCast(@alignCast(context));
|
||||
@call(.auto, function, args_casted.*);
|
||||
}
|
||||
};
|
||||
return io.vtable.groupConcurrent(io.userdata, g, @ptrCast(&args), .of(Args), TypeErased.start);
|
||||
}
|
||||
|
||||
/// Blocks until all tasks of the group finish. During this time,
|
||||
/// cancellation requests propagate to all members of the group.
|
||||
///
|
||||
|
|
@ -1625,22 +1649,14 @@ pub fn sleep(io: Io, duration: Duration, clock: Clock) SleepError!void {
|
|||
/// fields, each field type the future's result.
|
||||
pub fn SelectUnion(S: type) type {
|
||||
const struct_fields = @typeInfo(S).@"struct".fields;
|
||||
var fields: [struct_fields.len]std.builtin.Type.UnionField = undefined;
|
||||
for (&fields, struct_fields) |*union_field, struct_field| {
|
||||
const F = @typeInfo(struct_field.type).pointer.child;
|
||||
const Result = @TypeOf(@as(F, undefined).result);
|
||||
union_field.* = .{
|
||||
.name = struct_field.name,
|
||||
.type = Result,
|
||||
.alignment = struct_field.alignment,
|
||||
};
|
||||
var names: [struct_fields.len][]const u8 = undefined;
|
||||
var types: [struct_fields.len]type = undefined;
|
||||
for (struct_fields, &names, &types) |struct_field, *union_field_name, *UnionFieldType| {
|
||||
const FieldFuture = @typeInfo(struct_field.type).pointer.child;
|
||||
union_field_name.* = struct_field.name;
|
||||
UnionFieldType.* = @FieldType(FieldFuture, "result");
|
||||
}
|
||||
return @Type(.{ .@"union" = .{
|
||||
.layout = .auto,
|
||||
.tag_type = std.meta.FieldEnum(S),
|
||||
.fields = &fields,
|
||||
.decls = &.{},
|
||||
} });
|
||||
return @Union(.auto, std.meta.FieldEnum(S), &names, &types, &@splat(.{}));
|
||||
}
|
||||
|
||||
/// `s` is a struct with every field a `*Future(T)`, where `T` can be any type,
|
||||
|
|
|
|||
|
|
@ -318,7 +318,7 @@ pub const MakePathStatus = enum { existed, created };
|
|||
/// Same as `makePath` except returns whether the path already existed or was
|
||||
/// successfully created.
|
||||
pub fn makePathStatus(dir: Dir, io: Io, sub_path: []const u8) MakePathError!MakePathStatus {
|
||||
var it = try std.fs.path.componentIterator(sub_path);
|
||||
var it = std.fs.path.componentIterator(sub_path);
|
||||
var status: MakePathStatus = .existed;
|
||||
var component = it.last() orelse return error.BadPathName;
|
||||
while (true) {
|
||||
|
|
|
|||
|
|
@ -215,7 +215,7 @@ pub fn openSelfExe(io: Io, flags: OpenFlags) OpenSelfExeError!File {
|
|||
|
||||
pub const ReadPositionalError = Reader.Error || error{Unseekable};
|
||||
|
||||
pub fn readPositional(file: File, io: Io, buffer: []u8, offset: u64) ReadPositionalError!usize {
|
||||
pub fn readPositional(file: File, io: Io, buffer: [][]u8, offset: u64) ReadPositionalError!usize {
|
||||
return io.vtable.fileReadPositional(io.userdata, file, buffer, offset);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1273,20 +1273,17 @@ pub const TakeLeb128Error = Error || error{Overflow};
|
|||
/// Read a single LEB128 value as type T, or `error.Overflow` if the value cannot fit.
|
||||
pub fn takeLeb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
|
||||
const result_info = @typeInfo(Result).int;
|
||||
return std.math.cast(Result, try r.takeMultipleOf7Leb128(@Type(.{ .int = .{
|
||||
.signedness = result_info.signedness,
|
||||
.bits = std.mem.alignForwardAnyAlign(u16, result_info.bits, 7),
|
||||
} }))) orelse error.Overflow;
|
||||
return std.math.cast(Result, try r.takeMultipleOf7Leb128(@Int(
|
||||
result_info.signedness,
|
||||
std.mem.alignForwardAnyAlign(u16, result_info.bits, 7),
|
||||
))) orelse error.Overflow;
|
||||
}
|
||||
|
||||
fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
|
||||
const result_info = @typeInfo(Result).int;
|
||||
comptime assert(result_info.bits % 7 == 0);
|
||||
var remaining_bits: std.math.Log2IntCeil(Result) = result_info.bits;
|
||||
const UnsignedResult = @Type(.{ .int = .{
|
||||
.signedness = .unsigned,
|
||||
.bits = result_info.bits,
|
||||
} });
|
||||
const UnsignedResult = @Int(.unsigned, result_info.bits);
|
||||
var result: UnsignedResult = 0;
|
||||
var fits = true;
|
||||
while (true) {
|
||||
|
|
|
|||
|
|
@ -33,6 +33,9 @@ wait_group: std.Thread.WaitGroup = .{},
|
|||
/// immediately.
|
||||
///
|
||||
/// Defaults to a number equal to logical CPU cores.
|
||||
///
|
||||
/// Protected by `mutex` once the I/O instance is already in use. See
|
||||
/// `setAsyncLimit`.
|
||||
async_limit: Io.Limit,
|
||||
/// Maximum thread pool size (excluding main thread) for dispatching concurrent
|
||||
/// tasks. Until this limit, calls to `Io.concurrent` will increase the thread
|
||||
|
|
@ -116,6 +119,7 @@ pub fn init(
|
|||
/// * `Io.VTable.async`
|
||||
/// * `Io.VTable.concurrent`
|
||||
/// * `Io.VTable.groupAsync`
|
||||
/// * `Io.VTable.groupConcurrent`
|
||||
/// If these functions are avoided, then `Allocator.failing` may be passed
|
||||
/// here.
|
||||
gpa: Allocator,
|
||||
|
|
@ -167,6 +171,12 @@ pub const init_single_threaded: Threaded = .{
|
|||
.have_signal_handler = false,
|
||||
};
|
||||
|
||||
pub fn setAsyncLimit(t: *Threaded, new_limit: Io.Limit) void {
|
||||
t.mutex.lock();
|
||||
defer t.mutex.unlock();
|
||||
t.async_limit = new_limit;
|
||||
}
|
||||
|
||||
pub fn deinit(t: *Threaded) void {
|
||||
t.join();
|
||||
if (is_windows and t.wsa.status == .initialized) {
|
||||
|
|
@ -221,6 +231,7 @@ pub fn io(t: *Threaded) Io {
|
|||
.select = select,
|
||||
|
||||
.groupAsync = groupAsync,
|
||||
.groupConcurrent = groupConcurrent,
|
||||
.groupWait = groupWait,
|
||||
.groupCancel = groupCancel,
|
||||
|
||||
|
|
@ -317,6 +328,7 @@ pub fn ioBasic(t: *Threaded) Io {
|
|||
.select = select,
|
||||
|
||||
.groupAsync = groupAsync,
|
||||
.groupConcurrent = groupConcurrent,
|
||||
.groupWait = groupWait,
|
||||
.groupCancel = groupCancel,
|
||||
|
||||
|
|
@ -504,7 +516,7 @@ fn async(
|
|||
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
|
||||
) ?*Io.AnyFuture {
|
||||
const t: *Threaded = @ptrCast(@alignCast(userdata));
|
||||
if (builtin.single_threaded or t.async_limit == .nothing) {
|
||||
if (builtin.single_threaded) {
|
||||
start(context.ptr, result.ptr);
|
||||
return null;
|
||||
}
|
||||
|
|
@ -681,8 +693,7 @@ fn groupAsync(
|
|||
start: *const fn (*Io.Group, context: *const anyopaque) void,
|
||||
) void {
|
||||
const t: *Threaded = @ptrCast(@alignCast(userdata));
|
||||
if (builtin.single_threaded or t.async_limit == .nothing)
|
||||
return start(group, context.ptr);
|
||||
if (builtin.single_threaded) return start(group, context.ptr);
|
||||
|
||||
const gpa = t.allocator;
|
||||
const gc = GroupClosure.init(gpa, t, group, context, context_alignment, start) catch
|
||||
|
|
@ -729,6 +740,57 @@ fn groupAsync(
|
|||
t.cond.signal();
|
||||
}
|
||||
|
||||
fn groupConcurrent(
|
||||
userdata: ?*anyopaque,
|
||||
group: *Io.Group,
|
||||
context: []const u8,
|
||||
context_alignment: Alignment,
|
||||
start: *const fn (*Io.Group, context: *const anyopaque) void,
|
||||
) Io.ConcurrentError!void {
|
||||
if (builtin.single_threaded) return error.ConcurrencyUnavailable;
|
||||
|
||||
const t: *Threaded = @ptrCast(@alignCast(userdata));
|
||||
|
||||
const gpa = t.allocator;
|
||||
const gc = GroupClosure.init(gpa, t, group, context, context_alignment, start) catch
|
||||
return error.ConcurrencyUnavailable;
|
||||
|
||||
t.mutex.lock();
|
||||
defer t.mutex.unlock();
|
||||
|
||||
const busy_count = t.busy_count;
|
||||
|
||||
if (busy_count >= @intFromEnum(t.concurrent_limit))
|
||||
return error.ConcurrencyUnavailable;
|
||||
|
||||
t.busy_count = busy_count + 1;
|
||||
errdefer t.busy_count = busy_count;
|
||||
|
||||
const pool_size = t.wait_group.value();
|
||||
if (pool_size - busy_count == 0) {
|
||||
t.wait_group.start();
|
||||
errdefer t.wait_group.finish();
|
||||
|
||||
const thread = std.Thread.spawn(.{ .stack_size = t.stack_size }, worker, .{t}) catch
|
||||
return error.ConcurrencyUnavailable;
|
||||
thread.detach();
|
||||
}
|
||||
|
||||
// Append to the group linked list inside the mutex to make `Io.Group.concurrent` thread-safe.
|
||||
gc.node = .{ .next = @ptrCast(@alignCast(group.token)) };
|
||||
group.token = &gc.node;
|
||||
|
||||
t.run_queue.prepend(&gc.closure.node);
|
||||
|
||||
// This needs to be done before unlocking the mutex to avoid a race with
|
||||
// the associated task finishing.
|
||||
const group_state: *std.atomic.Value(usize) = @ptrCast(&group.state);
|
||||
const prev_state = group_state.fetchAdd(GroupClosure.sync_one_pending, .monotonic);
|
||||
assert((prev_state / GroupClosure.sync_one_pending) < (std.math.maxInt(usize) / GroupClosure.sync_one_pending));
|
||||
|
||||
t.cond.signal();
|
||||
}
|
||||
|
||||
fn groupWait(userdata: ?*anyopaque, group: *Io.Group, token: *anyopaque) void {
|
||||
const t: *Threaded = @ptrCast(@alignCast(userdata));
|
||||
const gpa = t.allocator;
|
||||
|
|
@ -1156,7 +1218,7 @@ fn dirMakeOpenPathWindows(
|
|||
w.SYNCHRONIZE | w.FILE_TRAVERSE |
|
||||
(if (options.iterate) w.FILE_LIST_DIRECTORY else @as(u32, 0));
|
||||
|
||||
var it = try std.fs.path.componentIterator(sub_path);
|
||||
var it = std.fs.path.componentIterator(sub_path);
|
||||
// If there are no components in the path, then create a dummy component with the full path.
|
||||
var component: std.fs.path.NativeComponentIterator.Component = it.last() orelse .{
|
||||
.name = "",
|
||||
|
|
@ -2894,7 +2956,22 @@ fn nowWindows(userdata: ?*anyopaque, clock: Io.Clock) Io.Clock.Error!Io.Timestam
|
|||
},
|
||||
.awake, .boot => {
|
||||
// QPC on windows doesn't fail on >= XP/2000 and includes time suspended.
|
||||
return .{ .nanoseconds = windows.QueryPerformanceCounter() };
|
||||
const qpc = windows.QueryPerformanceCounter();
|
||||
// We don't need to cache QPF as it's internally just a memory read to KUSER_SHARED_DATA
|
||||
// (a read-only page of info updated and mapped by the kernel to all processes):
|
||||
// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-kuser_shared_data
|
||||
// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm
|
||||
const qpf = windows.QueryPerformanceFrequency();
|
||||
|
||||
// 10Mhz (1 qpc tick every 100ns) is a common enough QPF value that we can optimize on it.
|
||||
// https://github.com/microsoft/STL/blob/785143a0c73f030238ef618890fd4d6ae2b3a3a0/stl/inc/chrono#L694-L701
|
||||
const common_qpf = 10_000_000;
|
||||
if (qpf == common_qpf) return .{ .nanoseconds = qpc * (std.time.ns_per_s / common_qpf) };
|
||||
|
||||
// Convert to ns using fixed point.
|
||||
const scale = @as(u64, std.time.ns_per_s << 32) / @as(u32, @intCast(qpf));
|
||||
const result = (@as(u96, qpc) * scale) >> 32;
|
||||
return .{ .nanoseconds = @intCast(result) };
|
||||
},
|
||||
.cpu_process,
|
||||
.cpu_thread,
|
||||
|
|
|
|||
|
|
@ -1890,7 +1890,7 @@ pub fn writeUleb128(w: *Writer, value: anytype) Error!void {
|
|||
try w.writeLeb128(switch (@typeInfo(@TypeOf(value))) {
|
||||
.comptime_int => @as(std.math.IntFittingRange(0, @abs(value)), value),
|
||||
.int => |value_info| switch (value_info.signedness) {
|
||||
.signed => @as(@Type(.{ .int = .{ .signedness = .unsigned, .bits = value_info.bits -| 1 } }), @intCast(value)),
|
||||
.signed => @as(@Int(.unsigned, value_info.bits -| 1), @intCast(value)),
|
||||
.unsigned => value,
|
||||
},
|
||||
else => comptime unreachable,
|
||||
|
|
@ -1903,7 +1903,7 @@ pub fn writeSleb128(w: *Writer, value: anytype) Error!void {
|
|||
.comptime_int => @as(std.math.IntFittingRange(@min(value, -1), @max(0, value)), value),
|
||||
.int => |value_info| switch (value_info.signedness) {
|
||||
.signed => value,
|
||||
.unsigned => @as(@Type(.{ .int = .{ .signedness = .signed, .bits = value_info.bits + 1 } }), value),
|
||||
.unsigned => @as(@Int(.signed, value_info.bits + 1), value),
|
||||
},
|
||||
else => comptime unreachable,
|
||||
});
|
||||
|
|
@ -1912,10 +1912,10 @@ pub fn writeSleb128(w: *Writer, value: anytype) Error!void {
|
|||
/// Write a single integer as LEB128 to the given writer.
|
||||
pub fn writeLeb128(w: *Writer, value: anytype) Error!void {
|
||||
const value_info = @typeInfo(@TypeOf(value)).int;
|
||||
try w.writeMultipleOf7Leb128(@as(@Type(.{ .int = .{
|
||||
.signedness = value_info.signedness,
|
||||
.bits = @max(std.mem.alignForwardAnyAlign(u16, value_info.bits, 7), 7),
|
||||
} }), value));
|
||||
try w.writeMultipleOf7Leb128(@as(@Int(
|
||||
value_info.signedness,
|
||||
@max(std.mem.alignForwardAnyAlign(u16, value_info.bits, 7), 7),
|
||||
), value));
|
||||
}
|
||||
|
||||
fn writeMultipleOf7Leb128(w: *Writer, value: anytype) Error!void {
|
||||
|
|
@ -1929,10 +1929,10 @@ fn writeMultipleOf7Leb128(w: *Writer, value: anytype) Error!void {
|
|||
.unsigned => remaining > std.math.maxInt(u7),
|
||||
};
|
||||
byte.* = .{
|
||||
.bits = @bitCast(@as(@Type(.{ .int = .{
|
||||
.signedness = value_info.signedness,
|
||||
.bits = 7,
|
||||
} }), @truncate(remaining))),
|
||||
.bits = @bitCast(@as(
|
||||
@Int(value_info.signedness, 7),
|
||||
@truncate(remaining),
|
||||
)),
|
||||
.more = more,
|
||||
};
|
||||
if (value_info.bits > 7) remaining >>= 7;
|
||||
|
|
|
|||
|
|
@ -172,6 +172,32 @@ fn sleep(io: Io, result: *usize) void {
|
|||
result.* = 1;
|
||||
}
|
||||
|
||||
test "Group concurrent" {
|
||||
const io = testing.io;
|
||||
|
||||
var group: Io.Group = .init;
|
||||
defer group.cancel(io);
|
||||
var results: [2]usize = undefined;
|
||||
|
||||
group.concurrent(io, count, .{ 1, 10, &results[0] }) catch |err| switch (err) {
|
||||
error.ConcurrencyUnavailable => {
|
||||
try testing.expect(builtin.single_threaded);
|
||||
return;
|
||||
},
|
||||
};
|
||||
|
||||
group.concurrent(io, count, .{ 20, 30, &results[1] }) catch |err| switch (err) {
|
||||
error.ConcurrencyUnavailable => {
|
||||
try testing.expect(builtin.single_threaded);
|
||||
return;
|
||||
},
|
||||
};
|
||||
|
||||
group.wait(io);
|
||||
|
||||
try testing.expectEqualSlices(usize, &.{ 45, 245 }, &results);
|
||||
}
|
||||
|
||||
test "select" {
|
||||
const io = testing.io;
|
||||
|
||||
|
|
|
|||
|
|
@ -746,7 +746,6 @@ pub const Abi = enum {
|
|||
gnuf32,
|
||||
gnusf,
|
||||
gnux32,
|
||||
code16,
|
||||
eabi,
|
||||
eabihf,
|
||||
ilp32,
|
||||
|
|
|
|||
|
|
@ -548,19 +548,19 @@ pub const TypeId = std.meta.Tag(Type);
|
|||
/// This data structure is used by the Zig language code generation and
|
||||
/// therefore must be kept in sync with the compiler implementation.
|
||||
pub const Type = union(enum) {
|
||||
type: void,
|
||||
void: void,
|
||||
bool: void,
|
||||
noreturn: void,
|
||||
type,
|
||||
void,
|
||||
bool,
|
||||
noreturn,
|
||||
int: Int,
|
||||
float: Float,
|
||||
pointer: Pointer,
|
||||
array: Array,
|
||||
@"struct": Struct,
|
||||
comptime_float: void,
|
||||
comptime_int: void,
|
||||
undefined: void,
|
||||
null: void,
|
||||
comptime_float,
|
||||
comptime_int,
|
||||
undefined,
|
||||
null,
|
||||
optional: Optional,
|
||||
error_union: ErrorUnion,
|
||||
error_set: ErrorSet,
|
||||
|
|
@ -571,7 +571,7 @@ pub const Type = union(enum) {
|
|||
frame: Frame,
|
||||
@"anyframe": AnyFrame,
|
||||
vector: Vector,
|
||||
enum_literal: void,
|
||||
enum_literal,
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
/// therefore must be kept in sync with the compiler implementation.
|
||||
|
|
@ -619,6 +619,16 @@ pub const Type = union(enum) {
|
|||
slice,
|
||||
c,
|
||||
};
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
/// therefore must be kept in sync with the compiler implementation.
|
||||
pub const Attributes = struct {
|
||||
@"const": bool = false,
|
||||
@"volatile": bool = false,
|
||||
@"allowzero": bool = false,
|
||||
@"addrspace": ?AddressSpace = null,
|
||||
@"align": ?usize = null,
|
||||
};
|
||||
};
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
|
|
@ -668,6 +678,14 @@ pub const Type = union(enum) {
|
|||
const dp: *const sf.type = @ptrCast(@alignCast(sf.default_value_ptr orelse return null));
|
||||
return dp.*;
|
||||
}
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
/// therefore must be kept in sync with the compiler implementation.
|
||||
pub const Attributes = struct {
|
||||
@"comptime": bool = false,
|
||||
@"align": ?usize = null,
|
||||
default_value_ptr: ?*const anyopaque = null,
|
||||
};
|
||||
};
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
|
|
@ -718,6 +736,10 @@ pub const Type = union(enum) {
|
|||
fields: []const EnumField,
|
||||
decls: []const Declaration,
|
||||
is_exhaustive: bool,
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
/// therefore must be kept in sync with the compiler implementation.
|
||||
pub const Mode = enum { exhaustive, nonexhaustive };
|
||||
};
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
|
|
@ -726,6 +748,12 @@ pub const Type = union(enum) {
|
|||
name: [:0]const u8,
|
||||
type: type,
|
||||
alignment: comptime_int,
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
/// therefore must be kept in sync with the compiler implementation.
|
||||
pub const Attributes = struct {
|
||||
@"align": ?usize = null,
|
||||
};
|
||||
};
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
|
|
@ -753,6 +781,19 @@ pub const Type = union(enum) {
|
|||
is_generic: bool,
|
||||
is_noalias: bool,
|
||||
type: ?type,
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
/// therefore must be kept in sync with the compiler implementation.
|
||||
pub const Attributes = struct {
|
||||
@"noalias": bool = false,
|
||||
};
|
||||
};
|
||||
|
||||
/// This data structure is used by the Zig language code generation and
|
||||
/// therefore must be kept in sync with the compiler implementation.
|
||||
pub const Attributes = struct {
|
||||
@"callconv": CallingConvention = .auto,
|
||||
varargs: bool = false,
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -993,14 +993,15 @@ const huffman = struct {
|
|||
const max_leafs = 286;
|
||||
const max_nodes = max_leafs * 2;
|
||||
|
||||
const Node = struct {
|
||||
freq: u16,
|
||||
const Node = packed struct(u32) {
|
||||
depth: u16,
|
||||
freq: u16,
|
||||
|
||||
pub const Index = u16;
|
||||
|
||||
/// `freq` is more significant than `depth`
|
||||
pub fn smaller(a: Node, b: Node) bool {
|
||||
return if (a.freq != b.freq) a.freq < b.freq else a.depth < b.depth;
|
||||
return @as(u32, @bitCast(a)) < @as(u32, @bitCast(b));
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -12,8 +12,8 @@ const Vec16 = @Vector(16, u32);
|
|||
const chunk_length = 1024;
|
||||
const max_depth = 54;
|
||||
|
||||
pub const simd_degree = std.simd.suggestVectorLength(u32) orelse 1;
|
||||
pub const max_simd_degree = simd_degree;
|
||||
const simd_degree = std.simd.suggestVectorLength(u32) orelse 1;
|
||||
const max_simd_degree = simd_degree;
|
||||
const max_simd_degree_or_2 = if (max_simd_degree > 2) max_simd_degree else 2;
|
||||
|
||||
/// Threshold for switching to parallel processing.
|
||||
|
|
@ -502,9 +502,7 @@ fn hashManySimd(
|
|||
var out_ptr = out.ptr;
|
||||
var cnt = counter;
|
||||
|
||||
const simd_deg = comptime simd_degree;
|
||||
|
||||
if (comptime simd_deg >= 16) {
|
||||
if (simd_degree >= 16) {
|
||||
while (remaining >= 16) {
|
||||
const sixteen_inputs = [16][*]const u8{
|
||||
inp[0], inp[1], inp[2], inp[3],
|
||||
|
|
@ -525,7 +523,7 @@ fn hashManySimd(
|
|||
}
|
||||
}
|
||||
|
||||
if (comptime simd_deg >= 8) {
|
||||
if (simd_degree >= 8) {
|
||||
while (remaining >= 8) {
|
||||
const eight_inputs = [8][*]const u8{
|
||||
inp[0], inp[1], inp[2], inp[3],
|
||||
|
|
@ -544,7 +542,7 @@ fn hashManySimd(
|
|||
}
|
||||
}
|
||||
|
||||
if (comptime simd_deg >= 4) {
|
||||
if (simd_degree >= 4) {
|
||||
while (remaining >= 4) {
|
||||
const four_inputs = [4][*]const u8{
|
||||
inp[0],
|
||||
|
|
@ -571,7 +569,7 @@ fn hashManySimd(
|
|||
}
|
||||
|
||||
fn hashMany(inputs: [][*]const u8, num_inputs: usize, blocks: usize, key: [8]u32, counter: u64, increment_counter: bool, flags: Flags, flags_start: Flags, flags_end: Flags, out: []u8) void {
|
||||
if (comptime max_simd_degree >= 4) {
|
||||
if (max_simd_degree >= 4) {
|
||||
hashManySimd(inputs, num_inputs, blocks, key, counter, increment_counter, flags, flags_start, flags_end, out);
|
||||
} else {
|
||||
hashManyPortable(inputs, num_inputs, blocks, key, counter, increment_counter, flags, flags_start, flags_end, out);
|
||||
|
|
@ -909,7 +907,7 @@ pub const Blake3 = struct {
|
|||
pub const digest_length = 32;
|
||||
pub const key_length = 32;
|
||||
|
||||
pub const Options = struct { key: ?[digest_length]u8 = null };
|
||||
pub const Options = struct { key: ?[key_length]u8 = null };
|
||||
pub const KdfOptions = struct {};
|
||||
|
||||
key: [8]u32,
|
||||
|
|
|
|||
|
|
@ -376,7 +376,6 @@ fn Kyber(comptime p: Params) type {
|
|||
/// Except in tests, applications should generally call `generate()` instead of this function.
|
||||
pub fn generateDeterministic(seed: [seed_length]u8) !KeyPair {
|
||||
var ret: KeyPair = undefined;
|
||||
ret.secret_key.z = seed[inner_seed_length..seed_length].*;
|
||||
|
||||
// Generate inner key
|
||||
innerKeyFromSeed(
|
||||
|
|
@ -507,8 +506,8 @@ fn Kyber(comptime p: Params) type {
|
|||
fn innerKeyFromSeed(seed: [inner_seed_length]u8, pk: *InnerPk, sk: *InnerSk) void {
|
||||
var expanded_seed: [64]u8 = undefined;
|
||||
var h = sha3.Sha3_512.init(.{});
|
||||
if (p.ml_kem) h.update(&[1]u8{p.k});
|
||||
h.update(&seed);
|
||||
if (p.ml_kem) h.update(&[1]u8{p.k});
|
||||
h.final(&expanded_seed);
|
||||
pk.rho = expanded_seed[0..32].*;
|
||||
const sigma = expanded_seed[32..64];
|
||||
|
|
|
|||
|
|
@ -94,12 +94,12 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
|
|||
if (kvSplit(field)) |opt_version| {
|
||||
if (mem.eql(u8, opt_version.key, version_param_name)) {
|
||||
if (@hasField(HashResult, "alg_version")) {
|
||||
const value_type_info = switch (@typeInfo(@TypeOf(out.alg_version))) {
|
||||
.optional => |opt| @typeInfo(opt.child),
|
||||
else => |t| t,
|
||||
const ValueType = switch (@typeInfo(@TypeOf(out.alg_version))) {
|
||||
.optional => |opt| opt.child,
|
||||
else => @TypeOf(out.alg_version),
|
||||
};
|
||||
out.alg_version = fmt.parseUnsigned(
|
||||
@Type(value_type_info),
|
||||
ValueType,
|
||||
opt_version.value,
|
||||
10,
|
||||
) catch return Error.InvalidEncoding;
|
||||
|
|
|
|||
|
|
@ -606,7 +606,7 @@ pub fn array(
|
|||
const elem_size = @divExact(@bitSizeOf(Elem), 8);
|
||||
var arr: [len_size + elem_size * elems.len]u8 = undefined;
|
||||
std.mem.writeInt(Len, arr[0..len_size], @intCast(elem_size * elems.len), .big);
|
||||
const ElemInt = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Elem) } });
|
||||
const ElemInt = @Int(.unsigned, @bitSizeOf(Elem));
|
||||
for (0.., @as([elems.len]Elem, elems)) |index, elem| {
|
||||
std.mem.writeInt(
|
||||
ElemInt,
|
||||
|
|
|
|||
|
|
@ -33,22 +33,8 @@ pub fn fromInt(comptime E: type, integer: anytype) ?E {
|
|||
/// default, which may be undefined.
|
||||
pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_default: ?Data) type {
|
||||
@setEvalBranchQuota(@typeInfo(E).@"enum".fields.len + eval_branch_quota_cushion);
|
||||
var struct_fields: [@typeInfo(E).@"enum".fields.len]std.builtin.Type.StructField = undefined;
|
||||
for (&struct_fields, @typeInfo(E).@"enum".fields) |*struct_field, enum_field| {
|
||||
struct_field.* = .{
|
||||
.name = enum_field.name,
|
||||
.type = Data,
|
||||
.default_value_ptr = if (field_default) |d| @as(?*const anyopaque, @ptrCast(&d)) else null,
|
||||
.is_comptime = false,
|
||||
.alignment = if (@sizeOf(Data) > 0) @alignOf(Data) else 0,
|
||||
};
|
||||
}
|
||||
return @Type(.{ .@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &struct_fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
} });
|
||||
const default_ptr: ?*const anyopaque = if (field_default) |d| @ptrCast(&d) else null;
|
||||
return @Struct(.auto, null, std.meta.fieldNames(E), &@splat(Data), &@splat(.{ .default_value_ptr = default_ptr }));
|
||||
}
|
||||
|
||||
/// Looks up the supplied fields in the given enum type.
|
||||
|
|
@ -1532,19 +1518,15 @@ test "EnumIndexer empty" {
|
|||
test "EnumIndexer large dense unsorted" {
|
||||
@setEvalBranchQuota(500_000); // many `comptimePrint`s
|
||||
// Make an enum with 500 fields with values in *descending* order.
|
||||
const E = @Type(.{ .@"enum" = .{
|
||||
.tag_type = u32,
|
||||
.fields = comptime fields: {
|
||||
var fields: [500]EnumField = undefined;
|
||||
for (&fields, 0..) |*f, i| f.* = .{
|
||||
.name = std.fmt.comptimePrint("f{d}", .{i}),
|
||||
.value = 500 - i,
|
||||
};
|
||||
break :fields &fields;
|
||||
},
|
||||
.decls = &.{},
|
||||
.is_exhaustive = true,
|
||||
} });
|
||||
const E = @Enum(u32, .exhaustive, names: {
|
||||
var names: [500][]const u8 = undefined;
|
||||
for (&names, 0..) |*name, i| name.* = std.fmt.comptimePrint("f{d}", .{i});
|
||||
break :names &names;
|
||||
}, vals: {
|
||||
var vals: [500]u32 = undefined;
|
||||
for (&vals, 0..) |*val, i| val.* = 500 - i;
|
||||
break :vals &vals;
|
||||
});
|
||||
const Indexer = EnumIndexer(E);
|
||||
try testing.expectEqual(E.f0, Indexer.keyForIndex(499));
|
||||
try testing.expectEqual(E.f499, Indexer.keyForIndex(0));
|
||||
|
|
|
|||
|
|
@ -279,7 +279,7 @@ pub fn Alt(
|
|||
/// Helper for calling alternate format methods besides one named "format".
|
||||
pub fn alt(
|
||||
context: anytype,
|
||||
comptime func_name: @TypeOf(.enum_literal),
|
||||
comptime func_name: @EnumLiteral(),
|
||||
) Alt(@TypeOf(context), @field(@TypeOf(context), @tagName(func_name))) {
|
||||
return .{ .data = context };
|
||||
}
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ pub fn render(buf: []u8, value: anytype, options: Options) Error![]const u8 {
|
|||
|
||||
const T = @TypeOf(v);
|
||||
comptime std.debug.assert(@typeInfo(T) == .float);
|
||||
const I = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(T) } });
|
||||
const I = @Int(.unsigned, @bitSizeOf(T));
|
||||
|
||||
const DT = if (@bitSizeOf(T) <= 64) u64 else u128;
|
||||
const tables = switch (DT) {
|
||||
|
|
@ -1516,7 +1516,7 @@ const FLOAT128_POW5_INV_ERRORS: [154]u64 = .{
|
|||
const builtin = @import("builtin");
|
||||
|
||||
fn check(comptime T: type, value: T, comptime expected: []const u8) !void {
|
||||
const I = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(T) } });
|
||||
const I = @Int(.unsigned, @bitSizeOf(T));
|
||||
|
||||
var buf: [6000]u8 = undefined;
|
||||
const value_bits: I = @bitCast(value);
|
||||
|
|
|
|||
1572
lib/std/fs/path.zig
1572
lib/std/fs/path.zig
File diff suppressed because it is too large
Load diff
|
|
@ -56,7 +56,7 @@ const PathType = enum {
|
|||
// using '127.0.0.1' as the server name and '<drive letter>$' as the share name.
|
||||
var fd_path_buf: [fs.max_path_bytes]u8 = undefined;
|
||||
const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf);
|
||||
const windows_path_type = windows.getUnprefixedPathType(u8, dir_path);
|
||||
const windows_path_type = windows.getWin32PathType(u8, dir_path);
|
||||
switch (windows_path_type) {
|
||||
.unc_absolute => return fs.path.joinZ(allocator, &.{ dir_path, relative_path }),
|
||||
.drive_absolute => {
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ pub fn int(input: anytype) @TypeOf(input) {
|
|||
const info = @typeInfo(@TypeOf(input)).int;
|
||||
const bits = info.bits;
|
||||
// Convert input to unsigned integer (easier to deal with)
|
||||
const Uint = @Type(.{ .int = .{ .bits = bits, .signedness = .unsigned } });
|
||||
const Uint = @Int(.unsigned, bits);
|
||||
const u_input: Uint = @bitCast(input);
|
||||
if (bits > 256) @compileError("bit widths > 256 are unsupported, use std.hash.autoHash functionality.");
|
||||
// For bit widths that don't have a dedicated function, use a heuristic
|
||||
|
|
|
|||
|
|
@ -91,10 +91,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
|
|||
// Help the optimizer see that hashing an int is easy by inlining!
|
||||
// TODO Check if the situation is better after #561 is resolved.
|
||||
.int => |int| switch (int.signedness) {
|
||||
.signed => hash(hasher, @as(@Type(.{ .int = .{
|
||||
.bits = int.bits,
|
||||
.signedness = .unsigned,
|
||||
} }), @bitCast(key)), strat),
|
||||
.signed => hash(hasher, @as(@Int(.unsigned, int.bits), @bitCast(key)), strat),
|
||||
.unsigned => {
|
||||
if (std.meta.hasUniqueRepresentation(Key)) {
|
||||
@call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) });
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ pub var next_mmap_addr_hint: ?[*]align(page_size_min) u8 = null;
|
|||
|
||||
/// comptime-known minimum page size of the target.
|
||||
///
|
||||
/// All pointers from `mmap` or `VirtualAlloc` are aligned to at least
|
||||
/// All pointers from `mmap` or `NtAllocateVirtualMemory` are aligned to at least
|
||||
/// `page_size_min`, but their actual alignment may be bigger.
|
||||
///
|
||||
/// This value can be overridden via `std.options.page_size_min`.
|
||||
|
|
|
|||
|
|
@ -57,13 +57,13 @@ pub const default_level: Level = switch (builtin.mode) {
|
|||
};
|
||||
|
||||
pub const ScopeLevel = struct {
|
||||
scope: @Type(.enum_literal),
|
||||
scope: @EnumLiteral(),
|
||||
level: Level,
|
||||
};
|
||||
|
||||
fn log(
|
||||
comptime level: Level,
|
||||
comptime scope: @Type(.enum_literal),
|
||||
comptime scope: @EnumLiteral(),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void {
|
||||
|
|
@ -73,7 +73,7 @@ fn log(
|
|||
}
|
||||
|
||||
/// Determine if a specific log message level and scope combination are enabled for logging.
|
||||
pub fn logEnabled(comptime level: Level, comptime scope: @Type(.enum_literal)) bool {
|
||||
pub fn logEnabled(comptime level: Level, comptime scope: @EnumLiteral()) bool {
|
||||
inline for (std.options.log_scope_levels) |scope_level| {
|
||||
if (scope_level.scope == scope) return @intFromEnum(level) <= @intFromEnum(scope_level.level);
|
||||
}
|
||||
|
|
@ -87,7 +87,7 @@ pub fn logEnabled(comptime level: Level, comptime scope: @Type(.enum_literal)) b
|
|||
/// function returns.
|
||||
pub fn defaultLog(
|
||||
comptime level: Level,
|
||||
comptime scope: @Type(.enum_literal),
|
||||
comptime scope: @EnumLiteral(),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void {
|
||||
|
|
@ -115,7 +115,7 @@ pub fn defaultLog(
|
|||
|
||||
/// Returns a scoped logging namespace that logs all messages using the scope
|
||||
/// provided here.
|
||||
pub fn scoped(comptime scope: @Type(.enum_literal)) type {
|
||||
pub fn scoped(comptime scope: @EnumLiteral()) type {
|
||||
return struct {
|
||||
/// Log an error message. This log level is intended to be used
|
||||
/// when something has gone wrong. This might be recoverable or might
|
||||
|
|
|
|||
|
|
@ -450,12 +450,7 @@ pub fn wrap(x: anytype, r: anytype) @TypeOf(x) {
|
|||
// in the rare usecase of r not being comptime_int or float,
|
||||
// take the penalty of having an intermediary type conversion,
|
||||
// otherwise the alternative is to unwind iteratively to avoid overflow
|
||||
const R = comptime do: {
|
||||
var info = info_r;
|
||||
info.int.bits += 1;
|
||||
info.int.signedness = .signed;
|
||||
break :do @Type(info);
|
||||
};
|
||||
const R = @Int(.signed, info_r.int.bits + 1);
|
||||
const radius: if (info_r.int.signedness == .signed) @TypeOf(r) else R = r;
|
||||
return @intCast(@mod(x - radius, 2 * @as(R, r)) - r); // provably impossible to overflow
|
||||
},
|
||||
|
|
@ -799,14 +794,14 @@ pub fn Log2IntCeil(comptime T: type) type {
|
|||
pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) type {
|
||||
assert(from <= to);
|
||||
const signedness: std.builtin.Signedness = if (from < 0) .signed else .unsigned;
|
||||
return @Type(.{ .int = .{
|
||||
.signedness = signedness,
|
||||
.bits = @as(u16, @intFromBool(signedness == .signed)) +
|
||||
return @Int(
|
||||
signedness,
|
||||
@as(u16, @intFromBool(signedness == .signed)) +
|
||||
switch (if (from < 0) @max(@abs(from) - 1, to) else to) {
|
||||
0 => 0,
|
||||
else => |pos_max| 1 + log2(pos_max),
|
||||
},
|
||||
} });
|
||||
);
|
||||
}
|
||||
|
||||
test IntFittingRange {
|
||||
|
|
@ -1107,9 +1102,14 @@ test cast {
|
|||
pub const AlignCastError = error{UnalignedMemory};
|
||||
|
||||
fn AlignCastResult(comptime alignment: Alignment, comptime Ptr: type) type {
|
||||
var ptr_info = @typeInfo(Ptr);
|
||||
ptr_info.pointer.alignment = alignment.toByteUnits();
|
||||
return @Type(ptr_info);
|
||||
const orig = @typeInfo(Ptr).pointer;
|
||||
return @Pointer(orig.size, .{
|
||||
.@"const" = orig.is_const,
|
||||
.@"volatile" = orig.is_volatile,
|
||||
.@"allowzero" = orig.is_allowzero,
|
||||
.@"align" = alignment.toByteUnits(),
|
||||
.@"addrspace" = orig.address_space,
|
||||
}, orig.child, orig.sentinel());
|
||||
}
|
||||
|
||||
/// Align cast a pointer but return an error if it's the wrong alignment
|
||||
|
|
|
|||
|
|
@ -2032,7 +2032,11 @@ pub const Mutable = struct {
|
|||
return formatNumber(self, w, .{});
|
||||
}
|
||||
|
||||
pub fn formatNumber(self: Const, w: *std.Io.Writer, n: std.fmt.Number) std.Io.Writer.Error!void {
|
||||
/// If the absolute value of integer is greater than or equal to `pow(2, 64 * @sizeOf(usize) * 8)`,
|
||||
/// this function will fail to print the string, printing "(BigInt)" instead of a number.
|
||||
/// This is because the rendering algorithm requires reversing a string, which requires O(N) memory.
|
||||
/// See `Const.toString` and `Const.toStringAlloc` for a way to print big integers without failure.
|
||||
pub fn formatNumber(self: Mutable, w: *std.Io.Writer, n: std.fmt.Number) std.Io.Writer.Error!void {
|
||||
return self.toConst().formatNumber(w, n);
|
||||
}
|
||||
};
|
||||
|
|
@ -2321,6 +2325,10 @@ pub const Const = struct {
|
|||
return .{ normalized_res.reconstruct(if (self.positive) .positive else .negative), exactness };
|
||||
}
|
||||
|
||||
pub fn format(self: Const, w: *std.Io.Writer) std.Io.Writer.Error!void {
|
||||
return self.formatNumber(w, .{});
|
||||
}
|
||||
|
||||
/// If the absolute value of integer is greater than or equal to `pow(2, 64 * @sizeOf(usize) * 8)`,
|
||||
/// this function will fail to print the string, printing "(BigInt)" instead of a number.
|
||||
/// This is because the rendering algorithm requires reversing a string, which requires O(N) memory.
|
||||
|
|
@ -4625,3 +4633,29 @@ fn testOneShiftCaseAliasing(func: fn ([]Limb, []const Limb, usize) usize, case:
|
|||
try std.testing.expectEqualSlices(Limb, expected, r[base .. base + len]);
|
||||
}
|
||||
}
|
||||
|
||||
test "format" {
|
||||
var a: Managed = try .init(std.testing.allocator);
|
||||
defer a.deinit();
|
||||
|
||||
try a.set(123);
|
||||
try testFormat(a, "123");
|
||||
|
||||
try a.set(-123);
|
||||
try testFormat(a, "-123");
|
||||
|
||||
try a.set(20000000000000000000); // > maxInt(u64)
|
||||
try testFormat(a, "20000000000000000000");
|
||||
|
||||
try a.set(1 << 64 * @sizeOf(usize) * 8);
|
||||
try testFormat(a, "(BigInt)");
|
||||
|
||||
try a.set(-(1 << 64 * @sizeOf(usize) * 8));
|
||||
try testFormat(a, "(BigInt)");
|
||||
}
|
||||
|
||||
fn testFormat(a: Managed, expected: []const u8) !void {
|
||||
try std.testing.expectFmt(expected, "{f}", .{a});
|
||||
try std.testing.expectFmt(expected, "{f}", .{a.toMutable()});
|
||||
try std.testing.expectFmt(expected, "{f}", .{a.toConst()});
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2787,11 +2787,11 @@ test "bitNotWrap more than two limbs" {
|
|||
const bits = @bitSizeOf(Limb) * 4 + 2;
|
||||
|
||||
try res.bitNotWrap(&a, .unsigned, bits);
|
||||
const Unsigned = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } });
|
||||
const Unsigned = @Int(.unsigned, bits);
|
||||
try testing.expectEqual((try res.toInt(Unsigned)), ~@as(Unsigned, maxInt(Limb)));
|
||||
|
||||
try res.bitNotWrap(&a, .signed, bits);
|
||||
const Signed = @Type(.{ .int = .{ .signedness = .signed, .bits = bits } });
|
||||
const Signed = @Int(.signed, bits);
|
||||
try testing.expectEqual((try res.toInt(Signed)), ~@as(Signed, maxInt(Limb)));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -14,22 +14,10 @@ pub fn FloatRepr(comptime Float: type) type {
|
|||
exponent: BiasedExponent,
|
||||
sign: std.math.Sign,
|
||||
|
||||
pub const StoredMantissa = @Type(.{ .int = .{
|
||||
.signedness = .unsigned,
|
||||
.bits = floatMantissaBits(Float),
|
||||
} });
|
||||
pub const Mantissa = @Type(.{ .int = .{
|
||||
.signedness = .unsigned,
|
||||
.bits = 1 + fractional_bits,
|
||||
} });
|
||||
pub const Exponent = @Type(.{ .int = .{
|
||||
.signedness = .signed,
|
||||
.bits = exponent_bits,
|
||||
} });
|
||||
pub const BiasedExponent = enum(@Type(.{ .int = .{
|
||||
.signedness = .unsigned,
|
||||
.bits = exponent_bits,
|
||||
} })) {
|
||||
pub const StoredMantissa = @Int(.unsigned, floatMantissaBits(Float));
|
||||
pub const Mantissa = @Int(.unsigned, 1 + fractional_bits);
|
||||
pub const Exponent = @Int(.signed, exponent_bits);
|
||||
pub const BiasedExponent = enum(@Int(.unsigned, exponent_bits)) {
|
||||
denormal = 0,
|
||||
min_normal = 1,
|
||||
zero = (1 << (exponent_bits - 1)) - 1,
|
||||
|
|
@ -56,14 +44,8 @@ pub fn FloatRepr(comptime Float: type) type {
|
|||
fraction: Fraction,
|
||||
exponent: Normalized.Exponent,
|
||||
|
||||
pub const Fraction = @Type(.{ .int = .{
|
||||
.signedness = .unsigned,
|
||||
.bits = fractional_bits,
|
||||
} });
|
||||
pub const Exponent = @Type(.{ .int = .{
|
||||
.signedness = .signed,
|
||||
.bits = 1 + exponent_bits,
|
||||
} });
|
||||
pub const Fraction = @Int(.unsigned, fractional_bits);
|
||||
pub const Exponent = @Int(.signed, 1 + exponent_bits);
|
||||
|
||||
/// This currently truncates denormal values, which needs to be fixed before this can be used to
|
||||
/// produce a rounded value.
|
||||
|
|
@ -122,7 +104,7 @@ inline fn mantissaOne(comptime T: type) comptime_int {
|
|||
|
||||
/// Creates floating point type T from an unbiased exponent and raw mantissa.
|
||||
inline fn reconstructFloat(comptime T: type, comptime exponent: comptime_int, comptime mantissa: comptime_int) T {
|
||||
const TBits = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(T) } });
|
||||
const TBits = @Int(.unsigned, @bitSizeOf(T));
|
||||
const biased_exponent = @as(TBits, exponent + floatExponentMax(T));
|
||||
return @as(T, @bitCast((biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa)));
|
||||
}
|
||||
|
|
@ -209,7 +191,7 @@ pub inline fn floatEps(comptime T: type) T {
|
|||
pub inline fn floatEpsAt(comptime T: type, x: T) T {
|
||||
switch (@typeInfo(T)) {
|
||||
.float => |F| {
|
||||
const U: type = @Type(.{ .int = .{ .signedness = .unsigned, .bits = F.bits } });
|
||||
const U: type = @Int(.unsigned, F.bits);
|
||||
const u: U = @bitCast(x);
|
||||
const y: T = @bitCast(u ^ 1);
|
||||
return @abs(x - y);
|
||||
|
|
|
|||
|
|
@ -33,10 +33,7 @@ pub fn log2(x: anytype) @TypeOf(x) {
|
|||
return result;
|
||||
},
|
||||
.int => |int_info| math.log2_int(switch (int_info.signedness) {
|
||||
.signed => @Type(.{ .int = .{
|
||||
.signedness = .unsigned,
|
||||
.bits = int_info.bits -| 1,
|
||||
} }),
|
||||
.signed => @Int(.unsigned, int_info.bits -| 1),
|
||||
.unsigned => T,
|
||||
}, @intCast(x)),
|
||||
else => @compileError("log2 not implemented for " ++ @typeName(T)),
|
||||
|
|
|
|||
|
|
@ -65,7 +65,7 @@ test "log_int" {
|
|||
// Test all unsigned integers with 2, 3, ..., 64 bits.
|
||||
// We cannot test 0 or 1 bits since base must be > 1.
|
||||
inline for (2..64 + 1) |bits| {
|
||||
const T = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @intCast(bits) } });
|
||||
const T = @Int(.unsigned, @intCast(bits));
|
||||
|
||||
// for base = 2, 3, ..., min(maxInt(T),1024)
|
||||
var base: T = 1;
|
||||
|
|
|
|||
|
|
@ -6,10 +6,7 @@ const expect = std.testing.expect;
|
|||
pub fn signbit(x: anytype) bool {
|
||||
return switch (@typeInfo(@TypeOf(x))) {
|
||||
.int, .comptime_int => x,
|
||||
.float => |float| @as(@Type(.{ .int = .{
|
||||
.signedness = .signed,
|
||||
.bits = float.bits,
|
||||
} }), @bitCast(x)),
|
||||
.float => |float| @as(@Int(.signed, float.bits), @bitCast(x)),
|
||||
.comptime_float => @as(i128, @bitCast(@as(f128, x))), // any float type will do
|
||||
else => @compileError("std.math.signbit does not support " ++ @typeName(@TypeOf(x))),
|
||||
} < 0;
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ test sqrt_int {
|
|||
/// Returns the return type `sqrt` will return given an operand of type `T`.
|
||||
pub fn Sqrt(comptime T: type) type {
|
||||
return switch (@typeInfo(T)) {
|
||||
.int => |int| @Type(.{ .int = .{ .signedness = .unsigned, .bits = (int.bits + 1) / 2 } }),
|
||||
.int => |int| @Int(.unsigned, (int.bits + 1) / 2),
|
||||
else => T,
|
||||
};
|
||||
}
|
||||
|
|
|
|||
163
lib/std/mem.zig
163
lib/std/mem.zig
|
|
@ -846,17 +846,18 @@ fn Span(comptime T: type) type {
|
|||
return ?Span(optional_info.child);
|
||||
},
|
||||
.pointer => |ptr_info| {
|
||||
var new_ptr_info = ptr_info;
|
||||
switch (ptr_info.size) {
|
||||
.c => {
|
||||
new_ptr_info.sentinel_ptr = &@as(ptr_info.child, 0);
|
||||
new_ptr_info.is_allowzero = false;
|
||||
},
|
||||
.many => if (ptr_info.sentinel() == null) @compileError("invalid type given to std.mem.span: " ++ @typeName(T)),
|
||||
const new_sentinel: ?ptr_info.child = switch (ptr_info.size) {
|
||||
.one, .slice => @compileError("invalid type given to std.mem.span: " ++ @typeName(T)),
|
||||
}
|
||||
new_ptr_info.size = .slice;
|
||||
return @Type(.{ .pointer = new_ptr_info });
|
||||
.many => ptr_info.sentinel() orelse @compileError("invalid type given to std.mem.span: " ++ @typeName(T)),
|
||||
.c => 0,
|
||||
};
|
||||
return @Pointer(.slice, .{
|
||||
.@"const" = ptr_info.is_const,
|
||||
.@"volatile" = ptr_info.is_volatile,
|
||||
.@"allowzero" = ptr_info.is_allowzero and ptr_info.size != .c,
|
||||
.@"align" = ptr_info.alignment,
|
||||
.@"addrspace" = ptr_info.address_space,
|
||||
}, ptr_info.child, new_sentinel);
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
|
@ -910,45 +911,18 @@ fn SliceTo(comptime T: type, comptime end: std.meta.Elem(T)) type {
|
|||
return ?SliceTo(optional_info.child, end);
|
||||
},
|
||||
.pointer => |ptr_info| {
|
||||
var new_ptr_info = ptr_info;
|
||||
new_ptr_info.size = .slice;
|
||||
switch (ptr_info.size) {
|
||||
.one => switch (@typeInfo(ptr_info.child)) {
|
||||
.array => |array_info| {
|
||||
new_ptr_info.child = array_info.child;
|
||||
// The return type must only be sentinel terminated if we are guaranteed
|
||||
// to find the value searched for, which is only the case if it matches
|
||||
// the sentinel of the type passed.
|
||||
if (array_info.sentinel()) |s| {
|
||||
if (end == s) {
|
||||
new_ptr_info.sentinel_ptr = &end;
|
||||
} else {
|
||||
new_ptr_info.sentinel_ptr = null;
|
||||
}
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
},
|
||||
.many, .slice => {
|
||||
// The return type must only be sentinel terminated if we are guaranteed
|
||||
// to find the value searched for, which is only the case if it matches
|
||||
// the sentinel of the type passed.
|
||||
if (ptr_info.sentinel()) |s| {
|
||||
if (end == s) {
|
||||
new_ptr_info.sentinel_ptr = &end;
|
||||
} else {
|
||||
new_ptr_info.sentinel_ptr = null;
|
||||
}
|
||||
}
|
||||
},
|
||||
.c => {
|
||||
new_ptr_info.sentinel_ptr = &end;
|
||||
// C pointers are always allowzero, but we don't want the return type to be.
|
||||
assert(new_ptr_info.is_allowzero);
|
||||
new_ptr_info.is_allowzero = false;
|
||||
},
|
||||
}
|
||||
return @Type(.{ .pointer = new_ptr_info });
|
||||
const Elem = std.meta.Elem(T);
|
||||
const have_sentinel: bool = switch (ptr_info.size) {
|
||||
.one, .slice, .many => if (std.meta.sentinel(T)) |s| s == end else false,
|
||||
.c => false,
|
||||
};
|
||||
return @Pointer(.slice, .{
|
||||
.@"const" = ptr_info.is_const,
|
||||
.@"volatile" = ptr_info.is_volatile,
|
||||
.@"allowzero" = ptr_info.is_allowzero and ptr_info.size != .c,
|
||||
.@"align" = ptr_info.alignment,
|
||||
.@"addrspace" = ptr_info.address_space,
|
||||
}, Elem, if (have_sentinel) end else null);
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
|
@ -3951,38 +3925,25 @@ test reverse {
|
|||
}
|
||||
}
|
||||
fn ReverseIterator(comptime T: type) type {
|
||||
const Pointer = blk: {
|
||||
switch (@typeInfo(T)) {
|
||||
.pointer => |ptr_info| switch (ptr_info.size) {
|
||||
.one => switch (@typeInfo(ptr_info.child)) {
|
||||
.array => |array_info| {
|
||||
var new_ptr_info = ptr_info;
|
||||
new_ptr_info.size = .many;
|
||||
new_ptr_info.child = array_info.child;
|
||||
new_ptr_info.sentinel_ptr = array_info.sentinel_ptr;
|
||||
break :blk @Type(.{ .pointer = new_ptr_info });
|
||||
},
|
||||
else => {},
|
||||
},
|
||||
.slice => {
|
||||
var new_ptr_info = ptr_info;
|
||||
new_ptr_info.size = .many;
|
||||
break :blk @Type(.{ .pointer = new_ptr_info });
|
||||
},
|
||||
else => {},
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
@compileError("expected slice or pointer to array, found '" ++ @typeName(T) ++ "'");
|
||||
const ptr = switch (@typeInfo(T)) {
|
||||
.pointer => |ptr| ptr,
|
||||
else => @compileError("expected slice or pointer to array, found '" ++ @typeName(T) ++ "'"),
|
||||
};
|
||||
const Element = std.meta.Elem(Pointer);
|
||||
const ElementPointer = @Type(.{ .pointer = ptr: {
|
||||
var ptr = @typeInfo(Pointer).pointer;
|
||||
ptr.size = .one;
|
||||
ptr.child = Element;
|
||||
ptr.sentinel_ptr = null;
|
||||
break :ptr ptr;
|
||||
} });
|
||||
switch (ptr.size) {
|
||||
.slice => {},
|
||||
.one => if (@typeInfo(ptr.child) != .array) @compileError("expected slice or pointer to array, found '" ++ @typeName(T) ++ "'"),
|
||||
.many, .c => @compileError("expected slice or pointer to array, found '" ++ @typeName(T) ++ "'"),
|
||||
}
|
||||
const Element = std.meta.Elem(T);
|
||||
const attrs: std.builtin.Type.Pointer.Attributes = .{
|
||||
.@"const" = ptr.is_const,
|
||||
.@"volatile" = ptr.is_volatile,
|
||||
.@"allowzero" = ptr.is_allowzero,
|
||||
.@"align" = ptr.alignment,
|
||||
.@"addrspace" = ptr.address_space,
|
||||
};
|
||||
const Pointer = @Pointer(.many, attrs, Element, std.meta.sentinel(T));
|
||||
const ElementPointer = @Pointer(.one, attrs, Element, null);
|
||||
return struct {
|
||||
ptr: Pointer,
|
||||
index: usize,
|
||||
|
|
@ -4342,19 +4303,14 @@ fn CopyPtrAttrs(
|
|||
comptime size: std.builtin.Type.Pointer.Size,
|
||||
comptime child: type,
|
||||
) type {
|
||||
const info = @typeInfo(source).pointer;
|
||||
return @Type(.{
|
||||
.pointer = .{
|
||||
.size = size,
|
||||
.is_const = info.is_const,
|
||||
.is_volatile = info.is_volatile,
|
||||
.is_allowzero = info.is_allowzero,
|
||||
.alignment = info.alignment,
|
||||
.address_space = info.address_space,
|
||||
.child = child,
|
||||
.sentinel_ptr = null,
|
||||
},
|
||||
});
|
||||
const ptr = @typeInfo(source).pointer;
|
||||
return @Pointer(size, .{
|
||||
.@"const" = ptr.is_const,
|
||||
.@"volatile" = ptr.is_volatile,
|
||||
.@"allowzero" = ptr.is_allowzero,
|
||||
.@"align" = ptr.alignment,
|
||||
.@"addrspace" = ptr.address_space,
|
||||
}, child, null);
|
||||
}
|
||||
|
||||
fn AsBytesReturnType(comptime P: type) type {
|
||||
|
|
@ -4936,19 +4892,14 @@ test "freeing empty string with null-terminated sentinel" {
|
|||
/// Returns a slice with the given new alignment,
|
||||
/// all other pointer attributes copied from `AttributeSource`.
|
||||
fn AlignedSlice(comptime AttributeSource: type, comptime new_alignment: usize) type {
|
||||
const info = @typeInfo(AttributeSource).pointer;
|
||||
return @Type(.{
|
||||
.pointer = .{
|
||||
.size = .slice,
|
||||
.is_const = info.is_const,
|
||||
.is_volatile = info.is_volatile,
|
||||
.is_allowzero = info.is_allowzero,
|
||||
.alignment = new_alignment,
|
||||
.address_space = info.address_space,
|
||||
.child = info.child,
|
||||
.sentinel_ptr = null,
|
||||
},
|
||||
});
|
||||
const ptr = @typeInfo(AttributeSource).pointer;
|
||||
return @Pointer(.slice, .{
|
||||
.@"const" = ptr.is_const,
|
||||
.@"volatile" = ptr.is_volatile,
|
||||
.@"allowzero" = ptr.is_allowzero,
|
||||
.@"align" = new_alignment,
|
||||
.@"addrspace" = ptr.address_space,
|
||||
}, ptr.child, null);
|
||||
}
|
||||
|
||||
/// Returns the largest slice in the given bytes that conforms to the new alignment,
|
||||
|
|
|
|||
188
lib/std/meta.zig
188
lib/std/meta.zig
|
|
@ -171,58 +171,34 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
|
|||
switch (@typeInfo(T)) {
|
||||
.pointer => |info| switch (info.size) {
|
||||
.one => switch (@typeInfo(info.child)) {
|
||||
.array => |array_info| return @Type(.{
|
||||
.pointer = .{
|
||||
.size = info.size,
|
||||
.is_const = info.is_const,
|
||||
.is_volatile = info.is_volatile,
|
||||
.alignment = info.alignment,
|
||||
.address_space = info.address_space,
|
||||
.child = @Type(.{
|
||||
.array = .{
|
||||
.len = array_info.len,
|
||||
.child = array_info.child,
|
||||
.sentinel_ptr = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
|
||||
},
|
||||
}),
|
||||
.is_allowzero = info.is_allowzero,
|
||||
.sentinel_ptr = info.sentinel_ptr,
|
||||
},
|
||||
}),
|
||||
.array => |array_info| return @Pointer(.one, .{
|
||||
.@"const" = info.is_const,
|
||||
.@"volatile" = info.is_volatile,
|
||||
.@"allowzero" = info.is_allowzero,
|
||||
.@"align" = info.alignment,
|
||||
.@"addrspace" = info.address_space,
|
||||
}, [array_info.len:sentinel_val]array_info.child, null),
|
||||
else => {},
|
||||
},
|
||||
.many, .slice => return @Type(.{
|
||||
.pointer = .{
|
||||
.size = info.size,
|
||||
.is_const = info.is_const,
|
||||
.is_volatile = info.is_volatile,
|
||||
.alignment = info.alignment,
|
||||
.address_space = info.address_space,
|
||||
.child = info.child,
|
||||
.is_allowzero = info.is_allowzero,
|
||||
.sentinel_ptr = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
|
||||
},
|
||||
}),
|
||||
.many, .slice => |size| return @Pointer(size, .{
|
||||
.@"const" = info.is_const,
|
||||
.@"volatile" = info.is_volatile,
|
||||
.@"allowzero" = info.is_allowzero,
|
||||
.@"align" = info.alignment,
|
||||
.@"addrspace" = info.address_space,
|
||||
}, info.child, sentinel_val),
|
||||
else => {},
|
||||
},
|
||||
.optional => |info| switch (@typeInfo(info.child)) {
|
||||
.pointer => |ptr_info| switch (ptr_info.size) {
|
||||
.many => return @Type(.{
|
||||
.optional = .{
|
||||
.child = @Type(.{
|
||||
.pointer = .{
|
||||
.size = ptr_info.size,
|
||||
.is_const = ptr_info.is_const,
|
||||
.is_volatile = ptr_info.is_volatile,
|
||||
.alignment = ptr_info.alignment,
|
||||
.address_space = ptr_info.address_space,
|
||||
.child = ptr_info.child,
|
||||
.is_allowzero = ptr_info.is_allowzero,
|
||||
.sentinel_ptr = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
|
||||
},
|
||||
}),
|
||||
},
|
||||
}),
|
||||
.many => return ?@Pointer(.many, .{
|
||||
.@"const" = ptr_info.is_const,
|
||||
.@"volatile" = ptr_info.is_volatile,
|
||||
.@"allowzero" = ptr_info.is_allowzero,
|
||||
.@"align" = ptr_info.alignment,
|
||||
.@"addrspace" = ptr_info.address_space,
|
||||
.child = ptr_info.child,
|
||||
}, ptr_info.child, sentinel_val),
|
||||
else => {},
|
||||
},
|
||||
else => {},
|
||||
|
|
@ -487,46 +463,22 @@ test tags {
|
|||
|
||||
/// Returns an enum with a variant named after each field of `T`.
|
||||
pub fn FieldEnum(comptime T: type) type {
|
||||
const field_infos = fields(T);
|
||||
const field_names = fieldNames(T);
|
||||
|
||||
if (field_infos.len == 0) {
|
||||
return @Type(.{
|
||||
.@"enum" = .{
|
||||
.tag_type = u0,
|
||||
.fields = &.{},
|
||||
.decls = &.{},
|
||||
.is_exhaustive = true,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
if (@typeInfo(T) == .@"union") {
|
||||
if (@typeInfo(T).@"union".tag_type) |tag_type| {
|
||||
for (std.enums.values(tag_type), 0..) |v, i| {
|
||||
switch (@typeInfo(T)) {
|
||||
.@"union" => |@"union"| if (@"union".tag_type) |EnumTag| {
|
||||
for (std.enums.values(EnumTag), 0..) |v, i| {
|
||||
if (@intFromEnum(v) != i) break; // enum values not consecutive
|
||||
if (!std.mem.eql(u8, @tagName(v), field_infos[i].name)) break; // fields out of order
|
||||
if (!std.mem.eql(u8, @tagName(v), field_names[i])) break; // fields out of order
|
||||
} else {
|
||||
return tag_type;
|
||||
return EnumTag;
|
||||
}
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
||||
var enumFields: [field_infos.len]std.builtin.Type.EnumField = undefined;
|
||||
var decls = [_]std.builtin.Type.Declaration{};
|
||||
inline for (field_infos, 0..) |field, i| {
|
||||
enumFields[i] = .{
|
||||
.name = field.name,
|
||||
.value = i,
|
||||
};
|
||||
}
|
||||
return @Type(.{
|
||||
.@"enum" = .{
|
||||
.tag_type = std.math.IntFittingRange(0, field_infos.len - 1),
|
||||
.fields = &enumFields,
|
||||
.decls = &decls,
|
||||
.is_exhaustive = true,
|
||||
},
|
||||
});
|
||||
const IntTag = std.math.IntFittingRange(0, field_names.len -| 1);
|
||||
return @Enum(IntTag, .exhaustive, field_names, &std.simd.iota(IntTag, field_names.len));
|
||||
}
|
||||
|
||||
fn expectEqualEnum(expected: anytype, actual: @TypeOf(expected)) !void {
|
||||
|
|
@ -583,20 +535,11 @@ test FieldEnum {
|
|||
}
|
||||
|
||||
pub fn DeclEnum(comptime T: type) type {
|
||||
const fieldInfos = std.meta.declarations(T);
|
||||
var enumDecls: [fieldInfos.len]std.builtin.Type.EnumField = undefined;
|
||||
var decls = [_]std.builtin.Type.Declaration{};
|
||||
inline for (fieldInfos, 0..) |field, i| {
|
||||
enumDecls[i] = .{ .name = field.name, .value = i };
|
||||
}
|
||||
return @Type(.{
|
||||
.@"enum" = .{
|
||||
.tag_type = std.math.IntFittingRange(0, if (fieldInfos.len == 0) 0 else fieldInfos.len - 1),
|
||||
.fields = &enumDecls,
|
||||
.decls = &decls,
|
||||
.is_exhaustive = true,
|
||||
},
|
||||
});
|
||||
const decls = declarations(T);
|
||||
var names: [decls.len][]const u8 = undefined;
|
||||
for (&names, decls) |*name, decl| name.* = decl.name;
|
||||
const IntTag = std.math.IntFittingRange(0, decls.len -| 1);
|
||||
return @Enum(IntTag, .exhaustive, &names, &std.simd.iota(IntTag, decls.len));
|
||||
}
|
||||
|
||||
test DeclEnum {
|
||||
|
|
@ -868,25 +811,26 @@ pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const De
|
|||
}
|
||||
}
|
||||
|
||||
/// Deprecated: use @Int
|
||||
pub fn Int(comptime signedness: std.builtin.Signedness, comptime bit_count: u16) type {
|
||||
return @Type(.{
|
||||
.int = .{
|
||||
.signedness = signedness,
|
||||
.bits = bit_count,
|
||||
},
|
||||
});
|
||||
return @Int(signedness, bit_count);
|
||||
}
|
||||
|
||||
pub fn Float(comptime bit_count: u8) type {
|
||||
return @Type(.{
|
||||
.float = .{ .bits = bit_count },
|
||||
});
|
||||
return switch (bit_count) {
|
||||
16 => f16,
|
||||
32 => f32,
|
||||
64 => f64,
|
||||
80 => f80,
|
||||
128 => f128,
|
||||
else => @compileError("invalid float bit count"),
|
||||
};
|
||||
}
|
||||
|
||||
test Float {
|
||||
try testing.expectEqual(f16, Float(16));
|
||||
try testing.expectEqual(f32, Float(32));
|
||||
try testing.expectEqual(f64, Float(64));
|
||||
try testing.expectEqual(f80, Float(80));
|
||||
try testing.expectEqual(f128, Float(128));
|
||||
}
|
||||
|
||||
|
|
@ -912,42 +856,14 @@ pub fn ArgsTuple(comptime Function: type) type {
|
|||
argument_field_list[i] = T;
|
||||
}
|
||||
|
||||
return CreateUniqueTuple(argument_field_list.len, argument_field_list);
|
||||
return Tuple(&argument_field_list);
|
||||
}
|
||||
|
||||
/// For a given anonymous list of types, returns a new tuple type
|
||||
/// with those types as fields.
|
||||
/// Deprecated; use `@Tuple` instead.
|
||||
///
|
||||
/// Examples:
|
||||
/// - `Tuple(&[_]type {})` ⇒ `tuple { }`
|
||||
/// - `Tuple(&[_]type {f32})` ⇒ `tuple { f32 }`
|
||||
/// - `Tuple(&[_]type {f32,u32})` ⇒ `tuple { f32, u32 }`
|
||||
/// To be removed after Zig 0.16.0 releases.
|
||||
pub fn Tuple(comptime types: []const type) type {
|
||||
return CreateUniqueTuple(types.len, types[0..types.len].*);
|
||||
}
|
||||
|
||||
fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type {
|
||||
var tuple_fields: [types.len]std.builtin.Type.StructField = undefined;
|
||||
inline for (types, 0..) |T, i| {
|
||||
@setEvalBranchQuota(10_000);
|
||||
var num_buf: [128]u8 = undefined;
|
||||
tuple_fields[i] = .{
|
||||
.name = std.fmt.bufPrintSentinel(&num_buf, "{d}", .{i}, 0) catch unreachable,
|
||||
.type = T,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf(T),
|
||||
};
|
||||
}
|
||||
|
||||
return @Type(.{
|
||||
.@"struct" = .{
|
||||
.is_tuple = true,
|
||||
.layout = .auto,
|
||||
.decls = &.{},
|
||||
.fields = &tuple_fields,
|
||||
},
|
||||
});
|
||||
return @Tuple(types);
|
||||
}
|
||||
|
||||
const TupleTester = struct {
|
||||
|
|
|
|||
|
|
@ -20,24 +20,16 @@ pub fn TrailerFlags(comptime Fields: type) type {
|
|||
|
||||
pub const ActiveFields = std.enums.EnumFieldStruct(FieldEnum, bool, false);
|
||||
pub const FieldValues = blk: {
|
||||
var fields: [bit_count]Type.StructField = undefined;
|
||||
for (@typeInfo(Fields).@"struct".fields, 0..) |struct_field, i| {
|
||||
fields[i] = Type.StructField{
|
||||
.name = struct_field.name,
|
||||
.type = ?struct_field.type,
|
||||
.default_value_ptr = &@as(?struct_field.type, null),
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf(?struct_field.type),
|
||||
};
|
||||
var field_names: [bit_count][]const u8 = undefined;
|
||||
var field_types: [bit_count]type = undefined;
|
||||
var field_attrs: [bit_count]std.builtin.Type.StructField.Attributes = undefined;
|
||||
for (@typeInfo(Fields).@"struct".fields, &field_names, &field_types, &field_attrs) |field, *new_name, *NewType, *new_attrs| {
|
||||
new_name.* = field.name;
|
||||
NewType.* = ?field.type;
|
||||
const default: ?field.type = null;
|
||||
new_attrs.* = .{ .default_value_ptr = &default };
|
||||
}
|
||||
break :blk @Type(.{
|
||||
.@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
},
|
||||
});
|
||||
break :blk @Struct(.auto, null, &field_names, &field_types, &field_attrs);
|
||||
};
|
||||
|
||||
pub const Self = @This();
|
||||
|
|
|
|||
|
|
@ -32,12 +32,17 @@ pub fn MultiArrayList(comptime T: type) type {
|
|||
const Elem = switch (@typeInfo(T)) {
|
||||
.@"struct" => T,
|
||||
.@"union" => |u| struct {
|
||||
pub const Bare = @Type(.{ .@"union" = .{
|
||||
.layout = u.layout,
|
||||
.tag_type = null,
|
||||
.fields = u.fields,
|
||||
.decls = &.{},
|
||||
} });
|
||||
pub const Bare = Bare: {
|
||||
var field_names: [u.fields.len][]const u8 = undefined;
|
||||
var field_types: [u.fields.len]type = undefined;
|
||||
var field_attrs: [u.fields.len]std.builtin.Type.UnionField.Attributes = undefined;
|
||||
for (u.fields, &field_names, &field_types, &field_attrs) |field, *name, *Type, *attrs| {
|
||||
name.* = field.name;
|
||||
Type.* = field.type;
|
||||
attrs.* = .{ .@"align" = field.alignment };
|
||||
}
|
||||
break :Bare @Union(u.layout, null, &field_names, &field_types, &field_attrs);
|
||||
};
|
||||
pub const Tag =
|
||||
u.tag_type orelse @compileError("MultiArrayList does not support untagged unions");
|
||||
tags: Tag,
|
||||
|
|
@ -609,20 +614,18 @@ pub fn MultiArrayList(comptime T: type) type {
|
|||
}
|
||||
|
||||
const Entry = entry: {
|
||||
var entry_fields: [fields.len]std.builtin.Type.StructField = undefined;
|
||||
for (&entry_fields, sizes.fields) |*entry_field, i| entry_field.* = .{
|
||||
.name = fields[i].name ++ "_ptr",
|
||||
.type = *fields[i].type,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = fields[i].is_comptime,
|
||||
.alignment = fields[i].alignment,
|
||||
};
|
||||
break :entry @Type(.{ .@"struct" = .{
|
||||
.layout = .@"extern",
|
||||
.fields = &entry_fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
} });
|
||||
var field_names: [fields.len][]const u8 = undefined;
|
||||
var field_types: [fields.len]type = undefined;
|
||||
var field_attrs: [fields.len]std.builtin.Type.StructField.Attributes = undefined;
|
||||
for (sizes.fields, &field_names, &field_types, &field_attrs) |i, *name, *Type, *attrs| {
|
||||
name.* = fields[i].name ++ "_ptr";
|
||||
Type.* = *fields[i].type;
|
||||
attrs.* = .{
|
||||
.@"comptime" = fields[i].is_comptime,
|
||||
.@"align" = fields[i].alignment,
|
||||
};
|
||||
}
|
||||
break :entry @Struct(.@"extern", null, &field_names, &field_types, &field_attrs);
|
||||
};
|
||||
/// This function is used in the debugger pretty formatters in tools/ to fetch the
|
||||
/// child field order and entry type to facilitate fancy debug printing for this type.
|
||||
|
|
@ -1023,23 +1026,9 @@ test "struct with many fields" {
|
|||
const ManyFields = struct {
|
||||
fn Type(count: comptime_int) type {
|
||||
@setEvalBranchQuota(50000);
|
||||
var fields: [count]std.builtin.Type.StructField = undefined;
|
||||
for (0..count) |i| {
|
||||
fields[i] = .{
|
||||
.name = std.fmt.comptimePrint("a{}", .{i}),
|
||||
.type = u32,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf(u32),
|
||||
};
|
||||
}
|
||||
const info: std.builtin.Type = .{ .@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
} };
|
||||
return @Type(info);
|
||||
var field_names: [count][]const u8 = undefined;
|
||||
for (&field_names, 0..) |*n, i| n.* = std.fmt.comptimePrint("a{d}", .{i});
|
||||
return @Struct(.@"extern", null, &field_names, &@splat(u32), &@splat(.{}));
|
||||
}
|
||||
|
||||
fn doTest(ally: std.mem.Allocator, count: comptime_int) !void {
|
||||
|
|
|
|||
|
|
@ -56,21 +56,6 @@ pub var argv: [][*:0]u8 = if (builtin.link_libc) undefined else switch (native_o
|
|||
else => undefined,
|
||||
};
|
||||
|
||||
/// Call from Windows-specific code if you already have a WTF-16LE encoded, null terminated string.
|
||||
/// Otherwise use `access`.
|
||||
pub fn accessW(path: [*:0]const u16) windows.GetFileAttributesError!void {
|
||||
const ret = try windows.GetFileAttributesW(path);
|
||||
if (ret != windows.INVALID_FILE_ATTRIBUTES) {
|
||||
return;
|
||||
}
|
||||
switch (windows.GetLastError()) {
|
||||
.FILE_NOT_FOUND => return error.FileNotFound,
|
||||
.PATH_NOT_FOUND => return error.FileNotFound,
|
||||
.ACCESS_DENIED => return error.AccessDenied,
|
||||
else => |err| return windows.unexpectedError(err),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn isGetFdPathSupportedOnTarget(os: std.Target.Os) bool {
|
||||
return switch (os.tag) {
|
||||
.windows,
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ pub fn syscall_fork() u64 {
|
|||
\\ 2:
|
||||
: [ret] "={o0}" (-> u64),
|
||||
: [number] "{g1}" (@intFromEnum(SYS.fork)),
|
||||
: .{ .memory = true, .icc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub fn syscall0(number: SYS) u64 {
|
||||
|
|
@ -53,7 +53,7 @@ pub fn syscall0(number: SYS) u64 {
|
|||
\\ 1:
|
||||
: [ret] "={o0}" (-> u64),
|
||||
: [number] "{g1}" (@intFromEnum(number)),
|
||||
: .{ .memory = true, .icc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub fn syscall1(number: SYS, arg1: u64) u64 {
|
||||
|
|
@ -66,7 +66,7 @@ pub fn syscall1(number: SYS, arg1: u64) u64 {
|
|||
: [ret] "={o0}" (-> u64),
|
||||
: [number] "{g1}" (@intFromEnum(number)),
|
||||
[arg1] "{o0}" (arg1),
|
||||
: .{ .memory = true, .icc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub fn syscall2(number: SYS, arg1: u64, arg2: u64) u64 {
|
||||
|
|
@ -80,7 +80,7 @@ pub fn syscall2(number: SYS, arg1: u64, arg2: u64) u64 {
|
|||
: [number] "{g1}" (@intFromEnum(number)),
|
||||
[arg1] "{o0}" (arg1),
|
||||
[arg2] "{o1}" (arg2),
|
||||
: .{ .memory = true, .icc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub fn syscall3(number: SYS, arg1: u64, arg2: u64, arg3: u64) u64 {
|
||||
|
|
@ -95,7 +95,7 @@ pub fn syscall3(number: SYS, arg1: u64, arg2: u64, arg3: u64) u64 {
|
|||
[arg1] "{o0}" (arg1),
|
||||
[arg2] "{o1}" (arg2),
|
||||
[arg3] "{o2}" (arg3),
|
||||
: .{ .memory = true, .icc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub fn syscall4(number: SYS, arg1: u64, arg2: u64, arg3: u64, arg4: u64) u64 {
|
||||
|
|
@ -111,7 +111,7 @@ pub fn syscall4(number: SYS, arg1: u64, arg2: u64, arg3: u64, arg4: u64) u64 {
|
|||
[arg2] "{o1}" (arg2),
|
||||
[arg3] "{o2}" (arg3),
|
||||
[arg4] "{o3}" (arg4),
|
||||
: .{ .memory = true, .icc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub fn syscall5(number: SYS, arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) u64 {
|
||||
|
|
@ -128,7 +128,7 @@ pub fn syscall5(number: SYS, arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u
|
|||
[arg3] "{o2}" (arg3),
|
||||
[arg4] "{o3}" (arg4),
|
||||
[arg5] "{o4}" (arg5),
|
||||
: .{ .memory = true, .icc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub fn syscall6(
|
||||
|
|
@ -154,7 +154,7 @@ pub fn syscall6(
|
|||
[arg4] "{o3}" (arg4),
|
||||
[arg5] "{o4}" (arg5),
|
||||
[arg6] "{o5}" (arg6),
|
||||
: .{ .memory = true, .icc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub fn clone() callconv(.naked) u64 {
|
||||
|
|
@ -220,7 +220,7 @@ pub fn restore_rt() callconv(.c) void {
|
|||
return asm volatile ("t 0x6d"
|
||||
:
|
||||
: [number] "{g1}" (@intFromEnum(SYS.rt_sigreturn)),
|
||||
: .{ .memory = true, .icc = true, .o0 = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
: .{ .memory = true, .xcc = true, .o0 = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o7 = true });
|
||||
}
|
||||
|
||||
pub const VDSO = struct {
|
||||
|
|
|
|||
|
|
@ -306,22 +306,6 @@ pub fn CreatePipe(rd: *HANDLE, wr: *HANDLE, sattr: *const SECURITY_ATTRIBUTES) C
|
|||
wr.* = write;
|
||||
}
|
||||
|
||||
pub fn CreateEventEx(attributes: ?*SECURITY_ATTRIBUTES, name: []const u8, flags: DWORD, desired_access: DWORD) !HANDLE {
|
||||
const nameW = try sliceToPrefixedFileW(null, name);
|
||||
return CreateEventExW(attributes, nameW.span().ptr, flags, desired_access);
|
||||
}
|
||||
|
||||
pub fn CreateEventExW(attributes: ?*SECURITY_ATTRIBUTES, nameW: ?LPCWSTR, flags: DWORD, desired_access: DWORD) !HANDLE {
|
||||
const handle = kernel32.CreateEventExW(attributes, nameW, flags, desired_access);
|
||||
if (handle) |h| {
|
||||
return h;
|
||||
} else {
|
||||
switch (GetLastError()) {
|
||||
else => |err| return unexpectedError(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const DeviceIoControlError = error{
|
||||
AccessDenied,
|
||||
/// The volume does not contain a recognized file system. File system
|
||||
|
|
@ -598,10 +582,6 @@ pub fn CloseHandle(hObject: HANDLE) void {
|
|||
assert(ntdll.NtClose(hObject) == .SUCCESS);
|
||||
}
|
||||
|
||||
pub fn FindClose(hFindFile: HANDLE) void {
|
||||
assert(kernel32.FindClose(hFindFile) != 0);
|
||||
}
|
||||
|
||||
pub const ReadFileError = error{
|
||||
BrokenPipe,
|
||||
/// The specified network name is no longer available.
|
||||
|
|
@ -836,8 +816,11 @@ pub fn CreateSymbolicLink(
|
|||
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createsymboliclinkw
|
||||
var is_target_absolute = false;
|
||||
const final_target_path = target_path: {
|
||||
switch (getNamespacePrefix(u16, target_path)) {
|
||||
.none => switch (getUnprefixedPathType(u16, target_path)) {
|
||||
if (hasCommonNtPrefix(u16, target_path)) {
|
||||
// Already an NT path, no need to do anything to it
|
||||
break :target_path target_path;
|
||||
} else {
|
||||
switch (getWin32PathType(u16, target_path)) {
|
||||
// Rooted paths need to avoid getting put through wToPrefixedFileW
|
||||
// (and they are treated as relative in this context)
|
||||
// Note: It seems that rooted paths in symbolic links are relative to
|
||||
|
|
@ -849,10 +832,7 @@ pub fn CreateSymbolicLink(
|
|||
// Keep relative paths relative, but anything else needs to get NT-prefixed.
|
||||
else => if (!std.fs.path.isAbsoluteWindowsWtf16(target_path))
|
||||
break :target_path target_path,
|
||||
},
|
||||
// Already an NT path, no need to do anything to it
|
||||
.nt => break :target_path target_path,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
var prefixed_target_path = try wToPrefixedFileW(dir, target_path);
|
||||
// We do this after prefixing to ensure that drive-relative paths are treated as absolute
|
||||
|
|
@ -1104,21 +1084,135 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
|
|||
}
|
||||
}
|
||||
|
||||
pub const MoveFileError = error{ FileNotFound, AccessDenied, Unexpected };
|
||||
pub const RenameError = error{
|
||||
IsDir,
|
||||
NotDir,
|
||||
FileNotFound,
|
||||
NoDevice,
|
||||
AccessDenied,
|
||||
PipeBusy,
|
||||
PathAlreadyExists,
|
||||
Unexpected,
|
||||
NameTooLong,
|
||||
NetworkNotFound,
|
||||
AntivirusInterference,
|
||||
BadPathName,
|
||||
RenameAcrossMountPoints,
|
||||
} || UnexpectedError;
|
||||
|
||||
pub fn MoveFileEx(old_path: []const u8, new_path: []const u8, flags: DWORD) (MoveFileError || Wtf8ToPrefixedFileWError)!void {
|
||||
const old_path_w = try sliceToPrefixedFileW(null, old_path);
|
||||
const new_path_w = try sliceToPrefixedFileW(null, new_path);
|
||||
return MoveFileExW(old_path_w.span().ptr, new_path_w.span().ptr, flags);
|
||||
}
|
||||
pub fn RenameFile(
|
||||
/// May only be `null` if `old_path_w` is a fully-qualified absolute path.
|
||||
old_dir_fd: ?HANDLE,
|
||||
old_path_w: []const u16,
|
||||
/// May only be `null` if `new_path_w` is a fully-qualified absolute path,
|
||||
/// or if the file is not being moved to a different directory.
|
||||
new_dir_fd: ?HANDLE,
|
||||
new_path_w: []const u16,
|
||||
replace_if_exists: bool,
|
||||
) RenameError!void {
|
||||
const src_fd = OpenFile(old_path_w, .{
|
||||
.dir = old_dir_fd,
|
||||
.access_mask = SYNCHRONIZE | GENERIC_WRITE | DELETE,
|
||||
.creation = FILE_OPEN,
|
||||
.filter = .any, // This function is supposed to rename both files and directories.
|
||||
.follow_symlinks = false,
|
||||
}) catch |err| switch (err) {
|
||||
error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`.
|
||||
else => |e| return e,
|
||||
};
|
||||
defer CloseHandle(src_fd);
|
||||
|
||||
pub fn MoveFileExW(old_path: [*:0]const u16, new_path: [*:0]const u16, flags: DWORD) MoveFileError!void {
|
||||
if (kernel32.MoveFileExW(old_path, new_path, flags) == 0) {
|
||||
switch (GetLastError()) {
|
||||
.FILE_NOT_FOUND => return error.FileNotFound,
|
||||
.ACCESS_DENIED => return error.AccessDenied,
|
||||
else => |err| return unexpectedError(err),
|
||||
var rc: NTSTATUS = undefined;
|
||||
// FileRenameInformationEx has varying levels of support:
|
||||
// - FILE_RENAME_INFORMATION_EX requires >= win10_rs1
|
||||
// (INVALID_INFO_CLASS is returned if not supported)
|
||||
// - Requires the NTFS filesystem
|
||||
// (on filesystems like FAT32, INVALID_PARAMETER is returned)
|
||||
// - FILE_RENAME_POSIX_SEMANTICS requires >= win10_rs1
|
||||
// - FILE_RENAME_IGNORE_READONLY_ATTRIBUTE requires >= win10_rs5
|
||||
// (NOT_SUPPORTED is returned if a flag is unsupported)
|
||||
//
|
||||
// The strategy here is just to try using FileRenameInformationEx and fall back to
|
||||
// FileRenameInformation if the return value lets us know that some aspect of it is not supported.
|
||||
const need_fallback = need_fallback: {
|
||||
const struct_buf_len = @sizeOf(FILE_RENAME_INFORMATION_EX) + (PATH_MAX_WIDE * 2);
|
||||
var rename_info_buf: [struct_buf_len]u8 align(@alignOf(FILE_RENAME_INFORMATION_EX)) = undefined;
|
||||
const struct_len = @sizeOf(FILE_RENAME_INFORMATION_EX) + new_path_w.len * 2;
|
||||
if (struct_len > struct_buf_len) return error.NameTooLong;
|
||||
|
||||
const rename_info: *FILE_RENAME_INFORMATION_EX = @ptrCast(&rename_info_buf);
|
||||
var io_status_block: IO_STATUS_BLOCK = undefined;
|
||||
|
||||
var flags: ULONG = FILE_RENAME_POSIX_SEMANTICS | FILE_RENAME_IGNORE_READONLY_ATTRIBUTE;
|
||||
if (replace_if_exists) flags |= FILE_RENAME_REPLACE_IF_EXISTS;
|
||||
rename_info.* = .{
|
||||
.Flags = flags,
|
||||
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(new_path_w)) null else new_dir_fd,
|
||||
.FileNameLength = @intCast(new_path_w.len * 2), // already checked error.NameTooLong
|
||||
.FileName = undefined,
|
||||
};
|
||||
@memcpy((&rename_info.FileName).ptr, new_path_w);
|
||||
rc = ntdll.NtSetInformationFile(
|
||||
src_fd,
|
||||
&io_status_block,
|
||||
rename_info,
|
||||
@intCast(struct_len), // already checked for error.NameTooLong
|
||||
.FileRenameInformationEx,
|
||||
);
|
||||
switch (rc) {
|
||||
.SUCCESS => return,
|
||||
// The filesystem does not support FileDispositionInformationEx
|
||||
.INVALID_PARAMETER,
|
||||
// The operating system does not support FileDispositionInformationEx
|
||||
.INVALID_INFO_CLASS,
|
||||
// The operating system does not support one of the flags
|
||||
.NOT_SUPPORTED,
|
||||
=> break :need_fallback true,
|
||||
// For all other statuses, fall down to the switch below to handle them.
|
||||
else => break :need_fallback false,
|
||||
}
|
||||
};
|
||||
|
||||
if (need_fallback) {
|
||||
const struct_buf_len = @sizeOf(FILE_RENAME_INFORMATION) + (PATH_MAX_WIDE * 2);
|
||||
var rename_info_buf: [struct_buf_len]u8 align(@alignOf(FILE_RENAME_INFORMATION)) = undefined;
|
||||
const struct_len = @sizeOf(FILE_RENAME_INFORMATION) + new_path_w.len * 2;
|
||||
if (struct_len > struct_buf_len) return error.NameTooLong;
|
||||
|
||||
const rename_info: *FILE_RENAME_INFORMATION = @ptrCast(&rename_info_buf);
|
||||
var io_status_block: IO_STATUS_BLOCK = undefined;
|
||||
|
||||
rename_info.* = .{
|
||||
.Flags = @intFromBool(replace_if_exists),
|
||||
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(new_path_w)) null else new_dir_fd,
|
||||
.FileNameLength = @intCast(new_path_w.len * 2), // already checked error.NameTooLong
|
||||
.FileName = undefined,
|
||||
};
|
||||
@memcpy((&rename_info.FileName).ptr, new_path_w);
|
||||
|
||||
rc = ntdll.NtSetInformationFile(
|
||||
src_fd,
|
||||
&io_status_block,
|
||||
rename_info,
|
||||
@intCast(struct_len), // already checked for error.NameTooLong
|
||||
.FileRenameInformation,
|
||||
);
|
||||
}
|
||||
|
||||
switch (rc) {
|
||||
.SUCCESS => {},
|
||||
.INVALID_HANDLE => unreachable,
|
||||
.INVALID_PARAMETER => unreachable,
|
||||
.OBJECT_PATH_SYNTAX_BAD => unreachable,
|
||||
.ACCESS_DENIED => return error.AccessDenied,
|
||||
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
|
||||
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
|
||||
.NOT_SAME_DEVICE => return error.RenameAcrossMountPoints,
|
||||
.OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
|
||||
.DIRECTORY_NOT_EMPTY => return error.PathAlreadyExists,
|
||||
.FILE_IS_A_DIRECTORY => return error.IsDir,
|
||||
.NOT_A_DIRECTORY => return error.NotDir,
|
||||
else => return unexpectedStatus(rc),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1298,16 +1392,29 @@ pub fn GetFinalPathNameByHandle(
|
|||
},
|
||||
.Dos => {
|
||||
// parse the string to separate volume path from file path
|
||||
const expected_prefix = std.unicode.utf8ToUtf16LeStringLiteral("\\Device\\");
|
||||
const device_prefix = std.unicode.utf8ToUtf16LeStringLiteral("\\Device\\");
|
||||
|
||||
// TODO find out if a path can start with something besides `\Device\<volume name>`,
|
||||
// and if we need to handle it differently
|
||||
// (i.e. how to determine the start and end of the volume name in that case)
|
||||
if (!mem.eql(u16, expected_prefix, final_path[0..expected_prefix.len])) return error.Unexpected;
|
||||
// We aren't entirely sure of the structure of the path returned by
|
||||
// QueryObjectName in all contexts/environments.
|
||||
// This code is written to cover the various cases that have
|
||||
// been encountered and solved appropriately. But note that there's
|
||||
// no easy way to verify that they have all been tackled!
|
||||
// (Unless you, the reader knows of one then please do action that!)
|
||||
if (!mem.startsWith(u16, final_path, device_prefix)) {
|
||||
// Wine seems to return NT namespaced paths starting with \??\ from QueryObjectName
|
||||
// (e.g. `\??\Z:\some\path\to\a\file.txt`), in which case we can just strip the
|
||||
// prefix to turn it into an absolute path.
|
||||
// https://github.com/ziglang/zig/issues/26029
|
||||
// https://bugs.winehq.org/show_bug.cgi?id=39569
|
||||
return ntToWin32Namespace(final_path, out_buffer) catch |err| switch (err) {
|
||||
error.NotNtPath => return error.Unexpected,
|
||||
error.NameTooLong => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
const file_path_begin_index = mem.indexOfPos(u16, final_path, expected_prefix.len, &[_]u16{'\\'}) orelse unreachable;
|
||||
const file_path_begin_index = mem.indexOfPos(u16, final_path, device_prefix.len, &[_]u16{'\\'}) orelse unreachable;
|
||||
const volume_name_u16 = final_path[0..file_path_begin_index];
|
||||
const device_name_u16 = volume_name_u16[expected_prefix.len..];
|
||||
const device_name_u16 = volume_name_u16[device_prefix.len..];
|
||||
const file_name_u16 = final_path[file_path_begin_index..];
|
||||
|
||||
// MUP is Multiple UNC Provider, and indicates that the path is a UNC
|
||||
|
|
@ -1515,30 +1622,6 @@ pub fn GetFileSizeEx(hFile: HANDLE) GetFileSizeError!u64 {
|
|||
return @as(u64, @bitCast(file_size));
|
||||
}
|
||||
|
||||
pub const GetFileAttributesError = error{
|
||||
FileNotFound,
|
||||
AccessDenied,
|
||||
Unexpected,
|
||||
};
|
||||
|
||||
pub fn GetFileAttributes(filename: []const u8) (GetFileAttributesError || Wtf8ToPrefixedFileWError)!DWORD {
|
||||
const filename_w = try sliceToPrefixedFileW(null, filename);
|
||||
return GetFileAttributesW(filename_w.span().ptr);
|
||||
}
|
||||
|
||||
pub fn GetFileAttributesW(lpFileName: [*:0]const u16) GetFileAttributesError!DWORD {
|
||||
const rc = kernel32.GetFileAttributesW(lpFileName);
|
||||
if (rc == INVALID_FILE_ATTRIBUTES) {
|
||||
switch (GetLastError()) {
|
||||
.FILE_NOT_FOUND => return error.FileNotFound,
|
||||
.PATH_NOT_FOUND => return error.FileNotFound,
|
||||
.ACCESS_DENIED => return error.AccessDenied,
|
||||
else => |err| return unexpectedError(err),
|
||||
}
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
pub fn getpeername(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 {
|
||||
return ws2_32.getpeername(s, name, @as(*i32, @ptrCast(namelen)));
|
||||
}
|
||||
|
|
@ -1657,6 +1740,7 @@ pub const NtFreeVirtualMemoryError = error{
|
|||
};
|
||||
|
||||
pub fn NtFreeVirtualMemory(hProcess: HANDLE, addr: ?*PVOID, size: *SIZE_T, free_type: ULONG) NtFreeVirtualMemoryError!void {
|
||||
// TODO: If the return value is .INVALID_PAGE_PROTECTION, call RtlFlushSecureMemoryCache and try again.
|
||||
return switch (ntdll.NtFreeVirtualMemory(hProcess, addr, size, free_type)) {
|
||||
.SUCCESS => return,
|
||||
.ACCESS_DENIED => NtFreeVirtualMemoryError.AccessDenied,
|
||||
|
|
@ -1665,20 +1749,6 @@ pub fn NtFreeVirtualMemory(hProcess: HANDLE, addr: ?*PVOID, size: *SIZE_T, free_
|
|||
};
|
||||
}
|
||||
|
||||
pub const VirtualAllocError = error{Unexpected};
|
||||
|
||||
pub fn VirtualAlloc(addr: ?LPVOID, size: usize, alloc_type: DWORD, flProtect: DWORD) VirtualAllocError!LPVOID {
|
||||
return kernel32.VirtualAlloc(addr, size, alloc_type, flProtect) orelse {
|
||||
switch (GetLastError()) {
|
||||
else => |err| return unexpectedError(err),
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn VirtualFree(lpAddress: ?LPVOID, dwSize: usize, dwFreeType: DWORD) void {
|
||||
assert(kernel32.VirtualFree(lpAddress, dwSize, dwFreeType) != 0);
|
||||
}
|
||||
|
||||
pub const VirtualProtectError = error{
|
||||
InvalidAddress,
|
||||
Unexpected,
|
||||
|
|
@ -1713,19 +1783,6 @@ pub fn VirtualProtectEx(handle: HANDLE, addr: ?LPVOID, size: SIZE_T, new_prot: D
|
|||
}
|
||||
}
|
||||
|
||||
pub const VirtualQueryError = error{Unexpected};
|
||||
|
||||
pub fn VirtualQuery(lpAddress: ?LPVOID, lpBuffer: PMEMORY_BASIC_INFORMATION, dwLength: SIZE_T) VirtualQueryError!SIZE_T {
|
||||
const rc = kernel32.VirtualQuery(lpAddress, lpBuffer, dwLength);
|
||||
if (rc == 0) {
|
||||
switch (GetLastError()) {
|
||||
else => |err| return unexpectedError(err),
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
pub const SetConsoleTextAttributeError = error{Unexpected};
|
||||
|
||||
pub fn SetConsoleTextAttribute(hConsoleOutput: HANDLE, wAttributes: WORD) SetConsoleTextAttributeError!void {
|
||||
|
|
@ -2088,7 +2145,7 @@ pub fn nanoSecondsToFileTime(ns: Io.Timestamp) FILETIME {
|
|||
/// Compares two WTF16 strings using the equivalent functionality of
|
||||
/// `RtlEqualUnicodeString` (with case insensitive comparison enabled).
|
||||
/// This function can be called on any target.
|
||||
pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool {
|
||||
pub fn eqlIgnoreCaseWtf16(a: []const u16, b: []const u16) bool {
|
||||
if (@inComptime() or builtin.os.tag != .windows) {
|
||||
// This function compares the strings code unit by code unit (aka u16-to-u16),
|
||||
// so any length difference implies inequality. In other words, there's no possible
|
||||
|
|
@ -2165,19 +2222,19 @@ pub fn eqlIgnoreCaseWtf8(a: []const u8, b: []const u8) bool {
|
|||
|
||||
fn testEqlIgnoreCase(comptime expect_eql: bool, comptime a: []const u8, comptime b: []const u8) !void {
|
||||
try std.testing.expectEqual(expect_eql, eqlIgnoreCaseWtf8(a, b));
|
||||
try std.testing.expectEqual(expect_eql, eqlIgnoreCaseWTF16(
|
||||
try std.testing.expectEqual(expect_eql, eqlIgnoreCaseWtf16(
|
||||
std.unicode.utf8ToUtf16LeStringLiteral(a),
|
||||
std.unicode.utf8ToUtf16LeStringLiteral(b),
|
||||
));
|
||||
|
||||
try comptime std.testing.expect(expect_eql == eqlIgnoreCaseWtf8(a, b));
|
||||
try comptime std.testing.expect(expect_eql == eqlIgnoreCaseWTF16(
|
||||
try comptime std.testing.expect(expect_eql == eqlIgnoreCaseWtf16(
|
||||
std.unicode.utf8ToUtf16LeStringLiteral(a),
|
||||
std.unicode.utf8ToUtf16LeStringLiteral(b),
|
||||
));
|
||||
}
|
||||
|
||||
test "eqlIgnoreCaseWTF16/Wtf8" {
|
||||
test "eqlIgnoreCaseWtf16/Wtf8" {
|
||||
try testEqlIgnoreCase(true, "\x01 a B Λ ɐ", "\x01 A b λ Ɐ");
|
||||
// does not do case-insensitive comparison for codepoints >= U+10000
|
||||
try testEqlIgnoreCase(false, "𐓏", "𐓷");
|
||||
|
|
@ -2308,158 +2365,309 @@ pub const Wtf16ToPrefixedFileWError = error{
|
|||
/// - . and space are not stripped from the end of relative paths (potential TODO)
|
||||
pub fn wToPrefixedFileW(dir: ?HANDLE, path: [:0]const u16) Wtf16ToPrefixedFileWError!PathSpace {
|
||||
const nt_prefix = [_]u16{ '\\', '?', '?', '\\' };
|
||||
switch (getNamespacePrefix(u16, path)) {
|
||||
// TODO: Figure out a way to design an API that can avoid the copy for .nt,
|
||||
if (hasCommonNtPrefix(u16, path)) {
|
||||
// TODO: Figure out a way to design an API that can avoid the copy for NT,
|
||||
// since it is always returned fully unmodified.
|
||||
.nt, .verbatim => {
|
||||
var path_space: PathSpace = undefined;
|
||||
path_space.data[0..nt_prefix.len].* = nt_prefix;
|
||||
const len_after_prefix = path.len - nt_prefix.len;
|
||||
@memcpy(path_space.data[nt_prefix.len..][0..len_after_prefix], path[nt_prefix.len..]);
|
||||
path_space.len = path.len;
|
||||
path_space.data[path_space.len] = 0;
|
||||
return path_space;
|
||||
},
|
||||
.local_device, .fake_verbatim => {
|
||||
var path_space: PathSpace = undefined;
|
||||
const path_byte_len = ntdll.RtlGetFullPathName_U(
|
||||
path.ptr,
|
||||
path_space.data.len * 2,
|
||||
&path_space.data,
|
||||
null,
|
||||
);
|
||||
if (path_byte_len == 0) {
|
||||
// TODO: This may not be the right error
|
||||
return error.BadPathName;
|
||||
} else if (path_byte_len / 2 > path_space.data.len) {
|
||||
return error.NameTooLong;
|
||||
}
|
||||
path_space.len = path_byte_len / 2;
|
||||
// Both prefixes will be normalized but retained, so all
|
||||
// we need to do now is replace them with the NT prefix
|
||||
path_space.data[0..nt_prefix.len].* = nt_prefix;
|
||||
return path_space;
|
||||
},
|
||||
.none => {
|
||||
const path_type = getUnprefixedPathType(u16, path);
|
||||
var path_space: PathSpace = undefined;
|
||||
relative: {
|
||||
if (path_type == .relative) {
|
||||
// TODO: Handle special case device names like COM1, AUX, NUL, CONIN$, CONOUT$, etc.
|
||||
// See https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
|
||||
|
||||
// TODO: Potentially strip all trailing . and space characters from the
|
||||
// end of the path. This is something that both RtlDosPathNameToNtPathName_U
|
||||
// and RtlGetFullPathName_U do. Technically, trailing . and spaces
|
||||
// are allowed, but such paths may not interact well with Windows (i.e.
|
||||
// files with these paths can't be deleted from explorer.exe, etc).
|
||||
// This could be something that normalizePath may want to do.
|
||||
|
||||
@memcpy(path_space.data[0..path.len], path);
|
||||
// Try to normalize, but if we get too many parent directories,
|
||||
// then we need to start over and use RtlGetFullPathName_U instead.
|
||||
path_space.len = normalizePath(u16, path_space.data[0..path.len]) catch |err| switch (err) {
|
||||
error.TooManyParentDirs => break :relative,
|
||||
};
|
||||
var path_space: PathSpace = undefined;
|
||||
path_space.data[0..nt_prefix.len].* = nt_prefix;
|
||||
const len_after_prefix = path.len - nt_prefix.len;
|
||||
@memcpy(path_space.data[nt_prefix.len..][0..len_after_prefix], path[nt_prefix.len..]);
|
||||
path_space.len = path.len;
|
||||
path_space.data[path_space.len] = 0;
|
||||
return path_space;
|
||||
} else {
|
||||
const path_type = getWin32PathType(u16, path);
|
||||
var path_space: PathSpace = undefined;
|
||||
if (path_type == .local_device) {
|
||||
switch (getLocalDevicePathType(u16, path)) {
|
||||
.verbatim => {
|
||||
path_space.data[0..nt_prefix.len].* = nt_prefix;
|
||||
const len_after_prefix = path.len - nt_prefix.len;
|
||||
@memcpy(path_space.data[nt_prefix.len..][0..len_after_prefix], path[nt_prefix.len..]);
|
||||
path_space.len = path.len;
|
||||
path_space.data[path_space.len] = 0;
|
||||
return path_space;
|
||||
}
|
||||
},
|
||||
.local_device, .fake_verbatim => {
|
||||
const path_byte_len = ntdll.RtlGetFullPathName_U(
|
||||
path.ptr,
|
||||
path_space.data.len * 2,
|
||||
&path_space.data,
|
||||
null,
|
||||
);
|
||||
if (path_byte_len == 0) {
|
||||
// TODO: This may not be the right error
|
||||
return error.BadPathName;
|
||||
} else if (path_byte_len / 2 > path_space.data.len) {
|
||||
return error.NameTooLong;
|
||||
}
|
||||
path_space.len = path_byte_len / 2;
|
||||
// Both prefixes will be normalized but retained, so all
|
||||
// we need to do now is replace them with the NT prefix
|
||||
path_space.data[0..nt_prefix.len].* = nt_prefix;
|
||||
return path_space;
|
||||
},
|
||||
}
|
||||
// We now know we are going to return an absolute NT path, so
|
||||
// we can unconditionally prefix it with the NT prefix.
|
||||
path_space.data[0..nt_prefix.len].* = nt_prefix;
|
||||
if (path_type == .root_local_device) {
|
||||
// `\\.` and `\\?` always get converted to `\??\` exactly, so
|
||||
// we can just stop here
|
||||
path_space.len = nt_prefix.len;
|
||||
}
|
||||
relative: {
|
||||
if (path_type == .relative) {
|
||||
// TODO: Handle special case device names like COM1, AUX, NUL, CONIN$, CONOUT$, etc.
|
||||
// See https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
|
||||
|
||||
// TODO: Potentially strip all trailing . and space characters from the
|
||||
// end of the path. This is something that both RtlDosPathNameToNtPathName_U
|
||||
// and RtlGetFullPathName_U do. Technically, trailing . and spaces
|
||||
// are allowed, but such paths may not interact well with Windows (i.e.
|
||||
// files with these paths can't be deleted from explorer.exe, etc).
|
||||
// This could be something that normalizePath may want to do.
|
||||
|
||||
@memcpy(path_space.data[0..path.len], path);
|
||||
// Try to normalize, but if we get too many parent directories,
|
||||
// then we need to start over and use RtlGetFullPathName_U instead.
|
||||
path_space.len = normalizePath(u16, path_space.data[0..path.len]) catch |err| switch (err) {
|
||||
error.TooManyParentDirs => break :relative,
|
||||
};
|
||||
path_space.data[path_space.len] = 0;
|
||||
return path_space;
|
||||
}
|
||||
const path_buf_offset = switch (path_type) {
|
||||
// UNC paths will always start with `\\`. However, we want to
|
||||
// end up with something like `\??\UNC\server\share`, so to get
|
||||
// RtlGetFullPathName to write into the spot we want the `server`
|
||||
// part to end up, we need to provide an offset such that
|
||||
// the `\\` part gets written where the `C\` of `UNC\` will be
|
||||
// in the final NT path.
|
||||
.unc_absolute => nt_prefix.len + 2,
|
||||
else => nt_prefix.len,
|
||||
}
|
||||
// We now know we are going to return an absolute NT path, so
|
||||
// we can unconditionally prefix it with the NT prefix.
|
||||
path_space.data[0..nt_prefix.len].* = nt_prefix;
|
||||
if (path_type == .root_local_device) {
|
||||
// `\\.` and `\\?` always get converted to `\??\` exactly, so
|
||||
// we can just stop here
|
||||
path_space.len = nt_prefix.len;
|
||||
path_space.data[path_space.len] = 0;
|
||||
return path_space;
|
||||
}
|
||||
const path_buf_offset = switch (path_type) {
|
||||
// UNC paths will always start with `\\`. However, we want to
|
||||
// end up with something like `\??\UNC\server\share`, so to get
|
||||
// RtlGetFullPathName to write into the spot we want the `server`
|
||||
// part to end up, we need to provide an offset such that
|
||||
// the `\\` part gets written where the `C\` of `UNC\` will be
|
||||
// in the final NT path.
|
||||
.unc_absolute => nt_prefix.len + 2,
|
||||
else => nt_prefix.len,
|
||||
};
|
||||
const buf_len: u32 = @intCast(path_space.data.len - path_buf_offset);
|
||||
const path_to_get: [:0]const u16 = path_to_get: {
|
||||
// If dir is null, then we don't need to bother with GetFinalPathNameByHandle because
|
||||
// RtlGetFullPathName_U will resolve relative paths against the CWD for us.
|
||||
if (path_type != .relative or dir == null) {
|
||||
break :path_to_get path;
|
||||
}
|
||||
// We can also skip GetFinalPathNameByHandle if the handle matches
|
||||
// the handle returned by fs.cwd()
|
||||
if (dir.? == std.fs.cwd().fd) {
|
||||
break :path_to_get path;
|
||||
}
|
||||
// At this point, we know we have a relative path that had too many
|
||||
// `..` components to be resolved by normalizePath, so we need to
|
||||
// convert it into an absolute path and let RtlGetFullPathName_U
|
||||
// canonicalize it. We do this by getting the path of the `dir`
|
||||
// and appending the relative path to it.
|
||||
var dir_path_buf: [PATH_MAX_WIDE:0]u16 = undefined;
|
||||
const dir_path = GetFinalPathNameByHandle(dir.?, .{}, &dir_path_buf) catch |err| switch (err) {
|
||||
// This mapping is not correct; it is actually expected
|
||||
// that calling GetFinalPathNameByHandle might return
|
||||
// error.UnrecognizedVolume, and in fact has been observed
|
||||
// in the wild. The problem is that wToPrefixedFileW was
|
||||
// never intended to make *any* OS syscall APIs. It's only
|
||||
// supposed to convert a string to one that is eligible to
|
||||
// be used in the ntdll syscalls.
|
||||
//
|
||||
// To solve this, this function needs to no longer call
|
||||
// GetFinalPathNameByHandle under any conditions, or the
|
||||
// calling function needs to get reworked to not need to
|
||||
// call this function.
|
||||
//
|
||||
// This may involve making breaking API changes.
|
||||
error.UnrecognizedVolume => return error.Unexpected,
|
||||
else => |e| return e,
|
||||
};
|
||||
const buf_len: u32 = @intCast(path_space.data.len - path_buf_offset);
|
||||
const path_to_get: [:0]const u16 = path_to_get: {
|
||||
// If dir is null, then we don't need to bother with GetFinalPathNameByHandle because
|
||||
// RtlGetFullPathName_U will resolve relative paths against the CWD for us.
|
||||
if (path_type != .relative or dir == null) {
|
||||
break :path_to_get path;
|
||||
}
|
||||
// We can also skip GetFinalPathNameByHandle if the handle matches
|
||||
// the handle returned by fs.cwd()
|
||||
if (dir.? == std.fs.cwd().fd) {
|
||||
break :path_to_get path;
|
||||
}
|
||||
// At this point, we know we have a relative path that had too many
|
||||
// `..` components to be resolved by normalizePath, so we need to
|
||||
// convert it into an absolute path and let RtlGetFullPathName_U
|
||||
// canonicalize it. We do this by getting the path of the `dir`
|
||||
// and appending the relative path to it.
|
||||
var dir_path_buf: [PATH_MAX_WIDE:0]u16 = undefined;
|
||||
const dir_path = GetFinalPathNameByHandle(dir.?, .{}, &dir_path_buf) catch |err| switch (err) {
|
||||
// This mapping is not correct; it is actually expected
|
||||
// that calling GetFinalPathNameByHandle might return
|
||||
// error.UnrecognizedVolume, and in fact has been observed
|
||||
// in the wild. The problem is that wToPrefixedFileW was
|
||||
// never intended to make *any* OS syscall APIs. It's only
|
||||
// supposed to convert a string to one that is eligible to
|
||||
// be used in the ntdll syscalls.
|
||||
//
|
||||
// To solve this, this function needs to no longer call
|
||||
// GetFinalPathNameByHandle under any conditions, or the
|
||||
// calling function needs to get reworked to not need to
|
||||
// call this function.
|
||||
//
|
||||
// This may involve making breaking API changes.
|
||||
error.UnrecognizedVolume => return error.Unexpected,
|
||||
else => |e| return e,
|
||||
};
|
||||
if (dir_path.len + 1 + path.len > PATH_MAX_WIDE) {
|
||||
return error.NameTooLong;
|
||||
}
|
||||
// We don't have to worry about potentially doubling up path separators
|
||||
// here since RtlGetFullPathName_U will handle canonicalizing it.
|
||||
dir_path_buf[dir_path.len] = '\\';
|
||||
@memcpy(dir_path_buf[dir_path.len + 1 ..][0..path.len], path);
|
||||
const full_len = dir_path.len + 1 + path.len;
|
||||
dir_path_buf[full_len] = 0;
|
||||
break :path_to_get dir_path_buf[0..full_len :0];
|
||||
};
|
||||
const path_byte_len = ntdll.RtlGetFullPathName_U(
|
||||
path_to_get.ptr,
|
||||
buf_len * 2,
|
||||
path_space.data[path_buf_offset..].ptr,
|
||||
null,
|
||||
);
|
||||
if (path_byte_len == 0) {
|
||||
// TODO: This may not be the right error
|
||||
return error.BadPathName;
|
||||
} else if (path_byte_len / 2 > buf_len) {
|
||||
if (dir_path.len + 1 + path.len > PATH_MAX_WIDE) {
|
||||
return error.NameTooLong;
|
||||
}
|
||||
path_space.len = path_buf_offset + (path_byte_len / 2);
|
||||
if (path_type == .unc_absolute) {
|
||||
// Now add in the UNC, the `C` should overwrite the first `\` of the
|
||||
// FullPathName, ultimately resulting in `\??\UNC\<the rest of the path>`
|
||||
std.debug.assert(path_space.data[path_buf_offset] == '\\');
|
||||
std.debug.assert(path_space.data[path_buf_offset + 1] == '\\');
|
||||
const unc = [_]u16{ 'U', 'N', 'C' };
|
||||
path_space.data[nt_prefix.len..][0..unc.len].* = unc;
|
||||
}
|
||||
return path_space;
|
||||
},
|
||||
// We don't have to worry about potentially doubling up path separators
|
||||
// here since RtlGetFullPathName_U will handle canonicalizing it.
|
||||
dir_path_buf[dir_path.len] = '\\';
|
||||
@memcpy(dir_path_buf[dir_path.len + 1 ..][0..path.len], path);
|
||||
const full_len = dir_path.len + 1 + path.len;
|
||||
dir_path_buf[full_len] = 0;
|
||||
break :path_to_get dir_path_buf[0..full_len :0];
|
||||
};
|
||||
const path_byte_len = ntdll.RtlGetFullPathName_U(
|
||||
path_to_get.ptr,
|
||||
buf_len * 2,
|
||||
path_space.data[path_buf_offset..].ptr,
|
||||
null,
|
||||
);
|
||||
if (path_byte_len == 0) {
|
||||
// TODO: This may not be the right error
|
||||
return error.BadPathName;
|
||||
} else if (path_byte_len / 2 > buf_len) {
|
||||
return error.NameTooLong;
|
||||
}
|
||||
path_space.len = path_buf_offset + (path_byte_len / 2);
|
||||
if (path_type == .unc_absolute) {
|
||||
// Now add in the UNC, the `C` should overwrite the first `\` of the
|
||||
// FullPathName, ultimately resulting in `\??\UNC\<the rest of the path>`
|
||||
std.debug.assert(path_space.data[path_buf_offset] == '\\');
|
||||
std.debug.assert(path_space.data[path_buf_offset + 1] == '\\');
|
||||
const unc = [_]u16{ 'U', 'N', 'C' };
|
||||
path_space.data[nt_prefix.len..][0..unc.len].* = unc;
|
||||
}
|
||||
return path_space;
|
||||
}
|
||||
}
|
||||
|
||||
pub const NamespacePrefix = enum {
|
||||
none,
|
||||
/// Similar to `RTL_PATH_TYPE`, but without the `UNKNOWN` path type.
|
||||
pub const Win32PathType = enum {
|
||||
/// `\\server\share\foo`
|
||||
unc_absolute,
|
||||
/// `C:\foo`
|
||||
drive_absolute,
|
||||
/// `C:foo`
|
||||
drive_relative,
|
||||
/// `\foo`
|
||||
rooted,
|
||||
/// `foo`
|
||||
relative,
|
||||
/// `\\.\foo`, `\\?\foo`
|
||||
local_device,
|
||||
/// `\\.`, `\\?`
|
||||
root_local_device,
|
||||
};
|
||||
|
||||
/// Get the path type of a Win32 namespace path.
|
||||
/// Similar to `RtlDetermineDosPathNameType_U`.
|
||||
/// If `T` is `u16`, then `path` should be encoded as WTF-16LE.
|
||||
pub fn getWin32PathType(comptime T: type, path: []const T) Win32PathType {
|
||||
if (path.len < 1) return .relative;
|
||||
|
||||
const windows_path = std.fs.path.PathType.windows;
|
||||
if (windows_path.isSep(T, path[0])) {
|
||||
// \x
|
||||
if (path.len < 2 or !windows_path.isSep(T, path[1])) return .rooted;
|
||||
// \\. or \\?
|
||||
if (path.len > 2 and (path[2] == mem.nativeToLittle(T, '.') or path[2] == mem.nativeToLittle(T, '?'))) {
|
||||
// exactly \\. or \\? with nothing trailing
|
||||
if (path.len == 3) return .root_local_device;
|
||||
// \\.\x or \\?\x
|
||||
if (windows_path.isSep(T, path[3])) return .local_device;
|
||||
}
|
||||
// \\x
|
||||
return .unc_absolute;
|
||||
} else {
|
||||
// Some choice has to be made about how non-ASCII code points as drive-letters are handled, since
|
||||
// path[0] is a different size for WTF-16 vs WTF-8, leading to a potential mismatch in classification
|
||||
// for a WTF-8 path and its WTF-16 equivalent. For example, `€:\` encoded in WTF-16 is three code
|
||||
// units `<0x20AC>:\` whereas `€:\` encoded as WTF-8 is 6 code units `<0xE2><0x82><0xAC>:\` so
|
||||
// checking path[0], path[1] and path[2] would not behave the same between WTF-8/WTF-16.
|
||||
//
|
||||
// `RtlDetermineDosPathNameType_U` exclusively deals with WTF-16 and considers
|
||||
// `€:\` a drive-absolute path, but code points that take two WTF-16 code units to encode get
|
||||
// classified as a relative path (e.g. with U+20000 as the drive-letter that'd be encoded
|
||||
// in WTF-16 as `<0xD840><0xDC00>:\` and be considered a relative path).
|
||||
//
|
||||
// The choice made here is to emulate the behavior of `RtlDetermineDosPathNameType_U` for both
|
||||
// WTF-16 and WTF-8. This is because, while unlikely and not supported by the Disk Manager GUI,
|
||||
// drive letters are not actually restricted to A-Z. Using `SetVolumeMountPointW` will allow you
|
||||
// to set any byte value as a drive letter, and going through `IOCTL_MOUNTMGR_CREATE_POINT` will
|
||||
// allow you to set any WTF-16 code unit as a drive letter.
|
||||
//
|
||||
// Non-A-Z drive letters don't interact well with most of Windows, but certain things do work, e.g.
|
||||
// `cd /D €:\` will work, filesystem functions still work, etc.
|
||||
//
|
||||
// The unfortunate part of this is that this makes handling WTF-8 more complicated as we can't
|
||||
// just check path[0], path[1], path[2].
|
||||
const colon_i: usize = switch (T) {
|
||||
u8 => i: {
|
||||
const code_point_len = std.unicode.utf8ByteSequenceLength(path[0]) catch return .relative;
|
||||
// Conveniently, 4-byte sequences in WTF-8 have the same starting code point
|
||||
// as 2-code-unit sequences in WTF-16.
|
||||
if (code_point_len > 3) return .relative;
|
||||
break :i code_point_len;
|
||||
},
|
||||
u16 => 1,
|
||||
else => @compileError("unsupported type: " ++ @typeName(T)),
|
||||
};
|
||||
// x
|
||||
if (path.len < colon_i + 1 or path[colon_i] != mem.nativeToLittle(T, ':')) return .relative;
|
||||
// x:\
|
||||
if (path.len > colon_i + 1 and windows_path.isSep(T, path[colon_i + 1])) return .drive_absolute;
|
||||
// x:
|
||||
return .drive_relative;
|
||||
}
|
||||
}
|
||||
|
||||
test getWin32PathType {
|
||||
try std.testing.expectEqual(.relative, getWin32PathType(u8, ""));
|
||||
try std.testing.expectEqual(.relative, getWin32PathType(u8, "x"));
|
||||
try std.testing.expectEqual(.relative, getWin32PathType(u8, "x\\"));
|
||||
|
||||
try std.testing.expectEqual(.root_local_device, getWin32PathType(u8, "//."));
|
||||
try std.testing.expectEqual(.root_local_device, getWin32PathType(u8, "/\\?"));
|
||||
try std.testing.expectEqual(.root_local_device, getWin32PathType(u8, "\\\\?"));
|
||||
|
||||
try std.testing.expectEqual(.local_device, getWin32PathType(u8, "//./x"));
|
||||
try std.testing.expectEqual(.local_device, getWin32PathType(u8, "/\\?\\x"));
|
||||
try std.testing.expectEqual(.local_device, getWin32PathType(u8, "\\\\?\\x"));
|
||||
// local device paths require a path separator after the root, otherwise it is considered a UNC path
|
||||
try std.testing.expectEqual(.unc_absolute, getWin32PathType(u8, "\\\\?x"));
|
||||
try std.testing.expectEqual(.unc_absolute, getWin32PathType(u8, "//.x"));
|
||||
|
||||
try std.testing.expectEqual(.unc_absolute, getWin32PathType(u8, "//"));
|
||||
try std.testing.expectEqual(.unc_absolute, getWin32PathType(u8, "\\\\x"));
|
||||
try std.testing.expectEqual(.unc_absolute, getWin32PathType(u8, "//x"));
|
||||
|
||||
try std.testing.expectEqual(.rooted, getWin32PathType(u8, "\\x"));
|
||||
try std.testing.expectEqual(.rooted, getWin32PathType(u8, "/"));
|
||||
|
||||
try std.testing.expectEqual(.drive_relative, getWin32PathType(u8, "x:"));
|
||||
try std.testing.expectEqual(.drive_relative, getWin32PathType(u8, "x:abc"));
|
||||
try std.testing.expectEqual(.drive_relative, getWin32PathType(u8, "x:a/b/c"));
|
||||
|
||||
try std.testing.expectEqual(.drive_absolute, getWin32PathType(u8, "x:\\"));
|
||||
try std.testing.expectEqual(.drive_absolute, getWin32PathType(u8, "x:\\abc"));
|
||||
try std.testing.expectEqual(.drive_absolute, getWin32PathType(u8, "x:/a/b/c"));
|
||||
|
||||
// Non-ASCII code point that is encoded as one WTF-16 code unit is considered a valid drive letter
|
||||
try std.testing.expectEqual(.drive_absolute, getWin32PathType(u8, "€:\\"));
|
||||
try std.testing.expectEqual(.drive_absolute, getWin32PathType(u16, std.unicode.wtf8ToWtf16LeStringLiteral("€:\\")));
|
||||
try std.testing.expectEqual(.drive_relative, getWin32PathType(u8, "€:"));
|
||||
try std.testing.expectEqual(.drive_relative, getWin32PathType(u16, std.unicode.wtf8ToWtf16LeStringLiteral("€:")));
|
||||
// But code points that are encoded as two WTF-16 code units are not
|
||||
try std.testing.expectEqual(.relative, getWin32PathType(u8, "\u{10000}:\\"));
|
||||
try std.testing.expectEqual(.relative, getWin32PathType(u16, std.unicode.wtf8ToWtf16LeStringLiteral("\u{10000}:\\")));
|
||||
}
|
||||
|
||||
/// Returns true if the path starts with `\??\`, which is indicative of an NT path
|
||||
/// but is not enough to fully distinguish between NT paths and Win32 paths, as
|
||||
/// `\??\` is not actually a distinct prefix but rather the path to a special virtual
|
||||
/// folder in the Object Manager.
|
||||
///
|
||||
/// For example, `\Device\HarddiskVolume2` and `\DosDevices\C:` are also NT paths but
|
||||
/// cannot be distinguished as such by their prefix.
|
||||
///
|
||||
/// So, inferring whether a path is an NT path or a Win32 path is usually a mistake;
|
||||
/// that information should instead be known ahead-of-time.
|
||||
///
|
||||
/// If `T` is `u16`, then `path` should be encoded as WTF-16LE.
|
||||
pub fn hasCommonNtPrefix(comptime T: type, path: []const T) bool {
|
||||
// Must be exactly \??\, forward slashes are not allowed
|
||||
const expected_wtf8_prefix = "\\??\\";
|
||||
const expected_prefix = switch (T) {
|
||||
u8 => expected_wtf8_prefix,
|
||||
u16 => std.unicode.wtf8ToWtf16LeStringLiteral(expected_wtf8_prefix),
|
||||
else => @compileError("unsupported type: " ++ @typeName(T)),
|
||||
};
|
||||
return mem.startsWith(T, path, expected_prefix);
|
||||
}
|
||||
|
||||
const LocalDevicePathType = enum {
|
||||
/// `\\.\` (path separators can be `\` or `/`)
|
||||
local_device,
|
||||
/// `\\?\`
|
||||
|
|
@ -2472,107 +2680,24 @@ pub const NamespacePrefix = enum {
|
|||
/// it will become `\??\C:\foo` [it will be canonicalized and the //?/ won't
|
||||
/// be treated as part of the final path])
|
||||
fake_verbatim,
|
||||
/// `\??\`
|
||||
nt,
|
||||
};
|
||||
|
||||
/// If `T` is `u16`, then `path` should be encoded as WTF-16LE.
|
||||
pub fn getNamespacePrefix(comptime T: type, path: []const T) NamespacePrefix {
|
||||
if (path.len < 4) return .none;
|
||||
var all_backslash = switch (mem.littleToNative(T, path[0])) {
|
||||
'\\' => true,
|
||||
'/' => false,
|
||||
else => return .none,
|
||||
};
|
||||
all_backslash = all_backslash and switch (mem.littleToNative(T, path[3])) {
|
||||
'\\' => true,
|
||||
'/' => false,
|
||||
else => return .none,
|
||||
};
|
||||
switch (mem.littleToNative(T, path[1])) {
|
||||
'?' => if (mem.littleToNative(T, path[2]) == '?' and all_backslash) return .nt else return .none,
|
||||
'\\' => {},
|
||||
'/' => all_backslash = false,
|
||||
else => return .none,
|
||||
}
|
||||
return switch (mem.littleToNative(T, path[2])) {
|
||||
'?' => if (all_backslash) .verbatim else .fake_verbatim,
|
||||
'.' => .local_device,
|
||||
else => .none,
|
||||
};
|
||||
}
|
||||
|
||||
test getNamespacePrefix {
|
||||
try std.testing.expectEqual(NamespacePrefix.none, getNamespacePrefix(u8, ""));
|
||||
try std.testing.expectEqual(NamespacePrefix.nt, getNamespacePrefix(u8, "\\??\\"));
|
||||
try std.testing.expectEqual(NamespacePrefix.none, getNamespacePrefix(u8, "/??/"));
|
||||
try std.testing.expectEqual(NamespacePrefix.none, getNamespacePrefix(u8, "/??\\"));
|
||||
try std.testing.expectEqual(NamespacePrefix.none, getNamespacePrefix(u8, "\\?\\\\"));
|
||||
try std.testing.expectEqual(NamespacePrefix.local_device, getNamespacePrefix(u8, "\\\\.\\"));
|
||||
try std.testing.expectEqual(NamespacePrefix.local_device, getNamespacePrefix(u8, "\\\\./"));
|
||||
try std.testing.expectEqual(NamespacePrefix.local_device, getNamespacePrefix(u8, "/\\./"));
|
||||
try std.testing.expectEqual(NamespacePrefix.local_device, getNamespacePrefix(u8, "//./"));
|
||||
try std.testing.expectEqual(NamespacePrefix.none, getNamespacePrefix(u8, "/.//"));
|
||||
try std.testing.expectEqual(NamespacePrefix.verbatim, getNamespacePrefix(u8, "\\\\?\\"));
|
||||
try std.testing.expectEqual(NamespacePrefix.fake_verbatim, getNamespacePrefix(u8, "\\/?\\"));
|
||||
try std.testing.expectEqual(NamespacePrefix.fake_verbatim, getNamespacePrefix(u8, "\\/?/"));
|
||||
try std.testing.expectEqual(NamespacePrefix.fake_verbatim, getNamespacePrefix(u8, "//?/"));
|
||||
}
|
||||
|
||||
pub const UnprefixedPathType = enum {
|
||||
unc_absolute,
|
||||
drive_absolute,
|
||||
drive_relative,
|
||||
rooted,
|
||||
relative,
|
||||
root_local_device,
|
||||
};
|
||||
|
||||
/// Get the path type of a path that is known to not have any namespace prefixes
|
||||
/// (`\\?\`, `\\.\`, `\??\`).
|
||||
/// If `T` is `u16`, then `path` should be encoded as WTF-16LE.
|
||||
pub fn getUnprefixedPathType(comptime T: type, path: []const T) UnprefixedPathType {
|
||||
if (path.len < 1) return .relative;
|
||||
|
||||
/// Only relevant for Win32 -> NT path conversion.
|
||||
/// Asserts `path` is of type `Win32PathType.local_device`.
|
||||
fn getLocalDevicePathType(comptime T: type, path: []const T) LocalDevicePathType {
|
||||
if (std.debug.runtime_safety) {
|
||||
std.debug.assert(getNamespacePrefix(T, path) == .none);
|
||||
std.debug.assert(getWin32PathType(T, path) == .local_device);
|
||||
}
|
||||
|
||||
const windows_path = std.fs.path.PathType.windows;
|
||||
if (windows_path.isSep(T, mem.littleToNative(T, path[0]))) {
|
||||
// \x
|
||||
if (path.len < 2 or !windows_path.isSep(T, mem.littleToNative(T, path[1]))) return .rooted;
|
||||
// exactly \\. or \\? with nothing trailing
|
||||
if (path.len == 3 and (mem.littleToNative(T, path[2]) == '.' or mem.littleToNative(T, path[2]) == '?')) return .root_local_device;
|
||||
// \\x
|
||||
return .unc_absolute;
|
||||
} else {
|
||||
// x
|
||||
if (path.len < 2 or mem.littleToNative(T, path[1]) != ':') return .relative;
|
||||
// x:\
|
||||
if (path.len > 2 and windows_path.isSep(T, mem.littleToNative(T, path[2]))) return .drive_absolute;
|
||||
// x:
|
||||
return .drive_relative;
|
||||
}
|
||||
}
|
||||
|
||||
test getUnprefixedPathType {
|
||||
try std.testing.expectEqual(UnprefixedPathType.relative, getUnprefixedPathType(u8, ""));
|
||||
try std.testing.expectEqual(UnprefixedPathType.relative, getUnprefixedPathType(u8, "x"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.relative, getUnprefixedPathType(u8, "x\\"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.root_local_device, getUnprefixedPathType(u8, "//."));
|
||||
try std.testing.expectEqual(UnprefixedPathType.root_local_device, getUnprefixedPathType(u8, "/\\?"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.root_local_device, getUnprefixedPathType(u8, "\\\\?"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.unc_absolute, getUnprefixedPathType(u8, "\\\\x"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.unc_absolute, getUnprefixedPathType(u8, "//x"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.rooted, getUnprefixedPathType(u8, "\\x"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.rooted, getUnprefixedPathType(u8, "/"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.drive_relative, getUnprefixedPathType(u8, "x:"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.drive_relative, getUnprefixedPathType(u8, "x:abc"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.drive_relative, getUnprefixedPathType(u8, "x:a/b/c"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.drive_absolute, getUnprefixedPathType(u8, "x:\\"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.drive_absolute, getUnprefixedPathType(u8, "x:\\abc"));
|
||||
try std.testing.expectEqual(UnprefixedPathType.drive_absolute, getUnprefixedPathType(u8, "x:/a/b/c"));
|
||||
const backslash = mem.nativeToLittle(T, '\\');
|
||||
const all_backslash = path[0] == backslash and
|
||||
path[1] == backslash and
|
||||
path[3] == backslash;
|
||||
return switch (path[2]) {
|
||||
mem.nativeToLittle(T, '?') => if (all_backslash) .verbatim else .fake_verbatim,
|
||||
mem.nativeToLittle(T, '.') => .local_device,
|
||||
else => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
/// Similar to `RtlNtPathNameToDosPathName` but does not do any heap allocation.
|
||||
|
|
@ -2589,30 +2714,25 @@ test getUnprefixedPathType {
|
|||
/// Supports in-place modification (`path` and `out` may refer to the same slice).
|
||||
pub fn ntToWin32Namespace(path: []const u16, out: []u16) error{ NameTooLong, NotNtPath }![]u16 {
|
||||
if (path.len > PATH_MAX_WIDE) return error.NameTooLong;
|
||||
if (!hasCommonNtPrefix(u16, path)) return error.NotNtPath;
|
||||
|
||||
const namespace_prefix = getNamespacePrefix(u16, path);
|
||||
switch (namespace_prefix) {
|
||||
.nt => {
|
||||
var dest_index: usize = 0;
|
||||
var after_prefix = path[4..]; // after the `\??\`
|
||||
// The prefix \??\UNC\ means this is a UNC path, in which case the
|
||||
// `\??\UNC\` should be replaced by `\\` (two backslashes)
|
||||
const is_unc = after_prefix.len >= 4 and
|
||||
eqlIgnoreCaseWTF16(after_prefix[0..3], std.unicode.utf8ToUtf16LeStringLiteral("UNC")) and
|
||||
std.fs.path.PathType.windows.isSep(u16, std.mem.littleToNative(u16, after_prefix[3]));
|
||||
const win32_len = path.len - @as(usize, if (is_unc) 6 else 4);
|
||||
if (out.len < win32_len) return error.NameTooLong;
|
||||
if (is_unc) {
|
||||
out[0] = comptime std.mem.nativeToLittle(u16, '\\');
|
||||
dest_index += 1;
|
||||
// We want to include the last `\` of `\??\UNC\`
|
||||
after_prefix = path[7..];
|
||||
}
|
||||
@memmove(out[dest_index..][0..after_prefix.len], after_prefix);
|
||||
return out[0..win32_len];
|
||||
},
|
||||
else => return error.NotNtPath,
|
||||
var dest_index: usize = 0;
|
||||
var after_prefix = path[4..]; // after the `\??\`
|
||||
// The prefix \??\UNC\ means this is a UNC path, in which case the
|
||||
// `\??\UNC\` should be replaced by `\\` (two backslashes)
|
||||
const is_unc = after_prefix.len >= 4 and
|
||||
eqlIgnoreCaseWtf16(after_prefix[0..3], std.unicode.utf8ToUtf16LeStringLiteral("UNC")) and
|
||||
std.fs.path.PathType.windows.isSep(u16, after_prefix[3]);
|
||||
const win32_len = path.len - @as(usize, if (is_unc) 6 else 4);
|
||||
if (out.len < win32_len) return error.NameTooLong;
|
||||
if (is_unc) {
|
||||
out[0] = comptime std.mem.nativeToLittle(u16, '\\');
|
||||
dest_index += 1;
|
||||
// We want to include the last `\` of `\??\UNC\`
|
||||
after_prefix = path[7..];
|
||||
}
|
||||
@memmove(out[dest_index..][0..after_prefix.len], after_prefix);
|
||||
return out[0..win32_len];
|
||||
}
|
||||
|
||||
test ntToWin32Namespace {
|
||||
|
|
@ -2628,16 +2748,6 @@ test ntToWin32Namespace {
|
|||
try std.testing.expectError(error.NameTooLong, ntToWin32Namespace(L("\\??\\C:\\test"), &too_small_buf));
|
||||
}
|
||||
|
||||
fn getFullPathNameW(path: [*:0]const u16, out: []u16) !usize {
|
||||
const result = kernel32.GetFullPathNameW(path, @as(u32, @intCast(out.len)), out.ptr, null);
|
||||
if (result == 0) {
|
||||
switch (GetLastError()) {
|
||||
else => |err| return unexpectedError(err),
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
inline fn MAKELANGID(p: c_ushort, s: c_ushort) LANGID {
|
||||
return (s << 10) | p;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -86,34 +86,11 @@ pub extern "kernel32" fn CreateNamedPipeW(
|
|||
lpSecurityAttributes: ?*const SECURITY_ATTRIBUTES,
|
||||
) callconv(.winapi) HANDLE;
|
||||
|
||||
pub extern "kernel32" fn FindFirstFileW(
|
||||
lpFileName: LPCWSTR,
|
||||
lpFindFileData: *WIN32_FIND_DATAW,
|
||||
) callconv(.winapi) HANDLE;
|
||||
|
||||
pub extern "kernel32" fn FindClose(
|
||||
hFindFile: HANDLE,
|
||||
) callconv(.winapi) BOOL;
|
||||
|
||||
// TODO: Wrapper around RtlGetFullPathName_UEx
|
||||
pub extern "kernel32" fn GetFullPathNameW(
|
||||
lpFileName: LPCWSTR,
|
||||
nBufferLength: DWORD,
|
||||
lpBuffer: LPWSTR,
|
||||
lpFilePart: ?*?LPWSTR,
|
||||
) callconv(.winapi) DWORD;
|
||||
|
||||
// TODO: Matches `STD_*_HANDLE` to peb().ProcessParameters.Standard*
|
||||
pub extern "kernel32" fn GetStdHandle(
|
||||
nStdHandle: DWORD,
|
||||
) callconv(.winapi) ?HANDLE;
|
||||
|
||||
pub extern "kernel32" fn MoveFileExW(
|
||||
lpExistingFileName: LPCWSTR,
|
||||
lpNewFileName: LPCWSTR,
|
||||
dwFlags: DWORD,
|
||||
) callconv(.winapi) BOOL;
|
||||
|
||||
// TODO: Wrapper around NtSetInformationFile + `FILE_POSITION_INFORMATION`.
|
||||
// `FILE_STANDARD_INFORMATION` is also used if dwMoveMethod is `FILE_END`
|
||||
pub extern "kernel32" fn SetFilePointerEx(
|
||||
|
|
@ -162,11 +139,6 @@ pub extern "kernel32" fn GetCurrentDirectoryW(
|
|||
lpBuffer: ?[*]WCHAR,
|
||||
) callconv(.winapi) DWORD;
|
||||
|
||||
// TODO: RtlDosPathNameToNtPathNameU_WithStatus + NtQueryAttributesFile.
|
||||
pub extern "kernel32" fn GetFileAttributesW(
|
||||
lpFileName: LPCWSTR,
|
||||
) callconv(.winapi) DWORD;
|
||||
|
||||
pub extern "kernel32" fn ReadFile(
|
||||
hFile: HANDLE,
|
||||
lpBuffer: LPVOID,
|
||||
|
|
@ -182,14 +154,6 @@ pub extern "kernel32" fn GetSystemDirectoryW(
|
|||
|
||||
// I/O - Kernel Objects
|
||||
|
||||
// TODO: Wrapper around NtCreateEvent.
|
||||
pub extern "kernel32" fn CreateEventExW(
|
||||
lpEventAttributes: ?*SECURITY_ATTRIBUTES,
|
||||
lpName: ?LPCWSTR,
|
||||
dwFlags: DWORD,
|
||||
dwDesiredAccess: DWORD,
|
||||
) callconv(.winapi) ?HANDLE;
|
||||
|
||||
// TODO: Wrapper around GetStdHandle + NtDuplicateObject.
|
||||
pub extern "kernel32" fn DuplicateHandle(
|
||||
hSourceProcessHandle: HANDLE,
|
||||
|
|
@ -318,9 +282,6 @@ pub extern "kernel32" fn GetExitCodeProcess(
|
|||
lpExitCode: *DWORD,
|
||||
) callconv(.winapi) BOOL;
|
||||
|
||||
// TODO: Already a wrapper for this, see `windows.GetCurrentProcess`.
|
||||
pub extern "kernel32" fn GetCurrentProcess() callconv(.winapi) HANDLE;
|
||||
|
||||
// TODO: Wrapper around RtlSetEnvironmentVar.
|
||||
pub extern "kernel32" fn SetEnvironmentVariableW(
|
||||
lpName: LPCWSTR,
|
||||
|
|
@ -465,29 +426,6 @@ pub extern "kernel32" fn HeapValidate(
|
|||
lpMem: ?*const anyopaque,
|
||||
) callconv(.winapi) BOOL;
|
||||
|
||||
// TODO: Wrapper around NtAllocateVirtualMemory.
|
||||
pub extern "kernel32" fn VirtualAlloc(
|
||||
lpAddress: ?LPVOID,
|
||||
dwSize: SIZE_T,
|
||||
flAllocationType: DWORD,
|
||||
flProtect: DWORD,
|
||||
) callconv(.winapi) ?LPVOID;
|
||||
|
||||
// TODO: Wrapper around NtFreeVirtualMemory.
|
||||
// If the return value is .INVALID_PAGE_PROTECTION, calls RtlFlushSecureMemoryCache and try again.
|
||||
pub extern "kernel32" fn VirtualFree(
|
||||
lpAddress: ?LPVOID,
|
||||
dwSize: SIZE_T,
|
||||
dwFreeType: DWORD,
|
||||
) callconv(.winapi) BOOL;
|
||||
|
||||
// TODO: Wrapper around NtQueryVirtualMemory.
|
||||
pub extern "kernel32" fn VirtualQuery(
|
||||
lpAddress: ?LPVOID,
|
||||
lpBuffer: PMEMORY_BASIC_INFORMATION,
|
||||
dwLength: SIZE_T,
|
||||
) callconv(.winapi) SIZE_T;
|
||||
|
||||
// TODO: Getter for peb.ProcessHeap
|
||||
pub extern "kernel32" fn GetProcessHeap() callconv(.winapi) ?HANDLE;
|
||||
|
||||
|
|
|
|||
|
|
@ -54,8 +54,7 @@ fn testToPrefixedFileOnlyOracle(comptime path: []const u8) !void {
|
|||
}
|
||||
|
||||
test "toPrefixedFileW" {
|
||||
if (builtin.os.tag != .windows)
|
||||
return;
|
||||
if (builtin.os.tag != .windows) return error.SkipZigTest;
|
||||
|
||||
// Most test cases come from https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
|
||||
// Note that these tests do not actually touch the filesystem or care about whether or not
|
||||
|
|
@ -237,3 +236,104 @@ test "removeDotDirs" {
|
|||
try testRemoveDotDirs("a\\b\\..\\", "a\\");
|
||||
try testRemoveDotDirs("a\\b\\..\\c", "a\\c");
|
||||
}
|
||||
|
||||
const RTL_PATH_TYPE = enum(c_int) {
|
||||
Unknown,
|
||||
UncAbsolute,
|
||||
DriveAbsolute,
|
||||
DriveRelative,
|
||||
Rooted,
|
||||
Relative,
|
||||
LocalDevice,
|
||||
RootLocalDevice,
|
||||
};
|
||||
|
||||
pub extern "ntdll" fn RtlDetermineDosPathNameType_U(
|
||||
Path: [*:0]const u16,
|
||||
) callconv(.winapi) RTL_PATH_TYPE;
|
||||
|
||||
test "getWin32PathType vs RtlDetermineDosPathNameType_U" {
|
||||
if (builtin.os.tag != .windows) return error.SkipZigTest;
|
||||
|
||||
var buf: std.ArrayList(u16) = .empty;
|
||||
defer buf.deinit(std.testing.allocator);
|
||||
|
||||
var wtf8_buf: std.ArrayList(u8) = .empty;
|
||||
defer wtf8_buf.deinit(std.testing.allocator);
|
||||
|
||||
var random = std.Random.DefaultPrng.init(std.testing.random_seed);
|
||||
const rand = random.random();
|
||||
|
||||
for (0..1000) |_| {
|
||||
buf.clearRetainingCapacity();
|
||||
const path = try getRandomWtf16Path(std.testing.allocator, &buf, rand);
|
||||
wtf8_buf.clearRetainingCapacity();
|
||||
const wtf8_len = std.unicode.calcWtf8Len(path);
|
||||
try wtf8_buf.ensureTotalCapacity(std.testing.allocator, wtf8_len);
|
||||
wtf8_buf.items.len = wtf8_len;
|
||||
std.debug.assert(std.unicode.wtf16LeToWtf8(wtf8_buf.items, path) == wtf8_len);
|
||||
|
||||
const windows_type = RtlDetermineDosPathNameType_U(path);
|
||||
const wtf16_type = windows.getWin32PathType(u16, path);
|
||||
const wtf8_type = windows.getWin32PathType(u8, wtf8_buf.items);
|
||||
|
||||
checkPathType(windows_type, wtf16_type) catch |err| {
|
||||
std.debug.print("expected type {}, got {} for path: {f}\n", .{ windows_type, wtf16_type, std.unicode.fmtUtf16Le(path) });
|
||||
std.debug.print("path bytes:\n", .{});
|
||||
std.debug.dumpHex(std.mem.sliceAsBytes(path));
|
||||
return err;
|
||||
};
|
||||
|
||||
if (wtf16_type != wtf8_type) {
|
||||
std.debug.print("type mismatch between wtf8: {} and wtf16: {} for path: {f}\n", .{ wtf8_type, wtf16_type, std.unicode.fmtUtf16Le(path) });
|
||||
std.debug.print("wtf-16 path bytes:\n", .{});
|
||||
std.debug.dumpHex(std.mem.sliceAsBytes(path));
|
||||
std.debug.print("wtf-8 path bytes:\n", .{});
|
||||
std.debug.dumpHex(std.mem.sliceAsBytes(wtf8_buf.items));
|
||||
return error.Wtf8Wtf16Mismatch;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn checkPathType(windows_type: RTL_PATH_TYPE, zig_type: windows.Win32PathType) !void {
|
||||
const expected_windows_type: RTL_PATH_TYPE = switch (zig_type) {
|
||||
.unc_absolute => .UncAbsolute,
|
||||
.drive_absolute => .DriveAbsolute,
|
||||
.drive_relative => .DriveRelative,
|
||||
.rooted => .Rooted,
|
||||
.relative => .Relative,
|
||||
.local_device => .LocalDevice,
|
||||
.root_local_device => .RootLocalDevice,
|
||||
};
|
||||
if (windows_type != expected_windows_type) return error.PathTypeMismatch;
|
||||
}
|
||||
|
||||
fn getRandomWtf16Path(allocator: std.mem.Allocator, buf: *std.ArrayList(u16), rand: std.Random) ![:0]const u16 {
|
||||
const Choice = enum {
|
||||
backslash,
|
||||
slash,
|
||||
control,
|
||||
printable,
|
||||
non_ascii,
|
||||
};
|
||||
|
||||
const choices = rand.uintAtMostBiased(u16, 32);
|
||||
|
||||
for (0..choices) |_| {
|
||||
const choice = rand.enumValue(Choice);
|
||||
const code_unit = switch (choice) {
|
||||
.backslash => '\\',
|
||||
.slash => '/',
|
||||
.control => switch (rand.uintAtMostBiased(u8, 0x20)) {
|
||||
0x20 => '\x7F',
|
||||
else => |b| b + 1, // no NUL
|
||||
},
|
||||
.printable => '!' + rand.uintAtMostBiased(u8, '~' - '!'),
|
||||
.non_ascii => rand.intRangeAtMostBiased(u16, 0x80, 0xFFFF),
|
||||
};
|
||||
try buf.append(allocator, std.mem.nativeToLittle(u16, code_unit));
|
||||
}
|
||||
|
||||
try buf.append(allocator, 0);
|
||||
return buf.items[0 .. buf.items.len - 1 :0];
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2470,8 +2470,8 @@ pub fn renameZ(old_path: [*:0]const u8, new_path: [*:0]const u8) RenameError!voi
|
|||
/// Same as `rename` except the parameters are null-terminated and WTF16LE encoded.
|
||||
/// Assumes target is Windows.
|
||||
pub fn renameW(old_path: [*:0]const u16, new_path: [*:0]const u16) RenameError!void {
|
||||
const flags = windows.MOVEFILE_REPLACE_EXISTING | windows.MOVEFILE_WRITE_THROUGH;
|
||||
return windows.MoveFileExW(old_path, new_path, flags);
|
||||
const cwd_handle = std.fs.cwd().fd;
|
||||
return windows.RenameFile(cwd_handle, mem.span(old_path), cwd_handle, mem.span(new_path), true);
|
||||
}
|
||||
|
||||
/// Change the name or location of a file based on an open directory handle.
|
||||
|
|
@ -2588,110 +2588,7 @@ pub fn renameatW(
|
|||
new_path_w: []const u16,
|
||||
ReplaceIfExists: windows.BOOLEAN,
|
||||
) RenameError!void {
|
||||
const src_fd = windows.OpenFile(old_path_w, .{
|
||||
.dir = old_dir_fd,
|
||||
.access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE,
|
||||
.creation = windows.FILE_OPEN,
|
||||
.filter = .any, // This function is supposed to rename both files and directories.
|
||||
.follow_symlinks = false,
|
||||
}) catch |err| switch (err) {
|
||||
error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`.
|
||||
else => |e| return e,
|
||||
};
|
||||
defer windows.CloseHandle(src_fd);
|
||||
|
||||
var rc: windows.NTSTATUS = undefined;
|
||||
// FileRenameInformationEx has varying levels of support:
|
||||
// - FILE_RENAME_INFORMATION_EX requires >= win10_rs1
|
||||
// (INVALID_INFO_CLASS is returned if not supported)
|
||||
// - Requires the NTFS filesystem
|
||||
// (on filesystems like FAT32, INVALID_PARAMETER is returned)
|
||||
// - FILE_RENAME_POSIX_SEMANTICS requires >= win10_rs1
|
||||
// - FILE_RENAME_IGNORE_READONLY_ATTRIBUTE requires >= win10_rs5
|
||||
// (NOT_SUPPORTED is returned if a flag is unsupported)
|
||||
//
|
||||
// The strategy here is just to try using FileRenameInformationEx and fall back to
|
||||
// FileRenameInformation if the return value lets us know that some aspect of it is not supported.
|
||||
const need_fallback = need_fallback: {
|
||||
const struct_buf_len = @sizeOf(windows.FILE_RENAME_INFORMATION_EX) + (max_path_bytes - 1);
|
||||
var rename_info_buf: [struct_buf_len]u8 align(@alignOf(windows.FILE_RENAME_INFORMATION_EX)) = undefined;
|
||||
const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION_EX) - 1 + new_path_w.len * 2;
|
||||
if (struct_len > struct_buf_len) return error.NameTooLong;
|
||||
|
||||
const rename_info: *windows.FILE_RENAME_INFORMATION_EX = @ptrCast(&rename_info_buf);
|
||||
var io_status_block: windows.IO_STATUS_BLOCK = undefined;
|
||||
|
||||
var flags: windows.ULONG = windows.FILE_RENAME_POSIX_SEMANTICS | windows.FILE_RENAME_IGNORE_READONLY_ATTRIBUTE;
|
||||
if (ReplaceIfExists == windows.TRUE) flags |= windows.FILE_RENAME_REPLACE_IF_EXISTS;
|
||||
rename_info.* = .{
|
||||
.Flags = flags,
|
||||
.RootDirectory = if (fs.path.isAbsoluteWindowsWtf16(new_path_w)) null else new_dir_fd,
|
||||
.FileNameLength = @intCast(new_path_w.len * 2), // already checked error.NameTooLong
|
||||
.FileName = undefined,
|
||||
};
|
||||
@memcpy((&rename_info.FileName).ptr, new_path_w);
|
||||
rc = windows.ntdll.NtSetInformationFile(
|
||||
src_fd,
|
||||
&io_status_block,
|
||||
rename_info,
|
||||
@intCast(struct_len), // already checked for error.NameTooLong
|
||||
.FileRenameInformationEx,
|
||||
);
|
||||
switch (rc) {
|
||||
.SUCCESS => return,
|
||||
// The filesystem does not support FileDispositionInformationEx
|
||||
.INVALID_PARAMETER,
|
||||
// The operating system does not support FileDispositionInformationEx
|
||||
.INVALID_INFO_CLASS,
|
||||
// The operating system does not support one of the flags
|
||||
.NOT_SUPPORTED,
|
||||
=> break :need_fallback true,
|
||||
// For all other statuses, fall down to the switch below to handle them.
|
||||
else => break :need_fallback false,
|
||||
}
|
||||
};
|
||||
|
||||
if (need_fallback) {
|
||||
const struct_buf_len = @sizeOf(windows.FILE_RENAME_INFORMATION) + (max_path_bytes - 1);
|
||||
var rename_info_buf: [struct_buf_len]u8 align(@alignOf(windows.FILE_RENAME_INFORMATION)) = undefined;
|
||||
const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION) - 1 + new_path_w.len * 2;
|
||||
if (struct_len > struct_buf_len) return error.NameTooLong;
|
||||
|
||||
const rename_info: *windows.FILE_RENAME_INFORMATION = @ptrCast(&rename_info_buf);
|
||||
var io_status_block: windows.IO_STATUS_BLOCK = undefined;
|
||||
|
||||
rename_info.* = .{
|
||||
.Flags = ReplaceIfExists,
|
||||
.RootDirectory = if (fs.path.isAbsoluteWindowsWtf16(new_path_w)) null else new_dir_fd,
|
||||
.FileNameLength = @intCast(new_path_w.len * 2), // already checked error.NameTooLong
|
||||
.FileName = undefined,
|
||||
};
|
||||
@memcpy((&rename_info.FileName).ptr, new_path_w);
|
||||
|
||||
rc = windows.ntdll.NtSetInformationFile(
|
||||
src_fd,
|
||||
&io_status_block,
|
||||
rename_info,
|
||||
@intCast(struct_len), // already checked for error.NameTooLong
|
||||
.FileRenameInformation,
|
||||
);
|
||||
}
|
||||
|
||||
switch (rc) {
|
||||
.SUCCESS => {},
|
||||
.INVALID_HANDLE => unreachable,
|
||||
.INVALID_PARAMETER => unreachable,
|
||||
.OBJECT_PATH_SYNTAX_BAD => unreachable,
|
||||
.ACCESS_DENIED => return error.AccessDenied,
|
||||
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
|
||||
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
|
||||
.NOT_SAME_DEVICE => return error.RenameAcrossMountPoints,
|
||||
.OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
|
||||
.DIRECTORY_NOT_EMPTY => return error.PathAlreadyExists,
|
||||
.FILE_IS_A_DIRECTORY => return error.IsDir,
|
||||
.NOT_A_DIRECTORY => return error.NotDir,
|
||||
else => return windows.unexpectedStatus(rc),
|
||||
}
|
||||
return windows.RenameFile(old_dir_fd, old_path_w, new_dir_fd, new_path_w, ReplaceIfExists != 0);
|
||||
}
|
||||
|
||||
/// On Windows, `sub_dir_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
||||
|
|
@ -4409,7 +4306,7 @@ pub fn mmap(
|
|||
/// Note that while POSIX allows unmapping a region in the middle of an existing mapping,
|
||||
/// Zig's munmap function does not, for two reasons:
|
||||
/// * It violates the Zig principle that resource deallocation must succeed.
|
||||
/// * The Windows function, VirtualFree, has this restriction.
|
||||
/// * The Windows function, NtFreeVirtualMemory, has this restriction.
|
||||
pub fn munmap(memory: []align(page_size_min) const u8) void {
|
||||
switch (errno(system.munmap(memory.ptr, memory.len))) {
|
||||
.SUCCESS => return,
|
||||
|
|
|
|||
|
|
@ -22,16 +22,17 @@ pub const GetCwdError = posix.GetCwdError;
|
|||
/// The result is a slice of `out_buffer`, from index `0`.
|
||||
/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
||||
/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
|
||||
pub fn getCwd(out_buffer: []u8) ![]u8 {
|
||||
pub fn getCwd(out_buffer: []u8) GetCwdError![]u8 {
|
||||
return posix.getcwd(out_buffer);
|
||||
}
|
||||
|
||||
pub const GetCwdAllocError = Allocator.Error || posix.GetCwdError;
|
||||
// Same as GetCwdError, minus error.NameTooLong + Allocator.Error
|
||||
pub const GetCwdAllocError = Allocator.Error || error{CurrentWorkingDirectoryUnlinked} || posix.UnexpectedError;
|
||||
|
||||
/// Caller must free the returned memory.
|
||||
/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
||||
/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
|
||||
pub fn getCwdAlloc(allocator: Allocator) ![]u8 {
|
||||
pub fn getCwdAlloc(allocator: Allocator) GetCwdAllocError![]u8 {
|
||||
// The use of max_path_bytes here is just a heuristic: most paths will fit
|
||||
// in stack_buf, avoiding an extra allocation in the common case.
|
||||
var stack_buf: [fs.max_path_bytes]u8 = undefined;
|
||||
|
|
@ -529,6 +530,7 @@ pub fn hasNonEmptyEnvVar(allocator: Allocator, key: []const u8) HasEnvVarError!b
|
|||
}
|
||||
|
||||
/// Windows-only. Get an environment variable with a null-terminated, WTF-16 encoded name.
|
||||
/// The returned slice points to memory in the PEB.
|
||||
///
|
||||
/// This function performs a Unicode-aware case-insensitive lookup using RtlEqualUnicodeString.
|
||||
///
|
||||
|
|
@ -564,7 +566,7 @@ pub fn getenvW(key: [*:0]const u16) ?[:0]const u16 {
|
|||
};
|
||||
|
||||
const this_key = key_value[0..equal_index];
|
||||
if (windows.eqlIgnoreCaseWTF16(key_slice, this_key)) {
|
||||
if (windows.eqlIgnoreCaseWtf16(key_slice, this_key)) {
|
||||
return key_value[equal_index + 1 ..];
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1227,7 +1227,7 @@ fn windowsCreateProcessPathExt(
|
|||
const app_name = app_buf.items[0..app_name_len];
|
||||
const ext_start = std.mem.lastIndexOfScalar(u16, app_name, '.') orelse break :unappended err;
|
||||
const ext = app_name[ext_start..];
|
||||
if (windows.eqlIgnoreCaseWTF16(ext, unicode.utf8ToUtf16LeStringLiteral(".EXE"))) {
|
||||
if (windows.eqlIgnoreCaseWtf16(ext, unicode.utf8ToUtf16LeStringLiteral(".EXE"))) {
|
||||
return error.UnrecoverableInvalidExe;
|
||||
}
|
||||
break :unappended err;
|
||||
|
|
@ -1278,7 +1278,7 @@ fn windowsCreateProcessPathExt(
|
|||
// On InvalidExe, if the extension of the app name is .exe then
|
||||
// it's treated as an unrecoverable error. Otherwise, it'll be
|
||||
// skipped as normal.
|
||||
if (windows.eqlIgnoreCaseWTF16(ext, unicode.utf8ToUtf16LeStringLiteral(".EXE"))) {
|
||||
if (windows.eqlIgnoreCaseWtf16(ext, unicode.utf8ToUtf16LeStringLiteral(".EXE"))) {
|
||||
return error.UnrecoverableInvalidExe;
|
||||
}
|
||||
continue;
|
||||
|
|
|
|||
|
|
@ -124,7 +124,7 @@ pub const Options = struct {
|
|||
|
||||
logFn: fn (
|
||||
comptime message_level: log.Level,
|
||||
comptime scope: @TypeOf(.enum_literal),
|
||||
comptime scope: @EnumLiteral(),
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) void = log.defaultLog,
|
||||
|
|
|
|||
|
|
@ -773,7 +773,6 @@ pub const EnvVar = enum {
|
|||
pub const SimpleComptimeReason = enum(u32) {
|
||||
// Evaluating at comptime because a builtin operand must be comptime-known.
|
||||
// These messages all mention a specific builtin.
|
||||
operand_Type,
|
||||
operand_setEvalBranchQuota,
|
||||
operand_setFloatMode,
|
||||
operand_branchHint,
|
||||
|
|
@ -809,25 +808,34 @@ pub const SimpleComptimeReason = enum(u32) {
|
|||
// Evaluating at comptime because types must be comptime-known.
|
||||
// Reasons other than `.type` are just more specific messages.
|
||||
type,
|
||||
int_signedness,
|
||||
int_bit_width,
|
||||
array_sentinel,
|
||||
array_length,
|
||||
pointer_size,
|
||||
pointer_attrs,
|
||||
pointer_sentinel,
|
||||
slice_sentinel,
|
||||
array_length,
|
||||
vector_length,
|
||||
error_set_contents,
|
||||
struct_fields,
|
||||
enum_fields,
|
||||
union_fields,
|
||||
function_ret_ty,
|
||||
function_parameters,
|
||||
fn_ret_ty,
|
||||
fn_param_types,
|
||||
fn_param_attrs,
|
||||
fn_attrs,
|
||||
struct_layout,
|
||||
struct_field_names,
|
||||
struct_field_types,
|
||||
struct_field_attrs,
|
||||
union_layout,
|
||||
union_field_names,
|
||||
union_field_types,
|
||||
union_field_attrs,
|
||||
tuple_field_types,
|
||||
enum_field_names,
|
||||
enum_field_values,
|
||||
|
||||
// Evaluating at comptime because decl/field name must be comptime-known.
|
||||
decl_name,
|
||||
field_name,
|
||||
struct_field_name,
|
||||
enum_field_name,
|
||||
union_field_name,
|
||||
tuple_field_name,
|
||||
tuple_field_index,
|
||||
|
||||
// Evaluating at comptime because it is an attribute of a global declaration.
|
||||
|
|
@ -856,7 +864,6 @@ pub const SimpleComptimeReason = enum(u32) {
|
|||
pub fn message(r: SimpleComptimeReason) []const u8 {
|
||||
return switch (r) {
|
||||
// zig fmt: off
|
||||
.operand_Type => "operand to '@Type' must be comptime-known",
|
||||
.operand_setEvalBranchQuota => "operand to '@setEvalBranchQuota' must be comptime-known",
|
||||
.operand_setFloatMode => "operand to '@setFloatMode' must be comptime-known",
|
||||
.operand_branchHint => "operand to '@branchHint' must be comptime-known",
|
||||
|
|
@ -888,24 +895,33 @@ pub const SimpleComptimeReason = enum(u32) {
|
|||
.clobber => "clobber must be comptime-known",
|
||||
|
||||
.type => "types must be comptime-known",
|
||||
.int_signedness => "integer signedness must be comptime-known",
|
||||
.int_bit_width => "integer bit width must be comptime-known",
|
||||
.array_sentinel => "array sentinel value must be comptime-known",
|
||||
.array_length => "array length must be comptime-known",
|
||||
.pointer_size => "pointer size must be comptime-known",
|
||||
.pointer_attrs => "pointer attributes must be comptime-known",
|
||||
.pointer_sentinel => "pointer sentinel value must be comptime-known",
|
||||
.slice_sentinel => "slice sentinel value must be comptime-known",
|
||||
.array_length => "array length must be comptime-known",
|
||||
.vector_length => "vector length must be comptime-known",
|
||||
.error_set_contents => "error set contents must be comptime-known",
|
||||
.struct_fields => "struct fields must be comptime-known",
|
||||
.enum_fields => "enum fields must be comptime-known",
|
||||
.union_fields => "union fields must be comptime-known",
|
||||
.function_ret_ty => "function return type must be comptime-known",
|
||||
.function_parameters => "function parameters must be comptime-known",
|
||||
.fn_ret_ty => "function return type must be comptime-known",
|
||||
.fn_param_types => "function parameter types must be comptime-known",
|
||||
.fn_param_attrs => "function parameter attributes must be comptime-known",
|
||||
.fn_attrs => "function attributes must be comptime-known",
|
||||
.struct_layout => "struct layout must be comptime-known",
|
||||
.struct_field_names => "struct field names must be comptime-known",
|
||||
.struct_field_types => "struct field types must be comptime-known",
|
||||
.struct_field_attrs => "struct field attributes must be comptime-known",
|
||||
.union_layout => "union layout must be comptime-known",
|
||||
.union_field_names => "union field names must be comptime-known",
|
||||
.union_field_types => "union field types must be comptime-known",
|
||||
.union_field_attrs => "union field attributes must be comptime-known",
|
||||
.tuple_field_types => "tuple field types must be comptime-known",
|
||||
.enum_field_names => "enum field names must be comptime-known",
|
||||
.enum_field_values => "enum field values must be comptime-known",
|
||||
|
||||
.decl_name => "declaration name must be comptime-known",
|
||||
.field_name => "field name must be comptime-known",
|
||||
.struct_field_name => "struct field name must be comptime-known",
|
||||
.enum_field_name => "enum field name must be comptime-known",
|
||||
.union_field_name => "union field name must be comptime-known",
|
||||
.tuple_field_name => "tuple field name must be comptime-known",
|
||||
.tuple_field_index => "tuple field index must be comptime-known",
|
||||
|
||||
.container_var_init => "initializer of container-level variable must be comptime-known",
|
||||
|
|
|
|||
|
|
@ -833,7 +833,7 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
|
|||
=> {
|
||||
var buf: [2]Ast.Node.Index = undefined;
|
||||
const params = tree.builtinCallParams(&buf, node).?;
|
||||
return builtinCall(gz, scope, ri, node, params, false);
|
||||
return builtinCall(gz, scope, ri, node, params, false, .anon);
|
||||
},
|
||||
|
||||
.call_one,
|
||||
|
|
@ -1194,14 +1194,20 @@ fn nameStratExpr(
|
|||
},
|
||||
.builtin_call_two,
|
||||
.builtin_call_two_comma,
|
||||
.builtin_call,
|
||||
.builtin_call_comma,
|
||||
=> {
|
||||
const builtin_token = tree.nodeMainToken(node);
|
||||
const builtin_name = tree.tokenSlice(builtin_token);
|
||||
if (!std.mem.eql(u8, builtin_name, "@Type")) return null;
|
||||
var buf: [2]Ast.Node.Index = undefined;
|
||||
const params = tree.builtinCallParams(&buf, node).?;
|
||||
if (params.len != 1) return null; // let `builtinCall` error
|
||||
return try builtinReify(gz, scope, ri, node, params[0], name_strat);
|
||||
const info = BuiltinFn.list.get(builtin_name) orelse return null;
|
||||
switch (info.tag) {
|
||||
.Enum, .Struct, .Union => {
|
||||
var buf: [2]Ast.Node.Index = undefined;
|
||||
const params = tree.builtinCallParams(&buf, node).?;
|
||||
return try builtinCall(gz, scope, ri, node, params, false, name_strat);
|
||||
},
|
||||
else => return null,
|
||||
}
|
||||
},
|
||||
else => return null,
|
||||
}
|
||||
|
|
@ -1406,7 +1412,7 @@ fn fnProtoExprInner(
|
|||
.none;
|
||||
|
||||
const ret_ty_node = fn_proto.ast.return_type.unwrap().?;
|
||||
const ret_ty = try comptimeExpr(&block_scope, scope, coerced_type_ri, ret_ty_node, .function_ret_ty);
|
||||
const ret_ty = try comptimeExpr(&block_scope, scope, coerced_type_ri, ret_ty_node, .fn_ret_ty);
|
||||
|
||||
const result = try block_scope.addFunc(.{
|
||||
.src_node = fn_proto.ast.proto_node,
|
||||
|
|
@ -2629,7 +2635,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
|
|||
const params = tree.builtinCallParams(&buf, inner_node).?;
|
||||
|
||||
try emitDbgNode(gz, inner_node);
|
||||
const result = try builtinCall(gz, scope, .{ .rl = .none }, inner_node, params, allow_branch_hint);
|
||||
const result = try builtinCall(gz, scope, .{ .rl = .none }, inner_node, params, allow_branch_hint, .anon);
|
||||
noreturn_src_node = try addEnsureResult(gz, result, inner_node);
|
||||
},
|
||||
|
||||
|
|
@ -2707,6 +2713,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
|
|||
.elem_type,
|
||||
.indexable_ptr_elem_type,
|
||||
.splat_op_result_ty,
|
||||
.reify_int,
|
||||
.vector_type,
|
||||
.indexable_ptr_len,
|
||||
.anyframe_type,
|
||||
|
|
@ -8942,7 +8949,7 @@ fn unionInit(
|
|||
params: []const Ast.Node.Index,
|
||||
) InnerError!Zir.Inst.Ref {
|
||||
const union_type = try typeExpr(gz, scope, params[0]);
|
||||
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .union_field_name);
|
||||
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .union_field_names);
|
||||
const field_type = try gz.addPlNode(.field_type_ref, node, Zir.Inst.FieldTypeRef{
|
||||
.container_type = union_type,
|
||||
.field_name = field_name,
|
||||
|
|
@ -9210,6 +9217,7 @@ fn builtinCall(
|
|||
node: Ast.Node.Index,
|
||||
params: []const Ast.Node.Index,
|
||||
allow_branch_hint: bool,
|
||||
reify_name_strat: Zir.Inst.NameStrategy,
|
||||
) InnerError!Zir.Inst.Ref {
|
||||
const astgen = gz.astgen;
|
||||
const tree = astgen.tree;
|
||||
|
|
@ -9443,9 +9451,140 @@ fn builtinCall(
|
|||
return rvalue(gz, ri, try gz.addNodeExtended(.in_comptime, node), node);
|
||||
},
|
||||
|
||||
.Type => {
|
||||
return builtinReify(gz, scope, ri, node, params[0], .anon);
|
||||
.EnumLiteral => return rvalue(gz, ri, .enum_literal_type, node),
|
||||
.Int => {
|
||||
const signedness_ty = try gz.addBuiltinValue(node, .signedness);
|
||||
const result = try gz.addPlNode(.reify_int, node, Zir.Inst.Bin{
|
||||
.lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = signedness_ty } }, params[0], .int_signedness),
|
||||
.rhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, params[1], .int_bit_width),
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.Tuple => {
|
||||
const result = try gz.addExtendedPayload(.reify_tuple, Zir.Inst.UnNode{
|
||||
.node = gz.nodeIndexToRelative(node),
|
||||
.operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_type_type } }, params[0], .tuple_field_types),
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.Pointer => {
|
||||
const ptr_size_ty = try gz.addBuiltinValue(node, .pointer_size);
|
||||
const ptr_attrs_ty = try gz.addBuiltinValue(node, .pointer_attributes);
|
||||
const size = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = ptr_size_ty } }, params[0], .pointer_size);
|
||||
const attrs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = ptr_attrs_ty } }, params[1], .pointer_attrs);
|
||||
const elem_ty = try typeExpr(gz, scope, params[2]);
|
||||
const sentinel_ty = try gz.addExtendedPayload(.reify_pointer_sentinel_ty, Zir.Inst.UnNode{
|
||||
.node = gz.nodeIndexToRelative(params[2]),
|
||||
.operand = elem_ty,
|
||||
});
|
||||
const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = sentinel_ty } }, params[3], .pointer_sentinel);
|
||||
const result = try gz.addExtendedPayload(.reify_pointer, Zir.Inst.ReifyPointer{
|
||||
.node = gz.nodeIndexToRelative(node),
|
||||
.size = size,
|
||||
.attrs = attrs,
|
||||
.elem_ty = elem_ty,
|
||||
.sentinel = sentinel,
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.Fn => {
|
||||
const fn_attrs_ty = try gz.addBuiltinValue(node, .fn_attributes);
|
||||
const param_types = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_type_type } }, params[0], .fn_param_types);
|
||||
const param_attrs_ty = try gz.addExtendedPayloadSmall(
|
||||
.reify_slice_arg_ty,
|
||||
@intFromEnum(Zir.Inst.ReifySliceArgInfo.type_to_fn_param_attrs),
|
||||
Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(params[0]), .operand = param_types },
|
||||
);
|
||||
const param_attrs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = param_attrs_ty } }, params[1], .fn_param_attrs);
|
||||
const ret_ty = try comptimeExpr(gz, scope, coerced_type_ri, params[2], .fn_ret_ty);
|
||||
const fn_attrs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = fn_attrs_ty } }, params[3], .fn_attrs);
|
||||
const result = try gz.addExtendedPayload(.reify_fn, Zir.Inst.ReifyFn{
|
||||
.node = gz.nodeIndexToRelative(node),
|
||||
.param_types = param_types,
|
||||
.param_attrs = param_attrs,
|
||||
.ret_ty = ret_ty,
|
||||
.fn_attrs = fn_attrs,
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.Struct => {
|
||||
const container_layout_ty = try gz.addBuiltinValue(node, .container_layout);
|
||||
const layout = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = container_layout_ty } }, params[0], .struct_layout);
|
||||
const backing_ty = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .optional_type_type } }, params[1], .type);
|
||||
const field_names = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_slice_const_u8_type } }, params[2], .struct_field_names);
|
||||
const field_types_ty = try gz.addExtendedPayloadSmall(
|
||||
.reify_slice_arg_ty,
|
||||
@intFromEnum(Zir.Inst.ReifySliceArgInfo.string_to_struct_field_type),
|
||||
Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(params[2]), .operand = field_names },
|
||||
);
|
||||
const field_attrs_ty = try gz.addExtendedPayloadSmall(
|
||||
.reify_slice_arg_ty,
|
||||
@intFromEnum(Zir.Inst.ReifySliceArgInfo.string_to_struct_field_attrs),
|
||||
Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(params[2]), .operand = field_names },
|
||||
);
|
||||
const field_types = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = field_types_ty } }, params[3], .struct_field_types);
|
||||
const field_attrs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = field_attrs_ty } }, params[4], .struct_field_attrs);
|
||||
const result = try gz.addExtendedPayloadSmall(.reify_struct, @intFromEnum(reify_name_strat), Zir.Inst.ReifyStruct{
|
||||
.src_line = gz.astgen.source_line,
|
||||
.node = node,
|
||||
.layout = layout,
|
||||
.backing_ty = backing_ty,
|
||||
.field_names = field_names,
|
||||
.field_types = field_types,
|
||||
.field_attrs = field_attrs,
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.Union => {
|
||||
const container_layout_ty = try gz.addBuiltinValue(node, .container_layout);
|
||||
const layout = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = container_layout_ty } }, params[0], .union_layout);
|
||||
const arg_ty = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .optional_type_type } }, params[1], .type);
|
||||
const field_names = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_slice_const_u8_type } }, params[2], .union_field_names);
|
||||
const field_types_ty = try gz.addExtendedPayloadSmall(
|
||||
.reify_slice_arg_ty,
|
||||
@intFromEnum(Zir.Inst.ReifySliceArgInfo.string_to_union_field_type),
|
||||
Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(params[2]), .operand = field_names },
|
||||
);
|
||||
const field_attrs_ty = try gz.addExtendedPayloadSmall(
|
||||
.reify_slice_arg_ty,
|
||||
@intFromEnum(Zir.Inst.ReifySliceArgInfo.string_to_union_field_attrs),
|
||||
Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(params[2]), .operand = field_names },
|
||||
);
|
||||
const field_types = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = field_types_ty } }, params[3], .union_field_types);
|
||||
const field_attrs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = field_attrs_ty } }, params[4], .union_field_attrs);
|
||||
const result = try gz.addExtendedPayloadSmall(.reify_union, @intFromEnum(reify_name_strat), Zir.Inst.ReifyUnion{
|
||||
.src_line = gz.astgen.source_line,
|
||||
.node = node,
|
||||
.layout = layout,
|
||||
.arg_ty = arg_ty,
|
||||
.field_names = field_names,
|
||||
.field_types = field_types,
|
||||
.field_attrs = field_attrs,
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.Enum => {
|
||||
const enum_mode_ty = try gz.addBuiltinValue(node, .enum_mode);
|
||||
const tag_ty = try typeExpr(gz, scope, params[0]);
|
||||
const mode = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = enum_mode_ty } }, params[1], .type);
|
||||
const field_names = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_slice_const_u8_type } }, params[2], .enum_field_names);
|
||||
const field_values_ty = try gz.addExtendedPayload(.reify_enum_value_slice_ty, Zir.Inst.BinNode{
|
||||
.node = gz.nodeIndexToRelative(node),
|
||||
.lhs = tag_ty,
|
||||
.rhs = field_names,
|
||||
});
|
||||
const field_values = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = field_values_ty } }, params[3], .enum_field_values);
|
||||
const result = try gz.addExtendedPayloadSmall(.reify_enum, @intFromEnum(reify_name_strat), Zir.Inst.ReifyEnum{
|
||||
.src_line = gz.astgen.source_line,
|
||||
.node = node,
|
||||
.tag_ty = tag_ty,
|
||||
.mode = mode,
|
||||
.field_names = field_names,
|
||||
.field_values = field_values,
|
||||
});
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
|
||||
.panic => {
|
||||
try emitDbgNode(gz, node);
|
||||
return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[0], .panic);
|
||||
|
|
@ -9764,41 +9903,6 @@ fn builtinCall(
|
|||
},
|
||||
}
|
||||
}
|
||||
fn builtinReify(
|
||||
gz: *GenZir,
|
||||
scope: *Scope,
|
||||
ri: ResultInfo,
|
||||
node: Ast.Node.Index,
|
||||
arg_node: Ast.Node.Index,
|
||||
name_strat: Zir.Inst.NameStrategy,
|
||||
) InnerError!Zir.Inst.Ref {
|
||||
const astgen = gz.astgen;
|
||||
const gpa = astgen.gpa;
|
||||
|
||||
const type_info_ty = try gz.addBuiltinValue(node, .type_info);
|
||||
const operand = try expr(gz, scope, .{ .rl = .{ .coerced_ty = type_info_ty } }, arg_node);
|
||||
|
||||
try gz.instructions.ensureUnusedCapacity(gpa, 1);
|
||||
try astgen.instructions.ensureUnusedCapacity(gpa, 1);
|
||||
|
||||
const payload_index = try astgen.addExtra(Zir.Inst.Reify{
|
||||
.node = node, // Absolute node index -- see the definition of `Reify`.
|
||||
.operand = operand,
|
||||
.src_line = astgen.source_line,
|
||||
});
|
||||
const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len);
|
||||
astgen.instructions.appendAssumeCapacity(.{
|
||||
.tag = .extended,
|
||||
.data = .{ .extended = .{
|
||||
.opcode = .reify,
|
||||
.small = @intFromEnum(name_strat),
|
||||
.operand = payload_index,
|
||||
} },
|
||||
});
|
||||
gz.instructions.appendAssumeCapacity(new_index);
|
||||
const result = new_index.toRef();
|
||||
return rvalue(gz, ri, result, node);
|
||||
}
|
||||
|
||||
fn hasDeclOrField(
|
||||
gz: *GenZir,
|
||||
|
|
|
|||
|
|
@ -866,6 +866,7 @@ fn builtinCall(astrl: *AstRlAnnotate, block: ?*Block, ri: ResultInfo, node: Ast.
|
|||
// These builtins take no args and do not consume the result pointer.
|
||||
.src,
|
||||
.This,
|
||||
.EnumLiteral,
|
||||
.return_address,
|
||||
.error_return_trace,
|
||||
.frame,
|
||||
|
|
@ -906,7 +907,7 @@ fn builtinCall(astrl: *AstRlAnnotate, block: ?*Block, ri: ResultInfo, node: Ast.
|
|||
.embed_file,
|
||||
.error_name,
|
||||
.set_runtime_safety,
|
||||
.Type,
|
||||
.Tuple,
|
||||
.c_undef,
|
||||
.c_include,
|
||||
.wasm_memory_size,
|
||||
|
|
@ -1058,6 +1059,48 @@ fn builtinCall(astrl: *AstRlAnnotate, block: ?*Block, ri: ResultInfo, node: Ast.
|
|||
_ = try astrl.expr(args[3], block, ResultInfo.none);
|
||||
return false;
|
||||
},
|
||||
.Int => {
|
||||
_ = try astrl.expr(args[0], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[1], block, ResultInfo.type_only);
|
||||
return false;
|
||||
},
|
||||
.Pointer => {
|
||||
_ = try astrl.expr(args[0], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[1], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[2], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[3], block, ResultInfo.type_only);
|
||||
return false;
|
||||
},
|
||||
.Fn => {
|
||||
_ = try astrl.expr(args[0], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[1], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[2], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[3], block, ResultInfo.type_only);
|
||||
return false;
|
||||
},
|
||||
.Struct => {
|
||||
_ = try astrl.expr(args[0], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[1], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[2], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[3], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[4], block, ResultInfo.type_only);
|
||||
return false;
|
||||
},
|
||||
.Union => {
|
||||
_ = try astrl.expr(args[0], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[1], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[2], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[3], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[4], block, ResultInfo.type_only);
|
||||
return false;
|
||||
},
|
||||
.Enum => {
|
||||
_ = try astrl.expr(args[0], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[1], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[2], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[3], block, ResultInfo.type_only);
|
||||
return false;
|
||||
},
|
||||
.Vector => {
|
||||
_ = try astrl.expr(args[0], block, ResultInfo.type_only);
|
||||
_ = try astrl.expr(args[1], block, ResultInfo.type_only);
|
||||
|
|
|
|||
|
|
@ -110,7 +110,14 @@ pub const Tag = enum {
|
|||
This,
|
||||
trap,
|
||||
truncate,
|
||||
Type,
|
||||
EnumLiteral,
|
||||
Int,
|
||||
Tuple,
|
||||
Pointer,
|
||||
Fn,
|
||||
Struct,
|
||||
Union,
|
||||
Enum,
|
||||
type_info,
|
||||
type_name,
|
||||
TypeOf,
|
||||
|
|
@ -937,12 +944,61 @@ pub const list = list: {
|
|||
},
|
||||
},
|
||||
.{
|
||||
"@Type",
|
||||
"@EnumLiteral",
|
||||
.{
|
||||
.tag = .Type,
|
||||
.tag = .EnumLiteral,
|
||||
.param_count = 0,
|
||||
},
|
||||
},
|
||||
.{
|
||||
"@Int",
|
||||
.{
|
||||
.tag = .Int,
|
||||
.param_count = 2,
|
||||
},
|
||||
},
|
||||
.{
|
||||
"@Tuple",
|
||||
.{
|
||||
.tag = .Tuple,
|
||||
.param_count = 1,
|
||||
},
|
||||
},
|
||||
.{
|
||||
"@Pointer",
|
||||
.{
|
||||
.tag = .Pointer,
|
||||
.param_count = 4,
|
||||
},
|
||||
},
|
||||
.{
|
||||
"@Fn",
|
||||
.{
|
||||
.tag = .Fn,
|
||||
.param_count = 4,
|
||||
},
|
||||
},
|
||||
.{
|
||||
"@Struct",
|
||||
.{
|
||||
.tag = .Struct,
|
||||
.param_count = 5,
|
||||
},
|
||||
},
|
||||
.{
|
||||
"@Union",
|
||||
.{
|
||||
.tag = .Union,
|
||||
.param_count = 5,
|
||||
},
|
||||
},
|
||||
.{
|
||||
"@Enum",
|
||||
.{
|
||||
.tag = .Enum,
|
||||
.param_count = 4,
|
||||
},
|
||||
},
|
||||
.{
|
||||
"@typeInfo",
|
||||
.{
|
||||
|
|
|
|||
|
|
@ -254,7 +254,6 @@ fn libCGenericName(target: *const std.Target) [:0]const u8 {
|
|||
.ohos,
|
||||
.ohoseabi,
|
||||
=> return "musl",
|
||||
.code16,
|
||||
.eabi,
|
||||
.eabihf,
|
||||
.ilp32,
|
||||
|
|
|
|||
|
|
@ -643,7 +643,7 @@ const MsvcLibDir = struct {
|
|||
|
||||
if (!std.fs.path.isAbsolute(dll_path)) return error.PathNotFound;
|
||||
|
||||
var path_it = std.fs.path.componentIterator(dll_path) catch return error.PathNotFound;
|
||||
var path_it = std.fs.path.componentIterator(dll_path);
|
||||
// the .dll filename
|
||||
_ = path_it.last();
|
||||
const root_path = while (path_it.previous()) |dir_component| {
|
||||
|
|
|
|||
|
|
@ -260,6 +260,10 @@ pub const Inst = struct {
|
|||
/// `[N:S]T` syntax. Source location is the array type expression node.
|
||||
/// Uses the `pl_node` union field. Payload is `ArrayTypeSentinel`.
|
||||
array_type_sentinel,
|
||||
/// `@Int` builtin.
|
||||
/// Uses the `pl_node` union field with `Bin` payload.
|
||||
/// lhs is signedness, rhs is bit count.
|
||||
reify_int,
|
||||
/// `@Vector` builtin.
|
||||
/// Uses the `pl_node` union field with `Bin` payload.
|
||||
/// lhs is length, rhs is element type.
|
||||
|
|
@ -1112,6 +1116,7 @@ pub const Inst = struct {
|
|||
.array_mul,
|
||||
.array_type,
|
||||
.array_type_sentinel,
|
||||
.reify_int,
|
||||
.vector_type,
|
||||
.elem_type,
|
||||
.indexable_ptr_elem_type,
|
||||
|
|
@ -1409,6 +1414,7 @@ pub const Inst = struct {
|
|||
.array_mul,
|
||||
.array_type,
|
||||
.array_type_sentinel,
|
||||
.reify_int,
|
||||
.vector_type,
|
||||
.elem_type,
|
||||
.indexable_ptr_elem_type,
|
||||
|
|
@ -1644,6 +1650,7 @@ pub const Inst = struct {
|
|||
.array_mul = .pl_node,
|
||||
.array_type = .pl_node,
|
||||
.array_type_sentinel = .pl_node,
|
||||
.reify_int = .pl_node,
|
||||
.vector_type = .pl_node,
|
||||
.elem_type = .un_node,
|
||||
.indexable_ptr_elem_type = .un_node,
|
||||
|
|
@ -2035,10 +2042,43 @@ pub const Inst = struct {
|
|||
/// Implement builtin `@errorFromInt`.
|
||||
/// `operand` is payload index to `UnNode`.
|
||||
error_from_int,
|
||||
/// Implement builtin `@Type`.
|
||||
/// `operand` is payload index to `Reify`.
|
||||
/// Given a comptime-known operand of type `[]const A`, returns the type `*const [operand.len]B`.
|
||||
/// The types `A` and `B` are determined from `ReifySliceArgInfo`.
|
||||
/// This instruction is used to provide result types to arguments of `@Fn`, `@Struct`, etc.
|
||||
/// `operand` is payload index to `UnNode`.
|
||||
/// `small` is a bitcast `ReifySliceArgInfo`.
|
||||
reify_slice_arg_ty,
|
||||
/// Like `reify_slice_arg_ty` for the specific case of `[]const []const u8` to `[]const TagInt`,
|
||||
/// as needed for `@Enum`.
|
||||
/// `operand` is payload index to `BinNode`. lhs is the type `TagInt`. rhs is the `[]const []const u8` value.
|
||||
/// `small` is unused.
|
||||
reify_enum_value_slice_ty,
|
||||
/// Given a comptime-known operand of type `type`, returns the type `?operand` if possible, otherwise `?noreturn`.
|
||||
/// Used for the final arg of `@Pointer` to allow reifying pointers to opaque types.
|
||||
/// `operand` is payload index to `UnNode`.
|
||||
/// `small` is unused.
|
||||
reify_pointer_sentinel_ty,
|
||||
/// Implements builtin `@Tuple`.
|
||||
/// `operand` is payload index to `UnNode`.
|
||||
reify_tuple,
|
||||
/// Implements builtin `@Pointer`.
|
||||
/// `operand` is payload index to `ReifyPointer`.
|
||||
reify_pointer,
|
||||
/// Implements builtin `@Fn`.
|
||||
/// `operand` is payload index to `ReifyFn`.
|
||||
reify_fn,
|
||||
/// Implements builtin `@Struct`.
|
||||
/// `operand` is payload index to `ReifyStruct`.
|
||||
/// `small` contains `NameStrategy`.
|
||||
reify,
|
||||
reify_struct,
|
||||
/// Implements builtin `@Union`.
|
||||
/// `operand` is payload index to `ReifyUnion`.
|
||||
/// `small` contains `NameStrategy`.
|
||||
reify_union,
|
||||
/// Implements builtin `@Enum`.
|
||||
/// `operand` is payload index to `ReifyEnum`.
|
||||
/// `small` contains `NameStrategy`.
|
||||
reify_enum,
|
||||
/// Implements the `@cmpxchgStrong` and `@cmpxchgWeak` builtins.
|
||||
/// `small` 0=>weak 1=>strong
|
||||
/// `operand` is payload index to `Cmpxchg`.
|
||||
|
|
@ -2226,6 +2266,11 @@ pub const Inst = struct {
|
|||
manyptr_const_u8_sentinel_0_type,
|
||||
slice_const_u8_type,
|
||||
slice_const_u8_sentinel_0_type,
|
||||
manyptr_const_slice_const_u8_type,
|
||||
slice_const_slice_const_u8_type,
|
||||
optional_type_type,
|
||||
manyptr_const_type_type,
|
||||
slice_const_type_type,
|
||||
vector_8_i8_type,
|
||||
vector_16_i8_type,
|
||||
vector_32_i8_type,
|
||||
|
|
@ -3169,6 +3214,23 @@ pub const Inst = struct {
|
|||
rhs: Ref,
|
||||
};
|
||||
|
||||
pub const ReifySliceArgInfo = enum(u16) {
|
||||
/// Input element type is `type`.
|
||||
/// Output element type is `std.builtin.Type.Fn.Param.Attributes`.
|
||||
type_to_fn_param_attrs,
|
||||
/// Input element type is `[]const u8`.
|
||||
/// Output element type is `type`.
|
||||
string_to_struct_field_type,
|
||||
/// Identical to `string_to_struct_field_type` aside from emitting slightly different error messages.
|
||||
string_to_union_field_type,
|
||||
/// Input element type is `[]const u8`.
|
||||
/// Output element type is `std.builtin.Type.StructField.Attributes`.
|
||||
string_to_struct_field_attrs,
|
||||
/// Input element type is `[]const u8`.
|
||||
/// Output element type is `std.builtin.Type.UnionField.Attributes`.
|
||||
string_to_union_field_attrs,
|
||||
};
|
||||
|
||||
pub const UnNode = struct {
|
||||
node: Ast.Node.Offset,
|
||||
operand: Ref,
|
||||
|
|
@ -3179,12 +3241,55 @@ pub const Inst = struct {
|
|||
index: u32,
|
||||
};
|
||||
|
||||
pub const Reify = struct {
|
||||
pub const ReifyPointer = struct {
|
||||
node: Ast.Node.Offset,
|
||||
size: Ref,
|
||||
attrs: Ref,
|
||||
elem_ty: Ref,
|
||||
sentinel: Ref,
|
||||
};
|
||||
|
||||
pub const ReifyFn = struct {
|
||||
node: Ast.Node.Offset,
|
||||
param_types: Ref,
|
||||
param_attrs: Ref,
|
||||
ret_ty: Ref,
|
||||
fn_attrs: Ref,
|
||||
};
|
||||
|
||||
pub const ReifyStruct = struct {
|
||||
src_line: u32,
|
||||
/// This node is absolute, because `reify` instructions are tracked across updates, and
|
||||
/// this simplifies the logic for getting source locations for types.
|
||||
node: Ast.Node.Index,
|
||||
operand: Ref,
|
||||
layout: Ref,
|
||||
backing_ty: Ref,
|
||||
field_names: Ref,
|
||||
field_types: Ref,
|
||||
field_attrs: Ref,
|
||||
};
|
||||
|
||||
pub const ReifyUnion = struct {
|
||||
src_line: u32,
|
||||
/// This node is absolute, because `reify` instructions are tracked across updates, and
|
||||
/// this simplifies the logic for getting source locations for types.
|
||||
node: Ast.Node.Index,
|
||||
layout: Ref,
|
||||
arg_ty: Ref,
|
||||
field_names: Ref,
|
||||
field_types: Ref,
|
||||
field_attrs: Ref,
|
||||
};
|
||||
|
||||
pub const ReifyEnum = struct {
|
||||
src_line: u32,
|
||||
/// This node is absolute, because `reify` instructions are tracked across updates, and
|
||||
/// this simplifies the logic for getting source locations for types.
|
||||
node: Ast.Node.Index,
|
||||
tag_ty: Ref,
|
||||
mode: Ref,
|
||||
field_names: Ref,
|
||||
field_values: Ref,
|
||||
};
|
||||
|
||||
/// Trailing:
|
||||
|
|
@ -3496,14 +3601,19 @@ pub const Inst = struct {
|
|||
calling_convention,
|
||||
address_space,
|
||||
float_mode,
|
||||
signedness,
|
||||
reduce_op,
|
||||
call_modifier,
|
||||
prefetch_options,
|
||||
export_options,
|
||||
extern_options,
|
||||
type_info,
|
||||
branch_hint,
|
||||
clobbers,
|
||||
pointer_size,
|
||||
pointer_attributes,
|
||||
fn_attributes,
|
||||
container_layout,
|
||||
enum_mode,
|
||||
// Values
|
||||
calling_convention_c,
|
||||
calling_convention_inline,
|
||||
|
|
@ -4190,6 +4300,7 @@ fn findTrackableInner(
|
|||
.array_mul,
|
||||
.array_type,
|
||||
.array_type_sentinel,
|
||||
.reify_int,
|
||||
.vector_type,
|
||||
.elem_type,
|
||||
.indexable_ptr_elem_type,
|
||||
|
|
@ -4432,6 +4543,12 @@ fn findTrackableInner(
|
|||
.select,
|
||||
.int_from_error,
|
||||
.error_from_int,
|
||||
.reify_slice_arg_ty,
|
||||
.reify_enum_value_slice_ty,
|
||||
.reify_pointer_sentinel_ty,
|
||||
.reify_tuple,
|
||||
.reify_pointer,
|
||||
.reify_fn,
|
||||
.cmpxchg,
|
||||
.c_va_arg,
|
||||
.c_va_copy,
|
||||
|
|
@ -4463,7 +4580,11 @@ fn findTrackableInner(
|
|||
},
|
||||
|
||||
// Reifications and opaque declarations need tracking, but have no body.
|
||||
.reify, .opaque_decl => return contents.other.append(gpa, inst),
|
||||
.reify_enum,
|
||||
.reify_struct,
|
||||
.reify_union,
|
||||
.opaque_decl,
|
||||
=> return contents.other.append(gpa, inst),
|
||||
|
||||
// Struct declarations need tracking and have bodies.
|
||||
.struct_decl => {
|
||||
|
|
@ -5246,7 +5367,9 @@ pub fn assertTrackable(zir: Zir, inst_idx: Zir.Inst.Index) void {
|
|||
.union_decl,
|
||||
.enum_decl,
|
||||
.opaque_decl,
|
||||
.reify,
|
||||
.reify_enum,
|
||||
.reify_struct,
|
||||
.reify_union,
|
||||
=> {}, // tracked in order, as the owner instructions of explicit container types
|
||||
else => unreachable, // assertion failure; not trackable
|
||||
},
|
||||
|
|
|
|||
|
|
@ -81,23 +81,15 @@ fn ToUnsigned(comptime T: type) type {
|
|||
}
|
||||
|
||||
/// Constructs a [*c] pointer with the const and volatile annotations
|
||||
/// from SelfType for pointing to a C flexible array of ElementType.
|
||||
pub fn FlexibleArrayType(comptime SelfType: type, comptime ElementType: type) type {
|
||||
switch (@typeInfo(SelfType)) {
|
||||
.pointer => |ptr| {
|
||||
return @Type(.{ .pointer = .{
|
||||
.size = .c,
|
||||
.is_const = ptr.is_const,
|
||||
.is_volatile = ptr.is_volatile,
|
||||
.alignment = @alignOf(ElementType),
|
||||
.address_space = .generic,
|
||||
.child = ElementType,
|
||||
.is_allowzero = true,
|
||||
.sentinel_ptr = null,
|
||||
} });
|
||||
},
|
||||
else => |info| @compileError("Invalid self type \"" ++ @tagName(info) ++ "\" for flexible array getter: " ++ @typeName(SelfType)),
|
||||
}
|
||||
/// from Self for pointing to a C flexible array of Element.
|
||||
pub fn FlexibleArrayType(comptime Self: type, comptime Element: type) type {
|
||||
return switch (@typeInfo(Self)) {
|
||||
.pointer => |ptr| @Pointer(.c, .{
|
||||
.@"const" = ptr.is_const,
|
||||
.@"volatile" = ptr.is_volatile,
|
||||
}, Element, null),
|
||||
else => |info| @compileError("Invalid self type \"" ++ @tagName(info) ++ "\" for flexible array getter: " ++ @typeName(Self)),
|
||||
};
|
||||
}
|
||||
|
||||
/// Promote the type of an integer literal until it fits as C would.
|
||||
|
|
@ -219,7 +211,7 @@ fn castInt(comptime DestType: type, target: anytype) DestType {
|
|||
const dest = @typeInfo(DestType).int;
|
||||
const source = @typeInfo(@TypeOf(target)).int;
|
||||
|
||||
const Int = @Type(.{ .int = .{ .bits = dest.bits, .signedness = source.signedness } });
|
||||
const Int = @Int(source.signedness, dest.bits);
|
||||
|
||||
if (dest.bits < source.bits)
|
||||
return @as(DestType, @bitCast(@as(Int, @truncate(target))))
|
||||
|
|
|
|||
|
|
@ -8614,39 +8614,18 @@ pub const Metadata = packed struct(u32) {
|
|||
nodes: anytype,
|
||||
w: *Writer,
|
||||
) !void {
|
||||
comptime var fmt_str: []const u8 = "";
|
||||
const names = comptime std.meta.fieldNames(@TypeOf(nodes));
|
||||
comptime var fields: [2 + names.len]std.builtin.Type.StructField = undefined;
|
||||
inline for (fields[0..2], .{ "distinct", "node" }) |*field, name| {
|
||||
fmt_str = fmt_str ++ "{[" ++ name ++ "]s}";
|
||||
field.* = .{
|
||||
.name = name,
|
||||
.type = []const u8,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf([]const u8),
|
||||
};
|
||||
}
|
||||
fmt_str = fmt_str ++ "(";
|
||||
inline for (fields[2..], names) |*field, name| {
|
||||
fmt_str = fmt_str ++ "{[" ++ name ++ "]f}";
|
||||
const T = std.fmt.Alt(FormatData, format);
|
||||
field.* = .{
|
||||
.name = name,
|
||||
.type = T,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf(T),
|
||||
};
|
||||
}
|
||||
|
||||
comptime var fmt_str: []const u8 = "{[distinct]s}{[node]s}(";
|
||||
inline for (names) |name| fmt_str = fmt_str ++ "{[" ++ name ++ "]f}";
|
||||
fmt_str = fmt_str ++ ")\n";
|
||||
|
||||
var fmt_args: @Type(.{ .@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
} }) = undefined;
|
||||
const field_names = @as([]const []const u8, &.{ "distinct", "node" }) ++ names;
|
||||
comptime var field_types: [2 + names.len]type = undefined;
|
||||
@memset(field_types[0..2], []const u8);
|
||||
@memset(field_types[2..], std.fmt.Alt(FormatData, format));
|
||||
|
||||
var fmt_args: @Struct(.auto, null, field_names, &field_types, &@splat(.{})) = undefined;
|
||||
fmt_args.distinct = @tagName(distinct);
|
||||
fmt_args.node = @tagName(node);
|
||||
inline for (names) |name| @field(fmt_args, name) = try formatter.fmt(
|
||||
|
|
|
|||
|
|
@ -380,28 +380,19 @@ pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target {
|
|||
|
||||
// For x86, we need to populate some CPU feature flags depending on architecture
|
||||
// and mode:
|
||||
// * 16bit_mode => if the abi is code16
|
||||
// * 16bit_mode => if the arch is x86_16
|
||||
// * 32bit_mode => if the arch is x86
|
||||
// However, the "mode" flags can be used as overrides, so if the user explicitly
|
||||
// sets one of them, that takes precedence.
|
||||
switch (query_cpu_arch) {
|
||||
.x86_16 => {
|
||||
cpu.features.addFeature(
|
||||
@intFromEnum(Target.x86.Feature.@"16bit_mode"),
|
||||
);
|
||||
cpu.features.addFeature(@intFromEnum(Target.x86.Feature.@"16bit_mode"));
|
||||
},
|
||||
.x86 => {
|
||||
if (!Target.x86.featureSetHasAny(query.cpu_features_add, .{
|
||||
.@"16bit_mode", .@"32bit_mode",
|
||||
})) {
|
||||
switch (query_abi) {
|
||||
.code16 => cpu.features.addFeature(
|
||||
@intFromEnum(Target.x86.Feature.@"16bit_mode"),
|
||||
),
|
||||
else => cpu.features.addFeature(
|
||||
@intFromEnum(Target.x86.Feature.@"32bit_mode"),
|
||||
),
|
||||
}
|
||||
cpu.features.addFeature(@intFromEnum(Target.x86.Feature.@"32bit_mode"));
|
||||
}
|
||||
},
|
||||
.arm, .armeb => {
|
||||
|
|
@ -409,9 +400,7 @@ pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target {
|
|||
// What do we do if the user specifies +thumb_mode?
|
||||
},
|
||||
.thumb, .thumbeb => {
|
||||
cpu.features.addFeature(
|
||||
@intFromEnum(Target.arm.Feature.thumb_mode),
|
||||
);
|
||||
cpu.features.addFeature(@intFromEnum(Target.arm.Feature.thumb_mode));
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1062,6 +1062,11 @@ pub const Inst = struct {
|
|||
manyptr_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.manyptr_const_u8_sentinel_0_type),
|
||||
slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
|
||||
slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
|
||||
manyptr_const_slice_const_u8_type = @intFromEnum(InternPool.Index.manyptr_const_slice_const_u8_type),
|
||||
slice_const_slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_slice_const_u8_type),
|
||||
optional_type_type = @intFromEnum(InternPool.Index.optional_type_type),
|
||||
manyptr_const_type_type = @intFromEnum(InternPool.Index.manyptr_const_type_type),
|
||||
slice_const_type_type = @intFromEnum(InternPool.Index.slice_const_type_type),
|
||||
vector_8_i8_type = @intFromEnum(InternPool.Index.vector_8_i8_type),
|
||||
vector_16_i8_type = @intFromEnum(InternPool.Index.vector_16_i8_type),
|
||||
vector_32_i8_type = @intFromEnum(InternPool.Index.vector_32_i8_type),
|
||||
|
|
|
|||
|
|
@ -2851,6 +2851,7 @@ fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
|
|||
|
||||
pub const UpdateError = error{
|
||||
OutOfMemory,
|
||||
Canceled,
|
||||
Unexpected,
|
||||
CurrentWorkingDirectoryUnlinked,
|
||||
};
|
||||
|
|
@ -2930,6 +2931,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
|
|||
},
|
||||
},
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.Canceled => return error.Canceled,
|
||||
error.InvalidFormat => return comp.setMiscFailure(
|
||||
.check_whole_cache,
|
||||
"failed to check cache: invalid manifest file format",
|
||||
|
|
@ -5010,7 +5012,7 @@ fn performAllTheWork(
|
|||
}
|
||||
}
|
||||
|
||||
const JobError = Allocator.Error;
|
||||
const JobError = Allocator.Error || Io.Cancelable;
|
||||
|
||||
pub fn queueJob(comp: *Compilation, job: Job) !void {
|
||||
try comp.work_queues[Job.stage(job)].pushBack(comp.gpa, job);
|
||||
|
|
@ -5117,6 +5119,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
|
|||
|
||||
pt.ensureFuncBodyUpToDate(func) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.Canceled => |e| return e,
|
||||
error.AnalysisFail => return,
|
||||
};
|
||||
},
|
||||
|
|
@ -5137,6 +5140,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
|
|||
};
|
||||
maybe_err catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.Canceled => |e| return e,
|
||||
error.AnalysisFail => return,
|
||||
};
|
||||
|
||||
|
|
@ -5166,7 +5170,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
|
|||
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
|
||||
defer pt.deactivate();
|
||||
Type.fromInterned(ty).resolveFully(pt) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.OutOfMemory, error.Canceled => |e| return e,
|
||||
error.AnalysisFail => return,
|
||||
};
|
||||
},
|
||||
|
|
@ -5177,7 +5181,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
|
|||
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
|
||||
defer pt.deactivate();
|
||||
pt.semaMod(mod) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.OutOfMemory, error.Canceled => |e| return e,
|
||||
error.AnalysisFail => return,
|
||||
};
|
||||
},
|
||||
|
|
@ -5190,8 +5194,8 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
|
|||
// TODO Surface more error details.
|
||||
comp.lockAndSetMiscFailure(
|
||||
.windows_import_lib,
|
||||
"unable to generate DLL import .lib file for {s}: {s}",
|
||||
.{ link_lib, @errorName(err) },
|
||||
"unable to generate DLL import .lib file for {s}: {t}",
|
||||
.{ link_lib, err },
|
||||
);
|
||||
};
|
||||
},
|
||||
|
|
@ -6066,14 +6070,10 @@ fn buildLibZigC(comp: *Compilation, prog_node: std.Progress.Node) void {
|
|||
};
|
||||
}
|
||||
|
||||
fn reportRetryableCObjectError(
|
||||
comp: *Compilation,
|
||||
c_object: *CObject,
|
||||
err: anyerror,
|
||||
) error{OutOfMemory}!void {
|
||||
fn reportRetryableCObjectError(comp: *Compilation, c_object: *CObject, err: anyerror) error{OutOfMemory}!void {
|
||||
c_object.status = .failure_retryable;
|
||||
|
||||
switch (comp.failCObj(c_object, "{s}", .{@errorName(err)})) {
|
||||
switch (comp.failCObj(c_object, "{t}", .{err})) {
|
||||
error.AnalysisFail => return,
|
||||
else => |e| return e,
|
||||
}
|
||||
|
|
@ -7317,7 +7317,7 @@ fn failCObj(
|
|||
c_object: *CObject,
|
||||
comptime format: []const u8,
|
||||
args: anytype,
|
||||
) SemaError {
|
||||
) error{ OutOfMemory, AnalysisFail } {
|
||||
@branchHint(.cold);
|
||||
const diag_bundle = blk: {
|
||||
const diag_bundle = try comp.gpa.create(CObject.Diag.Bundle);
|
||||
|
|
@ -7341,7 +7341,7 @@ fn failCObjWithOwnedDiagBundle(
|
|||
comp: *Compilation,
|
||||
c_object: *CObject,
|
||||
diag_bundle: *CObject.Diag.Bundle,
|
||||
) SemaError {
|
||||
) error{ OutOfMemory, AnalysisFail } {
|
||||
@branchHint(.cold);
|
||||
assert(diag_bundle.diags.len > 0);
|
||||
{
|
||||
|
|
@ -7357,7 +7357,7 @@ fn failCObjWithOwnedDiagBundle(
|
|||
return error.AnalysisFail;
|
||||
}
|
||||
|
||||
fn failWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, comptime format: []const u8, args: anytype) SemaError {
|
||||
fn failWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, comptime format: []const u8, args: anytype) error{ OutOfMemory, AnalysisFail } {
|
||||
@branchHint(.cold);
|
||||
var bundle: ErrorBundle.Wip = undefined;
|
||||
try bundle.init(comp.gpa);
|
||||
|
|
@ -7384,7 +7384,7 @@ fn failWin32ResourceWithOwnedBundle(
|
|||
comp: *Compilation,
|
||||
win32_resource: *Win32Resource,
|
||||
err_bundle: ErrorBundle,
|
||||
) SemaError {
|
||||
) error{ OutOfMemory, AnalysisFail } {
|
||||
@branchHint(.cold);
|
||||
{
|
||||
comp.mutex.lock();
|
||||
|
|
|
|||
|
|
@ -1153,23 +1153,17 @@ const Local = struct {
|
|||
fn PtrArrayElem(comptime len: usize) type {
|
||||
const elem_info = @typeInfo(Elem).@"struct";
|
||||
const elem_fields = elem_info.fields;
|
||||
var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined;
|
||||
for (&new_fields, elem_fields) |*new_field, elem_field| {
|
||||
const T = *[len]elem_field.type;
|
||||
new_field.* = .{
|
||||
.name = elem_field.name,
|
||||
.type = T,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf(T),
|
||||
};
|
||||
var new_names: [elem_fields.len][]const u8 = undefined;
|
||||
var new_types: [elem_fields.len]type = undefined;
|
||||
for (elem_fields, &new_names, &new_types) |elem_field, *new_name, *NewType| {
|
||||
new_name.* = elem_field.name;
|
||||
NewType.* = *[len]elem_field.type;
|
||||
}
|
||||
if (elem_info.is_tuple) {
|
||||
return @Tuple(&new_types);
|
||||
} else {
|
||||
return @Struct(.auto, null, &new_names, &new_types, &@splat(.{}));
|
||||
}
|
||||
return @Type(.{ .@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &new_fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = elem_info.is_tuple,
|
||||
} });
|
||||
}
|
||||
fn PtrElem(comptime opts: struct {
|
||||
size: std.builtin.Type.Pointer.Size,
|
||||
|
|
@ -1177,32 +1171,17 @@ const Local = struct {
|
|||
}) type {
|
||||
const elem_info = @typeInfo(Elem).@"struct";
|
||||
const elem_fields = elem_info.fields;
|
||||
var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined;
|
||||
for (&new_fields, elem_fields) |*new_field, elem_field| {
|
||||
const T = @Type(.{ .pointer = .{
|
||||
.size = opts.size,
|
||||
.is_const = opts.is_const,
|
||||
.is_volatile = false,
|
||||
.alignment = @alignOf(elem_field.type),
|
||||
.address_space = .generic,
|
||||
.child = elem_field.type,
|
||||
.is_allowzero = false,
|
||||
.sentinel_ptr = null,
|
||||
} });
|
||||
new_field.* = .{
|
||||
.name = elem_field.name,
|
||||
.type = T,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf(T),
|
||||
};
|
||||
var new_names: [elem_fields.len][]const u8 = undefined;
|
||||
var new_types: [elem_fields.len]type = undefined;
|
||||
for (elem_fields, &new_names, &new_types) |elem_field, *new_name, *NewType| {
|
||||
new_name.* = elem_field.name;
|
||||
NewType.* = @Pointer(opts.size, .{ .@"const" = opts.is_const }, elem_field.type, null);
|
||||
}
|
||||
if (elem_info.is_tuple) {
|
||||
return @Tuple(&new_types);
|
||||
} else {
|
||||
return @Struct(.auto, null, &new_names, &new_types, &@splat(.{}));
|
||||
}
|
||||
return @Type(.{ .@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &new_fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = elem_info.is_tuple,
|
||||
} });
|
||||
}
|
||||
|
||||
pub fn addOne(mutable: Mutable) Allocator.Error!PtrElem(.{ .size = .one }) {
|
||||
|
|
@ -2017,8 +1996,7 @@ pub const Key = union(enum) {
|
|||
error_union_type: ErrorUnionType,
|
||||
simple_type: SimpleType,
|
||||
/// This represents a struct that has been explicitly declared in source code,
|
||||
/// or was created with `@Type`. It is unique and based on a declaration.
|
||||
/// It may be a tuple, if declared like this: `struct {A, B, C}`.
|
||||
/// or was created with `@Struct`. It is unique and based on a declaration.
|
||||
struct_type: NamespaceType,
|
||||
/// This is a tuple type. Tuples are logically similar to structs, but have some
|
||||
/// important differences in semantics; they do not undergo staged type resolution,
|
||||
|
|
@ -2175,7 +2153,7 @@ pub const Key = union(enum) {
|
|||
/// The union for which this is a tag type.
|
||||
union_type: Index,
|
||||
},
|
||||
/// This type originates from a reification via `@Type`, or from an anonymous initialization.
|
||||
/// This type originates from a reification via `@Enum`, `@Struct`, `@Union` or from an anonymous initialization.
|
||||
/// It is hashed based on its ZIR instruction index and fields, attributes, etc.
|
||||
/// To avoid making this key overly complex, the type-specific data is hashed by Sema.
|
||||
reified: struct {
|
||||
|
|
@ -4641,6 +4619,13 @@ pub const Index = enum(u32) {
|
|||
slice_const_u8_type,
|
||||
slice_const_u8_sentinel_0_type,
|
||||
|
||||
manyptr_const_slice_const_u8_type,
|
||||
slice_const_slice_const_u8_type,
|
||||
|
||||
optional_type_type,
|
||||
manyptr_const_type_type,
|
||||
slice_const_type_type,
|
||||
|
||||
vector_8_i8_type,
|
||||
vector_16_i8_type,
|
||||
vector_32_i8_type,
|
||||
|
|
@ -5201,6 +5186,45 @@ pub const static_keys: [static_len]Key = .{
|
|||
},
|
||||
} },
|
||||
|
||||
// [*]const []const u8
|
||||
.{ .ptr_type = .{
|
||||
.child = .slice_const_u8_type,
|
||||
.flags = .{
|
||||
.size = .many,
|
||||
.is_const = true,
|
||||
},
|
||||
} },
|
||||
|
||||
// []const []const u8
|
||||
.{ .ptr_type = .{
|
||||
.child = .slice_const_u8_type,
|
||||
.flags = .{
|
||||
.size = .slice,
|
||||
.is_const = true,
|
||||
},
|
||||
} },
|
||||
|
||||
// ?type
|
||||
.{ .opt_type = .type_type },
|
||||
|
||||
// [*]const type
|
||||
.{ .ptr_type = .{
|
||||
.child = .type_type,
|
||||
.flags = .{
|
||||
.size = .many,
|
||||
.is_const = true,
|
||||
},
|
||||
} },
|
||||
|
||||
// []const type
|
||||
.{ .ptr_type = .{
|
||||
.child = .type_type,
|
||||
.flags = .{
|
||||
.size = .slice,
|
||||
.is_const = true,
|
||||
},
|
||||
} },
|
||||
|
||||
// @Vector(8, i8)
|
||||
.{ .vector_type = .{ .len = 8, .child = .i8_type } },
|
||||
// @Vector(16, i8)
|
||||
|
|
@ -10225,16 +10249,8 @@ pub fn getGeneratedTagEnumType(
|
|||
}
|
||||
|
||||
pub const OpaqueTypeInit = struct {
|
||||
key: union(enum) {
|
||||
declared: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: []const CaptureValue,
|
||||
},
|
||||
reified: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
// No type hash since reifid opaques have no data other than the `@Type` location
|
||||
},
|
||||
},
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: []const CaptureValue,
|
||||
};
|
||||
|
||||
pub fn getOpaqueType(
|
||||
|
|
@ -10243,16 +10259,10 @@ pub fn getOpaqueType(
|
|||
tid: Zcu.PerThread.Id,
|
||||
ini: OpaqueTypeInit,
|
||||
) Allocator.Error!WipNamespaceType.Result {
|
||||
var gop = try ip.getOrPutKey(gpa, tid, .{ .opaque_type = switch (ini.key) {
|
||||
.declared => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .external = d.captures },
|
||||
} },
|
||||
.reified => |r| .{ .reified = .{
|
||||
.zir_index = r.zir_index,
|
||||
.type_hash = 0,
|
||||
} },
|
||||
} });
|
||||
var gop = try ip.getOrPutKey(gpa, tid, .{ .opaque_type = .{ .declared = .{
|
||||
.zir_index = ini.zir_index,
|
||||
.captures = .{ .external = ini.captures },
|
||||
} } });
|
||||
defer gop.deinit();
|
||||
if (gop == .existing) return .{ .existing = gop.existing };
|
||||
|
||||
|
|
@ -10261,30 +10271,19 @@ pub fn getOpaqueType(
|
|||
const extra = local.getMutableExtra(gpa);
|
||||
try items.ensureUnusedCapacity(1);
|
||||
|
||||
try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeOpaque).@"struct".fields.len + switch (ini.key) {
|
||||
.declared => |d| d.captures.len,
|
||||
.reified => 0,
|
||||
});
|
||||
try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeOpaque).@"struct".fields.len + ini.captures.len);
|
||||
const extra_index = addExtraAssumeCapacity(extra, Tag.TypeOpaque{
|
||||
.name = undefined, // set by `finish`
|
||||
.name_nav = undefined, // set by `finish`
|
||||
.namespace = undefined, // set by `finish`
|
||||
.zir_index = switch (ini.key) {
|
||||
inline else => |x| x.zir_index,
|
||||
},
|
||||
.captures_len = switch (ini.key) {
|
||||
.declared => |d| @intCast(d.captures.len),
|
||||
.reified => std.math.maxInt(u32),
|
||||
},
|
||||
.zir_index = ini.zir_index,
|
||||
.captures_len = @intCast(ini.captures.len),
|
||||
});
|
||||
items.appendAssumeCapacity(.{
|
||||
.tag = .type_opaque,
|
||||
.data = extra_index,
|
||||
});
|
||||
switch (ini.key) {
|
||||
.declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
|
||||
.reified => {},
|
||||
}
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(ini.captures)});
|
||||
return .{
|
||||
.wip = .{
|
||||
.tid = tid,
|
||||
|
|
@ -10555,6 +10554,8 @@ pub fn slicePtrType(ip: *const InternPool, index: Index) Index {
|
|||
switch (index) {
|
||||
.slice_const_u8_type => return .manyptr_const_u8_type,
|
||||
.slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type,
|
||||
.slice_const_slice_const_u8_type => return .manyptr_const_slice_const_u8_type,
|
||||
.slice_const_type_type => return .manyptr_const_type_type,
|
||||
else => {},
|
||||
}
|
||||
const item = index.unwrap(ip).getItem(ip);
|
||||
|
|
@ -12013,8 +12014,13 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
|
|||
.manyptr_u8_type,
|
||||
.manyptr_const_u8_type,
|
||||
.manyptr_const_u8_sentinel_0_type,
|
||||
.manyptr_const_slice_const_u8_type,
|
||||
.slice_const_u8_type,
|
||||
.slice_const_u8_sentinel_0_type,
|
||||
.slice_const_slice_const_u8_type,
|
||||
.optional_type_type,
|
||||
.manyptr_const_type_type,
|
||||
.slice_const_type_type,
|
||||
.vector_8_i8_type,
|
||||
.vector_16_i8_type,
|
||||
.vector_32_i8_type,
|
||||
|
|
@ -12355,8 +12361,12 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
|
|||
.manyptr_u8_type,
|
||||
.manyptr_const_u8_type,
|
||||
.manyptr_const_u8_sentinel_0_type,
|
||||
.manyptr_const_slice_const_u8_type,
|
||||
.slice_const_u8_type,
|
||||
.slice_const_u8_sentinel_0_type,
|
||||
.slice_const_slice_const_u8_type,
|
||||
.manyptr_const_type_type,
|
||||
.slice_const_type_type,
|
||||
=> .pointer,
|
||||
|
||||
.vector_8_i8_type,
|
||||
|
|
@ -12408,6 +12418,7 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
|
|||
.vector_8_f64_type,
|
||||
=> .vector,
|
||||
|
||||
.optional_type_type => .optional,
|
||||
.optional_noreturn_type => .optional,
|
||||
.anyerror_void_error_union_type => .error_union,
|
||||
.empty_tuple_type => .@"struct",
|
||||
|
|
|
|||
2535
src/Sema.zig
2535
src/Sema.zig
File diff suppressed because it is too large
Load diff
13
src/Type.zig
13
src/Type.zig
|
|
@ -317,7 +317,7 @@ pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread, ctx: ?*Compari
|
|||
.undefined,
|
||||
=> try writer.print("@TypeOf({s})", .{@tagName(s)}),
|
||||
|
||||
.enum_literal => try writer.writeAll("@Type(.enum_literal)"),
|
||||
.enum_literal => try writer.writeAll("@EnumLiteral()"),
|
||||
|
||||
.generic_poison => unreachable,
|
||||
},
|
||||
|
|
@ -3509,7 +3509,9 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
|
|||
.union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
|
||||
.enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
|
||||
.opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
|
||||
.reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
|
||||
.reify_enum => zir.extraData(Zir.Inst.ReifyEnum, inst.data.extended.operand).data.src_line,
|
||||
.reify_struct => zir.extraData(Zir.Inst.ReifyStruct, inst.data.extended.operand).data.src_line,
|
||||
.reify_union => zir.extraData(Zir.Inst.ReifyUnion, inst.data.extended.operand).data.src_line,
|
||||
else => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
|
|
@ -3835,7 +3837,7 @@ fn resolveStructInner(
|
|||
}
|
||||
return error.AnalysisFail;
|
||||
},
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.OutOfMemory, error.Canceled => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -3894,6 +3896,7 @@ fn resolveUnionInner(
|
|||
return error.AnalysisFail;
|
||||
},
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.Canceled => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -4280,6 +4283,10 @@ pub const manyptr_const_u8: Type = .{ .ip_index = .manyptr_const_u8_type };
|
|||
pub const manyptr_const_u8_sentinel_0: Type = .{ .ip_index = .manyptr_const_u8_sentinel_0_type };
|
||||
pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type };
|
||||
pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };
|
||||
pub const slice_const_slice_const_u8: Type = .{ .ip_index = .slice_const_slice_const_u8_type };
|
||||
pub const slice_const_type: Type = .{ .ip_index = .slice_const_type_type };
|
||||
pub const optional_type: Type = .{ .ip_index = .optional_type_type };
|
||||
pub const optional_noreturn: Type = .{ .ip_index = .optional_noreturn_type };
|
||||
|
||||
pub const vector_8_i8: Type = .{ .ip_index = .vector_8_i8_type };
|
||||
pub const vector_16_i8: Type = .{ .ip_index = .vector_16_i8_type };
|
||||
|
|
|
|||
|
|
@ -1,12 +1,15 @@
|
|||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const build_options = @import("build_options");
|
||||
const Type = @import("Type.zig");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
const std = @import("std");
|
||||
const Io = std.Io;
|
||||
const assert = std.debug.assert;
|
||||
const BigIntConst = std.math.big.int.Const;
|
||||
const BigIntMutable = std.math.big.int.Mutable;
|
||||
const Target = std.Target;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
const Type = @import("Type.zig");
|
||||
const Zcu = @import("Zcu.zig");
|
||||
const Sema = @import("Sema.zig");
|
||||
const InternPool = @import("InternPool.zig");
|
||||
|
|
@ -2410,6 +2413,7 @@ pub const PointerDeriveStep = union(enum) {
|
|||
pub fn pointerDerivation(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread) Allocator.Error!PointerDeriveStep {
|
||||
return ptr_val.pointerDerivationAdvanced(arena, pt, false, null) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.Canceled => @panic("TODO"), // pls remove from error set mlugg
|
||||
error.AnalysisFail => unreachable,
|
||||
};
|
||||
}
|
||||
|
|
@ -2824,6 +2828,29 @@ pub fn resolveLazy(
|
|||
.val = resolved_val,
|
||||
}));
|
||||
},
|
||||
.error_union => |eu| switch (eu.val) {
|
||||
.err_name => return val,
|
||||
.payload => |payload| {
|
||||
const resolved_payload = try Value.fromInterned(payload).resolveLazy(arena, pt);
|
||||
if (resolved_payload.toIntern() == payload) return val;
|
||||
return .fromInterned(try pt.intern(.{ .error_union = .{
|
||||
.ty = eu.ty,
|
||||
.val = .{ .payload = resolved_payload.toIntern() },
|
||||
} }));
|
||||
},
|
||||
},
|
||||
.opt => |opt| switch (opt.val) {
|
||||
.none => return val,
|
||||
else => |payload| {
|
||||
const resolved_payload = try Value.fromInterned(payload).resolveLazy(arena, pt);
|
||||
if (resolved_payload.toIntern() == payload) return val;
|
||||
return .fromInterned(try pt.intern(.{ .opt = .{
|
||||
.ty = opt.ty,
|
||||
.val = resolved_payload.toIntern(),
|
||||
} }));
|
||||
},
|
||||
},
|
||||
|
||||
else => return val,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
28
src/Zcu.zig
28
src/Zcu.zig
|
|
@ -416,10 +416,13 @@ pub const BuiltinDecl = enum {
|
|||
Type,
|
||||
@"Type.Fn",
|
||||
@"Type.Fn.Param",
|
||||
@"Type.Fn.Param.Attributes",
|
||||
@"Type.Fn.Attributes",
|
||||
@"Type.Int",
|
||||
@"Type.Float",
|
||||
@"Type.Pointer",
|
||||
@"Type.Pointer.Size",
|
||||
@"Type.Pointer.Attributes",
|
||||
@"Type.Array",
|
||||
@"Type.Vector",
|
||||
@"Type.Optional",
|
||||
|
|
@ -427,10 +430,13 @@ pub const BuiltinDecl = enum {
|
|||
@"Type.ErrorUnion",
|
||||
@"Type.EnumField",
|
||||
@"Type.Enum",
|
||||
@"Type.Enum.Mode",
|
||||
@"Type.Union",
|
||||
@"Type.UnionField",
|
||||
@"Type.UnionField.Attributes",
|
||||
@"Type.Struct",
|
||||
@"Type.StructField",
|
||||
@"Type.StructField.Attributes",
|
||||
@"Type.ContainerLayout",
|
||||
@"Type.Opaque",
|
||||
@"Type.Declaration",
|
||||
|
|
@ -495,10 +501,13 @@ pub const BuiltinDecl = enum {
|
|||
.Type,
|
||||
.@"Type.Fn",
|
||||
.@"Type.Fn.Param",
|
||||
.@"Type.Fn.Param.Attributes",
|
||||
.@"Type.Fn.Attributes",
|
||||
.@"Type.Int",
|
||||
.@"Type.Float",
|
||||
.@"Type.Pointer",
|
||||
.@"Type.Pointer.Size",
|
||||
.@"Type.Pointer.Attributes",
|
||||
.@"Type.Array",
|
||||
.@"Type.Vector",
|
||||
.@"Type.Optional",
|
||||
|
|
@ -506,10 +515,13 @@ pub const BuiltinDecl = enum {
|
|||
.@"Type.ErrorUnion",
|
||||
.@"Type.EnumField",
|
||||
.@"Type.Enum",
|
||||
.@"Type.Enum.Mode",
|
||||
.@"Type.Union",
|
||||
.@"Type.UnionField",
|
||||
.@"Type.UnionField.Attributes",
|
||||
.@"Type.Struct",
|
||||
.@"Type.StructField",
|
||||
.@"Type.StructField.Attributes",
|
||||
.@"Type.ContainerLayout",
|
||||
.@"Type.Opaque",
|
||||
.@"Type.Declaration",
|
||||
|
|
@ -1745,28 +1757,28 @@ pub const SrcLoc = struct {
|
|||
const node = node_off.toAbsolute(src_loc.base_node);
|
||||
var buf: [1]Ast.Node.Index = undefined;
|
||||
const full = tree.fullFnProto(&buf, node).?;
|
||||
return tree.nodeToSpan(full.ast.align_expr.unwrap().?);
|
||||
return tree.nodeToSpan(full.ast.align_expr.unwrap() orelse node);
|
||||
},
|
||||
.node_offset_fn_type_addrspace => |node_off| {
|
||||
const tree = try src_loc.file_scope.getTree(zcu);
|
||||
const node = node_off.toAbsolute(src_loc.base_node);
|
||||
var buf: [1]Ast.Node.Index = undefined;
|
||||
const full = tree.fullFnProto(&buf, node).?;
|
||||
return tree.nodeToSpan(full.ast.addrspace_expr.unwrap().?);
|
||||
return tree.nodeToSpan(full.ast.addrspace_expr.unwrap() orelse node);
|
||||
},
|
||||
.node_offset_fn_type_section => |node_off| {
|
||||
const tree = try src_loc.file_scope.getTree(zcu);
|
||||
const node = node_off.toAbsolute(src_loc.base_node);
|
||||
var buf: [1]Ast.Node.Index = undefined;
|
||||
const full = tree.fullFnProto(&buf, node).?;
|
||||
return tree.nodeToSpan(full.ast.section_expr.unwrap().?);
|
||||
return tree.nodeToSpan(full.ast.section_expr.unwrap() orelse node);
|
||||
},
|
||||
.node_offset_fn_type_cc => |node_off| {
|
||||
const tree = try src_loc.file_scope.getTree(zcu);
|
||||
const node = node_off.toAbsolute(src_loc.base_node);
|
||||
var buf: [1]Ast.Node.Index = undefined;
|
||||
const full = tree.fullFnProto(&buf, node).?;
|
||||
return tree.nodeToSpan(full.ast.callconv_expr.unwrap().?);
|
||||
return tree.nodeToSpan(full.ast.callconv_expr.unwrap() orelse node);
|
||||
},
|
||||
|
||||
.node_offset_fn_type_ret_ty => |node_off| {
|
||||
|
|
@ -2684,7 +2696,9 @@ pub const LazySrcLoc = struct {
|
|||
.union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_node,
|
||||
.enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_node,
|
||||
.opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_node,
|
||||
.reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.node,
|
||||
.reify_enum => zir.extraData(Zir.Inst.ReifyEnum, inst.data.extended.operand).data.node,
|
||||
.reify_struct => zir.extraData(Zir.Inst.ReifyStruct, inst.data.extended.operand).data.node,
|
||||
.reify_union => zir.extraData(Zir.Inst.ReifyUnion, inst.data.extended.operand).data.node,
|
||||
else => unreachable,
|
||||
},
|
||||
else => unreachable,
|
||||
|
|
@ -2741,9 +2755,11 @@ pub const LazySrcLoc = struct {
|
|||
}
|
||||
};
|
||||
|
||||
pub const SemaError = error{ OutOfMemory, AnalysisFail };
|
||||
pub const SemaError = error{ OutOfMemory, Canceled, AnalysisFail };
|
||||
pub const CompileError = error{
|
||||
OutOfMemory,
|
||||
/// The compilation update is no longer desired.
|
||||
Canceled,
|
||||
/// When this is returned, the compile error for the failure has already been recorded.
|
||||
AnalysisFail,
|
||||
/// In a comptime scope, a return instruction was encountered. This error is only seen when
|
||||
|
|
|
|||
|
|
@ -1,26 +1,31 @@
|
|||
//! This type provides a wrapper around a `*Zcu` for uses which require a thread `Id`.
|
||||
//! Any operation which mutates `InternPool` state lives here rather than on `Zcu`.
|
||||
|
||||
const Air = @import("../Air.zig");
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const Ast = std.zig.Ast;
|
||||
const AstGen = std.zig.AstGen;
|
||||
const BigIntConst = std.math.big.int.Const;
|
||||
const BigIntMutable = std.math.big.int.Mutable;
|
||||
const Cache = std.Build.Cache;
|
||||
const log = std.log.scoped(.zcu);
|
||||
const mem = std.mem;
|
||||
const Zir = std.zig.Zir;
|
||||
const Zoir = std.zig.Zoir;
|
||||
const ZonGen = std.zig.ZonGen;
|
||||
const Io = std.Io;
|
||||
|
||||
const Air = @import("../Air.zig");
|
||||
const Builtin = @import("../Builtin.zig");
|
||||
const build_options = @import("build_options");
|
||||
const builtin = @import("builtin");
|
||||
const Cache = std.Build.Cache;
|
||||
const dev = @import("../dev.zig");
|
||||
const InternPool = @import("../InternPool.zig");
|
||||
const AnalUnit = InternPool.AnalUnit;
|
||||
const introspect = @import("../introspect.zig");
|
||||
const log = std.log.scoped(.zcu);
|
||||
const Module = @import("../Package.zig").Module;
|
||||
const Sema = @import("../Sema.zig");
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const target_util = @import("../target.zig");
|
||||
const trace = @import("../tracy.zig").trace;
|
||||
const Type = @import("../Type.zig");
|
||||
|
|
@ -29,9 +34,6 @@ const Zcu = @import("../Zcu.zig");
|
|||
const Compilation = @import("../Compilation.zig");
|
||||
const codegen = @import("../codegen.zig");
|
||||
const crash_report = @import("../crash_report.zig");
|
||||
const Zir = std.zig.Zir;
|
||||
const Zoir = std.zig.Zoir;
|
||||
const ZonGen = std.zig.ZonGen;
|
||||
|
||||
zcu: *Zcu,
|
||||
|
||||
|
|
@ -678,6 +680,7 @@ pub fn ensureMemoizedStateUpToDate(pt: Zcu.PerThread, stage: InternPool.Memoized
|
|||
// TODO: same as for `ensureComptimeUnitUpToDate` etc
|
||||
return error.OutOfMemory;
|
||||
},
|
||||
error.Canceled => |e| return e,
|
||||
error.ComptimeReturn => unreachable,
|
||||
error.ComptimeBreak => unreachable,
|
||||
};
|
||||
|
|
@ -842,6 +845,7 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU
|
|||
// for reporting OOM errors without allocating.
|
||||
return error.OutOfMemory;
|
||||
},
|
||||
error.Canceled => |e| return e,
|
||||
error.ComptimeReturn => unreachable,
|
||||
error.ComptimeBreak => unreachable,
|
||||
};
|
||||
|
|
@ -1030,6 +1034,7 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
|
|||
// for reporting OOM errors without allocating.
|
||||
return error.OutOfMemory;
|
||||
},
|
||||
error.Canceled => |e| return e,
|
||||
error.ComptimeReturn => unreachable,
|
||||
error.ComptimeBreak => unreachable,
|
||||
};
|
||||
|
|
@ -1443,6 +1448,7 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
|
|||
// for reporting OOM errors without allocating.
|
||||
return error.OutOfMemory;
|
||||
},
|
||||
error.Canceled => |e| return e,
|
||||
error.ComptimeReturn => unreachable,
|
||||
error.ComptimeBreak => unreachable,
|
||||
};
|
||||
|
|
@ -1668,6 +1674,7 @@ pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, func_index: InternPool.Index) Z
|
|||
// for reporting OOM errors without allocating.
|
||||
return error.OutOfMemory;
|
||||
},
|
||||
error.Canceled => |e| return e,
|
||||
};
|
||||
|
||||
if (was_outdated) {
|
||||
|
|
@ -2360,6 +2367,7 @@ pub fn embedFile(
|
|||
import_string: []const u8,
|
||||
) error{
|
||||
OutOfMemory,
|
||||
Canceled,
|
||||
ImportOutsideModulePath,
|
||||
CurrentWorkingDirectoryUnlinked,
|
||||
}!Zcu.EmbedFile.Index {
|
||||
|
|
@ -4123,7 +4131,7 @@ fn recreateEnumType(
|
|||
pt: Zcu.PerThread,
|
||||
old_ty: InternPool.Index,
|
||||
key: InternPool.Key.NamespaceType.Declared,
|
||||
) Allocator.Error!InternPool.Index {
|
||||
) (Allocator.Error || Io.Cancelable)!InternPool.Index {
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
|
@ -4234,6 +4242,7 @@ fn recreateEnumType(
|
|||
body_end,
|
||||
) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.Canceled => |e| return e,
|
||||
error.AnalysisFail => {}, // call sites are responsible for checking `[transitive_]failed_analysis` to detect this
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -120,23 +120,13 @@ const matchers = matchers: {
|
|||
);
|
||||
var symbols: Symbols: {
|
||||
const symbols = @typeInfo(@TypeOf(instruction.symbols)).@"struct".fields;
|
||||
var symbol_fields: [symbols.len]std.builtin.Type.StructField = undefined;
|
||||
for (&symbol_fields, symbols) |*symbol_field, symbol| {
|
||||
const Storage = zonCast(SymbolSpec, @field(instruction.symbols, symbol.name), .{}).Storage();
|
||||
symbol_field.* = .{
|
||||
.name = symbol.name,
|
||||
.type = Storage,
|
||||
.default_value_ptr = null,
|
||||
.is_comptime = false,
|
||||
.alignment = @alignOf(Storage),
|
||||
};
|
||||
var field_names: [symbols.len][]const u8 = undefined;
|
||||
var field_types: [symbols.len]type = undefined;
|
||||
for (symbols, &field_names, &field_types) |symbol, *field_name, *FieldType| {
|
||||
field_name.* = symbol.name;
|
||||
FieldType.* = zonCast(SymbolSpec, @field(instruction.symbols, symbol.name), .{}).Storage();
|
||||
}
|
||||
break :Symbols @Type(.{ .@"struct" = .{
|
||||
.layout = .auto,
|
||||
.fields = &symbol_fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
} });
|
||||
break :Symbols @Struct(.auto, null, &field_names, &field_types, &@splat(.{}));
|
||||
} = undefined;
|
||||
const Symbol = std.meta.FieldEnum(@TypeOf(instruction.symbols));
|
||||
comptime var unused_symbols: std.enums.EnumSet(Symbol) = .initFull();
|
||||
|
|
@ -334,7 +324,7 @@ const SymbolSpec = union(enum) {
|
|||
.reg => aarch64.encoding.Register,
|
||||
.arrangement => aarch64.encoding.Register.Arrangement,
|
||||
.systemreg => aarch64.encoding.Register.System,
|
||||
.imm => |imm_spec| @Type(.{ .int = imm_spec.type }),
|
||||
.imm => |imm_spec| @Int(imm_spec.type.signedness, imm_spec.type.bits),
|
||||
.fimm => f16,
|
||||
.extend => Instruction.DataProcessingRegister.AddSubtractExtendedRegister.Option,
|
||||
.shift => Instruction.DataProcessingRegister.Shift.Op,
|
||||
|
|
@ -413,13 +403,13 @@ const SymbolSpec = union(enum) {
|
|||
return systemreg;
|
||||
},
|
||||
.imm => |imm_spec| {
|
||||
const imm = std.fmt.parseInt(@Type(.{ .int = .{
|
||||
.signedness = imm_spec.type.signedness,
|
||||
.bits = switch (imm_spec.adjust) {
|
||||
const imm = std.fmt.parseInt(@Int(
|
||||
imm_spec.type.signedness,
|
||||
switch (imm_spec.adjust) {
|
||||
.none, .neg_wrap => imm_spec.type.bits,
|
||||
.dec => imm_spec.type.bits + 1,
|
||||
},
|
||||
} }), token, 0) catch {
|
||||
), token, 0) catch {
|
||||
log.debug("invalid immediate: \"{f}\"", .{std.zig.fmtString(token)});
|
||||
return null;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -8928,12 +8928,16 @@ pub const Value = struct {
|
|||
constant: Constant,
|
||||
|
||||
pub const Tag = @typeInfo(Parent).@"union".tag_type.?;
|
||||
pub const Payload = @Type(.{ .@"union" = .{
|
||||
.layout = .auto,
|
||||
.tag_type = null,
|
||||
.fields = @typeInfo(Parent).@"union".fields,
|
||||
.decls = &.{},
|
||||
} });
|
||||
pub const Payload = Payload: {
|
||||
const fields = @typeInfo(Parent).@"union".fields;
|
||||
var types: [fields.len]type = undefined;
|
||||
var names: [fields.len][]const u8 = undefined;
|
||||
for (fields, &types, &names) |f, *ty, *name| {
|
||||
ty.* = f.type;
|
||||
name.* = f.name;
|
||||
}
|
||||
break :Payload @Union(.auto, null, &names, &types, &@splat(.{}));
|
||||
};
|
||||
};
|
||||
|
||||
pub const Location = union(enum(u1)) {
|
||||
|
|
@ -8949,12 +8953,16 @@ pub const Value = struct {
|
|||
},
|
||||
|
||||
pub const Tag = @typeInfo(Location).@"union".tag_type.?;
|
||||
pub const Payload = @Type(.{ .@"union" = .{
|
||||
.layout = .auto,
|
||||
.tag_type = null,
|
||||
.fields = @typeInfo(Location).@"union".fields,
|
||||
.decls = &.{},
|
||||
} });
|
||||
pub const Payload = Payload: {
|
||||
const fields = @typeInfo(Location).@"union".fields;
|
||||
var types: [fields.len]type = undefined;
|
||||
var names: [fields.len][]const u8 = undefined;
|
||||
for (fields, &types, &names) |f, *ty, *name| {
|
||||
ty.* = f.type;
|
||||
name.* = f.name;
|
||||
}
|
||||
break :Payload @Union(.auto, null, &names, &types, &@splat(.{}));
|
||||
};
|
||||
};
|
||||
|
||||
pub const Indirect = packed struct(u32) {
|
||||
|
|
@ -11210,7 +11218,7 @@ pub const Value = struct {
|
|||
.storage = .{ .u64 = switch (size) {
|
||||
else => unreachable,
|
||||
inline 1...8 => |ct_size| std.mem.readInt(
|
||||
@Type(.{ .int = .{ .signedness = .unsigned, .bits = 8 * ct_size } }),
|
||||
@Int(.unsigned, 8 * ct_size),
|
||||
buffer[@intCast(offset)..][0..ct_size],
|
||||
isel.target.cpu.arch.endian(),
|
||||
),
|
||||
|
|
@ -11438,7 +11446,7 @@ fn writeKeyToMemory(isel: *Select, constant_key: InternPool.Key, buffer: []u8) e
|
|||
switch (buffer.len) {
|
||||
else => unreachable,
|
||||
inline 1...4 => |size| std.mem.writeInt(
|
||||
@Type(.{ .int = .{ .signedness = .unsigned, .bits = 8 * size } }),
|
||||
@Int(.unsigned, 8 * size),
|
||||
buffer[0..size],
|
||||
@intCast(error_int),
|
||||
isel.target.cpu.arch.endian(),
|
||||
|
|
|
|||
|
|
@ -5672,6 +5672,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
|
|||
c_name_buf[0] = '$';
|
||||
@memcpy((&c_name_buf)[1..][0..field_name.len], field_name);
|
||||
break :name (&c_name_buf)[0 .. 1 + field_name.len];
|
||||
} else if (target.cpu.arch.isSPARC() and
|
||||
(mem.eql(u8, field_name, "ccr") or mem.eql(u8, field_name, "icc") or mem.eql(u8, field_name, "xcc"))) name: {
|
||||
// C compilers just use `icc` to encompass all of these.
|
||||
break :name "icc";
|
||||
} else field_name;
|
||||
|
||||
try w.print(" {f}", .{fmtStringLiteral(name, null)});
|
||||
|
|
|
|||
|
|
@ -1416,6 +1416,9 @@ pub const Pool = struct {
|
|||
.null_type,
|
||||
.undefined_type,
|
||||
.enum_literal_type,
|
||||
.optional_type_type,
|
||||
.manyptr_const_type_type,
|
||||
.slice_const_type_type,
|
||||
=> return .void,
|
||||
.u1_type, .u8_type => return .u8,
|
||||
.i8_type => return .i8,
|
||||
|
|
@ -1525,6 +1528,73 @@ pub const Pool = struct {
|
|||
return pool.fromFields(allocator, .@"struct", &fields, kind);
|
||||
},
|
||||
|
||||
.manyptr_const_slice_const_u8_type => {
|
||||
const target = &mod.resolved_target.result;
|
||||
var fields: [2]Info.Field = .{
|
||||
.{
|
||||
.name = .{ .index = .ptr },
|
||||
.ctype = try pool.getPointer(allocator, .{
|
||||
.elem_ctype = .u8,
|
||||
.@"const" = true,
|
||||
.nonstring = true,
|
||||
}),
|
||||
.alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)),
|
||||
},
|
||||
.{
|
||||
.name = .{ .index = .len },
|
||||
.ctype = .usize,
|
||||
.alignas = AlignAs.fromAbiAlignment(
|
||||
.fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())),
|
||||
),
|
||||
},
|
||||
};
|
||||
const slice_const_u8 = try pool.fromFields(allocator, .@"struct", &fields, kind);
|
||||
return pool.getPointer(allocator, .{
|
||||
.elem_ctype = slice_const_u8,
|
||||
.@"const" = true,
|
||||
});
|
||||
},
|
||||
.slice_const_slice_const_u8_type => {
|
||||
const target = &mod.resolved_target.result;
|
||||
var fields: [2]Info.Field = .{
|
||||
.{
|
||||
.name = .{ .index = .ptr },
|
||||
.ctype = try pool.getPointer(allocator, .{
|
||||
.elem_ctype = .u8,
|
||||
.@"const" = true,
|
||||
.nonstring = true,
|
||||
}),
|
||||
.alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)),
|
||||
},
|
||||
.{
|
||||
.name = .{ .index = .len },
|
||||
.ctype = .usize,
|
||||
.alignas = AlignAs.fromAbiAlignment(
|
||||
.fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())),
|
||||
),
|
||||
},
|
||||
};
|
||||
const slice_const_u8 = try pool.fromFields(allocator, .@"struct", &fields, .forward);
|
||||
fields = .{
|
||||
.{
|
||||
.name = .{ .index = .ptr },
|
||||
.ctype = try pool.getPointer(allocator, .{
|
||||
.elem_ctype = slice_const_u8,
|
||||
.@"const" = true,
|
||||
}),
|
||||
.alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)),
|
||||
},
|
||||
.{
|
||||
.name = .{ .index = .len },
|
||||
.ctype = .usize,
|
||||
.alignas = AlignAs.fromAbiAlignment(
|
||||
.fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())),
|
||||
),
|
||||
},
|
||||
};
|
||||
return pool.fromFields(allocator, .@"struct", &fields, kind);
|
||||
},
|
||||
|
||||
.vector_8_i8_type => {
|
||||
const vector_ctype = try pool.getVector(allocator, .{
|
||||
.elem_ctype = .i8,
|
||||
|
|
|
|||
|
|
@ -277,7 +277,6 @@ pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8
|
|||
.gnusf => "gnusf",
|
||||
.gnux32 => "gnux32",
|
||||
.ilp32 => "unknown",
|
||||
.code16 => "code16",
|
||||
.eabi => "eabi",
|
||||
.eabihf => "eabihf",
|
||||
.android => "android",
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue