Compare commits

...

52 commits

Author SHA1 Message Date
David Rubin
e93ce49504
c: support +r output constraint 2025-12-05 16:52:21 -08:00
David Rubin
2e341a7173
codegen: fix x86-64 backend crc32 inline asm 2025-12-05 16:52:21 -08:00
David Rubin
7e0d86638f
hash: implement fast crc32c 2025-12-05 16:52:18 -08:00
Matthew Lugg
4ce7b57e86 std.heap: rework c_allocator
The main goal here was to avoid allocating padding and header space if
`malloc` already guarantees the alignment we need via `max_align_t`.
Previously, the compiler was using `std.heap.raw_c_allocator` as its GPA
in some cases depending on `std.c.max_align_t`, but that's pretty
fragile (it meant we had to encode our alignment requirements into
`src/main.zig`!). Perhaps more importantly, that solution is
unnecessarily restrictive: since Zig's `Allocator` API passes the
`Alignment` not only to `alloc`, but also to `free` etc, we are able to
use a different strategy depending on its value. So `c_allocator` can
simply compare the requested align to `Alignment.of(std.c.max_align_t)`,
and use a raw `malloc` call (no header needed!) if it will guarantee a
suitable alignment (which, in practice, will be true the vast majority
of the time).

So in short, this makes `std.heap.c_allocator` more memory efficient,
and probably removes any incentive to use `std.heap.raw_c_allocator`.

I also refactored the `c_allocator` implementation while doing this,
just to neaten things up a little.
2025-12-06 00:16:33 +01:00
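
A minimal sketch of the dispatch described in the message above, assuming only the `std.c.max_align_t` comparison the commit text describes; `needsAlignmentHeader` is an illustrative name, not part of `std.heap`:

```zig
const std = @import("std");

/// Illustrative only: the per-request decision the rework is based on.
/// Requests at or below malloc's `max_align_t` guarantee can be served by a
/// plain `malloc`/`free` pair, with no padding and no header.
fn needsAlignmentHeader(alignment_bytes: usize) bool {
    return alignment_bytes > @alignOf(std.c.max_align_t);
}

test "typical alignments are already covered by malloc's guarantee" {
    try std.testing.expect(!needsAlignmentHeader(@alignOf(u64)));
    try std.testing.expect(needsAlignmentHeader(4096)); // e.g. a page-aligned request
}
```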
Matthew Lugg
ea94ac52c5
std.debug: skip manage resources correctly with cbe
2025-12-05 15:10:58 +01:00
Aidan Welch
032e3c9254 std.Io.Timestamp: when creating a Clock.Timestamp actually set .raw instead of the non-existent .nanoseconds
2025-12-05 14:14:01 +01:00
Luna Schwalbe
adc5a39de2 Change github links to codeberg 2025-12-05 14:12:39 +01:00
Loris Cro
58e3c2cefd make Io.net.sendMany compile 2025-12-05 11:50:04 +01:00
Alex Rønne Petersen
c166bb36f6
ci: reduce x86_64-linux timeouts
These excessive timeouts should no longer be necessary with the recent tuning of
job capacity and maxrss on these machines.
2025-12-04 20:52:34 +01:00
Alex Rønne Petersen
78cba86928
ci: set maxrss from $ZSF_MAX_RSS if provided by the runner
All of our runners now define this. When running a CI script locally, this will
not be set, so we default to 0, aka "all available system memory".
2025-12-04 20:06:48 +01:00
Alex Rønne Petersen
2728eb5d5e
build: adjust max_rss on a per-CI-host basis
This avoids pessimizing concurrency on all machines due to e.g. the macOS
machine having high memory usage across the board due to 16K page size.

This also adds max_rss to test-unit and test-c-abi since those tend to eat a
decent chunk of memory too.
2025-12-04 20:06:12 +01:00
Alex Rønne Petersen
44543800a5
std.process.Child: enable rusage collection for dragonfly, netbsd, openbsd 2025-12-04 03:46:36 +01:00
Alex Rønne Petersen
2659fadb95
std.c: add rusage for dragonfly, netbsd, openbsd 2025-12-04 03:46:36 +01:00
Alex Rønne Petersen
9eed87f93e
std.process.Child: enable rusage collection for freebsd, illumos, serenity 2025-12-04 03:46:36 +01:00
Alex Rønne Petersen
e270c97ed1
ci: don't skip release mode tests on x86_64-windows-debug
pulsar is much faster than george so we don't need to do this anymore.
2025-12-04 03:46:36 +01:00
Alex Rønne Petersen
c21ce53494
ci: skip spirv and wasm tests on x86_64-freebsd
These are already built and run on x86_64-linux.
2025-12-04 03:46:36 +01:00
Alex Rønne Petersen
e2a9e568b4
build: add -Dskip-spirv and -Dskip-wasm options 2025-12-04 03:46:35 +01:00
Alex Rønne Petersen
ad9a5187ac
build: add some missing darwin tags 2025-12-04 03:46:35 +01:00
jedisct1
d73fbcc3ae Merge pull request 'Argon2: use the std.Io interface' (#30084) from jedisct1/zig:argon2 into master
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/30084
2025-12-03 12:18:09 +01:00
Zihad
cb115cf73a std.process.ArgIteratorWasi: fix no-args deinit 2025-12-03 08:35:24 +01:00
Alex Rønne Petersen
be9649f4ea
ci: set a sensible maxrss in x86_64-windows scripts
2025-12-03 00:24:58 +01:00
Frank Denis
6fe95c28cf Argon2: use the std.Io interface
Also reduce the memory required by tests.

4GB for every test is way too much and doesn't provide much benefit
in testing the algorithms.
2025-12-02 23:03:52 +01:00
Andrew Kelley
52ad126bb4 Merge pull request 'std.Io.Threaded: rework cancellation' (#30033) from cancellation into master
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/30033
2025-12-02 17:58:29 +01:00
Andrew Kelley
bb3f56d5d5 std.Io.Threaded: separate out ECANCELED handling again
If ECANCELED occurs, it's from pthread_cancel which will *permanently*
set that thread to be in a "canceling" state, which means the cancel
cannot be ignored. Unlike EINTR, it cannot simply be retried; it must
be acknowledged.
2025-12-01 19:17:52 -08:00
Andrew Kelley
cf82064ebc std.Io.Threaded: don't use pthread_cancel with musl
It doesn't support setting the "canceled" status to false, so once a
thread has been canceled, all operations on the thread start permanently
failing.
2025-12-01 19:17:52 -08:00
Andrew Kelley
bf0ffc45b9 std.Io.Threaded: musl: handle ECANCELED same as EINTR
Otherwise the pthread_cancel can affect unrelated tasks.
2025-12-01 19:17:52 -08:00
Andrew Kelley
54a84964f8 std.os.linux: SIG enum is non-exhaustive 2025-12-01 19:17:52 -08:00
Andrew Kelley
57f5de5b77 std.Io.Threaded: use the correct mmsghdr struct 2025-12-01 19:17:52 -08:00
Andrew Kelley
103467fa6c std.Io.Threaded: make is_musl linux-only 2025-12-01 19:17:52 -08:00
David Rubin
85053a6a36 link.Elf: implement aarch64 relocation 2025-12-01 19:17:52 -08:00
Andrew Kelley
c4f5dda135 std.Io.Threaded: re-introduce retry logic behind config 2025-12-01 19:17:52 -08:00
Andrew Kelley
de87bad4c3 std.Io.Threaded: don't solve the cancel race after all
Unfortunately, trying again until the cancellation request is
acknowledged has been observed to incur a large amount of overhead,
and usually strong cancellation guarantees are not needed, so the
race condition is not handled here. Users who want to avoid this
have this menu of options instead:
* Use no libc, in which case Zig std lib can avoid the race (tracking
  issue: https://codeberg.org/ziglang/zig/issues/30049)
* Use musl libc
* Use `std.Io.Evented`. But this is not implemented yet. Tracked by
  - https://codeberg.org/ziglang/zig/issues/30050
  - https://codeberg.org/ziglang/zig/issues/30051

glibc + threaded is the only problematic combination.
2025-12-01 19:17:52 -08:00
Andrew Kelley
144206856e std.Io.Threaded: fix compilation for riscv32-linux 2025-12-01 19:17:52 -08:00
Andrew Kelley
9e981c3ae5 std.os.linux: delete unnecessary @compileError
Without this, it already fails to compile with a sufficiently helpful
error message.
2025-12-01 19:17:52 -08:00
Andrew Kelley
39ac40209b std.Io.Threaded: use musl's beautiful pthread_cancel semantics 2025-12-01 19:17:52 -08:00
Andrew Kelley
d60760d61e std.Io.Threaded: tune requestCancel
On a heavily loaded Linux 6.17.5, I observed a maximum of 20 attempts
not acknowledged before the timeout (including exponential backoff) was
sufficient, despite the heavy load.

The time wasted here sleeping is mitigated by the fact that, later on,
the system will likely wait for the canceled task, causing it to
indefinitely yield until the canceled task finishes, and the task must
acknowledge the cancel before it proceeds to that point.
2025-12-01 19:17:52 -08:00
Andrew Kelley
29e418cbfb std.Io.Threaded: fix the cancellation race
Now, before a syscall is entered, beginSyscall is called, which may
return error.Canceled. After syscall returns, whether error or success,
endSyscall is called. If the syscall returns EINTR then checkCancel is
called.

`cancelRequested` is removed from the std.Io VTable for now, with plans
to replace it with a more powerful API that allows protection against
cancellation requests.

closes #25751
2025-12-01 19:17:52 -08:00
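
A toy model of the bracketing described above; the `Task` struct and its flag are illustrative stand-ins, not the real `std.Io.Threaded` state, though the `beginSyscall`/`endSyscall`/`checkCancel` names follow the commit message:

```zig
const std = @import("std");

/// Toy stand-in for per-task cancellation state; not the real std.Io.Threaded
/// implementation.
const Task = struct {
    cancel_requested: std.atomic.Value(bool) = .{ .raw = false },

    /// Called before entering a blocking syscall; a pending cancel surfaces
    /// here instead of the task blocking forever.
    fn beginSyscall(t: *Task) error{Canceled}!void {
        if (t.cancel_requested.load(.acquire)) return error.Canceled;
    }

    /// Always paired with beginSyscall, whether the syscall succeeded or not.
    fn endSyscall(_: *Task) void {}

    /// Called when the syscall returns EINTR: decide between a cancel and a
    /// plain interrupt that should simply be retried.
    fn checkCancel(t: *Task) error{Canceled}!void {
        if (t.cancel_requested.load(.acquire)) return error.Canceled;
    }
};

test "a queued cancel is observed before the syscall is entered" {
    var task: Task = .{};
    task.cancel_requested.store(true, .release);
    try std.testing.expectError(error.Canceled, task.beginSyscall());
    task.endSyscall();
}
```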
rpkak
95f93a0b28 std.zig.Ast: count addrspace tokens correctly
Before this PR this

```zig
const namespace = struct {
    extern const num: u8 addrspace(.generic);
};
// comment
```

got formatted to this

```zig
const namespace = struct {
    extern const num: u8 addrspace(.generic);
(
```

Co-authored-by: rpkak <rpkak@noreply.codeberg.org>
Co-committed-by: rpkak <rpkak@noreply.codeberg.org>
2025-12-01 12:56:11 +01:00
jedisct1
1d1e2b7780 Merge pull request 'std.crypto.aes: expose the inverse MixColumns operation' (#30052) from jedisct1/zig:invmixcolumns into master
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/30052
2025-12-01 11:16:26 +01:00
fn ⌃ ⌥
bfe3317059 Return a usize from @abs if given an isize
Also:
- `c_ushort` for `c_short`
- `c_uint` for `c_int`
- `c_ulong` for `c_long`
- `c_ulonglong` for `c_longlong`
2025-11-29 21:09:08 +01:00
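
Assuming a compiler that includes the change above, this is what it means in practice for `isize` (a hypothetical test written for illustration, not taken from the repository):

```zig
const std = @import("std");

test "@abs on isize yields usize" {
    const x: isize = -42;
    // With this change, the result type is the matching unsigned counterpart.
    try std.testing.expect(@TypeOf(@abs(x)) == usize);
    try std.testing.expectEqual(@as(usize, 42), @abs(x));
}
```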
mlugg
44e99edd7a Merge pull request 'Sema: initialize OPV comptime allocs correctly' (#30043) from reify-empty-struct into master
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/30043
2025-11-29 20:21:30 +01:00
Nikolay Govorov
a0289d0cce std.posix.accept: handle non-listening socket EINVAL 2025-11-29 19:57:59 +01:00
Frank Denis
5e00a0c9b5 std.crypto.aes: expose the inverse MixColumns operation
The inverse MixColumns operation is already used internally for
AES decryption, but it wasn’t exposed in the public API because
it didn’t seem necessary at the time.

Since then, several new AES-based block ciphers and permutations
(such as Vistrutah and Areion) have been developed, and they require
this operation to be implementable in Zig.
2025-11-29 19:25:22 +01:00
Alex Rønne Petersen
7d9ad992ab
issue templates: update issue labels to match the actual org labels 2025-11-29 19:02:04 +01:00
Matthew Lugg
8f5db19791
Sema: initialize OPV comptime allocs correctly
This was causing a `[0]std.builtin.Type.StructField.Attributes` to be
considered `undefined`, even though that type is OPV and so should prefer
its OPV `.{}` over `undefined`.

Resolves: #30039
2025-11-29 11:55:36 +00:00
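
For context, a hypothetical illustration of the "OPV" (one-possible-value) notion the message relies on; this is not the compiler fix itself:

```zig
const std = @import("std");

test "a zero-length array type has only one possible value" {
    // A zero-length array carries no bits, so `.{}` is its one and only value;
    // the fix above makes comptime allocs of such types prefer that value
    // over `undefined`.
    const T = [0]std.builtin.Type.StructField;
    try std.testing.expectEqual(0, @sizeOf(T));
    const value: T = .{};
    try std.testing.expectEqual(0, value.len);
}
```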
Matthew Lugg
e52232cd57
print_zir: fix typo 2025-11-29 11:55:26 +00:00
Alex Rønne Petersen
a38220376e
Revert "ci: apply workaround for #22213 to x86_64-linux scripts"
This reverts commit a8f9b5dc06.
2025-11-28 22:57:33 +01:00
Alex Rønne Petersen
a8f9b5dc06
ci: apply workaround for #22213 to x86_64-linux scripts 2025-11-28 22:06:46 +01:00
Alex Rønne Petersen
713716770e
README: github -> codeberg for some links 2025-11-28 18:29:05 +01:00
Andrew Kelley
e19c61a16e issue templates: use anchors in URLs 2025-11-28 07:38:24 -08:00
jedisct1
e6d19a07d2 Merge pull request 'std.crypto: add hybrid post-quantum/traditional key encapsulation' (#30010) from jedisct1/zig:hybridkem into master
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/30010
2025-11-28 09:10:20 +01:00
Frank Denis
ca96d853ff std.crypto: add hybrid post-quantum/traditional key encapsulation
Hybrid KEMs combine a post-quantum secure KEM with a traditional
elliptic curve Diffie-Hellman key exchange.

The hybrid construction provides security against both classical and quantum
adversaries: even if one component is broken, the combined scheme remains
secure as long as the other component holds.

The implementation follows the IETF CFRG draft specification for concrete
hybrid KEMs:

https://datatracker.ietf.org/doc/draft-irtf-cfrg-concrete-hybrid-kems/
2025-11-27 12:10:17 +01:00
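
A generic sketch of the combining idea described above; this is not the CFRG construction the commit implements, and `combineSharedSecrets` is a hypothetical name (the real scheme binds in additional context such as ciphertexts and public keys):

```zig
const std = @import("std");

/// Hypothetical combiner illustrating the hybrid idea: both shared secrets
/// feed one KDF, so the output stays secret while either component holds.
fn combineSharedSecrets(pq_shared: []const u8, ecdh_shared: []const u8) [32]u8 {
    var out: [32]u8 = undefined;
    var h = std.crypto.hash.sha3.Sha3_256.init(.{});
    h.update(pq_shared);
    h.update(ecdh_shared);
    h.final(&out);
    return out;
}

test "combined secret depends on both components" {
    const a = combineSharedSecrets("pq-secret", "ecdh-secret");
    const b = combineSharedSecrets("pq-secret", "different");
    try std.testing.expect(!std.mem.eql(u8, &a, &b));
}
```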
66 changed files with 4203 additions and 1498 deletions

View file

@@ -1,6 +1,6 @@
 name: Bug Report
 description: File a bug report
-labels: ["kind/bug"]
+labels: ["bug"]
 body:
 - type: markdown
 attributes:

View file

@@ -1,7 +1,7 @@
 contact_links:
 - name: Language Proposal
 about: "Please do not submit a proposal to change the language"
-url: https://ziglang.org/code-of-conduct
+url: https://ziglang.org/code-of-conduct/#no-language-proposals
 - name: Question
 about: "Please use one of the community spaces instead for questions or general discussions."
 url: https://ziglang.org/community
@@ -10,4 +10,4 @@ contact_links:
 url: https://codeberg.org/ziglang/translate-c
 - name: Copilot and Other LLMs
 about: "Please do not use GitHub Copilot or any other LLM to write an issue."
-url: https://ziglang.org/code-of-conduct
+url: https://ziglang.org/code-of-conduct/#strict-no-llm-no-ai-policy

View file

@@ -1,6 +1,6 @@
 name: Error message improvement
 description: Compiler produces an unhelpful or misleading error message.
-labels: ["kind/error message"]
+labels: ["error message"]
 body:
 - type: input
 id: version

View file

@@ -152,7 +152,7 @@ jobs:
 fetch-depth: 0
 - name: Build and Test
 run: sh ci/x86_64-linux-debug.sh
-timeout-minutes: 240
+timeout-minutes: 180
 x86_64-linux-debug-llvm:
 runs-on: [self-hosted, x86_64-linux]
 steps:
@@ -162,7 +162,7 @@ jobs:
 fetch-depth: 0
 - name: Build and Test
 run: sh ci/x86_64-linux-debug-llvm.sh
-timeout-minutes: 480
+timeout-minutes: 360
 x86_64-linux-release:
 runs-on: [self-hosted, x86_64-linux]
 steps:
@@ -172,7 +172,7 @@ jobs:
 fetch-depth: 0
 - name: Build and Test
 run: sh ci/x86_64-linux-release.sh
-timeout-minutes: 480
+timeout-minutes: 360
 x86_64-windows-debug:
 runs-on: [self-hosted, x86_64-windows]

View file

@@ -128,7 +128,7 @@ while your Zig build has the option to be a Debug build. It also works
 completely independently from MSVC so you don't need it to be installed.
 Determine the URL by
-[looking at the CI script](https://github.com/ziglang/zig/blob/master/ci/x86_64-windows-debug.ps1#L1-L4).
+[looking at the CI script](https://codeberg.org/ziglang/zig/src/branch/master/ci/x86_64-windows-debug.ps1#L1-L4).
 It will look something like this (replace `$VERSION` with the one you see by
 following the above link):
@@ -485,16 +485,14 @@ interpret your words.
 ### Find a Contributor Friendly Issue
 The issue label
-[Contributor Friendly](https://github.com/ziglang/zig/issues?q=is%3Aissue+is%3Aopen+label%3A%22contributor+friendly%22)
+[Contributor Friendly](https://codeberg.org/ziglang/zig/issues?labels=741726&state=open)
 exists to help you find issues that are **limited in scope and/or
 knowledge of Zig internals.**
 Please note that issues labeled
-[Proposal](https://github.com/ziglang/zig/issues?q=is%3Aissue+is%3Aopen+label%3Aproposal)
-but do not also have the
-[Accepted](https://github.com/ziglang/zig/issues?q=is%3Aissue+is%3Aopen+label%3Aaccepted)
-label are still under consideration, and efforts to implement such a proposal
-have a high risk of being wasted. If you are interested in a proposal which is
+[Proposal: Proposed](https://codeberg.org/ziglang/zig/issues?labels=746937&state=open)
+are still under consideration, and efforts to implement such a proposal have
+a high risk of being wasted. If you are interested in a proposal which is
 still under consideration, please express your interest in the issue tracker,
 providing extra insights and considerations that others have not yet expressed.
 The most highly regarded argument in such a discussion is a real world use case.
@@ -665,11 +663,11 @@ based on Clang, but is now based on Aro:
 Test coverage as well as bug reports have been moved to this repository:
-[ziglang/translate-c](https://github.com/ziglang/translate-c/)
+[ziglang/translate-c](https://codeberg.org/ziglang/translate-c/)
 In the future, [@cImport will move to the build system](https://github.com/ziglang/zig/issues/20630),
 but for now, the translate-c logic is copy-pasted from that project into
-[ziglang/zig](https://github.com/ziglang/zig/), powering both `zig translate-c`
+[ziglang/zig](https://codeberg.org/ziglang/zig/), powering both `zig translate-c`
 and `@cImport`.
 Please see the readme of the translate-c project for how to contribute. Once an
@@ -777,7 +775,7 @@ If you will be debugging the Zig compiler itself, or if you will be debugging
 any project compiled with Zig's LLVM backend (not recommended with the LLDB
 fork, prefer vanilla LLDB with a version that matches the version of LLVM that
 Zig is using), you can get a better debugging experience by using
-[`lldb_pretty_printers.py`](https://github.com/ziglang/zig/blob/master/tools/lldb_pretty_printers.py).
+[`lldb_pretty_printers.py`](https://codeberg.org/ziglang/zig/src/branch/master/tools/lldb_pretty_printers.py).
 Put this line in `~/.lldbinit`:

build.zig
View file

@@ -91,6 +91,8 @@ pub fn build(b: *std.Build) !void {
 const skip_libc = b.option(bool, "skip-libc", "Main test suite skips tests that link libc") orelse false;
 const skip_single_threaded = b.option(bool, "skip-single-threaded", "Main test suite skips tests that are single-threaded") orelse false;
 const skip_compile_errors = b.option(bool, "skip-compile-errors", "Main test suite skips compile error tests") orelse false;
+const skip_spirv = b.option(bool, "skip-spirv", "Main test suite skips targets with spirv32/spirv64 architecture") orelse false;
+const skip_wasm = b.option(bool, "skip-wasm", "Main test suite skips targets with wasm32/wasm64 architecture") orelse false;
 const skip_freebsd = b.option(bool, "skip-freebsd", "Main test suite skips targets with freebsd OS") orelse false;
 const skip_netbsd = b.option(bool, "skip-netbsd", "Main test suite skips targets with netbsd OS") orelse false;
 const skip_windows = b.option(bool, "skip-windows", "Main test suite skips targets with windows OS") orelse false;
@@ -421,6 +423,8 @@ pub fn build(b: *std.Build) !void {
 .test_target_filters = test_target_filters,
 .skip_compile_errors = skip_compile_errors,
 .skip_non_native = skip_non_native,
+.skip_spirv = skip_spirv,
+.skip_wasm = skip_wasm,
 .skip_freebsd = skip_freebsd,
 .skip_netbsd = skip_netbsd,
 .skip_windows = skip_windows,
@@ -452,6 +456,8 @@ pub fn build(b: *std.Build) !void {
 .skip_single_threaded = skip_single_threaded,
 .skip_non_native = skip_non_native,
 .test_default_only = no_matrix,
+.skip_spirv = skip_spirv,
+.skip_wasm = skip_wasm,
 .skip_freebsd = skip_freebsd,
 .skip_netbsd = skip_netbsd,
 .skip_windows = skip_windows,
@@ -459,8 +465,29 @@ pub fn build(b: *std.Build) !void {
 .skip_linux = skip_linux,
 .skip_llvm = skip_llvm,
 .skip_libc = skip_libc,
-// 3888779264 was observed on an x86_64-linux-gnu host.
-.max_rss = 4000000000,
+.max_rss = switch (b.graph.host.result.os.tag) {
+    .freebsd => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 1_060_217_241,
+        else => 1_100_000_000,
+    },
+    .linux => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 659_809_075,
+        .loongarch64 => 598_902_374,
+        .riscv64 => 731_258_880,
+        .s390x => 580_596_121,
+        .x86_64 => 3_290_894_745,
+        else => 3_300_000_000,
+    },
+    .macos => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 767_736_217,
+        else => 800_000_000,
+    },
+    .windows => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 603_070_054,
+        else => 700_000_000,
+    },
+    else => 3_300_000_000,
+},
 }));
 test_modules_step.dependOn(tests.addModuleTests(b, .{
@@ -475,6 +502,8 @@ pub fn build(b: *std.Build) !void {
 .skip_single_threaded = true,
 .skip_non_native = skip_non_native,
 .test_default_only = no_matrix,
+.skip_spirv = skip_spirv,
+.skip_wasm = skip_wasm,
 .skip_freebsd = skip_freebsd,
 .skip_netbsd = skip_netbsd,
 .skip_windows = skip_windows,
@@ -483,6 +512,29 @@ pub fn build(b: *std.Build) !void {
 .skip_llvm = skip_llvm,
 .skip_libc = true,
 .no_builtin = true,
+.max_rss = switch (b.graph.host.result.os.tag) {
+    .freebsd => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 743_802_470,
+        else => 800_000_000,
+    },
+    .linux => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 639_565_414,
+        .loongarch64 => 598_884_352,
+        .riscv64 => 636_429_516,
+        .s390x => 574_166_630,
+        .x86_64 => 764_861_644,
+        else => 800_000_000,
+    },
+    .macos => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 701_413_785,
+        else => 800_000_000,
+    },
+    .windows => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 536_414_208,
+        else => 600_000_000,
+    },
+    else => 800_000_000,
+},
 }));
 test_modules_step.dependOn(tests.addModuleTests(b, .{
@@ -497,6 +549,8 @@ pub fn build(b: *std.Build) !void {
 .skip_single_threaded = true,
 .skip_non_native = skip_non_native,
 .test_default_only = no_matrix,
+.skip_spirv = skip_spirv,
+.skip_wasm = skip_wasm,
 .skip_freebsd = skip_freebsd,
 .skip_netbsd = skip_netbsd,
 .skip_windows = skip_windows,
@@ -505,6 +559,29 @@ pub fn build(b: *std.Build) !void {
 .skip_llvm = skip_llvm,
 .skip_libc = true,
 .no_builtin = true,
+.max_rss = switch (b.graph.host.result.os.tag) {
+    .freebsd => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 557_892_403,
+        else => 600_000_000,
+    },
+    .linux => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 615_302_758,
+        .loongarch64 => 598_974_464,
+        .riscv64 => 382_786_764,
+        .s390x => 395_555_635,
+        .x86_64 => 692_348_518,
+        else => 700_000_000,
+    },
+    .macos => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 451_389_030,
+        else => 500_000_000,
+    },
+    .windows => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 367_747_072,
+        else => 400_000_000,
+    },
+    else => 700_000_000,
+},
 }));
 test_modules_step.dependOn(tests.addModuleTests(b, .{
@@ -519,6 +596,8 @@ pub fn build(b: *std.Build) !void {
 .skip_single_threaded = skip_single_threaded,
 .skip_non_native = skip_non_native,
 .test_default_only = no_matrix,
+.skip_spirv = skip_spirv,
+.skip_wasm = skip_wasm,
 .skip_freebsd = skip_freebsd,
 .skip_netbsd = skip_netbsd,
 .skip_windows = skip_windows,
@@ -526,8 +605,29 @@ pub fn build(b: *std.Build) !void {
 .skip_linux = skip_linux,
 .skip_llvm = skip_llvm,
 .skip_libc = skip_libc,
-// I observed a value of 5605064704 on the M2 CI.
-.max_rss = 6165571174,
+.max_rss = switch (b.graph.host.result.os.tag) {
+    .freebsd => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 3_756_422_348,
+        else => 3_800_000_000,
+    },
+    .linux => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 6_732_817_203,
+        .loongarch64 => 3_216_349_593,
+        .riscv64 => 3_570_899_763,
+        .s390x => 3_652_514_201,
+        .x86_64 => 3_249_546_854,
+        else => 6_800_000_000,
+    },
+    .macos => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 8_273_795_481,
+        else => 8_300_000_000,
+    },
+    .windows => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 3_750_236_160,
+        else => 3_800_000_000,
+    },
+    else => 8_300_000_000,
+},
 }));
 const unit_tests_step = b.step("test-unit", "Run the compiler source unit tests");
@@ -543,6 +643,29 @@ pub fn build(b: *std.Build) !void {
 .use_llvm = use_llvm,
 .use_lld = use_llvm,
 .zig_lib_dir = b.path("lib"),
+.max_rss = switch (b.graph.host.result.os.tag) {
+    .freebsd => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 2_188_099_584,
+        else => 2_200_000_000,
+    },
+    .linux => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 1_991_934_771,
+        .loongarch64 => 1_844_538_572,
+        .riscv64 => 2_459_003_289,
+        .s390x => 1_781_248_409,
+        .x86_64 => 977_192_550,
+        else => 2_500_000_000,
+    },
+    .macos => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 2_062_393_344,
+        else => 2_100_000_000,
+    },
+    .windows => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 1_953_087_488,
+        else => 2_000_000_000,
+    },
+    else => 2_500_000_000,
+},
 });
 if (link_libc) {
 unit_tests.root_module.link_libc = true;
@@ -560,6 +683,7 @@ pub fn build(b: *std.Build) !void {
 test_step.dependOn(tests.addCAbiTests(b, .{
 .test_target_filters = test_target_filters,
 .skip_non_native = skip_non_native,
+.skip_wasm = skip_wasm,
 .skip_freebsd = skip_freebsd,
 .skip_netbsd = skip_netbsd,
 .skip_windows = skip_windows,
@@ -567,6 +691,29 @@ pub fn build(b: *std.Build) !void {
 .skip_linux = skip_linux,
 .skip_llvm = skip_llvm,
 .skip_release = skip_release,
+.max_rss = switch (b.graph.host.result.os.tag) {
+    .freebsd => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 727_221_862,
+        else => 800_000_000,
+    },
+    .linux => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 1_318_185_369,
+        .loongarch64 => 1_422_904_524,
+        .riscv64 => 449_924_710,
+        .s390x => 1_946_743_603,
+        .x86_64 => 2_139_993_292,
+        else => 2_200_000_000,
+    },
+    .macos => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 1_813_612_134,
+        else => 1_900_000_000,
+    },
+    .windows => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 386_287_616,
+        else => 400_000_000,
+    },
+    else => 2_200_000_000,
+},
 }));
 test_step.dependOn(tests.addLinkTests(b, enable_macos_sdk, enable_ios_sdk, enable_symlinks_windows));
 test_step.dependOn(tests.addStackTraceTests(b, test_filters, skip_non_native));
@@ -616,6 +763,7 @@ pub fn build(b: *std.Build) !void {
 .optimize_modes = optimization_modes,
 .test_filters = test_filters,
 .test_target_filters = test_target_filters,
+.skip_wasm = skip_wasm,
 // Highest RSS observed in any test case was exactly 1802878976 on x86_64-linux.
 .max_rss = 2253598720,
 })) |test_libc_step| test_step.dependOn(test_libc_step);
@@ -721,7 +869,29 @@ fn addCompilerMod(b: *std.Build, options: AddCompilerModOptions) *std.Build.Modu
 fn addCompilerStep(b: *std.Build, options: AddCompilerModOptions) *std.Build.Step.Compile {
 const exe = b.addExecutable(.{
 .name = "zig",
-.max_rss = 7_800_000_000,
+.max_rss = switch (b.graph.host.result.os.tag) {
+    .freebsd => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 6_044_158_771,
+        else => 6_100_000_000,
+    },
+    .linux => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 6_240_805_683,
+        .loongarch64 => 5_024_158_515,
+        .riscv64 => 6_996_309_196,
+        .s390x => 4_997_174_476,
+        .x86_64 => 5_486_090_649,
+        else => 7_000_000_000,
+    },
+    .macos => switch (b.graph.host.result.cpu.arch) {
+        .aarch64 => 6_639_145_779,
+        else => 6_700_000_000,
+    },
+    .windows => switch (b.graph.host.result.cpu.arch) {
+        .x86_64 => 5_770_394_009,
+        else => 5_800_000_000,
+    },
+    else => 7_000_000_000,
+},
 .root_module = addCompilerMod(b, options),
 });
 exe.stack_size = stack_size;
@@ -798,7 +968,7 @@ fn addCmakeCfgOptionsToExe(
 };
 mod.linkSystemLibrary("unwind", .{});
 },
-.ios, .macos, .watchos, .tvos, .visionos => {
+.driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => {
 mod.link_libcpp = true;
 },
 .windows => {

View file

@@ -44,7 +44,7 @@ ninja install
 # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
 stage3-debug/bin/zig build test docs \
---maxrss 44918199637 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
 -Dskip-non-native \
 -Dtarget=native-native-musl \

View file

@@ -44,7 +44,7 @@ ninja install
 # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
 stage3-release/bin/zig build test docs \
---maxrss 44918199637 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
 -Dskip-non-native \
 -Dtarget=native-native-musl \

View file

@@ -42,6 +42,7 @@ cmake .. \
 ninja install
 stage3-debug/bin/zig build test docs \
+--maxrss ${ZSF_MAX_RSS:-0} \
 --zig-lib-dir "$PWD/../lib" \
 -Denable-macos-sdk \
 -Dstatic-llvm \

View file

@@ -51,6 +51,7 @@ stage3-release/bin/zig build test docs \
 # Ensure that stage3 and stage4 are byte-for-byte identical.
 stage3-release/bin/zig build \
+--maxrss ${ZSF_MAX_RSS:-0} \
 --prefix stage4-release \
 -Denable-llvm \
 -Dno-lib \

View file

@@ -5,6 +5,7 @@ $ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip
 $PREFIX_PATH = "$(Get-Location)\..\$ZIG_LLVM_CLANG_LLD_NAME"
 $ZIG = "$PREFIX_PATH\bin\zig.exe"
 $ZIG_LIB_DIR = "$(Get-Location)\lib"
+$ZSF_MAX_RSS = if ($Env:ZSF_MAX_RSS) { $Env:ZSF_MAX_RSS } else { 0 }
 if (!(Test-Path "..\$ZIG_LLVM_CLANG_LLD_NAME.zip")) {
 Write-Output "Downloading $ZIG_LLVM_CLANG_LLD_URL"
@@ -54,6 +55,7 @@ CheckLastExitCode
 Write-Output "Main test suite..."
 & "stage3-release\bin\zig.exe" build test docs `
+--maxrss $ZSF_MAX_RSS `
 --zig-lib-dir "$ZIG_LIB_DIR" `
 --search-prefix "$PREFIX_PATH" `
 -Dstatic-llvm `

View file

@@ -45,7 +45,7 @@ ninja install
 # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
 stage3-debug/bin/zig build test docs \
---maxrss 60129542144 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
 -Dskip-non-native \
 -Dtarget=native-native-musl \

View file

@@ -45,7 +45,7 @@ ninja install
 # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
 stage3-release/bin/zig build test docs \
---maxrss 60129542144 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
 -Dskip-non-native \
 -Dtarget=native-native-musl \

View file

@@ -44,7 +44,7 @@ ninja install
 # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
 stage3-debug/bin/zig build test-cases test-modules test-unit test-c-abi test-stack-traces test-error-traces test-llvm-ir \
---maxrss 68719476736 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
 -Dskip-non-native \
 -Dskip-single-threaded \

View file

@@ -44,7 +44,7 @@ ninja install
 # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
 stage3-release/bin/zig build test-cases test-modules test-unit test-c-abi test-stack-traces test-error-traces test-llvm-ir \
---maxrss 68719476736 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
 -Dskip-non-native \
 -Dskip-single-threaded \

View file

@@ -45,7 +45,7 @@ ninja install
 # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
 stage3-debug/bin/zig build test docs \
---maxrss 30064771072 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
 -Dskip-non-native \
 -Dtarget=native-native-musl \

View file

@@ -45,7 +45,7 @@ ninja install
 # No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
 stage3-release/bin/zig build test docs \
---maxrss 30064771072 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
 -Dskip-non-native \
 -Dtarget=native-native-musl \

View file

@@ -44,8 +44,10 @@ unset CXX
 ninja install
 stage3-debug/bin/zig build test docs \
---maxrss 42949672960 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
+-Dskip-spirv \
+-Dskip-wasm \
 -Dskip-linux \
 -Dskip-netbsd \
 -Dskip-windows \

View file

@@ -44,8 +44,10 @@ unset CXX
 ninja install
 stage3-release/bin/zig build test docs \
---maxrss 42949672960 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dstatic-llvm \
+-Dskip-spirv \
+-Dskip-wasm \
 -Dskip-linux \
 -Dskip-netbsd \
 -Dskip-windows \

View file

@@ -49,7 +49,7 @@ stage3-debug/bin/zig build \
 -Dno-lib
 stage3-debug/bin/zig build test docs \
---maxrss 21000000000 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dlldb=$HOME/deps/lldb-zig/Debug-e0a42bb34/bin/lldb \
 -fqemu \
 -fwasmtime \

View file

@@ -48,7 +48,7 @@ stage3-debug/bin/zig build \
 -Dno-lib
 stage3-debug/bin/zig build test docs \
---maxrss 21000000000 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dlldb=$HOME/deps/lldb-zig/Debug-e0a42bb34/bin/lldb \
 -fqemu \
 -fwasmtime \

View file

@@ -54,7 +54,7 @@ stage3-release/bin/zig build \
 -Dno-lib
 stage3-release/bin/zig build test docs \
---maxrss 21000000000 \
+--maxrss ${ZSF_MAX_RSS:-0} \
 -Dlldb=$HOME/deps/lldb-zig/Release-e0a42bb34/bin/lldb \
 -fqemu \
 -fwasmtime \

View file

@@ -5,6 +5,7 @@ $ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip
 $PREFIX_PATH = "$($Env:USERPROFILE)\$ZIG_LLVM_CLANG_LLD_NAME"
 $ZIG = "$PREFIX_PATH\bin\zig.exe"
 $ZIG_LIB_DIR = "$(Get-Location)\lib"
+$ZSF_MAX_RSS = if ($Env:ZSF_MAX_RSS) { $Env:ZSF_MAX_RSS } else { 0 }
 if (!(Test-Path "$PREFIX_PATH.zip")) {
 Write-Output "Downloading $ZIG_LLVM_CLANG_LLD_URL"
@@ -54,11 +55,11 @@ CheckLastExitCode
 Write-Output "Main test suite..."
 & "stage3-debug\bin\zig.exe" build test docs `
+--maxrss $ZSF_MAX_RSS `
 --zig-lib-dir "$ZIG_LIB_DIR" `
 --search-prefix "$PREFIX_PATH" `
 -Dstatic-llvm `
 -Dskip-non-native `
--Dskip-release `
 -Dskip-test-incremental `
 -Denable-symlinks-windows `
 --test-timeout 30m

View file

@@ -5,6 +5,7 @@ $ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip
 $PREFIX_PATH = "$($Env:USERPROFILE)\$ZIG_LLVM_CLANG_LLD_NAME"
 $ZIG = "$PREFIX_PATH\bin\zig.exe"
 $ZIG_LIB_DIR = "$(Get-Location)\lib"
+$ZSF_MAX_RSS = if ($Env:ZSF_MAX_RSS) { $Env:ZSF_MAX_RSS } else { 0 }
 if (!(Test-Path "$PREFIX_PATH.zip")) {
 Write-Output "Downloading $ZIG_LLVM_CLANG_LLD_URL"
@@ -54,6 +55,7 @@ CheckLastExitCode
 Write-Output "Main test suite..."
 & "stage3-release\bin\zig.exe" build test docs `
+--maxrss $ZSF_MAX_RSS `
 --zig-lib-dir "$ZIG_LIB_DIR" `
 --search-prefix "$PREFIX_PATH" `
 -Dstatic-llvm `

View file

@@ -39,7 +39,7 @@ v2.2.5.
 The file `lib/libc/glibc/abilist` is a Zig-specific binary blob that
 defines the supported glibc versions and the set of symbols each version
-must define. See https://github.com/ziglang/glibc-abi-tool for the
+must define. See https://codeberg.org/ziglang/libc-abi-tools for the
 tooling to generate this blob. The code in `glibc.zig` parses the abilist
 to build version-specific stub libraries on demand.

View file

@@ -79,7 +79,7 @@ enable_rosetta: bool = false,
 enable_wasmtime: bool = false,
 /// Use system Wine installation to run cross compiled Windows build artifacts.
 enable_wine: bool = false,
-/// After following the steps in https://github.com/ziglang/zig/wiki/Updating-libc#glibc,
+/// After following the steps in https://codeberg.org/ziglang/infra/src/branch/master/libc-update/glibc.md,
 /// this will be the directory $glibc-build-dir/install/glibcs
 /// Given the example of the aarch64 target, this is the directory
 /// that contains the path `aarch64-linux-gnu/lib/ld-linux-aarch64.so.1`.

View file

@@ -620,11 +620,6 @@ pub const VTable = struct {
 result: []u8,
 result_alignment: std.mem.Alignment,
 ) void,
-/// Returns whether the current thread of execution is known to have
-/// been requested to cancel.
-///
-/// Thread-safe.
-cancelRequested: *const fn (?*anyopaque) bool,
 /// When this function returns, implementation guarantees that `start` has
 /// either already been called, or a unit of concurrency has been assigned
@@ -895,7 +890,7 @@ pub const Timestamp = struct {
 }
 pub fn withClock(t: Timestamp, clock: Clock) Clock.Timestamp {
-return .{ .nanoseconds = t.nanoseconds, .clock = clock };
+return .{ .raw = t, .clock = clock };
 }
 pub fn fromNanoseconds(x: i96) Timestamp {

File diff suppressed because it is too large.

View file

@@ -1090,7 +1090,8 @@ pub const Socket = struct {
 }
 pub fn sendMany(s: *const Socket, io: Io, messages: []OutgoingMessage, flags: SendFlags) SendError!void {
-return io.vtable.netSend(io.userdata, s.handle, messages, flags);
+const err, const n = io.vtable.netSend(io.userdata, s.handle, messages, flags);
+if (n != messages.len) return err.?;
 }
 pub const ReceiveError = error{
@@ -1333,6 +1334,10 @@ pub const Server = struct {
 /// Not enough free memory. This often means that the memory allocation is limited
 /// by the socket buffer limits, not by the system memory.
 SystemResources,
+/// Either `listen` was never called, or `shutdown` was called (possibly while
+/// this call was blocking). This allows `shutdown` to be used as a concurrent
+/// cancellation mechanism.
+SocketNotListening,
 /// The network subsystem has failed.
 NetworkDown,
 /// No connection is already queued and ready to be accepted, and

View file

@@ -4471,7 +4471,7 @@ pub const rusage = switch (native_os) {
 pub const SELF = 1;
 pub const CHILDREN = 2;
 },
-.freebsd => extern struct {
+.freebsd, .openbsd => extern struct {
 utime: timeval,
 stime: timeval,
 maxrss: c_long,
@@ -4493,6 +4493,27 @@ pub const rusage = switch (native_os) {
 pub const CHILDREN = -1;
 pub const THREAD = 1;
 },
+.dragonfly, .netbsd => extern struct {
+    utime: timeval,
+    stime: timeval,
+    maxrss: c_long,
+    ixrss: c_long,
+    idrss: c_long,
+    isrss: c_long,
+    minflt: c_long,
+    majflt: c_long,
+    nswap: c_long,
+    inblock: c_long,
+    oublock: c_long,
+    msgsnd: c_long,
+    msgrcv: c_long,
+    nsignals: c_long,
+    nvcsw: c_long,
+    nivcsw: c_long,
+    pub const SELF = 0;
+    pub const CHILDREN = -1;
+},
 else => void,
 };
@@ -10881,6 +10902,23 @@ pub extern "c" fn pthread_create(
 start_routine: *const fn (?*anyopaque) callconv(.c) ?*anyopaque,
 noalias arg: ?*anyopaque,
 ) E;
+pub const pthread_cancelstate = switch (native_os) {
+    .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => enum(c_int) {
+        ENABLE = 1,
+        DISABLE = 0,
+    },
+    .linux => if (native_abi.isMusl()) enum(c_int) {
+        ENABLE = 0,
+        DISABLE = 1,
+        MASKED = 2,
+    } else if (native_abi.isGnu()) enum(c_int) {
+        ENABLE = 0,
+        DISABLE = 1,
+    },
+    else => void,
+};
+pub extern "c" fn pthread_setcancelstate(pthread_cancelstate, ?*pthread_cancelstate) E;
+pub extern "c" fn pthread_cancel(pthread_t) E;
 pub extern "c" fn pthread_attr_init(attr: *pthread_attr_t) E;
 pub extern "c" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *anyopaque, stacksize: usize) E;
 pub extern "c" fn pthread_attr_setstacksize(attr: *pthread_attr_t, stacksize: usize) E;

View file

@@ -116,6 +116,7 @@ pub const dh = struct {
 /// Key Encapsulation Mechanisms.
 pub const kem = struct {
+pub const hybrid = @import("crypto/hybrid_kem.zig");
 pub const kyber_d00 = @import("crypto/ml_kem.zig").d00;
 pub const ml_kem = @import("crypto/ml_kem.zig").nist;
 };

View file

@@ -108,6 +108,36 @@ test "expand 128-bit key" {
 }
 }
+test "invMixColumns" {
+    const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
+    const enc_ctx = Aes128.initEnc(key);
+    const dec_ctx = Aes128.initDec(key);
+    for (1..10) |i| {
+        const enc_rk = enc_ctx.key_schedule.round_keys[10 - i];
+        const dec_rk = dec_ctx.key_schedule.round_keys[i];
+        const computed = enc_rk.invMixColumns();
+        try testing.expectEqualSlices(u8, &dec_rk.toBytes(), &computed.toBytes());
+    }
+}
+test "BlockVec invMixColumns" {
+    const input = [_]u8{
+        0x5f, 0x57, 0xf7, 0x1d, 0x72, 0xf5, 0xbe, 0xb9, 0x64, 0xbc, 0x3b, 0xf9, 0x15, 0x92, 0x29, 0x1a,
+        0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
+    };
+    const vec2 = BlockVec(2).fromBytes(&input);
+    const result_vec = vec2.invMixColumns();
+    const result_bytes = result_vec.toBytes();
+    for (0..2) |i| {
+        const block = Block.fromBytes(input[i * 16 ..][0..16]);
+        const expected = block.invMixColumns().toBytes();
+        try testing.expectEqualSlices(u8, &expected, result_bytes[i * 16 ..][0..16]);
+    }
+}
 test "expand 256-bit key" {
 const key = [_]u8{
 0x60, 0x3d, 0xeb, 0x10,

View file

@ -96,6 +96,17 @@ pub const Block = struct {
return Block{ .repr = block1.repr | block2.repr }; return Block{ .repr = block1.repr | block2.repr };
} }
/// Apply the inverse MixColumns operation to a block.
pub fn invMixColumns(block: Block) Block {
return Block{
.repr = asm (
\\ vaesimc %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block.repr),
),
};
}
/// Perform operations on multiple blocks in parallel. /// Perform operations on multiple blocks in parallel.
pub const parallel = struct { pub const parallel = struct {
const cpu = std.Target.x86.cpu; const cpu = std.Target.x86.cpu;
@ -308,6 +319,17 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
} }
return out; return out;
} }
/// Apply the inverse MixColumns operation to each block in the vector.
pub fn invMixColumns(block_vec: Self) Self {
var out_bytes: [blocks_count * 16]u8 = undefined;
const in_bytes = block_vec.toBytes();
inline for (0..blocks_count) |i| {
const block = Block.fromBytes(in_bytes[i * 16 ..][0..16]);
out_bytes[i * 16 ..][0..16].* = block.invMixColumns().toBytes();
}
return fromBytes(&out_bytes);
}
}; };
} }
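
A minimal usage sketch (illustrative only, not part of the diff), assuming the std.crypto.core.aes API used by the tests above (initEnc/initDec taking the key by value, encrypt/decrypt writing into a 16-byte block). The decryption key schedule built by initDec is where invMixColumns is applied to the round keys, so a simple round-trip exercises both new paths.

const std = @import("std");

test "AES-128 decrypt round-trips encrypt (sketch)" {
    const Aes128 = std.crypto.core.aes.Aes128;
    const key = [_]u8{0x2b} ** 16;
    const plaintext = [_]u8{0x3a} ** 16;
    var ciphertext: [16]u8 = undefined;
    var decrypted: [16]u8 = undefined;
    // initDec derives the equivalent-inverse-cipher round keys, applying
    // invMixColumns to the middle round keys (as the new test above verifies).
    Aes128.initEnc(key).encrypt(&ciphertext, &plaintext);
    Aes128.initDec(key).decrypt(&decrypted, &ciphertext);
    try std.testing.expectEqualSlices(u8, &plaintext, &decrypted);
}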

View file

@ -99,6 +99,17 @@ pub const Block = struct {
return Block{ .repr = block1.repr | block2.repr }; return Block{ .repr = block1.repr | block2.repr };
} }
/// Apply the inverse MixColumns operation to a block.
pub fn invMixColumns(block: Block) Block {
return Block{
.repr = asm (
\\ aesimc %[out].16b, %[in].16b
: [out] "=x" (-> Repr),
: [in] "x" (block.repr),
),
};
}
/// Perform operations on multiple blocks in parallel. /// Perform operations on multiple blocks in parallel.
pub const parallel = struct { pub const parallel = struct {
/// The recommended number of AES encryption/decryption to perform in parallel for the chosen implementation. /// The recommended number of AES encryption/decryption to perform in parallel for the chosen implementation.
@ -275,6 +286,15 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
} }
return out; return out;
} }
/// Apply the inverse MixColumns operation to each block in the vector.
pub fn invMixColumns(block_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].invMixColumns();
}
return out;
}
}; };
} }

View file

@ -265,6 +265,26 @@ pub const Block = struct {
return Block{ .repr = x }; return Block{ .repr = x };
} }
/// Apply the inverse MixColumns operation to a block.
pub fn invMixColumns(block: Block) Block {
var out: Repr = undefined;
inline for (0..4) |i| {
const col = block.repr[i];
const b0: u8 = @truncate(col);
const b1: u8 = @truncate(col >> 8);
const b2: u8 = @truncate(col >> 16);
const b3: u8 = @truncate(col >> 24);
const r0 = mul(0x0e, b0) ^ mul(0x0b, b1) ^ mul(0x0d, b2) ^ mul(0x09, b3);
const r1 = mul(0x09, b0) ^ mul(0x0e, b1) ^ mul(0x0b, b2) ^ mul(0x0d, b3);
const r2 = mul(0x0d, b0) ^ mul(0x09, b1) ^ mul(0x0e, b2) ^ mul(0x0b, b3);
const r3 = mul(0x0b, b0) ^ mul(0x0d, b1) ^ mul(0x09, b2) ^ mul(0x0e, b3);
out[i] = @as(u32, r0) | (@as(u32, r1) << 8) | (@as(u32, r2) << 16) | (@as(u32, r3) << 24);
}
return Block{ .repr = out };
}
/// Perform operations on multiple blocks in parallel. /// Perform operations on multiple blocks in parallel.
pub const parallel = struct { pub const parallel = struct {
/// The recommended number of AES encryption/decryption to perform in parallel for the chosen implementation. /// The recommended number of AES encryption/decryption to perform in parallel for the chosen implementation.
@ -441,6 +461,15 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
} }
return out; return out;
} }
/// Apply the inverse MixColumns operation to each block in the vector.
pub fn invMixColumns(block_vec: Self) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].invMixColumns();
}
return out;
}
}; };
} }
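
The mul calls in the software invMixColumns above rely on multiplication in GF(2^8) modulo the AES polynomial. A self-contained sketch of that operation follows; gmul is a hypothetical stand-in for the file's mul helper, checked against the worked example in FIPS-197.

const std = @import("std");

// `gmul` is a hypothetical stand-in for the file's `mul` helper: multiplication
// in GF(2^8) modulo the AES polynomial x^8 + x^4 + x^3 + x + 1 (0x11b), which is
// what the mul(0x0e, ...) / mul(0x0b, ...) / mul(0x0d, ...) / mul(0x09, ...)
// calls above compute per byte.
fn gmul(a: u8, b: u8) u8 {
    var x: u16 = a;
    var y: u8 = b;
    var acc: u16 = 0;
    for (0..8) |_| {
        if (y & 1 != 0) acc ^= x;
        x <<= 1;
        if (x & 0x100 != 0) x ^= 0x11b; // reduce modulo the AES polynomial
        y >>= 1;
    }
    return @truncate(acc);
}

test "GF(2^8) multiplication matches the FIPS-197 worked example" {
    // FIPS-197 section 4.2: {57} x {13} = {fe}.
    try std.testing.expectEqual(@as(u8, 0xfe), gmul(0x57, 0x13));
}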

View file

@ -7,12 +7,11 @@ const builtin = @import("builtin");
const blake2 = crypto.hash.blake2; const blake2 = crypto.hash.blake2;
const crypto = std.crypto; const crypto = std.crypto;
const Io = std.Io;
const math = std.math; const math = std.math;
const mem = std.mem; const mem = std.mem;
const phc_format = pwhash.phc_format; const phc_format = pwhash.phc_format;
const pwhash = crypto.pwhash; const pwhash = crypto.pwhash;
const Thread = std.Thread;
const Blake2b512 = blake2.Blake2b512; const Blake2b512 = blake2.Blake2b512;
const Blocks = std.array_list.AlignedManaged([block_length]u64, .@"16"); const Blocks = std.array_list.AlignedManaged([block_length]u64, .@"16");
const H0 = [Blake2b512.digest_length + 8]u8; const H0 = [Blake2b512.digest_length + 8]u8;
@ -204,20 +203,20 @@ fn initBlocks(
} }
fn processBlocks( fn processBlocks(
allocator: mem.Allocator,
blocks: *Blocks, blocks: *Blocks,
time: u32, time: u32,
memory: u32, memory: u32,
threads: u24, threads: u24,
mode: Mode, mode: Mode,
) KdfError!void { io: Io,
) void {
const lanes = memory / threads; const lanes = memory / threads;
const segments = lanes / sync_points; const segments = lanes / sync_points;
if (builtin.single_threaded or threads == 1) { if (builtin.single_threaded or threads == 1) {
processBlocksSt(blocks, time, memory, threads, mode, lanes, segments); processBlocksSt(blocks, time, memory, threads, mode, lanes, segments);
} else { } else {
try processBlocksMt(allocator, blocks, time, memory, threads, mode, lanes, segments); processBlocksMt(blocks, time, memory, threads, mode, lanes, segments, io);
} }
} }
@ -243,7 +242,6 @@ fn processBlocksSt(
} }
fn processBlocksMt( fn processBlocksMt(
allocator: mem.Allocator,
blocks: *Blocks, blocks: *Blocks,
time: u32, time: u32,
memory: u32, memory: u32,
@ -251,26 +249,20 @@ fn processBlocksMt(
mode: Mode, mode: Mode,
lanes: u32, lanes: u32,
segments: u32, segments: u32,
) KdfError!void { io: Io,
var threads_list = try std.array_list.Managed(Thread).initCapacity(allocator, threads); ) void {
defer threads_list.deinit();
var n: u32 = 0; var n: u32 = 0;
while (n < time) : (n += 1) { while (n < time) : (n += 1) {
var slice: u32 = 0; var slice: u32 = 0;
while (slice < sync_points) : (slice += 1) { while (slice < sync_points) : (slice += 1) {
var group: Io.Group = .init;
var lane: u24 = 0; var lane: u24 = 0;
while (lane < threads) : (lane += 1) { while (lane < threads) : (lane += 1) {
const thread = try Thread.spawn(.{}, processSegment, .{ group.async(io, processSegment, .{
blocks, time, memory, threads, mode, lanes, segments, n, slice, lane, blocks, time, memory, threads, mode, lanes, segments, n, slice, lane,
}); });
threads_list.appendAssumeCapacity(thread);
} }
lane = 0; group.wait(io);
while (lane < threads) : (lane += 1) {
threads_list.items[lane].join();
}
threads_list.clearRetainingCapacity();
} }
} }
} }
@ -489,6 +481,7 @@ pub fn kdf(
salt: []const u8, salt: []const u8,
params: Params, params: Params,
mode: Mode, mode: Mode,
io: Io,
) KdfError!void { ) KdfError!void {
if (derived_key.len < 4) return KdfError.WeakParameters; if (derived_key.len < 4) return KdfError.WeakParameters;
if (derived_key.len > max_int) return KdfError.OutputTooLong; if (derived_key.len > max_int) return KdfError.OutputTooLong;
@ -510,7 +503,7 @@ pub fn kdf(
blocks.appendNTimesAssumeCapacity(@splat(0), memory); blocks.appendNTimesAssumeCapacity(@splat(0), memory);
initBlocks(&blocks, &h0, memory, params.p); initBlocks(&blocks, &h0, memory, params.p);
try processBlocks(allocator, &blocks, params.t, memory, params.p, mode); processBlocks(&blocks, params.t, memory, params.p, mode, io);
finalize(&blocks, memory, params.p, derived_key); finalize(&blocks, memory, params.p, derived_key);
} }
@ -533,6 +526,7 @@ const PhcFormatHasher = struct {
params: Params, params: Params,
mode: Mode, mode: Mode,
buf: []u8, buf: []u8,
io: Io,
) HasherError![]const u8 { ) HasherError![]const u8 {
if (params.secret != null or params.ad != null) return HasherError.InvalidEncoding; if (params.secret != null or params.ad != null) return HasherError.InvalidEncoding;
@ -540,7 +534,7 @@ const PhcFormatHasher = struct {
crypto.random.bytes(&salt); crypto.random.bytes(&salt);
var hash: [default_hash_len]u8 = undefined; var hash: [default_hash_len]u8 = undefined;
try kdf(allocator, &hash, password, &salt, params, mode); try kdf(allocator, &hash, password, &salt, params, mode, io);
return phc_format.serialize(HashResult{ return phc_format.serialize(HashResult{
.alg_id = @tagName(mode), .alg_id = @tagName(mode),
@ -557,6 +551,7 @@ const PhcFormatHasher = struct {
allocator: mem.Allocator, allocator: mem.Allocator,
str: []const u8, str: []const u8,
password: []const u8, password: []const u8,
io: Io,
) HasherError!void { ) HasherError!void {
const hash_result = try phc_format.deserialize(HashResult, str); const hash_result = try phc_format.deserialize(HashResult, str);
@ -572,7 +567,7 @@ const PhcFormatHasher = struct {
if (expected_hash.len > hash_buf.len) return HasherError.InvalidEncoding; if (expected_hash.len > hash_buf.len) return HasherError.InvalidEncoding;
const hash = hash_buf[0..expected_hash.len]; const hash = hash_buf[0..expected_hash.len];
try kdf(allocator, hash, password, hash_result.salt.constSlice(), params, mode); try kdf(allocator, hash, password, hash_result.salt.constSlice(), params, mode, io);
if (!mem.eql(u8, hash, expected_hash)) return HasherError.PasswordVerificationFailed; if (!mem.eql(u8, hash, expected_hash)) return HasherError.PasswordVerificationFailed;
} }
}; };
@ -595,6 +590,7 @@ pub fn strHash(
password: []const u8, password: []const u8,
options: HashOptions, options: HashOptions,
out: []u8, out: []u8,
io: Io,
) Error![]const u8 { ) Error![]const u8 {
const allocator = options.allocator orelse return Error.AllocatorRequired; const allocator = options.allocator orelse return Error.AllocatorRequired;
switch (options.encoding) { switch (options.encoding) {
@ -604,6 +600,7 @@ pub fn strHash(
options.params, options.params,
options.mode, options.mode,
out, out,
io,
), ),
.crypt => return Error.InvalidEncoding, .crypt => return Error.InvalidEncoding,
} }
@ -621,9 +618,10 @@ pub fn strVerify(
str: []const u8, str: []const u8,
password: []const u8, password: []const u8,
options: VerifyOptions, options: VerifyOptions,
io: Io,
) Error!void { ) Error!void {
const allocator = options.allocator orelse return Error.AllocatorRequired; const allocator = options.allocator orelse return Error.AllocatorRequired;
return PhcFormatHasher.verify(allocator, str, password); return PhcFormatHasher.verify(allocator, str, password, io);
} }
test "argon2d" { test "argon2d" {
@ -640,6 +638,7 @@ test "argon2d" {
&salt, &salt,
.{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad }, .{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad },
.argon2d, .argon2d,
std.testing.io,
); );
const want = [_]u8{ const want = [_]u8{
@ -665,6 +664,7 @@ test "argon2i" {
&salt, &salt,
.{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad }, .{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad },
.argon2i, .argon2i,
std.testing.io,
); );
const want = [_]u8{ const want = [_]u8{
@ -690,6 +690,7 @@ test "argon2id" {
&salt, &salt,
.{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad }, .{ .t = 3, .m = 32, .p = 4, .secret = &secret, .ad = &ad },
.argon2id, .argon2id,
std.testing.io,
); );
const want = [_]u8{ const want = [_]u8{
@ -800,44 +801,44 @@ test "kdf" {
.{ .{
.mode = .argon2i, .mode = .argon2i,
.time = 4, .time = 4,
.memory = 4096, .memory = 256,
.threads = 4, .threads = 4,
.hash = "a11f7b7f3f93f02ad4bddb59ab62d121e278369288a0d0e7", .hash = "f7dbbacbf16999e3700817a7e06f65a8db2e9fa9504ede4c",
}, },
.{ .{
.mode = .argon2d, .mode = .argon2d,
.time = 4, .time = 4,
.memory = 4096, .memory = 256,
.threads = 4, .threads = 4,
.hash = "935598181aa8dc2b720914aa6435ac8d3e3a4210c5b0fb2d", .hash = "ea2970501cf49faa5ba1d2e6370204e9b57ca90a8fea937b",
}, },
.{ .{
.mode = .argon2id, .mode = .argon2id,
.time = 4, .time = 4,
.memory = 4096, .memory = 256,
.threads = 4, .threads = 4,
.hash = "145db9733a9f4ee43edf33c509be96b934d505a4efb33c5a", .hash = "fbd40d5a8cb92f88c20bda4b3cdb1f9d5af1efa937032410",
}, },
.{ .{
.mode = .argon2i, .mode = .argon2i,
.time = 4, .time = 4,
.memory = 1024, .memory = 256,
.threads = 8, .threads = 8,
.hash = "0cdd3956aa35e6b475a7b0c63488822f774f15b43f6e6e17", .hash = "15d3c398364e53f68fd12d19baf3f21432d964254fe27467",
}, },
.{ .{
.mode = .argon2d, .mode = .argon2d,
.time = 4, .time = 4,
.memory = 1024, .memory = 256,
.threads = 8, .threads = 8,
.hash = "83604fc2ad0589b9d055578f4d3cc55bc616df3578a896e9", .hash = "23c9adc06f06e21e4612c1466a1be02627690932b02c0df0",
}, },
.{ .{
.mode = .argon2id, .mode = .argon2id,
.time = 4, .time = 4,
.memory = 1024, .memory = 256,
.threads = 8, .threads = 8,
.hash = "8dafa8e004f8ea96bf7c0f93eecf67a6047476143d15577f", .hash = "f22802f8ca47be93f9954e4ce20c1e944e938fbd4a125d9d",
}, },
.{ .{
.mode = .argon2i, .mode = .argon2i,
@ -863,23 +864,23 @@ test "kdf" {
.{ .{
.mode = .argon2i, .mode = .argon2i,
.time = 3, .time = 3,
.memory = 1024, .memory = 256,
.threads = 6, .threads = 6,
.hash = "d236b29c2b2a09babee842b0dec6aa1e83ccbdea8023dced", .hash = "ebc8f91964abd8ceab49a12963b0a9e57d635bfa2aad2884",
}, },
.{ .{
.mode = .argon2d, .mode = .argon2d,
.time = 3, .time = 3,
.memory = 1024, .memory = 256,
.threads = 6, .threads = 6,
.hash = "a3351b0319a53229152023d9206902f4ef59661cdca89481", .hash = "1dd7202fd68da6675f769f4034b7a1db30d8785331954117",
}, },
.{ .{
.mode = .argon2id, .mode = .argon2id,
.time = 3, .time = 3,
.memory = 1024, .memory = 256,
.threads = 6, .threads = 6,
.hash = "1640b932f4b60e272f5d2207b9a9c626ffa1bd88d2349016", .hash = "424436b6ee22a66b04b9d0cf78f190305c5c166bae8baa09",
}, },
}; };
for (test_vectors) |v| { for (test_vectors) |v| {
@ -894,6 +895,7 @@ test "kdf" {
salt, salt,
.{ .t = v.time, .m = v.memory, .p = v.threads }, .{ .t = v.time, .m = v.memory, .p = v.threads },
v.mode, v.mode,
std.testing.io,
); );
try std.testing.expectEqualSlices(u8, &dk, &want); try std.testing.expectEqualSlices(u8, &dk, &want);
@ -903,6 +905,7 @@ test "kdf" {
test "phc format hasher" { test "phc format hasher" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const password = "testpass"; const password = "testpass";
const io = std.testing.io;
var buf: [128]u8 = undefined; var buf: [128]u8 = undefined;
const hash = try PhcFormatHasher.create( const hash = try PhcFormatHasher.create(
@ -911,25 +914,29 @@ test "phc format hasher" {
.{ .t = 3, .m = 32, .p = 4 }, .{ .t = 3, .m = 32, .p = 4 },
.argon2id, .argon2id,
&buf, &buf,
io,
); );
try PhcFormatHasher.verify(allocator, hash, password); try PhcFormatHasher.verify(allocator, hash, password, io);
} }
test "password hash and password verify" { test "password hash and password verify" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const password = "testpass"; const password = "testpass";
const io = std.testing.io;
var buf: [128]u8 = undefined; var buf: [128]u8 = undefined;
const hash = try strHash( const hash = try strHash(
password, password,
.{ .allocator = allocator, .params = .{ .t = 3, .m = 32, .p = 4 } }, .{ .allocator = allocator, .params = .{ .t = 3, .m = 32, .p = 4 } },
&buf, &buf,
io,
); );
try strVerify(hash, password, .{ .allocator = allocator }); try strVerify(hash, password, .{ .allocator = allocator }, io);
} }
test "kdf derived key length" { test "kdf derived key length" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
const password = "testpass"; const password = "testpass";
const salt = "saltsalt"; const salt = "saltsalt";
@ -937,11 +944,11 @@ test "kdf derived key length" {
const mode = Mode.argon2id; const mode = Mode.argon2id;
var dk1: [11]u8 = undefined; var dk1: [11]u8 = undefined;
try kdf(allocator, &dk1, password, salt, params, mode); try kdf(allocator, &dk1, password, salt, params, mode, io);
var dk2: [77]u8 = undefined; var dk2: [77]u8 = undefined;
try kdf(allocator, &dk2, password, salt, params, mode); try kdf(allocator, &dk2, password, salt, params, mode, io);
var dk3: [111]u8 = undefined; var dk3: [111]u8 = undefined;
try kdf(allocator, &dk3, password, salt, params, mode); try kdf(allocator, &dk3, password, salt, params, mode, io);
} }
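
A minimal sketch of the Io.Group fan-out/join pattern that replaces Thread.spawn above, assuming only the API surface visible in this diff (.init, group.async(io, func, args), group.wait(io)); the function names here are illustrative.

const std = @import("std");
const Io = std.Io;

// Illustrative names only; the API surface (.init, group.async, group.wait) is
// the one visible in the processBlocksMt change above.
fn sumChunks(io: Io, chunks: []const []const u64, results: []u64) void {
    var group: Io.Group = .init;
    for (chunks, results) |chunk, *out| {
        // Each task fills its own result slot, so no locking is needed.
        group.async(io, sumOne, .{ chunk, out });
    }
    // Join all tasks before returning, mirroring group.wait(io) in the diff.
    group.wait(io);
}

fn sumOne(chunk: []const u64, out: *u64) void {
    var total: u64 = 0;
    for (chunk) |x| total += x;
    out.* = total;
}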

View file

@ -450,6 +450,7 @@ fn benchmarkPwhash(
comptime ty: anytype, comptime ty: anytype,
comptime params: *const anyopaque, comptime params: *const anyopaque,
comptime count: comptime_int, comptime count: comptime_int,
io: std.Io,
) !f64 { ) !f64 {
const password = "testpass" ** 2; const password = "testpass" ** 2;
const opts = ty.HashOptions{ const opts = ty.HashOptions{
@ -459,12 +460,20 @@ fn benchmarkPwhash(
}; };
var buf: [256]u8 = undefined; var buf: [256]u8 = undefined;
const strHash = ty.strHash;
const strHashFnInfo = @typeInfo(@TypeOf(strHash)).@"fn";
const needs_io = strHashFnInfo.params.len == 4;
var timer = try Timer.start(); var timer = try Timer.start();
const start = timer.lap(); const start = timer.lap();
{ {
var i: usize = 0; var i: usize = 0;
while (i < count) : (i += 1) { while (i < count) : (i += 1) {
_ = try ty.strHash(password, opts, &buf); if (needs_io) {
_ = try strHash(password, opts, &buf, io);
} else {
_ = try strHash(password, opts, &buf);
}
mem.doNotOptimizeAway(&buf); mem.doNotOptimizeAway(&buf);
} }
} }
@ -623,7 +632,7 @@ pub fn main() !void {
inline for (pwhashes) |H| { inline for (pwhashes) |H| {
if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) { if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
const throughput = try benchmarkPwhash(arena_allocator, H.ty, H.params, mode(64)); const throughput = try benchmarkPwhash(arena_allocator, H.ty, H.params, mode(64), io);
try stdout.print("{s:>17}: {d:10.3} s/ops\n", .{ H.name, throughput }); try stdout.print("{s:>17}: {d:10.3} s/ops\n", .{ H.name, throughput });
try stdout.flush(); try stdout.flush();
} }

File diff suppressed because it is too large.

View file

@ -1604,6 +1604,13 @@ pub fn dumpStackPointerAddr(prefix: []const u8) void {
test "manage resources correctly" { test "manage resources correctly" {
if (SelfInfo == void) return error.SkipZigTest; if (SelfInfo == void) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) {
// The C backend emits an extremely large C source file, meaning it has a huge
// amount of debug information. Parsing this debug information makes this test
// take too long to be worth running.
return error.SkipZigTest;
}
const S = struct { const S = struct {
noinline fn showMyTrace() usize { noinline fn showMyTrace() usize {
return @returnAddress(); return @returnAddress();

View file

@ -1,5 +1,6 @@
//! This file is auto-generated by tools/update_crc_catalog.zig. //! This file is auto-generated by tools/update_crc_catalog.zig.
const builtin = @import("builtin");
const impl = @import("crc/impl.zig"); const impl = @import("crc/impl.zig");
pub const Crc = impl.Crc; pub const Crc = impl.Crc;
@ -13,6 +14,17 @@ test {
_ = @import("crc/test.zig"); _ = @import("crc/test.zig");
} }
pub const Crc32Iscsi = switch (builtin.cpu.hasAll(.x86, &.{ .@"64bit", .crc32 })) {
true => @import("crc/crc32c.zig").Wrapper,
else => Crc(u32, .{
.polynomial = 0x1edc6f41,
.initial = 0xffffffff,
.reflect_input = true,
.reflect_output = true,
.xor_output = 0xffffffff,
}),
};
pub const Crc3Gsm = Crc(u3, .{ pub const Crc3Gsm = Crc(u3, .{
.polynomial = 0x3, .polynomial = 0x3,
.initial = 0x0, .initial = 0x0,
@ -797,14 +809,6 @@ pub const Crc32Cksum = Crc(u32, .{
.xor_output = 0xffffffff, .xor_output = 0xffffffff,
}); });
pub const Crc32Iscsi = Crc(u32, .{
.polynomial = 0x1edc6f41,
.initial = 0xffffffff,
.reflect_input = true,
.reflect_output = true,
.xor_output = 0xffffffff,
});
pub const Crc32IsoHdlc = Crc(u32, .{ pub const Crc32IsoHdlc = Crc(u32, .{
.polynomial = 0x04c11db7, .polynomial = 0x04c11db7,
.initial = 0xffffffff, .initial = 0xffffffff,

lib/std/hash/crc/crc32c.zig (new file, 239 lines)
View file

@ -0,0 +1,239 @@
//! Implements CRC-32C (Castagnoli) using the SSE4.2 Intel CRC32 instruction.
//!
//! A couple useful links for understanding the approach taken here:
//! - https://github.com/madler/brotli/blob/1d428d3a9baade233ebc3ac108293256bcb813d1/crc32c.c
//! - https://github.com/madler/zlib/blob/5a82f71ed1dfc0bec044d9702463dbdf84ea3b71/crc32.c
//! - http://www.ross.net/crc/download/crc_v3.txt
// The CRC-32C polynomial in bit-reflected form.
const POLY = 0x82f63b78;
const LONG = 8192;
const SHORT = 256;
const long_lookup_table = genTable(LONG);
const short_lookup_table = genTable(SHORT);
/// Generates the lookup table used to efficiently combine independently computed CRCs over blocks of `length` bytes.
/// It works by building an operator that advances the CRC state as if `length` zero bytes were appended.
/// We pre-compute 4 tables of 256 entries each (one per byte of the 32-bit CRC state).
///
/// The idea behind this table is quite interesting. The CRC state is equivalent to the
/// remainder of dividing the message polynomial (over GF(2)) by the CRC polynomial.
///
/// Advancing the CRC register by `k` zero bits is equivalent to multiplying the current
/// CRC state by `x^k` modulo the CRC polynomial. This operation can be represented
/// as a linear transformation over GF(2), i.e., a matrix.
///
/// We build up this matrix via repeated squaring:
/// - odd represents the operator for 1 zero bit (i.e., multiplication by `x^1 mod POLY`)
/// - even represents the operator for 2 zero bits (`x^2 mod POLY`)
/// - squaring again gives `x^4 mod POLY`, and so on until we reach the required size.
///
/// By repeatedly squaring while halving `len`, we build the operator for appending `length` zero bytes.
fn genTable(length: usize) [4][256]u32 {
@setEvalBranchQuota(250000);
var even: [32]u32 = undefined;
zeroes: {
var odd: [32]u32 = undefined;
// Initialize our `odd` array with the operator for a single zero bit:
// - odd[0] is the polynomial itself (acts on the MSB).
// - odd[1..32] represent shifting a single bit through 31 positions.
odd[0] = POLY;
var row: u32 = 1;
for (1..32) |n| {
odd[n] = row;
row <<= 1;
}
// even = odd squared: even represents `x^2 mod POLY`.
square(&even, &odd);
// odd = even squared: odd now represents `x^4 mod POLY`.
square(&odd, &even);
// Continue squaring to double the number of zeroes encoded each time:
//
// At each step:
// - square(even, odd): even becomes the operator for twice as many zeroes as odd.
// - square(odd, even): odd becomes the operator for twice as many zeroes as even.
var len = length;
while (true) {
square(&even, &odd);
len >>= 1;
if (len == 0) break :zeroes;
square(&odd, &even);
len >>= 1;
if (len == 0) break;
}
@memcpy(&even, &odd);
}
var zeroes: [4][256]u32 = undefined;
for (0..256) |n| {
zeroes[0][n] = times(&even, n);
zeroes[1][n] = times(&even, n << 8);
zeroes[2][n] = times(&even, n << 16);
zeroes[3][n] = times(&even, n << 24);
}
return zeroes;
}
/// Computes `mat * vec` over `GF(2)`, where `mat` is a 32x32 binary matrix and `vec`
/// is a 32-bit vector. This somewhat "simulates" how bits propagate through the CRC register
/// during shifting.
///
/// - In GF(2) (the field whose only elements are 0 and 1), multiplication is
/// an `AND`, and addition is an `XOR`.
/// - This dot product determines how each bit in the input vector "contributes" to
/// the final CRC state, by XORing (adding) rows of the matrix where `vec` has 1s.
fn times(mat: *const [32]u32, vec: u32) u32 {
var sum: u32 = 0;
var v = vec;
var i: u32 = 0;
while (v != 0) {
if (v & 1 != 0) sum ^= mat[i];
v >>= 1;
i += 1;
}
return sum;
}
/// Computes the square of a matrix over GF(2), i.e. `dst = src x src`.
///
/// This produces the operator for doubling the number of zeroes:
/// if `src` represents advancing the CRC by `k` zeroes, then `dst` will
/// represent advancing by `2k` zeroes.
///
/// Since polynomial multiplication mod POLY is linear, `mat(mat(x)) = mat^2(x)`
/// gives the effect of two sequential applications of the operator.
fn square(dst: *[32]u32, src: *const [32]u32) void {
for (dst, src) |*d, s| {
d.* = times(src, s);
}
}
fn shift(table: *const [4][256]u32, crc: u32) u32 {
return table[0][crc & 0xFF] ^ table[1][(crc >> 8) & 0xFF] ^ table[2][(crc >> 16) & 0xFF] ^ table[3][crc >> 24];
}
fn crc32(crc: u32, input: []const u8) u32 {
var crc0: u64 = ~crc;
// Compute the CRC for up to seven leading bytes to bring the
// `next` pointer to an eight-byte boundary.
var next = input;
while (next.len > 0 and @intFromPtr(next.ptr) & 7 != 0) {
asm volatile ("crc32b %[out], %[in]"
: [in] "+r" (crc0),
: [out] "rm" (next[0]),
);
next = next[1..];
}
// Compute the CRC on sets of LONG * 3 bytes, executing three independent
// CRC instructions, each on LONG bytes. This is an optimization for
// targets where the CRC instruction has a throughput of one CRC per
// cycle, but a latency of three cycles.
while (next.len >= LONG * 3) {
var crc1: u64 = 0;
var crc2: u64 = 0;
const start = next.len;
while (true) {
// Safe @alignCast(), since we've aligned the pointer to 8 bytes before this loop.
const long: [*]const u64 = @ptrCast(@alignCast(next));
asm volatile (
\\crc32q %[out0], %[in0]
\\crc32q %[out1], %[in1]
\\crc32q %[out2], %[in2]
: [in0] "+r" (crc0),
[in1] "+r" (crc1),
[in2] "+r" (crc2),
: [out0] "rm" (long[0 * LONG / 8]),
[out1] "rm" (long[1 * LONG / 8]),
[out2] "rm" (long[2 * LONG / 8]),
);
next = next[8..];
if (next.len <= start - LONG) break;
}
crc0 = shift(&long_lookup_table, @truncate(crc0)) ^ crc1;
crc0 = shift(&long_lookup_table, @truncate(crc0)) ^ crc2;
next = next[LONG * 2 ..];
}
// Same thing as above, but for smaller chunks of SHORT bytes.
while (next.len >= SHORT * 3) {
var crc1: u64 = 0;
var crc2: u64 = 0;
const start = next.len;
while (true) {
const long: [*]const u64 = @ptrCast(@alignCast(next));
asm volatile (
\\crc32q %[out0], %[in0]
\\crc32q %[out1], %[in1]
\\crc32q %[out2], %[in2]
: [in0] "+r" (crc0),
[in1] "+r" (crc1),
[in2] "+r" (crc2),
: [out0] "rm" (long[0 * SHORT / 8]),
[out1] "rm" (long[1 * SHORT / 8]),
[out2] "rm" (long[2 * SHORT / 8]),
);
next = next[8..];
if (next.len <= start - SHORT) break;
}
crc0 = shift(&short_lookup_table, @truncate(crc0)) ^ crc1;
crc0 = shift(&short_lookup_table, @truncate(crc0)) ^ crc2;
next = next[SHORT * 2 ..];
}
// Compute via 8-byte chunks, until we're left with less than 8 bytes.
while (next.len >= 8) {
const long: [*]const u64 = @ptrCast(@alignCast(next));
asm volatile ("crc32q %[out], %[in]"
: [in] "+r" (crc0),
: [out] "rm" (long[0]),
);
next = next[8..];
}
// Finish the remaining bytes one at a time with single-byte CRC instructions.
while (next.len > 0) {
asm volatile ("crc32b %[out], %[in]"
: [in] "+r" (crc0),
: [out] "rm" (next[0]),
);
next = next[1..];
}
return @truncate(~crc0);
}
// Wrapper around the accelerated implementation, matching the interface of the generic `Crc` type in impl.zig.
pub const Wrapper = struct {
crc: u32,
pub fn init() Wrapper {
return .{ .crc = 0 };
}
pub fn update(w: *Wrapper, bytes: []const u8) void {
w.crc = crc32(w.crc, bytes);
}
pub fn final(w: Wrapper) u32 {
return w.crc;
}
pub fn hash(bytes: []const u8) u32 {
var c = init();
c.update(bytes);
return c.final();
}
};
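
For cross-checking the accelerated path, here is a portable, bitwise CRC-32C sketch with the same parameters as Crc32Iscsi (reflected polynomial 0x82f63b78, initial value and final xor 0xffffffff); crc32cReference is a hypothetical helper, not part of the new file.

const std = @import("std");

// Hypothetical reference: byte-at-a-time reflected CRC-32C with the same
// parameters as Crc32Iscsi. Useful for sanity-checking the SSE4.2 path.
fn crc32cReference(data: []const u8) u32 {
    var crc: u32 = 0xffffffff;
    for (data) |byte| {
        crc ^= byte;
        for (0..8) |_| {
            crc = if (crc & 1 != 0) (crc >> 1) ^ 0x82f63b78 else crc >> 1;
        }
    }
    return ~crc;
}

test "reference CRC-32C matches the standard check value" {
    try std.testing.expectEqual(@as(u32, 0xe3069283), crc32cReference("123456789"));
}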

View file

@ -23,12 +23,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
const I = if (@bitSizeOf(W) < 8) u8 else W; const I = if (@bitSizeOf(W) < 8) u8 else W;
const lookup_table = blk: { const lookup_table = blk: {
@setEvalBranchQuota(2500); @setEvalBranchQuota(2500);
const poly = reflect(algorithm.polynomial);
const poly = if (algorithm.reflect_input)
@bitReverse(@as(I, algorithm.polynomial)) >> (@bitSizeOf(I) - @bitSizeOf(W))
else
@as(I, algorithm.polynomial) << (@bitSizeOf(I) - @bitSizeOf(W));
var table: [256]I = undefined; var table: [256]I = undefined;
for (&table, 0..) |*e, i| { for (&table, 0..) |*e, i| {
var crc: I = i; var crc: I = i;
@ -52,15 +47,13 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
crc: I, crc: I,
pub fn init() Self { pub fn init() Self {
const initial = if (algorithm.reflect_input) const initial = reflect(algorithm.initial);
@bitReverse(@as(I, algorithm.initial)) >> (@bitSizeOf(I) - @bitSizeOf(W)) return .{ .crc = initial };
else
@as(I, algorithm.initial) << (@bitSizeOf(I) - @bitSizeOf(W));
return Self{ .crc = initial };
} }
inline fn tableEntry(index: I) I { inline fn tableEntry(index: I) I {
return lookup_table[@as(u8, @intCast(index & 0xFF))]; const short: u8 = @truncate(index);
return lookup_table[short];
} }
pub fn update(self: *Self, bytes: []const u8) void { pub fn update(self: *Self, bytes: []const u8) void {
@ -90,7 +83,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
if (!algorithm.reflect_output) { if (!algorithm.reflect_output) {
c >>= @bitSizeOf(I) - @bitSizeOf(W); c >>= @bitSizeOf(I) - @bitSizeOf(W);
} }
return @as(W, @intCast(c ^ algorithm.xor_output)); return @intCast(c ^ algorithm.xor_output);
} }
pub fn hash(bytes: []const u8) W { pub fn hash(bytes: []const u8) W {
@ -98,5 +91,13 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
c.update(bytes); c.update(bytes);
return c.final(); return c.final();
} }
fn reflect(x: I) I {
const offset = @bitSizeOf(I) - @bitSizeOf(W);
if (algorithm.reflect_input)
return @bitReverse(x) >> offset
else
return x << offset;
}
}; };
} }
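
A usage sketch of the generic Crc type refactored above: CRC-16/XMODEM defined by hand from its published parameters, using the same Algorithm fields the catalog passes in. The catalog already exposes this algorithm; spelling it out here just illustrates the generic API.

const std = @import("std");

// CRC-16/XMODEM: poly 0x1021, init 0, no reflection, no output xor.
const Crc16Xmodem = std.hash.crc.Crc(u16, .{
    .polynomial = 0x1021,
    .initial = 0x0000,
    .reflect_input = false,
    .reflect_output = false,
    .xor_output = 0x0000,
});

test "CRC-16/XMODEM check value" {
    try std.testing.expectEqual(@as(u16, 0x31c3), Crc16Xmodem.hash("123456789"));
}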

View file

@ -26,6 +26,17 @@ test "crc32 koopman regression" {
try testing.expectEqual(crc32.hash("abc"), 0xba2322ac); try testing.expectEqual(crc32.hash("abc"), 0xba2322ac);
} }
test "CRC-32/ISCSI" {
const Crc32Iscsi = crc.Crc32Iscsi;
try testing.expectEqual(@as(u32, 0xe3069283), Crc32Iscsi.hash("123456789"));
var c = Crc32Iscsi.init();
c.update("1234");
c.update("56789");
try testing.expectEqual(@as(u32, 0xe3069283), c.final());
}
test "CRC-3/GSM" { test "CRC-3/GSM" {
const Crc3Gsm = crc.Crc3Gsm; const Crc3Gsm = crc.Crc3Gsm;
@ -1104,17 +1115,6 @@ test "CRC-32/CKSUM" {
try testing.expectEqual(@as(u32, 0x765e7680), c.final()); try testing.expectEqual(@as(u32, 0x765e7680), c.final());
} }
test "CRC-32/ISCSI" {
const Crc32Iscsi = crc.Crc32Iscsi;
try testing.expectEqual(@as(u32, 0xe3069283), Crc32Iscsi.hash("123456789"));
var c = Crc32Iscsi.init();
c.update("1234");
c.update("56789");
try testing.expectEqual(@as(u32, 0xe3069283), c.final());
}
test "CRC-32/ISO-HDLC" { test "CRC-32/ISO-HDLC" {
const Crc32IsoHdlc = crc.Crc32IsoHdlc; const Crc32IsoHdlc = crc.Crc32IsoHdlc;

View file

@ -141,7 +141,19 @@ test defaultQueryPageSize {
assert(std.math.isPowerOfTwo(defaultQueryPageSize())); assert(std.math.isPowerOfTwo(defaultQueryPageSize()));
} }
const CAllocator = struct { /// A wrapper around the C memory allocation API which supports the full `Allocator`
/// interface, including arbitrary alignment. Simple `malloc` calls are used when
/// possible, but large requested alignments may require larger buffers in order to
/// satisfy the request. As well as `malloc`, `realloc`, and `free`, the extension
/// functions `malloc_usable_size` and `posix_memalign` are used when available.
///
/// For an allocator that directly calls `malloc`/`realloc`/`free`, with no padding
/// or special handling, see `raw_c_allocator`.
pub const c_allocator: Allocator = .{
.ptr = undefined,
.vtable = &c_allocator_impl.vtable,
};
const c_allocator_impl = struct {
comptime { comptime {
if (!builtin.link_libc) { if (!builtin.link_libc) {
@compileError("C allocator is only available when linking against libc"); @compileError("C allocator is only available when linking against libc");
@ -155,67 +167,55 @@ const CAllocator = struct {
.free = free, .free = free,
}; };
pub const supports_malloc_size = @TypeOf(malloc_size) != void; const have_posix_memalign = switch (builtin.os.tag) {
pub const malloc_size = if (@TypeOf(c.malloc_size) != void) .dragonfly,
c.malloc_size .netbsd,
else if (@TypeOf(c.malloc_usable_size) != void) .freebsd,
c.malloc_usable_size .illumos,
else if (@TypeOf(c._msize) != void) .openbsd,
c._msize .linux,
else {}; .driverkit,
.ios,
pub const supports_posix_memalign = switch (builtin.os.tag) { .maccatalyst,
.dragonfly, .netbsd, .freebsd, .illumos, .openbsd, .linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .serenity => true, .macos,
.tvos,
.visionos,
.watchos,
.serenity,
=> true,
else => false, else => false,
}; };
fn getHeader(ptr: [*]u8) *[*]u8 { fn allocStrat(need_align: Alignment) union(enum) {
return @ptrCast(@alignCast(ptr - @sizeOf(usize))); raw,
posix_memalign: if (have_posix_memalign) void else noreturn,
manual_align: if (have_posix_memalign) noreturn else void,
} {
// If `malloc` guarantees `need_align`, always prefer a raw allocation.
if (Alignment.compare(need_align, .lte, .of(c.max_align_t))) {
return .raw;
}
// Use `posix_memalign` if available. Otherwise, we must manually align the allocation.
return if (have_posix_memalign) .posix_memalign else .manual_align;
} }
fn alignedAlloc(len: usize, alignment: Alignment) ?[*]u8 { /// If `allocStrat(a) == .manual_align`, an allocation looks like this:
const alignment_bytes = alignment.toByteUnits(); ///
if (supports_posix_memalign) { /// unaligned_ptr hdr_ptr aligned_ptr
// The posix_memalign only accepts alignment values that are a /// v v v
// multiple of the pointer size /// +---------------+--------+--------------+
const effective_alignment = @max(alignment_bytes, @sizeOf(usize)); /// | padding | header | usable bytes |
/// +---------------+--------+--------------+
var aligned_ptr: ?*anyopaque = undefined; ///
if (c.posix_memalign(&aligned_ptr, effective_alignment, len) != 0) /// * `unaligned_ptr` is the raw return value of `malloc`.
return null; /// * `aligned_ptr` is computed by aligning `unaligned_ptr` forward; it is what `alloc` returns.
/// * `hdr_ptr` points to a pointer-sized header directly before the usable space. This header
return @ptrCast(aligned_ptr); /// contains the value `unaligned_ptr`, so that we can pass it to `free` later. This is
} /// necessary because the width of the padding is unknown.
///
// Thin wrapper around regular malloc, overallocate to account for /// This function accepts `aligned_ptr` and offsets it backwards to return `hdr_ptr`.
// alignment padding and store the original malloc()'ed pointer before fn manualAlignHeader(aligned_ptr: [*]u8) *[*]u8 {
// the aligned address. return @ptrCast(@alignCast(aligned_ptr - @sizeOf(usize)));
const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment_bytes - 1 + @sizeOf(usize)) orelse return null));
const unaligned_addr = @intFromPtr(unaligned_ptr);
const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment_bytes);
const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
getHeader(aligned_ptr).* = unaligned_ptr;
return aligned_ptr;
}
fn alignedFree(ptr: [*]u8) void {
if (supports_posix_memalign) {
return c.free(ptr);
}
const unaligned_ptr = getHeader(ptr).*;
c.free(unaligned_ptr);
}
fn alignedAllocSize(ptr: [*]u8) usize {
if (supports_posix_memalign) {
return CAllocator.malloc_size(ptr);
}
const unaligned_ptr = getHeader(ptr).*;
const delta = @intFromPtr(ptr) - @intFromPtr(unaligned_ptr);
return CAllocator.malloc_size(unaligned_ptr) - delta;
} }
fn alloc( fn alloc(
@ -226,67 +226,120 @@ const CAllocator = struct {
) ?[*]u8 { ) ?[*]u8 {
_ = return_address; _ = return_address;
assert(len > 0); assert(len > 0);
return alignedAlloc(len, alignment); switch (allocStrat(alignment)) {
.raw => {
// C only needs to respect `max_align_t` up to the allocation size due to object
// alignment rules. If necessary, extend the allocation size.
const actual_len = @max(len, @alignOf(std.c.max_align_t));
const ptr = c.malloc(actual_len) orelse return null;
assert(alignment.check(@intFromPtr(ptr)));
return @ptrCast(ptr);
},
.posix_memalign => {
// posix_memalign only accepts alignment values that are a
// multiple of the pointer size.
const effective_alignment = @max(alignment.toByteUnits(), @sizeOf(usize));
var aligned_ptr: ?*anyopaque = undefined;
if (c.posix_memalign(&aligned_ptr, effective_alignment, len) != 0) {
return null;
}
assert(alignment.check(@intFromPtr(aligned_ptr)));
return @ptrCast(aligned_ptr);
},
.manual_align => {
// Overallocate to account for alignment padding and store the original pointer
// returned by `malloc` before the aligned address.
const padded_len = len + @sizeOf(usize) + alignment.toByteUnits() - 1;
const unaligned_ptr: [*]u8 = @ptrCast(c.malloc(padded_len) orelse return null);
const unaligned_addr = @intFromPtr(unaligned_ptr);
const aligned_addr = alignment.forward(unaligned_addr + @sizeOf(usize));
const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
manualAlignHeader(aligned_ptr).* = unaligned_ptr;
return aligned_ptr;
},
}
} }
fn resize( fn resize(
_: *anyopaque, _: *anyopaque,
buf: []u8, memory: []u8,
alignment: Alignment, alignment: Alignment,
new_len: usize, new_len: usize,
return_address: usize, return_address: usize,
) bool { ) bool {
_ = alignment;
_ = return_address; _ = return_address;
if (new_len <= buf.len) { assert(new_len > 0);
return true; if (new_len <= memory.len) {
return true; // in-place shrink always works
} }
if (CAllocator.supports_malloc_size) { const mallocSize = func: {
const full_len = alignedAllocSize(buf.ptr); if (@TypeOf(c.malloc_size) != void) break :func c.malloc_size;
if (new_len <= full_len) { if (@TypeOf(c.malloc_usable_size) != void) break :func c.malloc_usable_size;
return true; if (@TypeOf(c._msize) != void) break :func c._msize;
} return false; // we don't know how much space is actually available
} };
return false; const usable_len: usize = switch (allocStrat(alignment)) {
.raw, .posix_memalign => mallocSize(memory.ptr),
.manual_align => usable_len: {
const unaligned_ptr = manualAlignHeader(memory.ptr).*;
const full_len = mallocSize(unaligned_ptr);
const padding = @intFromPtr(memory.ptr) - @intFromPtr(unaligned_ptr);
break :usable_len full_len - padding;
},
};
return new_len <= usable_len;
} }
fn remap( fn remap(
context: *anyopaque, ctx: *anyopaque,
memory: []u8, memory: []u8,
alignment: Alignment, alignment: Alignment,
new_len: usize, new_len: usize,
return_address: usize, return_address: usize,
) ?[*]u8 { ) ?[*]u8 {
// realloc would potentially return a new allocation that does not assert(new_len > 0);
// respect the original alignment. // Prefer resizing in-place if possible, since `realloc` could be expensive even if legal.
return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null; if (resize(ctx, memory, alignment, new_len, return_address)) {
return memory.ptr;
}
switch (allocStrat(alignment)) {
.raw => {
// `malloc` and friends guarantee the required alignment, so we can try `realloc`.
// C only needs to respect `max_align_t` up to the allocation size due to object
// alignment rules. If necessary, extend the allocation size.
const actual_len = @max(new_len, @alignOf(std.c.max_align_t));
const new_ptr = c.realloc(memory.ptr, actual_len) orelse return null;
assert(alignment.check(@intFromPtr(new_ptr)));
return @ptrCast(new_ptr);
},
.posix_memalign, .manual_align => {
// `realloc` would potentially return a new allocation which does not respect
// the original alignment, so we can't do anything more.
return null;
},
}
} }
fn free( fn free(
_: *anyopaque, _: *anyopaque,
buf: []u8, memory: []u8,
alignment: Alignment, alignment: Alignment,
return_address: usize, return_address: usize,
) void { ) void {
_ = alignment;
_ = return_address; _ = return_address;
alignedFree(buf.ptr); switch (allocStrat(alignment)) {
.raw, .posix_memalign => c.free(memory.ptr),
.manual_align => c.free(manualAlignHeader(memory.ptr).*),
}
} }
}; };
/// Supports the full Allocator interface, including alignment, and exploiting /// Asserts that allocations have alignments which `malloc` can satisfy. This means that
/// `malloc_usable_size` if available. For an allocator that directly calls /// the requested alignment is no greater than `@min(@alignOf(std.c.max_align_t), size)`.
/// `malloc`/`free`, see `raw_c_allocator`. ///
pub const c_allocator: Allocator = .{ /// This allocator is rarely appropriate to use. In general, prefer `c_allocator`, which
.ptr = undefined, /// does not have any special requirements of its input, but is still highly efficient for
.vtable = &CAllocator.vtable, /// allocation requests which obey `malloc` alignment rules.
};
/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly
/// calls `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`.
/// This allocator is safe to use as the backing allocator with
/// `ArenaAllocator` for example and is more optimal in such a case than
/// `c_allocator`.
pub const raw_c_allocator: Allocator = .{ pub const raw_c_allocator: Allocator = .{
.ptr = undefined, .ptr = undefined,
.vtable = &raw_c_allocator_vtable, .vtable = &raw_c_allocator_vtable,
@ -306,13 +359,20 @@ fn rawCAlloc(
) ?[*]u8 { ) ?[*]u8 {
_ = context; _ = context;
_ = return_address; _ = return_address;
assert(alignment.compare(.lte, .of(std.c.max_align_t))); // `std.c.max_align_t` isn't the whole story, because if `len` is smaller than
// Note that this pointer cannot be aligncasted to max_align_t because if // every C type with alignment `max_align_t`, the allocation can be less-aligned.
// len is < max_align_t then the alignment can be smaller. For example, if // The implementation need only guarantee that any type of length `len` would be
// max_align_t is 16, but the user requests 8 bytes, there is no built-in // suitably aligned.
// type in C that is size 8 and has 16 byte alignment, so the alignment may //
// be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc // For instance, if `len == 8` and `alignment == .@"16"`, then `malloc` may not
// is allowed to return a 1-byte aligned pointer. // fulfil this request, because there is necessarily no C type with 8-byte size
// but 16-byte alignment.
//
// In theory, the resulting rule here would be target-specific, but in practice,
// the smallest type with an alignment of `max_align_t` has the same size (it's
// usually `c_longdouble`), so we can just check that `alignment <= len`.
assert(alignment.toByteUnits() <= len);
assert(Alignment.compare(alignment, .lte, .of(std.c.max_align_t)));
return @ptrCast(c.malloc(len)); return @ptrCast(c.malloc(len));
} }
@ -339,8 +399,9 @@ fn rawCRemap(
return_address: usize, return_address: usize,
) ?[*]u8 { ) ?[*]u8 {
_ = context; _ = context;
_ = alignment;
_ = return_address; _ = return_address;
// See `rawCMalloc` for an explanation of this `assert` call.
assert(alignment.toByteUnits() <= new_len);
return @ptrCast(c.realloc(memory.ptr, new_len)); return @ptrCast(c.realloc(memory.ptr, new_len));
} }
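
A small usage sketch of the reworked c_allocator, assuming libc is linked: a u8 allocation only needs 1-byte alignment, which malloc always guarantees, so allocStrat picks the header-free raw path described in the comments above.

const std = @import("std");

test "c_allocator raw path for small alignments (sketch)" {
    // Requires linking libc. A []u8 allocation needs 1-byte alignment, which
    // malloc always guarantees, so allocStrat returns .raw and no header or
    // padding is allocated.
    const gpa = std.heap.c_allocator;
    const buf = try gpa.alloc(u8, 100);
    defer gpa.free(buf);
    @memset(buf, 0xaa);
    try std.testing.expectEqual(@as(u8, 0xaa), buf[0]);
}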

View file

@ -1748,9 +1748,7 @@ pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize {
} }
pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize { pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
if (native_arch == .riscv32) { return syscall2(.nanosleep, @intFromPtr(req), @intFromPtr(rem));
@compileError("No nanosleep syscall on this architecture.");
} else return syscall2(.nanosleep, @intFromPtr(req), @intFromPtr(rem));
} }
pub fn pause() usize { pub fn pause() usize {
@ -3773,6 +3771,7 @@ pub const SIG = if (is_mips) enum(u32) {
PROF = 29, PROF = 29,
XCPU = 30, XCPU = 30,
XFZ = 31, XFZ = 31,
_,
} else if (is_sparc) enum(u32) { } else if (is_sparc) enum(u32) {
pub const BLOCK = 1; pub const BLOCK = 1;
pub const UNBLOCK = 2; pub const UNBLOCK = 2;
@ -3818,6 +3817,7 @@ pub const SIG = if (is_mips) enum(u32) {
LOST = 29, LOST = 29,
USR1 = 30, USR1 = 30,
USR2 = 31, USR2 = 31,
_,
} else enum(u32) { } else enum(u32) {
pub const BLOCK = 0; pub const BLOCK = 0;
pub const UNBLOCK = 1; pub const UNBLOCK = 1;
@ -3861,6 +3861,7 @@ pub const SIG = if (is_mips) enum(u32) {
IO = 29, IO = 29,
PWR = 30, PWR = 30,
SYS = 31, SYS = 31,
_,
}; };
pub const kernel_rwf = u32; pub const kernel_rwf = u32;

View file

@ -1360,6 +1360,7 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
.PIPE => return error.BrokenPipe, .PIPE => return error.BrokenPipe,
.CONNRESET => return error.ConnectionResetByPeer, .CONNRESET => return error.ConnectionResetByPeer,
.BUSY => return error.DeviceBusy, .BUSY => return error.DeviceBusy,
.CANCELED => return error.Canceled,
else => |err| return unexpectedErrno(err), else => |err| return unexpectedErrno(err),
} }
} }

View file

@ -690,6 +690,9 @@ pub const ArgIteratorWasi = struct {
/// Call to free the internal buffer of the iterator. /// Call to free the internal buffer of the iterator.
pub fn deinit(self: *ArgIteratorWasi) void { pub fn deinit(self: *ArgIteratorWasi) void {
// Nothing is allocated when there are no args
if (self.args.len == 0) return;
const last_item = self.args[self.args.len - 1]; const last_item = self.args[self.args.len - 1];
const last_byte_addr = @intFromPtr(last_item.ptr) + last_item.len + 1; // null terminated const last_byte_addr = @intFromPtr(last_item.ptr) + last_item.len + 1; // null terminated
const first_item_ptr = self.args[0].ptr; const first_item_ptr = self.args[0].ptr;

View file

@ -122,7 +122,7 @@ pub const ResourceUsageStatistics = struct {
/// if available. /// if available.
pub inline fn getMaxRss(rus: ResourceUsageStatistics) ?usize { pub inline fn getMaxRss(rus: ResourceUsageStatistics) ?usize {
switch (native_os) { switch (native_os) {
.linux => { .dragonfly, .freebsd, .netbsd, .openbsd, .illumos, .linux, .serenity => {
if (rus.rusage) |ru| { if (rus.rusage) |ru| {
return @as(usize, @intCast(ru.maxrss)) * 1024; return @as(usize, @intCast(ru.maxrss)) * 1024;
} else { } else {
@ -149,7 +149,21 @@ pub const ResourceUsageStatistics = struct {
} }
const rusage_init = switch (native_os) { const rusage_init = switch (native_os) {
.linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => @as(?posix.rusage, null), .dragonfly,
.freebsd,
.netbsd,
.openbsd,
.illumos,
.linux,
.serenity,
.driverkit,
.ios,
.maccatalyst,
.macos,
.tvos,
.visionos,
.watchos,
=> @as(?posix.rusage, null),
.windows => @as(?windows.VM_COUNTERS, null), .windows => @as(?windows.VM_COUNTERS, null),
else => {}, else => {},
}; };
@ -486,7 +500,21 @@ fn waitUnwrappedPosix(self: *ChildProcess) void {
const res: posix.WaitPidResult = res: { const res: posix.WaitPidResult = res: {
if (self.request_resource_usage_statistics) { if (self.request_resource_usage_statistics) {
switch (native_os) { switch (native_os) {
.linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => { .dragonfly,
.freebsd,
.netbsd,
.openbsd,
.illumos,
.linux,
.serenity,
.driverkit,
.ios,
.maccatalyst,
.macos,
.tvos,
.visionos,
.watchos,
=> {
var ru: posix.rusage = undefined; var ru: posix.rusage = undefined;
const res = posix.wait4(self.id, 0, &ru); const res = posix.wait4(self.id, 0, &ru);
self.resource_usage_statistics.rusage = ru; self.resource_usage_statistics.rusage = ru;

View file

@ -1195,6 +1195,9 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
if (extra.section_node.unwrap()) |section_node| { if (extra.section_node.unwrap()) |section_node| {
end_offset += 1; // for the rparen end_offset += 1; // for the rparen
n = section_node; n = section_node;
} else if (extra.addrspace_node.unwrap()) |addrspace_node| {
end_offset += 1; // for the rparen
n = addrspace_node;
} else if (extra.align_node.unwrap()) |align_node| { } else if (extra.align_node.unwrap()) |align_node| {
end_offset += 1; // for the rparen end_offset += 1; // for the rparen
n = align_node; n = align_node;

View file

@ -6084,6 +6084,16 @@ test "zig fmt: do not canonicalize invalid cast builtins" {
); );
} }
test "zig fmt: extern addrspace in struct" {
try testCanonical(
\\const namespace = struct {
\\ extern const num: u8 addrspace(.generic);
\\};
\\// comment
\\
);
}
test "recovery: top level" { test "recovery: top level" {
try testError( try testError(
\\test "" {inline} \\test "" {inline}

View file

@ -175,9 +175,11 @@ const ComptimeAlloc = struct {
/// `src` may be `null` if `is_const` will be set. /// `src` may be `null` if `is_const` will be set.
fn newComptimeAlloc(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, alignment: Alignment) !ComptimeAllocIndex { fn newComptimeAlloc(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, alignment: Alignment) !ComptimeAllocIndex {
const pt = sema.pt;
const init_val = try sema.typeHasOnePossibleValue(ty) orelse try pt.undefValue(ty);
const idx = sema.comptime_allocs.items.len; const idx = sema.comptime_allocs.items.len;
try sema.comptime_allocs.append(sema.gpa, .{ try sema.comptime_allocs.append(sema.gpa, .{
.val = .{ .interned = try sema.pt.intern(.{ .undef = ty.toIntern() }) }, .val = .{ .interned = init_val.toIntern() },
.is_const = false, .is_const = false,
.src = src, .src = src,
.alignment = alignment, .alignment = alignment,

View file

@ -3445,13 +3445,22 @@ pub fn optEuBaseType(ty: Type, zcu: *const Zcu) Type {
pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type { pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type {
const zcu = pt.zcu; const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) { return switch (ty.toIntern()) {
// zig fmt: off
.usize_type, .isize_type => .usize,
.c_ushort_type, .c_short_type => .c_ushort,
.c_uint_type, .c_int_type => .c_uint,
.c_ulong_type, .c_long_type => .c_ulong,
.c_ulonglong_type, .c_longlong_type => .c_ulonglong,
// zig fmt: on
else => switch (ty.zigTypeTag(zcu)) {
.int => pt.intType(.unsigned, ty.intInfo(zcu).bits), .int => pt.intType(.unsigned, ty.intInfo(zcu).bits),
.vector => try pt.vectorType(.{ .vector => try pt.vectorType(.{
.len = ty.vectorLen(zcu), .len = ty.vectorLen(zcu),
.child = (try ty.childType(zcu).toUnsigned(pt)).toIntern(), .child = (try ty.childType(zcu).toUnsigned(pt)).toIntern(),
}), }),
else => unreachable, else => unreachable,
},
}; };
} }

View file

@ -5478,14 +5478,16 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
// for the string, we still use the next u32 for the null terminator. // for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4; extra_i += (constraint.len + name.len + (2 + 3)) / 4;
if (constraint.len < 2 or constraint[0] != '=' or // +constraint
(constraint[1] == '{' and constraint[constraint.len - 1] != '}')) // =constraint
{ if (constraint.len > 1 and
return f.fail("CBE: constraint not supported: '{s}'", .{constraint}); (constraint[0] == '=' or constraint[0] == '+') and
} constraint[1] != '{') continue;
const is_reg = constraint[1] == '{'; // ={reg}
if (is_reg) { if (std.mem.startsWith(u8, constraint, "={") and
std.mem.endsWith(u8, constraint, "}"))
{
const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(zcu); const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(zcu);
try w.writeAll("register "); try w.writeAll("register ");
const output_local = try f.allocLocalValue(.{ const output_local = try f.allocLocalValue(.{
@ -5503,7 +5505,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
} }
try w.writeByte(';'); try w.writeByte(';');
try f.object.newline(); try f.object.newline();
} } else return f.fail("CBE: constraint not supported: '{s}'", .{constraint});
} }
for (inputs) |input| { for (inputs) |input| {
const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]); const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);

View file

@ -177437,6 +177437,10 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
fixed_mnem_size: { fixed_mnem_size: {
const fixed_mnem_size: Memory.Size = switch (mnem_tag) { const fixed_mnem_size: Memory.Size = switch (mnem_tag) {
.clflush => .byte, .clflush => .byte,
.crc32 => {
mnem_size.op_has_size.unset(1);
break :fixed_mnem_size;
},
.fldcw, .fnstcw, .fstcw, .fnstsw, .fstsw => .word, .fldcw, .fnstcw, .fstcw, .fnstsw, .fstsw => .word,
.fldenv, .fnstenv, .fstenv => .none, .fldenv, .fnstenv, .fstenv => .none,
.frstor, .fsave, .fnsave, .fxrstor, .fxrstor64, .fxsave, .fxsave64 => .none, .frstor, .fsave, .fnsave, .fxrstor, .fxrstor64, .fxsave, .fxsave64 => .none,

View file

@ -1499,22 +1499,18 @@ const aarch64 = struct {
.ABS64 => { .ABS64 => {
try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file); try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
}, },
.ADR_PREL_PG_HI21 => { .ADR_PREL_PG_HI21 => {
try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file); try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
}, },
.ADR_GOT_PAGE => { .ADR_GOT_PAGE => {
// TODO: relax if possible // TODO: relax if possible
symbol.flags.needs_got = true; symbol.flags.needs_got = true;
}, },
.LD64_GOT_LO12_NC, .LD64_GOT_LO12_NC,
.LD64_GOTPAGE_LO15, .LD64_GOTPAGE_LO15,
=> { => {
symbol.flags.needs_got = true; symbol.flags.needs_got = true;
}, },
.CALL26, .CALL26,
.JUMP26, .JUMP26,
=> { => {
@ -1522,25 +1518,21 @@ const aarch64 = struct {
symbol.flags.needs_plt = true; symbol.flags.needs_plt = true;
} }
}, },
.TLSLE_ADD_TPREL_HI12, .TLSLE_ADD_TPREL_HI12,
.TLSLE_ADD_TPREL_LO12_NC, .TLSLE_ADD_TPREL_LO12_NC,
=> { => {
if (is_dyn_lib) try atom.reportPicError(symbol, rel, elf_file); if (is_dyn_lib) try atom.reportPicError(symbol, rel, elf_file);
}, },
.TLSIE_ADR_GOTTPREL_PAGE21, .TLSIE_ADR_GOTTPREL_PAGE21,
.TLSIE_LD64_GOTTPREL_LO12_NC, .TLSIE_LD64_GOTTPREL_LO12_NC,
=> { => {
symbol.flags.needs_gottp = true; symbol.flags.needs_gottp = true;
}, },
.TLSGD_ADR_PAGE21, .TLSGD_ADR_PAGE21,
.TLSGD_ADD_LO12_NC, .TLSGD_ADD_LO12_NC,
=> { => {
symbol.flags.needs_tlsgd = true; symbol.flags.needs_tlsgd = true;
}, },
.TLSDESC_ADR_PAGE21, .TLSDESC_ADR_PAGE21,
.TLSDESC_LD64_LO12, .TLSDESC_LD64_LO12,
.TLSDESC_ADD_LO12, .TLSDESC_ADD_LO12,
@ -1551,18 +1543,17 @@ const aarch64 = struct {
symbol.flags.needs_tlsdesc = true; symbol.flags.needs_tlsdesc = true;
} }
}, },
.ADD_ABS_LO12_NC, .ADD_ABS_LO12_NC,
.ADR_PREL_LO21, .ADR_PREL_LO21,
.LDST8_ABS_LO12_NC, .CONDBR19,
.LDST128_ABS_LO12_NC,
.LDST16_ABS_LO12_NC, .LDST16_ABS_LO12_NC,
.LDST32_ABS_LO12_NC, .LDST32_ABS_LO12_NC,
.LDST64_ABS_LO12_NC, .LDST64_ABS_LO12_NC,
.LDST128_ABS_LO12_NC, .LDST8_ABS_LO12_NC,
.PREL32, .PREL32,
.PREL64, .PREL64,
=> {}, => {},
else => try atom.reportUnhandledRelocError(rel, elf_file), else => try atom.reportUnhandledRelocError(rel, elf_file),
} }
} }
@ -1599,7 +1590,6 @@ const aarch64 = struct {
r_offset, r_offset,
); );
}, },
.CALL26, .CALL26,
.JUMP26, .JUMP26,
=> { => {
@ -1611,27 +1601,26 @@ const aarch64 = struct {
}; };
util.writeBranchImm(disp, code); util.writeBranchImm(disp, code);
}, },
.CONDBR19 => {
const value = math.cast(i19, S + A - P) orelse return error.Overflow;
util.writeCondBrImm(value, code);
},
.PREL32 => {
const value = math.cast(i32, S + A - P) orelse return error.Overflow;
mem.writeInt(u32, code, @bitCast(value), .little);
},
.PREL64 => {
const value = S + A - P;
mem.writeInt(u64, code_buffer[r_offset..][0..8], @bitCast(value), .little);
},
.ADR_PREL_LO21 => {
const value = math.cast(i21, S + A - P) orelse return error.Overflow;
util.writeAdrInst(value, code);
},
.ADR_PREL_PG_HI21 => {
// TODO: check for relaxation of ADRP+ADD
util.writeAdrInst(try util.calcNumberOfPages(P, S + A), code);
},
.ADR_GOT_PAGE => if (target.flags.has_got) {
util.writeAdrInst(try util.calcNumberOfPages(P, G + GOT + A), code);
} else {
@@ -1644,18 +1633,15 @@ const aarch64 = struct {
r_offset,
});
},
.LD64_GOT_LO12_NC => {
assert(target.flags.has_got);
const taddr = @as(u64, @intCast(G + GOT + A));
util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
},
.ADD_ABS_LO12_NC => {
const taddr = @as(u64, @intCast(S + A));
util.writeAddImmInst(@truncate(taddr), code);
},
.LDST8_ABS_LO12_NC,
.LDST16_ABS_LO12_NC,
.LDST32_ABS_LO12_NC,
@@ -1674,44 +1660,37 @@ const aarch64 = struct {
};
util.writeLoadStoreRegInst(off, code);
},
.TLSLE_ADD_TPREL_HI12 => {
const value = math.cast(i12, (S + A - TP) >> 12) orelse
return error.Overflow;
util.writeAddImmInst(@bitCast(value), code);
},
.TLSLE_ADD_TPREL_LO12_NC => {
const value: i12 = @truncate(S + A - TP);
util.writeAddImmInst(@bitCast(value), code);
},
.TLSIE_ADR_GOTTPREL_PAGE21 => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
util.writeLoadStoreRegInst(off, code);
},
.TLSGD_ADR_PAGE21 => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSGD_ADD_LO12_NC => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
util.writeAddImmInst(off, code);
},
.TLSDESC_ADR_PAGE21 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
@@ -1722,7 +1701,6 @@ const aarch64 = struct {
util.encoding.Instruction.nop().write(code);
}
},
.TLSDESC_LD64_LO12 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
@@ -1734,7 +1712,6 @@ const aarch64 = struct {
util.encoding.Instruction.nop().write(code);
}
},
.TLSDESC_ADD_LO12 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
@@ -1747,13 +1724,11 @@ const aarch64 = struct {
util.encoding.Instruction.movz(.x0, value, .{ .lsl = .@"16" }).write(code);
}
},
.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
relocs_log.debug(" relaxing br => movk(x0, {x})", .{S + A - TP});
const value: u16 = @bitCast(@as(i16, @truncate(S + A - TP)));
util.encoding.Instruction.movk(.x0, value, .{}).write(code);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
}
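
Several of the page-based cases above (ADR_PREL_PG_HI21, ADR_GOT_PAGE, and the TLS GOT/descriptor variants) reduce to the same calculation: the signed difference between the 4 KiB page of the target address and the page of the relocated instruction, which must fit in the 21-bit ADRP immediate. A minimal stand-alone sketch of that calculation; pagesBetween is an illustrative name, not the linker's util.calcNumberOfPages:

const std = @import("std");

// Illustrative ADRP-style page delta: compare 4 KiB page numbers of the
// instruction address and the target, and require the result to fit in the
// signed 21-bit immediate that ADRP encodes.
fn pagesBetween(p: u64, target: u64) error{Overflow}!i21 {
    const delta = @as(i64, @intCast(target >> 12)) - @as(i64, @intCast(p >> 12));
    return std.math.cast(i21, delta) orelse return error.Overflow;
}

test "page delta for adrp" {
    try std.testing.expectEqual(@as(i21, 1), try pagesBetween(0x1000, 0x2000));
    try std.testing.expectEqual(@as(i21, 0), try pagesBetween(0x2f00, 0x2ff8));
}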

View file

@@ -29,6 +29,12 @@ pub fn writeBranchImm(disp: i28, code: *[4]u8) void {
inst.write(code);
}
pub fn writeCondBrImm(disp: i19, code: *[4]u8) void {
var inst: encoding.Instruction = .read(code);
inst.branch_exception_generating_system.conditional_branch_immediate.group.imm19 = @intCast(@shrExact(disp, 2));
inst.write(code);
}
const assert = std.debug.assert;
const builtin = @import("builtin");
const math = std.math;
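
The new writeCondBrImm helper above packs a pre-scaled displacement into the imm19 field of a conditional branch, which the hardware multiplies by 4. A minimal sketch of the scaling and range check a caller might perform on a raw byte displacement; condBrImm19 is a hypothetical name and not part of the linker:

const std = @import("std");

// Hypothetical helper: turn a byte displacement into the value stored in the
// imm19 field of an AArch64 conditional branch. The displacement must be a
// multiple of 4 and the scaled value must fit in 19 signed bits.
fn condBrImm19(disp: i64) error{ Unaligned, Overflow }!i19 {
    if (@rem(disp, 4) != 0) return error.Unaligned;
    const scaled = @divExact(disp, 4);
    return std.math.cast(i19, scaled) orelse return error.Overflow;
}

test "conditional branch immediate scaling" {
    try std.testing.expectEqual(@as(i19, 1), try condBrImm19(4));
    try std.testing.expectError(error.Unaligned, condBrImm19(2));
    try std.testing.expectError(error.Overflow, condBrImm19(1 << 30));
}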

View file

@@ -167,14 +167,7 @@ pub fn main() anyerror!void {
const gpa, const is_debug = gpa: {
if (build_options.debug_gpa) break :gpa .{ debug_allocator.allocator(), true };
if (native_os == .wasi) break :gpa .{ std.heap.wasm_allocator, false };
if (builtin.link_libc) {
// We would prefer to use raw libc allocator here, but cannot use
// it if it won't support the alignment we need.
if (@alignOf(std.c.max_align_t) < @max(@alignOf(i128), std.atomic.cache_line)) {
break :gpa .{ std.heap.c_allocator, false };
}
break :gpa .{ std.heap.raw_c_allocator, false };
}
if (builtin.link_libc) break :gpa .{ std.heap.c_allocator, false };
break :gpa switch (builtin.mode) {
.Debug, .ReleaseSafe => .{ debug_allocator.allocator(), true },
.ReleaseFast, .ReleaseSmall => .{ std.heap.smp_allocator, false },
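
For readers unfamiliar with the labeled-block selection pattern used above, here is a stand-alone sketch of the same idea with the build_options and WASI branches omitted; the surrounding boilerplate is illustrative and is not the compiler's actual main.zig:

const std = @import("std");
const builtin = @import("builtin");

var debug_allocator: std.heap.DebugAllocator(.{}) = .init;

// Sketch: pick one general-purpose allocator for the whole process and
// remember whether it is the leak-checking debug allocator.
pub fn main() !void {
    const gpa, const is_debug = gpa: {
        if (builtin.link_libc) break :gpa .{ std.heap.c_allocator, false };
        break :gpa switch (builtin.mode) {
            .Debug, .ReleaseSafe => .{ debug_allocator.allocator(), true },
            .ReleaseFast, .ReleaseSmall => .{ std.heap.smp_allocator, false },
        };
    };
    defer if (is_debug) {
        _ = debug_allocator.deinit();
    };

    const buf = try gpa.alloc(u8, 64);
    defer gpa.free(buf);
}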

View file

@@ -595,7 +595,7 @@ const Writer = struct {
},
.reify_slice_arg_ty => {
const reify_slice_arg_info: Zir.Inst.ReifySliceArgInfo = @enumFromInt(extended.operand);
const reify_slice_arg_info: Zir.Inst.ReifySliceArgInfo = @enumFromInt(extended.small);
const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
try stream.print("{t}, ", .{reify_slice_arg_info});
try self.writeInstRef(stream, extra.operand);

View file

@@ -1,5 +1,6 @@
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const expect = std.testing.expect;
test "@abs integers" {
@@ -48,6 +49,33 @@ fn testAbsIntegers() !void {
}
}
test "@abs signed C ABI integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try testOne(isize, usize);
try testOne(c_short, c_ushort);
try testOne(c_int, c_uint);
try testOne(c_long, c_ulong);
if (!builtin.cpu.arch.isSpirV()) try testOne(c_longlong, c_ulonglong);
}
fn testOne(comptime Signed: type, comptime Unsigned: type) !void {
var negative_one: Signed = undefined;
negative_one = -1;
const one = @abs(negative_one);
comptime assert(@TypeOf(one) == Unsigned);
try expect(one == 1);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "@abs unsigned integers" { test "@abs unsigned integers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -87,6 +115,32 @@ fn testAbsUnsignedIntegers() !void {
} }
} }
test "@abs unsigned C ABI integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try testOne(usize);
try testOne(c_ushort);
try testOne(c_uint);
try testOne(c_ulong);
if (!builtin.cpu.arch.isSpirV()) try testOne(c_ulonglong);
}
fn testOne(comptime Unsigned: type) !void {
var one: Unsigned = undefined;
one = 1;
const still_one = @abs(one);
comptime assert(@TypeOf(still_one) == Unsigned);
try expect(still_one == 1);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "@abs big int <= 128 bits" { test "@abs big int <= 128 bits" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View file

@@ -427,3 +427,12 @@ test "undefined type value" {
};
comptime assert(@TypeOf(S.undef_type) == type);
}
test "reify struct with zero fields through const arrays" {
const names: [0][]const u8 = .{};
const types: [0]type = .{};
const attrs: [0]std.builtin.Type.StructField.Attributes = .{};
const S = @Struct(.auto, null, &names, &types, &attrs);
comptime assert(@typeInfo(S) == .@"struct");
comptime assert(@typeInfo(S).@"struct".fields.len == 0);
}

View file

@@ -444,6 +444,8 @@ pub const CaseTestOptions = struct {
test_target_filters: []const []const u8,
skip_compile_errors: bool,
skip_non_native: bool,
skip_spirv: bool,
skip_wasm: bool,
skip_freebsd: bool,
skip_netbsd: bool,
skip_windows: bool,
@@ -472,6 +474,9 @@ pub fn lowerToBuildSteps(
if (options.skip_non_native and !case.target.query.isNative())
continue;
if (options.skip_spirv and case.target.query.cpu_arch != null and case.target.query.cpu_arch.?.isSpirV()) continue;
if (options.skip_wasm and case.target.query.cpu_arch != null and case.target.query.cpu_arch.?.isWasm()) continue;
if (options.skip_freebsd and case.target.query.os_tag == .freebsd) continue;
if (options.skip_netbsd and case.target.query.os_tag == .netbsd) continue;
if (options.skip_windows and case.target.query.os_tag == .windows) continue;
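
The new skip_spirv and skip_wasm fields follow the same shape as the existing skip flags in these option structs. A hypothetical build.zig excerpt showing how such flags are typically surfaced on the command line and forwarded; the -D option names are made up for illustration, only the field names come from the diff:

const std = @import("std");

// Hypothetical wiring: expose the new skip flags as build options, then copy
// the resulting booleans into CaseTestOptions / ModuleTestOptions.
pub fn build(b: *std.Build) void {
    const skip_spirv = b.option(bool, "skip-spirv", "skip SPIR-V test targets") orelse false;
    const skip_wasm = b.option(bool, "skip-wasm", "skip WebAssembly test targets") orelse false;
    _ = skip_spirv;
    _ = skip_wasm;
}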

View file

@@ -10,6 +10,7 @@ pub const Options = struct {
optimize_modes: []const std.builtin.OptimizeMode,
test_filters: []const []const u8,
test_target_filters: []const []const u8,
skip_wasm: bool,
max_rss: usize,
};
@@ -41,6 +42,8 @@
}
pub fn addTarget(libc: *const Libc, target: std.Build.ResolvedTarget) void {
if (libc.options.skip_wasm and target.query.cpu_arch != null and target.query.cpu_arch.?.isWasm()) return;
if (libc.options.test_target_filters.len > 0) {
const triple_txt = target.query.zigTriple(libc.b.allocator) catch @panic("OOM");
for (libc.options.test_target_filters) |filter| {

View file

@@ -2246,6 +2246,8 @@ const ModuleTestOptions = struct {
test_default_only: bool,
skip_single_threaded: bool,
skip_non_native: bool,
skip_spirv: bool,
skip_wasm: bool,
skip_freebsd: bool,
skip_netbsd: bool,
skip_windows: bool,
@@ -2281,6 +2283,9 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
if (options.skip_non_native and !test_target.target.isNative())
continue;
if (options.skip_spirv and test_target.target.cpu_arch != null and test_target.target.cpu_arch.?.isSpirV()) continue;
if (options.skip_wasm and test_target.target.cpu_arch != null and test_target.target.cpu_arch.?.isWasm()) continue;
if (options.skip_freebsd and test_target.target.os_tag == .freebsd) continue;
if (options.skip_netbsd and test_target.target.os_tag == .netbsd) continue;
if (options.skip_windows and test_target.target.os_tag == .windows) continue;
@@ -2339,8 +2344,11 @@ fn addOneModuleTest(
const libc_suffix = if (test_target.link_libc == true) "-libc" else "";
const model_txt = target.cpu.model.name;
// wasm32-wasi builds need more RAM, idk why
const max_rss = if (target.os.tag == .wasi)
// These emulated targets need a lot more RAM for unknown reasons.
const max_rss = if (mem.eql(u8, options.name, "std") and
(target.cpu.arch == .hexagon or
(target.cpu.arch.isRISCV() and !resolved_target.query.isNative()) or
target.cpu.arch.isWasm()))
options.max_rss * 2
else
options.max_rss;
@@ -2519,6 +2527,7 @@ pub fn wouldUseLlvm(use_llvm: ?bool, query: std.Target.Query, optimize_mode: Opt
const CAbiTestOptions = struct {
test_target_filters: []const []const u8,
skip_non_native: bool,
skip_wasm: bool,
skip_freebsd: bool,
skip_netbsd: bool,
skip_windows: bool,
@@ -2526,6 +2535,7 @@ const CAbiTestOptions = struct {
skip_linux: bool,
skip_llvm: bool,
skip_release: bool,
max_rss: usize = 0,
};
pub fn addCAbiTests(b: *std.Build, options: CAbiTestOptions) *Step {
@@ -2538,6 +2548,9 @@ pub fn addCAbiTests(b: *std.Build, options: CAbiTestOptions) *Step {
for (c_abi_targets) |c_abi_target| {
if (options.skip_non_native and !c_abi_target.target.isNative()) continue;
if (options.skip_wasm and c_abi_target.target.cpu_arch != null and c_abi_target.target.cpu_arch.?.isWasm()) continue;
if (options.skip_freebsd and c_abi_target.target.os_tag == .freebsd) continue;
if (options.skip_netbsd and c_abi_target.target.os_tag == .netbsd) continue;
if (options.skip_windows and c_abi_target.target.os_tag == .windows) continue;
@@ -2595,6 +2608,7 @@ pub fn addCAbiTests(b: *std.Build, options: CAbiTestOptions) *Step {
.root_module = test_mod,
.use_llvm = c_abi_target.use_llvm,
.use_lld = c_abi_target.use_lld,
.max_rss = options.max_rss,
});
// This test is intentionally trying to check if the external ABI is

View file

@@ -97,7 +97,8 @@ width=32 poly=0xa833982b init=0xffffffff refin=true refout=true xorout=0xff
width=32 poly=0x04c11db7 init=0xffffffff refin=false refout=false xorout=0xffffffff check=0xfc891918 residue=0xc704dd7b name="CRC-32/BZIP2"
width=32 poly=0x8001801b init=0x00000000 refin=true refout=true xorout=0x00000000 check=0x6ec2edc4 residue=0x00000000 name="CRC-32/CD-ROM-EDC"
width=32 poly=0x04c11db7 init=0x00000000 refin=false refout=false xorout=0xffffffff check=0x765e7680 residue=0xc704dd7b name="CRC-32/CKSUM"
width=32 poly=0x1edc6f41 init=0xffffffff refin=true refout=true xorout=0xffffffff check=0xe3069283 residue=0xb798b438 name="CRC-32/ISCSI"
# CRC-32C implementation is defined manually, since it has an accelerated variant.
# width=32 poly=0x1edc6f41 init=0xffffffff refin=true refout=true xorout=0xffffffff check=0xe3069283 residue=0xb798b438 name="CRC-32/ISCSI"
width=32 poly=0x04c11db7 init=0xffffffff refin=true refout=true xorout=0xffffffff check=0xcbf43926 residue=0xdebb20e3 name="CRC-32/ISO-HDLC"
width=32 poly=0x04c11db7 init=0xffffffff refin=true refout=true xorout=0x00000000 check=0x340bc6d9 residue=0x00000000 name="CRC-32/JAMCRC"
width=32 poly=0x741b8cd7 init=0xffffffff refin=true refout=true xorout=0xffffffff check=0x2d3dd0ae residue=0x00000000 name="CRC-32/KOOPMAN"
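
The commented-out CRC-32/ISCSI row is the familiar CRC-32C. For reference, a plain bitwise Zig implementation of exactly those catalog parameters (reflected polynomial 0x82f63b78, init and xorout 0xffffffff) reproduces the catalog's check value. This is only an illustration of the parameter format, not the accelerated implementation being added:

const std = @import("std");

// Minimal bitwise CRC-32C matching the "CRC-32/ISCSI" catalog row above.
fn crc32c(bytes: []const u8) u32 {
    var crc: u32 = 0xffffffff; // init
    for (bytes) |b| {
        crc ^= b; // reflected input: low byte first
        for (0..8) |_| {
            crc = if (crc & 1 != 0) (crc >> 1) ^ 0x82f63b78 else crc >> 1;
        }
    }
    return crc ^ 0xffffffff; // xorout
}

test "matches the catalog check value" {
    try std.testing.expectEqual(@as(u32, 0xe3069283), crc32c("123456789"));
}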

View file

@@ -36,6 +36,7 @@ pub fn main() anyerror!void {
try code_writer.writeAll(
\\//! This file is auto-generated by tools/update_crc_catalog.zig.
\\
\\const builtin = @import("builtin");
\\const impl = @import("crc/impl.zig");
\\
\\pub const Crc = impl.Crc;
@@ -49,6 +50,17 @@ pub fn main() anyerror!void {
\\ _ = @import("crc/test.zig");
\\}
\\
\\pub const Crc32Iscsi = switch (builtin.cpu.has(.x86, .crc32)) {
\\ true => @import("crc/crc32c.zig").Wrapper,
\\ else => Crc(u32, .{
\\ .polynomial = 0x1edc6f41,
\\ .initial = 0xffffffff,
\\ .reflect_input = true,
\\ .reflect_output = true,
\\ .xor_output = 0xffffffff,
\\ }),
\\};
\\
);
var zig_test_file = try crc_target_dir.createFile("test.zig", .{});
@@ -80,12 +92,23 @@ pub fn main() anyerror!void {
\\}
\\
\\test "crc32 koopman regression" {
\\ const crc32 = crc.Koopman;
\\ const crc32 = crc.Crc32Koopman;
\\ try testing.expectEqual(crc32.hash(""), 0x00000000);
\\ try testing.expectEqual(crc32.hash("a"), 0x0da2aa8a);
\\ try testing.expectEqual(crc32.hash("abc"), 0xba2322ac);
\\}
\\
\\test "CRC-32/ISCSI" {
\\ const Crc32Iscsi = crc.Crc32Iscsi;
\\
\\ try testing.expectEqual(@as(u32, 0xe3069283), Crc32Iscsi.hash("123456789"));
\\
\\ var c = Crc32Iscsi.init();
\\ c.update("1234");
\\ c.update("56789");
\\ try testing.expectEqual(@as(u32, 0xe3069283), c.final());
\\}
\\
);
var reader: std.Io.Reader = .fixed(catalog_txt);
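
From the caller's side, the regenerated catalog keeps exposing the alias through std.hash.crc next to the other CRC types. A small usage sketch under that assumption, mirroring the streaming API exercised by the generated test above:

const std = @import("std");

// Assumed caller-side usage of std.hash.crc.Crc32Iscsi: the one-shot hash()
// and the streaming init()/update()/final() paths should agree.
test "CRC-32C one-shot and streaming agree" {
    const Crc32Iscsi = std.hash.crc.Crc32Iscsi;
    var c = Crc32Iscsi.init();
    c.update("hello, ");
    c.update("world");
    try std.testing.expectEqual(Crc32Iscsi.hash("hello, world"), c.final());
}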

View file

@@ -1,6 +1,6 @@
//! This script updates the .c, .h, .s, and .S files that make up the start
//! files such as crt1.o. Not to be confused with
//! https://github.com/ziglang/glibc-abi-tool/ which updates the `abilists`
//! https://codeberg.org/ziglang/libc-abi-tools which updates the `abilists`
//! file.
//!
//! Example usage: