From c993af62347faf124e97b16801356d34322a216e Mon Sep 17 00:00:00 2001
From: flexicoding <97116715+flexicoding@users.noreply.github.com>
Date: Sat, 18 Feb 2023 18:07:35 +0100
Subject: [PATCH 01/36] compiler_rt: remove unexported functions
---
lib/compiler_rt/README.md | 4 ++++
lib/compiler_rt/gehf2.zig | 8 --------
2 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/lib/compiler_rt/README.md b/lib/compiler_rt/README.md
index a5d6bfdd6b..0590c33fde 100644
--- a/lib/compiler_rt/README.md
+++ b/lib/compiler_rt/README.md
@@ -260,6 +260,8 @@ Integer and Float Operations
| ✓ | __gedf2 | f64 | f64 | i32 | .. |
| ✓ | __getf2 | f128 | f128 | i32 | .. |
| ✓ | __gexf2 | f80 | f80 | i32 | .. |
+| ✓ | __aeabi_fcmpge | f32 | f32 | i32 | .. ARM |
+| ✓ | __aeabi_dcmpge | f64 | f64 | i32 | .. ARM |
| ✓ | __gekf2 | f128 | f128 | i32 | .. PPC |
| ✓ | _Qp_fge |*f128 |*f128 | bool | .. SPARC |
| ✓ | __lthf2 | f16 | f16 | i32 | `(a!=Nan) and (b!=Nan) and (a output<0` |
@@ -285,6 +287,8 @@ Integer and Float Operations
| ✓ | __gtdf2 | f64 | f64 | i32 | .. |
| ✓ | __gttf2 | f128 | f128 | i32 | .. |
| ✓ | __gtxf2 | f80 | f80 | i32 | .. |
+| ✓ | __aeabi_fcmpgt | f32 | f32 | i32 | .. ARM |
+| ✓ | __aeabi_dcmpgt | f64 | f64 | i32 | .. ARM |
| ✓ | __gtkf2 | f128 | f128 | i32 | .. PPC |
| ✓ | _Qp_fgt |*f128 |*f128 | bool | .. SPARC |
| | | | | | **Float Arithmetic** |
diff --git a/lib/compiler_rt/gehf2.zig b/lib/compiler_rt/gehf2.zig
index e3df4d3087..6bea4e164a 100644
--- a/lib/compiler_rt/gehf2.zig
+++ b/lib/compiler_rt/gehf2.zig
@@ -21,11 +21,3 @@ pub fn __gehf2(a: f16, b: f16) callconv(.C) i32 {
pub fn __gthf2(a: f16, b: f16) callconv(.C) i32 {
return __gehf2(a, b);
}
-
-fn __aeabi_fcmpge(a: f16, b: f16) callconv(.AAPCS) i32 {
- return @boolToInt(comparef.cmpf2(f16, comparef.GE, a, b) != .Less);
-}
-
-fn __aeabi_fcmpgt(a: f16, b: f16) callconv(.AAPCS) i32 {
- return @boolToInt(comparef.cmpf2(f16, comparef.LE, a, b) == .Greater);
-}
From 07630eb696a4c7097fadf9e0261411d591a82038 Mon Sep 17 00:00:00 2001
From: Matt Knight
Date: Sat, 18 Feb 2023 14:10:27 -0500
Subject: [PATCH 02/36] Value: implement writeToMemory for packed unions
---
src/value.zig | 19 +++++++++++++++++++
test/behavior/comptime_memory.zig | 18 ++++++++++++++++++
2 files changed, 37 insertions(+)
diff --git a/src/value.zig b/src/value.zig
index 306e31c0a7..98842a4ca7 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1340,6 +1340,14 @@ pub const Value = extern union {
const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?;
std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
},
+ .Union => switch (ty.containerLayout()) {
+ .Auto => unreachable,
+ .Extern => @panic("TODO implement writeToMemory for extern unions"),
+ .Packed => {
+ const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ },
+ },
else => @panic("TODO implement writeToMemory for more types"),
}
}
@@ -1430,6 +1438,17 @@ pub const Value = extern union {
}
},
},
+ .Union => switch (ty.containerLayout()) {
+ .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+ .Extern => unreachable, // Handled in non-packed writeToMemory
+ .Packed => {
+ const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
+ const field_type = ty.unionFields().values()[field_index.?].ty;
+ const field_val = val.fieldValue(field_type, field_index.?);
+
+ field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
+ },
+ },
else => @panic("TODO implement writeToPackedMemory for more types"),
}
}
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index e08dad68f0..a4f9f2f7a9 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -394,3 +394,21 @@ test "accessing reinterpreted memory of parent object" {
try testing.expect(b == expected);
}
}
+
+test "bitcast packed union to integer" {
+ const U = packed union {
+ x: u1,
+ y: u2,
+ };
+
+ comptime {
+ const a = U{ .x = 1 };
+ const b = U{ .y = 2 };
+ const cast_a = @bitCast(u2, a);
+ const cast_b = @bitCast(u2, b);
+
+ // truncated because the upper bit is garbage memory that we don't care about
+ try testing.expectEqual(@as(u1, 1), @truncate(u1, cast_a));
+ try testing.expectEqual(@as(u2, 2), cast_b);
+ }
+}
From a250af5a51fa3f7af6b7f13a9613c79e6cd94fc3 Mon Sep 17 00:00:00 2001
From: Edoardo Vacchi
Date: Thu, 16 Feb 2023 18:16:31 +0100
Subject: [PATCH 03/36] wasi: add Preopens.findDir, update tests to preopen
`/tmp`
Signed-off-by: Edoardo Vacchi
---
lib/std/Build/CompileStep.zig | 4 +-
lib/std/fs/test.zig | 5 ++
lib/std/fs/wasi.zig | 108 ++++++++++++++++++++++++++++++++++
3 files changed, 116 insertions(+), 1 deletion(-)
diff --git a/lib/std/Build/CompileStep.zig b/lib/std/Build/CompileStep.zig
index 1f145f8171..eeb7da1596 100644
--- a/lib/std/Build/CompileStep.zig
+++ b/lib/std/Build/CompileStep.zig
@@ -1555,7 +1555,9 @@ fn make(step: *Step) !void {
try zig_args.append("--test-cmd");
try zig_args.append(bin_name);
try zig_args.append("--test-cmd");
- try zig_args.append("--dir=.");
+ try zig_args.append("--mapdir=/::.");
+ try zig_args.append("--test-cmd");
+ try zig_args.append("--mapdir=/tmp::/tmp");
try zig_args.append("--test-cmd-bin");
} else {
try zig_args.append("--test-no-exec");
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 16458d7dc4..957fe4902f 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -13,6 +13,11 @@ const File = std.fs.File;
const tmpDir = testing.tmpDir;
const tmpIterableDir = testing.tmpIterableDir;
+// ensure tests for fs/wasi.zig are run
+comptime {
+ _ = std.fs.wasi;
+}
+
test "Dir.readLink" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig
index fa9de0dff1..a8b9fae2e9 100644
--- a/lib/std/fs/wasi.zig
+++ b/lib/std/fs/wasi.zig
@@ -9,6 +9,7 @@ const Allocator = mem.Allocator;
const wasi = std.os.wasi;
const fd_t = wasi.fd_t;
const prestat_t = wasi.prestat_t;
+const testing = std.testing;
pub const Preopens = struct {
// Indexed by file descriptor number.
@@ -22,6 +23,30 @@ pub const Preopens = struct {
}
return null;
}
+
+ pub fn findDir(p: Preopens, full_path: []const u8, flags: std.fs.Dir.OpenDirOptions) std.fs.Dir.OpenError!std.fs.Dir {
+ if (p.names.len <= 2)
+ return std.fs.Dir.OpenError.BadPathName; // there are no preopens
+
+ var prefix: []const u8 = "";
+ var fd: usize = 0;
+ for (p.names) |preopen, i| {
+ if (i > 2 and wasiPathPrefixMatches(preopen, full_path)) {
+ if (preopen.len > prefix.len) {
+ prefix = preopen;
+ fd = i;
+ }
+ }
+ }
+
+ // still no match
+ if (fd == 0) {
+ return std.fs.Dir.OpenError.FileNotFound;
+ }
+ const d = std.fs.Dir{ .fd = @intCast(os.fd_t, fd) };
+ const rel = full_path[prefix.len + 1 .. full_path.len];
+ return d.openDirWasi(rel, flags);
+ }
};
pub fn preopensAlloc(gpa: Allocator) Allocator.Error!Preopens {
@@ -54,3 +79,86 @@ pub fn preopensAlloc(gpa: Allocator) Allocator.Error!Preopens {
names.appendAssumeCapacity(name);
}
}
+
+fn wasiPathPrefixMatches(prefix: []const u8, path: []const u8) bool {
+ if (path[0] != '/' and prefix.len == 0)
+ return true;
+
+ if (path.len < prefix.len)
+ return false;
+
+ if (prefix.len == 1) {
+ return prefix[0] == path[0];
+ }
+
+ if (!std.mem.eql(u8, path[0..prefix.len], prefix)) {
+ return false;
+ }
+
+ return path.len == prefix.len or
+ path[prefix.len] == '/';
+}
+
+test "preopens" {
+ if (builtin.os.tag != .wasi) return error.SkipZigTest;
+
+ // lifted from `testing`
+ const random_bytes_count = 12;
+ const buf_size = 256;
+ const path = "/tmp";
+ const tmp_file_name = "file.txt";
+ const nonsense = "nonsense";
+
+ var random_bytes: [random_bytes_count]u8 = undefined;
+ var buf: [buf_size]u8 = undefined;
+
+ std.crypto.random.bytes(&random_bytes);
+ const sub_path = std.fs.base64_encoder.encode(&buf, &random_bytes);
+
+ // find all preopens
+ const allocator = std.heap.page_allocator;
+ var wasi_preopens = try std.fs.wasi.preopensAlloc(allocator);
+
+ // look for the exact "/tmp" preopen match
+ const fd = std.fs.wasi.Preopens.find(wasi_preopens, path) orelse unreachable;
+ const base_dir = std.fs.Dir{ .fd = fd };
+
+ var tmp_path = base_dir.makeOpenPath(sub_path, .{}) catch
+ @panic("unable to make tmp dir for testing: /tmp/");
+
+ defer tmp_path.close();
+ defer tmp_path.deleteTree(sub_path) catch {};
+
+ // create a file under /tmp//file.txt with contents "nonsense"
+ try tmp_path.writeFile(tmp_file_name, nonsense);
+
+ // now look for the file as a single path
+ var tmp_dir_path_buf: [buf_size]u8 = undefined;
+ const tmp_dir_path = try std.fmt.bufPrint(&tmp_dir_path_buf, "{s}/{s}", .{ path, sub_path });
+
+ // find "/tmp/" using `findDir()`
+ const tmp_file_dir = try wasi_preopens.findDir(tmp_dir_path, .{});
+
+ const text = try tmp_file_dir.readFile(tmp_file_name, &buf);
+
+ // ensure the file contents match "nonsense"
+ try testing.expect(std.mem.eql(u8, nonsense, text));
+}
+
+test "wasiPathPrefixMatches" {
+ try testing.expect(wasiPathPrefixMatches("/", "/foo"));
+ try testing.expect(wasiPathPrefixMatches("/testcases", "/testcases/test.txt"));
+ try testing.expect(wasiPathPrefixMatches("", "foo"));
+ try testing.expect(wasiPathPrefixMatches("foo", "foo"));
+ try testing.expect(wasiPathPrefixMatches("foo", "foo/bar"));
+ try testing.expect(!wasiPathPrefixMatches("bar", "foo/bar"));
+ try testing.expect(!wasiPathPrefixMatches("bar", "foo"));
+ try testing.expect(wasiPathPrefixMatches("foo", "foo/bar"));
+ try testing.expect(!wasiPathPrefixMatches("fooo", "foo"));
+ try testing.expect(!wasiPathPrefixMatches("foo", "fooo"));
+ try testing.expect(!wasiPathPrefixMatches("foo/bar", "foo"));
+ try testing.expect(!wasiPathPrefixMatches("bar/foo", "foo"));
+ try testing.expect(wasiPathPrefixMatches("/foo", "/foo"));
+ try testing.expect(wasiPathPrefixMatches("/foo", "/foo"));
+ try testing.expect(wasiPathPrefixMatches("/foo", "/foo/"));
+}
From 4940afc434170966a9445b789374ba2a0f0318ab Mon Sep 17 00:00:00 2001
From: Edoardo Vacchi
Date: Sat, 18 Feb 2023 22:29:11 +0100
Subject: [PATCH 04/36] skip when builtin.link_libc
---
lib/std/fs/wasi.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig
index a8b9fae2e9..d62b4137d6 100644
--- a/lib/std/fs/wasi.zig
+++ b/lib/std/fs/wasi.zig
@@ -100,7 +100,7 @@ fn wasiPathPrefixMatches(prefix: []const u8, path: []const u8) bool {
}
test "preopens" {
- if (builtin.os.tag != .wasi) return error.SkipZigTest;
+ if (builtin.os.tag != .wasi or builtin.link_libc) return error.SkipZigTest;
// lifted from `testing`
const random_bytes_count = 12;
From 5e7b09ce9fbc95ec9fb9e277d262b9b5a5aa1917 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 11:31:31 -0700
Subject: [PATCH 05/36] std.Build.RunStep: fix default caching logic
RunStep is supposed to auto-detect whether the intent is for
side-effects or for producing an output file. The auto-detection logic
was incorrect, and this commit fixes it.
I tested this manually locally. Automated testing will require a more
significant investment in the test harness, which I will work on in a
future enhancement.
closes #14666
---
lib/std/Build/RunStep.zig | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/lib/std/Build/RunStep.zig b/lib/std/Build/RunStep.zig
index 5bc271409a..d3f48e4e87 100644
--- a/lib/std/Build/RunStep.zig
+++ b/lib/std/Build/RunStep.zig
@@ -188,6 +188,10 @@ fn stdIoActionToBehavior(action: StdIoAction) std.ChildProcess.StdIo {
}
fn needOutputCheck(self: RunStep) bool {
+ switch (self.condition) {
+ .always => return false,
+ .output_outdated => {},
+ }
if (self.extra_file_dependencies.len > 0) return true;
for (self.argv.items) |arg| switch (arg) {
@@ -195,10 +199,7 @@ fn needOutputCheck(self: RunStep) bool {
else => continue,
};
- return switch (self.condition) {
- .always => false,
- .output_outdated => true,
- };
+ return false;
}
fn make(step: *Step) !void {
From 1b7055b514955f1787937b2ef6097d2e0663da74 Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Mon, 30 Jan 2023 19:10:03 +0200
Subject: [PATCH 06/36] parse and render new for loop syntax
---
lib/std/zig/Ast.zig | 117 +++++++++++++++++++---
lib/std/zig/Parse.zig | 193 +++++++++++++++++++++++++-----------
lib/std/zig/parser_test.zig | 49 +++++++--
lib/std/zig/render.zig | 156 +++++++++++++++++++++++++----
4 files changed, 410 insertions(+), 105 deletions(-)
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 94cdcff4e7..42eb280966 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -386,6 +386,12 @@ pub fn renderError(tree: Ast, parse_error: Error, stream: anytype) !void {
.expected_comma_after_switch_prong => {
return stream.writeAll("expected ',' after switch prong");
},
+ .expected_comma_after_for_operand => {
+ return stream.writeAll("expected ',' after for operand");
+ },
+ .expected_comma_after_capture => {
+ return stream.writeAll("expected ',' after for capture");
+ },
.expected_initializer => {
return stream.writeAll("expected field initializer");
},
@@ -420,6 +426,12 @@ pub fn renderError(tree: Ast, parse_error: Error, stream: anytype) !void {
.var_const_decl => {
return stream.writeAll("use 'var' or 'const' to declare variable");
},
+ .extra_for_capture => {
+ return stream.writeAll("excess for captures");
+ },
+ .for_input_not_captured => {
+ return stream.writeAll("for input is not captured");
+ },
.expected_token => {
const found_tag = token_tags[parse_error.token + @boolToInt(parse_error.token_is_prev)];
@@ -568,6 +580,7 @@ pub fn firstToken(tree: Ast, node: Node.Index) TokenIndex {
.call,
.call_comma,
.switch_range,
+ .for_range,
.error_union,
=> n = datas[n].lhs,
@@ -845,6 +858,12 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
.switch_range,
=> n = datas[n].rhs,
+ .for_range => if (datas[n].rhs != 0) {
+ n = datas[n].rhs;
+ } else {
+ return main_tokens[n] + end_offset;
+ },
+
.field_access,
.unwrap_optional,
.grouped_expression,
@@ -1263,11 +1282,15 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
assert(extra.else_expr != 0);
n = extra.else_expr;
},
- .@"if", .@"for" => {
+ .@"if" => {
const extra = tree.extraData(datas[n].rhs, Node.If);
assert(extra.else_expr != 0);
n = extra.else_expr;
},
+ .@"for" => {
+ const extra = @bitCast(Node.For, datas[n].rhs);
+ n = tree.extra_data[datas[n].lhs + extra.inputs + @boolToInt(extra.has_else)];
+ },
.@"suspend" => {
if (datas[n].lhs != 0) {
n = datas[n].lhs;
@@ -1916,26 +1939,28 @@ pub fn whileFull(tree: Ast, node: Node.Index) full.While {
});
}
-pub fn forSimple(tree: Ast, node: Node.Index) full.While {
- const data = tree.nodes.items(.data)[node];
- return tree.fullWhileComponents(.{
- .while_token = tree.nodes.items(.main_token)[node],
- .cond_expr = data.lhs,
- .cont_expr = 0,
+pub fn forSimple(tree: Ast, node: Node.Index) full.For {
+ const data = &tree.nodes.items(.data)[node];
+ const inputs: *[1]Node.Index = &data.lhs;
+ return tree.fullForComponents(.{
+ .for_token = tree.nodes.items(.main_token)[node],
+ .inputs = inputs[0..1],
.then_expr = data.rhs,
.else_expr = 0,
});
}
-pub fn forFull(tree: Ast, node: Node.Index) full.While {
+pub fn forFull(tree: Ast, node: Node.Index) full.For {
const data = tree.nodes.items(.data)[node];
- const extra = tree.extraData(data.rhs, Node.If);
- return tree.fullWhileComponents(.{
- .while_token = tree.nodes.items(.main_token)[node],
- .cond_expr = data.lhs,
- .cont_expr = 0,
- .then_expr = extra.then_expr,
- .else_expr = extra.else_expr,
+ const extra = @bitCast(Node.For, data.rhs);
+ const inputs = tree.extra_data[data.lhs..][0..extra.inputs];
+ const then_expr = tree.extra_data[data.lhs + extra.inputs];
+ const else_expr = if (extra.has_else) tree.extra_data[data.lhs + extra.inputs + 1] else 0;
+ return tree.fullForComponents(.{
+ .for_token = tree.nodes.items(.main_token)[node],
+ .inputs = inputs,
+ .then_expr = then_expr,
+ .else_expr = else_expr,
});
}
@@ -2243,6 +2268,33 @@ fn fullWhileComponents(tree: Ast, info: full.While.Components) full.While {
return result;
}
+fn fullForComponents(tree: Ast, info: full.For.Components) full.For {
+ const token_tags = tree.tokens.items(.tag);
+ var result: full.For = .{
+ .ast = info,
+ .inline_token = null,
+ .label_token = null,
+ .payload_token = undefined,
+ .else_token = undefined,
+ };
+ var tok_i = info.for_token - 1;
+ if (token_tags[tok_i] == .keyword_inline) {
+ result.inline_token = tok_i;
+ tok_i -= 1;
+ }
+ if (token_tags[tok_i] == .colon and
+ token_tags[tok_i - 1] == .identifier)
+ {
+ result.label_token = tok_i - 1;
+ }
+ const last_cond_token = tree.lastToken(info.inputs[info.inputs.len - 1]);
+ result.payload_token = last_cond_token + 3 + @boolToInt(token_tags[last_cond_token + 1] == .comma);
+ if (info.else_expr != 0) {
+ result.else_token = tree.lastToken(info.then_expr) + 1;
+ }
+ return result;
+}
+
fn fullCallComponents(tree: Ast, info: full.Call.Components) full.Call {
const token_tags = tree.tokens.items(.tag);
var result: full.Call = .{
@@ -2279,6 +2331,12 @@ pub fn fullWhile(tree: Ast, node: Node.Index) ?full.While {
.while_simple => tree.whileSimple(node),
.while_cont => tree.whileCont(node),
.@"while" => tree.whileFull(node),
+ else => null,
+ };
+}
+
+pub fn fullFor(tree: Ast, node: Node.Index) ?full.For {
+ return switch (tree.nodes.items(.tag)[node]) {
.for_simple => tree.forSimple(node),
.@"for" => tree.forFull(node),
else => null,
@@ -2453,6 +2511,22 @@ pub const full = struct {
};
};
+ pub const For = struct {
+ ast: Components,
+ inline_token: ?TokenIndex,
+ label_token: ?TokenIndex,
+ payload_token: TokenIndex,
+ /// Populated only if else_expr != 0.
+ else_token: TokenIndex,
+
+ pub const Components = struct {
+ for_token: TokenIndex,
+ inputs: []const Node.Index,
+ then_expr: Node.Index,
+ else_expr: Node.Index,
+ };
+ };
+
pub const ContainerField = struct {
comptime_token: ?TokenIndex,
ast: Components,
@@ -2795,6 +2869,8 @@ pub const Error = struct {
expected_comma_after_param,
expected_comma_after_initializer,
expected_comma_after_switch_prong,
+ expected_comma_after_for_operand,
+ expected_comma_after_capture,
expected_initializer,
mismatched_binary_op_whitespace,
invalid_ampersand_ampersand,
@@ -2802,6 +2878,8 @@ pub const Error = struct {
expected_var_const,
wrong_equal_var_decl,
var_const_decl,
+ extra_for_capture,
+ for_input_not_captured,
zig_style_container,
previous_field,
@@ -3112,8 +3190,10 @@ pub const Node = struct {
@"while",
/// `for (lhs) rhs`.
for_simple,
- /// `for (lhs) a else b`. `if_list[rhs]`.
+ /// `for (lhs[0..inputs]) lhs[inputs + 1] else lhs[inputs + 2]`. `For[rhs]`.
@"for",
+ /// `lhs..rhs`.
+ for_range,
/// `if (lhs) rhs`.
/// `if (lhs) |a| rhs`.
if_simple,
@@ -3369,6 +3449,11 @@ pub const Node = struct {
then_expr: Index,
};
+ pub const For = packed struct(u32) {
+ inputs: u31,
+ has_else: bool,
+ };
+
pub const FnProtoOne = struct {
/// Populated if there is exactly 1 parameter. Otherwise there are 0 parameters.
param: Index,
diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig
index d498366b34..7ef884d22c 100644
--- a/lib/std/zig/Parse.zig
+++ b/lib/std/zig/Parse.zig
@@ -104,6 +104,8 @@ fn warnMsg(p: *Parse, msg: Ast.Error) error{OutOfMemory}!void {
.expected_comma_after_param,
.expected_comma_after_initializer,
.expected_comma_after_switch_prong,
+ .expected_comma_after_for_operand,
+ .expected_comma_after_capture,
.expected_semi_or_else,
.expected_semi_or_lbrace,
.expected_token,
@@ -1149,22 +1151,18 @@ fn parseLoopStatement(p: *Parse) !Node.Index {
return p.fail(.expected_inlinable);
}
-/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
-///
/// ForStatement
/// <- ForPrefix BlockExpr ( KEYWORD_else Statement )?
/// / ForPrefix AssignExpr ( SEMICOLON / KEYWORD_else Statement )
fn parseForStatement(p: *Parse) !Node.Index {
const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
- // TODO propose to change the syntax so that semicolons are always required
- // inside while statements, even if there is an `else`.
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ const inputs = try p.forPrefix();
+
var else_required = false;
+ var seen_semicolon = false;
const then_expr = blk: {
const block_expr = try p.parseBlockExpr();
if (block_expr != 0) break :blk block_expr;
@@ -1173,39 +1171,40 @@ fn parseForStatement(p: *Parse) !Node.Index {
return p.fail(.expected_block_or_assignment);
}
if (p.eatToken(.semicolon)) |_| {
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = assign_expr,
- },
- });
+ seen_semicolon = true;
+ break :blk assign_expr;
}
else_required = true;
break :blk assign_expr;
};
- _ = p.eatToken(.keyword_else) orelse {
- if (else_required) {
- try p.warn(.expected_semi_or_else);
- }
+ var has_else = false;
+ if (!seen_semicolon and p.eatToken(.keyword_else) != null) {
+ try p.scratch.append(p.gpa, then_expr);
+ const else_stmt = try p.expectStatement(false);
+ try p.scratch.append(p.gpa, else_stmt);
+ has_else = true;
+ } else if (inputs == 1) {
+ if (else_required) try p.warn(.expected_semi_or_else);
return p.addNode(.{
.tag = .for_simple,
.main_token = for_token,
.data = .{
- .lhs = array_expr,
+ .lhs = p.scratch.items[scratch_top],
.rhs = then_expr,
},
});
- };
+ } else {
+ if (else_required) try p.warn(.expected_semi_or_else);
+ try p.scratch.append(p.gpa, then_expr);
+ }
return p.addNode(.{
.tag = .@"for",
.main_token = for_token,
.data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = try p.expectStatement(false),
+ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
+ .rhs = @bitCast(u32, Node.For{
+ .inputs = @intCast(u31, inputs),
+ .has_else = has_else,
}),
},
});
@@ -2056,42 +2055,118 @@ fn parseBlock(p: *Parse) !Node.Index {
}
}
-/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
-///
/// ForExpr <- ForPrefix Expr (KEYWORD_else Expr)?
fn parseForExpr(p: *Parse) !Node.Index {
const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
+
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ const inputs = try p.forPrefix();
const then_expr = try p.expectExpr();
- _ = p.eatToken(.keyword_else) orelse {
+ var has_else = false;
+ if (p.eatToken(.keyword_else)) |_| {
+ try p.scratch.append(p.gpa, then_expr);
+ const else_expr = try p.expectExpr();
+ try p.scratch.append(p.gpa, else_expr);
+ has_else = true;
+ } else if (inputs == 1) {
return p.addNode(.{
.tag = .for_simple,
.main_token = for_token,
.data = .{
- .lhs = array_expr,
+ .lhs = p.scratch.items[scratch_top],
.rhs = then_expr,
},
});
- };
- const else_expr = try p.expectExpr();
+ } else {
+ try p.scratch.append(p.gpa, then_expr);
+ }
return p.addNode(.{
.tag = .@"for",
.main_token = for_token,
.data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
+ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
+ .rhs = @bitCast(u32, Node.For{
+ .inputs = @intCast(u31, inputs),
+ .has_else = has_else,
}),
},
});
}
+/// ForPrefix <- KEYWORD_for LPAREN ForInput (COMMA ForInput)* COMMA? RPAREN ForPayload
+///
+/// ForInput <- Expr (DOT2 Expr?)?
+///
+/// ForPayload <- PIPE ASTERISK? IDENTIFIER (COMMA ASTERISK? IDENTIFIER)* PIPE
+fn forPrefix(p: *Parse) Error!usize {
+ const start = p.scratch.items.len;
+ _ = try p.expectToken(.l_paren);
+
+ while (true) {
+ var input = try p.expectExpr();
+ if (p.eatToken(.ellipsis2)) |ellipsis| {
+ input = try p.addNode(.{
+ .tag = .for_range,
+ .main_token = ellipsis,
+ .data = .{
+ .lhs = input,
+ .rhs = try p.parseExpr(),
+ },
+ });
+ }
+
+ try p.scratch.append(p.gpa, input);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_paren => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_for_operand),
+ }
+ if (p.eatToken(.r_paren)) |_| break;
+ }
+ const inputs = p.scratch.items.len - start;
+
+ _ = p.eatToken(.pipe) orelse {
+ try p.warn(.expected_loop_payload);
+ return inputs;
+ };
+
+ var warned_excess = false;
+ var captures: u32 = 0;
+ while (true) {
+ _ = p.eatToken(.asterisk);
+ const identifier = try p.expectToken(.identifier);
+ captures += 1;
+ if (captures > inputs and !warned_excess) {
+ try p.warnMsg(.{ .tag = .extra_for_capture, .token = identifier });
+ warned_excess = true;
+ }
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .pipe => {
+ p.tok_i += 1;
+ break;
+ },
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_capture),
+ }
+ if (p.eatToken(.pipe)) |_| break;
+ }
+
+ if (captures < inputs) {
+ const index = p.scratch.items.len - captures;
+ const input = p.nodes.items(.main_token)[p.scratch.items[index]];
+ try p.warnMsg(.{ .tag = .for_input_not_captured, .token = input });
+ }
+ return inputs;
+}
+
/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
///
/// WhileExpr <- WhilePrefix Expr (KEYWORD_else Payload? Expr)?
@@ -2752,37 +2827,41 @@ fn expectPrimaryTypeExpr(p: *Parse) !Node.Index {
return node;
}
-/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
-///
/// ForTypeExpr <- ForPrefix TypeExpr (KEYWORD_else TypeExpr)?
fn parseForTypeExpr(p: *Parse) !Node.Index {
const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
+
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ const inputs = try p.forPrefix();
const then_expr = try p.expectTypeExpr();
- _ = p.eatToken(.keyword_else) orelse {
+ var has_else = false;
+ if (p.eatToken(.keyword_else)) |_| {
+ try p.scratch.append(p.gpa, then_expr);
+ const else_expr = try p.expectTypeExpr();
+ try p.scratch.append(p.gpa, else_expr);
+ has_else = true;
+ } else if (inputs == 1) {
return p.addNode(.{
.tag = .for_simple,
.main_token = for_token,
.data = .{
- .lhs = array_expr,
+ .lhs = p.scratch.items[scratch_top],
.rhs = then_expr,
},
});
- };
- const else_expr = try p.expectTypeExpr();
+ } else {
+ try p.scratch.append(p.gpa, then_expr);
+ }
return p.addNode(.{
.tag = .@"for",
.main_token = for_token,
.data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
+ .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
+ .rhs = @bitCast(u32, Node.For{
+ .inputs = @intCast(u31, inputs),
+ .has_else = has_else,
}),
},
});
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 3c44322ccc..d24dedfeff 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -3457,11 +3457,11 @@ test "zig fmt: for" {
\\ for (a) |*v|
\\ continue;
\\
- \\ for (a) |v, i| {
+ \\ for (a, 0..) |v, i| {
\\ continue;
\\ }
\\
- \\ for (a) |v, i|
+ \\ for (a, 0..) |v, i|
\\ continue;
\\
\\ for (a) |b| switch (b) {
@@ -3469,17 +3469,24 @@ test "zig fmt: for" {
\\ d => {},
\\ };
\\
- \\ const res = for (a) |v, i| {
+ \\ const res = for (a, 0..) |v, i| {
\\ break v;
\\ } else {
\\ unreachable;
\\ };
\\
\\ var num: usize = 0;
- \\ inline for (a) |v, i| {
+ \\ inline for (a, 0..1) |v, i| {
\\ num += v;
\\ num += i;
\\ }
+ \\
+ \\ for (a, b) |
+ \\ long_name,
+ \\ another_long_name,
+ \\ | {
+ \\ continue;
+ \\ }
\\}
\\
);
@@ -3499,6 +3506,26 @@ test "zig fmt: for" {
\\}
\\
);
+
+ try testTransform(
+ \\test "fix for" {
+ \\ for (a, b, c,) |long, another, third,| {}
+ \\}
+ \\
+ ,
+ \\test "fix for" {
+ \\ for (
+ \\ a,
+ \\ b,
+ \\ c,
+ \\ ) |
+ \\ long,
+ \\ another,
+ \\ third,
+ \\ | {}
+ \\}
+ \\
+ );
}
test "zig fmt: for if" {
@@ -4358,7 +4385,7 @@ test "zig fmt: hex literals with underscore separators" {
try testTransform(
\\pub fn orMask(a: [ 1_000 ]u64, b: [ 1_000] u64) [1_000]u64 {
\\ var c: [1_000]u64 = [1]u64{ 0xFFFF_FFFF_FFFF_FFFF}**1_000;
- \\ for (c [ 1_0 .. ]) |_, i| {
+ \\ for (c [ 1_0 .. ], 0..) |_, i| {
\\ c[i] = (a[i] | b[i]) & 0xCCAA_CCAA_CCAA_CCAA;
\\ }
\\ return c;
@@ -4368,7 +4395,7 @@ test "zig fmt: hex literals with underscore separators" {
,
\\pub fn orMask(a: [1_000]u64, b: [1_000]u64) [1_000]u64 {
\\ var c: [1_000]u64 = [1]u64{0xFFFF_FFFF_FFFF_FFFF} ** 1_000;
- \\ for (c[1_0..]) |_, i| {
+ \\ for (c[1_0..], 0..) |_, i| {
\\ c[i] = (a[i] | b[i]) & 0xCCAA_CCAA_CCAA_CCAA;
\\ }
\\ return c;
@@ -4880,10 +4907,10 @@ test "zig fmt: remove trailing whitespace after doc comment" {
test "zig fmt: for loop with ptr payload and index" {
try testCanonical(
\\test {
- \\ for (self.entries.items) |*item, i| {}
- \\ for (self.entries.items) |*item, i|
+ \\ for (self.entries.items, 0..) |*item, i| {}
+ \\ for (self.entries.items, 0..) |*item, i|
\\ a = b;
- \\ for (self.entries.items) |*item, i| a = b;
+ \\ for (self.entries.items, 0..) |*item, i| a = b;
\\}
\\
);
@@ -5471,7 +5498,7 @@ test "zig fmt: canonicalize symbols (primitive types)" {
\\ _ = @"void": {
\\ break :@"void";
\\ };
- \\ for ("hi") |@"u3", @"i4"| {
+ \\ for ("hi", 0..) |@"u3", @"i4"| {
\\ _ = @"u3";
\\ _ = @"i4";
\\ }
@@ -5523,7 +5550,7 @@ test "zig fmt: canonicalize symbols (primitive types)" {
\\ _ = void: {
\\ break :void;
\\ };
- \\ for ("hi") |@"u3", @"i4"| {
+ \\ for ("hi", 0..) |@"u3", @"i4"| {
\\ _ = @"u3";
\\ _ = @"i4";
\\ }
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 97bc85efac..0e8d3125ac 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -353,6 +353,16 @@ fn renderExpression(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
try renderToken(ais, tree, main_tokens[node], .none);
return renderExpression(gpa, ais, tree, infix.rhs, space);
},
+ .for_range => {
+ const infix = datas[node];
+ try renderExpression(gpa, ais, tree, infix.lhs, .none);
+ if (infix.rhs != 0) {
+ try renderToken(ais, tree, main_tokens[node], .none);
+ return renderExpression(gpa, ais, tree, infix.rhs, space);
+ } else {
+ return renderToken(ais, tree, main_tokens[node], space);
+ }
+ },
.add,
.add_wrap,
@@ -694,9 +704,11 @@ fn renderExpression(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
.while_simple,
.while_cont,
.@"while",
+ => return renderWhile(gpa, ais, tree, tree.fullWhile(node).?, space),
+
.for_simple,
.@"for",
- => return renderWhile(gpa, ais, tree, tree.fullWhile(node).?, space),
+ => return renderFor(gpa, ais, tree, tree.fullFor(node).?, space),
.if_simple,
.@"if",
@@ -1054,10 +1066,9 @@ fn renderIf(gpa: Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: S
}, space);
}
-/// Note that this function is additionally used to render if and for expressions, with
+/// Note that this function is additionally used to render if expressions, with
/// respective values set to null.
fn renderWhile(gpa: Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
- const node_tags = tree.nodes.items(.tag);
const token_tags = tree.tokens.items(.tag);
if (while_node.label_token) |label| {
@@ -1108,9 +1119,34 @@ fn renderWhile(gpa: Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While,
last_prefix_token = tree.lastToken(while_node.ast.cont_expr) + 1; // rparen
}
- const then_expr_is_block = nodeIsBlock(node_tags[while_node.ast.then_expr]);
+ try renderThenElse(
+ gpa,
+ ais,
+ tree,
+ last_prefix_token,
+ while_node.ast.then_expr,
+ while_node.else_token,
+ while_node.error_token,
+ while_node.ast.else_expr,
+ space,
+ );
+}
+
+fn renderThenElse(
+ gpa: Allocator,
+ ais: *Ais,
+ tree: Ast,
+ last_prefix_token: Ast.TokenIndex,
+ then_expr: Ast.Node.Index,
+ else_token: Ast.TokenIndex,
+ maybe_error_token: ?Ast.TokenIndex,
+ else_expr: Ast.Node.Index,
+ space: Space,
+) Error!void {
+ const node_tags = tree.nodes.items(.tag);
+ const then_expr_is_block = nodeIsBlock(node_tags[then_expr]);
const indent_then_expr = !then_expr_is_block and
- !tree.tokensOnSameLine(last_prefix_token, tree.firstToken(while_node.ast.then_expr));
+ !tree.tokensOnSameLine(last_prefix_token, tree.firstToken(then_expr));
if (indent_then_expr or (then_expr_is_block and ais.isLineOverIndented())) {
ais.pushIndentNextLine();
try renderToken(ais, tree, last_prefix_token, .newline);
@@ -1119,45 +1155,115 @@ fn renderWhile(gpa: Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While,
try renderToken(ais, tree, last_prefix_token, .space);
}
- if (while_node.ast.else_expr != 0) {
+ if (else_expr != 0) {
if (indent_then_expr) {
ais.pushIndent();
- try renderExpression(gpa, ais, tree, while_node.ast.then_expr, .newline);
+ try renderExpression(gpa, ais, tree, then_expr, .newline);
ais.popIndent();
} else {
- try renderExpression(gpa, ais, tree, while_node.ast.then_expr, .space);
+ try renderExpression(gpa, ais, tree, then_expr, .space);
}
- var last_else_token = while_node.else_token;
+ var last_else_token = else_token;
- if (while_node.error_token) |error_token| {
- try renderToken(ais, tree, while_node.else_token, .space); // else
+ if (maybe_error_token) |error_token| {
+ try renderToken(ais, tree, else_token, .space); // else
try renderToken(ais, tree, error_token - 1, .none); // |
try renderIdentifier(ais, tree, error_token, .none, .preserve_when_shadowing); // identifier
last_else_token = error_token + 1; // |
}
const indent_else_expr = indent_then_expr and
- !nodeIsBlock(node_tags[while_node.ast.else_expr]) and
- !nodeIsIfForWhileSwitch(node_tags[while_node.ast.else_expr]);
+ !nodeIsBlock(node_tags[else_expr]) and
+ !nodeIsIfForWhileSwitch(node_tags[else_expr]);
if (indent_else_expr) {
ais.pushIndentNextLine();
try renderToken(ais, tree, last_else_token, .newline);
ais.popIndent();
- try renderExpressionIndented(gpa, ais, tree, while_node.ast.else_expr, space);
+ try renderExpressionIndented(gpa, ais, tree, else_expr, space);
} else {
try renderToken(ais, tree, last_else_token, .space);
- try renderExpression(gpa, ais, tree, while_node.ast.else_expr, space);
+ try renderExpression(gpa, ais, tree, else_expr, space);
}
} else {
if (indent_then_expr) {
- try renderExpressionIndented(gpa, ais, tree, while_node.ast.then_expr, space);
+ try renderExpressionIndented(gpa, ais, tree, then_expr, space);
} else {
- try renderExpression(gpa, ais, tree, while_node.ast.then_expr, space);
+ try renderExpression(gpa, ais, tree, then_expr, space);
}
}
}
+fn renderFor(gpa: Allocator, ais: *Ais, tree: Ast, for_node: Ast.full.For, space: Space) Error!void {
+ const token_tags = tree.tokens.items(.tag);
+
+ if (for_node.label_token) |label| {
+ try renderIdentifier(ais, tree, label, .none, .eagerly_unquote); // label
+ try renderToken(ais, tree, label + 1, .space); // :
+ }
+
+ if (for_node.inline_token) |inline_token| {
+ try renderToken(ais, tree, inline_token, .space); // inline
+ }
+
+ try renderToken(ais, tree, for_node.ast.for_token, .space); // if/for/while
+
+ const lparen = for_node.ast.for_token + 1;
+ try renderParamList(gpa, ais, tree, lparen, for_node.ast.inputs, .space);
+
+ var cur = for_node.payload_token;
+ const pipe = std.mem.indexOfScalarPos(std.zig.Token.Tag, token_tags, cur, .pipe).?;
+ if (token_tags[pipe - 1] == .comma) {
+ ais.pushIndentNextLine();
+ try renderToken(ais, tree, cur - 1, .newline); // |
+ while (true) {
+ if (token_tags[cur] == .asterisk) {
+ try renderToken(ais, tree, cur, .none); // *
+ cur += 1;
+ }
+ try renderIdentifier(ais, tree, cur, .none, .preserve_when_shadowing); // identifier
+ cur += 1;
+ if (token_tags[cur] == .comma) {
+ try renderToken(ais, tree, cur, .newline); // ,
+ cur += 1;
+ }
+ if (token_tags[cur] == .pipe) {
+ break;
+ }
+ }
+ ais.popIndent();
+ } else {
+ try renderToken(ais, tree, cur - 1, .none); // |
+ while (true) {
+ if (token_tags[cur] == .asterisk) {
+ try renderToken(ais, tree, cur, .none); // *
+ cur += 1;
+ }
+ try renderIdentifier(ais, tree, cur, .none, .preserve_when_shadowing); // identifier
+ cur += 1;
+ if (token_tags[cur] == .comma) {
+ try renderToken(ais, tree, cur, .space); // ,
+ cur += 1;
+ }
+ if (token_tags[cur] == .pipe) {
+ break;
+ }
+ }
+ }
+
+ try renderThenElse(
+ gpa,
+ ais,
+ tree,
+ cur,
+ for_node.ast.then_expr,
+ for_node.else_token,
+ null,
+ for_node.ast.else_expr,
+ space,
+ );
+}
+
fn renderContainerField(
gpa: Allocator,
ais: *Ais,
@@ -2206,15 +2312,23 @@ fn renderCall(
call: Ast.full.Call,
space: Space,
) Error!void {
- const token_tags = tree.tokens.items(.tag);
-
if (call.async_token) |async_token| {
try renderToken(ais, tree, async_token, .space);
}
try renderExpression(gpa, ais, tree, call.ast.fn_expr, .none);
+ try renderParamList(gpa, ais, tree, call.ast.lparen, call.ast.params, space);
+}
+
+fn renderParamList(
+ gpa: Allocator,
+ ais: *Ais,
+ tree: Ast,
+ lparen: Ast.TokenIndex,
+ params: []const Ast.Node.Index,
+ space: Space,
+) Error!void {
+ const token_tags = tree.tokens.items(.tag);
- const lparen = call.ast.lparen;
- const params = call.ast.params;
if (params.len == 0) {
ais.pushIndentNextLine();
try renderToken(ais, tree, lparen, .none);
From 6733e43d87d4fe7b9d89948ebb95a72515c44fee Mon Sep 17 00:00:00 2001
From: Veikka Tuominen
Date: Wed, 1 Feb 2023 20:39:09 +0200
Subject: [PATCH 07/36] AstGen: work-in-progress multi-object for loops
---
src/AstGen.zig | 217 +++++++++++++++++++++++++++++++------------------
1 file changed, 140 insertions(+), 77 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 40eef32d4e..ee709a3fe2 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -518,6 +518,7 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins
.error_union,
.merge_error_sets,
.switch_range,
+ .for_range,
.@"await",
.bit_not,
.negation,
@@ -646,6 +647,8 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
.asm_output => unreachable, // Handled in `asmExpr`.
.asm_input => unreachable, // Handled in `asmExpr`.
+ .for_range => unreachable, // Handled in `forExpr`.
+
.assign => {
try assign(gz, scope, node);
return rvalue(gz, ri, .void_value, node);
@@ -834,7 +837,7 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
.@"while",
=> return whileExpr(gz, scope, ri.br(), node, tree.fullWhile(node).?, false),
- .for_simple, .@"for" => return forExpr(gz, scope, ri.br(), node, tree.fullWhile(node).?, false),
+ .for_simple, .@"for" => return forExpr(gz, scope, ri.br(), node, tree.fullFor(node).?, false),
.slice_open => {
const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs);
@@ -2342,7 +2345,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
.@"while", => _ = try whileExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.fullWhile(inner_node).?, true),
.for_simple,
- .@"for", => _ = try forExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.fullWhile(inner_node).?, true),
+ .@"for", => _ = try forExpr(gz, scope, .{ .rl = .discard }, inner_node, tree.fullFor(inner_node).?, true),
else => noreturn_src_node = try unusedResultExpr(gz, scope, inner_node),
// zig fmt: on
@@ -6282,7 +6285,7 @@ fn forExpr(
scope: *Scope,
ri: ResultInfo,
node: Ast.Node.Index,
- for_full: Ast.full.While,
+ for_full: Ast.full.For,
is_statement: bool,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
@@ -6295,23 +6298,79 @@ fn forExpr(
const is_inline = parent_gz.force_comptime or for_full.inline_token != null;
const tree = astgen.tree;
const token_tags = tree.tokens.items(.tag);
+ const node_tags = tree.nodes.items(.tag);
+ const node_data = tree.nodes.items(.data);
- const payload_is_ref = if (for_full.payload_token) |payload_token|
- token_tags[payload_token] == .asterisk
- else
- false;
+ // Check for unterminated ranges.
+ {
+ var unterminated: ?Ast.Node.Index = null;
+ for (for_full.ast.inputs) |input| {
+ if (node_tags[input] != .for_range) break;
+ if (node_data[input].rhs != 0) break;
+ unterminated = unterminated orelse input;
+ } else {
+ return astgen.failNode(unterminated.?, "unterminated for range", .{});
+ }
+ }
- try emitDbgNode(parent_gz, for_full.ast.cond_expr);
+ var lens = astgen.gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
+ defer astgen.gpa.free(lens);
+ var indexables = astgen.gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
+ defer astgen.gpa.free(indexables);
+ var counters = std.ArrayList(Zir.Inst.Ref).init(astgen.gpa);
+ defer counters.deinit();
- const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none };
- const array_ptr = try expr(parent_gz, scope, cond_ri, for_full.ast.cond_expr);
- const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr);
+ const counter_alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc;
+
+ {
+ var payload = for_full.payload_token;
+ for (for_full.ast.inputs) |input, i| {
+ const payload_is_ref = token_tags[payload] == .asterisk;
+ const ident_tok = payload + @boolToInt(payload_is_ref);
+
+ if (mem.eql(u8, tree.tokenSlice(ident_tok), "_") and payload_is_ref) {
+ return astgen.failTok(payload, "pointer modifier invalid on discard", .{});
+ }
+ payload = ident_tok + @as(u32, 2);
+
+ try emitDbgNode(parent_gz, input);
+ if (node_tags[input] == .for_range) {
+ if (payload_is_ref) {
+ return astgen.failTok(ident_tok, "cannot capture reference to range", .{});
+ }
+ const counter_ptr = try parent_gz.addUnNode(counter_alloc_tag, .usize_type, node);
+ const start_val = try expr(parent_gz, scope, node_data[input].lhs, input);
+ _ = try parent_gz.addBin(.store, counter_ptr, start_val);
+ indexables[i] = counter_ptr;
+ try counters.append(counter_ptr);
+
+ const end_node = node_data[input].rhs;
+ const end_val = if (end_node != 0) try expr(parent_gz, scope, node_data[input].rhs, input) else .none;
+ const range_len = try parent_gz.addPlNode(.for_range_len, input, Zir.Inst.Bin{
+ .lhs = start_val,
+ .rhs = end_val,
+ });
+ lens[i] = range_len;
+ } else {
+ const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none };
+ const indexable = try expr(parent_gz, scope, cond_ri, input);
+ indexables[i] = indexable;
+
+ const indexable_len = try parent_gz.addUnNode(.indexable_ptr_len, indexable, input);
+ lens[i] = indexable_len;
+ }
+ }
+ }
+
+ const len = "check_for_lens";
const index_ptr = blk: {
- const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc;
- const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
+ // Future optimization:
+ // for loops with only ranges don't need a separate index variable.
+ const index_ptr = try parent_gz.addUnNode(counter_alloc_tag, .usize_type, node);
// initialize to zero
_ = try parent_gz.addBin(.store, index_ptr, .zero_usize);
+ try counters.append(index_ptr);
break :blk index_ptr;
};
@@ -6343,13 +6402,15 @@ fn forExpr(
// cond_block unstacked now, can add new instructions to loop_scope
try loop_scope.instructions.append(astgen.gpa, cond_block);
- // Increment the index variable.
- const index_2 = try loop_scope.addUnNode(.load, index_ptr, for_full.ast.cond_expr);
- const index_plus_one = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{
- .lhs = index_2,
- .rhs = .one_usize,
- });
- _ = try loop_scope.addBin(.store, index_ptr, index_plus_one);
+ // Increment the index variable and ranges.
+ for (counters) |counter_ptr| {
+ const counter = try loop_scope.addUnNode(.load, counter_ptr, for_full.ast.cond_expr);
+ const counter_plus_one = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{
+ .lhs = counter,
+ .rhs = .one_usize,
+ });
+ _ = try loop_scope.addBin(.store, counter_ptr, counter_plus_one);
+ }
const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
_ = try loop_scope.addNode(repeat_tag, node);
@@ -6366,64 +6427,62 @@ fn forExpr(
var then_scope = parent_gz.makeSubBlock(&cond_scope.base);
defer then_scope.unstack();
- try then_scope.addDbgBlockBegin();
- var payload_val_scope: Scope.LocalVal = undefined;
- var index_scope: Scope.LocalPtr = undefined;
- const then_sub_scope = blk: {
- const payload_token = for_full.payload_token.?;
- const ident = if (token_tags[payload_token] == .asterisk)
- payload_token + 1
- else
- payload_token;
- const is_ptr = ident != payload_token;
- const value_name = tree.tokenSlice(ident);
- var payload_sub_scope: *Scope = undefined;
- if (!mem.eql(u8, value_name, "_")) {
- const name_str_index = try astgen.identAsString(ident);
- const tag: Zir.Inst.Tag = if (is_ptr) .elem_ptr else .elem_val;
- const payload_inst = try then_scope.addPlNode(tag, for_full.ast.cond_expr, Zir.Inst.Bin{
- .lhs = array_ptr,
- .rhs = index,
- });
- try astgen.detectLocalShadowing(&then_scope.base, name_str_index, ident, value_name, .capture);
- payload_val_scope = .{
- .parent = &then_scope.base,
- .gen_zir = &then_scope,
- .name = name_str_index,
- .inst = payload_inst,
- .token_src = ident,
- .id_cat = .capture,
- };
- try then_scope.addDbgVar(.dbg_var_val, name_str_index, payload_inst);
- payload_sub_scope = &payload_val_scope.base;
- } else if (is_ptr) {
- return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{});
- } else {
- payload_sub_scope = &then_scope.base;
- }
+ const then_sub_scope = &then_scope.base;
- const index_token = if (token_tags[ident + 1] == .comma)
- ident + 2
- else
- break :blk payload_sub_scope;
- const token_bytes = tree.tokenSlice(index_token);
- if (mem.eql(u8, token_bytes, "_")) {
- return astgen.failTok(index_token, "discard of index capture; omit it instead", .{});
- }
- const index_name = try astgen.identAsString(index_token);
- try astgen.detectLocalShadowing(payload_sub_scope, index_name, index_token, token_bytes, .@"loop index capture");
- index_scope = .{
- .parent = payload_sub_scope,
- .gen_zir = &then_scope,
- .name = index_name,
- .ptr = index_ptr,
- .token_src = index_token,
- .maybe_comptime = is_inline,
- .id_cat = .@"loop index capture",
- };
- try then_scope.addDbgVar(.dbg_var_val, index_name, index_ptr);
- break :blk &index_scope.base;
- };
+ // try then_scope.addDbgBlockBegin();
+ // var payload_val_scope: Scope.LocalVal = undefined;
+ // var index_scope: Scope.LocalPtr = undefined;
+ // const then_sub_scope = blk: {
+ // const payload_token = for_full.payload_token.?;
+ // const ident = if (token_tags[payload_token] == .asterisk)
+ // payload_token + 1
+ // else
+ // payload_token;
+ // const is_ptr = ident != payload_token;
+ // const value_name = tree.tokenSlice(ident);
+ // var payload_sub_scope: *Scope = undefined;
+ // if (!mem.eql(u8, value_name, "_")) {
+ // const name_str_index = try astgen.identAsString(ident);
+ // const tag: Zir.Inst.Tag = if (is_ptr) .elem_ptr else .elem_val;
+ // const payload_inst = try then_scope.addPlNode(tag, for_full.ast.cond_expr, Zir.Inst.Bin{
+ // .lhs = array_ptr,
+ // .rhs = index,
+ // });
+ // try astgen.detectLocalShadowing(&then_scope.base, name_str_index, ident, value_name, .capture);
+ // payload_val_scope = .{
+ // .parent = &then_scope.base,
+ // .gen_zir = &then_scope,
+ // .name = name_str_index,
+ // .inst = payload_inst,
+ // .token_src = ident,
+ // .id_cat = .capture,
+ // };
+ // try then_scope.addDbgVar(.dbg_var_val, name_str_index, payload_inst);
+ // payload_sub_scope = &payload_val_scope.base;
+ // } else if (is_ptr) {
+ // } else {
+ // payload_sub_scope = &then_scope.base;
+ // }
+
+ // const index_token = if (token_tags[ident + 1] == .comma)
+ // ident + 2
+ // else
+ // break :blk payload_sub_scope;
+ // const token_bytes = tree.tokenSlice(index_token);
+ // const index_name = try astgen.identAsString(index_token);
+ // try astgen.detectLocalShadowing(payload_sub_scope, index_name, index_token, token_bytes, .@"loop index capture");
+ // index_scope = .{
+ // .parent = payload_sub_scope,
+ // .gen_zir = &then_scope,
+ // .name = index_name,
+ // .ptr = index_ptr,
+ // .token_src = index_token,
+ // .maybe_comptime = is_inline,
+ // .id_cat = .@"loop index capture",
+ // };
+ // try then_scope.addDbgVar(.dbg_var_val, index_name, index_ptr);
+ // break :blk &index_scope.base;
+ // };
const then_result = try expr(&then_scope, then_sub_scope, .{ .rl = .none }, for_full.ast.then_expr);
_ = try addEnsureResult(&then_scope, then_result, for_full.ast.then_expr);
@@ -9021,6 +9080,7 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_
.mul_wrap,
.mul_sat,
.switch_range,
+ .for_range,
.field_access,
.sub,
.sub_wrap,
@@ -9310,6 +9370,7 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.Ev
.mul_wrap,
.mul_sat,
.switch_range,
+ .for_range,
.sub,
.sub_wrap,
.sub_sat,
@@ -9487,6 +9548,7 @@ fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.In
.mul_wrap,
.mul_sat,
.switch_range,
+ .for_range,
.field_access,
.sub,
.sub_wrap,
@@ -9731,6 +9793,7 @@ fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool {
.mul_wrap,
.mul_sat,
.switch_range,
+ .for_range,
.field_access,
.sub,
.sub_wrap,
From faa44e2e5875036b105d8b7d38ccb2e93757a3c5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 17 Feb 2023 11:51:22 -0700
Subject: [PATCH 08/36] AstGen: rework multi-object for loop
* Allow unbounded looping.
* Lower by incrementing raw pointers for each iterable rather than
incrementing a single index variable. This elides safety checks
without any analysis required thanks to the length assertion and
lowers to decent machine code even in debug builds.
* An "end" value is selected, prioritizing a counter if possible,
falling back to a runtime calculation of ptr+len on a slice input.
* Specialize on the pattern `0..`, avoiding an unnecessary subtraction
instruction being emitted.
* Add the `for_check_lens` ZIR instruction.
---
src/AstGen.zig | 169 +++++++++++++++++++++++++++++++---------------
src/Sema.zig | 15 ++++
src/Zir.zig | 12 ++++
src/print_zir.zig | 15 ++++
4 files changed, 156 insertions(+), 55 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index ee709a3fe2..7d3a165d2f 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2666,6 +2666,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.validate_deref,
.save_err_ret_index,
.restore_err_ret_index,
+ .for_check_lens,
=> break :b true,
.@"defer" => unreachable,
@@ -6294,37 +6295,35 @@ fn forExpr(
try astgen.checkLabelRedefinition(scope, label_token);
}
- // Set up variables and constants.
const is_inline = parent_gz.force_comptime or for_full.inline_token != null;
const tree = astgen.tree;
const token_tags = tree.tokens.items(.tag);
const node_tags = tree.nodes.items(.tag);
const node_data = tree.nodes.items(.data);
+ const gpa = astgen.gpa;
- // Check for unterminated ranges.
- {
- var unterminated: ?Ast.Node.Index = null;
- for (for_full.ast.inputs) |input| {
- if (node_tags[input] != .for_range) break;
- if (node_data[input].rhs != 0) break;
- unterminated = unterminated orelse input;
- } else {
- return astgen.failNode(unterminated.?, "unterminated for range", .{});
- }
- }
-
- var lens = astgen.gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
- defer astgen.gpa.free(lens);
- var indexables = astgen.gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
- defer astgen.gpa.free(indexables);
- var counters = std.ArrayList(Zir.Inst.Ref).init(astgen.gpa);
- defer counters.deinit();
+ const allocs = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
+ defer gpa.free(allocs);
+ // elements of this array can be `none`, indicating no length check.
+ const lens = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
+ defer gpa.free(lens);
const counter_alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc;
+ // Tracks the index of allocs/lens that has a length to be checked and is
+ // used for the end value.
+ // If this is null, there are no len checks.
+ var end_input_index: ?u32 = null;
+ // This is a value to use to find out if the for loop has reached the end
+ // yet. It prefers to use a counter since the end value is provided directly,
+ // and otherwise falls back to adding ptr+len of a slice to compute end.
+ // Corresponds to end_input_index and will be .none in case that value is null.
+ var cond_end_val: Zir.Inst.Ref = .none;
+
{
var payload = for_full.payload_token;
- for (for_full.ast.inputs) |input, i| {
+ for (for_full.ast.inputs) |input, i_usize| {
+ const i = @intCast(u32, i_usize);
const payload_is_ref = token_tags[payload] == .asterisk;
const ident_tok = payload + @boolToInt(payload_is_ref);
@@ -6339,59 +6338,101 @@ fn forExpr(
return astgen.failTok(ident_tok, "cannot capture reference to range", .{});
}
const counter_ptr = try parent_gz.addUnNode(counter_alloc_tag, .usize_type, node);
- const start_val = try expr(parent_gz, scope, node_data[input].lhs, input);
+ const start_node = node_data[input].lhs;
+ const start_val = try expr(parent_gz, scope, .{ .rl = .none }, start_node);
_ = try parent_gz.addBin(.store, counter_ptr, start_val);
- indexables[i] = counter_ptr;
- try counters.append(counter_ptr);
const end_node = node_data[input].rhs;
- const end_val = if (end_node != 0) try expr(parent_gz, scope, node_data[input].rhs, input) else .none;
- const range_len = try parent_gz.addPlNode(.for_range_len, input, Zir.Inst.Bin{
- .lhs = start_val,
- .rhs = end_val,
- });
+ const end_val = if (end_node != 0)
+ try expr(parent_gz, scope, .{ .rl = .none }, node_data[input].rhs)
+ else
+ .none;
+
+ const range_len = if (end_val == .none or nodeIsTriviallyZero(tree, start_node))
+ end_val
+ else
+ try parent_gz.addPlNode(.sub, input, Zir.Inst.Bin{
+ .lhs = end_val,
+ .rhs = start_val,
+ });
+
+ if (range_len != .none and cond_end_val == .none) {
+ end_input_index = i;
+ cond_end_val = end_val;
+ }
+
+ allocs[i] = counter_ptr;
lens[i] = range_len;
} else {
const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none };
const indexable = try expr(parent_gz, scope, cond_ri, input);
- indexables[i] = indexable;
+ const base_ptr = try parent_gz.addPlNode(.elem_ptr_imm, input, Zir.Inst.ElemPtrImm{
+ .ptr = indexable,
+ .index = 0,
+ });
- const indexable_len = try parent_gz.addUnNode(.indexable_ptr_len, indexable, input);
- lens[i] = indexable_len;
+ if (end_input_index == null) {
+ end_input_index = i;
+ assert(cond_end_val == .none);
+ }
+
+ allocs[i] = base_ptr;
+ lens[i] = try parent_gz.addUnNode(.indexable_ptr_len, indexable, input);
}
}
}
- const len = "check_for_lens";
+ // In case there are no counters which already have an end computed, we
+ // compute an end from base pointer plus length.
+ if (end_input_index) |i| {
+ if (cond_end_val == .none) {
+ cond_end_val = try parent_gz.addPlNode(.add, for_full.ast.inputs[i], Zir.Inst.Bin{
+ .lhs = allocs[i],
+ .rhs = lens[i],
+ });
+ }
+ }
- const index_ptr = blk: {
- // Future optimization:
- // for loops with only ranges don't need a separate index variable.
- const index_ptr = try parent_gz.addUnNode(counter_alloc_tag, .usize_type, node);
- // initialize to zero
- _ = try parent_gz.addBin(.store, index_ptr, .zero_usize);
- try counters.append(index_ptr);
- break :blk index_ptr;
- };
+ // We use a dedicated ZIR instruction to assert the lengths to assist with
+ // nicer error reporting as well as fewer ZIR bytes emitted.
+ if (end_input_index != null) {
+ const lens_len = @intCast(u32, lens.len);
+ try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len);
+ _ = try parent_gz.addPlNode(.for_check_lens, node, Zir.Inst.MultiOp{
+ .operands_len = lens_len,
+ });
+ appendRefsAssumeCapacity(astgen, lens);
+ }
const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop;
const loop_block = try parent_gz.makeBlockInst(loop_tag, node);
- try parent_gz.instructions.append(astgen.gpa, loop_block);
+ try parent_gz.instructions.append(gpa, loop_block);
var loop_scope = parent_gz.makeSubBlock(scope);
loop_scope.is_inline = is_inline;
loop_scope.setBreakResultInfo(ri);
defer loop_scope.unstack();
- defer loop_scope.labeled_breaks.deinit(astgen.gpa);
+ defer loop_scope.labeled_breaks.deinit(gpa);
var cond_scope = parent_gz.makeSubBlock(&loop_scope.base);
defer cond_scope.unstack();
- // check condition i < array_expr.len
- const index = try cond_scope.addUnNode(.load, index_ptr, for_full.ast.cond_expr);
- const cond = try cond_scope.addPlNode(.cmp_lt, for_full.ast.cond_expr, Zir.Inst.Bin{
- .lhs = index,
- .rhs = len,
+ // Load all the iterables.
+ const loaded_ptrs = try gpa.alloc(Zir.Inst.Ref, allocs.len);
+ defer gpa.free(loaded_ptrs);
+ for (allocs) |alloc, i| {
+ loaded_ptrs[i] = try cond_scope.addUnNode(.load, alloc, for_full.ast.inputs[i]);
+ }
+
+ // Check the condition.
+ const input_index = end_input_index orelse {
+ return astgen.failNode(node, "TODO: handle infinite for loop", .{});
+ };
+ assert(cond_end_val != .none);
+
+ const cond = try cond_scope.addPlNode(.cmp_neq, for_full.ast.inputs[input_index], Zir.Inst.Bin{
+ .lhs = loaded_ptrs[input_index],
+ .rhs = cond_end_val,
});
const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr;
@@ -6400,16 +6441,15 @@ fn forExpr(
const cond_block = try loop_scope.makeBlockInst(block_tag, node);
try cond_scope.setBlockBody(cond_block);
// cond_block unstacked now, can add new instructions to loop_scope
- try loop_scope.instructions.append(astgen.gpa, cond_block);
+ try loop_scope.instructions.append(gpa, cond_block);
- // Increment the index variable and ranges.
- for (counters) |counter_ptr| {
- const counter = try loop_scope.addUnNode(.load, counter_ptr, for_full.ast.cond_expr);
- const counter_plus_one = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{
- .lhs = counter,
+ // Increment the loop variables.
+ for (allocs) |alloc, i| {
+ const incremented = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{
+ .lhs = loaded_ptrs[i],
.rhs = .one_usize,
});
- _ = try loop_scope.addBin(.store, counter_ptr, counter_plus_one);
+ _ = try loop_scope.addBin(.store, alloc, incremented);
}
const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
_ = try loop_scope.addNode(repeat_tag, node);
@@ -8960,6 +9000,25 @@ comptime {
}
}
+fn nodeIsTriviallyZero(tree: *const Ast, node: Ast.Node.Index) bool {
+ const node_tags = tree.nodes.items(.tag);
+ const main_tokens = tree.nodes.items(.main_token);
+
+ switch (node_tags[node]) {
+ .number_literal => {
+ const ident = main_tokens[node];
+ return switch (std.zig.parseNumberLiteral(tree.tokenSlice(ident))) {
+ .int => |number| switch (number) {
+ 0 => true,
+ else => false,
+ },
+ else => false,
+ };
+ },
+ else => return false,
+ }
+}
+
fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_res_ty: bool) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
diff --git a/src/Sema.zig b/src/Sema.zig
index cf6350e35f..b5afe93511 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1386,6 +1386,11 @@ fn analyzeBodyInner(
i += 1;
continue;
},
+ .for_check_lens => {
+ try sema.zirForCheckLens(block, inst);
+ i += 1;
+ continue;
+ },
// Special case instructions to handle comptime control flow.
.@"break" => {
@@ -17096,6 +17101,16 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
return sema.popErrorReturnTrace(start_block, src, operand, saved_index);
}
+fn zirForCheckLens(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
+ const args = sema.code.refSlice(extra.end, extra.data.operands_len);
+ const src = inst_data.src();
+
+ _ = args;
+ return sema.fail(block, src, "TODO implement zirForCheckLens", .{});
+}
+
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion);
diff --git a/src/Zir.zig b/src/Zir.zig
index 58f9fdff14..de6c2f02d1 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -497,6 +497,15 @@ pub const Inst = struct {
/// Sends comptime control flow back to the beginning of the current block.
/// Uses the `node` field.
repeat_inline,
+ /// Asserts that all the lengths provided match. Used to build a for loop.
+ /// Return value is always void.
+ /// Uses the `pl_node` field with payload `MultiOp`.
+ /// There is exactly one item corresponding to each AST node inside the for
+ /// loop condition. Each item may be `none`, indicating an unbounded range.
+ /// Illegal behaviors:
+ /// * If all lengths are unbounded ranges (always a compile error).
+ /// * If any two lengths do not match each other.
+ for_check_lens,
/// Merge two error sets into one, `E1 || E2`.
/// Uses the `pl_node` field with payload `Bin`.
merge_error_sets,
@@ -1242,6 +1251,7 @@ pub const Inst = struct {
.defer_err_code,
.save_err_ret_index,
.restore_err_ret_index,
+ .for_check_lens,
=> false,
.@"break",
@@ -1309,6 +1319,7 @@ pub const Inst = struct {
.memcpy,
.memset,
.check_comptime_control_flow,
+ .for_check_lens,
.@"defer",
.defer_err_code,
.restore_err_ret_index,
@@ -1588,6 +1599,7 @@ pub const Inst = struct {
.@"break" = .@"break",
.break_inline = .@"break",
.check_comptime_control_flow = .un_node,
+ .for_check_lens = .pl_node,
.call = .pl_node,
.cmp_lt = .pl_node,
.cmp_lte = .pl_node,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 8d97000582..f2436f7679 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -355,6 +355,8 @@ const Writer = struct {
.array_type,
=> try self.writePlNodeBin(stream, inst),
+ .for_check_lens => try self.writePlNodeMultiOp(stream, inst),
+
.elem_ptr_imm => try self.writeElemPtrImm(stream, inst),
.@"export" => try self.writePlNodeExport(stream, inst),
@@ -868,6 +870,19 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
+ fn writePlNodeMultiOp(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
+ const args = self.code.refSlice(extra.end, extra.data.operands_len);
+ try stream.writeAll("{");
+ for (args) |arg, i| {
+ if (i != 0) try stream.writeAll(", ");
+ try self.writeInstRef(stream, arg);
+ }
+ try stream.writeAll("}) ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
fn writeElemPtrImm(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
From 841add6890d001d315591dc20f7d464c264d88bb Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 17 Feb 2023 13:44:35 -0700
Subject: [PATCH 09/36] AstGen: finish multi-object for loops
This strategy uses pointer arithmetic to iterate through the loop. This
has a problem, however, which is tuples. AstGen does not know whether a
given indexable is a tuple or can be iterated based on contiguous
memory. Tuples unlike other indexables cannot be represented as a
many-item pointer that is incremented as the loop counter.
So, after this commit, I will modify AstGen back closer to how @vexu had
it before, using a counter and array element access.
---
src/AstGen.zig | 135 +++++++++++++++++++++++-----------------------
src/Sema.zig | 37 +++++++------
src/Zir.zig | 11 +++-
src/print_zir.zig | 4 +-
4 files changed, 101 insertions(+), 86 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 7d3a165d2f..523ac235ac 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -88,6 +88,7 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
Zir.Inst.BuiltinCall.Flags => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.FuncFancy.Bits => @bitCast(u32, @field(extra, field.name)),
+ Zir.Inst.ElemPtrImm.Bits => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
};
i += 1;
@@ -1565,7 +1566,9 @@ fn arrayInitExprRlPtrInner(
for (elements) |elem_init, i| {
const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{
.ptr = result_ptr,
- .index = @intCast(u32, i),
+ .bits = .{
+ .index = @intCast(u31, i),
+ },
});
astgen.extra.items[extra_index] = refToIndex(elem_ptr).?;
extra_index += 1;
@@ -6308,7 +6311,7 @@ fn forExpr(
const lens = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
defer gpa.free(lens);
- const counter_alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc;
+ const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc_mut;
// Tracks the index of allocs/lens that has a length to be checked and is
// used for the end value.
@@ -6321,23 +6324,24 @@ fn forExpr(
var cond_end_val: Zir.Inst.Ref = .none;
{
- var payload = for_full.payload_token;
+ var capture_token = for_full.payload_token;
for (for_full.ast.inputs) |input, i_usize| {
const i = @intCast(u32, i_usize);
- const payload_is_ref = token_tags[payload] == .asterisk;
- const ident_tok = payload + @boolToInt(payload_is_ref);
+ const capture_is_ref = token_tags[capture_token] == .asterisk;
+ const ident_tok = capture_token + @boolToInt(capture_is_ref);
- if (mem.eql(u8, tree.tokenSlice(ident_tok), "_") and payload_is_ref) {
- return astgen.failTok(payload, "pointer modifier invalid on discard", .{});
+ if (mem.eql(u8, tree.tokenSlice(ident_tok), "_") and capture_is_ref) {
+ return astgen.failTok(capture_token, "pointer modifier invalid on discard", .{});
}
- payload = ident_tok + @as(u32, 2);
+ // Skip over the comma, and on to the next capture (or the ending pipe character).
+ capture_token = ident_tok + 2;
try emitDbgNode(parent_gz, input);
if (node_tags[input] == .for_range) {
- if (payload_is_ref) {
+ if (capture_is_ref) {
return astgen.failTok(ident_tok, "cannot capture reference to range", .{});
}
- const counter_ptr = try parent_gz.addUnNode(counter_alloc_tag, .usize_type, node);
+ const counter_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
const start_node = node_data[input].lhs;
const start_val = try expr(parent_gz, scope, .{ .rl = .none }, start_node);
_ = try parent_gz.addBin(.store, counter_ptr, start_val);
@@ -6364,20 +6368,28 @@ fn forExpr(
allocs[i] = counter_ptr;
lens[i] = range_len;
} else {
- const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none };
- const indexable = try expr(parent_gz, scope, cond_ri, input);
+ const indexable = try expr(parent_gz, scope, .{ .rl = .none }, input);
+ // This instruction has nice compile errors so we put it before the other ones
+ // even though it is not needed until later in the block.
+ const ptr_len = try parent_gz.addUnNode(.indexable_ptr_len, indexable, input);
const base_ptr = try parent_gz.addPlNode(.elem_ptr_imm, input, Zir.Inst.ElemPtrImm{
.ptr = indexable,
- .index = 0,
+ .bits = .{
+ .index = 0,
+ .manyptr = true,
+ },
});
+ const alloc_ty_inst = try parent_gz.addUnNode(.typeof, base_ptr, node);
+ const alloc = try parent_gz.addUnNode(alloc_tag, alloc_ty_inst, node);
+ _ = try parent_gz.addBin(.store, alloc, base_ptr);
if (end_input_index == null) {
end_input_index = i;
assert(cond_end_val == .none);
}
- allocs[i] = base_ptr;
- lens[i] = try parent_gz.addUnNode(.indexable_ptr_len, indexable, input);
+ allocs[i] = alloc;
+ lens[i] = ptr_len;
}
}
}
@@ -6467,62 +6479,47 @@ fn forExpr(
var then_scope = parent_gz.makeSubBlock(&cond_scope.base);
defer then_scope.unstack();
- const then_sub_scope = &then_scope.base;
+ try then_scope.addDbgBlockBegin();
- // try then_scope.addDbgBlockBegin();
- // var payload_val_scope: Scope.LocalVal = undefined;
- // var index_scope: Scope.LocalPtr = undefined;
- // const then_sub_scope = blk: {
- // const payload_token = for_full.payload_token.?;
- // const ident = if (token_tags[payload_token] == .asterisk)
- // payload_token + 1
- // else
- // payload_token;
- // const is_ptr = ident != payload_token;
- // const value_name = tree.tokenSlice(ident);
- // var payload_sub_scope: *Scope = undefined;
- // if (!mem.eql(u8, value_name, "_")) {
- // const name_str_index = try astgen.identAsString(ident);
- // const tag: Zir.Inst.Tag = if (is_ptr) .elem_ptr else .elem_val;
- // const payload_inst = try then_scope.addPlNode(tag, for_full.ast.cond_expr, Zir.Inst.Bin{
- // .lhs = array_ptr,
- // .rhs = index,
- // });
- // try astgen.detectLocalShadowing(&then_scope.base, name_str_index, ident, value_name, .capture);
- // payload_val_scope = .{
- // .parent = &then_scope.base,
- // .gen_zir = &then_scope,
- // .name = name_str_index,
- // .inst = payload_inst,
- // .token_src = ident,
- // .id_cat = .capture,
- // };
- // try then_scope.addDbgVar(.dbg_var_val, name_str_index, payload_inst);
- // payload_sub_scope = &payload_val_scope.base;
- // } else if (is_ptr) {
- // } else {
- // payload_sub_scope = &then_scope.base;
- // }
+ const capture_scopes = try gpa.alloc(Scope.LocalVal, for_full.ast.inputs.len);
+ defer gpa.free(capture_scopes);
- // const index_token = if (token_tags[ident + 1] == .comma)
- // ident + 2
- // else
- // break :blk payload_sub_scope;
- // const token_bytes = tree.tokenSlice(index_token);
- // const index_name = try astgen.identAsString(index_token);
- // try astgen.detectLocalShadowing(payload_sub_scope, index_name, index_token, token_bytes, .@"loop index capture");
- // index_scope = .{
- // .parent = payload_sub_scope,
- // .gen_zir = &then_scope,
- // .name = index_name,
- // .ptr = index_ptr,
- // .token_src = index_token,
- // .maybe_comptime = is_inline,
- // .id_cat = .@"loop index capture",
- // };
- // try then_scope.addDbgVar(.dbg_var_val, index_name, index_ptr);
- // break :blk &index_scope.base;
- // };
+ const then_sub_scope = blk: {
+ var capture_token = for_full.payload_token;
+ var capture_sub_scope: *Scope = &then_scope.base;
+ for (for_full.ast.inputs) |input, i_usize| {
+ const i = @intCast(u32, i_usize);
+ const capture_is_ref = token_tags[capture_token] == .asterisk;
+ const ident_tok = capture_token + @boolToInt(capture_is_ref);
+ const capture_name = tree.tokenSlice(ident_tok);
+ // Skip over the comma, and on to the next capture (or the ending pipe character).
+ capture_token = ident_tok + 2;
+
+ if (mem.eql(u8, capture_name, "_")) continue;
+
+ const name_str_index = try astgen.identAsString(ident_tok);
+ try astgen.detectLocalShadowing(capture_sub_scope, name_str_index, ident_tok, capture_name, .capture);
+
+ const loaded = if (capture_is_ref)
+ loaded_ptrs[i]
+ else
+ try then_scope.addUnNode(.load, loaded_ptrs[i], input);
+
+ capture_scopes[i] = .{
+ .parent = capture_sub_scope,
+ .gen_zir = &then_scope,
+ .name = name_str_index,
+ .inst = loaded,
+ .token_src = ident_tok,
+ .id_cat = .capture,
+ };
+
+ try then_scope.addDbgVar(.dbg_var_val, name_str_index, loaded);
+ capture_sub_scope = &capture_scopes[i].base;
+ }
+
+ break :blk capture_sub_scope;
+ };
const then_result = try expr(&then_scope, then_sub_scope, .{ .rl = .none }, for_full.ast.then_expr);
_ = try addEnsureResult(&then_scope, then_result, for_full.ast.then_expr);
diff --git a/src/Sema.zig b/src/Sema.zig
index b5afe93511..c251aa9fbf 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -9649,7 +9649,7 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemPtr(block, src, array_ptr, elem_index, src, false);
+ return sema.elemPtr(block, src, array_ptr, elem_index, src, false, .One);
}
fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9662,7 +9662,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false);
+ return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, .One);
}
fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9673,8 +9673,9 @@ fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.ptr);
- const elem_index = try sema.addIntUnsigned(Type.usize, extra.index);
- return sema.elemPtr(block, src, array_ptr, elem_index, src, true);
+ const elem_index = try sema.addIntUnsigned(Type.usize, extra.bits.index);
+ const size: std.builtin.Type.Pointer.Size = if (extra.bits.manyptr) .Many else .One;
+ return sema.elemPtr(block, src, array_ptr, elem_index, src, true, size);
}
fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -22905,7 +22906,7 @@ fn panicSentinelMismatch(
const actual_sentinel = if (ptr_ty.isSlice())
try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index)
else blk: {
- const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null);
+ const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null, .One);
const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty);
break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
};
@@ -24072,6 +24073,7 @@ fn elemPtr(
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
init: bool,
+ size: std.builtin.Type.Pointer.Size,
) CompileError!Air.Inst.Ref {
const indexable_ptr_src = src; // TODO better source location
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
@@ -24098,13 +24100,12 @@ fn elemPtr(
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt(target));
const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
- const result_ty = try sema.elemPtrType(indexable_ty, index);
- return sema.addConstant(result_ty, elem_ptr);
+ const elem_ptr_ty = try sema.elemPtrType(indexable_ty, index, size);
+ return sema.addConstant(elem_ptr_ty, elem_ptr);
};
- const result_ty = try sema.elemPtrType(indexable_ty, null);
-
+ const elem_ptr_ty = try sema.elemPtrType(indexable_ty, null, size);
try sema.requireRuntimeBlock(block, src, runtime_src);
- return block.addPtrElemPtr(indexable, elem_index, result_ty);
+ return block.addPtrElemPtr(indexable, elem_index, elem_ptr_ty);
},
.One => {
assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
@@ -24166,7 +24167,7 @@ fn elemVal(
},
.One => {
assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
- const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false);
+ const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, .One);
return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
},
},
@@ -24404,7 +24405,7 @@ fn elemPtrArray(
break :o index;
} else null;
- const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset);
+ const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset, .One);
if (maybe_undef_array_ptr_val) |array_ptr_val| {
if (array_ptr_val.isUndef()) {
@@ -24509,7 +24510,7 @@ fn elemPtrSlice(
break :o index;
} else null;
- const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset);
+ const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset, .One);
if (maybe_undef_slice_val) |slice_val| {
if (slice_val.isUndef()) {
@@ -26239,7 +26240,7 @@ fn storePtr2(
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
const elem_index = try sema.addIntUnsigned(Type.usize, i);
- const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false);
+ const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, .One);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
return;
@@ -33276,7 +33277,12 @@ fn compareVector(
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `analyzePtrArithmetic`.
-fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
+fn elemPtrType(
+ sema: *Sema,
+ ptr_ty: Type,
+ offset: ?usize,
+ size: std.builtin.Type.Pointer.Size,
+) !Type {
const ptr_info = ptr_ty.ptrInfo().data;
const elem_ty = ptr_ty.elemType2();
const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0;
@@ -33321,6 +33327,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
break :a new_align;
};
return try Type.ptr(sema.arena, sema.mod, .{
+ .size = size,
.pointee_type = elem_ty,
.mutable = ptr_info.mutable,
.@"addrspace" = ptr_info.@"addrspace",
diff --git a/src/Zir.zig b/src/Zir.zig
index de6c2f02d1..edbd70e170 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -79,6 +79,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, code.extra[i]),
Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]),
Inst.FuncFancy.Bits => @bitCast(Inst.FuncFancy.Bits, code.extra[i]),
+ Inst.ElemPtrImm.Bits => @bitCast(Inst.ElemPtrImm.Bits, code.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
@@ -388,6 +389,8 @@ pub const Inst = struct {
/// as a reference to another ZIR instruction.
/// Uses the `pl_node` union field. AST node is an element inside array initialization
/// syntax. Payload is `ElemPtrImm`.
+ /// This instruction has a way to set the result type to be a
+ /// single-pointer or a many-pointer.
elem_ptr_imm,
/// Given an array, slice, or pointer, returns the element at the provided index.
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
@@ -2972,7 +2975,13 @@ pub const Inst = struct {
pub const ElemPtrImm = struct {
ptr: Ref,
- index: u32,
+ bits: Bits,
+
+ pub const Bits = packed struct(u32) {
+ index: u31,
+ /// Controls whether the type returned is `*T` or `[*]T`.
+ manyptr: bool = false,
+ };
};
/// 0. multi_cases_len: u32 // If has_multi_cases is set.
diff --git a/src/print_zir.zig b/src/print_zir.zig
index f2436f7679..0977a88d53 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -888,7 +888,9 @@ const Writer = struct {
const extra = self.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
try self.writeInstRef(stream, extra.ptr);
- try stream.print(", {d}) ", .{extra.index});
+ try stream.print(", {d}", .{extra.bits.index});
+ try self.writeFlag(stream, ", manyptr", extra.bits.manyptr);
+ try stream.writeAll(") ");
try self.writeSrc(stream, inst_data.src());
}
From 293d6bdc73c5fe01b07ebe3d09c9a78613fed093 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 17 Feb 2023 16:39:45 -0700
Subject: [PATCH 10/36] AstGen: back to index-based for loops
---
src/AstGen.zig | 159 +++++++++++++++++++++-------------------------
src/Sema.zig | 63 ++++++++----------
src/Zir.zig | 21 ++----
src/print_zir.zig | 6 +-
4 files changed, 106 insertions(+), 143 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 523ac235ac..b90201713e 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -88,7 +88,6 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
Zir.Inst.BuiltinCall.Flags => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.FuncFancy.Bits => @bitCast(u32, @field(extra, field.name)),
- Zir.Inst.ElemPtrImm.Bits => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
};
i += 1;
@@ -1566,9 +1565,7 @@ fn arrayInitExprRlPtrInner(
for (elements) |elem_init, i| {
const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{
.ptr = result_ptr,
- .bits = .{
- .index = @intCast(u31, i),
- },
+ .index = @intCast(u32, i),
});
astgen.extra.items[extra_index] = refToIndex(elem_ptr).?;
extra_index += 1;
@@ -2601,6 +2598,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.field_base_ptr,
.ret_ptr,
.ret_type,
+ .for_len,
.@"try",
.try_ptr,
//.try_inline,
@@ -2669,7 +2667,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.validate_deref,
.save_err_ret_index,
.restore_err_ret_index,
- .for_check_lens,
=> break :b true,
.@"defer" => unreachable,
@@ -6305,23 +6302,26 @@ fn forExpr(
const node_data = tree.nodes.items(.data);
const gpa = astgen.gpa;
- const allocs = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
- defer gpa.free(allocs);
+ // For counters, this is the start value; for indexables, this is the base
+ // pointer that can be used with elem_ptr and similar instructions.
+ // Special value `none` means that this is a counter and its start value is
+ // zero, indicating that the main index counter can be used directly.
+ const indexables = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
+ defer gpa.free(indexables);
// elements of this array can be `none`, indicating no length check.
const lens = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len);
defer gpa.free(lens);
- const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc_mut;
+ // We will use a single zero-based counter no matter how many indexables there are.
+ const index_ptr = blk: {
+ const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc;
+ const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
+ // initialize to zero
+ _ = try parent_gz.addBin(.store, index_ptr, .zero_usize);
+ break :blk index_ptr;
+ };
- // Tracks the index of allocs/lens that has a length to be checked and is
- // used for the end value.
- // If this is null, there are no len checks.
- var end_input_index: ?u32 = null;
- // This is a value to use to find out if the for loop has reached the end
- // yet. It prefers to use a counter since the end value is provided directly,
- // and otherwise falls back to adding ptr+len of a slice to compute end.
- // Corresponds to end_input_index and will be .none in case that value is null.
- var cond_end_val: Zir.Inst.Ref = .none;
+ var any_len_checks = false;
{
var capture_token = for_full.payload_token;
@@ -6341,10 +6341,8 @@ fn forExpr(
if (capture_is_ref) {
return astgen.failTok(ident_tok, "cannot capture reference to range", .{});
}
- const counter_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
const start_node = node_data[input].lhs;
const start_val = try expr(parent_gz, scope, .{ .rl = .none }, start_node);
- _ = try parent_gz.addBin(.store, counter_ptr, start_val);
const end_node = node_data[input].rhs;
const end_val = if (end_node != 0)
@@ -6352,7 +6350,8 @@ fn forExpr(
else
.none;
- const range_len = if (end_val == .none or nodeIsTriviallyZero(tree, start_node))
+ const start_is_zero = nodeIsTriviallyZero(tree, start_node);
+ const range_len = if (end_val == .none or start_is_zero)
end_val
else
try parent_gz.addPlNode(.sub, input, Zir.Inst.Bin{
@@ -6360,61 +6359,33 @@ fn forExpr(
.rhs = start_val,
});
- if (range_len != .none and cond_end_val == .none) {
- end_input_index = i;
- cond_end_val = end_val;
- }
-
- allocs[i] = counter_ptr;
+ any_len_checks = any_len_checks or range_len != .none;
+ indexables[i] = if (start_is_zero) .none else start_val;
lens[i] = range_len;
} else {
const indexable = try expr(parent_gz, scope, .{ .rl = .none }, input);
- // This instruction has nice compile errors so we put it before the other ones
- // even though it is not needed until later in the block.
- const ptr_len = try parent_gz.addUnNode(.indexable_ptr_len, indexable, input);
- const base_ptr = try parent_gz.addPlNode(.elem_ptr_imm, input, Zir.Inst.ElemPtrImm{
- .ptr = indexable,
- .bits = .{
- .index = 0,
- .manyptr = true,
- },
- });
- const alloc_ty_inst = try parent_gz.addUnNode(.typeof, base_ptr, node);
- const alloc = try parent_gz.addUnNode(alloc_tag, alloc_ty_inst, node);
- _ = try parent_gz.addBin(.store, alloc, base_ptr);
+ const indexable_len = try parent_gz.addUnNode(.indexable_ptr_len, indexable, input);
- if (end_input_index == null) {
- end_input_index = i;
- assert(cond_end_val == .none);
- }
-
- allocs[i] = alloc;
- lens[i] = ptr_len;
+ any_len_checks = true;
+ indexables[i] = indexable;
+ lens[i] = indexable_len;
}
}
}
- // In case there are no counters which already have an end computed, we
- // compute an end from base pointer plus length.
- if (end_input_index) |i| {
- if (cond_end_val == .none) {
- cond_end_val = try parent_gz.addPlNode(.add, for_full.ast.inputs[i], Zir.Inst.Bin{
- .lhs = allocs[i],
- .rhs = lens[i],
- });
- }
- }
-
// We use a dedicated ZIR instruction to assert the lengths to assist with
// nicer error reporting as well as fewer ZIR bytes emitted.
- if (end_input_index != null) {
+ const len: Zir.Inst.Ref = len: {
+ if (!any_len_checks) break :len .none;
+
const lens_len = @intCast(u32, lens.len);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len);
- _ = try parent_gz.addPlNode(.for_check_lens, node, Zir.Inst.MultiOp{
+ const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{
.operands_len = lens_len,
});
appendRefsAssumeCapacity(astgen, lens);
- }
+ break :len len;
+ };
const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop;
const loop_block = try parent_gz.makeBlockInst(loop_tag, node);
@@ -6429,22 +6400,14 @@ fn forExpr(
var cond_scope = parent_gz.makeSubBlock(&loop_scope.base);
defer cond_scope.unstack();
- // Load all the iterables.
- const loaded_ptrs = try gpa.alloc(Zir.Inst.Ref, allocs.len);
- defer gpa.free(loaded_ptrs);
- for (allocs) |alloc, i| {
- loaded_ptrs[i] = try cond_scope.addUnNode(.load, alloc, for_full.ast.inputs[i]);
- }
-
// Check the condition.
- const input_index = end_input_index orelse {
+ if (!any_len_checks) {
return astgen.failNode(node, "TODO: handle infinite for loop", .{});
- };
- assert(cond_end_val != .none);
-
- const cond = try cond_scope.addPlNode(.cmp_neq, for_full.ast.inputs[input_index], Zir.Inst.Bin{
- .lhs = loaded_ptrs[input_index],
- .rhs = cond_end_val,
+ }
+ const index = try cond_scope.addUnNode(.load, index_ptr, node);
+ const cond = try cond_scope.addPlNode(.cmp_lt, node, Zir.Inst.Bin{
+ .lhs = index,
+ .rhs = len,
});
const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr;
@@ -6455,14 +6418,12 @@ fn forExpr(
// cond_block unstacked now, can add new instructions to loop_scope
try loop_scope.instructions.append(gpa, cond_block);
- // Increment the loop variables.
- for (allocs) |alloc, i| {
- const incremented = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{
- .lhs = loaded_ptrs[i],
- .rhs = .one_usize,
- });
- _ = try loop_scope.addBin(.store, alloc, incremented);
- }
+ // Increment the index variable.
+ const index_plus_one = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{
+ .lhs = index,
+ .rhs = .one_usize,
+ });
+ _ = try loop_scope.addBin(.store, index_ptr, index_plus_one);
const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
_ = try loop_scope.addNode(repeat_tag, node);
@@ -6500,21 +6461,43 @@ fn forExpr(
const name_str_index = try astgen.identAsString(ident_tok);
try astgen.detectLocalShadowing(capture_sub_scope, name_str_index, ident_tok, capture_name, .capture);
- const loaded = if (capture_is_ref)
- loaded_ptrs[i]
- else
- try then_scope.addUnNode(.load, loaded_ptrs[i], input);
+ const capture_inst = inst: {
+ const is_counter = node_tags[input] == .for_range;
+
+ if (indexables[i] == .none) {
+ // Special case: the main index can be used directly.
+ assert(is_counter);
+ assert(!capture_is_ref);
+ break :inst index;
+ }
+
+ // For counters, we add the index variable to the start value; for
+ // indexables, we use it as an element index. This is so similar
+ // that they can share the same code paths, branching only on the
+ // ZIR tag.
+ const switch_cond = (@as(u2, @boolToInt(capture_is_ref)) << 1) | @boolToInt(is_counter);
+ const tag: Zir.Inst.Tag = switch (switch_cond) {
+ 0b00 => .elem_val,
+ 0b01 => .add,
+ 0b10 => .elem_ptr,
+ 0b11 => unreachable, // compile error emitted already
+ };
+ break :inst try then_scope.addPlNode(tag, input, Zir.Inst.Bin{
+ .lhs = indexables[i],
+ .rhs = index,
+ });
+ };
capture_scopes[i] = .{
.parent = capture_sub_scope,
.gen_zir = &then_scope,
.name = name_str_index,
- .inst = loaded,
+ .inst = capture_inst,
.token_src = ident_tok,
.id_cat = .capture,
};
- try then_scope.addDbgVar(.dbg_var_val, name_str_index, loaded);
+ try then_scope.addDbgVar(.dbg_var_val, name_str_index, capture_inst);
capture_sub_scope = &capture_scopes[i].base;
}
diff --git a/src/Sema.zig b/src/Sema.zig
index c251aa9fbf..cb40a85364 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1035,6 +1035,7 @@ fn analyzeBodyInner(
.@"await" => try sema.zirAwait(block, inst),
.array_base_ptr => try sema.zirArrayBasePtr(block, inst),
.field_base_ptr => try sema.zirFieldBasePtr(block, inst),
+ .for_len => try sema.zirForLen(block, inst),
.clz => try sema.zirBitCount(block, inst, .clz, Value.clz),
.ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz),
@@ -1386,11 +1387,6 @@ fn analyzeBodyInner(
i += 1;
continue;
},
- .for_check_lens => {
- try sema.zirForCheckLens(block, inst);
- i += 1;
- continue;
- },
// Special case instructions to handle comptime control flow.
.@"break" => {
@@ -3924,6 +3920,16 @@ fn zirFieldBasePtr(
return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType());
}
+fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
+ const args = sema.code.refSlice(extra.end, extra.data.operands_len);
+ const src = inst_data.src();
+
+ _ = args;
+ return sema.fail(block, src, "TODO implement zirForCheckLens", .{});
+}
+
fn validateArrayInitTy(
sema: *Sema,
block: *Block,
@@ -9649,7 +9655,7 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemPtr(block, src, array_ptr, elem_index, src, false, .One);
+ return sema.elemPtr(block, src, array_ptr, elem_index, src, false);
}
fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9662,7 +9668,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, .One);
+ return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false);
}
fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9673,9 +9679,8 @@ fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.ptr);
- const elem_index = try sema.addIntUnsigned(Type.usize, extra.bits.index);
- const size: std.builtin.Type.Pointer.Size = if (extra.bits.manyptr) .Many else .One;
- return sema.elemPtr(block, src, array_ptr, elem_index, src, true, size);
+ const elem_index = try sema.addIntUnsigned(Type.usize, extra.index);
+ return sema.elemPtr(block, src, array_ptr, elem_index, src, true);
}
fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -17102,16 +17107,6 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
return sema.popErrorReturnTrace(start_block, src, operand, saved_index);
}
-fn zirForCheckLens(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
- const args = sema.code.refSlice(extra.end, extra.data.operands_len);
- const src = inst_data.src();
-
- _ = args;
- return sema.fail(block, src, "TODO implement zirForCheckLens", .{});
-}
-
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion);
@@ -22906,7 +22901,7 @@ fn panicSentinelMismatch(
const actual_sentinel = if (ptr_ty.isSlice())
try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index)
else blk: {
- const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null, .One);
+ const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null);
const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty);
break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
};
@@ -24073,7 +24068,6 @@ fn elemPtr(
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
init: bool,
- size: std.builtin.Type.Pointer.Size,
) CompileError!Air.Inst.Ref {
const indexable_ptr_src = src; // TODO better source location
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
@@ -24100,12 +24094,13 @@ fn elemPtr(
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt(target));
const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
- const elem_ptr_ty = try sema.elemPtrType(indexable_ty, index, size);
- return sema.addConstant(elem_ptr_ty, elem_ptr);
+ const result_ty = try sema.elemPtrType(indexable_ty, index);
+ return sema.addConstant(result_ty, elem_ptr);
};
- const elem_ptr_ty = try sema.elemPtrType(indexable_ty, null, size);
+ const result_ty = try sema.elemPtrType(indexable_ty, null);
+
try sema.requireRuntimeBlock(block, src, runtime_src);
- return block.addPtrElemPtr(indexable, elem_index, elem_ptr_ty);
+ return block.addPtrElemPtr(indexable, elem_index, result_ty);
},
.One => {
assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
@@ -24167,7 +24162,7 @@ fn elemVal(
},
.One => {
assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
- const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, .One);
+ const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false);
return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
},
},
@@ -24405,7 +24400,7 @@ fn elemPtrArray(
break :o index;
} else null;
- const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset, .One);
+ const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset);
if (maybe_undef_array_ptr_val) |array_ptr_val| {
if (array_ptr_val.isUndef()) {
@@ -24510,7 +24505,7 @@ fn elemPtrSlice(
break :o index;
} else null;
- const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset, .One);
+ const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset);
if (maybe_undef_slice_val) |slice_val| {
if (slice_val.isUndef()) {
@@ -26240,7 +26235,7 @@ fn storePtr2(
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
const elem_index = try sema.addIntUnsigned(Type.usize, i);
- const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, .One);
+ const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
return;
@@ -33277,12 +33272,7 @@ fn compareVector(
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `analyzePtrArithmetic`.
-fn elemPtrType(
- sema: *Sema,
- ptr_ty: Type,
- offset: ?usize,
- size: std.builtin.Type.Pointer.Size,
-) !Type {
+fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
const ptr_info = ptr_ty.ptrInfo().data;
const elem_ty = ptr_ty.elemType2();
const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0;
@@ -33327,7 +33317,6 @@ fn elemPtrType(
break :a new_align;
};
return try Type.ptr(sema.arena, sema.mod, .{
- .size = size,
.pointee_type = elem_ty,
.mutable = ptr_info.mutable,
.@"addrspace" = ptr_info.@"addrspace",
diff --git a/src/Zir.zig b/src/Zir.zig
index edbd70e170..e215dfac10 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -79,7 +79,6 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, code.extra[i]),
Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]),
Inst.FuncFancy.Bits => @bitCast(Inst.FuncFancy.Bits, code.extra[i]),
- Inst.ElemPtrImm.Bits => @bitCast(Inst.ElemPtrImm.Bits, code.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
@@ -501,14 +500,14 @@ pub const Inst = struct {
/// Uses the `node` field.
repeat_inline,
/// Asserts that all the lengths provided match. Used to build a for loop.
- /// Return value is always void.
+ /// Return value is the length as a usize.
/// Uses the `pl_node` field with payload `MultiOp`.
/// There is exactly one item corresponding to each AST node inside the for
- /// loop condition. Each item may be `none`, indicating an unbounded range.
+ /// loop condition. Any item may be `none`, indicating an unbounded range.
/// Illegal behaviors:
/// * If all lengths are unbounded ranges (always a compile error).
/// * If any two lengths do not match each other.
- for_check_lens,
+ for_len,
/// Merge two error sets into one, `E1 || E2`.
/// Uses the `pl_node` field with payload `Bin`.
merge_error_sets,
@@ -1254,7 +1253,7 @@ pub const Inst = struct {
.defer_err_code,
.save_err_ret_index,
.restore_err_ret_index,
- .for_check_lens,
+ .for_len,
=> false,
.@"break",
@@ -1322,7 +1321,6 @@ pub const Inst = struct {
.memcpy,
.memset,
.check_comptime_control_flow,
- .for_check_lens,
.@"defer",
.defer_err_code,
.restore_err_ret_index,
@@ -1547,6 +1545,7 @@ pub const Inst = struct {
.repeat_inline,
.panic,
.panic_comptime,
+ .for_len,
.@"try",
.try_ptr,
//.try_inline,
@@ -1602,7 +1601,7 @@ pub const Inst = struct {
.@"break" = .@"break",
.break_inline = .@"break",
.check_comptime_control_flow = .un_node,
- .for_check_lens = .pl_node,
+ .for_len = .pl_node,
.call = .pl_node,
.cmp_lt = .pl_node,
.cmp_lte = .pl_node,
@@ -2975,13 +2974,7 @@ pub const Inst = struct {
pub const ElemPtrImm = struct {
ptr: Ref,
- bits: Bits,
-
- pub const Bits = packed struct(u32) {
- index: u31,
- /// Controls whether the type returned is `*T` or `[*]T`.
- manyptr: bool = false,
- };
+ index: u32,
};
/// 0. multi_cases_len: u32 // If has_multi_cases is set.
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 0977a88d53..e5ce9321f5 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -355,7 +355,7 @@ const Writer = struct {
.array_type,
=> try self.writePlNodeBin(stream, inst),
- .for_check_lens => try self.writePlNodeMultiOp(stream, inst),
+ .for_len => try self.writePlNodeMultiOp(stream, inst),
.elem_ptr_imm => try self.writeElemPtrImm(stream, inst),
@@ -888,9 +888,7 @@ const Writer = struct {
const extra = self.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
try self.writeInstRef(stream, extra.ptr);
- try stream.print(", {d}", .{extra.bits.index});
- try self.writeFlag(stream, ", manyptr", extra.bits.manyptr);
- try stream.writeAll(") ");
+ try stream.print(", {d}) ", .{extra.index});
try self.writeSrc(stream, inst_data.src());
}
From 5029e5364caccac07f2296c1f30f2f238f13864d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 17 Feb 2023 17:14:28 -0700
Subject: [PATCH 11/36] make zig fmt perform upgrade to new for loop syntax
The intent here is to revert this commit after Zig 0.10.0 is released.
---
lib/std/zig/Ast.zig | 12 ++++++++++++
lib/std/zig/Parse.zig | 5 ++++-
lib/std/zig/render.zig | 11 +++++++++++
src/AstGen.zig | 17 +++++++++++++++++
4 files changed, 44 insertions(+), 1 deletion(-)
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 42eb280966..3784f06160 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -2525,6 +2525,18 @@ pub const full = struct {
then_expr: Node.Index,
else_expr: Node.Index,
};
+
+ /// TODO: remove this after zig 0.11.0 is tagged.
+ pub fn isOldSyntax(f: For, token_tags: []const Token.Tag) bool {
+ if (f.ast.inputs.len != 1) return false;
+ if (token_tags[f.payload_token + 1] == .comma) return true;
+ if (token_tags[f.payload_token] == .asterisk and
+ token_tags[f.payload_token + 2] == .comma)
+ {
+ return true;
+ }
+ return false;
+ }
};
pub const ContainerField = struct {
diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig
index 7ef884d22c..258e3b0368 100644
--- a/lib/std/zig/Parse.zig
+++ b/lib/std/zig/Parse.zig
@@ -2143,7 +2143,10 @@ fn forPrefix(p: *Parse) Error!usize {
_ = p.eatToken(.asterisk);
const identifier = try p.expectToken(.identifier);
captures += 1;
- if (captures > inputs and !warned_excess) {
+ if (!warned_excess and inputs == 1 and captures == 2) {
+ // TODO remove the above condition after 0.11.0 release. this silences
+ // the error so that zig fmt can fix it.
+ } else if (captures > inputs and !warned_excess) {
try p.warnMsg(.{ .tag = .extra_for_capture, .token = identifier });
warned_excess = true;
}
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 0e8d3125ac..ea3748a9bd 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -1211,6 +1211,17 @@ fn renderFor(gpa: Allocator, ais: *Ais, tree: Ast, for_node: Ast.full.For, space
const lparen = for_node.ast.for_token + 1;
try renderParamList(gpa, ais, tree, lparen, for_node.ast.inputs, .space);
+ // TODO remove this after zig 0.11.0
+ if (for_node.isOldSyntax(token_tags)) {
+ // old: for (a) |b, c| {}
+ // new: for (a, 0..) |b, c| {}
+ const array_list = ais.underlying_writer.context; // abstractions? who needs 'em!
+ if (mem.endsWith(u8, array_list.items, ") ")) {
+ array_list.items.len -= 2;
+ try array_list.appendSlice(", 0..) ");
+ }
+ }
+
var cur = for_node.payload_token;
const pipe = std.mem.indexOfScalarPos(std.zig.Token.Tag, token_tags, cur, .pipe).?;
if (token_tags[pipe - 1] == .comma) {
diff --git a/src/AstGen.zig b/src/AstGen.zig
index b90201713e..c39dcfc9ef 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -6302,6 +6302,23 @@ fn forExpr(
const node_data = tree.nodes.items(.data);
const gpa = astgen.gpa;
+ // TODO this can be deleted after zig 0.11.0 is released because it
+ // will be caught in the parser.
+ if (for_full.isOldSyntax(token_tags)) {
+ return astgen.failTokNotes(
+ for_full.payload_token + 2,
+ "extra capture in for loop",
+ .{},
+ &[_]u32{
+ try astgen.errNoteTok(
+ for_full.payload_token + 2,
+ "run 'zig fmt' to upgrade your code automatically",
+ .{},
+ ),
+ },
+ );
+ }
+
// For counters, this is the start value; for indexables, this is the base
// pointer that can be used with elem_ptr and similar instructions.
// Special value `none` means that this is a counter and its start value is
From 321ccbdc525ab0f5862e42378b962c10ec54e4a1 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 17 Feb 2023 19:54:26 -0700
Subject: [PATCH 12/36] Sema: implement for_len
This also makes another breaking change to for loops: in order to
capture a pointer of an element, one must take the address of array
values. This simplifies a lot of things, and makes more sense than how
it was before semantically.
It is still legal to use a for loop on an array value if the
corresponding element capture is byval instead of byref.
---
lib/std/builtin.zig | 1 +
src/AstGen.zig | 3 +-
src/Sema.zig | 157 ++++++++++++++++++++++++++++++++++----------
src/type.zig | 13 ++++
4 files changed, 136 insertions(+), 38 deletions(-)
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 74c61d229b..869756fe5c 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -975,6 +975,7 @@ pub const panic_messages = struct {
pub const unwrap_error = "attempt to unwrap error";
pub const index_out_of_bounds = "index out of bounds";
pub const start_index_greater_than_end = "start index is larger than end index";
+ pub const for_len_mismatch = "for loop over objects with non-equal lengths";
};
pub noinline fn returnError(st: *StackTrace) void {
diff --git a/src/AstGen.zig b/src/AstGen.zig
index c39dcfc9ef..98883b0f8d 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -6381,11 +6381,10 @@ fn forExpr(
lens[i] = range_len;
} else {
const indexable = try expr(parent_gz, scope, .{ .rl = .none }, input);
- const indexable_len = try parent_gz.addUnNode(.indexable_ptr_len, indexable, input);
any_len_checks = true;
indexables[i] = indexable;
- lens[i] = indexable_len;
+ lens[i] = indexable;
}
}
}
diff --git a/src/Sema.zig b/src/Sema.zig
index cb40a85364..aeb4e25a24 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3378,26 +3378,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
else
object_ty;
- if (!array_ty.isIndexable()) {
- const msg = msg: {
- const msg = try sema.errMsg(
- block,
- src,
- "type '{}' does not support indexing",
- .{array_ty.fmt(sema.mod)},
- );
- errdefer msg.destroy(sema.gpa);
- try sema.errNote(
- block,
- src,
- msg,
- "for loop operand must be an array, slice, tuple, or vector",
- .{},
- );
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
- }
+ try checkIndexable(sema, block, src, array_ty);
return sema.fieldVal(block, src, object, "len", src);
}
@@ -3921,13 +3902,70 @@ fn zirFieldBasePtr(
}
fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const args = sema.code.refSlice(extra.end, extra.data.operands_len);
const src = inst_data.src();
- _ = args;
- return sema.fail(block, src, "TODO implement zirForCheckLens", .{});
+ var len: Air.Inst.Ref = .none;
+ var len_val: ?Value = null;
+ var len_idx: usize = undefined;
+ var any_runtime = false;
+
+ const runtime_arg_lens = try gpa.alloc(Air.Inst.Ref, args.len);
+ defer gpa.free(runtime_arg_lens);
+
+ // First pass to look for comptime values.
+ for (args) |zir_arg, i| {
+ runtime_arg_lens[i] = .none;
+ if (zir_arg == .none) continue;
+ const object = try sema.resolveInst(zir_arg);
+ const object_ty = sema.typeOf(object);
+ // Each arg could be an indexable, or a range, in which case the length
+ // is passed directly as an integer.
+ const arg_len = if (object_ty.zigTypeTag() == .Int) object else l: {
+ try checkIndexable(sema, block, src, object_ty);
+ if (!object_ty.indexableHasLen()) continue;
+
+ break :l try sema.fieldVal(block, src, object, "len", src);
+ };
+ if (len == .none) {
+ len = arg_len;
+ len_idx = i;
+ }
+ if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| {
+ if (len_val) |v| {
+ if (!(try sema.valuesEqual(arg_val, v, Type.usize))) {
+ // TODO error notes for each arg stating the differing values
+ return sema.fail(block, src, "non-matching for loop lengths", .{});
+ }
+ } else {
+ len = arg_len;
+ len_val = arg_val;
+ len_idx = i;
+ }
+ continue;
+ }
+ runtime_arg_lens[i] = arg_len;
+ any_runtime = true;
+ }
+
+ if (len == .none) {
+ return sema.fail(block, src, "non-obvious infinite loop", .{});
+ }
+
+ // Now for the runtime checks.
+ if (any_runtime and block.wantSafety()) {
+ for (runtime_arg_lens) |arg_len, i| {
+ if (arg_len == .none) continue;
+ if (i == len_idx) continue;
+ const ok = try block.addBinOp(.cmp_eq, len, arg_len);
+ try sema.addSafetyCheck(block, ok, .for_len_mismatch);
+ }
+ }
+
+ return len;
}
fn validateArrayInitTy(
@@ -9655,7 +9693,7 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemPtr(block, src, array_ptr, elem_index, src, false);
+ return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false);
}
fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -22687,6 +22725,7 @@ pub const PanicId = enum {
unwrap_error,
index_out_of_bounds,
start_index_greater_than_end,
+ for_len_mismatch,
};
fn addSafetyCheck(
@@ -24076,21 +24115,46 @@ fn elemPtr(
.Pointer => indexable_ptr_ty.elemType(),
else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}),
};
+ switch (indexable_ty.zigTypeTag()) {
+ .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init),
+ .Struct => {
+ // Tuple field access.
+ const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
+ const index = @intCast(u32, index_val.toUnsignedInt(target));
+ return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
+ },
+ else => {
+ const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
+ return elemPtrOneLayerOnly(sema, block, src, indexable, elem_index, elem_index_src, init);
+ },
+ }
+}
+
+fn elemPtrOneLayerOnly(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ indexable: Air.Inst.Ref,
+ elem_index: Air.Inst.Ref,
+ elem_index_src: LazySrcLoc,
+ init: bool,
+) CompileError!Air.Inst.Ref {
+ const indexable_src = src; // TODO better source location
+ const indexable_ty = sema.typeOf(indexable);
if (!indexable_ty.isIndexable()) {
return sema.fail(block, src, "element access of non-indexable type '{}'", .{indexable_ty.fmt(sema.mod)});
}
+ const target = sema.mod.getTarget();
switch (indexable_ty.zigTypeTag()) {
.Pointer => {
- // In all below cases, we have to deref the ptr operand to get the actual indexable pointer.
- const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
switch (indexable_ty.ptrSize()) {
- .Slice => return sema.elemPtrSlice(block, src, indexable_ptr_src, indexable, elem_index_src, elem_index),
+ .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index),
.Many, .C => {
- const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_ptr_src, indexable);
+ const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
const runtime_src = rs: {
- const ptr_val = maybe_ptr_val orelse break :rs indexable_ptr_src;
+ const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt(target));
const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
@@ -24104,18 +24168,16 @@ fn elemPtr(
},
.One => {
assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
- return sema.elemPtrArray(block, src, indexable_ptr_src, indexable, elem_index_src, elem_index, init);
+ return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init);
},
}
},
- .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init),
- .Struct => {
- // Tuple field access.
- const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
- const index = @intCast(u32, index_val.toUnsignedInt(target));
- return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
+ else => {
+ // TODO add note pointing at corresponding for loop input and suggest using '&'
+ return sema.fail(block, indexable_src, "pointer capture of non pointer type '{}'", .{
+ indexable_ty.fmt(sema.mod),
+ });
},
- else => unreachable,
}
}
@@ -30202,6 +30264,29 @@ fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_
}
}
+fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, array_ty: Type) !void {
+ if (!array_ty.isIndexable()) {
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ src,
+ "type '{}' does not support indexing",
+ .{array_ty.fmt(sema.mod)},
+ );
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(
+ block,
+ src,
+ msg,
+ "for loop operand must be an array, slice, tuple, or vector",
+ .{},
+ );
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+}
+
fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
const resolved_ty = try sema.resolveTypeFields(ty);
const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
diff --git a/src/type.zig b/src/type.zig
index a13e30cb4c..6226a7f2f7 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -5326,6 +5326,19 @@ pub const Type = extern union {
};
}
+ pub fn indexableHasLen(ty: Type) bool {
+ return switch (ty.zigTypeTag()) {
+ .Array, .Vector => true,
+ .Pointer => switch (ty.ptrSize()) {
+ .Many, .C => false,
+ .Slice => true,
+ .One => ty.elemType().zigTypeTag() == .Array,
+ },
+ .Struct => ty.isTuple(),
+ else => false,
+ };
+ }
+
/// Returns null if the type has no namespace.
pub fn getNamespace(self: Type) ?*Module.Namespace {
return switch (self.tag()) {
From f0530385b57218ef323747bdb7438330a07d25cc Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 17 Feb 2023 20:23:33 -0700
Subject: [PATCH 13/36] update existing behavior tests and std lib to new for
loop semantics
---
lib/std/crypto/blake3.zig | 2 +-
test/behavior/array.zig | 2 +-
test/behavior/bit_shifting.zig | 4 ++--
test/behavior/bugs/1607.zig | 2 +-
test/behavior/bugs/920.zig | 6 +++---
test/behavior/call.zig | 2 +-
test/behavior/const_slice_child.zig | 4 ++--
test/behavior/eval.zig | 6 +++---
test/behavior/fn.zig | 2 +-
test/behavior/for.zig | 20 ++++++++++----------
test/behavior/generics.zig | 2 +-
test/behavior/lower_strlit_to_vector.zig | 2 +-
test/behavior/math.zig | 2 +-
test/behavior/slice.zig | 4 ++--
test/behavior/tuple.zig | 2 +-
test/behavior/vector.zig | 22 +++++++++++-----------
test/behavior/void.zig | 2 +-
test/standalone/brace_expansion/main.zig | 4 ++--
test/tests.zig | 2 +-
19 files changed, 46 insertions(+), 46 deletions(-)
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index 4f8d023532..0334abfdb0 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -211,7 +211,7 @@ fn first8Words(words: [16]u32) [8]u32 {
fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 {
var words: [count]u32 = undefined;
- for (words) |*word, i| {
+ for (&words) |*word, i| {
word.* = mem.readIntSliceLittle(u32, bytes[4 * i ..]);
}
return words;
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index 012e078531..a5ecd6f115 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -185,7 +185,7 @@ test "nested arrays of strings" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" };
- for (array_of_strings) |s, i| {
+ for (array_of_strings, 0..) |s, i| {
if (i == 0) try expect(mem.eql(u8, s, "hello"));
if (i == 1) try expect(mem.eql(u8, s, "this"));
if (i == 2) try expect(mem.eql(u8, s, "is"));
diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig
index 9289badb1f..97186eb54a 100644
--- a/test/behavior/bit_shifting.zig
+++ b/test/behavior/bit_shifting.zig
@@ -84,14 +84,14 @@ fn testShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, c
var table = Table.create();
var node_buffer: [node_count]Table.Node = undefined;
- for (node_buffer) |*node, i| {
+ for (&node_buffer, 0..) |*node, i| {
const key = @intCast(Key, i);
try expect(table.get(key) == null);
node.init(key, {});
table.put(node);
}
- for (node_buffer) |*node, i| {
+ for (&node_buffer, 0..) |*node, i| {
try expect(table.get(@intCast(Key, i)) == node);
}
}
diff --git a/test/behavior/bugs/1607.zig b/test/behavior/bugs/1607.zig
index f857e19c04..d9e97e37b7 100644
--- a/test/behavior/bugs/1607.zig
+++ b/test/behavior/bugs/1607.zig
@@ -5,7 +5,7 @@ const builtin = @import("builtin");
const a = [_]u8{ 1, 2, 3 };
fn checkAddress(s: []const u8) !void {
- for (s) |*i, j| {
+ for (s, 0..) |*i, j| {
try testing.expect(i == &a[j]);
}
}
diff --git a/test/behavior/bugs/920.zig b/test/behavior/bugs/920.zig
index cda02c4670..b1c9bc07a3 100644
--- a/test/behavior/bugs/920.zig
+++ b/test/behavior/bugs/920.zig
@@ -23,13 +23,13 @@ fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, co
tables.x[0] = v / f(r);
tables.x[1] = r;
- for (tables.x[2..256]) |*entry, i| {
+ for (tables.x[2..256], 0..) |*entry, i| {
const last = tables.x[2 + i - 1];
entry.* = f_inv(v / last + f(last));
}
tables.x[256] = 0;
- for (tables.f[0..]) |*entry, i| {
+ for (tables.f[0..], 0..) |*entry, i| {
entry.* = f(tables.x[i]);
}
@@ -67,7 +67,7 @@ test "bug 920 fixed" {
break :blk ZigTableGen(true, norm_r, norm_v, norm_f, norm_f_inv, norm_zero_case);
};
- for (NormalDist1.f) |_, i| {
+ for (NormalDist1.f, 0..) |_, i| {
// Here we use `expectApproxEqAbs` instead of `expectEqual` to account for the small
// differences in math functions of different libcs. For example, if the compiler
// links against glibc, but the target is musl libc, then these values might be
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index 4cdd54584f..9622aa3144 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -364,7 +364,7 @@ test "Enum constructed by @Type passed as generic argument" {
try expect(@enumToInt(a) == b);
}
};
- inline for (@typeInfo(S.E).Enum.fields) |_, i| {
+ inline for (@typeInfo(S.E).Enum.fields, 0..) |_, i| {
try S.foo(@intToEnum(S.E, i), i);
}
}
diff --git a/test/behavior/const_slice_child.zig b/test/behavior/const_slice_child.zig
index c8711777b3..09c6a7233d 100644
--- a/test/behavior/const_slice_child.zig
+++ b/test/behavior/const_slice_child.zig
@@ -26,7 +26,7 @@ fn foo(args: [][]const u8) !void {
fn bar(argc: usize) !void {
var args_buffer: [10][]const u8 = undefined;
const args = args_buffer[0..argc];
- for (args) |_, i| {
+ for (args, 0..) |_, i| {
const ptr = argv[i];
args[i] = ptr[0..strlen(ptr)];
}
@@ -41,7 +41,7 @@ fn strlen(ptr: [*]const u8) usize {
fn streql(a: []const u8, b: []const u8) bool {
if (a.len != b.len) return false;
- for (a) |item, index| {
+ for (a, 0..) |item, index| {
if (b[index] != item) return false;
}
return true;
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index 2a1f2b7155..680b0576d5 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -317,7 +317,7 @@ test "create global array with for loop" {
const global_array = x: {
var result: [10]usize = undefined;
- for (result) |*item, index| {
+ for (&result, 0..) |*item, index| {
item.* = index * index;
}
break :x result;
@@ -447,7 +447,7 @@ test "binary math operator in partially inlined function" {
var s: [4]u32 = undefined;
var b: [16]u8 = undefined;
- for (b) |*r, i|
+ for (&b, 0..) |*r, i|
r.* = @intCast(u8, i + 1);
copyWithPartialInline(s[0..], b[0..]);
@@ -915,7 +915,7 @@ test "comptime pointer load through elem_ptr" {
comptime {
var array: [10]S = undefined;
- for (array) |*elem, i| {
+ for (&array, 0..) |*elem, i| {
elem.* = .{
.x = i,
};
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index d5f959e507..9c37b9a8d9 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -311,7 +311,7 @@ test "function pointers" {
&fn3,
&fn4,
};
- for (fns) |f, i| {
+ for (fns, 0..) |f, i| {
try expect(f() == @intCast(u32, i) + 5);
}
}
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index f48000a871..c9ae2f4461 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -55,9 +55,9 @@ fn testContinueOuter() !void {
}
test "ignore lval with underscore (for loop)" {
- for ([_]void{}) |_, i| {
+ for ([_]void{}, 0..) |_, i| {
_ = i;
- for ([_]void{}) |_, j| {
+ for ([_]void{}, 0..) |_, j| {
_ = j;
break;
}
@@ -81,7 +81,7 @@ test "basic for loop" {
buffer[buf_index] = item;
buf_index += 1;
}
- for (array) |item, index| {
+ for (array, 0..) |item, index| {
_ = item;
buffer[buf_index] = @intCast(u8, index);
buf_index += 1;
@@ -91,7 +91,7 @@ test "basic for loop" {
buffer[buf_index] = item;
buf_index += 1;
}
- for (array_ptr) |item, index| {
+ for (array_ptr, 0..) |item, index| {
_ = item;
buffer[buf_index] = @intCast(u8, index);
buf_index += 1;
@@ -101,7 +101,7 @@ test "basic for loop" {
buffer[buf_index] = item;
buf_index += 1;
}
- for (unknown_size) |_, index| {
+ for (unknown_size, 0..) |_, index| {
buffer[buf_index] = @intCast(u8, index);
buf_index += 1;
}
@@ -163,11 +163,11 @@ test "for loop with pointer elem var" {
mangleString(target[0..]);
try expect(mem.eql(u8, &target, "bcdefgh"));
- for (source) |*c, i| {
+ for (source, 0..) |*c, i| {
_ = i;
try expect(@TypeOf(c) == *const u8);
}
- for (target) |*c, i| {
+ for (&target, 0..) |*c, i| {
_ = i;
try expect(@TypeOf(c) == *u8);
}
@@ -186,7 +186,7 @@ test "for copies its payload" {
const S = struct {
fn doTheTest() !void {
var x = [_]usize{ 1, 2, 3 };
- for (x) |value, i| {
+ for (x, 0..) |value, i| {
// Modify the original array
x[i] += 99;
try expect(value == i + 1);
@@ -206,8 +206,8 @@ test "for on slice with allowzero ptr" {
const S = struct {
fn doTheTest(slice: []const u8) !void {
var ptr = @ptrCast([*]allowzero const u8, slice.ptr)[0..slice.len];
- for (ptr) |x, i| try expect(x == i + 1);
- for (ptr) |*x, i| try expect(x.* == i + 1);
+ for (ptr, 0..) |x, i| try expect(x == i + 1);
+ for (ptr, 0..) |*x, i| try expect(x.* == i + 1);
}
};
try S.doTheTest(&[_]u8{ 1, 2, 3, 4 });
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index 17194fc445..205823430c 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -325,7 +325,7 @@ test "generic function instantiation non-duplicates" {
const S = struct {
fn copy(comptime T: type, dest: []T, source: []const T) void {
@export(foo, .{ .name = "test_generic_instantiation_non_dupe" });
- for (source) |s, i| dest[i] = s;
+ for (source, 0..) |s, i| dest[i] = s;
}
fn foo() callconv(.C) void {}
diff --git a/test/behavior/lower_strlit_to_vector.zig b/test/behavior/lower_strlit_to_vector.zig
index 4ca4cf716e..adbca8f0df 100644
--- a/test/behavior/lower_strlit_to_vector.zig
+++ b/test/behavior/lower_strlit_to_vector.zig
@@ -12,7 +12,7 @@ test "strlit to vector" {
const strlit = "0123456789abcdef0123456789ABCDEF";
const vec_from_strlit: @Vector(32, u8) = strlit.*;
const arr_from_vec = @as([32]u8, vec_from_strlit);
- for (strlit) |c, i|
+ for (strlit, 0..) |c, i|
try std.testing.expect(c == arr_from_vec[i]);
try std.testing.expectEqualSlices(u8, strlit, &arr_from_vec);
}
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index c5e36da383..8ab8614605 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -1221,7 +1221,7 @@ test "quad hex float literal parsing accurate" {
0xb6a0000000000000,
};
- for (exp2ft) |x, i| {
+ for (exp2ft, 0..) |x, i| {
try expect(@bitCast(u64, x) == answers[i]);
}
}
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 76cadc8d84..435e1887bb 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -99,7 +99,7 @@ test "comptime slice of slice preserves comptime var" {
test "slice of type" {
comptime {
var types_array = [_]type{ i32, f64, type };
- for (types_array) |T, i| {
+ for (types_array, 0..) |T, i| {
switch (i) {
0 => try expect(T == i32),
1 => try expect(T == f64),
@@ -107,7 +107,7 @@ test "slice of type" {
else => unreachable,
}
}
- for (types_array[0..]) |T, i| {
+ for (types_array[0..], 0..) |T, i| {
switch (i) {
0 => try expect(T == i32),
1 => try expect(T == f64),
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index f1c15fb3e3..13b02b40e8 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -40,7 +40,7 @@ test "tuple multiplication" {
{
const t = .{ 1, 2, 3 } ** 4;
try expect(@typeInfo(@TypeOf(t)).Struct.fields.len == 12);
- inline for (t) |x, i| try expect(x == 1 + i % 3);
+ inline for (t, 0..) |x, i| try expect(x == 1 + i % 3);
}
}
};
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 8bcae5fefc..e983e0cfb0 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -456,20 +456,20 @@ test "vector division operators" {
fn doTheTestDiv(comptime T: type, x: @Vector(4, T), y: @Vector(4, T)) !void {
if (!comptime std.meta.trait.isSignedInt(T)) {
const d0 = x / y;
- for (@as([4]T, d0)) |v, i| {
+ for (@as([4]T, d0), 0..) |v, i| {
try expect(x[i] / y[i] == v);
}
}
const d1 = @divExact(x, y);
- for (@as([4]T, d1)) |v, i| {
+ for (@as([4]T, d1), 0..) |v, i| {
try expect(@divExact(x[i], y[i]) == v);
}
const d2 = @divFloor(x, y);
- for (@as([4]T, d2)) |v, i| {
+ for (@as([4]T, d2), 0..) |v, i| {
try expect(@divFloor(x[i], y[i]) == v);
}
const d3 = @divTrunc(x, y);
- for (@as([4]T, d3)) |v, i| {
+ for (@as([4]T, d3), 0..) |v, i| {
try expect(@divTrunc(x[i], y[i]) == v);
}
}
@@ -477,16 +477,16 @@ test "vector division operators" {
fn doTheTestMod(comptime T: type, x: @Vector(4, T), y: @Vector(4, T)) !void {
if ((!comptime std.meta.trait.isSignedInt(T)) and @typeInfo(T) != .Float) {
const r0 = x % y;
- for (@as([4]T, r0)) |v, i| {
+ for (@as([4]T, r0), 0..) |v, i| {
try expect(x[i] % y[i] == v);
}
}
const r1 = @mod(x, y);
- for (@as([4]T, r1)) |v, i| {
+ for (@as([4]T, r1), 0..) |v, i| {
try expect(@mod(x[i], y[i]) == v);
}
const r2 = @rem(x, y);
- for (@as([4]T, r2)) |v, i| {
+ for (@as([4]T, r2), 0..) |v, i| {
try expect(@rem(x[i], y[i]) == v);
}
}
@@ -538,7 +538,7 @@ test "vector bitwise not operator" {
const S = struct {
fn doTheTestNot(comptime T: type, x: @Vector(4, T)) !void {
var y = ~x;
- for (@as([4]T, y)) |v, i| {
+ for (@as([4]T, y), 0..) |v, i| {
try expect(~x[i] == v);
}
}
@@ -577,11 +577,11 @@ test "vector shift operators" {
var yv = @as(@Vector(N, TY), y);
var z0 = xv >> yv;
- for (@as([N]TX, z0)) |v, i| {
+ for (@as([N]TX, z0), 0..) |v, i| {
try expect(x[i] >> y[i] == v);
}
var z1 = xv << yv;
- for (@as([N]TX, z1)) |v, i| {
+ for (@as([N]TX, z1), 0..) |v, i| {
try expect(x[i] << y[i] == v);
}
}
@@ -594,7 +594,7 @@ test "vector shift operators" {
var yv = @as(@Vector(N, TY), y);
var z = if (dir == .Left) @shlExact(xv, yv) else @shrExact(xv, yv);
- for (@as([N]TX, z)) |v, i| {
+ for (@as([N]TX, z), 0..) |v, i| {
const check = if (dir == .Left) x[i] << y[i] else x[i] >> y[i];
try expect(check == v);
}
diff --git a/test/behavior/void.zig b/test/behavior/void.zig
index 85a9178145..9b6c05d07d 100644
--- a/test/behavior/void.zig
+++ b/test/behavior/void.zig
@@ -22,7 +22,7 @@ test "iterate over a void slice" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var j: usize = 0;
- for (times(10)) |_, i| {
+ for (times(10), 0..) |_, i| {
try expect(i == j);
j += 1;
}
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig
index 54590c65b2..dcdcad3865 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion/main.zig
@@ -29,7 +29,7 @@ fn tokenize(input: []const u8) !ArrayList(Token) {
var tok_begin: usize = undefined;
var state = State.Start;
- for (input) |b, i| {
+ for (input, 0..) |b, i| {
switch (state) {
.Start => switch (b) {
'a'...'z', 'A'...'Z' => {
@@ -159,7 +159,7 @@ fn expandString(input: []const u8, output: *ArrayList(u8)) !void {
try expandNode(root, &result_list);
try output.resize(0);
- for (result_list.items) |buf, i| {
+ for (result_list.items, 0..) |buf, i| {
if (i != 0) {
try output.append(' ');
}
diff --git a/test/tests.zig b/test/tests.zig
index d3ebe5a046..035311372f 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -958,7 +958,7 @@ pub const StackTracesContext = struct {
// locate delims/anchor
const delims = [_][]const u8{ ":", ":", ":", " in ", "(", ")" };
var marks = [_]usize{0} ** delims.len;
- for (delims) |delim, i| {
+ for (delims, 0..) |delim, i| {
marks[i] = mem.indexOfPos(u8, line, pos, delim) orelse {
// unexpected pattern: emit raw line and cont
try buf.appendSlice(line);
From aeaef8c0ffadab4145fd002f2edd87a6db66ebd1 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 09:02:57 -0700
Subject: [PATCH 14/36] update std lib and compiler sources to new for loop
syntax
---
lib/compiler_rt/atomics.zig | 2 +-
lib/compiler_rt/comparedf2_test.zig | 4 +-
lib/compiler_rt/comparesf2_test.zig | 4 +-
lib/std/Build.zig | 8 +-
lib/std/Build/CompileStep.zig | 8 +-
lib/std/Build/ConfigHeaderStep.zig | 4 +-
lib/std/Build/FmtStep.zig | 2 +-
lib/std/Thread/Condition.zig | 12 +-
lib/std/Thread/Futex.zig | 6 +-
lib/std/Thread/Mutex.zig | 6 +-
lib/std/Thread/ResetEvent.zig | 2 +-
lib/std/Thread/RwLock.zig | 2 +-
lib/std/Thread/Semaphore.zig | 2 +-
lib/std/array_hash_map.zig | 20 +-
lib/std/array_list.zig | 10 +-
lib/std/ascii.zig | 6 +-
lib/std/atomic/Atomic.zig | 12 +-
lib/std/atomic/queue.zig | 4 +-
lib/std/atomic/stack.zig | 4 +-
lib/std/base64.zig | 6 +-
lib/std/bit_set.zig | 16 +-
lib/std/bounded_array.zig | 4 +-
lib/std/child_process.zig | 6 +-
lib/std/coff.zig | 2 +-
lib/std/compress/deflate/compressor.zig | 10 +-
lib/std/compress/deflate/compressor_test.zig | 8 +-
lib/std/compress/deflate/decompressor.zig | 4 +-
lib/std/compress/deflate/deflate_fast.zig | 10 +-
.../compress/deflate/deflate_fast_test.zig | 2 +-
lib/std/compress/deflate/dict_decoder.zig | 2 +-
.../compress/deflate/huffman_bit_writer.zig | 12 +-
lib/std/compress/deflate/huffman_code.zig | 10 +-
lib/std/compress/lzma/decode.zig | 2 +-
lib/std/compress/lzma/decode/rangecoder.zig | 4 +-
lib/std/comptime_string_map.zig | 2 +-
lib/std/crypto/25519/ed25519.zig | 10 +-
lib/std/crypto/25519/edwards25519.zig | 8 +-
lib/std/crypto/Certificate.zig | 2 +-
lib/std/crypto/aegis.zig | 6 +-
lib/std/crypto/aes.zig | 8 +-
lib/std/crypto/aes/aesni.zig | 4 +-
lib/std/crypto/aes/armcrypto.zig | 4 +-
lib/std/crypto/aes/soft.zig | 2 +-
lib/std/crypto/aes_gcm.zig | 6 +-
lib/std/crypto/aes_ocb.zig | 8 +-
lib/std/crypto/argon2.zig | 14 +-
lib/std/crypto/ascon.zig | 2 +-
lib/std/crypto/bcrypt.zig | 4 +-
lib/std/crypto/blake2.zig | 12 +-
lib/std/crypto/blake3.zig | 6 +-
lib/std/crypto/chacha20.zig | 6 +-
lib/std/crypto/cmac.zig | 8 +-
lib/std/crypto/ghash_polyval.zig | 4 +-
lib/std/crypto/gimli.zig | 16 +-
lib/std/crypto/hmac.zig | 4 +-
lib/std/crypto/md5.zig | 2 +-
lib/std/crypto/pbkdf2.zig | 2 +-
lib/std/crypto/pcurves/common.zig | 2 +-
lib/std/crypto/pcurves/p256.zig | 2 +-
lib/std/crypto/pcurves/p256/scalar.zig | 2 +-
lib/std/crypto/pcurves/p384.zig | 2 +-
lib/std/crypto/pcurves/p384/scalar.zig | 2 +-
lib/std/crypto/pcurves/secp256k1.zig | 2 +-
lib/std/crypto/pcurves/secp256k1/scalar.zig | 2 +-
lib/std/crypto/poly1305.zig | 4 +-
lib/std/crypto/salsa20.zig | 8 +-
lib/std/crypto/scrypt.zig | 14 +-
lib/std/crypto/sha1.zig | 2 +-
lib/std/crypto/sha2.zig | 6 +-
lib/std/crypto/sha3.zig | 8 +-
lib/std/crypto/siphash.zig | 6 +-
lib/std/crypto/test.zig | 2 +-
lib/std/crypto/tls.zig | 2 +-
lib/std/crypto/utils.zig | 4 +-
lib/std/crypto/xoodoo.zig | 2 +-
lib/std/debug.zig | 10 +-
lib/std/dwarf.zig | 2 +-
lib/std/enums.zig | 4 +-
lib/std/event/loop.zig | 4 +-
lib/std/fmt.zig | 10 +-
lib/std/fmt/parse_float/decimal.zig | 2 +-
lib/std/fs/path.zig | 6 +-
lib/std/fs/wasi.zig | 2 +-
lib/std/hash/crc.zig | 6 +-
lib/std/hash/wyhash.zig | 2 +-
lib/std/hash_map.zig | 4 +-
lib/std/heap.zig | 4 +-
lib/std/heap/WasmPageAllocator.zig | 2 +-
lib/std/heap/general_purpose_allocator.zig | 2 +-
lib/std/json.zig | 12 +-
lib/std/json/test.zig | 2 +-
lib/std/math/big/int.zig | 14 +-
lib/std/math/big/rational.zig | 2 +-
lib/std/mem.zig | 24 +--
lib/std/meta.zig | 28 +--
lib/std/meta/trailer_flags.zig | 10 +-
lib/std/multi_array_list.zig | 26 +--
lib/std/net.zig | 14 +-
lib/std/net/test.zig | 2 +-
lib/std/once.zig | 2 +-
lib/std/os/linux.zig | 2 +-
.../uefi/protocols/device_path_protocol.zig | 2 +-
lib/std/os/windows.zig | 2 +-
lib/std/packed_int_array.zig | 2 +-
lib/std/pdb.zig | 2 +-
lib/std/priority_dequeue.zig | 2 +-
lib/std/priority_queue.zig | 2 +-
lib/std/process.zig | 4 +-
lib/std/rand.zig | 2 +-
lib/std/rand/ziggurat.zig | 4 +-
lib/std/simd.zig | 2 +-
lib/std/sort.zig | 10 +-
lib/std/target.zig | 8 +-
lib/std/target/aarch64.zig | 2 +-
lib/std/target/amdgpu.zig | 2 +-
lib/std/target/arc.zig | 2 +-
lib/std/target/arm.zig | 2 +-
lib/std/target/avr.zig | 2 +-
lib/std/target/bpf.zig | 2 +-
lib/std/target/csky.zig | 2 +-
lib/std/target/hexagon.zig | 2 +-
lib/std/target/m68k.zig | 2 +-
lib/std/target/mips.zig | 2 +-
lib/std/target/msp430.zig | 2 +-
lib/std/target/nvptx.zig | 2 +-
lib/std/target/powerpc.zig | 2 +-
lib/std/target/riscv.zig | 2 +-
lib/std/target/s390x.zig | 2 +-
lib/std/target/sparc.zig | 2 +-
lib/std/target/spirv.zig | 2 +-
lib/std/target/ve.zig | 2 +-
lib/std/target/wasm.zig | 2 +-
lib/std/target/x86.zig | 2 +-
lib/std/testing.zig | 8 +-
lib/std/wasm.zig | 4 +-
lib/std/zig/Ast.zig | 6 +-
lib/std/zig/CrossTarget.zig | 2 +-
lib/std/zig/fmt.zig | 2 +-
lib/std/zig/parser_test.zig | 2 +-
lib/std/zig/render.zig | 22 +--
lib/std/zig/system/NativeTargetInfo.zig | 2 +-
lib/std/zig/system/linux.zig | 2 +-
lib/std/zig/system/windows.zig | 6 +-
lib/test_runner.zig | 2 +-
src/AstGen.zig | 16 +-
src/Autodoc.zig | 16 +-
src/Compilation.zig | 22 +--
src/Liveness.zig | 8 +-
src/Manifest.zig | 2 +-
src/Module.zig | 14 +-
src/Package.zig | 2 +-
src/RangeSet.zig | 2 +-
src/Sema.zig | 182 +++++++++---------
src/arch/aarch64/CodeGen.zig | 26 +--
src/arch/aarch64/Emit.zig | 6 +-
src/arch/arm/CodeGen.zig | 26 +--
src/arch/arm/Emit.zig | 6 +-
src/arch/arm/bits.zig | 4 +-
src/arch/riscv64/CodeGen.zig | 4 +-
src/arch/riscv64/Emit.zig | 2 +-
src/arch/sparc64/CodeGen.zig | 8 +-
src/arch/sparc64/Emit.zig | 6 +-
src/arch/wasm/CodeGen.zig | 18 +-
src/arch/wasm/Emit.zig | 2 +-
src/arch/x86_64/CodeGen.zig | 20 +-
src/arch/x86_64/Emit.zig | 4 +-
src/arch/x86_64/Mir.zig | 2 +-
src/arch/x86_64/abi.zig | 10 +-
src/codegen.zig | 4 +-
src/codegen/c.zig | 52 ++---
src/codegen/llvm.zig | 60 +++---
src/codegen/spirv.zig | 4 +-
src/codegen/spirv/Assembler.zig | 2 +-
src/codegen/spirv/Module.zig | 4 +-
src/codegen/spirv/Section.zig | 2 +-
src/codegen/spirv/type.zig | 2 +-
src/glibc.zig | 6 +-
src/libc_installation.zig | 6 +-
src/libunwind.zig | 2 +-
src/link/Coff.zig | 6 +-
src/link/Dwarf.zig | 10 +-
src/link/Elf.zig | 18 +-
src/link/MachO.zig | 36 ++--
src/link/MachO/DebugSymbols.zig | 8 +-
src/link/MachO/Dylib.zig | 2 +-
src/link/MachO/Object.zig | 14 +-
src/link/MachO/UnwindInfo.zig | 16 +-
src/link/MachO/dead_strip.zig | 2 +-
src/link/MachO/dyld_info/Rebase.zig | 2 +-
src/link/MachO/dyld_info/bind.zig | 2 +-
src/link/MachO/eh_frame.zig | 8 +-
src/link/MachO/thunks.zig | 2 +-
src/link/MachO/zld.zig | 62 +++---
src/link/SpirV.zig | 4 +-
src/link/Wasm.zig | 20 +-
src/link/Wasm/Object.zig | 4 +-
src/link/tapi.zig | 4 +-
src/link/tapi/yaml.zig | 14 +-
src/main.zig | 6 +-
src/mingw.zig | 4 +-
src/objcopy.zig | 2 +-
src/print_air.zig | 16 +-
src/print_targets.zig | 4 +-
src/print_zir.zig | 16 +-
src/register_manager.zig | 6 +-
src/test.zig | 10 +-
src/translate_c.zig | 14 +-
src/translate_c/ast.zig | 22 +--
src/type.zig | 64 +++---
src/value.zig | 112 +++++------
tools/gen_spirv_spec.zig | 18 +-
tools/gen_stubs.zig | 18 +-
tools/update_clang_options.zig | 2 +-
tools/update_cpu_features.zig | 4 +-
tools/update_crc_catalog.zig | 2 +-
tools/update_spirv_features.zig | 4 +-
216 files changed, 938 insertions(+), 938 deletions(-)
diff --git a/lib/compiler_rt/atomics.zig b/lib/compiler_rt/atomics.zig
index 4db6176dc0..5e25634f9d 100644
--- a/lib/compiler_rt/atomics.zig
+++ b/lib/compiler_rt/atomics.zig
@@ -151,7 +151,7 @@ fn __atomic_compare_exchange(
_ = failure;
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
- for (ptr[0..size]) |b, i| {
+ for (ptr[0..size], 0..) |b, i| {
if (expected[i] != b) break;
} else {
// The two objects, ptr and expected, are equal
diff --git a/lib/compiler_rt/comparedf2_test.zig b/lib/compiler_rt/comparedf2_test.zig
index a77718e57c..9444c6adf7 100644
--- a/lib/compiler_rt/comparedf2_test.zig
+++ b/lib/compiler_rt/comparedf2_test.zig
@@ -94,8 +94,8 @@ fn generateVector(comptime a: f64, comptime b: f64) TestVector {
const test_vectors = init: {
@setEvalBranchQuota(10000);
var vectors: [arguments.len * arguments.len]TestVector = undefined;
- for (arguments[0..]) |arg_i, i| {
- for (arguments[0..]) |arg_j, j| {
+ for (arguments[0..], 0..) |arg_i, i| {
+ for (arguments[0..], 0..) |arg_j, j| {
vectors[(i * arguments.len) + j] = generateVector(arg_i, arg_j);
}
}
diff --git a/lib/compiler_rt/comparesf2_test.zig b/lib/compiler_rt/comparesf2_test.zig
index b2fafd38dd..40b1324cfa 100644
--- a/lib/compiler_rt/comparesf2_test.zig
+++ b/lib/compiler_rt/comparesf2_test.zig
@@ -94,8 +94,8 @@ fn generateVector(comptime a: f32, comptime b: f32) TestVector {
const test_vectors = init: {
@setEvalBranchQuota(10000);
var vectors: [arguments.len * arguments.len]TestVector = undefined;
- for (arguments[0..]) |arg_i, i| {
- for (arguments[0..]) |arg_j, j| {
+ for (arguments[0..], 0..) |arg_i, i| {
+ for (arguments[0..], 0..) |arg_j, j| {
vectors[(i * arguments.len) + j] = generateVector(arg_i, arg_j);
}
}
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index a375b45454..678120847f 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -650,7 +650,7 @@ pub fn dupe(self: *Build, bytes: []const u8) []u8 {
/// Duplicates an array of strings without the need to handle out of memory.
pub fn dupeStrings(self: *Build, strings: []const []const u8) [][]u8 {
const array = self.allocator.alloc([]u8, strings.len) catch @panic("OOM");
- for (strings) |s, i| {
+ for (strings, 0..) |s, i| {
array[i] = self.dupe(s);
}
return array;
@@ -1051,7 +1051,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros
const all_features = whitelist_cpu.arch.allFeaturesList();
var populated_cpu_features = whitelist_cpu.model.features;
populated_cpu_features.populateDependencies(all_features);
- for (all_features) |feature, i_usize| {
+ for (all_features, 0..) |feature, i_usize| {
const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
const in_cpu_set = populated_cpu_features.isEnabled(i);
if (in_cpu_set) {
@@ -1059,7 +1059,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros
}
}
log.err(" Remove: ", .{});
- for (all_features) |feature, i_usize| {
+ for (all_features, 0..) |feature, i_usize| {
const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
const in_cpu_set = populated_cpu_features.isEnabled(i);
const in_actual_set = selected_cpu.features.isEnabled(i);
@@ -1748,7 +1748,7 @@ pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 {
var mcpu_buffer = ArrayList(u8).init(allocator);
try mcpu_buffer.appendSlice(cpu.model.name);
- for (all_features) |feature, i_usize| {
+ for (all_features, 0..) |feature, i_usize| {
const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
const in_cpu_set = populated_cpu_features.isEnabled(i);
const in_actual_set = cpu.features.isEnabled(i);
diff --git a/lib/std/Build/CompileStep.zig b/lib/std/Build/CompileStep.zig
index 1f145f8171..a916de0fc6 100644
--- a/lib/std/Build/CompileStep.zig
+++ b/lib/std/Build/CompileStep.zig
@@ -1016,7 +1016,7 @@ pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void {
pub fn setExecCmd(self: *CompileStep, args: []const ?[]const u8) void {
assert(self.kind == .@"test");
const duped_args = self.builder.allocator.alloc(?[]u8, args.len) catch @panic("OOM");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
duped_args[i] = if (arg) |a| self.builder.dupe(a) else null;
}
self.exec_cmd_args = duped_args;
@@ -1040,7 +1040,7 @@ fn appendModuleArgs(
{
const keys = module.dependencies.keys();
- for (module.dependencies.values()) |sub_module, i| {
+ for (module.dependencies.values(), 0..) |sub_module, i| {
const sub_name = keys[i];
try cs.appendModuleArgs(zig_args, sub_name, sub_module);
}
@@ -1575,7 +1575,7 @@ fn make(step: *Step) !void {
{
const keys = self.modules.keys();
- for (self.modules.values()) |module, i| {
+ for (self.modules.values(), 0..) |module, i| {
const name = keys[i];
try self.appendModuleArgs(&zig_args, name, module);
}
@@ -1750,7 +1750,7 @@ fn make(step: *Step) !void {
const args_to_escape = zig_args.items[2..];
var escaped_args = try ArrayList([]const u8).initCapacity(builder.allocator, args_to_escape.len);
arg_blk: for (args_to_escape) |arg| {
- for (arg) |c, arg_idx| {
+ for (arg, 0..) |c, arg_idx| {
if (c == '\\' or c == '"') {
// Slow path for arguments that need to be escaped. We'll need to allocate and copy
var escaped = try ArrayList(u8).initCapacity(builder.allocator, arg.len + 1);
diff --git a/lib/std/Build/ConfigHeaderStep.zig b/lib/std/Build/ConfigHeaderStep.zig
index f8d6f7bd57..595c1018f7 100644
--- a/lib/std/Build/ConfigHeaderStep.zig
+++ b/lib/std/Build/ConfigHeaderStep.zig
@@ -350,7 +350,7 @@ fn render_blank(
try output.appendSlice("\n");
const values = defines.values();
- for (defines.keys()) |name, i| {
+ for (defines.keys(), 0..) |name, i| {
try renderValueC(output, name, values[i]);
}
@@ -361,7 +361,7 @@ fn render_blank(
fn render_nasm(output: *std.ArrayList(u8), defines: std.StringArrayHashMap(Value)) !void {
const values = defines.values();
- for (defines.keys()) |name, i| {
+ for (defines.keys(), 0..) |name, i| {
try renderValueNasm(output, name, values[i]);
}
}
diff --git a/lib/std/Build/FmtStep.zig b/lib/std/Build/FmtStep.zig
index 6404d22f13..4a5efde2bd 100644
--- a/lib/std/Build/FmtStep.zig
+++ b/lib/std/Build/FmtStep.zig
@@ -19,7 +19,7 @@ pub fn create(builder: *std.Build, paths: []const []const u8) *FmtStep {
self.argv[0] = builder.zig_exe;
self.argv[1] = "fmt";
- for (paths) |path, i| {
+ for (paths, 0..) |path, i| {
self.argv[2 + i] = builder.pathFromRoot(path);
}
return self;
diff --git a/lib/std/Thread/Condition.zig b/lib/std/Thread/Condition.zig
index ab75a0e5e2..793779dbdb 100644
--- a/lib/std/Thread/Condition.zig
+++ b/lib/std/Thread/Condition.zig
@@ -341,7 +341,7 @@ test "Condition - wait and signal" {
};
var multi_wait = MultiWait{};
- for (multi_wait.threads) |*t| {
+ for (&multi_wait.threads) |*t| {
t.* = try std.Thread.spawn(.{}, MultiWait.run, .{&multi_wait});
}
@@ -389,7 +389,7 @@ test "Condition - signal" {
};
var signal_test = SignalTest{};
- for (signal_test.threads) |*t| {
+ for (&signal_test.threads) |*t| {
t.* = try std.Thread.spawn(.{}, SignalTest.run, .{&signal_test});
}
@@ -457,7 +457,7 @@ test "Condition - multi signal" {
var threads = [_]std.Thread{undefined} ** num_threads;
// Create a circle of paddles which hit each other
- for (threads) |*t, i| {
+ for (&threads, 0..) |*t, i| {
const paddle = &paddles[i];
const hit_to = &paddles[(i + 1) % paddles.len];
t.* = try std.Thread.spawn(.{}, Paddle.run, .{ paddle, hit_to });
@@ -468,7 +468,7 @@ test "Condition - multi signal" {
for (threads) |t| t.join();
// The first paddle will be hit one last time by the last paddle.
- for (paddles) |p, i| {
+ for (paddles, 0..) |p, i| {
const expected = @as(u32, num_iterations) + @boolToInt(i == 0);
try testing.expectEqual(p.value, expected);
}
@@ -513,7 +513,7 @@ test "Condition - broadcasting" {
};
var broadcast_test = BroadcastTest{};
- for (broadcast_test.threads) |*t| {
+ for (&broadcast_test.threads) |*t| {
t.* = try std.Thread.spawn(.{}, BroadcastTest.run, .{&broadcast_test});
}
@@ -584,7 +584,7 @@ test "Condition - broadcasting - wake all threads" {
var broadcast_test = BroadcastTest{};
var thread_id: usize = 1;
- for (broadcast_test.threads) |*t| {
+ for (&broadcast_test.threads) |*t| {
t.* = try std.Thread.spawn(.{}, BroadcastTest.run, .{ &broadcast_test, thread_id });
thread_id += 1;
}
diff --git a/lib/std/Thread/Futex.zig b/lib/std/Thread/Futex.zig
index 15ef35698e..7efdc69d3b 100644
--- a/lib/std/Thread/Futex.zig
+++ b/lib/std/Thread/Futex.zig
@@ -895,7 +895,7 @@ test "Futex - signaling" {
var threads = [_]std.Thread{undefined} ** num_threads;
// Create a circle of paddles which hit each other
- for (threads) |*t, i| {
+ for (&threads, 0..) |*t, i| {
const paddle = &paddles[i];
const hit_to = &paddles[(i + 1) % paddles.len];
t.* = try std.Thread.spawn(.{}, Paddle.run, .{ paddle, hit_to });
@@ -950,14 +950,14 @@ test "Futex - broadcasting" {
threads: [num_threads]std.Thread = undefined,
fn run(self: *@This()) !void {
- for (self.barriers) |*barrier| {
+ for (&self.barriers) |*barrier| {
try barrier.wait();
}
}
};
var broadcast = Broadcast{};
- for (broadcast.threads) |*t| t.* = try std.Thread.spawn(.{}, Broadcast.run, .{&broadcast});
+ for (&broadcast.threads) |*t| t.* = try std.Thread.spawn(.{}, Broadcast.run, .{&broadcast});
for (broadcast.threads) |t| t.join();
}
diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig
index ecb3556b6c..89dedaf2b7 100644
--- a/lib/std/Thread/Mutex.zig
+++ b/lib/std/Thread/Mutex.zig
@@ -245,7 +245,7 @@ const NonAtomicCounter = struct {
}
fn inc(self: *NonAtomicCounter) void {
- for (@bitCast([2]u64, self.get() + 1)) |v, i| {
+ for (@bitCast([2]u64, self.get() + 1), 0..) |v, i| {
@ptrCast(*volatile u64, &self.value[i]).* = v;
}
}
@@ -277,7 +277,7 @@ test "Mutex - many uncontended" {
};
var runners = [_]Runner{.{}} ** num_threads;
- for (runners) |*r| r.thread = try Thread.spawn(.{}, Runner.run, .{r});
+ for (&runners) |*r| r.thread = try Thread.spawn(.{}, Runner.run, .{r});
for (runners) |r| r.thread.join();
for (runners) |r| try testing.expectEqual(r.counter.get(), num_increments);
}
@@ -312,7 +312,7 @@ test "Mutex - many contended" {
var runner = Runner{};
var threads: [num_threads]Thread = undefined;
- for (threads) |*t| t.* = try Thread.spawn(.{}, Runner.run, .{&runner});
+ for (&threads) |*t| t.* = try Thread.spawn(.{}, Runner.run, .{&runner});
for (threads) |t| t.join();
try testing.expectEqual(runner.counter.get(), num_increments * num_threads);
diff --git a/lib/std/Thread/ResetEvent.zig b/lib/std/Thread/ResetEvent.zig
index 87232c29cf..42cf74fd42 100644
--- a/lib/std/Thread/ResetEvent.zig
+++ b/lib/std/Thread/ResetEvent.zig
@@ -274,7 +274,7 @@ test "ResetEvent - broadcast" {
var ctx = Context{};
var threads: [num_threads - 1]std.Thread = undefined;
- for (threads) |*t| t.* = try std.Thread.spawn(.{}, Context.run, .{&ctx});
+ for (&threads) |*t| t.* = try std.Thread.spawn(.{}, Context.run, .{&ctx});
defer for (threads) |t| t.join();
ctx.run();
diff --git a/lib/std/Thread/RwLock.zig b/lib/std/Thread/RwLock.zig
index c4105817b3..e78695a1d8 100644
--- a/lib/std/Thread/RwLock.zig
+++ b/lib/std/Thread/RwLock.zig
@@ -364,7 +364,7 @@ test "RwLock - concurrent access" {
var runner = Runner{};
var threads: [num_writers + num_readers]std.Thread = undefined;
- for (threads[0..num_writers]) |*t, i| t.* = try std.Thread.spawn(.{}, Runner.writer, .{ &runner, i });
+ for (threads[0..num_writers], 0..) |*t, i| t.* = try std.Thread.spawn(.{}, Runner.writer, .{ &runner, i });
for (threads[num_writers..]) |*t| t.* = try std.Thread.spawn(.{}, Runner.reader, .{&runner});
for (threads) |t| t.join();
diff --git a/lib/std/Thread/Semaphore.zig b/lib/std/Thread/Semaphore.zig
index 72191ffd6f..1b182d4c2a 100644
--- a/lib/std/Thread/Semaphore.zig
+++ b/lib/std/Thread/Semaphore.zig
@@ -54,7 +54,7 @@ test "Thread.Semaphore" {
var n: i32 = 0;
var ctx = TestContext{ .sem = &sem, .n = &n };
- for (threads) |*t| t.* = try std.Thread.spawn(.{}, TestContext.worker, .{&ctx});
+ for (&threads) |*t| t.* = try std.Thread.spawn(.{}, TestContext.worker, .{&ctx});
for (threads) |t| t.join();
sem.wait();
try testing.expect(n == num_threads);
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index 57821d1b51..f62616cd85 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -715,7 +715,7 @@ pub fn ArrayHashMapUnmanaged(
const slice = self.entries.slice();
const hashes_array = slice.items(.hash);
const keys_array = slice.items(.key);
- for (keys_array) |*item_key, i| {
+ for (keys_array, 0..) |*item_key, i| {
if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*, i)) {
return GetOrPutResult{
.key_ptr = item_key,
@@ -946,7 +946,7 @@ pub fn ArrayHashMapUnmanaged(
const slice = self.entries.slice();
const hashes_array = slice.items(.hash);
const keys_array = slice.items(.key);
- for (keys_array) |*item_key, i| {
+ for (keys_array, 0..) |*item_key, i| {
if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*, i)) {
return i;
}
@@ -1285,7 +1285,7 @@ pub fn ArrayHashMapUnmanaged(
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
const keys_array = slice.items(.key);
- for (keys_array) |*item_key, i| {
+ for (keys_array, 0..) |*item_key, i| {
const hash_match = if (store_hash) hashes_array[i] == key_hash else true;
if (hash_match and key_ctx.eql(key, item_key.*, i)) {
const removed_entry: KV = .{
@@ -1326,7 +1326,7 @@ pub fn ArrayHashMapUnmanaged(
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
const keys_array = slice.items(.key);
- for (keys_array) |*item_key, i| {
+ for (keys_array, 0..) |*item_key, i| {
const hash_match = if (store_hash) hashes_array[i] == key_hash else true;
if (hash_match and key_ctx.eql(key, item_key.*, i)) {
switch (removal_type) {
@@ -1634,7 +1634,7 @@ pub fn ArrayHashMapUnmanaged(
const items = if (store_hash) slice.items(.hash) else slice.items(.key);
const indexes = header.indexes(I);
- entry_loop: for (items) |key, i| {
+ entry_loop: for (items, 0..) |key, i| {
const h = if (store_hash) key else checkedHash(ctx, key);
const start_index = safeTruncate(usize, h);
const end_index = start_index +% indexes.len;
@@ -1730,7 +1730,7 @@ pub fn ArrayHashMapUnmanaged(
const indexes = header.indexes(I);
if (indexes.len == 0) return;
var is_empty = false;
- for (indexes) |idx, i| {
+ for (indexes, 0..) |idx, i| {
if (idx.isEmpty()) {
is_empty = true;
} else {
@@ -1826,7 +1826,7 @@ const min_bit_index = 5;
const max_capacity = (1 << max_bit_index) - 1;
const index_capacities = blk: {
var caps: [max_bit_index + 1]u32 = undefined;
- for (caps[0..max_bit_index]) |*item, i| {
+ for (caps[0..max_bit_index], 0..) |*item, i| {
item.* = (1 << i) * 3 / 5;
}
caps[max_bit_index] = max_capacity;
@@ -2025,7 +2025,7 @@ test "iterator hash map" {
try testing.expect(count == 3);
try testing.expect(it.next() == null);
- for (buffer) |_, i| {
+ for (buffer, 0..) |_, i| {
try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
}
@@ -2037,7 +2037,7 @@ test "iterator hash map" {
if (count >= 2) break;
}
- for (buffer[0..2]) |_, i| {
+ for (buffer[0..2], 0..) |_, i| {
try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
}
@@ -2299,7 +2299,7 @@ test "sort" {
map.sort(C{ .keys = map.keys() });
var x: i32 = 1;
- for (map.keys()) |key, i| {
+ for (map.keys(), 0..) |key, i| {
try testing.expect(key == x);
try testing.expect(map.values()[i] == x * 3);
x += 1;
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 2485668417..13aad53019 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -183,7 +183,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
mem.copy(T, range, new_items);
const after_subrange = start + new_items.len;
- for (self.items[after_range..]) |item, i| {
+ for (self.items[after_range..], 0..) |item, i| {
self.items[after_subrange..][i] = item;
}
@@ -216,7 +216,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (newlen == i) return self.pop();
const old_item = self.items[i];
- for (self.items[i..newlen]) |*b, j| b.* = self.items[i + 1 + j];
+ for (self.items[i..newlen], 0..) |*b, j| b.* = self.items[i + 1 + j];
self.items[newlen] = undefined;
self.items.len = newlen;
return old_item;
@@ -666,7 +666,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
if (newlen == i) return self.pop();
const old_item = self.items[i];
- for (self.items[i..newlen]) |*b, j| b.* = self.items[i + 1 + j];
+ for (self.items[i..newlen], 0..) |*b, j| b.* = self.items[i + 1 + j];
self.items[newlen] = undefined;
self.items.len = newlen;
return old_item;
@@ -1069,7 +1069,7 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
}
}
- for (list.items) |v, i| {
+ for (list.items, 0..) |v, i| {
try testing.expect(v == @intCast(i32, i + 1));
}
@@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
}
}
- for (list.items) |v, i| {
+ for (list.items, 0..) |v, i| {
try testing.expect(v == @intCast(i32, i + 1));
}
diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig
index 79038a37dc..941f398f20 100644
--- a/lib/std/ascii.zig
+++ b/lib/std/ascii.zig
@@ -272,7 +272,7 @@ test "ASCII character classes" {
/// Asserts `output.len >= ascii_string.len`.
pub fn lowerString(output: []u8, ascii_string: []const u8) []u8 {
std.debug.assert(output.len >= ascii_string.len);
- for (ascii_string) |c, i| {
+ for (ascii_string, 0..) |c, i| {
output[i] = toLower(c);
}
return output[0..ascii_string.len];
@@ -301,7 +301,7 @@ test "allocLowerString" {
/// Asserts `output.len >= ascii_string.len`.
pub fn upperString(output: []u8, ascii_string: []const u8) []u8 {
std.debug.assert(output.len >= ascii_string.len);
- for (ascii_string) |c, i| {
+ for (ascii_string, 0..) |c, i| {
output[i] = toUpper(c);
}
return output[0..ascii_string.len];
@@ -329,7 +329,7 @@ test "allocUpperString" {
/// Compares strings `a` and `b` case-insensitively and returns whether they are equal.
pub fn eqlIgnoreCase(a: []const u8, b: []const u8) bool {
if (a.len != b.len) return false;
- for (a) |a_c, i| {
+ for (a, 0..) |a_c, i| {
if (toLower(a_c) != toLower(b[i])) return false;
}
return true;
diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig
index 850075d4cb..51e61ca628 100644
--- a/lib/std/atomic/Atomic.zig
+++ b/lib/std/atomic/Atomic.zig
@@ -548,7 +548,7 @@ test "Atomic.bitSet" {
var x = Atomic(Int).init(0);
const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array) |_, bit_index| {
+ for (bit_array, 0..) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
@@ -562,7 +562,7 @@ test "Atomic.bitSet" {
try testing.expect(x.load(.SeqCst) & mask != 0);
// all the previous bits should have not changed (still be set)
- for (bit_array[0..bit_index]) |_, prev_bit_index| {
+ for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask != 0);
@@ -578,7 +578,7 @@ test "Atomic.bitReset" {
var x = Atomic(Int).init(0);
const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array) |_, bit_index| {
+ for (bit_array, 0..) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
x.storeUnchecked(x.loadUnchecked() | mask);
@@ -593,7 +593,7 @@ test "Atomic.bitReset" {
try testing.expect(x.load(.SeqCst) & mask == 0);
// all the previous bits should have not changed (still be reset)
- for (bit_array[0..bit_index]) |_, prev_bit_index| {
+ for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
@@ -609,7 +609,7 @@ test "Atomic.bitToggle" {
var x = Atomic(Int).init(0);
const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array) |_, bit_index| {
+ for (bit_array, 0..) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
@@ -623,7 +623,7 @@ test "Atomic.bitToggle" {
try testing.expect(x.load(.SeqCst) & mask == 0);
// all the previous bits should have not changed (still be toggled back)
- for (bit_array[0..bit_index]) |_, prev_bit_index| {
+ for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 8100a9e26a..7c9ffa2684 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -212,11 +212,11 @@ test "std.atomic.Queue" {
try expect(context.queue.isEmpty());
var putters: [put_thread_count]std.Thread = undefined;
- for (putters) |*t| {
+ for (&putters) |*t| {
t.* = try std.Thread.spawn(.{}, startPuts, .{&context});
}
var getters: [put_thread_count]std.Thread = undefined;
- for (getters) |*t| {
+ for (&getters) |*t| {
t.* = try std.Thread.spawn(.{}, startGets, .{&context});
}
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index a6396bb22b..9ad7c76d81 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -117,11 +117,11 @@ test "std.atomic.stack" {
}
} else {
var putters: [put_thread_count]std.Thread = undefined;
- for (putters) |*t| {
+ for (&putters) |*t| {
t.* = try std.Thread.spawn(.{}, startPuts, .{&context});
}
var getters: [put_thread_count]std.Thread = undefined;
- for (getters) |*t| {
+ for (&getters) |*t| {
t.* = try std.Thread.spawn(.{}, startGets, .{&context});
}
diff --git a/lib/std/base64.zig b/lib/std/base64.zig
index 0eeb30abbc..83f07b9c18 100644
--- a/lib/std/base64.zig
+++ b/lib/std/base64.zig
@@ -140,7 +140,7 @@ pub const Base64Decoder = struct {
};
var char_in_alphabet = [_]bool{false} ** 256;
- for (alphabet_chars) |c, i| {
+ for (alphabet_chars, 0..) |c, i| {
assert(!char_in_alphabet[c]);
assert(pad_char == null or c != pad_char.?);
@@ -185,7 +185,7 @@ pub const Base64Decoder = struct {
var acc_len: u4 = 0;
var dest_idx: usize = 0;
var leftover_idx: ?usize = null;
- for (source) |c, src_idx| {
+ for (source, 0..) |c, src_idx| {
const d = decoder.char_to_index[c];
if (d == invalid_char) {
if (decoder.pad_char == null or c != decoder.pad_char.?) return error.InvalidCharacter;
@@ -258,7 +258,7 @@ pub const Base64DecoderWithIgnore = struct {
var acc_len: u4 = 0;
var dest_idx: usize = 0;
var leftover_idx: ?usize = null;
- for (source) |c, src_idx| {
+ for (source, 0..) |c, src_idx| {
if (decoder_with_ignore.char_is_ignored[c]) continue;
const d = decoder.char_to_index[c];
if (d == Base64Decoder.invalid_char) {
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
index a56dec5beb..a027022497 100644
--- a/lib/std/bit_set.zig
+++ b/lib/std/bit_set.zig
@@ -494,14 +494,14 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// Flips all bits in this bit set which are present
/// in the toggles bit set.
pub fn toggleSet(self: *Self, toggles: Self) void {
- for (self.masks) |*mask, i| {
+ for (&self.masks, 0..) |*mask, i| {
mask.* ^= toggles.masks[i];
}
}
/// Flips every bit in the bit set.
pub fn toggleAll(self: *Self) void {
- for (self.masks) |*mask| {
+ for (&self.masks) |*mask| {
mask.* = ~mask.*;
}
@@ -515,7 +515,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// result in the first one. Bits in the result are
/// set if the corresponding bits were set in either input.
pub fn setUnion(self: *Self, other: Self) void {
- for (self.masks) |*mask, i| {
+ for (&self.masks, 0..) |*mask, i| {
mask.* |= other.masks[i];
}
}
@@ -524,7 +524,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// the result in the first one. Bits in the result are
/// set if the corresponding bits were set in both inputs.
pub fn setIntersection(self: *Self, other: Self) void {
- for (self.masks) |*mask, i| {
+ for (&self.masks, 0..) |*mask, i| {
mask.* &= other.masks[i];
}
}
@@ -544,7 +544,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// If no bits are set, returns null.
pub fn toggleFirstSet(self: *Self) ?usize {
var offset: usize = 0;
- const mask = for (self.masks) |*mask| {
+ const mask = for (&self.masks) |*mask| {
if (mask.* != 0) break mask;
offset += @bitSizeOf(MaskInt);
} else return null;
@@ -869,7 +869,7 @@ pub const DynamicBitSetUnmanaged = struct {
pub fn toggleSet(self: *Self, toggles: Self) void {
assert(toggles.bit_length == self.bit_length);
const num_masks = numMasks(self.bit_length);
- for (self.masks[0..num_masks]) |*mask, i| {
+ for (self.masks[0..num_masks], 0..) |*mask, i| {
mask.* ^= toggles.masks[i];
}
}
@@ -897,7 +897,7 @@ pub const DynamicBitSetUnmanaged = struct {
pub fn setUnion(self: *Self, other: Self) void {
assert(other.bit_length == self.bit_length);
const num_masks = numMasks(self.bit_length);
- for (self.masks[0..num_masks]) |*mask, i| {
+ for (self.masks[0..num_masks], 0..) |*mask, i| {
mask.* |= other.masks[i];
}
}
@@ -909,7 +909,7 @@ pub const DynamicBitSetUnmanaged = struct {
pub fn setIntersection(self: *Self, other: Self) void {
assert(other.bit_length == self.bit_length);
const num_masks = numMasks(self.bit_length);
- for (self.masks[0..num_masks]) |*mask, i| {
+ for (self.masks[0..num_masks], 0..) |*mask, i| {
mask.* &= other.masks[i];
}
}
diff --git a/lib/std/bounded_array.zig b/lib/std/bounded_array.zig
index 7f1957d6dc..5242470631 100644
--- a/lib/std/bounded_array.zig
+++ b/lib/std/bounded_array.zig
@@ -169,7 +169,7 @@ pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
} else {
mem.copy(T, range, new_items);
const after_subrange = start + new_items.len;
- for (self.constSlice()[after_range..]) |item, i| {
+ for (self.constSlice()[after_range..], 0..) |item, i| {
self.slice()[after_subrange..][i] = item;
}
self.len -= len - new_items.len;
@@ -197,7 +197,7 @@ pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
const newlen = self.len - 1;
if (newlen == i) return self.pop();
const old_item = self.get(i);
- for (self.slice()[i..newlen]) |*b, j| b.* = self.get(i + 1 + j);
+ for (self.slice()[i..newlen], 0..) |*b, j| b.* = self.get(i + 1 + j);
self.set(newlen, undefined);
self.len = newlen;
return old_item;
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 003e37d76b..07dd1f27f5 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -604,7 +604,7 @@ pub const ChildProcess = struct {
const arena = arena_allocator.allocator();
const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null);
- for (self.argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
+ for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
const envp = if (self.env_map) |env_map| m: {
const envp_buf = try createNullDelimitedEnvMap(arena, env_map);
@@ -712,7 +712,7 @@ pub const ChildProcess = struct {
// Therefore, we do all the allocation for the execve() before the fork().
// This means we must do the null-termination of argv and env vars here.
const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null);
- for (self.argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
+ for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
const envp = m: {
if (self.env_map) |env_map| {
@@ -1424,7 +1424,7 @@ fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8)
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
- for (argv) |arg, arg_i| {
+ for (argv, 0..) |arg, arg_i| {
if (arg_i != 0) try buf.append(' ');
if (mem.indexOfAny(u8, arg, " \t\n\"") == null) {
try buf.appendSlice(arg);
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index 3225a2525f..a7c191e650 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -1223,7 +1223,7 @@ pub const Coff = struct {
pub fn getSectionHeadersAlloc(self: *const Coff, allocator: mem.Allocator) ![]SectionHeader {
const section_headers = self.getSectionHeaders();
const out_buff = try allocator.alloc(SectionHeader, section_headers.len);
- for (out_buff) |*section_header, i| {
+ for (out_buff, 0..) |*section_header, i| {
section_header.* = section_headers[i];
}
diff --git a/lib/std/compress/deflate/compressor.zig b/lib/std/compress/deflate/compressor.zig
index 45c5c6bf8e..6c21875941 100644
--- a/lib/std/compress/deflate/compressor.zig
+++ b/lib/std/compress/deflate/compressor.zig
@@ -159,7 +159,7 @@ fn levels(compression: Compression) CompressionLevel {
fn matchLen(a: []u8, b: []u8, max: u32) u32 {
var bounded_a = a[0..max];
var bounded_b = b[0..max];
- for (bounded_a) |av, i| {
+ for (bounded_a, 0..) |av, i| {
if (bounded_b[i] != av) {
return @intCast(u32, i);
}
@@ -312,14 +312,14 @@ pub fn Compressor(comptime WriterType: anytype) type {
// Iterate over slices instead of arrays to avoid copying
// the entire table onto the stack (https://golang.org/issue/18625).
- for (self.hash_prev) |v, i| {
+ for (self.hash_prev, 0..) |v, i| {
if (v > delta) {
self.hash_prev[i] = @intCast(u32, v - delta);
} else {
self.hash_prev[i] = 0;
}
}
- for (self.hash_head) |v, i| {
+ for (self.hash_head, 0..) |v, i| {
if (v > delta) {
self.hash_head[i] = @intCast(u32, v - delta);
} else {
@@ -391,7 +391,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
var dst = self.hash_match[0..dst_size];
_ = self.bulk_hasher(to_check, dst);
var new_h: u32 = 0;
- for (dst) |val, i| {
+ for (dst, 0..) |val, i| {
var di = i + index;
new_h = val;
var hh = &self.hash_head[new_h & hash_mask];
@@ -1102,7 +1102,7 @@ test "bulkHash4" {
defer testing.allocator.free(dst);
_ = bulkHash4(y, dst);
- for (dst) |got, i| {
+ for (dst, 0..) |got, i| {
var want = hash4(y[i..]);
try testing.expectEqual(want, got);
}
diff --git a/lib/std/compress/deflate/compressor_test.zig b/lib/std/compress/deflate/compressor_test.zig
index c51f68236d..858da8d8b5 100644
--- a/lib/std/compress/deflate/compressor_test.zig
+++ b/lib/std/compress/deflate/compressor_test.zig
@@ -171,7 +171,7 @@ test "deflate/inflate" {
var large_data_chunk = try testing.allocator.alloc(u8, 100_000);
defer testing.allocator.free(large_data_chunk);
// fill with random data
- for (large_data_chunk) |_, i| {
+ for (large_data_chunk, 0..) |_, i| {
large_data_chunk[i] = @truncate(u8, i) *% @truncate(u8, i);
}
try testToFromWithLimit(large_data_chunk, limits);
@@ -205,7 +205,7 @@ test "very long sparse chunk" {
n -= cur - s.l;
cur = s.l;
}
- for (b[0..n]) |_, i| {
+ for (b[0..n], 0..) |_, i| {
if (s.cur + i >= s.l -| (1 << 16)) {
b[i] = 1;
} else {
@@ -451,7 +451,7 @@ test "inflate reset" {
defer compressed_strings[0].deinit();
defer compressed_strings[1].deinit();
- for (strings) |s, i| {
+ for (strings, 0..) |s, i| {
var comp = try compressor(
testing.allocator,
compressed_strings[i].writer(),
@@ -498,7 +498,7 @@ test "inflate reset dictionary" {
defer compressed_strings[0].deinit();
defer compressed_strings[1].deinit();
- for (strings) |s, i| {
+ for (strings, 0..) |s, i| {
var comp = try compressor(
testing.allocator,
compressed_strings[i].writer(),
diff --git a/lib/std/compress/deflate/decompressor.zig b/lib/std/compress/deflate/decompressor.zig
index baef85cace..e5cfbb0f6b 100644
--- a/lib/std/compress/deflate/decompressor.zig
+++ b/lib/std/compress/deflate/decompressor.zig
@@ -165,7 +165,7 @@ const HuffmanDecoder = struct {
}
}
- for (lengths) |n, li| {
+ for (lengths, 0..) |n, li| {
if (n == 0) {
continue;
}
@@ -213,7 +213,7 @@ const HuffmanDecoder = struct {
// Above we've sanity checked that we never overwrote
// an existing entry. Here we additionally check that
// we filled the tables completely.
- for (self.chunks) |chunk, i| {
+ for (self.chunks, 0..) |chunk, i| {
// As an exception, in the degenerate
// single-code case, we allow odd
// chunks to be missing.
diff --git a/lib/std/compress/deflate/deflate_fast.zig b/lib/std/compress/deflate/deflate_fast.zig
index 12d3e4203a..2009af2611 100644
--- a/lib/std/compress/deflate/deflate_fast.zig
+++ b/lib/std/compress/deflate/deflate_fast.zig
@@ -264,7 +264,7 @@ pub const DeflateFast = struct {
var a = src[@intCast(usize, s)..@intCast(usize, s1)];
b = b[0..a.len];
// Extend the match to be as long as possible.
- for (a) |_, i| {
+ for (a, 0..) |_, i| {
if (a[i] != b[i]) {
return @intCast(i32, i);
}
@@ -285,7 +285,7 @@ pub const DeflateFast = struct {
b = b[0..a.len];
}
a = a[0..b.len];
- for (b) |_, i| {
+ for (b, 0..) |_, i| {
if (a[i] != b[i]) {
return @intCast(i32, i);
}
@@ -301,7 +301,7 @@ pub const DeflateFast = struct {
// Continue looking for more matches in the current block.
a = src[@intCast(usize, s + n)..@intCast(usize, s1)];
b = src[0..a.len];
- for (a) |_, i| {
+ for (a, 0..) |_, i| {
if (a[i] != b[i]) {
return @intCast(i32, i) + n;
}
@@ -330,7 +330,7 @@ pub const DeflateFast = struct {
fn shiftOffsets(self: *Self) void {
if (self.prev_len == 0) {
// We have no history; just clear the table.
- for (self.table) |_, i| {
+ for (self.table, 0..) |_, i| {
self.table[i] = TableEntry{ .val = 0, .offset = 0 };
}
self.cur = max_match_offset + 1;
@@ -338,7 +338,7 @@ pub const DeflateFast = struct {
}
// Shift down everything in the table that isn't already too far away.
- for (self.table) |_, i| {
+ for (self.table, 0..) |_, i| {
var v = self.table[i].offset - self.cur + max_match_offset + 1;
if (v < 0) {
// We want to reset self.cur to max_match_offset + 1, so we need to shift
diff --git a/lib/std/compress/deflate/deflate_fast_test.zig b/lib/std/compress/deflate/deflate_fast_test.zig
index 9f7b639cba..f8efa80630 100644
--- a/lib/std/compress/deflate/deflate_fast_test.zig
+++ b/lib/std/compress/deflate/deflate_fast_test.zig
@@ -18,7 +18,7 @@ test "best speed" {
var abcabc = try testing.allocator.alloc(u8, 131_072);
defer testing.allocator.free(abcabc);
- for (abcabc) |_, i| {
+ for (abcabc, 0..) |_, i| {
abcabc[i] = @intCast(u8, i % 128);
}
diff --git a/lib/std/compress/deflate/dict_decoder.zig b/lib/std/compress/deflate/dict_decoder.zig
index e2a185dc39..bf21572827 100644
--- a/lib/std/compress/deflate/dict_decoder.zig
+++ b/lib/std/compress/deflate/dict_decoder.zig
@@ -378,7 +378,7 @@ test "dictionary decoder" {
_ = try want.write(".");
var str = poem;
- for (poem_refs) |ref, i| {
+ for (poem_refs, 0..) |ref, i| {
_ = i;
if (ref.dist == 0) {
try util.writeString(&dd, got, str[0..ref.length]);
diff --git a/lib/std/compress/deflate/huffman_bit_writer.zig b/lib/std/compress/deflate/huffman_bit_writer.zig
index fc5727ca63..a42aae467b 100644
--- a/lib/std/compress/deflate/huffman_bit_writer.zig
+++ b/lib/std/compress/deflate/huffman_bit_writer.zig
@@ -197,7 +197,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
lit_enc: *hm_code.HuffmanEncoder,
off_enc: *hm_code.HuffmanEncoder,
) void {
- for (self.codegen_freq) |_, i| {
+ for (self.codegen_freq, 0..) |_, i| {
self.codegen_freq[i] = 0;
}
@@ -208,12 +208,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
var codegen = self.codegen; // cache
// Copy the concatenated code sizes to codegen. Put a marker at the end.
var cgnl = codegen[0..num_literals];
- for (cgnl) |_, i| {
+ for (cgnl, 0..) |_, i| {
cgnl[i] = @intCast(u8, lit_enc.codes[i].len);
}
cgnl = codegen[num_literals .. num_literals + num_offsets];
- for (cgnl) |_, i| {
+ for (cgnl, 0..) |_, i| {
cgnl[i] = @intCast(u8, off_enc.codes[i].len);
}
codegen[num_literals + num_offsets] = bad_code;
@@ -600,10 +600,10 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
var num_literals: u32 = 0;
var num_offsets: u32 = 0;
- for (self.literal_freq) |_, i| {
+ for (self.literal_freq, 0..) |_, i| {
self.literal_freq[i] = 0;
}
- for (self.offset_freq) |_, i| {
+ for (self.offset_freq, 0..) |_, i| {
self.offset_freq[i] = 0;
}
@@ -691,7 +691,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
}
// Clear histogram
- for (self.literal_freq) |_, i| {
+ for (self.literal_freq, 0..) |_, i| {
self.literal_freq[i] = 0;
}
diff --git a/lib/std/compress/deflate/huffman_code.zig b/lib/std/compress/deflate/huffman_code.zig
index 79598e59ae..e911e5219b 100644
--- a/lib/std/compress/deflate/huffman_code.zig
+++ b/lib/std/compress/deflate/huffman_code.zig
@@ -71,7 +71,7 @@ pub const HuffmanEncoder = struct {
// Number of non-zero literals
var count: u32 = 0;
// Set list to be the set of all non-zero literals and their frequencies
- for (freq) |f, i| {
+ for (freq, 0..) |f, i| {
if (f != 0) {
list[count] = LiteralNode{ .literal = @intCast(u16, i), .freq = f };
count += 1;
@@ -86,7 +86,7 @@ pub const HuffmanEncoder = struct {
if (count <= 2) {
// Handle the small cases here, because they are awkward for the general case code. With
// two or fewer literals, everything has bit length 1.
- for (list) |node, i| {
+ for (list, 0..) |node, i| {
// "list" is in order of increasing literal value.
self.codes[node.literal].set(@intCast(u16, i), 1);
}
@@ -103,7 +103,7 @@ pub const HuffmanEncoder = struct {
pub fn bitLength(self: *HuffmanEncoder, freq: []u16) u32 {
var total: u32 = 0;
- for (freq) |f, i| {
+ for (freq, 0..) |f, i| {
if (f != 0) {
total += @intCast(u32, f) * @intCast(u32, self.codes[i].len);
}
@@ -258,7 +258,7 @@ pub const HuffmanEncoder = struct {
var code = @as(u16, 0);
var list = list_arg;
- for (bit_count) |bits, n| {
+ for (bit_count, 0..) |bits, n| {
code <<= 1;
if (n == 0 or bits == 0) {
continue;
@@ -340,7 +340,7 @@ pub fn generateFixedLiteralEncoding(allocator: Allocator) !HuffmanEncoder {
pub fn generateFixedOffsetEncoding(allocator: Allocator) !HuffmanEncoder {
var h = try newHuffmanEncoder(allocator, 30);
var codes = h.codes;
- for (codes) |_, ch| {
+ for (codes, 0..) |_, ch| {
codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @intCast(u16, ch), 5), .len = 5 };
}
return h;
diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig
index 6c9a3ae862..dc220d8e87 100644
--- a/lib/std/compress/lzma/decode.zig
+++ b/lib/std/compress/lzma/decode.zig
@@ -143,7 +143,7 @@ pub const DecoderState = struct {
}
self.lzma_props = new_props;
- for (self.pos_slot_decoder) |*t| t.reset();
+ for (&self.pos_slot_decoder) |*t| t.reset();
self.align_decoder.reset();
self.pos_decoders = .{0x400} ** 115;
self.is_match = .{0x400} ** 192;
diff --git a/lib/std/compress/lzma/decode/rangecoder.zig b/lib/std/compress/lzma/decode/rangecoder.zig
index 6b6ca15997..5df10be060 100644
--- a/lib/std/compress/lzma/decode/rangecoder.zig
+++ b/lib/std/compress/lzma/decode/rangecoder.zig
@@ -174,8 +174,8 @@ pub const LenDecoder = struct {
pub fn reset(self: *LenDecoder) void {
self.choice = 0x400;
self.choice2 = 0x400;
- for (self.low_coder) |*t| t.reset();
- for (self.mid_coder) |*t| t.reset();
+ for (&self.low_coder) |*t| t.reset();
+ for (&self.mid_coder) |*t| t.reset();
self.high_coder.reset();
}
};
diff --git a/lib/std/comptime_string_map.zig b/lib/std/comptime_string_map.zig
index f7736413d5..267259225c 100644
--- a/lib/std/comptime_string_map.zig
+++ b/lib/std/comptime_string_map.zig
@@ -21,7 +21,7 @@ pub fn ComptimeStringMap(comptime V: type, comptime kvs_list: anytype) type {
return a.key.len < b.key.len;
}
}).lenAsc;
- for (kvs_list) |kv, i| {
+ for (kvs_list, 0..) |kv, i| {
if (V != void) {
sorted_kvs[i] = .{ .key = kv.@"0", .value = kv.@"1" };
} else {
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index 7d136fc12d..5d85fd5224 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -344,7 +344,7 @@ pub const Ed25519 = struct {
var a_batch: [count]Curve = undefined;
var expected_r_batch: [count]Curve = undefined;
- for (signature_batch) |signature, i| {
+ for (signature_batch, 0..) |signature, i| {
const r = signature.sig.r;
const s = signature.sig.s;
try Curve.scalar.rejectNonCanonical(s);
@@ -360,7 +360,7 @@ pub const Ed25519 = struct {
}
var hram_batch: [count]Curve.scalar.CompressedScalar = undefined;
- for (signature_batch) |signature, i| {
+ for (signature_batch, 0..) |signature, i| {
var h = Sha512.init(.{});
h.update(&r_batch[i]);
h.update(&signature.public_key.bytes);
@@ -371,20 +371,20 @@ pub const Ed25519 = struct {
}
var z_batch: [count]Curve.scalar.CompressedScalar = undefined;
- for (z_batch) |*z| {
+ for (&z_batch) |*z| {
crypto.random.bytes(z[0..16]);
mem.set(u8, z[16..], 0);
}
var zs_sum = Curve.scalar.zero;
- for (z_batch) |z, i| {
+ for (z_batch, 0..) |z, i| {
const zs = Curve.scalar.mul(z, s_batch[i]);
zs_sum = Curve.scalar.add(zs_sum, zs);
}
zs_sum = Curve.scalar.mul8(zs_sum);
var zhs: [count]Curve.scalar.CompressedScalar = undefined;
- for (z_batch) |z, i| {
+ for (z_batch, 0..) |z, i| {
zhs[i] = Curve.scalar.mul(z, hram_batch[i]);
}
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index 840f4b67d5..df4f8467f9 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -161,7 +161,7 @@ pub const Edwards25519 = struct {
fn slide(s: [32]u8) [2 * 32]i8 {
const reduced = if ((s[s.len - 1] & 0x80) == 0) s else scalar.reduce(s);
var e: [2 * 32]i8 = undefined;
- for (reduced) |x, i| {
+ for (reduced, 0..) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
@@ -308,7 +308,7 @@ pub const Edwards25519 = struct {
var bpc: [9]Edwards25519 = undefined;
mem.copy(Edwards25519, bpc[0..], basePointPc[0..bpc.len]);
- for (ps) |p, i| {
+ for (ps, 0..) |p, i| {
if (p.is_base) {
pcs[i] = bpc;
} else {
@@ -317,13 +317,13 @@ pub const Edwards25519 = struct {
}
}
var es: [count][2 * 32]i8 = undefined;
- for (ss) |s, i| {
+ for (ss, 0..) |s, i| {
es[i] = slide(s);
}
var q = Edwards25519.identityElement;
var pos: usize = 2 * 32 - 1;
while (true) : (pos -= 1) {
- for (es) |e, i| {
+ for (es, 0..) |e, i| {
const slot = e[pos];
if (slot > 0) {
q = q.add(pcs[i][@intCast(usize, slot)]);
diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig
index 3b491fa32e..22513f9efe 100644
--- a/lib/std/crypto/Certificate.zig
+++ b/lib/std/crypto/Certificate.zig
@@ -1092,7 +1092,7 @@ pub const rsa = struct {
if (exponent_elem.identifier.tag != .integer) return error.CertificateFieldHasWrongDataType;
// Skip over meaningless zeroes in the modulus.
const modulus_raw = pub_key[modulus_elem.slice.start..modulus_elem.slice.end];
- const modulus_offset = for (modulus_raw) |byte, i| {
+ const modulus_offset = for (modulus_raw, 0..) |byte, i| {
if (byte != 0) break i;
} else modulus_raw.len;
return .{
diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig
index da09aca351..4a030d7870 100644
--- a/lib/std/crypto/aegis.zig
+++ b/lib/std/crypto/aegis.zig
@@ -170,7 +170,7 @@ pub const Aegis128L = struct {
}
const computed_tag = state.mac(ad.len, m.len);
var acc: u8 = 0;
- for (computed_tag) |_, j| {
+ for (computed_tag, 0..) |_, j| {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
@@ -339,7 +339,7 @@ pub const Aegis256 = struct {
}
const computed_tag = state.mac(ad.len, m.len);
var acc: u8 = 0;
- for (computed_tag) |_, j| {
+ for (computed_tag, 0..) |_, j| {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
@@ -562,7 +562,7 @@ test "Aegis256 test vector 3" {
test "Aegis MAC" {
const key = [_]u8{0x00} ** Aegis128LMac.key_length;
var msg: [64]u8 = undefined;
- for (msg) |*m, i| {
+ for (&msg, 0..) |*m, i| {
m.* = @truncate(u8, i);
}
const st_init = Aegis128LMac.init(&key);
diff --git a/lib/std/crypto/aes.zig b/lib/std/crypto/aes.zig
index c969dfd0f7..e2efa5bb90 100644
--- a/lib/std/crypto/aes.zig
+++ b/lib/std/crypto/aes.zig
@@ -115,11 +115,11 @@ test "expand 128-bit key" {
const dec = Aes128.initDec(key);
var exp: [16]u8 = undefined;
- for (enc.key_schedule.round_keys) |round_key, i| {
+ for (enc.key_schedule.round_keys, 0..) |round_key, i| {
_ = try std.fmt.hexToBytes(&exp, exp_enc[i]);
try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
- for (dec.key_schedule.round_keys) |round_key, i| {
+ for (dec.key_schedule.round_keys, 0..) |round_key, i| {
_ = try std.fmt.hexToBytes(&exp, exp_dec[i]);
try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
@@ -154,11 +154,11 @@ test "expand 256-bit key" {
const dec = Aes256.initDec(key);
var exp: [16]u8 = undefined;
- for (enc.key_schedule.round_keys) |round_key, i| {
+ for (enc.key_schedule.round_keys, 0..) |round_key, i| {
_ = try std.fmt.hexToBytes(&exp, exp_enc[i]);
try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
- for (dec.key_schedule.round_keys) |round_key, i| {
+ for (dec.key_schedule.round_keys, 0..) |round_key, i| {
_ = try std.fmt.hexToBytes(&exp, exp_dec[i]);
try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
diff --git a/lib/std/crypto/aes/aesni.zig b/lib/std/crypto/aes/aesni.zig
index c513e71478..4b5f3d445f 100644
--- a/lib/std/crypto/aes/aesni.zig
+++ b/lib/std/crypto/aes/aesni.zig
@@ -200,7 +200,7 @@ fn KeySchedule(comptime Aes: type) type {
fn expand128(t1: *Block) Self {
var round_keys: [11]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
- inline for (rcs) |rc, round| {
+ inline for (rcs, 0..) |rc, round| {
round_keys[round] = t1.*;
t1.repr = drc(false, rc, t1.repr, t1.repr);
}
@@ -212,7 +212,7 @@ fn KeySchedule(comptime Aes: type) type {
var round_keys: [15]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32 };
round_keys[0] = t1.*;
- inline for (rcs) |rc, round| {
+ inline for (rcs, 0..) |rc, round| {
round_keys[round * 2 + 1] = t2.*;
t1.repr = drc(false, rc, t2.repr, t1.repr);
round_keys[round * 2 + 2] = t1.*;
diff --git a/lib/std/crypto/aes/armcrypto.zig b/lib/std/crypto/aes/armcrypto.zig
index 52b7433d11..3f4faf1b14 100644
--- a/lib/std/crypto/aes/armcrypto.zig
+++ b/lib/std/crypto/aes/armcrypto.zig
@@ -250,7 +250,7 @@ fn KeySchedule(comptime Aes: type) type {
fn expand128(t1: *Block) Self {
var round_keys: [11]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
- inline for (rcs) |rc, round| {
+ inline for (rcs, 0..) |rc, round| {
round_keys[round] = t1.*;
t1.repr = drc128(rc, t1.repr);
}
@@ -262,7 +262,7 @@ fn KeySchedule(comptime Aes: type) type {
var round_keys: [15]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32 };
round_keys[0] = t1.*;
- inline for (rcs) |rc, round| {
+ inline for (rcs, 0..) |rc, round| {
round_keys[round * 2 + 1] = t2.*;
t1.repr = drc256(false, rc, t2.repr, t1.repr);
round_keys[round * 2 + 2] = t1.*;
diff --git a/lib/std/crypto/aes/soft.zig b/lib/std/crypto/aes/soft.zig
index 57d433c115..b57f1746dc 100644
--- a/lib/std/crypto/aes/soft.zig
+++ b/lib/std/crypto/aes/soft.zig
@@ -471,7 +471,7 @@ fn generateSbox(invert: bool) [256]u8 {
fn generateTable(invert: bool) [4][256]u32 {
var table: [4][256]u32 = undefined;
- for (generateSbox(invert)) |value, index| {
+ for (generateSbox(invert), 0..) |value, index| {
table[0][index] = mul(value, if (invert) 0xb else 0x3);
table[0][index] |= math.shl(u32, mul(value, if (invert) 0xd else 0x1), 8);
table[0][index] |= math.shl(u32, mul(value, if (invert) 0x9 else 0x1), 16);
diff --git a/lib/std/crypto/aes_gcm.zig b/lib/std/crypto/aes_gcm.zig
index 6eadcdee2f..b9b2362d9f 100644
--- a/lib/std/crypto/aes_gcm.zig
+++ b/lib/std/crypto/aes_gcm.zig
@@ -50,7 +50,7 @@ fn AesGcm(comptime Aes: anytype) type {
mem.writeIntBig(u64, final_block[8..16], m.len * 8);
mac.update(&final_block);
mac.final(tag);
- for (t) |x, i| {
+ for (t, 0..) |x, i| {
tag[i] ^= x;
}
}
@@ -82,12 +82,12 @@ fn AesGcm(comptime Aes: anytype) type {
mac.update(&final_block);
var computed_tag: [Ghash.mac_length]u8 = undefined;
mac.final(&computed_tag);
- for (t) |x, i| {
+ for (t, 0..) |x, i| {
computed_tag[i] ^= x;
}
var acc: u8 = 0;
- for (computed_tag) |_, p| {
+ for (computed_tag, 0..) |_, p| {
acc |= (computed_tag[p] ^ tag[p]);
}
if (acc != 0) {
diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig
index 68f5bc4a9a..a5d1001ed5 100644
--- a/lib/std/crypto/aes_ocb.zig
+++ b/lib/std/crypto/aes_ocb.zig
@@ -155,7 +155,7 @@ fn AesOcb(comptime Aes: anytype) type {
xorWith(&offset, lx.star);
var pad = offset;
aes_enc_ctx.encrypt(&pad, &pad);
- for (m[i * 16 ..]) |x, j| {
+ for (m[i * 16 ..], 0..) |x, j| {
c[i * 16 + j] = pad[j] ^ x;
}
var e = [_]u8{0} ** 16;
@@ -220,7 +220,7 @@ fn AesOcb(comptime Aes: anytype) type {
xorWith(&offset, lx.star);
var pad = offset;
aes_enc_ctx.encrypt(&pad, &pad);
- for (c[i * 16 ..]) |x, j| {
+ for (c[i * 16 ..], 0..) |x, j| {
m[i * 16 + j] = pad[j] ^ x;
}
var e = [_]u8{0} ** 16;
@@ -242,14 +242,14 @@ fn AesOcb(comptime Aes: anytype) type {
inline fn xorBlocks(x: Block, y: Block) Block {
var z: Block = x;
- for (z) |*v, i| {
+ for (&z, 0..) |*v, i| {
v.* = x[i] ^ y[i];
}
return z;
}
inline fn xorWith(x: *Block, y: Block) void {
- for (x) |*v, i| {
+ for (x, 0..) |*v, i| {
v.* ^= y[i];
}
}
diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig
index 7269470d5f..a95e75e538 100644
--- a/lib/std/crypto/argon2.zig
+++ b/lib/std/crypto/argon2.zig
@@ -188,13 +188,13 @@ fn initBlocks(
mem.writeIntLittle(u32, h0[Blake2b512.digest_length..][0..4], 0);
blake2bLong(&block0, h0);
- for (blocks.items[j + 0]) |*v, i| {
+ for (&blocks.items[j + 0], 0..) |*v, i| {
v.* = mem.readIntLittle(u64, block0[i * 8 ..][0..8]);
}
mem.writeIntLittle(u32, h0[Blake2b512.digest_length..][0..4], 1);
blake2bLong(&block0, h0);
- for (blocks.items[j + 1]) |*v, i| {
+ for (&blocks.items[j + 1], 0..) |*v, i| {
v.* = mem.readIntLittle(u64, block0[i * 8 ..][0..8]);
}
}
@@ -352,7 +352,7 @@ fn processBlockGeneric(
comptime xor: bool,
) void {
var t: [block_length]u64 = undefined;
- for (t) |*v, i| {
+ for (&t, 0..) |*v, i| {
v.* = in1[i] ^ in2[i];
}
var i: usize = 0;
@@ -375,11 +375,11 @@ fn processBlockGeneric(
}
}
if (xor) {
- for (t) |v, j| {
+ for (t, 0..) |v, j| {
out[j] ^= in1[j] ^ in2[j] ^ v;
}
} else {
- for (t) |v, j| {
+ for (t, 0..) |v, j| {
out[j] = in1[j] ^ in2[j] ^ v;
}
}
@@ -428,12 +428,12 @@ fn finalize(
const lanes = memory / threads;
var lane: u24 = 0;
while (lane < threads - 1) : (lane += 1) {
- for (blocks.items[(lane * lanes) + lanes - 1]) |v, i| {
+ for (blocks.items[(lane * lanes) + lanes - 1], 0..) |v, i| {
blocks.items[memory - 1][i] ^= v;
}
}
var block: [1024]u8 = undefined;
- for (blocks.items[memory - 1]) |v, i| {
+ for (blocks.items[memory - 1], 0..) |v, i| {
mem.writeIntLittle(u64, block[i * 8 ..][0..8], v);
}
blake2bLong(out, &block);
diff --git a/lib/std/crypto/ascon.zig b/lib/std/crypto/ascon.zig
index f692bdbe71..6de003d436 100644
--- a/lib/std/crypto/ascon.zig
+++ b/lib/std/crypto/ascon.zig
@@ -74,7 +74,7 @@ pub fn State(comptime endian: builtin.Endian) type {
/// Byte-swap the entire state if the architecture doesn't match the required endianness.
pub fn endianSwap(self: *Self) void {
- for (self.st) |*w| {
+ for (&self.st) |*w| {
w.* = mem.toNative(u64, w.*, endian);
}
}
diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig
index e5c688557f..2191ab0d9e 100644
--- a/lib/std/crypto/bcrypt.zig
+++ b/lib/std/crypto/bcrypt.zig
@@ -437,7 +437,7 @@ pub fn bcrypt(
}
var ct: [ct_length]u8 = undefined;
- for (cdata) |c, i| {
+ for (cdata, 0..) |c, i| {
mem.writeIntBig(u32, ct[i * 4 ..][0..4], c);
}
return ct[0..dk_length].*;
@@ -505,7 +505,7 @@ const pbkdf_prf = struct {
// copy out
var out: [32]u8 = undefined;
- for (cdata) |v, i| {
+ for (cdata, 0..) |v, i| {
std.mem.writeIntLittle(u32, out[4 * i ..][0..4], v);
}
diff --git a/lib/std/crypto/blake2.zig b/lib/std/crypto/blake2.zig
index 80557eb255..85c26ce599 100644
--- a/lib/std/crypto/blake2.zig
+++ b/lib/std/crypto/blake2.zig
@@ -133,7 +133,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
mem.set(u8, d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
- for (d.h) |*x| x.* = mem.nativeToLittle(u32, x.*);
+ for (&d.h) |*x| x.* = mem.nativeToLittle(u32, x.*);
mem.copy(u8, out[0..], @ptrCast(*[digest_length]u8, &d.h));
}
@@ -141,7 +141,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
var m: [16]u32 = undefined;
var v: [16]u32 = undefined;
- for (m) |*r, i| {
+ for (&m, 0..) |*r, i| {
r.* = mem.readIntLittle(u32, b[4 * i ..][0..4]);
}
@@ -180,7 +180,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
}
}
- for (d.h) |*r, i| {
+ for (&d.h, 0..) |*r, i| {
r.* ^= v[i] ^ v[i + 8];
}
}
@@ -568,7 +568,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
mem.set(u8, d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
- for (d.h) |*x| x.* = mem.nativeToLittle(u64, x.*);
+ for (&d.h) |*x| x.* = mem.nativeToLittle(u64, x.*);
mem.copy(u8, out[0..], @ptrCast(*[digest_length]u8, &d.h));
}
@@ -576,7 +576,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
var m: [16]u64 = undefined;
var v: [16]u64 = undefined;
- for (m) |*r, i| {
+ for (&m, 0..) |*r, i| {
r.* = mem.readIntLittle(u64, b[8 * i ..][0..8]);
}
@@ -615,7 +615,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
}
}
- for (d.h) |*r, i| {
+ for (&d.h, 0..) |*r, i| {
r.* ^= v[i] ^ v[i + 8];
}
}
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index 0334abfdb0..5b8e21d922 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -192,7 +192,7 @@ const CompressGeneric = struct {
for (MSG_SCHEDULE) |schedule| {
round(&state, block_words, schedule);
}
- for (chaining_value) |_, i| {
+ for (chaining_value, 0..) |_, i| {
state[i] ^= state[i + 8];
state[i + 8] ^= chaining_value[i];
}
@@ -211,7 +211,7 @@ fn first8Words(words: [16]u32) [8]u32 {
fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 {
var words: [count]u32 = undefined;
- for (&words) |*word, i| {
+ for (&words, 0..) |*word, i| {
word.* = mem.readIntSliceLittle(u32, bytes[4 * i ..]);
}
return words;
@@ -658,7 +658,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void {
// Setup input pattern
var input_pattern: [251]u8 = undefined;
- for (input_pattern) |*e, i| e.* = @truncate(u8, i);
+ for (&input_pattern, 0..) |*e, i| e.* = @truncate(u8, i);
// Write repeating input pattern to hasher
var input_counter = input_len;
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index 2a43f4b94c..883ee51a62 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -197,7 +197,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize) type {
fn hchacha20(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
- for (c) |_, i| {
+ for (c, 0..) |_, i| {
c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
}
const ctx = initContext(keyToWords(key), c);
@@ -338,7 +338,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
fn hchacha20(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
- for (c) |_, i| {
+ for (c, 0..) |_, i| {
c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
}
const ctx = initContext(keyToWords(key), c);
@@ -543,7 +543,7 @@ fn ChaChaPoly1305(comptime rounds_nb: usize) type {
mac.final(computedTag[0..]);
var acc: u8 = 0;
- for (computedTag) |_, i| {
+ for (computedTag, 0..) |_, i| {
acc |= computedTag[i] ^ tag[i];
}
if (acc != 0) {
diff --git a/lib/std/crypto/cmac.zig b/lib/std/crypto/cmac.zig
index 911eac7902..fd00461858 100644
--- a/lib/std/crypto/cmac.zig
+++ b/lib/std/crypto/cmac.zig
@@ -46,19 +46,19 @@ pub fn Cmac(comptime BlockCipher: type) type {
const left = block_length - self.pos;
var m = msg;
if (m.len > left) {
- for (self.buf[self.pos..]) |*b, i| b.* ^= m[i];
+ for (self.buf[self.pos..], 0..) |*b, i| b.* ^= m[i];
m = m[left..];
self.cipher_ctx.encrypt(&self.buf, &self.buf);
self.pos = 0;
}
while (m.len > block_length) {
- for (self.buf[0..block_length]) |*b, i| b.* ^= m[i];
+ for (self.buf[0..block_length], 0..) |*b, i| b.* ^= m[i];
m = m[block_length..];
self.cipher_ctx.encrypt(&self.buf, &self.buf);
self.pos = 0;
}
if (m.len > 0) {
- for (self.buf[self.pos..][0..m.len]) |*b, i| b.* ^= m[i];
+ for (self.buf[self.pos..][0..m.len], 0..) |*b, i| b.* ^= m[i];
self.pos += m.len;
}
}
@@ -69,7 +69,7 @@ pub fn Cmac(comptime BlockCipher: type) type {
mac = self.k2;
mac[self.pos] ^= 0x80;
}
- for (mac) |*b, i| b.* ^= self.buf[i];
+ for (&mac, 0..) |*b, i| b.* ^= self.buf[i];
self.cipher_ctx.encrypt(out, &mac);
}
diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig
index bb7318325b..908bace73e 100644
--- a/lib/std/crypto/ghash_polyval.zig
+++ b/lib/std/crypto/ghash_polyval.zig
@@ -320,7 +320,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
if (st.leftover > 0) {
const want = math.min(block_length - st.leftover, mb.len);
const mc = mb[0..want];
- for (mc) |x, i| {
+ for (mc, 0..) |x, i| {
st.buf[st.leftover + i] = x;
}
mb = mb[want..];
@@ -337,7 +337,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
mb = mb[want..];
}
if (mb.len > 0) {
- for (mb) |x, i| {
+ for (mb, 0..) |x, i| {
st.buf[st.leftover + i] = x;
}
st.leftover += mb.len;
diff --git a/lib/std/crypto/gimli.zig b/lib/std/crypto/gimli.zig
index 4952937697..9443b97be7 100644
--- a/lib/std/crypto/gimli.zig
+++ b/lib/std/crypto/gimli.zig
@@ -45,7 +45,7 @@ pub const State = struct {
}
inline fn endianSwap(self: *Self) void {
- for (self.data) |*w| {
+ for (&self.data) |*w| {
w.* = mem.littleToNative(u32, w.*);
}
}
@@ -228,7 +228,7 @@ pub const Hash = struct {
while (in.len > 0) {
const left = State.RATE - self.buf_off;
const ps = math.min(in.len, left);
- for (buf[self.buf_off .. self.buf_off + ps]) |*p, i| {
+ for (buf[self.buf_off .. self.buf_off + ps], 0..) |*p, i| {
p.* ^= in[i];
}
self.buf_off += ps;
@@ -329,12 +329,12 @@ pub const Aead = struct {
// exactly one final non-full block, in the same way as Gimli-Hash.
var data = ad;
while (data.len >= State.RATE) : (data = data[State.RATE..]) {
- for (buf[0..State.RATE]) |*p, i| {
+ for (buf[0..State.RATE], 0..) |*p, i| {
p.* ^= data[i];
}
state.permute();
}
- for (buf[0..data.len]) |*p, i| {
+ for (buf[0..data.len], 0..) |*p, i| {
p.* ^= data[i];
}
@@ -371,13 +371,13 @@ pub const Aead = struct {
in = in[State.RATE..];
out = out[State.RATE..];
}) {
- for (in[0..State.RATE]) |v, i| {
+ for (in[0..State.RATE], 0..) |v, i| {
buf[i] ^= v;
}
mem.copy(u8, out[0..State.RATE], buf[0..State.RATE]);
state.permute();
}
- for (in[0..]) |v, i| {
+ for (in[0..], 0..) |v, i| {
buf[i] ^= v;
out[i] = buf[i];
}
@@ -414,13 +414,13 @@ pub const Aead = struct {
out = out[State.RATE..];
}) {
const d = in[0..State.RATE].*;
- for (d) |v, i| {
+ for (d, 0..) |v, i| {
out[i] = buf[i] ^ v;
}
mem.copy(u8, buf[0..State.RATE], d[0..State.RATE]);
state.permute();
}
- for (buf[0..in.len]) |*p, i| {
+ for (buf[0..in.len], 0..) |*p, i| {
const d = in[i];
out[i] = p.* ^ d;
p.* = d;
diff --git a/lib/std/crypto/hmac.zig b/lib/std/crypto/hmac.zig
index 5ff37f8112..457cc5ec18 100644
--- a/lib/std/crypto/hmac.zig
+++ b/lib/std/crypto/hmac.zig
@@ -46,11 +46,11 @@ pub fn Hmac(comptime Hash: type) type {
mem.copy(u8, scratch[0..], key);
}
- for (ctx.o_key_pad) |*b, i| {
+ for (&ctx.o_key_pad, 0..) |*b, i| {
b.* = scratch[i] ^ 0x5c;
}
- for (i_key_pad) |*b, i| {
+ for (&i_key_pad, 0..) |*b, i| {
b.* = scratch[i] ^ 0x36;
}
diff --git a/lib/std/crypto/md5.zig b/lib/std/crypto/md5.zig
index 9306c222ed..6276fadb43 100644
--- a/lib/std/crypto/md5.zig
+++ b/lib/std/crypto/md5.zig
@@ -110,7 +110,7 @@ pub const Md5 = struct {
d.round(d.buf[0..]);
- for (d.s) |s, j| {
+ for (d.s, 0..) |s, j| {
mem.writeIntLittle(u32, out[4 * j ..][0..4], s);
}
}
diff --git a/lib/std/crypto/pbkdf2.zig b/lib/std/crypto/pbkdf2.zig
index b8f03bceb7..6f9783df72 100644
--- a/lib/std/crypto/pbkdf2.zig
+++ b/lib/std/crypto/pbkdf2.zig
@@ -138,7 +138,7 @@ pub fn pbkdf2(dk: []u8, password: []const u8, salt: []const u8, rounds: u32, com
mem.copy(u8, prev_block[0..], new_block[0..]);
// F (P, S, c, i) = U_1 \xor U_2 \xor ... \xor U_c
- for (dk_block) |_, j| {
+ for (dk_block, 0..) |_, j| {
dk_block[j] ^= new_block[j];
}
}
diff --git a/lib/std/crypto/pcurves/common.zig b/lib/std/crypto/pcurves/common.zig
index 5abc6d348f..40f4a728c7 100644
--- a/lib/std/crypto/pcurves/common.zig
+++ b/lib/std/crypto/pcurves/common.zig
@@ -65,7 +65,7 @@ pub fn Field(comptime params: FieldParams) type {
/// Swap the endianness of an encoded element.
pub fn orderSwap(s: [encoded_length]u8) [encoded_length]u8 {
var t = s;
- for (s) |x, i| t[t.len - 1 - i] = x;
+ for (s, 0..) |x, i| t[t.len - 1 - i] = x;
return t;
}
diff --git a/lib/std/crypto/pcurves/p256.zig b/lib/std/crypto/pcurves/p256.zig
index 5898f83c10..d060abd12b 100644
--- a/lib/std/crypto/pcurves/p256.zig
+++ b/lib/std/crypto/pcurves/p256.zig
@@ -321,7 +321,7 @@ pub const P256 = struct {
fn slide(s: [32]u8) [2 * 32 + 1]i8 {
var e: [2 * 32 + 1]i8 = undefined;
- for (s) |x, i| {
+ for (s, 0..) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
diff --git a/lib/std/crypto/pcurves/p256/scalar.zig b/lib/std/crypto/pcurves/p256/scalar.zig
index d3ac2a9b95..ce019082ef 100644
--- a/lib/std/crypto/pcurves/p256/scalar.zig
+++ b/lib/std/crypto/pcurves/p256/scalar.zig
@@ -187,7 +187,7 @@ const ScalarDouble = struct {
var s = s_;
if (endian == .Big) {
- for (s_) |x, i| s[s.len - 1 - i] = x;
+ for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
{
diff --git a/lib/std/crypto/pcurves/p384.zig b/lib/std/crypto/pcurves/p384.zig
index 0694d4f259..8ea787a417 100644
--- a/lib/std/crypto/pcurves/p384.zig
+++ b/lib/std/crypto/pcurves/p384.zig
@@ -321,7 +321,7 @@ pub const P384 = struct {
fn slide(s: [48]u8) [2 * 48 + 1]i8 {
var e: [2 * 48 + 1]i8 = undefined;
- for (s) |x, i| {
+ for (s, 0..) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
diff --git a/lib/std/crypto/pcurves/p384/scalar.zig b/lib/std/crypto/pcurves/p384/scalar.zig
index b6db0c83d4..ec71a52efa 100644
--- a/lib/std/crypto/pcurves/p384/scalar.zig
+++ b/lib/std/crypto/pcurves/p384/scalar.zig
@@ -175,7 +175,7 @@ const ScalarDouble = struct {
var s = s_;
if (endian == .Big) {
- for (s_) |x, i| s[s.len - 1 - i] = x;
+ for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero };
{
diff --git a/lib/std/crypto/pcurves/secp256k1.zig b/lib/std/crypto/pcurves/secp256k1.zig
index 79698bd7dd..12bd022b11 100644
--- a/lib/std/crypto/pcurves/secp256k1.zig
+++ b/lib/std/crypto/pcurves/secp256k1.zig
@@ -349,7 +349,7 @@ pub const Secp256k1 = struct {
fn slide(s: [32]u8) [2 * 32 + 1]i8 {
var e: [2 * 32 + 1]i8 = undefined;
- for (s) |x, i| {
+ for (s, 0..) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
diff --git a/lib/std/crypto/pcurves/secp256k1/scalar.zig b/lib/std/crypto/pcurves/secp256k1/scalar.zig
index 2d91f8bc99..0b7d6e952d 100644
--- a/lib/std/crypto/pcurves/secp256k1/scalar.zig
+++ b/lib/std/crypto/pcurves/secp256k1/scalar.zig
@@ -187,7 +187,7 @@ const ScalarDouble = struct {
var s = s_;
if (endian == .Big) {
- for (s_) |x, i| s[s.len - 1 - i] = x;
+ for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
{
diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig
index 4f16f66cd0..e99cf144d5 100644
--- a/lib/std/crypto/poly1305.zig
+++ b/lib/std/crypto/poly1305.zig
@@ -82,7 +82,7 @@ pub const Poly1305 = struct {
if (st.leftover > 0) {
const want = std.math.min(block_length - st.leftover, mb.len);
const mc = mb[0..want];
- for (mc) |x, i| {
+ for (mc, 0..) |x, i| {
st.buf[st.leftover + i] = x;
}
mb = mb[want..];
@@ -103,7 +103,7 @@ pub const Poly1305 = struct {
// store leftover
if (mb.len > 0) {
- for (mb) |x, i| {
+ for (mb, 0..) |x, i| {
st.buf[st.leftover + i] = x;
}
st.leftover += mb.len;
diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig
index 2027403ee2..492b8e9988 100644
--- a/lib/std/crypto/salsa20.zig
+++ b/lib/std/crypto/salsa20.zig
@@ -157,7 +157,7 @@ fn SalsaVecImpl(comptime rounds: comptime_int) type {
fn hsalsa(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
- for (c) |_, i| {
+ for (c, 0..) |_, i| {
c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
}
const ctx = initContext(keyToWords(key), c);
@@ -240,7 +240,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
}
fn hashToBytes(out: *[64]u8, x: BlockVec) void {
- for (x) |w, i| {
+ for (x, 0..) |w, i| {
mem.writeIntLittle(u32, out[i * 4 ..][0..4], w);
}
}
@@ -282,7 +282,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
fn hsalsa(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
- for (c) |_, i| {
+ for (c, 0..) |_, i| {
c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
}
const ctx = initContext(keyToWords(key), c);
@@ -413,7 +413,7 @@ pub const XSalsa20Poly1305 = struct {
var computedTag: [tag_length]u8 = undefined;
mac.final(&computedTag);
var acc: u8 = 0;
- for (computedTag) |_, i| {
+ for (computedTag, 0..) |_, i| {
acc |= computedTag[i] ^ tag[i];
}
if (acc != 0) {
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index 9b2bf01022..dc73d974c7 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -31,7 +31,7 @@ fn blockCopy(dst: []align(16) u32, src: []align(16) const u32, n: usize) void {
}
fn blockXor(dst: []align(16) u32, src: []align(16) const u32, n: usize) void {
- for (src[0 .. n * 16]) |v, i| {
+ for (src[0 .. n * 16], 0..) |v, i| {
dst[i] ^= v;
}
}
@@ -90,7 +90,7 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
var x = @alignCast(16, xy[0 .. 32 * r]);
var y = @alignCast(16, xy[32 * r ..]);
- for (x) |*v1, j| {
+ for (x, 0..) |*v1, j| {
v1.* = mem.readIntSliceLittle(u32, b[4 * j ..]);
}
@@ -115,7 +115,7 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
blockMix(&tmp, y, x, r);
}
- for (x) |v1, j| {
+ for (x, 0..) |v1, j| {
mem.writeIntLittle(u32, b[4 * j ..][0..4], v1);
}
}
@@ -350,7 +350,7 @@ const crypt_format = struct {
fn intDecode(comptime T: type, src: *const [(@bitSizeOf(T) + 5) / 6]u8) !T {
var v: T = 0;
- for (src) |x, i| {
+ for (src, 0..) |x, i| {
const vi = mem.indexOfScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding;
v |= @intCast(T, vi) << @intCast(math.Log2Int(T), i * 6);
}
@@ -365,10 +365,10 @@ const crypt_format = struct {
}
const leftover = src[i * 4 ..];
var v: u24 = 0;
- for (leftover) |_, j| {
+ for (leftover, 0..) |_, j| {
v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @intCast(u5, j * 6);
}
- for (dst[i * 3 ..]) |*x, j| {
+ for (dst[i * 3 ..], 0..) |*x, j| {
x.* = @truncate(u8, v >> @intCast(u5, j * 8));
}
}
@@ -381,7 +381,7 @@ const crypt_format = struct {
}
const leftover = src[i * 3 ..];
var v: u24 = 0;
- for (leftover) |x, j| {
+ for (leftover, 0..) |x, j| {
v |= @as(u24, x) << @intCast(u5, j * 8);
}
intEncode(dst[i * 4 ..], v);
diff --git a/lib/std/crypto/sha1.zig b/lib/std/crypto/sha1.zig
index 99289e35c4..4d11b04eb5 100644
--- a/lib/std/crypto/sha1.zig
+++ b/lib/std/crypto/sha1.zig
@@ -105,7 +105,7 @@ pub const Sha1 = struct {
d.round(d.buf[0..]);
- for (d.s) |s, j| {
+ for (d.s, 0..) |s, j| {
mem.writeIntBig(u32, out[4 * j ..][0..4], s);
}
}
diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig
index 217dea3723..24c22ecc9f 100644
--- a/lib/std/crypto/sha2.zig
+++ b/lib/std/crypto/sha2.zig
@@ -175,7 +175,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
// May truncate for possible 224 output
const rr = d.s[0 .. params.digest_bits / 32];
- for (rr) |s, j| {
+ for (rr, 0..) |s, j| {
mem.writeIntBig(u32, out[4 * j ..][0..4], s);
}
}
@@ -199,7 +199,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
fn round(d: *Self, b: *const [64]u8) void {
var s: [64]u32 align(16) = undefined;
- for (@ptrCast(*align(1) const [16]u32, b)) |*elem, i| {
+ for (@ptrCast(*align(1) const [16]u32, b), 0..) |*elem, i| {
s[i] = mem.readIntBig(u32, mem.asBytes(elem));
}
@@ -665,7 +665,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
// May truncate for possible 384 output
const rr = d.s[0 .. params.digest_bits / 64];
- for (rr) |s, j| {
+ for (rr, 0..) |s, j| {
mem.writeIntBig(u64, out[8 * j ..][0..8], s);
}
}
diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig
index 7735d7bc71..c2801a4709 100644
--- a/lib/std/crypto/sha3.zig
+++ b/lib/std/crypto/sha3.zig
@@ -43,7 +43,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
// absorb
while (len >= rate) {
- for (d.s[offset .. offset + rate]) |*r, i|
+ for (d.s[offset .. offset + rate], 0..) |*r, i|
r.* ^= b[ip..][i];
keccakF(1600, &d.s);
@@ -54,7 +54,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
offset = 0;
}
- for (d.s[offset .. offset + len]) |*r, i|
+ for (d.s[offset .. offset + len], 0..) |*r, i|
r.* ^= b[ip..][i];
d.offset = offset + len;
@@ -126,7 +126,7 @@ fn keccakF(comptime F: usize, d: *[F / 8]u8) void {
var t = [_]u64{0} ** 1;
var c = [_]u64{0} ** 5;
- for (s) |*r, i| {
+ for (&s, 0..) |*r, i| {
r.* = mem.readIntLittle(u64, d[8 * i ..][0..8]);
}
@@ -171,7 +171,7 @@ fn keccakF(comptime F: usize, d: *[F / 8]u8) void {
s[0] ^= round;
}
- for (s) |r, i| {
+ for (s, 0..) |r, i| {
mem.writeIntLittle(u64, d[8 * i ..][0..8], r);
}
}
diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig
index e527e4558a..16388439d1 100644
--- a/lib/std/crypto/siphash.zig
+++ b/lib/std/crypto/siphash.zig
@@ -339,7 +339,7 @@ test "siphash64-2-4 sanity" {
const siphash = SipHash64(2, 4);
var buffer: [64]u8 = undefined;
- for (vectors) |vector, i| {
+ for (vectors, 0..) |vector, i| {
buffer[i] = @intCast(u8, i);
var out: [siphash.mac_length]u8 = undefined;
@@ -419,7 +419,7 @@ test "siphash128-2-4 sanity" {
const siphash = SipHash128(2, 4);
var buffer: [64]u8 = undefined;
- for (vectors) |vector, i| {
+ for (vectors, 0..) |vector, i| {
buffer[i] = @intCast(u8, i);
var out: [siphash.mac_length]u8 = undefined;
@@ -430,7 +430,7 @@ test "siphash128-2-4 sanity" {
test "iterative non-divisible update" {
var buf: [1024]u8 = undefined;
- for (buf) |*e, i| {
+ for (&buf, 0..) |*e, i| {
e.* = @truncate(u8, i);
}
diff --git a/lib/std/crypto/test.zig b/lib/std/crypto/test.zig
index 656fa89cfe..f891d8c320 100644
--- a/lib/std/crypto/test.zig
+++ b/lib/std/crypto/test.zig
@@ -13,7 +13,7 @@ pub fn assertEqualHash(comptime Hasher: anytype, comptime expected_hex: *const [
// Assert `expected` == hex(`input`) where `input` is a bytestring
pub fn assertEqual(comptime expected_hex: [:0]const u8, input: []const u8) !void {
var expected_bytes: [expected_hex.len / 2]u8 = undefined;
- for (expected_bytes) |*r, i| {
+ for (&expected_bytes, 0..) |*r, i| {
r.* = fmt.parseInt(u8, expected_hex[2 * i .. 2 * i + 2], 16) catch unreachable;
}
diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig
index 7d89da8929..526a5f2175 100644
--- a/lib/std/crypto/tls.zig
+++ b/lib/std/crypto/tls.zig
@@ -344,7 +344,7 @@ pub inline fn array(comptime elem_size: comptime_int, bytes: anytype) [2 + bytes
pub inline fn enum_array(comptime E: type, comptime tags: []const E) [2 + @sizeOf(E) * tags.len]u8 {
assert(@sizeOf(E) == 2);
var result: [tags.len * 2]u8 = undefined;
- for (tags) |elem, i| {
+ for (tags, 0..) |elem, i| {
result[i * 2] = @truncate(u8, @enumToInt(elem) >> 8);
result[i * 2 + 1] = @truncate(u8, @enumToInt(elem));
}
diff --git a/lib/std/crypto/utils.zig b/lib/std/crypto/utils.zig
index fd7264e737..ec4c322963 100644
--- a/lib/std/crypto/utils.zig
+++ b/lib/std/crypto/utils.zig
@@ -18,7 +18,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool {
@compileError("Elements to be compared must be integers");
}
var acc = @as(C, 0);
- for (a) |x, i| {
+ for (a, 0..) |x, i| {
acc |= x ^ b[i];
}
const s = @typeInfo(C).Int.bits;
@@ -64,7 +64,7 @@ pub fn timingSafeCompare(comptime T: type, a: []const T, b: []const T, endian: E
eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits);
}
} else {
- for (a) |x1, i| {
+ for (a, 0..) |x1, i| {
const x2 = b[i];
gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq;
eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits);
diff --git a/lib/std/crypto/xoodoo.zig b/lib/std/crypto/xoodoo.zig
index bbc579f073..ea3554a635 100644
--- a/lib/std/crypto/xoodoo.zig
+++ b/lib/std/crypto/xoodoo.zig
@@ -66,7 +66,7 @@ pub const State = struct {
/// XOR bytes into the beginning of the state.
pub fn addBytes(self: *State, bytes: []const u8) void {
self.endianSwap();
- for (self.asBytes()[0..bytes.len]) |*byte, i| {
+ for (self.asBytes()[0..bytes.len], 0..) |*byte, i| {
byte.* ^= bytes[i];
}
self.endianSwap();
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 3708c4fe81..97acf81af6 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -213,7 +213,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT
var addr_buf_stack: [32]usize = undefined;
const addr_buf = if (addr_buf_stack.len > addrs.len) addr_buf_stack[0..] else addrs;
const n = walkStackWindows(addr_buf[0..]);
- const first_index = for (addr_buf[0..n]) |addr, i| {
+ const first_index = for (addr_buf[0..n], 0..) |addr, i| {
if (addr == first_addr) {
break i;
}
@@ -224,13 +224,13 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT
const end_index = math.min(first_index + addrs.len, n);
const slice = addr_buf[first_index..end_index];
// We use a for loop here because slice and addrs may alias.
- for (slice) |addr, i| {
+ for (slice, 0..) |addr, i| {
addrs[i] = addr;
}
stack_trace.index = slice.len;
} else {
var it = StackIterator.init(first_address, null);
- for (stack_trace.instruction_addresses) |*addr, i| {
+ for (stack_trace.instruction_addresses, 0..) |*addr, i| {
addr.* = it.next() orelse {
stack_trace.index = i;
return;
@@ -621,7 +621,7 @@ pub fn writeCurrentStackTraceWindows(
const n = walkStackWindows(addr_buf[0..]);
const addrs = addr_buf[0..n];
var start_i: usize = if (start_addr) |saddr| blk: {
- for (addrs) |addr, i| {
+ for (addrs, 0..) |addr, i| {
if (addr == saddr) break :blk i;
}
return;
@@ -2138,7 +2138,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
) catch return;
return;
};
- for (t.addrs[0..end]) |frames_array, i| {
+ for (t.addrs[0..end], 0..) |frames_array, i| {
stderr.print("{s}:\n", .{t.notes[i]}) catch return;
var frames_array_mutable = frames_array;
const frames = mem.sliceTo(frames_array_mutable[0..], 0);
diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig
index a6f38db437..99dff14c36 100644
--- a/lib/std/dwarf.zig
+++ b/lib/std/dwarf.zig
@@ -1064,7 +1064,7 @@ pub const DwarfInfo = struct {
.has_children = table_entry.has_children,
};
try result.attrs.resize(allocator, table_entry.attrs.items.len);
- for (table_entry.attrs.items) |attr, i| {
+ for (table_entry.attrs.items, 0..) |attr, i| {
result.attrs.items[i] = Die.Attr{
.id = attr.attr_id,
.value = try parseFormValue(
diff --git a/lib/std/enums.zig b/lib/std/enums.zig
index 2640e6aac9..61c2f01e23 100644
--- a/lib/std/enums.zig
+++ b/lib/std/enums.zig
@@ -35,7 +35,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
pub fn valuesFromFields(comptime E: type, comptime fields: []const EnumField) []const E {
comptime {
var result: [fields.len]E = undefined;
- for (fields) |f, i| {
+ for (fields, 0..) |f, i| {
result[i] = @field(E, f.name);
}
return &result;
@@ -1331,7 +1331,7 @@ pub fn EnumIndexer(comptime E: type) type {
pub const Key = E;
pub const count = fields_len;
pub fn indexOf(e: E) usize {
- for (keys) |k, i| {
+ for (keys, 0..) |k, i| {
if (k == e) return i;
}
unreachable;
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index c851a9b80c..34f74e10d2 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -278,7 +278,7 @@ pub const Loop = struct {
const empty_kevs = &[0]os.Kevent{};
- for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+ for (self.eventfd_resume_nodes, 0..) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
.data = ResumeNode.EventFd{
.base = ResumeNode{
@@ -343,7 +343,7 @@ pub const Loop = struct {
const empty_kevs = &[0]os.Kevent{};
- for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+ for (self.eventfd_resume_nodes, 0..) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
.data = ResumeNode.EventFd{
.base = ResumeNode{
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index f2ee24a9e3..0da25fde78 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -570,7 +570,7 @@ pub fn formatType(
return writer.writeAll("{ ... }");
}
try writer.writeAll("{");
- inline for (info.fields) |f, i| {
+ inline for (info.fields, 0..) |f, i| {
if (i == 0) {
try writer.writeAll(" ");
} else {
@@ -585,7 +585,7 @@ pub fn formatType(
return writer.writeAll("{ ... }");
}
try writer.writeAll("{");
- inline for (info.fields) |f, i| {
+ inline for (info.fields, 0..) |f, i| {
if (i == 0) {
try writer.writeAll(" .");
} else {
@@ -612,7 +612,7 @@ pub fn formatType(
}
}
if (comptime std.meta.trait.isZigString(info.child)) {
- for (value) |item, i| {
+ for (value, 0..) |item, i| {
comptime checkTextFmt(actual_fmt);
if (i != 0) try formatBuf(", ", options, writer);
try formatBuf(item, options, writer);
@@ -659,7 +659,7 @@ pub fn formatType(
}
}
try writer.writeAll("{ ");
- for (value) |elem, i| {
+ for (value, 0..) |elem, i| {
try formatType(elem, actual_fmt, options, writer, max_depth - 1);
if (i != value.len - 1) {
try writer.writeAll(", ");
@@ -684,7 +684,7 @@ pub fn formatType(
}
}
try writer.writeAll("{ ");
- for (value) |elem, i| {
+ for (value, 0..) |elem, i| {
try formatType(elem, actual_fmt, options, writer, max_depth - 1);
if (i < value.len - 1) {
try writer.writeAll(", ");
diff --git a/lib/std/fmt/parse_float/decimal.zig b/lib/std/fmt/parse_float/decimal.zig
index 1a4c7ebb0d..9dbe7095ac 100644
--- a/lib/std/fmt/parse_float/decimal.zig
+++ b/lib/std/fmt/parse_float/decimal.zig
@@ -475,7 +475,7 @@ pub fn Decimal(comptime T: type) type {
const x = pow2_to_pow5_table[shift];
// Compare leading digits of current to check if lexicographically less than cutoff.
- for (x.cutoff) |p5, i| {
+ for (x.cutoff, 0..) |p5, i| {
if (i >= self.num_digits) {
return x.delta - 1;
} else if (self.digits[i] == p5 - '0') { // digits are stored as integers
diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig
index ebb2ec82d8..150eada38b 100644
--- a/lib/std/fs/path.zig
+++ b/lib/std/fs/path.zig
@@ -48,7 +48,7 @@ fn joinSepMaybeZ(allocator: Allocator, separator: u8, comptime sepPredicate: fn
// Find first non-empty path index.
const first_path_index = blk: {
- for (paths) |path, index| {
+ for (paths, 0..) |path, index| {
if (path.len == 0) continue else break :blk index;
}
@@ -476,7 +476,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
var drive_kind = WindowsPath.Kind.None;
var have_abs_path = false;
var first_index: usize = 0;
- for (paths) |p, i| {
+ for (paths, 0..) |p, i| {
const parsed = windowsParsePath(p);
if (parsed.is_abs) {
have_abs_path = true;
@@ -504,7 +504,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
first_index = 0;
var correct_disk_designator = false;
- for (paths) |p, i| {
+ for (paths, 0..) |p, i| {
const parsed = windowsParsePath(p);
if (parsed.kind != WindowsPath.Kind.None) {
if (parsed.kind == drive_kind) {
diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig
index fa9de0dff1..75c9b1df78 100644
--- a/lib/std/fs/wasi.zig
+++ b/lib/std/fs/wasi.zig
@@ -15,7 +15,7 @@ pub const Preopens = struct {
names: []const []const u8,
pub fn find(p: Preopens, name: []const u8) ?os.fd_t {
- for (p.names) |elem_name, i| {
+ for (p.names, 0..) |elem_name, i| {
if (mem.eql(u8, elem_name, name)) {
return @intCast(os.fd_t, i);
}
diff --git a/lib/std/hash/crc.zig b/lib/std/hash/crc.zig
index 1c69978f80..271b4f93da 100644
--- a/lib/std/hash/crc.zig
+++ b/lib/std/hash/crc.zig
@@ -35,7 +35,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
@as(I, algorithm.polynomial) << (@bitSizeOf(I) - @bitSizeOf(W));
var table: [256]I = undefined;
- for (table) |*e, i| {
+ for (&table, 0..) |*e, i| {
var crc: I = i;
if (algorithm.reflect_input) {
var j: usize = 0;
@@ -124,7 +124,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
@setEvalBranchQuota(20000);
var tables: [8][256]u32 = undefined;
- for (tables[0]) |*e, i| {
+ for (&tables[0], 0..) |*e, i| {
var crc = @intCast(u32, i);
var j: usize = 0;
while (j < 8) : (j += 1) {
@@ -217,7 +217,7 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type {
const lookup_table = block: {
var table: [16]u32 = undefined;
- for (table) |*e, i| {
+ for (&table, 0..) |*e, i| {
var crc = @intCast(u32, i * 16);
var j: usize = 0;
while (j < 8) : (j += 1) {
diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig
index 9dda198a06..2f30c26b75 100644
--- a/lib/std/hash/wyhash.zig
+++ b/lib/std/hash/wyhash.zig
@@ -207,7 +207,7 @@ test "test vectors streaming" {
test "iterative non-divisible update" {
var buf: [8192]u8 = undefined;
- for (buf) |*e, i| {
+ for (&buf, 0..) |*e, i| {
e.* = @truncate(u8, i);
}
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 05205e6f07..78fcf68b56 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -2119,7 +2119,7 @@ test "std.hash_map getOrPutAdapted" {
var real_keys: [keys.len]u64 = undefined;
- inline for (keys) |key_str, i| {
+ inline for (keys, 0..) |key_str, i| {
const result = try map.getOrPutAdapted(key_str, AdaptedContext{});
try testing.expect(!result.found_existing);
real_keys[i] = std.fmt.parseInt(u64, key_str, 10) catch unreachable;
@@ -2129,7 +2129,7 @@ test "std.hash_map getOrPutAdapted" {
try testing.expectEqual(map.count(), keys.len);
- inline for (keys) |key_str, i| {
+ inline for (keys, 0..) |key_str, i| {
const result = try map.getOrPutAdapted(key_str, AdaptedContext{});
try testing.expect(result.found_existing);
try testing.expectEqual(real_keys[i], result.key_ptr.*);
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index d8e88c4933..c15e5d0ec2 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -724,7 +724,7 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
try testing.expect(slice.len == 100);
- for (slice) |*item, i| {
+ for (slice, 0..) |*item, i| {
item.* = try allocator.create(i32);
item.*.* = @intCast(i32, i);
}
@@ -732,7 +732,7 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
slice = try allocator.realloc(slice, 20000);
try testing.expect(slice.len == 20000);
- for (slice[0..100]) |item, i| {
+ for (slice[0..100], 0..) |item, i| {
try testing.expect(item.* == @intCast(i32, i));
allocator.destroy(item);
}
diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig
index 70b93c0508..4084eaa88e 100644
--- a/lib/std/heap/WasmPageAllocator.zig
+++ b/lib/std/heap/WasmPageAllocator.zig
@@ -62,7 +62,7 @@ const FreeBlock = struct {
fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize {
@setCold(true);
- for (self.data) |segment, i| {
+ for (self.data, 0..) |segment, i| {
const spills_into_next = @bitCast(i128, segment) < 0;
const has_enough_bits = @popCount(segment) >= num_pages;
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 4f8be3804c..452480dc7a 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -349,7 +349,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
/// Emits log messages for leaks and then returns whether there were any leaks.
pub fn detectLeaks(self: *Self) bool {
var leaks = false;
- for (self.buckets) |optional_bucket, bucket_i| {
+ for (self.buckets, 0..) |optional_bucket, bucket_i| {
const first_bucket = optional_bucket orelse continue;
const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i);
const used_bits_count = usedBitsCount(size_class);
diff --git a/lib/std/json.zig b/lib/std/json.zig
index 96e41e93c2..0cce71b1e6 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1280,7 +1280,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
}
},
.Array => {
- for (a) |e, i|
+ for (a, 0..) |e, i|
if (!parsedEqual(e, b[i])) return false;
return true;
},
@@ -1294,7 +1294,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
.One => return parsedEqual(a.*, b.*),
.Slice => {
if (a.len != b.len) return false;
- for (a) |e, i|
+ for (a, 0..) |e, i|
if (!parsedEqual(e, b[i])) return false;
return true;
},
@@ -1518,7 +1518,7 @@ fn parseInternal(
var r: T = undefined;
var fields_seen = [_]bool{false} ** structInfo.fields.len;
errdefer {
- inline for (structInfo.fields) |field, i| {
+ inline for (structInfo.fields, 0..) |field, i| {
if (fields_seen[i] and !field.is_comptime) {
parseFree(field.type, @field(r, field.name), options);
}
@@ -1533,7 +1533,7 @@ fn parseInternal(
var child_options = options;
child_options.allow_trailing_data = true;
var found = false;
- inline for (structInfo.fields) |field, i| {
+ inline for (structInfo.fields, 0..) |field, i| {
// TODO: using switches here segfault the compiler (#2727?)
if ((stringToken.escapes == .None and mem.eql(u8, field.name, key_source_slice)) or (stringToken.escapes == .Some and (field.name.len == stringToken.decodedLength() and encodesTo(field.name, key_source_slice)))) {
// if (switch (stringToken.escapes) {
@@ -1584,7 +1584,7 @@ fn parseInternal(
else => return error.UnexpectedToken,
}
}
- inline for (structInfo.fields) |field, i| {
+ inline for (structInfo.fields, 0..) |field, i| {
if (!fields_seen[i]) {
if (field.default_value) |default_ptr| {
if (!field.is_comptime) {
@@ -2367,7 +2367,7 @@ pub fn stringify(
if (child_options.whitespace) |*whitespace| {
whitespace.indent_level += 1;
}
- for (value) |x, i| {
+ for (value, 0..) |x, i| {
if (i != 0) {
try out_stream.writeByte(',');
}
diff --git a/lib/std/json/test.zig b/lib/std/json/test.zig
index 3c9414a59c..067bc2920b 100644
--- a/lib/std/json/test.zig
+++ b/lib/std/json/test.zig
@@ -2717,7 +2717,7 @@ test "string copy option" {
const copy_addr = &obj_copy.get("noescape").?.String[0];
var found_nocopy = false;
- for (input) |_, index| {
+ for (input, 0..) |_, index| {
try testing.expect(copy_addr != &input[index]);
if (nocopy_addr == &input[index]) {
found_nocopy = true;
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 21f5015c6c..b7725b9ae9 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -1478,11 +1478,11 @@ pub const Mutable = struct {
// const x_trailing = std.mem.indexOfScalar(Limb, x.limbs[0..x.len], 0).?;
// const y_trailing = std.mem.indexOfScalar(Limb, y.limbs[0..y.len], 0).?;
- const x_trailing = for (x.limbs[0..x.len]) |xi, i| {
+ const x_trailing = for (x.limbs[0..x.len], 0..) |xi, i| {
if (xi != 0) break i;
} else unreachable;
- const y_trailing = for (y.limbs[0..y.len]) |yi, i| {
+ const y_trailing = for (y.limbs[0..y.len], 0..) |yi, i| {
if (yi != 0) break i;
} else unreachable;
@@ -2108,7 +2108,7 @@ pub const Const = struct {
if (@sizeOf(UT) <= @sizeOf(Limb)) {
r = @intCast(UT, self.limbs[0]);
} else {
- for (self.limbs[0..self.limbs.len]) |_, ri| {
+ for (self.limbs[0..self.limbs.len], 0..) |_, ri| {
const limb = self.limbs[self.limbs.len - ri - 1];
r <<= limb_bits;
r |= limb;
@@ -3594,7 +3594,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
assert(quo.len >= a.len);
rem.* = 0;
- for (a) |_, ri| {
+ for (a, 0..) |_, ri| {
const i = a.len - ri - 1;
const pdiv = ((@as(DoubleLimb, rem.*) << limb_bits) | a[i]);
@@ -3620,7 +3620,7 @@ fn lldiv0p5(quo: []Limb, rem: *Limb, a: []const Limb, b: HalfLimb) void {
assert(quo.len >= a.len);
rem.* = 0;
- for (a) |_, ri| {
+ for (a, 0..) |_, ri| {
const i = a.len - ri - 1;
const ai_high = a[i] >> half_limb_bits;
const ai_low = a[i] & ((1 << half_limb_bits) - 1);
@@ -4028,7 +4028,7 @@ fn llsquareBasecase(r: []Limb, x: []const Limb) void {
// - Each mixed-product term appears twice for each column,
// - Squares are always in the 2k (0 <= k < N) column
- for (x_norm) |v, i| {
+ for (x_norm, 0..) |v, i| {
// Accumulate all the x[i]*x[j] (with x!=j) products
const overflow = llmulLimb(.add, r[2 * i + 1 ..], x_norm[i + 1 ..], v);
assert(!overflow);
@@ -4037,7 +4037,7 @@ fn llsquareBasecase(r: []Limb, x: []const Limb) void {
// Each product appears twice, multiply by 2
llshl(r, r[0 .. 2 * x_norm.len], 1);
- for (x_norm) |v, i| {
+ for (x_norm, 0..) |v, i| {
// Compute and add the squares
const overflow = llmulLimb(.add, r[2 * i ..], x[i .. i + 1], v);
assert(!overflow);
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index 98433b26ff..c3609a6fa2 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -70,7 +70,7 @@ pub const Rational = struct {
start += 1;
}
- for (str) |c, i| {
+ for (str, 0..) |c, i| {
switch (state) {
State.Integer => {
switch (c) {
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 371ef8fd8d..fdd1c05862 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -169,7 +169,7 @@ test "Allocator.resize" {
var values = try testing.allocator.alloc(T, 100);
defer testing.allocator.free(values);
- for (values) |*v, i| v.* = @intCast(T, i);
+ for (values, 0..) |*v, i| v.* = @intCast(T, i);
if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
values = values.ptr[0 .. values.len + 10];
try testing.expect(values.len == 110);
@@ -185,7 +185,7 @@ test "Allocator.resize" {
var values = try testing.allocator.alloc(T, 100);
defer testing.allocator.free(values);
- for (values) |*v, i| v.* = @intToFloat(T, i);
+ for (values, 0..) |*v, i| v.* = @intToFloat(T, i);
if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
values = values.ptr[0 .. values.len + 10];
try testing.expect(values.len == 110);
@@ -201,7 +201,7 @@ pub fn copy(comptime T: type, dest: []T, source: []const T) void {
// this and automatically omit safety checks for loops
@setRuntimeSafety(false);
assert(dest.len >= source.len);
- for (source) |s, i|
+ for (source, 0..) |s, i|
dest[i] = s;
}
@@ -445,7 +445,7 @@ pub fn zeroInit(comptime T: type, init: anytype) T {
var value: T = undefined;
- inline for (struct_info.fields) |field, i| {
+ inline for (struct_info.fields, 0..) |field, i| {
if (field.is_comptime) {
continue;
}
@@ -611,7 +611,7 @@ test "lessThan" {
pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
if (a.len != b.len) return false;
if (a.ptr == b.ptr) return true;
- for (a) |item, index| {
+ for (a, 0..) |item, index| {
if (b[index] != item) return false;
}
return true;
@@ -1261,7 +1261,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian)
},
.Little => {
const ShiftType = math.Log2Int(ReturnType);
- for (bytes) |b, index| {
+ for (bytes, 0..) |b, index| {
result = result | (@as(ReturnType, b) << @intCast(ShiftType, index * 8));
}
},
@@ -1328,7 +1328,7 @@ pub fn readVarPackedInt(
},
.Little => {
int = read_bytes[0] >> bit_shift;
- for (read_bytes[1..]) |elem, i| {
+ for (read_bytes[1..], 0..) |elem, i| {
int |= (@as(uN, elem) << @intCast(Log2N, (8 * (i + 1) - bit_shift)));
}
},
@@ -2907,7 +2907,7 @@ pub fn indexOfMin(comptime T: type, slice: []const T) usize {
assert(slice.len > 0);
var best = slice[0];
var index: usize = 0;
- for (slice[1..]) |item, i| {
+ for (slice[1..], 0..) |item, i| {
if (item < best) {
best = item;
index = i + 1;
@@ -2928,7 +2928,7 @@ pub fn indexOfMax(comptime T: type, slice: []const T) usize {
assert(slice.len > 0);
var best = slice[0];
var index: usize = 0;
- for (slice[1..]) |item, i| {
+ for (slice[1..], 0..) |item, i| {
if (item > best) {
best = item;
index = i + 1;
@@ -2952,7 +2952,7 @@ pub fn indexOfMinMax(comptime T: type, slice: []const T) struct { index_min: usi
var maxVal = slice[0];
var minIdx: usize = 0;
var maxIdx: usize = 0;
- for (slice[1..]) |item, i| {
+ for (slice[1..], 0..) |item, i| {
if (item < minVal) {
minVal = item;
minIdx = i + 1;
@@ -3117,7 +3117,7 @@ test "replace" {
/// Replace all occurences of `needle` with `replacement`.
pub fn replaceScalar(comptime T: type, slice: []T, needle: T, replacement: T) void {
- for (slice) |e, i| {
+ for (slice, 0..) |e, i| {
if (e == needle) {
slice[i] = replacement;
}
@@ -3372,7 +3372,7 @@ test "asBytes" {
try testing.expect(eql(u8, asBytes(&deadbeef), deadbeef_bytes));
var codeface = @as(u32, 0xC0DEFACE);
- for (asBytes(&codeface).*) |*b|
+ for (asBytes(&codeface)) |*b|
b.* = 0;
try testing.expect(codeface == 0);
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index 7ab4c9f25c..07f42075b4 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -117,7 +117,7 @@ pub fn stringToEnum(comptime T: type, str: []const u8) ?T {
const kvs = comptime build_kvs: {
const EnumKV = struct { []const u8, T };
var kvs_array: [@typeInfo(T).Enum.fields.len]EnumKV = undefined;
- inline for (@typeInfo(T).Enum.fields) |enumField, i| {
+ inline for (@typeInfo(T).Enum.fields, 0..) |enumField, i| {
kvs_array[i] = .{ enumField.name, @field(T, enumField.name) };
}
break :build_kvs kvs_array[0..];
@@ -552,7 +552,7 @@ pub fn fieldNames(comptime T: type) *const [fields(T).len][]const u8 {
comptime {
const fieldInfos = fields(T);
var names: [fieldInfos.len][]const u8 = undefined;
- for (fieldInfos) |field, i| {
+ for (fieldInfos, 0..) |field, i| {
names[i] = field.name;
}
return &names;
@@ -593,7 +593,7 @@ pub fn tags(comptime T: type) *const [fields(T).len]T {
comptime {
const fieldInfos = fields(T);
var res: [fieldInfos.len]T = undefined;
- for (fieldInfos) |field, i| {
+ for (fieldInfos, 0..) |field, i| {
res[i] = @field(T, field.name);
}
return &res;
@@ -631,7 +631,7 @@ pub fn FieldEnum(comptime T: type) type {
if (@typeInfo(T) == .Union) {
if (@typeInfo(T).Union.tag_type) |tag_type| {
- for (std.enums.values(tag_type)) |v, i| {
+ for (std.enums.values(tag_type), 0..) |v, i| {
if (@enumToInt(v) != i) break; // enum values not consecutive
if (!std.mem.eql(u8, @tagName(v), field_infos[i].name)) break; // fields out of order
} else {
@@ -642,7 +642,7 @@ pub fn FieldEnum(comptime T: type) type {
var enumFields: [field_infos.len]std.builtin.Type.EnumField = undefined;
var decls = [_]std.builtin.Type.Declaration{};
- inline for (field_infos) |field, i| {
+ inline for (field_infos, 0..) |field, i| {
enumFields[i] = .{
.name = field.name,
.value = i,
@@ -672,7 +672,7 @@ fn expectEqualEnum(expected: anytype, actual: @TypeOf(expected)) !void {
const expected_fields = @typeInfo(expected).Enum.fields;
const actual_fields = @typeInfo(actual).Enum.fields;
if (expected_fields.len != actual_fields.len) return error.FailedTest;
- for (expected_fields) |expected_field, i| {
+ for (expected_fields, 0..) |expected_field, i| {
const actual_field = actual_fields[i];
try testing.expectEqual(expected_field.value, actual_field.value);
try testing.expectEqualStrings(expected_field.name, actual_field.name);
@@ -682,7 +682,7 @@ fn expectEqualEnum(expected: anytype, actual: @TypeOf(expected)) !void {
const expected_decls = @typeInfo(expected).Enum.decls;
const actual_decls = @typeInfo(actual).Enum.decls;
if (expected_decls.len != actual_decls.len) return error.FailedTest;
- for (expected_decls) |expected_decl, i| {
+ for (expected_decls, 0..) |expected_decl, i| {
const actual_decl = actual_decls[i];
try testing.expectEqual(expected_decl.is_pub, actual_decl.is_pub);
try testing.expectEqualStrings(expected_decl.name, actual_decl.name);
@@ -716,7 +716,7 @@ pub fn DeclEnum(comptime T: type) type {
const fieldInfos = std.meta.declarations(T);
var enumDecls: [fieldInfos.len]std.builtin.Type.EnumField = undefined;
var decls = [_]std.builtin.Type.Declaration{};
- inline for (fieldInfos) |field, i| {
+ inline for (fieldInfos, 0..) |field, i| {
enumDecls[i] = .{ .name = field.name, .value = i };
}
return @Type(.{
@@ -870,7 +870,7 @@ pub fn eql(a: anytype, b: @TypeOf(a)) bool {
},
.Array => {
if (a.len != b.len) return false;
- for (a) |e, i|
+ for (a, 0..) |e, i|
if (!eql(e, b[i])) return false;
return true;
},
@@ -988,7 +988,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa
/// Given a type and a name, return the field index according to source order.
/// Returns `null` if the field is not found.
pub fn fieldIndex(comptime T: type, comptime name: []const u8) ?comptime_int {
- inline for (fields(T)) |field, i| {
+ inline for (fields(T), 0..) |field, i| {
if (mem.eql(u8, field.name, name))
return i;
}
@@ -1008,7 +1008,7 @@ pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const De
comptime {
const decls = declarations(Namespace);
var array: [decls.len]*const Decl = undefined;
- for (decls) |decl, i| {
+ for (decls, 0..) |decl, i| {
array[i] = &@field(Namespace, decl.name);
}
std.sort.sort(*const Decl, &array, {}, S.declNameLessThan);
@@ -1069,7 +1069,7 @@ pub fn ArgsTuple(comptime Function: type) type {
@compileError("Cannot create ArgsTuple for variadic function");
var argument_field_list: [function_info.params.len]type = undefined;
- inline for (function_info.params) |arg, i| {
+ inline for (function_info.params, 0..) |arg, i| {
const T = arg.type.?;
argument_field_list[i] = T;
}
@@ -1090,7 +1090,7 @@ pub fn Tuple(comptime types: []const type) type {
fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type {
var tuple_fields: [types.len]std.builtin.Type.StructField = undefined;
- inline for (types) |T, i| {
+ inline for (types, 0..) |T, i| {
@setEvalBranchQuota(10_000);
var num_buf: [128]u8 = undefined;
tuple_fields[i] = .{
@@ -1129,7 +1129,7 @@ const TupleTester = struct {
if (expected.len != fields_list.len)
@compileError("Argument count mismatch");
- inline for (fields_list) |fld, i| {
+ inline for (fields_list, 0..) |fld, i| {
if (expected[i] != fld.type) {
@compileError("Field " ++ fld.name ++ " expected to be type " ++ @typeName(expected[i]) ++ ", but was type " ++ @typeName(fld.type));
}
diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig
index e50950018d..0c43a5ff28 100644
--- a/lib/std/meta/trailer_flags.zig
+++ b/lib/std/meta/trailer_flags.zig
@@ -21,7 +21,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
pub const ActiveFields = std.enums.EnumFieldStruct(FieldEnum, bool, false);
pub const FieldValues = blk: {
comptime var fields: [bit_count]Type.StructField = undefined;
- inline for (@typeInfo(Fields).Struct.fields) |struct_field, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |struct_field, i| {
fields[i] = Type.StructField{
.name = struct_field.name,
.type = ?struct_field.type,
@@ -61,7 +61,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
/// `fields` is a boolean struct where each active field is set to `true`
pub fn init(fields: ActiveFields) Self {
var self: Self = .{ .bits = 0 };
- inline for (@typeInfo(Fields).Struct.fields) |field, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| {
if (@field(fields, field.name))
self.bits |= 1 << i;
}
@@ -70,7 +70,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
/// `fields` is a struct with each field set to an optional value
pub fn setMany(self: Self, p: [*]align(@alignOf(Fields)) u8, fields: FieldValues) void {
- inline for (@typeInfo(Fields).Struct.fields) |field, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| {
if (@field(fields, field.name)) |value|
self.set(p, @intToEnum(FieldEnum, i), value);
}
@@ -101,7 +101,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
pub fn offset(self: Self, comptime field: FieldEnum) usize {
var off: usize = 0;
- inline for (@typeInfo(Fields).Struct.fields) |field_info, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |field_info, i| {
const active = (self.bits & (1 << i)) != 0;
if (i == @enumToInt(field)) {
assert(active);
@@ -119,7 +119,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
pub fn sizeInBytes(self: Self) usize {
var off: usize = 0;
- inline for (@typeInfo(Fields).Struct.fields) |field, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| {
if (@sizeOf(field.type) == 0)
continue;
if ((self.bits & (1 << i)) != 0) {
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 38c8276b9a..afdd6a5a8d 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -82,7 +82,7 @@ pub fn MultiArrayList(comptime S: type) type {
alignment: usize,
};
var data: [fields.len]Data = undefined;
- for (fields) |field_info, i| {
+ for (fields, 0..) |field_info, i| {
data[i] = .{
.size = @sizeOf(field_info.type),
.size_index = i,
@@ -98,7 +98,7 @@ pub fn MultiArrayList(comptime S: type) type {
std.sort.sort(Data, &data, {}, Sort.lessThan);
var sizes_bytes: [fields.len]usize = undefined;
var field_indexes: [fields.len]usize = undefined;
- for (data) |elem, i| {
+ for (data, 0..) |elem, i| {
sizes_bytes[i] = elem.size;
field_indexes[i] = elem.size_index;
}
@@ -131,7 +131,7 @@ pub fn MultiArrayList(comptime S: type) type {
.capacity = self.capacity,
};
var ptr: [*]u8 = self.bytes;
- for (sizes.bytes) |field_size, i| {
+ for (sizes.bytes, 0..) |field_size, i| {
result.ptrs[sizes.fields[i]] = ptr;
ptr += field_size * self.capacity;
}
@@ -148,7 +148,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Overwrite one array element with new data.
pub fn set(self: *Self, index: usize, elem: S) void {
const slices = self.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
slices.items(@intToEnum(Field, i))[index] = @field(elem, field_info.name);
}
}
@@ -157,7 +157,7 @@ pub fn MultiArrayList(comptime S: type) type {
pub fn get(self: Self, index: usize) S {
const slices = self.slice();
var result: S = undefined;
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
@field(result, field_info.name) = slices.items(@intToEnum(Field, i))[index];
}
return result;
@@ -230,7 +230,7 @@ pub fn MultiArrayList(comptime S: type) type {
assert(index <= self.len);
self.len += 1;
const slices = self.slice();
- inline for (fields) |field_info, field_index| {
+ inline for (fields, 0..) |field_info, field_index| {
const field_slice = slices.items(@intToEnum(Field, field_index));
var i: usize = self.len - 1;
while (i > index) : (i -= 1) {
@@ -245,7 +245,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// retain list ordering.
pub fn swapRemove(self: *Self, index: usize) void {
const slices = self.slice();
- inline for (fields) |_, i| {
+ inline for (fields, 0..) |_, i| {
const field_slice = slices.items(@intToEnum(Field, i));
field_slice[index] = field_slice[self.len - 1];
field_slice[self.len - 1] = undefined;
@@ -257,7 +257,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// after it to preserve order.
pub fn orderedRemove(self: *Self, index: usize) void {
const slices = self.slice();
- inline for (fields) |_, field_index| {
+ inline for (fields, 0..) |_, field_index| {
const field_slice = slices.items(@intToEnum(Field, field_index));
var i = index;
while (i < self.len - 1) : (i += 1) {
@@ -293,7 +293,7 @@ pub fn MultiArrayList(comptime S: type) type {
capacityInBytes(new_len),
) catch {
const self_slice = self.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
const dest_slice = self_slice.items(field)[new_len..];
@@ -315,7 +315,7 @@ pub fn MultiArrayList(comptime S: type) type {
self.len = new_len;
const self_slice = self.slice();
const other_slice = other.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
mem.copy(field_info.type, other_slice.items(field), self_slice.items(field));
@@ -376,7 +376,7 @@ pub fn MultiArrayList(comptime S: type) type {
};
const self_slice = self.slice();
const other_slice = other.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
mem.copy(field_info.type, other_slice.items(field), self_slice.items(field));
@@ -395,7 +395,7 @@ pub fn MultiArrayList(comptime S: type) type {
result.len = self.len;
const self_slice = self.slice();
const result_slice = result.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
mem.copy(field_info.type, result_slice.items(field), self_slice.items(field));
@@ -412,7 +412,7 @@ pub fn MultiArrayList(comptime S: type) type {
slice: Slice,
pub fn swap(sc: @This(), a_index: usize, b_index: usize) void {
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
const ptr = sc.slice.items(field);
diff --git a/lib/std/net.zig b/lib/std/net.zig
index fa7c489b70..50a0f8b9d7 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -325,7 +325,7 @@ pub const Ip6Address = extern struct {
var index: u8 = 0;
var scope_id = false;
var abbrv = false;
- for (buf) |c, i| {
+ for (buf, 0..) |c, i| {
if (scope_id) {
if (c >= '0' and c <= '9') {
const digit = c - '0';
@@ -444,7 +444,7 @@ pub const Ip6Address = extern struct {
var scope_id_value: [os.IFNAMESIZE - 1]u8 = undefined;
var scope_id_index: usize = 0;
- for (buf) |c, i| {
+ for (buf, 0..) |c, i| {
if (scope_id) {
// Handling of percent-encoding should be for an URI library.
if ((c >= '0' and c <= '9') or
@@ -602,7 +602,7 @@ pub const Ip6Address = extern struct {
.Big => big_endian_parts.*,
.Little => blk: {
var buf: [8]u16 = undefined;
- for (big_endian_parts) |part, i| {
+ for (big_endian_parts, 0..) |part, i| {
buf[i] = mem.bigToNative(u16, part);
}
break :blk buf;
@@ -909,7 +909,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*A
result.canon_name = try canon.toOwnedSlice();
}
- for (lookup_addrs.items) |lookup_addr, i| {
+ for (lookup_addrs.items, 0..) |lookup_addr, i| {
result.addrs[i] = lookup_addr.addr;
assert(result.addrs[i].getPort() == port);
}
@@ -989,7 +989,7 @@ fn linuxLookupName(
// So far the label/precedence table cannot be customized.
// This implementation is ported from musl libc.
// A more idiomatic "ziggy" implementation would be welcome.
- for (addrs.items) |*addr, i| {
+ for (addrs.items, 0..) |*addr, i| {
var key: i32 = 0;
var sa6: os.sockaddr.in6 = undefined;
@memset(@ptrCast([*]u8, &sa6), 0, @sizeOf(os.sockaddr.in6));
@@ -1118,7 +1118,7 @@ const defined_policies = [_]Policy{
};
fn policyOf(a: [16]u8) *const Policy {
- for (defined_policies) |*policy| {
+ for (&defined_policies) |*policy| {
if (!mem.eql(u8, a[0..policy.len], policy.addr[0..policy.len])) continue;
if ((a[policy.len] & policy.mask) != policy.addr[policy.len]) continue;
return policy;
@@ -1502,7 +1502,7 @@ fn resMSendRc(
try ns_list.resize(rc.ns.items.len);
const ns = ns_list.items;
- for (rc.ns.items) |iplit, i| {
+ for (rc.ns.items, 0..) |iplit, i| {
ns[i] = iplit.addr;
assert(ns[i].getPort() == 53);
if (iplit.addr.any.family != os.AF.INET) {
diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig
index f2946777bd..30a63d9e3d 100644
--- a/lib/std/net/test.zig
+++ b/lib/std/net/test.zig
@@ -30,7 +30,7 @@ test "parse and render IPv6 addresses" {
"ff01::fb",
"::ffff:123.5.123.5",
};
- for (ips) |ip, i| {
+ for (ips, 0..) |ip, i| {
var addr = net.Address.parseIp6(ip, 0) catch unreachable;
var newIp = std.fmt.bufPrint(buffer[0..], "{}", .{addr}) catch unreachable;
try std.testing.expect(std.mem.eql(u8, printed[i], newIp[1 .. newIp.len - 3]));
diff --git a/lib/std/once.zig b/lib/std/once.zig
index 775c4328a2..f012e017dd 100644
--- a/lib/std/once.zig
+++ b/lib/std/once.zig
@@ -53,7 +53,7 @@ test "Once executes its function just once" {
var threads: [10]std.Thread = undefined;
defer for (threads) |handle| handle.join();
- for (threads) |*handle| {
+ for (&threads) |*handle| {
handle.* = try std.Thread.spawn(.{}, struct {
fn thread_fn(x: u8) void {
_ = x;
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 5d6c9f5cc9..fe2f8404e2 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -1245,7 +1245,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
// see https://www.openwall.com/lists/musl/2014/06/07/5
const kvlen = if (vlen > IOV_MAX) IOV_MAX else vlen; // matches kernel
var next_unsent: usize = 0;
- for (msgvec[0..kvlen]) |*msg, i| {
+ for (msgvec[0..kvlen], 0..) |*msg, i| {
var size: i32 = 0;
const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| {
diff --git a/lib/std/os/uefi/protocols/device_path_protocol.zig b/lib/std/os/uefi/protocols/device_path_protocol.zig
index 6df6442fe6..fb497a79da 100644
--- a/lib/std/os/uefi/protocols/device_path_protocol.zig
+++ b/lib/std/os/uefi/protocols/device_path_protocol.zig
@@ -61,7 +61,7 @@ pub const DevicePathProtocol = extern struct {
// The same as new.getPath(), but not const as we're filling it in.
var ptr = @ptrCast([*:0]align(1) u16, @ptrCast([*]u8, new) + @sizeOf(MediaDevicePath.FilePathDevicePath));
- for (path) |s, i|
+ for (path, 0..) |s, i|
ptr[i] = s;
ptr[path.len] = 0;
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 711bc9f349..fe0ebc4951 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -2858,7 +2858,7 @@ pub const GUID = extern struct {
assert(s[18] == '-');
assert(s[23] == '-');
var bytes: [16]u8 = undefined;
- for (hex_offsets) |hex_offset, i| {
+ for (hex_offsets, 0..) |hex_offset, i| {
bytes[i] = (try std.fmt.charToDigit(s[hex_offset], 16)) << 4 |
try std.fmt.charToDigit(s[hex_offset + 1], 16);
}
diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig
index f15ddbe974..f42e1016aa 100644
--- a/lib/std/packed_int_array.zig
+++ b/lib/std/packed_int_array.zig
@@ -215,7 +215,7 @@ pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptim
/// or, more likely, an array literal.
pub fn init(ints: [int_count]Int) Self {
var self = @as(Self, undefined);
- for (ints) |int, i| self.set(i, int);
+ for (ints, 0..) |int, i| self.set(i, int);
return self;
}
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index 287b65b59d..fdd162a34f 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -922,7 +922,7 @@ const Msf = struct {
}
const streams = try allocator.alloc(MsfStream, stream_count);
- for (streams) |*stream, i| {
+ for (streams, 0..) |*stream, i| {
const size = stream_sizes[i];
if (size == 0) {
stream.* = MsfStream{
diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig
index d710288105..db55be3804 100644
--- a/lib/std/priority_dequeue.zig
+++ b/lib/std/priority_dequeue.zig
@@ -430,7 +430,7 @@ pub fn PriorityDequeue(comptime T: type, comptime Context: type, comptime compar
const print = std.debug.print;
print("{{ ", .{});
print("items: ", .{});
- for (self.items) |e, i| {
+ for (self.items, 0..) |e, i| {
if (i >= self.len) break;
print("{}, ", .{e});
}
diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig
index 6e5e85a819..8925202c32 100644
--- a/lib/std/priority_queue.zig
+++ b/lib/std/priority_queue.zig
@@ -263,7 +263,7 @@ pub fn PriorityQueue(comptime T: type, comptime Context: type, comptime compareF
const print = std.debug.print;
print("{{ ", .{});
print("items: ", .{});
- for (self.items) |e, i| {
+ for (self.items, 0..) |e, i| {
if (i >= self.len) break;
print("{}, ", .{e});
}
diff --git a/lib/std/process.zig b/lib/std/process.zig
index b901a9f0fa..777bcbbab0 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -874,7 +874,7 @@ pub fn argsAlloc(allocator: Allocator) ![][:0]u8 {
mem.copy(u8, result_contents, contents_slice);
var contents_index: usize = 0;
- for (slice_sizes) |len, i| {
+ for (slice_sizes, 0..) |len, i| {
const new_index = contents_index + len;
result_slice_list[i] = result_contents[contents_index..new_index :0];
contents_index = new_index + 1;
@@ -1148,7 +1148,7 @@ pub fn execve(
const arena = arena_allocator.allocator();
const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null);
- for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
+ for (argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
const envp = m: {
if (env_map) |m| {
diff --git a/lib/std/rand.zig b/lib/std/rand.zig
index f5a1ffe57e..9722a47682 100644
--- a/lib/std/rand.zig
+++ b/lib/std/rand.zig
@@ -414,7 +414,7 @@ pub const Random = struct {
std.debug.assert(point < sum);
var accumulator: T = 0;
- for (proportions) |p, index| {
+ for (proportions, 0..) |p, index| {
accumulator += p;
if (point < accumulator) return index;
}
diff --git a/lib/std/rand/ziggurat.zig b/lib/std/rand/ziggurat.zig
index ec6dd0f080..fba8dfad96 100644
--- a/lib/std/rand/ziggurat.zig
+++ b/lib/std/rand/ziggurat.zig
@@ -83,13 +83,13 @@ fn ZigTableGen(
tables.x[0] = v / f(r);
tables.x[1] = r;
- for (tables.x[2..256]) |*entry, i| {
+ for (tables.x[2..256], 0..) |*entry, i| {
const last = tables.x[2 + i - 1];
entry.* = f_inv(v / last + f(last));
}
tables.x[256] = 0;
- for (tables.f[0..]) |*entry, i| {
+ for (tables.f[0..], 0..) |*entry, i| {
entry.* = f(tables.x[i]);
}
diff --git a/lib/std/simd.zig b/lib/std/simd.zig
index 868f9864e7..71d56daec3 100644
--- a/lib/std/simd.zig
+++ b/lib/std/simd.zig
@@ -89,7 +89,7 @@ pub fn VectorCount(comptime VectorType: type) type {
pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) {
comptime {
var out: [len]T = undefined;
- for (out) |*element, i| {
+ for (&out, 0..) |*element, i| {
element.* = switch (@typeInfo(T)) {
.Int => @intCast(T, i),
.Float => @intToFloat(T, i),
diff --git a/lib/std/sort.zig b/lib/std/sort.zig
index 64b0711d83..fa1e33e7ce 100644
--- a/lib/std/sort.zig
+++ b/lib/std/sort.zig
@@ -1219,9 +1219,9 @@ fn testStableSort() !void {
IdAndValue{ .id = 2, .value = 0 },
},
};
- for (cases) |*case| {
+ for (&cases) |*case| {
insertionSort(IdAndValue, (case.*)[0..], {}, cmpByValue);
- for (case.*) |item, i| {
+ for (case.*, 0..) |item, i| {
try testing.expect(item.id == expected[i].id);
try testing.expect(item.value == expected[i].value);
}
@@ -1373,7 +1373,7 @@ fn fuzzTest(rng: std.rand.Random) !void {
var array = try testing.allocator.alloc(IdAndValue, array_size);
defer testing.allocator.free(array);
// populate with random data
- for (array) |*item, index| {
+ for (array, 0..) |*item, index| {
item.id = index;
item.value = rng.intRangeLessThan(i32, 0, 100);
}
@@ -1401,7 +1401,7 @@ pub fn argMin(
var smallest = items[0];
var smallest_index: usize = 0;
- for (items[1..]) |item, i| {
+ for (items[1..], 0..) |item, i| {
if (lessThan(context, item, smallest)) {
smallest = item;
smallest_index = i + 1;
@@ -1453,7 +1453,7 @@ pub fn argMax(
var biggest = items[0];
var biggest_index: usize = 0;
- for (items[1..]) |item, i| {
+ for (items[1..], 0..) |item, i| {
if (lessThan(context, biggest, item)) {
biggest = item;
biggest_index = i + 1;
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 4429f8be2d..9e3a8d62f4 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -720,7 +720,7 @@ pub const Target = struct {
/// Adds the specified feature set but not its dependencies.
pub fn addFeatureSet(set: *Set, other_set: Set) void {
if (builtin.zig_backend == .stage2_c) {
- for (set.ints) |*int, i| int.* |= other_set.ints[i];
+ for (&set.ints, 0..) |*int, i| int.* |= other_set.ints[i];
} else {
set.ints = @as(@Vector(usize_count, usize), set.ints) | @as(@Vector(usize_count, usize), other_set.ints);
}
@@ -736,7 +736,7 @@ pub const Target = struct {
/// Removes the specified feature but not its dependents.
pub fn removeFeatureSet(set: *Set, other_set: Set) void {
if (builtin.zig_backend == .stage2_c) {
- for (set.ints) |*int, i| int.* &= ~other_set.ints[i];
+ for (&set.ints, 0..) |*int, i| int.* &= ~other_set.ints[i];
} else {
set.ints = @as(@Vector(usize_count, usize), set.ints) & ~@as(@Vector(usize_count, usize), other_set.ints);
}
@@ -747,7 +747,7 @@ pub const Target = struct {
var old = set.ints;
while (true) {
- for (all_features_list) |feature, index_usize| {
+ for (all_features_list, 0..) |feature, index_usize| {
const index = @intCast(Index, index_usize);
if (set.isEnabled(index)) {
set.addFeatureSet(feature.dependencies);
@@ -1330,7 +1330,7 @@ pub const Target = struct {
fn allCpusFromDecls(comptime cpus: type) []const *const Cpu.Model {
const decls = @typeInfo(cpus).Struct.decls;
var array: [decls.len]*const Cpu.Model = undefined;
- for (decls) |decl, i| {
+ for (decls, 0..) |decl, i| {
array[i] = &@field(cpus, decl.name);
}
return &array;
diff --git a/lib/std/target/aarch64.zig b/lib/std/target/aarch64.zig
index af50c9d890..b84e2f5d8e 100644
--- a/lib/std/target/aarch64.zig
+++ b/lib/std/target/aarch64.zig
@@ -1269,7 +1269,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/amdgpu.zig b/lib/std/target/amdgpu.zig
index 7138845d69..4451a0cc9b 100644
--- a/lib/std/target/amdgpu.zig
+++ b/lib/std/target/amdgpu.zig
@@ -1033,7 +1033,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/arc.zig b/lib/std/target/arc.zig
index 822104b466..86d803c217 100644
--- a/lib/std/target/arc.zig
+++ b/lib/std/target/arc.zig
@@ -23,7 +23,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/arm.zig b/lib/std/target/arm.zig
index 2f5371187e..14241b4b7e 100644
--- a/lib/std/target/arm.zig
+++ b/lib/std/target/arm.zig
@@ -1631,7 +1631,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/avr.zig b/lib/std/target/avr.zig
index c9e5d1de23..ef234e3502 100644
--- a/lib/std/target/avr.zig
+++ b/lib/std/target/avr.zig
@@ -329,7 +329,7 @@ pub const all_features = blk: {
}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/bpf.zig b/lib/std/target/bpf.zig
index a8c7a81a3b..82503c11a4 100644
--- a/lib/std/target/bpf.zig
+++ b/lib/std/target/bpf.zig
@@ -35,7 +35,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/csky.zig b/lib/std/target/csky.zig
index 4c719c776f..0a985f0648 100644
--- a/lib/std/target/csky.zig
+++ b/lib/std/target/csky.zig
@@ -416,7 +416,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/hexagon.zig b/lib/std/target/hexagon.zig
index 1bab66fb05..5d167ca3ee 100644
--- a/lib/std/target/hexagon.zig
+++ b/lib/std/target/hexagon.zig
@@ -268,7 +268,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/m68k.zig b/lib/std/target/m68k.zig
index 6e147dffd7..10a8ae4dc2 100644
--- a/lib/std/target/m68k.zig
+++ b/lib/std/target/m68k.zig
@@ -153,7 +153,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/mips.zig b/lib/std/target/mips.zig
index 881a46694b..5650bd64c2 100644
--- a/lib/std/target/mips.zig
+++ b/lib/std/target/mips.zig
@@ -387,7 +387,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/msp430.zig b/lib/std/target/msp430.zig
index 6e8aac29b9..8e2b8536c8 100644
--- a/lib/std/target/msp430.zig
+++ b/lib/std/target/msp430.zig
@@ -41,7 +41,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/nvptx.zig b/lib/std/target/nvptx.zig
index 4863d82bde..b062b21327 100644
--- a/lib/std/target/nvptx.zig
+++ b/lib/std/target/nvptx.zig
@@ -221,7 +221,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/powerpc.zig b/lib/std/target/powerpc.zig
index 25c4162b0d..0532671a5b 100644
--- a/lib/std/target/powerpc.zig
+++ b/lib/std/target/powerpc.zig
@@ -592,7 +592,7 @@ pub const all_features = blk: {
}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/riscv.zig b/lib/std/target/riscv.zig
index b3b24fece3..fba5bbfdf6 100644
--- a/lib/std/target/riscv.zig
+++ b/lib/std/target/riscv.zig
@@ -660,7 +660,7 @@ pub const all_features = blk: {
}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/s390x.zig b/lib/std/target/s390x.zig
index 8b93019e24..546cbadfbd 100644
--- a/lib/std/target/s390x.zig
+++ b/lib/std/target/s390x.zig
@@ -263,7 +263,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/sparc.zig b/lib/std/target/sparc.zig
index 1c1d5082cc..7deb01db24 100644
--- a/lib/std/target/sparc.zig
+++ b/lib/std/target/sparc.zig
@@ -131,7 +131,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/spirv.zig b/lib/std/target/spirv.zig
index 84b92a764c..39a91c7537 100644
--- a/lib/std/target/spirv.zig
+++ b/lib/std/target/spirv.zig
@@ -2075,7 +2075,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/ve.zig b/lib/std/target/ve.zig
index f77c07883d..224da897c8 100644
--- a/lib/std/target/ve.zig
+++ b/lib/std/target/ve.zig
@@ -23,7 +23,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/wasm.zig b/lib/std/target/wasm.zig
index 514b765a97..d894670646 100644
--- a/lib/std/target/wasm.zig
+++ b/lib/std/target/wasm.zig
@@ -89,7 +89,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/target/x86.zig b/lib/std/target/x86.zig
index a561c317bd..8b8e47a813 100644
--- a/lib/std/target/x86.zig
+++ b/lib/std/target/x86.zig
@@ -1045,7 +1045,7 @@ pub const all_features = blk: {
}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 895a9a0973..37e15ff08b 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -384,7 +384,7 @@ fn SliceDiffer(comptime T: type) type {
const Self = @This();
pub fn write(self: Self, writer: anytype) !void {
- for (self.expected) |value, i| {
+ for (self.expected, 0..) |value, i| {
var full_index = self.start_index + i;
const diff = if (i < self.actual.len) !std.meta.eql(self.actual[i], value) else true;
if (diff) try self.ttyconf.setColor(writer, .Red);
@@ -405,7 +405,7 @@ const BytesDiffer = struct {
while (expected_iterator.next()) |chunk| {
// to avoid having to calculate diffs twice per chunk
var diffs: std.bit_set.IntegerBitSet(16) = .{ .mask = 0 };
- for (chunk) |byte, i| {
+ for (chunk, 0..) |byte, i| {
var absolute_byte_index = (expected_iterator.index - chunk.len) + i;
const diff = if (absolute_byte_index < self.actual.len) self.actual[absolute_byte_index] != byte else true;
if (diff) diffs.set(i);
@@ -418,7 +418,7 @@ const BytesDiffer = struct {
if (chunk.len < 8) missing_columns += 1;
try writer.writeByteNTimes(' ', missing_columns);
}
- for (chunk) |byte, i| {
+ for (chunk, 0..) |byte, i| {
const byte_to_print = if (std.ascii.isPrint(byte)) byte else '.';
try self.writeByteDiff(writer, "{c}", byte_to_print, diffs.isSet(i));
}
@@ -1059,7 +1059,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
// Setup the tuple that will actually be used with @call (we'll need to insert
// the failing allocator in field @"0" before each @call)
var args: ArgsTuple = undefined;
- inline for (@typeInfo(@TypeOf(extra_args)).Struct.fields) |field, i| {
+ inline for (@typeInfo(@TypeOf(extra_args)).Struct.fields, 0..) |field, i| {
const arg_i_str = comptime str: {
var str_buf: [100]u8 = undefined;
const args_i = i + 1;
diff --git a/lib/std/wasm.zig b/lib/std/wasm.zig
index 2d519ace8b..25a0bb7abf 100644
--- a/lib/std/wasm.zig
+++ b/lib/std/wasm.zig
@@ -636,7 +636,7 @@ pub const Type = struct {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
_ = opt;
try writer.writeByte('(');
- for (self.params) |param, i| {
+ for (self.params, 0..) |param, i| {
try writer.print("{s}", .{@tagName(param)});
if (i + 1 != self.params.len) {
try writer.writeAll(", ");
@@ -646,7 +646,7 @@ pub const Type = struct {
if (self.returns.len == 0) {
try writer.writeAll("nil");
} else {
- for (self.returns) |return_ty, i| {
+ for (self.returns, 0..) |return_ty, i| {
try writer.print("{s}", .{@tagName(return_ty)});
if (i + 1 != self.returns.len) {
try writer.writeAll(", ");
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 3784f06160..0ba717e974 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -136,7 +136,7 @@ pub fn tokenLocation(self: Ast, start_offset: ByteOffset, token_index: TokenInde
.line_end = self.source.len,
};
const token_start = self.tokens.items(.start)[token_index];
- for (self.source[start_offset..]) |c, i| {
+ for (self.source[start_offset..], 0..) |c, i| {
if (i + start_offset == token_start) {
loc.line_end = i + start_offset;
while (loc.line_end < self.source.len and self.source[loc.line_end] != '\n') {
@@ -179,7 +179,7 @@ pub fn tokenSlice(tree: Ast, token_index: TokenIndex) []const u8 {
pub fn extraData(tree: Ast, index: usize, comptime T: type) T {
const fields = std.meta.fields(T);
var result: T = undefined;
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
comptime assert(field.type == Node.Index);
@field(result, field.name) = tree.extra_data[index + i];
}
@@ -2183,7 +2183,7 @@ fn fullAsmComponents(tree: Ast, info: full.Asm.Components) full.Asm {
if (token_tags[info.asm_token + 1] == .keyword_volatile) {
result.volatile_token = info.asm_token + 1;
}
- const outputs_end: usize = for (info.items) |item, i| {
+ const outputs_end: usize = for (info.items, 0..) |item, i| {
switch (node_tags[item]) {
.asm_output => continue,
else => break i,
diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig
index aad0cb42f2..93b6d97d75 100644
--- a/lib/std/zig/CrossTarget.zig
+++ b/lib/std/zig/CrossTarget.zig
@@ -317,7 +317,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
index += 1;
}
const feature_name = cpu_features[start..index];
- for (all_features) |feature, feat_index_usize| {
+ for (all_features, 0..) |feature, feat_index_usize| {
const feat_index = @intCast(Target.Cpu.Feature.Set.Index, feat_index_usize);
if (mem.eql(u8, feature_name, feature.name)) {
set.addFeature(feat_index);
diff --git a/lib/std/zig/fmt.zig b/lib/std/zig/fmt.zig
index bc9d694b0e..4afcc7ac2e 100644
--- a/lib/std/zig/fmt.zig
+++ b/lib/std/zig/fmt.zig
@@ -25,7 +25,7 @@ pub fn fmtId(bytes: []const u8) std.fmt.Formatter(formatId) {
pub fn isValidId(bytes: []const u8) bool {
if (bytes.len == 0) return false;
if (mem.eql(u8, bytes, "_")) return false;
- for (bytes) |c, i| {
+ for (bytes, 0..) |c, i| {
switch (c) {
'_', 'a'...'z', 'A'...'Z' => {},
'0'...'9' => if (i == 0) return false,
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index d24dedfeff..4346ee6286 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -6158,7 +6158,7 @@ fn testError(source: [:0]const u8, expected_errors: []const Error) !void {
std.debug.print("errors found: {any}\n", .{tree.errors});
return err;
};
- for (expected_errors) |expected, i| {
+ for (expected_errors, 0..) |expected, i| {
try std.testing.expectEqual(expected, tree.errors[i].tag);
}
}
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index ea3748a9bd..61d789c3c4 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -1407,7 +1407,7 @@ fn renderBuiltinCall(
// Render all on one line, no trailing comma.
try renderToken(ais, tree, builtin_token + 1, .none); // (
- for (params) |param_node, i| {
+ for (params, 0..) |param_node, i| {
const first_param_token = tree.firstToken(param_node);
if (token_tags[first_param_token] == .multiline_string_literal_line or
hasSameLineComment(tree, first_param_token - 1))
@@ -1739,7 +1739,7 @@ fn renderBlock(
try renderToken(ais, tree, lbrace, .none);
} else {
try renderToken(ais, tree, lbrace, .newline);
- for (statements) |stmt, i| {
+ for (statements, 0..) |stmt, i| {
if (i != 0) try renderExtraNewline(ais, tree, stmt);
switch (node_tags[stmt]) {
.global_var_decl,
@@ -1902,7 +1902,7 @@ fn renderArrayInit(
const section_end = sec_end: {
var this_line_first_expr: usize = 0;
var this_line_size = rowSize(tree, row_exprs, rbrace);
- for (row_exprs) |expr, i| {
+ for (row_exprs, 0..) |expr, i| {
// Ignore comment on first line of this section.
if (i == 0) continue;
const expr_last_token = tree.lastToken(expr);
@@ -1941,7 +1941,7 @@ fn renderArrayInit(
var column_counter: usize = 0;
var single_line = true;
var contains_newline = false;
- for (section_exprs) |expr, i| {
+ for (section_exprs, 0..) |expr, i| {
const start = sub_expr_buffer.items.len;
sub_expr_buffer_starts[i] = start;
@@ -1983,7 +1983,7 @@ fn renderArrayInit(
// Render exprs in current section.
column_counter = 0;
- for (section_exprs) |expr, i| {
+ for (section_exprs, 0..) |expr, i| {
const start = sub_expr_buffer_starts[i];
const end = sub_expr_buffer_starts[i + 1];
const expr_text = sub_expr_buffer.items[start..end];
@@ -2140,7 +2140,7 @@ fn renderContainerDecl(
if (token_tags[lbrace + 1] == .container_doc_comment) {
try renderContainerDocComments(ais, tree, lbrace + 1);
}
- for (container_decl.ast.members) |member, i| {
+ for (container_decl.ast.members, 0..) |member, i| {
if (i != 0) try renderExtraNewline(ais, tree, member);
switch (tree.nodes.items(.tag)[member]) {
// For container fields, ensure a trailing comma is added if necessary.
@@ -2226,7 +2226,7 @@ fn renderAsm(
try renderToken(ais, tree, colon1, .space); // :
ais.pushIndent();
- for (asm_node.outputs) |asm_output, i| {
+ for (asm_node.outputs, 0..) |asm_output, i| {
if (i + 1 < asm_node.outputs.len) {
const next_asm_output = asm_node.outputs[i + 1];
try renderAsmOutput(gpa, ais, tree, asm_output, .none);
@@ -2258,7 +2258,7 @@ fn renderAsm(
} else colon3: {
try renderToken(ais, tree, colon2, .space); // :
ais.pushIndent();
- for (asm_node.inputs) |asm_input, i| {
+ for (asm_node.inputs, 0..) |asm_input, i| {
if (i + 1 < asm_node.inputs.len) {
const next_asm_input = asm_node.inputs[i + 1];
try renderAsmInput(gpa, ais, tree, asm_input, .none);
@@ -2352,7 +2352,7 @@ fn renderParamList(
if (token_tags[after_last_param_tok] == .comma) {
ais.pushIndentNextLine();
try renderToken(ais, tree, lparen, .newline); // (
- for (params) |param_node, i| {
+ for (params, 0..) |param_node, i| {
if (i + 1 < params.len) {
try renderExpression(gpa, ais, tree, param_node, .none);
@@ -2377,7 +2377,7 @@ fn renderParamList(
try renderToken(ais, tree, lparen, .none); // (
- for (params) |param_node, i| {
+ for (params, 0..) |param_node, i| {
const first_param_token = tree.firstToken(param_node);
if (token_tags[first_param_token] == .multiline_string_literal_line or
hasSameLineComment(tree, first_param_token - 1))
@@ -3015,7 +3015,7 @@ fn rowSize(tree: Ast, exprs: []const Ast.Node.Index, rtoken: Ast.TokenIndex) usi
}
var count: usize = 1;
- for (exprs) |expr, i| {
+ for (exprs, 0..) |expr, i| {
if (i + 1 < exprs.len) {
const expr_last_token = tree.lastToken(expr) + 1;
if (!tree.tokensOnSameLine(expr_last_token, tree.firstToken(exprs[i + 1]))) return count;
diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig
index 7e57cdda9b..dbbebb43c9 100644
--- a/lib/std/zig/system/NativeTargetInfo.zig
+++ b/lib/std/zig/system/NativeTargetInfo.zig
@@ -273,7 +273,7 @@ fn detectAbiAndDynamicLinker(
assert(@enumToInt(Target.Abi.none) == 0);
const fields = std.meta.fields(Target.Abi)[1..];
var array: [fields.len]Target.Abi = undefined;
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
array[i] = @field(Target.Abi, field.name);
}
break :blk array;
diff --git a/lib/std/zig/system/linux.zig b/lib/std/zig/system/linux.zig
index 63a49c6472..ee07daebe9 100644
--- a/lib/std/zig/system/linux.zig
+++ b/lib/std/zig/system/linux.zig
@@ -223,7 +223,7 @@ const ArmCpuinfoImpl = struct {
};
var known_models: [self.cores.len]?*const Target.Cpu.Model = undefined;
- for (self.cores[0..self.core_no]) |core, i| {
+ for (self.cores[0..self.core_no], 0..) |core, i| {
known_models[i] = cpu_models.isKnown(.{
.architecture = core.architecture,
.implementer = core.implementer,
diff --git a/lib/std/zig/system/windows.zig b/lib/std/zig/system/windows.zig
index 9dded69ca0..45b44560a2 100644
--- a/lib/std/zig/system/windows.zig
+++ b/lib/std/zig/system/windows.zig
@@ -34,7 +34,7 @@ pub fn detectRuntimeVersion() WindowsVersion {
// checking the build number against a known set of
// values
var last_idx: usize = 0;
- for (WindowsVersion.known_win10_build_numbers) |build, i| {
+ for (WindowsVersion.known_win10_build_numbers, 0..) |build, i| {
if (version_info.dwBuildNumber >= build)
last_idx = i;
}
@@ -92,7 +92,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
var tmp_bufs: [fields_info.len][max_value_len]u8 align(@alignOf(std.os.windows.UNICODE_STRING)) = undefined;
- inline for (fields_info) |field, i| {
+ inline for (fields_info, 0..) |field, i| {
const ctx: *anyopaque = blk: {
switch (@field(args, field.name).value_type) {
REG.SZ,
@@ -153,7 +153,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
);
switch (res) {
.SUCCESS => {
- inline for (fields_info) |field, i| switch (@field(args, field.name).value_type) {
+ inline for (fields_info, 0..) |field, i| switch (@field(args, field.name).value_type) {
REG.SZ,
REG.EXPAND_SZ,
REG.MULTI_SZ,
diff --git a/lib/test_runner.zig b/lib/test_runner.zig
index 3f57b42bf5..5968fdaa54 100644
--- a/lib/test_runner.zig
+++ b/lib/test_runner.zig
@@ -33,7 +33,7 @@ pub fn main() void {
async_frame_buffer = &[_]u8{};
var leaks: usize = 0;
- for (test_fn_list) |test_fn, i| {
+ for (test_fn_list, 0..) |test_fn, i| {
std.testing.allocator_instance = .{};
defer {
if (std.testing.allocator_instance.deinit()) {
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 98883b0f8d..9b96b16677 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -1505,7 +1505,7 @@ fn arrayInitExprInner(
extra_index += 1;
}
- for (elements) |elem_init, i| {
+ for (elements, 0..) |elem_init, i| {
const ri = if (elem_ty != .none)
ResultInfo{ .rl = .{ .coerced_ty = elem_ty } }
else if (array_ty_inst != .none and nodeMayNeedMemoryLocation(astgen.tree, elem_init, true)) ri: {
@@ -1562,7 +1562,7 @@ fn arrayInitExprRlPtrInner(
});
var extra_index = try reserveExtra(astgen, elements.len);
- for (elements) |elem_init, i| {
+ for (elements, 0..) |elem_init, i| {
const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{
.ptr = result_ptr,
.index = @intCast(u32, i),
@@ -6342,7 +6342,7 @@ fn forExpr(
{
var capture_token = for_full.payload_token;
- for (for_full.ast.inputs) |input, i_usize| {
+ for (for_full.ast.inputs, 0..) |input, i_usize| {
const i = @intCast(u32, i_usize);
const capture_is_ref = token_tags[capture_token] == .asterisk;
const ident_tok = capture_token + @boolToInt(capture_is_ref);
@@ -6464,7 +6464,7 @@ fn forExpr(
const then_sub_scope = blk: {
var capture_token = for_full.payload_token;
var capture_sub_scope: *Scope = &then_scope.base;
- for (for_full.ast.inputs) |input, i_usize| {
+ for (for_full.ast.inputs, 0..) |input, i_usize| {
const i = @intCast(u32, i_usize);
const capture_is_ref = token_tags[capture_token] == .asterisk;
const ident_tok = capture_token + @boolToInt(capture_is_ref);
@@ -6974,7 +6974,7 @@ fn switchExpr(
zir_datas[switch_block].pl_node.payload_index = payload_index;
const strat = ri.rl.strategy(&block_scope);
- for (payloads.items[case_table_start..case_table_end]) |start_index, i| {
+ for (payloads.items[case_table_start..case_table_end], 0..) |start_index, i| {
var body_len_index = start_index;
var end_index = start_index;
const table_index = case_table_start + i;
@@ -7638,7 +7638,7 @@ fn asmExpr(
var output_type_bits: u32 = 0;
- for (full.outputs) |output_node, i| {
+ for (full.outputs, 0..) |output_node, i| {
const symbolic_name = main_tokens[output_node];
const name = try astgen.identAsString(symbolic_name);
const constraint_token = symbolic_name + 2;
@@ -7675,7 +7675,7 @@ fn asmExpr(
var inputs_buffer: [32]Zir.Inst.Asm.Input = undefined;
const inputs = inputs_buffer[0..full.inputs.len];
- for (full.inputs) |input_node, i| {
+ for (full.inputs, 0..) |input_node, i| {
const symbolic_name = main_tokens[input_node];
const name = try astgen.identAsString(symbolic_name);
const constraint_token = symbolic_name + 2;
@@ -7848,7 +7848,7 @@ fn typeOf(
var typeof_scope = gz.makeSubBlock(scope);
typeof_scope.force_comptime = false;
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, arg, node);
astgen.extra.items[args_index + i] = @enumToInt(param_ref);
}
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 44325e3836..2fc54cc0ec 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1647,7 +1647,7 @@ fn walkInstruction(
std.debug.assert(operands.len > 0);
var array_type = try self.walkRef(file, parent_scope, parent_src, operands[0], false);
- for (operands[1..]) |op, idx| {
+ for (operands[1..], 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1665,7 +1665,7 @@ fn walkInstruction(
const operands = file.zir.refSlice(extra.end, extra.data.operands_len);
const array_data = try self.arena.alloc(usize, operands.len);
- for (operands) |op, idx| {
+ for (operands, 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1686,7 +1686,7 @@ fn walkInstruction(
std.debug.assert(operands.len > 0);
var array_type = try self.walkRef(file, parent_scope, parent_src, operands[0], false);
- for (operands[1..]) |op, idx| {
+ for (operands[1..], 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1715,7 +1715,7 @@ fn walkInstruction(
const operands = file.zir.refSlice(extra.end, extra.data.operands_len);
const array_data = try self.arena.alloc(usize, operands.len);
- for (operands) |op, idx| {
+ for (operands, 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -2386,7 +2386,7 @@ fn walkInstruction(
const array_data = try self.arena.alloc(usize, args.len);
var array_type: ?DocData.Expr = null;
- for (args) |arg, idx| {
+ for (args, 0..) |arg, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, arg, idx == 0);
if (idx == 0) {
array_type = wr.typeRef;
@@ -3470,7 +3470,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_enum.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_enum.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
@@ -3517,7 +3517,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_union.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_union.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
@@ -3564,7 +3564,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_struct.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_struct.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
diff --git a/src/Compilation.zig b/src/Compilation.zig
index ce0bfb9908..ebc0e9b563 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -641,7 +641,7 @@ pub const AllErrors = struct {
}
const reference_trace = try allocator.alloc(Message, module_err_msg.reference_trace.len);
- for (reference_trace) |*reference, i| {
+ for (reference_trace, 0..) |*reference, i| {
const module_reference = module_err_msg.reference_trace[i];
if (module_reference.hidden != 0) {
reference.* = .{ .plain = .{ .msg = undefined, .count = module_reference.hidden } };
@@ -714,7 +714,7 @@ pub const AllErrors = struct {
const block = file.zir.extraData(Zir.Inst.Block, item.data.notes);
const body = file.zir.extra[block.end..][0..block.data.body_len];
notes = try arena.alloc(Message, body.len);
- for (notes) |*note, i| {
+ for (notes, 0..) |*note, i| {
const note_item = file.zir.extraData(Zir.Inst.CompileErrors.Item, body[i]);
const msg = file.zir.nullTerminatedString(note_item.data.msg);
const span = blk: {
@@ -786,7 +786,7 @@ pub const AllErrors = struct {
fn dupeList(list: []const Message, arena: Allocator) Allocator.Error![]Message {
const duped_list = try arena.alloc(Message, list.len);
- for (list) |item, i| {
+ for (list, 0..) |item, i| {
duped_list[i] = switch (item) {
.src => |src| .{ .src = .{
.msg = try arena.dupe(u8, src.msg),
@@ -1441,7 +1441,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const llvm_cpu_features: ?[*:0]const u8 = if (build_options.have_llvm and use_llvm) blk: {
var buf = std.ArrayList(u8).init(arena);
- for (options.target.cpu.arch.allFeaturesList()) |feature, index_usize| {
+ for (options.target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = options.target.cpu.features.isEnabled(index);
@@ -1818,7 +1818,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
var system_libs: std.StringArrayHashMapUnmanaged(SystemLib) = .{};
errdefer system_libs.deinit(gpa);
try system_libs.ensureTotalCapacity(gpa, options.system_lib_names.len);
- for (options.system_lib_names) |lib_name, i| {
+ for (options.system_lib_names, 0..) |lib_name, i| {
system_libs.putAssumeCapacity(lib_name, options.system_lib_infos[i]);
}
@@ -2880,7 +2880,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
}
for (self.lld_errors.items) |lld_error| {
const notes = try arena_allocator.alloc(AllErrors.Message, lld_error.context_lines.len);
- for (lld_error.context_lines) |context_line, i| {
+ for (lld_error.context_lines, 0..) |context_line, i| {
notes[i] = .{ .plain = .{
.msg = try arena_allocator.dupe(u8, context_line),
} };
@@ -3007,7 +3007,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
};
defer self.gpa.free(err_msg.notes);
- for (keys[1..]) |key, i| {
+ for (keys[1..], 0..) |key, i| {
const note_decl = module.declPtr(key);
err_msg.notes[i] = .{
.src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]),
@@ -3104,7 +3104,7 @@ pub fn performAllTheWork(
const notes = try mod.gpa.alloc(Module.ErrorMsg, file.references.items.len);
errdefer mod.gpa.free(notes);
- for (notes) |*note, i| {
+ for (notes, 0..) |*note, i| {
errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa);
note.* = switch (file.references.items[i]) {
.import => |loc| try Module.ErrorMsg.init(
@@ -3740,7 +3740,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1);
new_argv_with_sentinel[argv.items.len] = null;
const new_argv = new_argv_with_sentinel[0..argv.items.len :null];
- for (argv.items) |arg, i| {
+ for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
@@ -4375,7 +4375,7 @@ pub fn addCCArgs(
// It would be really nice if there was a more compact way to communicate this info to Clang.
const all_features_list = target.cpu.arch.allFeaturesList();
try argv.ensureUnusedCapacity(all_features_list.len * 4);
- for (all_features_list) |feature, index_usize| {
+ for (all_features_list, 0..) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
@@ -5203,7 +5203,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
std.zig.fmtId(generic_arch_name),
});
- for (target.cpu.arch.allFeaturesList()) |feature, index_usize| {
+ for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
if (is_enabled) {
diff --git a/src/Liveness.zig b/src/Liveness.zig
index e775883b1f..481cf25d04 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -384,7 +384,7 @@ pub fn categorizeOperand(
const args = @ptrCast([]const Air.Inst.Ref, air.extra[extra.end..][0..extra.data.args_len]);
if (args.len + 1 <= bpi - 1) {
if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i + 1), .write);
}
return .write;
@@ -436,7 +436,7 @@ pub fn categorizeOperand(
const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i), .none);
}
return .none;
@@ -1272,12 +1272,12 @@ fn analyzeInst(
defer for (case_deaths) |*cd| cd.deinit(gpa);
var total_deaths: u32 = 0;
- for (case_tables) |*ct, i| {
+ for (case_tables, 0..) |*ct, i| {
total_deaths += ct.count();
var it = ct.keyIterator();
while (it.next()) |key| {
const case_death = key.*;
- for (case_tables) |*ct_inner, j| {
+ for (case_tables, 0..) |*ct_inner, j| {
if (i == j) continue;
if (!ct_inner.contains(case_death)) {
// instruction is not referenced in this case
diff --git a/src/Manifest.zig b/src/Manifest.zig
index c3f77aec98..068a14942f 100644
--- a/src/Manifest.zig
+++ b/src/Manifest.zig
@@ -123,7 +123,7 @@ pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
result[2] = hex_charset[Hash.digest_length >> 4];
result[3] = hex_charset[Hash.digest_length & 15];
- for (digest) |byte, i| {
+ for (digest, 0..) |byte, i| {
result[4 + i * 2] = hex_charset[byte >> 4];
result[5 + i * 2] = hex_charset[byte & 15];
}
diff --git a/src/Module.zig b/src/Module.zig
index a129cb0cb6..4feb04abdd 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -268,7 +268,7 @@ pub const MemoizedCall = struct {
if (a.func != b.func) return false;
assert(a.args.len == b.args.len);
- for (a.args) |a_arg, arg_i| {
+ for (a.args, 0..) |a_arg, arg_i| {
const b_arg = b.args[arg_i];
if (!a_arg.eql(b_arg, ctx.module)) {
return false;
@@ -1082,7 +1082,7 @@ pub const Struct = struct {
assert(s.layout == .Packed);
assert(s.haveLayout());
var bit_sum: u64 = 0;
- for (s.fields.values()) |field, i| {
+ for (s.fields.values(), 0..) |field, i| {
if (i == index) {
return @intCast(u16, bit_sum);
}
@@ -1341,7 +1341,7 @@ pub const Union = struct {
assert(u.haveFieldTypes());
var most_alignment: u32 = 0;
var most_index: usize = undefined;
- for (u.fields.values()) |field, i| {
+ for (u.fields.values(), 0..) |field, i| {
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.normalAlignment(target);
@@ -1405,7 +1405,7 @@ pub const Union = struct {
var payload_size: u64 = 0;
var payload_align: u32 = 0;
const fields = u.fields.values();
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_align = a: {
@@ -3553,7 +3553,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
}
if (data_has_safety_tag) {
const tags = zir.instructions.items(.tag);
- for (zir.instructions.items(.data)) |*data, i| {
+ for (zir.instructions.items(.data), 0..) |*data, i| {
const union_tag = Zir.Inst.Tag.data_tags[@enumToInt(tags[i])];
const as_struct = @ptrCast(*HackDataLayout, data);
as_struct.* = .{
@@ -3740,7 +3740,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
@ptrCast([*]const u8, file.zir.instructions.items(.data).ptr);
if (data_has_safety_tag) {
// The `Data` union has a safety tag but in the file format we store it without.
- for (file.zir.instructions.items(.data)) |*data, i| {
+ for (file.zir.instructions.items(.data), 0..) |*data, i| {
const as_struct = @ptrCast(*const HackDataLayout, data);
safety_buffer[i] = as_struct.data;
}
@@ -6293,7 +6293,7 @@ pub fn populateTestFunctions(
// Add a dependency on each test name and function pointer.
try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
- for (mod.test_functions.keys()) |test_decl_index, i| {
+ for (mod.test_functions.keys(), 0..) |test_decl_index, i| {
const test_decl = mod.declPtr(test_decl_index);
const test_name_slice = mem.sliceTo(test_decl.name, 0);
const test_name_decl_index = n: {
diff --git a/src/Package.zig b/src/Package.zig
index a3afe21009..5878e7bad6 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -207,7 +207,7 @@ pub fn fetchAndAddDependencies(
var any_error = false;
const deps_list = manifest.dependencies.values();
- for (manifest.dependencies.keys()) |name, i| {
+ for (manifest.dependencies.keys(), 0..) |name, i| {
const dep = deps_list[i];
const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name });
diff --git a/src/RangeSet.zig b/src/RangeSet.zig
index a5007ef7c8..7e501f984b 100644
--- a/src/RangeSet.zig
+++ b/src/RangeSet.zig
@@ -79,7 +79,7 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
const target = self.module.getTarget();
// look for gaps
- for (self.ranges.items[1..]) |cur, i| {
+ for (self.ranges.items[1..], 0..) |cur, i| {
// i starts counting from the second item.
const prev = self.ranges.items[i];
diff --git a/src/Sema.zig b/src/Sema.zig
index aeb4e25a24..fde9072d71 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3801,7 +3801,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty);
const empty_trash_count = trash_block.instructions.items.len;
- for (placeholders) |bitcast_inst, i| {
+ for (placeholders, 0..) |bitcast_inst, i| {
const sub_ptr_ty = sema.typeOf(Air.indexToRef(bitcast_inst));
if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) {
@@ -3917,7 +3917,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
defer gpa.free(runtime_arg_lens);
// First pass to look for comptime values.
- for (args) |zir_arg, i| {
+ for (args, 0..) |zir_arg, i| {
runtime_arg_lens[i] = .none;
if (zir_arg == .none) continue;
const object = try sema.resolveInst(zir_arg);
@@ -3957,7 +3957,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// Now for the runtime checks.
if (any_runtime and block.wantSafety()) {
- for (runtime_arg_lens) |arg_len, i| {
+ for (runtime_arg_lens, 0..) |arg_len, i| {
if (arg_len == .none) continue;
if (i == len_idx) continue;
const ok = try block.addBinOp(.cmp_eq, len, arg_len);
@@ -4247,7 +4247,7 @@ fn validateStructInit(
// In this case the only thing we need to do is evaluate the implicit
// store instructions for default field values, and report any missing fields.
// Avoid the cost of the extra machinery for detecting a comptime struct init value.
- for (found_fields) |field_ptr, i| {
+ for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) continue;
const default_val = struct_ty.structFieldDefaultValue(i);
@@ -4313,7 +4313,7 @@ fn validateStructInit(
// ends up being comptime-known.
const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount());
- field: for (found_fields) |field_ptr, i| {
+ field: for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) {
// Determine whether the value stored to this pointer is comptime-known.
const field_ty = struct_ty.structFieldType(i);
@@ -4446,7 +4446,7 @@ fn validateStructInit(
try sema.resolveStructLayout(struct_ty);
// Our task is to insert `store` instructions for all the default field values.
- for (found_fields) |field_ptr, i| {
+ for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) continue;
const field_src = init_src; // TODO better source location
@@ -4540,7 +4540,7 @@ fn zirValidateArrayInit(
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
- outer: for (instrs) |elem_ptr, i| {
+ outer: for (instrs, 0..) |elem_ptr, i| {
// Determine whether the value stored to this pointer is comptime-known.
if (array_ty.isTuple()) {
@@ -5059,7 +5059,7 @@ fn zirCompileLog(
const src_node = extra.data.src_node;
const args = sema.code.refSlice(extra.end, extended.small);
- for (args) |arg_ref, i| {
+ for (args, 0..) |arg_ref, i| {
if (i != 0) try writer.print(", ", .{});
const arg = try sema.resolveInst(arg_ref);
@@ -6277,7 +6277,7 @@ const GenericCallAdapter = struct {
if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false;
const other_comptime_args = other_key.comptime_args.?;
- for (other_comptime_args[0..ctx.func_ty_info.param_types.len]) |other_arg, i| {
+ for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| {
const this_arg = ctx.args[i];
const this_is_comptime = this_arg.val.tag() != .generic_poison;
const other_is_comptime = other_arg.val.tag() != .generic_poison;
@@ -6793,7 +6793,7 @@ fn analyzeCall(
assert(!func_ty_info.is_generic);
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
- for (uncasted_args) |uncasted_arg, i| {
+ for (uncasted_args, 0..) |uncasted_arg, i| {
if (i < fn_params_len) {
const opts: CoerceOpts = .{ .param_src = .{
.func_inst = func,
@@ -7568,7 +7568,7 @@ fn resolveGenericInstantiationType(
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
if (!ty.isSimpleTupleOrAnonStruct()) return;
const tuple = ty.tupleFields();
- for (tuple.values) |field_val, i| {
+ for (tuple.values, 0..) |field_val, i| {
try sema.resolveTupleLazyValues(block, src, tuple.types[i]);
if (field_val.tag() == .unreachable_value) continue;
try sema.resolveLazyValue(field_val);
@@ -8642,7 +8642,7 @@ fn funcCommon(
const cc_resolved = cc orelse .Unspecified;
const param_types = try sema.arena.alloc(Type, block.params.items.len);
const comptime_params = try sema.arena.alloc(bool, block.params.items.len);
- for (block.params.items) |param, i| {
+ for (block.params.items, 0..) |param, i| {
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @truncate(u1, noalias_bits >> index) != 0;
@@ -8751,7 +8751,7 @@ fn funcCommon(
const tags = sema.code.instructions.items(.tag);
const data = sema.code.instructions.items(.data);
const param_body = sema.code.getParamBody(func_inst);
- for (block.params.items) |param, i| {
+ for (block.params.items, 0..) |param, i| {
if (!param.is_comptime) {
const param_index = param_body[i];
const param_src = switch (tags[param_index]) {
@@ -9850,7 +9850,7 @@ fn zirSwitchCapture(
const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, sema.mod).?);
const first_field = union_obj.fields.values()[first_field_index];
- for (items[1..]) |item, i| {
+ for (items[1..], 0..) |item, i| {
const item_ref = try sema.resolveInst(item);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
@@ -10180,7 +10180,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemEnum(
block,
seen_enum_fields,
@@ -10214,7 +10214,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.{},
);
errdefer msg.destroy(sema.gpa);
- for (seen_enum_fields) |seen_src, i| {
+ for (seen_enum_fields, 0..) |seen_src, i| {
if (seen_src != null) continue;
const field_name = operand_ty.enumFieldName(i);
@@ -10276,7 +10276,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemError(
block,
&seen_errors,
@@ -10418,7 +10418,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItem(
block,
&range_set,
@@ -10513,7 +10513,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemBool(
block,
&true_count,
@@ -10597,7 +10597,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemSparse(
block,
&seen_values,
@@ -10908,7 +10908,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
}
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
cases_len += 1;
const item = try sema.resolveInst(item_ref);
@@ -11094,7 +11094,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
operand_ty.fmt(sema.mod),
});
}
- for (seen_enum_fields) |f, i| {
+ for (seen_enum_fields, 0..) |f, i| {
if (f != null) continue;
cases_len += 1;
@@ -11237,7 +11237,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
const analyze_body = if (union_originally and !special.is_inline)
- for (seen_enum_fields) |seen_field, index| {
+ for (seen_enum_fields, 0..) |seen_field, index| {
if (seen_field != null) continue;
const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
const field_ty = union_obj.fields.values()[index].ty;
@@ -12217,7 +12217,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
var elem_val_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf);
elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
}
@@ -13661,7 +13661,7 @@ fn intRem(
) CompileError!Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -14771,7 +14771,7 @@ fn zirAsm(
const outputs = try sema.arena.alloc(ConstraintName, outputs_len);
var expr_ty = Air.Inst.Ref.void_type;
- for (out_args) |*arg, out_i| {
+ for (out_args, 0..) |*arg, out_i| {
const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i);
extra_i = output.end;
@@ -14798,7 +14798,7 @@ fn zirAsm(
const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
- for (args) |*arg, arg_i| {
+ for (args, 0..) |*arg, arg_i| {
const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
extra_i = input.end;
@@ -15522,7 +15522,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer params_anon_decl.deinit();
const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len);
- for (param_vals) |*param_val, i| {
+ for (param_vals, 0..) |*param_val, i| {
const param_ty = info.param_types[i];
const is_generic = param_ty.tag() == .generic_poison;
const param_ty_val = if (is_generic)
@@ -15766,7 +15766,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const error_field_vals: ?[]Value = if (ty.isAnyError()) null else blk: {
const names = ty.errorSetNames();
const vals = try fields_anon_decl.arena().alloc(Value, names.len);
- for (vals) |*field_val, i| {
+ for (vals, 0..) |*field_val, i| {
const name = names[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
@@ -15868,7 +15868,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const enum_fields = ty.enumFields();
const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_fields.count());
- for (enum_field_vals) |*field_val, i| {
+ for (enum_field_vals, 0..) |*field_val, i| {
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @intCast(u32, i),
@@ -15965,7 +15965,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const union_fields = union_ty.unionFields();
const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count());
- for (union_field_vals) |*field_val, i| {
+ for (union_field_vals, 0..) |*field_val, i| {
const field = union_fields.values()[i];
const name = union_fields.keys()[i];
const name_val = v: {
@@ -16074,7 +16074,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tuple = struct_ty.tupleFields();
const field_types = tuple.types;
const struct_field_vals = try fields_anon_decl.arena().alloc(Value, field_types.len);
- for (struct_field_vals) |*struct_field_val, i| {
+ for (struct_field_vals, 0..) |*struct_field_val, i| {
const field_ty = field_types[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
@@ -16118,7 +16118,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const struct_fields = struct_ty.structFields();
const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count());
- for (struct_field_vals) |*field_val, i| {
+ for (struct_field_vals, 0..) |*field_val, i| {
const field = struct_fields.values()[i];
const name = struct_fields.keys()[i];
const name_val = v: {
@@ -16457,7 +16457,7 @@ fn zirTypeofPeer(
const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len);
defer sema.gpa.free(inst_list);
- for (args) |arg_ref, i| {
+ for (args, 0..) |arg_ref, i| {
inst_list[i] = try sema.resolveInst(arg_ref);
}
@@ -17568,7 +17568,7 @@ fn finishStructInit(
if (struct_ty.isAnonStruct()) {
const struct_obj = struct_ty.castTag(.anon_struct).?.data;
- for (struct_obj.values) |default_val, i| {
+ for (struct_obj.values, 0..) |default_val, i| {
if (field_inits[i] != .none) continue;
if (default_val.tag() == .unreachable_value) {
@@ -17604,7 +17604,7 @@ fn finishStructInit(
}
} else {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
if (field_inits[i] != .none) continue;
if (field.default_val.tag() == .unreachable_value) {
@@ -17645,7 +17645,7 @@ fn finishStructInit(
if (is_comptime) {
const values = try sema.arena.alloc(Value, field_inits.len);
- for (field_inits) |field_init, i| {
+ for (field_inits, 0..) |field_init, i| {
values[i] = (sema.resolveMaybeUndefVal(field_init) catch unreachable).?;
}
const struct_val = try Value.Tag.aggregate.create(sema.arena, values);
@@ -17660,7 +17660,7 @@ fn finishStructInit(
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
- for (field_inits) |field_init, i_usize| {
+ for (field_inits, 0..) |field_init, i_usize| {
const i = @intCast(u32, i_usize);
const field_src = dest_src;
const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, alloc, i, field_src, struct_ty, true);
@@ -17693,7 +17693,7 @@ fn zirStructInitAnon(
const opt_runtime_index = rs: {
var runtime_index: ?usize = null;
var extra_index = extra.end;
- for (types) |*field_ty, i| {
+ for (types, 0..) |*field_ty, i| {
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
@@ -17767,7 +17767,7 @@ fn zirStructInitAnon(
});
const alloc = try block.addTy(.alloc, alloc_ty);
var extra_index = extra.end;
- for (types) |field_ty, i_usize| {
+ for (types, 0..) |field_ty, i_usize| {
const i = @intCast(u32, i_usize);
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
@@ -17789,7 +17789,7 @@ fn zirStructInitAnon(
const element_refs = try sema.arena.alloc(Air.Inst.Ref, types.len);
var extra_index = extra.end;
- for (types) |_, i| {
+ for (types, 0..) |_, i| {
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
element_refs[i] = try sema.resolveInst(item.data.init);
@@ -17817,7 +17817,7 @@ fn zirArrayInit(
const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null));
defer gpa.free(resolved_args);
- for (args[1..]) |arg, i| {
+ for (args[1..], 0..) |arg, i| {
const resolved_arg = try sema.resolveInst(arg);
const elem_ty = if (array_ty.zigTypeTag() == .Struct)
array_ty.structFieldType(i)
@@ -17838,7 +17838,7 @@ fn zirArrayInit(
resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some);
}
- const opt_runtime_index: ?u32 = for (resolved_args) |arg, i| {
+ const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| {
const comptime_known = try sema.isComptimeKnown(arg);
if (!comptime_known) break @intCast(u32, i);
} else null;
@@ -17846,7 +17846,7 @@ fn zirArrayInit(
const runtime_index = opt_runtime_index orelse {
const elem_vals = try sema.arena.alloc(Value, resolved_args.len);
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
// We checked that all args are comptime above.
elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?;
}
@@ -17875,7 +17875,7 @@ fn zirArrayInit(
const alloc = try block.addTy(.alloc, alloc_ty);
if (array_ty.isTuple()) {
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
@@ -17897,7 +17897,7 @@ fn zirArrayInit(
});
const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
const index = try sema.addIntUnsigned(Type.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
@@ -17924,7 +17924,7 @@ fn zirArrayInitAnon(
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
const operand_src = src; // TODO better source location
const elem = try sema.resolveInst(operand);
types[i] = sema.typeOf(elem);
@@ -17967,7 +17967,7 @@ fn zirArrayInitAnon(
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
- for (operands) |operand, i_usize| {
+ for (operands, 0..) |operand, i_usize| {
const i = @intCast(u32, i_usize);
const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
@@ -17984,7 +17984,7 @@ fn zirArrayInitAnon(
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
element_refs[i] = try sema.resolveInst(operand);
}
@@ -18187,7 +18187,7 @@ fn zirUnaryMath(
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod);
}
@@ -19191,7 +19191,7 @@ fn reifyStruct(
if (layout == .Packed) {
struct_obj.status = .layout_wip;
- for (struct_obj.fields.values()) |field, index| {
+ for (struct_obj.fields.values(), 0..) |field, index| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -19820,7 +19820,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
}
@@ -19922,7 +19922,7 @@ fn zirBitCount(
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
const scalar_ty = operand_ty.scalarType();
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
const count = comptimeOp(elem_val, scalar_ty, target);
elem.* = try Value.Tag.int_u64.create(sema.arena, count);
@@ -19991,7 +19991,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const vec_len = operand_ty.vectorLen();
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena);
}
@@ -20040,7 +20040,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const vec_len = operand_ty.vectorLen();
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena);
}
@@ -20109,7 +20109,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
.Packed => {
var bit_sum: u64 = 0;
const fields = ty.structFields();
- for (fields.values()) |field, i| {
+ for (fields.values(), 0..) |field, i| {
if (i == field_index) {
return bit_sum;
}
@@ -21046,7 +21046,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
var buf: Value.ElemValueBuffer = undefined;
const elems = try sema.gpa.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf);
const should_choose_a = pred_elem_val.toBool();
if (should_choose_a) {
@@ -21396,12 +21396,12 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
func = bound_data.func_inst;
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount() + 1);
resolved_args[0] = bound_data.arg0_inst;
- for (resolved_args[1..]) |*resolved, i| {
+ for (resolved_args[1..], 0..) |*resolved, i| {
resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
}
} else {
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount());
- for (resolved_args) |*resolved, i| {
+ for (resolved_args, 0..) |*resolved, i| {
resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
}
}
@@ -21556,7 +21556,7 @@ fn analyzeMinMax(
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const lhs_elem_val = lhs_val.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem_val = rhs_val.elemValueBuffer(sema.mod, i, &rhs_buf);
elem.* = opFunc(lhs_elem_val, rhs_elem_val, target);
@@ -22453,7 +22453,7 @@ fn explainWhyTypeIsComptimeInner(
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{
.index = i,
.range = .type,
@@ -22473,7 +22473,7 @@ fn explainWhyTypeIsComptimeInner(
if (ty.cast(Type.Payload.Union)) |payload| {
const union_obj = payload.data;
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{
.index = i,
.range = .type,
@@ -23744,7 +23744,7 @@ fn structFieldPtrByIndex(
comptime assert(Type.packed_struct_layout_version == 2);
var running_bits: u16 = 0;
- for (struct_obj.fields.values()) |f, i| {
+ for (struct_obj.fields.values(), 0..) |f, i| {
if (!(try sema.typeHasRuntimeBits(f.ty))) continue;
if (i == field_index) {
@@ -26053,7 +26053,7 @@ fn coerceInMemoryAllowedFns(
} };
}
- for (dest_info.param_types) |dest_param_ty, i| {
+ for (dest_info.param_types, 0..) |dest_param_ty, i| {
const src_param_ty = src_info.param_types[i];
if (dest_info.comptime_params[i] != src_info.comptime_params[i]) {
@@ -26583,7 +26583,7 @@ fn beginComptimePtrMutation(
// assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
assert(bytes.len >= dest_len);
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
}
@@ -26612,7 +26612,7 @@ fn beginComptimePtrMutation(
const dest_len = parent.ty.arrayLenIncludingSentinel();
const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
- for (bytes) |byte, i| {
+ for (bytes, 0..) |byte, i| {
elems[i] = try Value.Tag.int_u64.create(arena, byte);
}
if (parent.ty.sentinel()) |sent_val| {
@@ -27583,7 +27583,7 @@ fn coerceEnumToUnion(
var msg: ?*Module.ErrorMsg = null;
errdefer if (msg) |some| some.destroy(sema.gpa);
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
if (field.ty.zigTypeTag() == .NoReturn) {
const err_msg = msg orelse try sema.errMsg(
block,
@@ -27742,7 +27742,7 @@ fn coerceArrayLike(
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len);
var runtime_src: ?LazySrcLoc = null;
- for (element_vals) |*elem, i| {
+ for (element_vals, 0..) |*elem, i| {
const index_ref = try sema.addConstant(
Type.usize,
try Value.Tag.int_u64.create(sema.arena, i),
@@ -27804,7 +27804,7 @@ fn coerceTupleToArray(
const dest_elem_ty = dest_ty.childType();
var runtime_src: ?LazySrcLoc = null;
- for (element_vals) |*elem, i_usize| {
+ for (element_vals, 0..) |*elem, i_usize| {
const i = @intCast(u32, i_usize);
if (i_usize == inst_len) {
elem.* = dest_ty.sentinel().?;
@@ -27933,7 +27933,7 @@ fn coerceTupleToStruct(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- for (field_refs) |*field_ref, i| {
+ for (field_refs, 0..) |*field_ref, i| {
if (field_ref.* != .none) continue;
const field_name = fields.keys()[i];
@@ -28031,7 +28031,7 @@ fn coerceTupleToTuple(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- for (field_refs) |*field_ref, i| {
+ for (field_refs, 0..) |*field_ref, i| {
if (field_ref.* != .none) continue;
const default_val = tuple_ty.structFieldDefaultValue(i);
@@ -29407,7 +29407,7 @@ fn resolvePeerTypes(
var seen_const = false;
var convert_to_slice = false;
var chosen_i: usize = 0;
- for (instructions[1..]) |candidate, candidate_i| {
+ for (instructions[1..], 0..) |candidate, candidate_i| {
const candidate_ty = sema.typeOf(candidate);
const chosen_ty = sema.typeOf(chosen);
@@ -30066,7 +30066,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
};
struct_obj.status = .layout_wip;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -30104,7 +30104,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count());
};
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
optimized_order[i] = if (field.ty.hasRuntimeBits())
@intCast(u32, i)
else
@@ -30309,7 +30309,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
};
union_obj.status = .layout_wip;
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -30457,7 +30457,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) {
return true;
@@ -30972,7 +30972,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
// so that init values may depend on type layout.
const bodies_index = extra_index;
- for (fields) |zir_field, field_i| {
+ for (fields, 0..) |zir_field, field_i| {
const field_ty: Type = ty: {
if (zir_field.type_ref != .none) {
break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) {
@@ -31094,7 +31094,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (any_inits) {
extra_index = bodies_index;
- for (fields) |zir_field, field_i| {
+ for (fields, 0..) |zir_field, field_i| {
extra_index += zir_field.type_body_len;
extra_index += zir_field.align_body_len;
if (zir_field.init_body_len > 0) {
@@ -31814,7 +31814,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.@"struct" => {
const resolved_ty = try sema.resolveTypeFields(ty);
const s = resolved_ty.castTag(.@"struct").?.data;
- for (s.fields.values()) |field, i| {
+ for (s.fields.values(), 0..) |field, i| {
if (field.is_comptime) continue;
if (field.ty.eql(resolved_ty, sema.mod)) {
const msg = try Module.ErrorMsg.create(
@@ -31835,7 +31835,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.values) |val, i| {
+ for (tuple.values, 0..) |val, i| {
const is_comptime = val.tag() != .unreachable_value;
if (is_comptime) continue;
if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue;
@@ -32475,7 +32475,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) {
return true;
@@ -32635,7 +32635,7 @@ fn anonStructFieldIndex(
field_src: LazySrcLoc,
) !u32 {
const anon_struct = struct_ty.castTag(.anon_struct).?.data;
- for (anon_struct.names) |name, i| {
+ for (anon_struct.names, 0..) |name, i| {
if (mem.eql(u8, name, field_name)) {
return @intCast(u32, i);
}
@@ -32653,7 +32653,7 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32711,7 +32711,7 @@ fn intSub(
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32769,7 +32769,7 @@ fn floatAdd(
) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32826,7 +32826,7 @@ fn floatSub(
) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32884,7 +32884,7 @@ fn intSubWithOverflow(
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32938,7 +32938,7 @@ fn floatToInt(
if (float_ty.zigTypeTag() == .Vector) {
const elem_ty = float_ty.childType();
const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType());
@@ -33138,7 +33138,7 @@ fn intFitsInType(
.aggregate => {
assert(ty.zigTypeTag() == .Vector);
- for (val.castTag(.aggregate).?.data) |elem, i| {
+ for (val.castTag(.aggregate).?.data, 0..) |elem, i| {
if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) {
if (vector_index) |some| some.* = i;
return false;
@@ -33235,7 +33235,7 @@ fn intAddWithOverflow(
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -33339,7 +33339,7 @@ fn compareVector(
) !Value {
assert(ty.zigTypeTag() == .Vector);
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 473a62fd83..5b0db30757 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -515,7 +515,7 @@ fn gen(self: *Self) !void {
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
}
- for (self.args) |*arg, arg_index| {
+ for (self.args, 0..) |*arg, arg_index| {
// Copy register arguments to the stack
switch (arg.*) {
.register => |reg| {
@@ -1633,14 +1633,14 @@ fn allocRegs(
var reused_read_arg: ?usize = null;
// Lock all args which are already allocated to registers
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
read_locks[i] = self.register_manager.lockReg(mcv.register);
}
}
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
write_locks[i] = self.register_manager.lockReg(arg.bind.reg);
}
@@ -1648,7 +1648,7 @@ fn allocRegs(
// Allocate registers for all args which aren't allocated to
// registers yet
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
const raw_reg = mcv.register;
@@ -1672,7 +1672,7 @@ fn allocRegs(
const raw_reg = arg.bind.reg;
arg.reg.* = self.registerAlias(raw_reg, arg.ty);
} else {
- reuse_operand: for (read_args) |read_arg, i| {
+ reuse_operand: for (read_args, 0..) |read_arg, i| {
if (read_arg.bind == .inst) {
const operand = read_arg.bind.inst;
const mcv = try self.resolveInst(operand);
@@ -1694,7 +1694,7 @@ fn allocRegs(
}
}
} else {
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
const raw_reg = arg.bind.reg;
arg.reg.* = self.registerAlias(raw_reg, arg.ty);
@@ -1708,7 +1708,7 @@ fn allocRegs(
// For all read_args which need to be moved from non-register to
// register, perform the move
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
if (reused_read_arg) |j| {
// Check whether this read_arg was reused
if (i == j) continue;
@@ -4267,7 +4267,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4757,7 +4757,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -4790,7 +4790,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -5069,7 +5069,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
const branch_into_prong_relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(branch_into_prong_relocs);
- for (items) |item, idx| {
+ for (items, 0..) |item, idx| {
const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq);
branch_into_prong_relocs[idx] = try self.condBr(cmp_result);
}
@@ -6373,7 +6373,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size == 0) {
result.args[i] = .{ .none = {} };
@@ -6438,7 +6438,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiSize(self.target.*) > 0) {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const param_alignment = ty.abiAlignment(self.target.*);
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 3c2a81d5d1..b2e23c6278 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -80,7 +80,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add_immediate => try emit.mirAddSubtractImmediate(inst),
@@ -323,7 +323,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -368,7 +368,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
// If this instruction contained in the code offset
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 57a8aed699..0fbf1ee984 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -513,7 +513,7 @@ fn gen(self: *Self) !void {
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
}
- for (self.args) |*arg, arg_index| {
+ for (self.args, 0..) |*arg, arg_index| {
// Copy register arguments to the stack
switch (arg.*) {
.register => |reg| {
@@ -3105,14 +3105,14 @@ fn allocRegs(
var reused_read_arg: ?usize = null;
// Lock all args which are already allocated to registers
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
read_locks[i] = self.register_manager.lockReg(mcv.register);
}
}
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
write_locks[i] = self.register_manager.lockReg(arg.bind.reg);
}
@@ -3120,7 +3120,7 @@ fn allocRegs(
// Allocate registers for all args which aren't allocated to
// registers yet
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
arg.reg.* = mcv.register;
@@ -3141,7 +3141,7 @@ fn allocRegs(
if (arg.bind == .reg) {
arg.reg.* = arg.bind.reg;
} else {
- reuse_operand: for (read_args) |read_arg, i| {
+ reuse_operand: for (read_args, 0..) |read_arg, i| {
if (read_arg.bind == .inst) {
const operand = read_arg.bind.inst;
const mcv = try self.resolveInst(operand);
@@ -3161,7 +3161,7 @@ fn allocRegs(
}
}
} else {
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
arg.reg.* = arg.bind.reg;
} else {
@@ -3173,7 +3173,7 @@ fn allocRegs(
// For all read_args which need to be moved from non-register to
// register, perform the move
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
if (reused_read_arg) |j| {
// Check whether this read_arg was reused
if (i == j) continue;
@@ -4217,7 +4217,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4669,7 +4669,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -4702,7 +4702,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -4991,7 +4991,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
const branch_into_prong_relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(branch_into_prong_relocs);
- for (items) |item, idx| {
+ for (items, 0..) |item, idx| {
const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq);
branch_into_prong_relocs[idx] = try self.condBr(cmp_result);
}
@@ -6296,7 +6296,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiAlignment(self.target.*) == 8)
ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);
@@ -6346,7 +6346,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiSize(self.target.*) > 0) {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const param_alignment = ty.abiAlignment(self.target.*);
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index fe34a28b6e..17540f0968 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -77,7 +77,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add => try emit.mirDataProcessing(inst),
@@ -239,7 +239,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -284,7 +284,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
// If this instruction contained in the code offset
diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig
index af7fb301b9..8e76ae9409 100644
--- a/src/arch/arm/bits.zig
+++ b/src/arch/arm/bits.zig
@@ -452,11 +452,11 @@ pub const Instruction = union(enum) {
const masks = comptime blk: {
const base_mask: u32 = std.math.maxInt(u8);
var result = [_]u32{0} ** 16;
- for (result) |*mask, i| mask.* = std.math.rotr(u32, base_mask, 2 * i);
+ for (&result, 0..) |*mask, i| mask.* = std.math.rotr(u32, base_mask, 2 * i);
break :blk result;
};
- return for (masks) |mask, i| {
+ return for (masks, 0..) |mask, i| {
if (x & mask == x) {
break Operand{
.immediate = .{
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 8b8fca4859..b97ac727c1 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1689,7 +1689,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -2727,7 +2727,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var next_stack_offset: u32 = 0;
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 4b2dad4981..387c735896 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -38,7 +38,7 @@ pub fn emitMir(
const mir_tags = emit.mir.instructions.items(.tag);
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add => try emit.mirRType(inst),
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 418c67c580..8344b6e0cc 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -1189,7 +1189,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
try self.register_manager.getReg(reg, null);
}
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(arg);
@@ -1450,7 +1450,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -1484,7 +1484,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -4363,7 +4363,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
.callee => abi.c_abi_int_param_regs_callee_view,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig
index 8500f338ec..7e71492af7 100644
--- a/src/arch/sparc64/Emit.zig
+++ b/src/arch/sparc64/Emit.zig
@@ -69,7 +69,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.dbg_line => try emit.mirDbgLine(inst),
@@ -513,7 +513,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -558,7 +558,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
// If this instruction contained in the code offset
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 7ce6a0482b..53dc28626c 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1255,7 +1255,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// reserve space and insert all prologue instructions at the front of the instruction list
// We insert them in reserve order as there is no insertSlice in multiArrayList.
try func.mir_instructions.ensureUnusedCapacity(func.gpa, prologue.items.len);
- for (prologue.items) |_, index| {
+ for (prologue.items, 0..) |_, index| {
const inst = prologue.items[prologue.items.len - 1 - index];
func.mir_instructions.insertAssumeCapacity(0, inst);
}
@@ -3117,7 +3117,7 @@ fn mergeBranch(func: *CodeGen, branch: *const Branch) !void {
const target_values = target_slice.items(.value);
try parent.values.ensureUnusedCapacity(func.gpa, branch.values.count());
- for (target_keys) |key, index| {
+ for (target_keys, 0..) |key, index| {
// TODO: process deaths from branches
parent.values.putAssumeCapacity(key, target_values[index]);
}
@@ -3501,7 +3501,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const values = try func.gpa.alloc(CaseValue, items.len);
errdefer func.gpa.free(values);
- for (items) |ref, i| {
+ for (items, 0..) |ref, i| {
const item_val = func.air.value(ref).?;
const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
@@ -3561,7 +3561,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
while (value <= highest) : (value += 1) {
// idx represents the branch we jump to
const idx = blk: {
- for (case_list.items) |case, idx| {
+ for (case_list.items, 0..) |case, idx| {
for (case.values) |case_value| {
if (case_value.integer == value) break :blk @intCast(u32, idx);
}
@@ -3588,7 +3588,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
- for (case_list.items) |case, index| {
+ for (case_list.items, 0..) |case, index| {
// when sparse, we use if/else-chain, so emit conditional checks
if (is_sparse) {
// for single value prong we can emit a simple if
@@ -4558,7 +4558,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
const elem_val = try func.resolveInst(elem);
try func.store(offset, elem_val, elem_ty, 0);
@@ -4587,7 +4587,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// we ensure a new local is created so it's zero-initialized
const result = try func.ensureAllocLocal(backing_type);
var current_bit: u16 = 0;
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
const field = fields[elem_index];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -4623,7 +4623,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
const result = try func.allocStack(result_ty);
const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
if (result_ty.structFieldValueComptime(elem_index) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index);
@@ -6149,7 +6149,7 @@ fn callIntrinsic(
} else WValue{ .none = {} };
// Lower all arguments to the stack before we call our function
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
try func.lowerArg(.C, param_types[arg_i], arg);
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index a340ac5da8..7d44d3622f 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -44,7 +44,7 @@ pub fn emitMir(emit: *Emit) InnerError!void {
// before we emit the function body when lowering MIR
try emit.emitLocals();
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
// block instructions
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index c11ea4e63e..f63d80486e 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -186,7 +186,7 @@ const Branch = struct {
_ = options;
comptime assert(unused_format_string.len == 0);
try writer.writeAll("Branch {\n");
- for (ctx.insts) |inst, i| {
+ for (ctx.insts, 0..) |inst, i| {
const mcv = ctx.mcvs[i];
try writer.print(" %{d} => {}\n", .{ inst, mcv });
}
@@ -3951,7 +3951,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
};
defer if (ret_reg_lock) |lock| self.register_manager.unlockReg(lock);
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
const mc_arg = info.args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4912,7 +4912,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
var relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(relocs);
- for (items) |item, item_i| {
+ for (items, 0..) |item, item_i| {
const item_mcv = try self.resolveInst(item);
relocs[item_i] = try self.genCondSwitchMir(condition_ty, condition, item_mcv);
}
@@ -4974,7 +4974,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
for (self.branch_stack.items) |bs| {
log.debug("{}", .{bs.fmtDebug()});
}
- for (branch_stack.items) |bs, i| {
+ for (branch_stack.items, 0..) |bs, i| {
log.debug("Case-{d} branch: {}", .{ i, bs.fmtDebug() });
}
@@ -4999,7 +4999,7 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
const target_keys = target_slice.items(.key);
const target_values = target_slice.items(.value);
- for (target_keys) |target_key, target_idx| {
+ for (target_keys, 0..) |target_key, target_idx| {
const target_value = target_values[target_idx];
const canon_mcv = if (canon_branch.inst_table.fetchSwapRemove(target_key)) |canon_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -5032,7 +5032,7 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
const canon_slice = canon_branch.inst_table.entries.slice();
const canon_keys = canon_slice.items(.key);
const canon_values = canon_slice.items(.value);
- for (canon_keys) |canon_key, canon_idx| {
+ for (canon_keys, 0..) |canon_key, canon_idx| {
const canon_value = canon_values[canon_idx];
// We already deleted the items from this table that matched the target_branch.
// So these are all instructions that are only overridden in the canon branch.
@@ -6571,7 +6571,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
switch (result_ty.zigTypeTag()) {
.Struct => {
const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
- for (elements) |elem, elem_i| {
+ for (elements, 0..) |elem, elem_i| {
if (result_ty.structFieldValueComptime(elem_i) != null) continue; // comptime elem
const elem_ty = result_ty.structFieldType(elem_i);
@@ -6586,7 +6586,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elem_ty = result_ty.childType();
const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
- for (elements) |elem, elem_i| {
+ for (elements, 0..) |elem, elem_i| {
const elem_mcv = try self.resolveInst(elem);
const elem_off = @intCast(i32, elem_size * elem_i);
try self.genSetStack(elem_ty, stack_offset - elem_off, elem_mcv, .{});
@@ -6963,7 +6963,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => 0,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
assert(ty.hasRuntimeBits());
const classes: []const abi.Class = switch (self.target.os.tag) {
@@ -7039,7 +7039,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => 0,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (!ty.hasRuntimeBits()) {
result.args[i] = .{ .none = {} };
continue;
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index c4f9b4eb42..12c19915c6 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -61,7 +61,7 @@ const Reloc = struct {
pub fn lowerMir(emit: *Emit) InnerError!void {
const mir_tags = emit.mir.instructions.items(.tag);
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
try emit.code_offset_mapping.putNoClobber(emit.bin_file.allocator, inst, emit.code.items.len);
switch (tag) {
@@ -1544,7 +1544,7 @@ const OpCode = struct {
fn init(comptime in_bytes: []const u8) OpCode {
comptime assert(in_bytes.len <= 3);
comptime var bytes: [3]u8 = undefined;
- inline for (in_bytes) |x, i| {
+ inline for (in_bytes, 0..) |x, i| {
bytes[i] = x;
}
return .{ .bytes = bytes, .count = in_bytes.len };
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index df2052ca6e..112d9a5982 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -535,7 +535,7 @@ pub const RegisterList = struct {
const Self = @This();
fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
- for (registers) |cpreg, i| {
+ for (registers, 0..) |cpreg, i| {
if (reg.id() == cpreg.id()) return @intCast(u32, i);
}
unreachable; // register not in input register list!
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index 35ac3dcb55..193efa6dc4 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -335,7 +335,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
- for (result) |item, i| switch (item) {
+ for (result, 0..) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
@@ -347,7 +347,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
- for (result) |*item, i| {
+ for (&result, 0..) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
@@ -379,7 +379,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
}
// Combine this field with the previous one.
const field_class = classifySystemV(field.ty, target, .other);
- for (result) |*result_item, i| {
+ for (&result, 0..) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
if (result_item.* == field_item) {
@@ -431,7 +431,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
- for (result) |item, i| switch (item) {
+ for (result, 0..) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
@@ -443,7 +443,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
- for (result) |*item, i| {
+ for (&result, 0..) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
diff --git a/src/codegen.zig b/src/codegen.zig
index c0a04765b0..9eea1c667d 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -511,7 +511,7 @@ pub fn generateSymbol(
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = fields[index].ty;
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those seperately.
@@ -537,7 +537,7 @@ pub fn generateSymbol(
const struct_begin = code.items.len;
const field_vals = typed_value.val.castTag(.aggregate).?.data;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 2f721e1b4b..0beb00b236 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -253,7 +253,7 @@ fn formatIdent(
if (solo and isReservedIdent(ident)) {
try writer.writeAll("zig_e_");
}
- for (ident) |c, i| {
+ for (ident, 0..) |c, i| {
switch (c) {
'a'...'z', 'A'...'Z', '_' => try writer.writeByte(c),
'.' => try writer.writeByte('_'),
@@ -361,7 +361,7 @@ pub const Function = struct {
_ = mutability;
if (f.getFreeLocals().getPtrContext(ty, f.tyHashCtx())) |locals_list| {
- for (locals_list.items) |local_index, i| {
+ for (locals_list.items, 0..) |local_index, i| {
const local = &f.locals.items[local_index];
if (local.alignment >= alignment) {
local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1);
@@ -1283,7 +1283,7 @@ pub const DeclGen = struct {
try writer.writeByte('{');
var empty = true;
- for (field_vals) |field_val, field_index| {
+ for (field_vals, 0..) |field_val, field_index| {
const field_ty = ty.structFieldType(field_index);
if (!field_ty.hasRuntimeBits()) continue;
@@ -1309,7 +1309,7 @@ pub const DeclGen = struct {
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
var eff_num_fields: usize = 0;
- for (field_vals) |_, index| {
+ for (field_vals, 0..) |_, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1331,7 +1331,7 @@ pub const DeclGen = struct {
var eff_index: usize = 0;
var needs_closing_paren = false;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1359,7 +1359,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1719,7 +1719,7 @@ pub const DeclGen = struct {
{
const fields = t.tupleFields();
var field_id: usize = 0;
- for (fields.types) |field_ty, i| {
+ for (fields.types, 0..) |field_ty, i| {
if (!field_ty.hasRuntimeBits() or fields.values[i].tag() != .unreachable_value) continue;
try buffer.append(' ');
@@ -2130,7 +2130,7 @@ pub const DeclGen = struct {
try tuple_storage.ensureTotalCapacity(allocator, t.structFieldCount());
const fields = t.tupleFields();
- for (fields.values) |value, index|
+ for (fields.values, 0..) |value, index|
if (value.tag() == .unreachable_value)
tuple_storage.appendAssumeCapacity(.{
.type = fields.types[index],
@@ -2415,7 +2415,7 @@ pub const DeclGen = struct {
const name_end = buffer.items.len - "(".len;
try dg.renderTypeAndName(bw, enum_ty, .{ .identifier = "tag" }, .Const, 0, .Complete);
try buffer.appendSlice(") {\n switch (tag) {\n");
- for (enum_ty.enumFields().keys()) |name, index| {
+ for (enum_ty.enumFields().keys(), 0..) |name, index| {
const name_z = try dg.typedefs.allocator.dupeZ(u8, name);
defer dg.typedefs.allocator.free(name_z);
const name_bytes = name_z[0 .. name_z.len + 1];
@@ -2681,7 +2681,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("enum {\n");
o.indent_writer.pushIndent();
var max_name_len: usize = 0;
- for (o.dg.module.error_name_list.items) |name, value| {
+ for (o.dg.module.error_name_list.items, 0..) |name, value| {
max_name_len = std.math.max(name.len, max_name_len);
var err_pl = Value.Payload.Error{ .data = .{ .name = name } };
try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other);
@@ -2724,7 +2724,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .Const, 0, .Complete);
try writer.writeAll(" = {");
- for (o.dg.module.error_name_list.items) |name, value| {
+ for (o.dg.module.error_name_list.items, 0..) |name, value| {
if (value != 0) try writer.writeByte(',');
var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
@@ -2742,7 +2742,7 @@ fn genExports(o: *Object) !void {
defer tracy.end();
const fwd_decl_writer = o.dg.fwd_decl.writer();
- if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..]) |@"export", i| {
+ if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..], 0..) |@"export", i| {
try fwd_decl_writer.writeAll("zig_export(");
try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, 1 + i));
try fwd_decl_writer.print(", {s}, {s});\n", .{
@@ -2800,7 +2800,7 @@ pub fn genFunc(f: *Function) !void {
// alignment, descending.
const free_locals = f.getFreeLocals();
const values = f.allocs.values();
- for (f.allocs.keys()) |local_index, i| {
+ for (f.allocs.keys(), 0..) |local_index, i| {
if (values[i]) continue; // static
const local = f.locals.items[local_index];
log.debug("inserting local {d} into free_locals", .{local_index});
@@ -4238,7 +4238,7 @@ fn airCall(
const resolved_args = try gpa.alloc(CValue, args.len);
defer gpa.free(resolved_args);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
resolved_args[i] = try f.resolveInst(arg);
}
@@ -4303,7 +4303,7 @@ fn airCall(
try writer.writeByte('(');
var args_written: usize = 0;
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
const ty = f.air.typeOf(arg);
if (!ty.hasRuntimeBitsIgnoreComptime()) continue;
if (args_written != 0) {
@@ -5043,7 +5043,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
extra_i = constraints_extra_begin;
var locals_index = locals_begin;
try writer.writeByte(':');
- for (outputs) |output, index| {
+ for (outputs, 0..) |output, index| {
const extra_bytes = std.mem.sliceAsBytes(f.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -5067,7 +5067,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
}
try writer.writeByte(':');
- for (inputs) |input, index| {
+ for (inputs, 0..) |input, index| {
const extra_bytes = std.mem.sliceAsBytes(f.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -5426,7 +5426,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
};
const field_loc = switch (struct_ty.tag()) {
.@"struct" => switch (struct_ty.containerLayout()) {
- .Auto, .Extern => for (struct_ty.structFields().values()[index..]) |field, offset| {
+ .Auto, .Extern => for (struct_ty.structFields().values()[index..], 0..) |field, offset| {
if (field.ty.hasRuntimeBitsIgnoreComptime()) break FieldLoc{ .field = .{
.identifier = struct_ty.structFieldName(index + offset),
} };
@@ -5469,7 +5469,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
if (tuple.values[index].tag() != .unreachable_value) return CValue.none;
var id: usize = 0;
- break :field_name for (tuple.values) |value, i| {
+ break :field_name for (tuple.values, 0..) |value, i| {
if (value.tag() != .unreachable_value) continue;
if (!tuple.types[i].hasRuntimeBitsIgnoreComptime()) continue;
if (i >= index) break FieldLoc{ .field = .{ .field = id } };
@@ -6687,7 +6687,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
defer gpa.free(resolved_elements);
- for (elements) |element, i| {
+ for (elements, 0..) |element, i| {
resolved_elements[i] = try f.resolveInst(element);
}
{
@@ -6706,7 +6706,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
switch (inst_ty.zigTypeTag()) {
.Array, .Vector => {
const elem_ty = inst_ty.childType();
- for (resolved_elements) |element, i| {
+ for (resolved_elements, 0..) |element, i| {
try f.writeCValue(writer, local, .Other);
try writer.print("[{d}] = ", .{i});
try f.writeCValue(writer, element, .Other);
@@ -6727,7 +6727,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(")");
try writer.writeByte('{');
var empty = true;
- for (elements) |element, index| {
+ for (elements, 0..) |element, index| {
if (inst_ty.structFieldValueComptime(index)) |_| continue;
if (!empty) try writer.writeAll(", ");
@@ -6746,7 +6746,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("};\n");
var field_id: usize = 0;
- for (elements) |element, index| {
+ for (elements, 0..) |element, index| {
if (inst_ty.structFieldValueComptime(index)) |_| continue;
const element_ty = f.air.typeOf(element);
@@ -6784,7 +6784,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
var empty = true;
- for (elements) |_, index| {
+ for (elements, 0..) |_, index| {
const field_ty = inst_ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -6796,7 +6796,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
empty = false;
}
empty = true;
- for (resolved_elements) |element, index| {
+ for (resolved_elements, 0..) |element, index| {
const field_ty = inst_ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -7608,7 +7608,7 @@ fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void {
}
fn noticeBranchFrees(f: *Function, pre_locals_len: LocalIndex, inst: Air.Inst.Index) !void {
- for (f.locals.items[pre_locals_len..]) |*local, local_offset| {
+ for (f.locals.items[pre_locals_len..], 0..) |*local, local_offset| {
const local_index = pre_locals_len + @intCast(LocalIndex, local_offset);
if (f.allocs.contains(local_index)) continue; // allocs are not freeable
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 8c6943bfdb..aa794827a8 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -600,7 +600,7 @@ pub const Object = struct {
defer mod.gpa.free(llvm_errors);
llvm_errors[0] = llvm_slice_ty.getUndef();
- for (llvm_errors[1..]) |*llvm_error, i| {
+ for (llvm_errors[1..], 0..) |*llvm_error, i| {
const name = error_name_list[1..][i];
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_global = self.llvm_module.addGlobal(str_init.typeOf(), "");
@@ -691,7 +691,7 @@ pub const Object = struct {
object.extern_collisions.clearRetainingCapacity();
const export_keys = mod.decl_exports.keys();
- for (mod.decl_exports.values()) |export_list, i| {
+ for (mod.decl_exports.values(), 0..) |export_list, i| {
const decl_index = export_keys[i];
const llvm_global = object.decl_map.get(decl_index) orelse continue;
for (export_list.items) |exp| {
@@ -1076,7 +1076,7 @@ pub const Object = struct {
const param_alignment = param_ty.abiAlignment(target);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
- for (field_types) |_, field_i_usize| {
+ for (field_types, 0..) |_, field_i_usize| {
const field_i = @intCast(c_uint, field_i_usize);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1495,7 +1495,7 @@ pub const Object = struct {
const int_info = ty.intInfo(target);
assert(int_info.bits != 0);
- for (field_names) |field_name, i| {
+ for (field_names, 0..) |field_name, i| {
const field_name_z = try gpa.dupeZ(u8, field_name);
defer gpa.free(field_name_z);
@@ -1992,7 +1992,7 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
@@ -2921,7 +2921,7 @@ pub const DeclGen = struct {
var offset: u64 = 0;
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
@@ -3432,7 +3432,7 @@ pub const DeclGen = struct {
const llvm_elems = try gpa.alloc(*llvm.Value, len);
defer gpa.free(llvm_elems);
var need_unnamed = false;
- for (elem_vals[0..len]) |elem_val, i| {
+ for (elem_vals[0..len], 0..) |elem_val, i| {
llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
}
@@ -3618,7 +3618,7 @@ pub const DeclGen = struct {
var big_align: u32 = 0;
var need_unnamed = false;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].tag() != .unreachable_value) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -3680,7 +3680,7 @@ pub const DeclGen = struct {
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
- for (field_vals) |field_val, i| {
+ for (field_vals, 0..) |field_val, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -3855,7 +3855,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
var byte_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = bytes[i],
@@ -3880,7 +3880,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] });
}
return llvm.constVector(
@@ -3913,7 +3913,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
var byte_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = bytes[i],
@@ -4479,7 +4479,7 @@ pub const FuncGen = struct {
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
const air_tags = self.air.instructions.items(.tag);
- for (body) |inst, i| {
+ for (body, 0..) |inst, i| {
const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airAdd(inst, false),
@@ -4852,7 +4852,7 @@ pub const FuncGen = struct {
const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
- for (llvm_types) |field_ty, i_usize| {
+ for (llvm_types, 0..) |field_ty, i_usize| {
const i = @intCast(c_uint, i_usize);
const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
@@ -6250,7 +6250,7 @@ pub const FuncGen = struct {
var name_map: std.StringArrayHashMapUnmanaged(u16) = .{};
try name_map.ensureUnusedCapacity(arena, max_param_count);
- for (outputs) |output, i| {
+ for (outputs, 0..) |output, i| {
const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -6435,7 +6435,7 @@ pub const FuncGen = struct {
var name_start: usize = undefined;
var modifier_start: usize = undefined;
- for (asm_source) |byte, i| {
+ for (asm_source, 0..) |byte, i| {
switch (state) {
.start => switch (byte) {
'%' => state = .percent,
@@ -6526,7 +6526,7 @@ pub const FuncGen = struct {
.Auto,
"",
);
- for (llvm_param_attrs[0..param_count]) |llvm_elem_ty, i| {
+ for (llvm_param_attrs[0..param_count], 0..) |llvm_elem_ty, i| {
if (llvm_elem_ty) |llvm_ty| {
llvm.setCallElemTypeAttr(call, i, llvm_ty);
}
@@ -6534,7 +6534,7 @@ pub const FuncGen = struct {
var ret_val = call;
llvm_ret_i = 0;
- for (outputs) |output, i| {
+ for (outputs, 0..) |output, i| {
if (llvm_ret_indirect[i]) continue;
const output_value = if (return_count > 1) b: {
@@ -7416,7 +7416,7 @@ pub const FuncGen = struct {
const index_i32 = llvm_i32.constInt(i, .False);
var args: [3]*llvm.Value = undefined;
- for (args_vectors) |arg_vector, k| {
+ for (args_vectors, 0..) |arg_vector, k| {
args[k] = self.builder.buildExtractElement(arg_vector, index_i32, "");
}
const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args_len, .C, .Auto, "");
@@ -8785,7 +8785,7 @@ pub const FuncGen = struct {
const tag_int_value = fn_val.getParam(0);
const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
- for (fields.keys()) |_, field_index| {
+ for (fields.keys(), 0..) |_, field_index| {
const this_tag_int_value = int: {
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
@@ -8874,7 +8874,7 @@ pub const FuncGen = struct {
usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
};
- for (fields.keys()) |name, field_index| {
+ for (fields.keys(), 0..) |name, field_index| {
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_init_llvm_ty = str_init.typeOf();
const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
@@ -8998,7 +8998,7 @@ pub const FuncGen = struct {
const llvm_i32 = self.context.intType(32);
- for (values) |*val, i| {
+ for (values, 0..) |*val, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
if (elem.isUndef()) {
@@ -9180,7 +9180,7 @@ pub const FuncGen = struct {
const llvm_u32 = self.context.intType(32);
var vector = llvm_result_ty.getUndef();
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const index_u32 = llvm_u32.constInt(i, .False);
const llvm_elem = try self.resolveInst(elem);
vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, "");
@@ -9197,7 +9197,7 @@ pub const FuncGen = struct {
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -9229,7 +9229,7 @@ pub const FuncGen = struct {
const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (result_ty.structFieldValueComptime(i) != null) continue;
const llvm_elem = try self.resolveInst(elem);
@@ -9250,7 +9250,7 @@ pub const FuncGen = struct {
return alloca_inst;
} else {
var result = llvm_result_ty.getUndef();
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (result_ty.structFieldValueComptime(i) != null) continue;
const llvm_elem = try self.resolveInst(elem);
@@ -9275,7 +9275,7 @@ pub const FuncGen = struct {
};
const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base);
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const indices: [2]*llvm.Value = .{
llvm_usize.constNull(),
llvm_usize.constInt(@intCast(c_uint, i), .False),
@@ -9914,7 +9914,7 @@ pub const FuncGen = struct {
};
const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 };
const zero = usize_llvm_ty.constInt(0, .False);
- for (array_elements) |elem, i| {
+ for (array_elements, 0..) |elem, i| {
const indexes = [_]*llvm.Value{
zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False),
};
@@ -10327,7 +10327,7 @@ fn llvmFieldIndex(
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
var llvm_field_index: c_uint = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
const field_align = field_ty.abiAlignment(target);
@@ -10938,7 +10938,7 @@ fn isByRef(ty: Type) bool {
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
var count: usize = 0;
- for (tuple.values) |field_val, i| {
+ for (tuple.values, 0..) |field_val, i| {
if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue;
count += 1;
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index c5a3d57d07..5f27c14e95 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -418,7 +418,7 @@ pub const DeclGen = struct {
const elem_refs = try self.gpa.alloc(IdRef, vector_len);
defer self.gpa.free(elem_refs);
- for (elem_refs) |*elem, i| {
+ for (elem_refs, 0..) |*elem, i| {
elem.* = try self.genConstant(elem_ty, elem_vals[i]);
}
try section.emit(self.spv.gpa, .OpConstantComposite, .{
@@ -498,7 +498,7 @@ pub const DeclGen = struct {
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
- for (param_types) |*param, i| {
+ for (param_types, 0..) |*param, i| {
param.* = try self.resolveType(ty.fnParamType(i));
}
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
index fc4ab406b9..6e77818fa5 100644
--- a/src/codegen/spirv/Assembler.zig
+++ b/src/codegen/spirv/Assembler.zig
@@ -392,7 +392,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
.OpTypeFunction => blk: {
const param_operands = operands[2..];
const param_types = try self.spv.arena.alloc(SpvType.Ref, param_operands.len);
- for (param_types) |*param, i| {
+ for (param_types, 0..) |*param, i| {
param.* = try self.resolveTypeRef(param_operands[i].ref_id);
}
const payload = try self.spv.arena.create(SpvType.Payload.Function);
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index f37b04bff3..3562e87be4 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -161,7 +161,7 @@ pub fn flush(self: Module, file: std.fs.File) !void {
var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
var file_size: u64 = 0;
- for (iovc_buffers) |*iovc, i| {
+ for (&iovc_buffers, 0..) |*iovc, i| {
// Note, since spir-v supports both little and big endian we can ignore byte order here and
// just treat the words as a sequence of bytes.
const bytes = std.mem.sliceAsBytes(buffers[i]);
@@ -389,7 +389,7 @@ fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct
// Decorations for the struct members.
const extra = info.member_decoration_extra;
var extra_i: u32 = 0;
- for (info.members) |member, i| {
+ for (info.members, 0..) |member, i| {
const d = member.decorations;
const index = @intCast(Word, i);
switch (d.matrix_layout) {
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
index 83f594dcef..a76314f5fa 100644
--- a/src/codegen/spirv/Section.zig
+++ b/src/codegen/spirv/Section.zig
@@ -195,7 +195,7 @@ fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDe
fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
var mask: Word = 0;
- inline for (@typeInfo(Operand).Struct.fields) |field, bit| {
+ inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
switch (@typeInfo(field.type)) {
.Optional => if (@field(operand, field.name) != null) {
mask |= 1 << @intCast(u5, bit);
diff --git a/src/codegen/spirv/type.zig b/src/codegen/spirv/type.zig
index 6cc1b8f3bd..dc993b62ff 100644
--- a/src/codegen/spirv/type.zig
+++ b/src/codegen/spirv/type.zig
@@ -98,7 +98,7 @@ pub const Type = extern union {
const struct_b = b.payload(.@"struct");
if (struct_a.members.len != struct_b.members.len)
return false;
- for (struct_a.members) |mem_a, i| {
+ for (struct_a.members, 0..) |mem_a, i| {
if (!std.meta.eql(mem_a, struct_b.members[i]))
return false;
}
diff --git a/src/glibc.zig b/src/glibc.zig
index 2a2887c334..3021e7c7ba 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -698,7 +698,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
const metadata = try loadMetaData(comp.gpa, abilists_contents);
defer metadata.destroy(comp.gpa);
- const target_targ_index = for (metadata.all_targets) |targ, i| {
+ const target_targ_index = for (metadata.all_targets, 0..) |targ, i| {
if (targ.arch == target.cpu.arch and
targ.os == target.os.tag and
targ.abi == target.abi)
@@ -709,7 +709,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
unreachable; // target_util.available_libcs prevents us from getting here
};
- const target_ver_index = for (metadata.all_versions) |ver, i| {
+ const target_ver_index = for (metadata.all_versions, 0..) |ver, i| {
switch (ver.order(target_version)) {
.eq => break i,
.lt => continue,
@@ -743,7 +743,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
var stubs_asm = std.ArrayList(u8).init(comp.gpa);
defer stubs_asm.deinit();
- for (libs) |lib, lib_i| {
+ for (libs, 0..) |lib, lib_i| {
stubs_asm.shrinkRetainingCapacity(0);
try stubs_asm.appendSlice(".text\n");
diff --git a/src/libc_installation.zig b/src/libc_installation.zig
index 0a50f97012..da877e1291 100644
--- a/src/libc_installation.zig
+++ b/src/libc_installation.zig
@@ -66,7 +66,7 @@ pub const LibCInstallation = struct {
var line_it = std.mem.split(u8, line, "=");
const name = line_it.first();
const value = line_it.rest();
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
if (std.mem.eql(u8, name, field.name)) {
found_keys[i].found = true;
if (value.len == 0) {
@@ -79,7 +79,7 @@ pub const LibCInstallation = struct {
}
}
}
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
if (!found_keys[i].found) {
log.err("missing field: {s}\n", .{field.name});
return error.ParseError;
@@ -640,7 +640,7 @@ fn printVerboseInvocation(
} else {
std.debug.print("Zig attempted to find the path to native system libc headers by executing this command:\n", .{});
}
- for (argv) |arg, i| {
+ for (argv, 0..) |arg, i| {
if (i != 0) std.debug.print(" ", .{});
std.debug.print("{s}", .{arg});
}
diff --git a/src/libunwind.zig b/src/libunwind.zig
index 56113d1355..a20b5e81f7 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -34,7 +34,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
.basename = basename,
};
var c_source_files: [unwind_src_list.len]Compilation.CSourceFile = undefined;
- for (unwind_src_list) |unwind_src, i| {
+ for (unwind_src_list, 0..) |unwind_src, i| {
var cflags = std.ArrayList([]const u8).init(arena);
switch (Compilation.classifyFileExt(unwind_src)) {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 2922e783e1..c0ac7e0b88 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -486,7 +486,7 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell.
- for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
+ for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff;
@@ -2191,7 +2191,7 @@ fn logSymtab(self: *Coff) void {
log.debug("symtab:", .{});
log.debug(" object(null)", .{});
- for (self.locals.items) |*sym, sym_id| {
+ for (self.locals.items, 0..) |*sym, sym_id| {
const where = if (sym.section_number == .UNDEFINED) "ord" else "sect";
const def_index: u16 = switch (sym.section_number) {
.UNDEFINED => 0, // TODO
@@ -2216,7 +2216,7 @@ fn logSymtab(self: *Coff) void {
}
log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const got_sym = self.getSymbol(.{ .sym_index = entry.sym_index, .file = null });
const target_sym = self.getSymbol(entry.target);
if (target_sym.section_number == .UNDEFINED) {
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index a3d0aa8a53..3fb6de7b73 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -339,7 +339,7 @@ pub const DeclState = struct {
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
const fields = ty.tupleFields();
- for (fields.types) |field, field_index| {
+ for (fields.types, 0..) |field, field_index| {
// DW.AT.member
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@@ -367,7 +367,7 @@ pub const DeclState = struct {
}
const fields = ty.structFields();
- for (fields.keys()) |field_name, field_index| {
+ for (fields.keys(), 0..) |field_name, field_index| {
const field = fields.get(field_name).?;
if (!field.ty.hasRuntimeBits()) continue;
// DW.AT.member
@@ -409,7 +409,7 @@ pub const DeclState = struct {
.enum_numbered => ty.castTag(.enum_numbered).?.data.values,
else => unreachable,
};
- for (fields.keys()) |field_name, field_i| {
+ for (fields.keys(), 0..) |field_name, field_i| {
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant));
@@ -2252,14 +2252,14 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
1, // `DW.LNS.set_isa`
});
- for (paths.dirs) |dir, i| {
+ for (paths.dirs, 0..) |dir, i| {
log.debug("adding new include dir at {d} of '{s}'", .{ i + 1, dir });
di_buf.appendSliceAssumeCapacity(dir);
di_buf.appendAssumeCapacity(0);
}
di_buf.appendAssumeCapacity(0); // include directories sentinel
- for (paths.files) |file, i| {
+ for (paths.files, 0..) |file, i| {
const dir_index = paths.files_dirs_indexes[i];
log.debug("adding new file name at {d} of '{s}' referencing directory {d}", .{ i + 1, file, dir_index + 1 });
di_buf.appendSliceAssumeCapacity(file);
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index d936a347cf..1a9d594c56 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1126,7 +1126,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
defer gpa.free(buf);
- for (buf) |*phdr, i| {
+ for (buf, 0..) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
@@ -1138,7 +1138,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
defer gpa.free(buf);
- for (buf) |*phdr, i| {
+ for (buf, 0..) |*phdr, i| {
phdr.* = self.program_headers.items[i];
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
@@ -1193,7 +1193,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
defer gpa.free(buf);
- for (buf) |*shdr, i| {
+ for (buf, 0..) |*shdr, i| {
shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
@@ -1207,7 +1207,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
defer gpa.free(buf);
- for (buf) |*shdr, i| {
+ for (buf, 0..) |*shdr, i| {
shdr.* = slice.items(.shdr)[i];
log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
@@ -1732,7 +1732,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
argv.appendAssumeCapacity("--as-needed");
var as_needed = true;
- for (system_libs) |link_lib, i| {
+ for (system_libs, 0..) |link_lib, i| {
const lib_as_needed = !system_libs_values[i].needed;
switch ((@as(u2, @boolToInt(lib_as_needed)) << 1) | @boolToInt(as_needed)) {
0b00, 0b11 => {},
@@ -2909,7 +2909,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
const buf = try self.base.allocator.alloc(elf.Elf32_Sym, self.global_symbols.items.len);
defer self.base.allocator.free(buf);
- for (buf) |*sym, i| {
+ for (buf, 0..) |*sym, i| {
const global = self.global_symbols.items[i];
sym.* = .{
.st_name = global.st_name,
@@ -2929,7 +2929,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
const buf = try self.base.allocator.alloc(elf.Elf64_Sym, self.global_symbols.items.len);
defer self.base.allocator.free(buf);
- for (buf) |*sym, i| {
+ for (buf, 0..) |*sym, i| {
const global = self.global_symbols.items[i];
sym.* = .{
.st_name = global.st_name,
@@ -3238,11 +3238,11 @@ const CsuObjects = struct {
fn logSymtab(self: Elf) void {
log.debug("locals:", .{});
- for (self.local_symbols.items) |sym, id| {
+ for (self.local_symbols.items, 0..) |sym, id| {
log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
log.debug("globals:", .{});
- for (self.global_symbols.items) |sym, id| {
+ for (self.global_symbols.items, 0..) |sym, id| {
log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 35f5f1b562..7c1d4776af 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -962,7 +962,7 @@ pub fn parseLibs(
syslibroot: ?[]const u8,
dependent_libs: anytype,
) !void {
- for (lib_names) |lib, i| {
+ for (lib_names, 0..) |lib, i| {
const lib_info = lib_infos[i];
log.debug("parsing lib path '{s}'", .{lib});
if (try self.parseDylib(lib, dependent_libs, .{
@@ -1584,7 +1584,7 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1686,7 +1686,7 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
gop.value_ptr.* = sym_loc;
const global = gop.value_ptr.*;
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -2852,7 +2852,7 @@ fn moveSectionInVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void
});
// TODO: enforce order by increasing VM addresses in self.sections container.
- for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
+ for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
const index = @intCast(u8, sect_id + 1 + next_sect_id);
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
@@ -3082,7 +3082,7 @@ pub fn initSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
fn insertSection(self: *MachO, segment_index: u8, header: macho.section_64) !u8 {
const precedence = getSectionPrecedence(header);
const indexes = self.getSectionIndexes(segment_index);
- const insertion_index = for (self.sections.items(.header)[indexes.start..indexes.end]) |hdr, i| {
+ const insertion_index = for (self.sections.items(.header)[indexes.start..indexes.end], 0..) |hdr, i| {
if (getSectionPrecedence(hdr) > precedence) break @intCast(u8, i + indexes.start);
} else indexes.end;
log.debug("inserting section '{s},{s}' at index {d}", .{
@@ -3133,7 +3133,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8) !u32 {
}
fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
try writer.writeStruct(seg);
for (self.sections.items(.header)[indexes.start..indexes.end]) |header| {
@@ -3147,7 +3147,7 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
seg.filesize = 0;
seg.vmsize = 0;
- for (self.segments.items) |segment, id| {
+ for (self.segments.items, 0..) |segment, id| {
if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue;
if (seg.vmaddr < segment.vmaddr + segment.vmsize) {
seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size);
@@ -3167,7 +3167,7 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (self.rebases.keys()) |atom_index, i| {
+ for (self.rebases.keys(), 0..) |atom_index, i| {
const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
@@ -3197,7 +3197,7 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (raw_bindings.keys()) |atom_index, i| {
+ for (raw_bindings.keys(), 0..) |atom_index, i| {
const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
@@ -3417,7 +3417,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
var locals = std.ArrayList(macho.nlist_64).init(gpa);
defer locals.deinit();
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
@@ -3736,7 +3736,7 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 {
}
fn getSegmentByName(self: MachO, segname: []const u8) ?u8 {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
} else return null;
}
@@ -3758,7 +3758,7 @@ pub fn getLinkeditSegmentPtr(self: *MachO) *macho.segment_command_64 {
pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8) ?u8 {
// TODO investigate caching with a hashmap
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
return @intCast(u8, i);
} else return null;
@@ -3766,7 +3766,7 @@ pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8)
pub fn getSectionIndexes(self: MachO, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
@@ -4160,7 +4160,7 @@ pub fn findFirst(comptime T: type, haystack: []align(1) const T, start: usize, p
pub fn logSections(self: *MachO) void {
log.debug("sections:", .{});
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
log.debug(" sect({d}): {s},{s} @{x}, sizeof({x})", .{
i + 1,
header.segName(),
@@ -4197,7 +4197,7 @@ pub fn logSymtab(self: *MachO) void {
var buf: [4]u8 = undefined;
log.debug("symtab:", .{});
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
const where = if (sym.undf() and !sym.tentative()) "ord" else "sect";
const def_index = if (sym.undf() and !sym.tentative())
@divTrunc(sym.n_desc, macho.N_SYMBOL_RESOLVER)
@@ -4220,7 +4220,7 @@ pub fn logSymtab(self: *MachO) void {
}
log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const atom_sym = entry.getSymbol(self);
const target_sym = self.getSymbol(entry.target);
if (target_sym.undf()) {
@@ -4241,7 +4241,7 @@ pub fn logSymtab(self: *MachO) void {
}
log.debug("stubs entries:", .{});
- for (self.stubs.items) |entry, i| {
+ for (self.stubs.items, 0..) |entry, i| {
const target_sym = self.getSymbol(entry.target);
const atom_sym = entry.getSymbol(self);
assert(target_sym.undf());
@@ -4257,7 +4257,7 @@ pub fn logAtoms(self: *MachO) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.last_atom_index)) |last_atom_index, i| {
+ for (slice.items(.last_atom_index), 0..) |last_atom_index, i| {
var atom_index = last_atom_index orelse continue;
const header = slice.items(.header)[i];
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 0a5c8b0372..1f41fc1cb8 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -383,7 +383,7 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) !void {
// Write segment/section headers from the binary file first.
const end = macho_file.linkedit_segment_cmd_index.?;
- for (macho_file.segments.items[0..end]) |seg, i| {
+ for (macho_file.segments.items[0..end], 0..) |seg, i| {
const indexes = macho_file.getSectionIndexes(@intCast(u8, i));
var out_seg = seg;
out_seg.fileoff = 0;
@@ -412,7 +412,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
}
}
// Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
try writer.writeStruct(seg);
for (self.sections.items[indexes.start..indexes.end]) |header| {
@@ -477,7 +477,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
var locals = std.ArrayList(macho.nlist_64).init(gpa);
defer locals.deinit();
- for (macho_file.locals.items) |sym, sym_id| {
+ for (macho_file.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
if (macho_file.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
@@ -547,7 +547,7 @@ fn writeStrtab(self: *DebugSymbols) !void {
pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index a2c4bad942..863f1e805a 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -347,7 +347,7 @@ pub fn parseFromStub(
});
defer matcher.deinit();
- for (lib_stub.inner) |elem, stub_index| {
+ for (lib_stub.inner, 0..) |elem, stub_index| {
const is_match = switch (elem) {
.v3 => |stub| matcher.matchesArch(stub.archs),
.v4 => |stub| matcher.matchesTarget(stub.targets),
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 4d24b2ed6a..fdcdb47224 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -201,7 +201,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
var sorted_all_syms = try std.ArrayList(SymbolAtIndex).initCapacity(allocator, self.in_symtab.?.len);
defer sorted_all_syms.deinit();
- for (self.in_symtab.?) |_, index| {
+ for (self.in_symtab.?, 0..) |_, index| {
sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
}
@@ -211,7 +211,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
// is kind enough to specify the symbols in the correct order.
sort.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan);
- for (sorted_all_syms.items) |sym_id, i| {
+ for (sorted_all_syms.items, 0..) |sym_id, i| {
const sym = sym_id.getSymbol(self);
if (sym.sect() and self.source_section_index_lookup[sym.n_sect - 1] == -1) {
@@ -380,7 +380,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
const gpa = zld.gpa;
const sections = self.getSourceSections();
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
if (sect.isDebug()) continue;
const out_sect_id = (try zld.getOutputSection(sect)) orelse {
log.debug(" unhandled section '{s},{s}'", .{ sect.segName(), sect.sectName() });
@@ -400,7 +400,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
}
if (self.in_symtab == null) {
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
if (sect.isDebug()) continue;
const out_sect_id = (try zld.getOutputSection(sect)) orelse continue;
if (sect.size == 0) continue;
@@ -446,7 +446,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
var sorted_sections = try gpa.alloc(SortedSection, sections.len);
defer gpa.free(sorted_sections);
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) };
}
@@ -804,7 +804,7 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
try self.parseRelocs(gpa, sect_id);
const relocs = self.getRelocs(sect_id);
- for (unwind_records) |record, record_id| {
+ for (unwind_records, 0..) |record, record_id| {
const offset = record_id * @sizeOf(macho.compact_unwind_entry);
const rel_pos = filterRelocs(
relocs,
@@ -857,7 +857,7 @@ pub fn getSourceSectionByName(self: Object, segname: []const u8, sectname: []con
pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname: []const u8) ?u8 {
const sections = self.getSourceSections();
- for (sections) |sect, i| {
+ for (sections, 0..) |sect, i| {
if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName()))
return @intCast(u8, i);
} else return null;
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig
index 5e61834bbc..c64e617a35 100644
--- a/src/link/MachO/UnwindInfo.zig
+++ b/src/link/MachO/UnwindInfo.zig
@@ -126,7 +126,7 @@ const Page = struct {
ctx.page.start + ctx.page.count,
});
try writer.print(" encodings (count = {d})\n", .{ctx.page.page_encodings_count});
- for (ctx.page.page_encodings[0..ctx.page.page_encodings_count]) |record_id, i| {
+ for (ctx.page.page_encodings[0..ctx.page.page_encodings_count], 0..) |record_id, i| {
const record = ctx.info.records.items[record_id];
const enc = record.compactUnwindEncoding;
try writer.print(" {d}: 0x{x:0>8}\n", .{ ctx.info.common_encodings_count + i, enc });
@@ -205,7 +205,7 @@ pub fn scanRelocs(zld: *Zld) !void {
if (zld.getSectionByName("__TEXT", "__unwind_info") == null) return;
const cpu_arch = zld.options.target.cpu.arch;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
const unwind_records = object.getUnwindRecords();
for (object.exec_atoms.items) |atom_index| {
const record_id = object.unwind_records_lookup.get(atom_index) orelse continue;
@@ -244,7 +244,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
defer atom_indexes.deinit();
// TODO handle dead stripping
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
log.debug("collecting unwind records in {s} ({d})", .{ object.name, object_id });
const unwind_records = object.getUnwindRecords();
@@ -335,7 +335,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len));
var maybe_prev: ?macho.compact_unwind_entry = null;
- for (records.items) |record, i| {
+ for (records.items, 0..) |record, i| {
const record_id = blk: {
if (maybe_prev) |prev| {
const is_dwarf = UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch);
@@ -483,7 +483,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
// Save indices of records requiring LSDA relocation
try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len));
- for (info.records.items) |rec, i| {
+ for (info.records.items, 0..) |rec, i| {
info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len));
if (rec.lsda == 0) continue;
try info.lsdas.append(info.gpa, @intCast(RecordIndex, i));
@@ -556,7 +556,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const cpu_arch = zld.options.target.cpu.arch;
log.debug("Personalities:", .{});
- for (info.personalities[0..info.personalities_count]) |target, i| {
+ for (info.personalities[0..info.personalities_count], 0..) |target, i| {
const atom_index = zld.getGotAtomIndexForSymbol(target).?;
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
@@ -581,7 +581,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
}
}
- for (info.records.items) |record, i| {
+ for (info.records.items, 0..) |record, i| {
log.debug("Unwind record at offset 0x{x}", .{i * @sizeOf(macho.compact_unwind_entry)});
log.debug(" start: 0x{x}", .{record.rangeStart});
log.debug(" length: 0x{x}", .{record.rangeLength});
@@ -621,7 +621,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes));
const lsda_base_offset = @intCast(u32, pages_base_offset -
(info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)));
- for (info.pages.items) |page, i| {
+ for (info.pages.items, 0..) |page, i| {
assert(page.count > 0);
const first_entry = info.records.items[page.start];
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig
index 6ba70acbfd..9dfd6226b4 100644
--- a/src/link/MachO/dead_strip.zig
+++ b/src/link/MachO/dead_strip.zig
@@ -238,7 +238,7 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
}
}
- for (zld.objects.items) |_, object_id| {
+ for (zld.objects.items, 0..) |_, object_id| {
// Traverse unwind and eh_frame records noting if the source symbol has been marked, and if so,
// marking all references as live.
try markUnwindRecords(zld, @intCast(u32, object_id), alive);
diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig
index 0233744c43..1d7a0c94c0 100644
--- a/src/link/MachO/dyld_info/Rebase.zig
+++ b/src/link/MachO/dyld_info/Rebase.zig
@@ -45,7 +45,7 @@ pub fn finalize(rebase: *Rebase, gpa: Allocator) !void {
var start: usize = 0;
var seg_id: ?u8 = null;
- for (rebase.entries.items) |entry, i| {
+ for (rebase.entries.items, 0..) |entry, i| {
if (seg_id != null and seg_id.? == entry.segment_id) continue;
try finalizeSegment(rebase.entries.items[start..i], writer);
seg_id = entry.segment_id;
diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig
index b4e51478e0..98a693920a 100644
--- a/src/link/MachO/dyld_info/bind.zig
+++ b/src/link/MachO/dyld_info/bind.zig
@@ -51,7 +51,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
var start: usize = 0;
var seg_id: ?u8 = null;
- for (self.entries.items) |entry, i| {
+ for (self.entries.items, 0..) |entry, i| {
if (seg_id != null and seg_id.? == entry.segment_id) continue;
try finalizeSegment(self.entries.items[start..i], ctx, writer);
seg_id = entry.segment_id;
diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig
index 3867b15a96..5420bf6c29 100644
--- a/src/link/MachO/eh_frame.zig
+++ b/src/link/MachO/eh_frame.zig
@@ -16,7 +16,7 @@ const Zld = @import("zld.zig").Zld;
pub fn scanRelocs(zld: *Zld) !void {
const gpa = zld.gpa;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
var cies = std.AutoHashMap(u32, void).init(gpa);
defer cies.deinit();
@@ -108,7 +108,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
var eh_frame_offset: u32 = 0;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len));
var cies = std.AutoHashMap(u32, u32).init(gpa);
@@ -407,7 +407,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
- for (aug_str) |ch, i| switch (ch) {
+ for (aug_str, 0..) |ch, i| switch (ch) {
'z' => if (i > 0) {
return error.BadDwarfCfi;
} else {
@@ -467,7 +467,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
- for (aug_str) |ch, i| switch (ch) {
+ for (aug_str, 0..) |ch, i| switch (ch) {
'z' => if (i > 0) {
return error.BadDwarfCfi;
} else {
diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig
index f12fb80668..ce3fda0b1f 100644
--- a/src/link/MachO/thunks.zig
+++ b/src/link/MachO/thunks.zig
@@ -329,7 +329,7 @@ fn createThunkAtom(zld: *Zld) !AtomIndex {
fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex {
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
- for (zld.thunks.items) |thunk, i| {
+ for (zld.thunks.items, 0..) |thunk, i| {
if (thunk.len == 0) continue;
const thunk_atom_index = thunk.getStartAtomIndex();
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index a94a0828fc..a901e4fd4b 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -321,7 +321,7 @@ pub const Zld = struct {
syslibroot: ?[]const u8,
dependent_libs: anytype,
) !void {
- for (lib_names) |lib, i| {
+ for (lib_names, 0..) |lib, i| {
const lib_info = lib_infos[i];
log.debug("parsing lib path '{s}'", .{lib});
if (try self.parseDylib(lib, dependent_libs, .{
@@ -1092,7 +1092,7 @@ pub const Zld = struct {
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1223,7 +1223,7 @@ pub const Zld = struct {
const global = SymbolWithLoc{ .sym_index = sym_index };
try self.globals.append(gpa, global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1311,7 +1311,7 @@ pub const Zld = struct {
});
}
- for (self.sections.items(.header)) |header, sect_id| {
+ for (self.sections.items(.header), 0..) |header, sect_id| {
if (header.size == 0) continue; // empty section
const segname = header.segName();
@@ -1385,7 +1385,7 @@ pub const Zld = struct {
const gpa = self.gpa;
const slice = self.sections.slice();
- for (slice.items(.first_atom_index)) |first_atom_index, sect_id| {
+ for (slice.items(.first_atom_index), 0..) |first_atom_index, sect_id| {
const header = slice.items(.header)[sect_id];
var atom_index = first_atom_index;
@@ -1525,7 +1525,7 @@ pub const Zld = struct {
fn calcSectionSizes(self: *Zld) !void {
const slice = self.sections.slice();
- for (slice.items(.header)) |*header, sect_id| {
+ for (slice.items(.header), 0..) |*header, sect_id| {
if (header.size == 0) continue;
if (self.requiresThunks()) {
if (header.isCode() and !(header.type() == macho.S_SYMBOL_STUBS) and !mem.eql(u8, header.sectName(), "__stub_helper")) continue;
@@ -1556,7 +1556,7 @@ pub const Zld = struct {
}
if (self.requiresThunks()) {
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
if (!header.isCode()) continue;
if (header.type() == macho.S_SYMBOL_STUBS) continue;
if (mem.eql(u8, header.sectName(), "__stub_helper")) continue;
@@ -1568,7 +1568,7 @@ pub const Zld = struct {
}
fn allocateSegments(self: *Zld) !void {
- for (self.segments.items) |*segment, segment_index| {
+ for (self.segments.items, 0..) |*segment, segment_index| {
const is_text_segment = mem.eql(u8, segment.segName(), "__TEXT");
const base_size = if (is_text_segment) try load_commands.calcMinHeaderPad(self.gpa, self.options, .{
.segments = self.segments.items,
@@ -1606,7 +1606,7 @@ pub const Zld = struct {
var start = init_size;
const slice = self.sections.slice();
- for (slice.items(.header)[indexes.start..indexes.end]) |*header, sect_id| {
+ for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
const alignment = try math.powi(u32, 2, header.@"align");
const start_aligned = mem.alignForwardGeneric(u64, start, alignment);
const n_sect = @intCast(u8, indexes.start + sect_id + 1);
@@ -1750,7 +1750,7 @@ pub const Zld = struct {
}
fn writeSegmentHeaders(self: *Zld, writer: anytype) !void {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
var out_seg = seg;
out_seg.cmdsize = @sizeOf(macho.segment_command_64);
@@ -1852,7 +1852,7 @@ pub const Zld = struct {
}
// Finally, unpack the rest.
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
switch (header.type()) {
macho.S_LITERAL_POINTERS,
macho.S_REGULAR,
@@ -1989,7 +1989,7 @@ pub const Zld = struct {
// Finally, unpack the rest.
const slice = self.sections.slice();
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
switch (header.type()) {
macho.S_LITERAL_POINTERS,
macho.S_REGULAR,
@@ -2710,7 +2710,7 @@ pub const Zld = struct {
const amt = try self.file.preadAll(locals_buf, self.symtab_cmd.symoff);
if (amt != locals_buf.len) return error.InputOutput;
- const istab: usize = for (locals) |local, i| {
+ const istab: usize = for (locals, 0..) |local, i| {
if (local.stab()) break i;
} else locals.len;
const nstabs = locals.len - istab;
@@ -2897,7 +2897,7 @@ pub const Zld = struct {
}
fn getSegmentByName(self: Zld, segname: []const u8) ?u8 {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
} else return null;
}
@@ -2921,7 +2921,7 @@ pub const Zld = struct {
pub fn getSectionByName(self: Zld, segname: []const u8, sectname: []const u8) ?u8 {
// TODO investigate caching with a hashmap
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
return @intCast(u8, i);
} else return null;
@@ -2929,7 +2929,7 @@ pub const Zld = struct {
pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
@@ -3220,7 +3220,7 @@ pub const Zld = struct {
fn logSegments(self: *Zld) void {
log.debug("segments:", .{});
- for (self.segments.items) |segment, i| {
+ for (self.segments.items, 0..) |segment, i| {
log.debug(" segment({d}): {s} @{x} ({x}), sizeof({x})", .{
i,
segment.segName(),
@@ -3233,7 +3233,7 @@ pub const Zld = struct {
fn logSections(self: *Zld) void {
log.debug("sections:", .{});
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
log.debug(" sect({d}): {s},{s} @{x} ({x}), sizeof({x})", .{
i + 1,
header.segName(),
@@ -3271,10 +3271,10 @@ pub const Zld = struct {
const scoped_log = std.log.scoped(.symtab);
scoped_log.debug("locals:", .{});
- for (self.objects.items) |object, id| {
+ for (self.objects.items, 0..) |object, id| {
scoped_log.debug(" object({d}): {s}", .{ id, object.name });
if (object.in_symtab == null) continue;
- for (object.symtab) |sym, sym_id| {
+ for (object.symtab, 0..) |sym, sym_id| {
mem.set(u8, &buf, '_');
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
@@ -3286,7 +3286,7 @@ pub const Zld = struct {
}
}
scoped_log.debug(" object(-1)", .{});
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
if (sym.undf()) continue;
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
@@ -3298,7 +3298,7 @@ pub const Zld = struct {
}
scoped_log.debug("exports:", .{});
- for (self.globals.items) |global, i| {
+ for (self.globals.items, 0..) |global, i| {
const sym = self.getSymbol(global);
if (sym.undf()) continue;
if (sym.n_desc == N_DEAD) continue;
@@ -3313,7 +3313,7 @@ pub const Zld = struct {
}
scoped_log.debug("imports:", .{});
- for (self.globals.items) |global, i| {
+ for (self.globals.items, 0..) |global, i| {
const sym = self.getSymbol(global);
if (!sym.undf()) continue;
if (sym.n_desc == N_DEAD) continue;
@@ -3328,7 +3328,7 @@ pub const Zld = struct {
}
scoped_log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3350,7 +3350,7 @@ pub const Zld = struct {
}
scoped_log.debug("__thread_ptrs entries:", .{});
- for (self.tlv_ptr_entries.items) |entry, i| {
+ for (self.tlv_ptr_entries.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3363,7 +3363,7 @@ pub const Zld = struct {
}
scoped_log.debug("stubs entries:", .{});
- for (self.stubs.items) |entry, i| {
+ for (self.stubs.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3376,9 +3376,9 @@ pub const Zld = struct {
}
scoped_log.debug("thunks:", .{});
- for (self.thunks.items) |thunk, i| {
+ for (self.thunks.items, 0..) |thunk, i| {
scoped_log.debug(" thunk({d})", .{i});
- for (thunk.lookup.keys()) |target, j| {
+ for (thunk.lookup.keys(), 0..) |target, j| {
const target_sym = self.getSymbol(target);
const atom = self.getAtom(thunk.lookup.get(target).?);
const atom_sym = self.getSymbol(atom.getSymbolWithLoc());
@@ -3395,7 +3395,7 @@ pub const Zld = struct {
fn logAtoms(self: *Zld) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.first_atom_index)) |first_atom_index, sect_id| {
+ for (slice.items(.first_atom_index), 0..) |first_atom_index, sect_id| {
var atom_index = first_atom_index;
if (atom_index == 0) continue;
@@ -3980,7 +3980,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
.unresolved = std.AutoArrayHashMap(u32, void).init(arena),
};
- for (zld.objects.items) |_, object_id| {
+ for (zld.objects.items, 0..) |_, object_id| {
try zld.resolveSymbolsInObject(@intCast(u32, object_id), &resolver);
}
@@ -4010,7 +4010,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
zld.entry_index = global_index;
}
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
try object.splitIntoAtoms(&zld, @intCast(u32, object_id));
}
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 14a29e4498..2d74e404eb 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -298,7 +298,7 @@ fn cloneAir(air: Air, gpa: Allocator, air_arena: Allocator) !Air {
const values = try gpa.alloc(Value, air.values.len);
errdefer gpa.free(values);
- for (values) |*value, i| {
+ for (values, 0..) |*value, i| {
value.* = try air.values[i].copy(air_arena);
}
@@ -308,7 +308,7 @@ fn cloneAir(air: Air, gpa: Allocator, air_arena: Allocator) !Air {
const air_tags = instructions.items(.tag);
const air_datas = instructions.items(.data);
- for (air_tags) |tag, i| {
+ for (air_tags, 0..) |tag, i| {
switch (tag) {
.alloc, .ret_ptr, .const_ty => air_datas[i].ty = try air_datas[i].ty.copy(air_arena),
else => {},
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index e62a2050d7..00a52177f7 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -590,7 +590,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
const object: Object = wasm.objects.items[object_index];
log.debug("Resolving symbols in object: '{s}'", .{object.name});
- for (object.symtable) |symbol, i| {
+ for (object.symtable, 0..) |symbol, i| {
const sym_index = @intCast(u32, i);
const location: SymbolLoc = .{
.file = object_index,
@@ -794,7 +794,7 @@ fn validateFeatures(
// extract all the used, disallowed and required features from each
// linked object file so we can test them.
- for (wasm.objects.items) |object, object_index| {
+ for (wasm.objects.items, 0..) |object, object_index| {
for (object.features) |feature| {
const value = @intCast(u16, object_index) << 1 | @as(u1, 1);
switch (feature.prefix) {
@@ -815,7 +815,7 @@ fn validateFeatures(
// when we infer the features, we allow each feature found in the 'used' set
// and insert it into the 'allowed' set. When features are not inferred,
// we validate that a used feature is allowed.
- for (used) |used_set, used_index| {
+ for (used, 0..) |used_set, used_index| {
const is_enabled = @truncate(u1, used_set) != 0;
if (infer) {
allowed[used_index] = is_enabled;
@@ -849,7 +849,7 @@ fn validateFeatures(
}
// validate the linked object file has each required feature
- for (required) |required_feature, feature_index| {
+ for (required, 0..) |required_feature, feature_index| {
const is_required = @truncate(u1, required_feature) != 0;
if (is_required and !object_used_features[feature_index]) {
log.err("feature '{s}' is required but not used in linked object", .{(@intToEnum(types.Feature.Tag, feature_index)).toString()});
@@ -1818,7 +1818,7 @@ fn sortDataSegments(wasm: *Wasm) !void {
/// original functions and their types. We need to know the type to verify it doesn't
/// contain any parameters.
fn setupInitFunctions(wasm: *Wasm) !void {
- for (wasm.objects.items) |object, file_index| {
+ for (wasm.objects.items, 0..) |object, file_index| {
try wasm.init_funcs.ensureUnusedCapacity(wasm.base.allocator, object.init_funcs.len);
for (object.init_funcs) |init_func| {
const symbol = object.symtable[init_func.symbol_index];
@@ -2717,7 +2717,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.parseInputFiles(positionals.items);
- for (wasm.objects.items) |_, object_index| {
+ for (wasm.objects.items, 0..) |_, object_index| {
try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
}
@@ -2732,7 +2732,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.setupStart();
try wasm.setupImports();
- for (wasm.objects.items) |*object, object_index| {
+ for (wasm.objects.items, 0..) |*object, object_index| {
try object.parseIntoAtoms(gpa, @intCast(u16, object_index), wasm);
}
@@ -2801,7 +2801,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.parseInputFiles(positionals.items);
- for (wasm.objects.items) |_, object_index| {
+ for (wasm.objects.items, 0..) |_, object_index| {
try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
}
@@ -2850,7 +2850,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
- for (wasm.objects.items) |*object, object_index| {
+ for (wasm.objects.items, 0..) |*object, object_index| {
try object.parseIntoAtoms(wasm.base.allocator, @intCast(u16, object_index), wasm);
}
@@ -3362,7 +3362,7 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con
try writer.writeAll(target_features);
try leb.writeULEB128(writer, features_count);
- for (enabled_features) |enabled, feature_index| {
+ for (enabled_features, 0..) |enabled, feature_index| {
if (enabled) {
const feature: types.Feature = .{ .prefix = .used, .tag = @intToEnum(types.Feature.Tag, feature_index) };
try leb.writeULEB128(writer, @enumToInt(feature.prefix));
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 7d4f6a4e36..82cab2528a 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -882,7 +882,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
list.deinit();
} else symbol_for_segment.deinit();
- for (object.symtable) |symbol, symbol_index| {
+ for (object.symtable, 0..) |symbol, symbol_index| {
switch (symbol.tag) {
.function, .data, .section => if (!symbol.isUndefined()) {
const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index });
@@ -896,7 +896,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
}
}
- for (object.relocatable_data) |relocatable_data, index| {
+ for (object.relocatable_data, 0..) |relocatable_data, index| {
const final_index = (try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index))) orelse {
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index 20a3a2493e..c97332984f 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -124,7 +124,7 @@ pub const LibStub = struct {
log.debug("trying to parse as []TbdV4", .{});
const inner = lib_stub.yaml.parse([]TbdV4) catch break :err;
var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
- for (inner) |doc, i| {
+ for (inner, 0..) |doc, i| {
out[i] = .{ .v4 = doc };
}
break :blk out;
@@ -142,7 +142,7 @@ pub const LibStub = struct {
log.debug("trying to parse as []TbdV3", .{});
const inner = lib_stub.yaml.parse([]TbdV3) catch break :err;
var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
- for (inner) |doc, i| {
+ for (inner, 0..) |doc, i| {
out[i] = .{ .v3 = doc };
}
break :blk out;
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
index 748f1c138f..d4136b35d3 100644
--- a/src/link/tapi/yaml.zig
+++ b/src/link/tapi/yaml.zig
@@ -84,7 +84,7 @@ pub const Value = union(ValueType) {
const first = list[0];
if (first.is_compound()) {
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
try writer.writeByteNTimes(' ', args.indentation);
try writer.writeAll("- ");
try elem.stringify(writer, .{
@@ -99,7 +99,7 @@ pub const Value = union(ValueType) {
}
try writer.writeAll("[ ");
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
try elem.stringify(writer, args);
if (i < len - 1) {
try writer.writeAll(", ");
@@ -112,7 +112,7 @@ pub const Value = union(ValueType) {
const len = keys.len;
if (len == 0) return;
- for (keys) |key, i| {
+ for (keys, 0..) |key, i| {
if (!args.should_inline_first_key or i != 0) {
try writer.writeByteNTimes(' ', args.indentation);
}
@@ -292,7 +292,7 @@ pub const Yaml = struct {
switch (@typeInfo(T)) {
.Array => |info| {
var parsed: T = undefined;
- for (self.docs.items) |doc, i| {
+ for (self.docs.items, 0..) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
return parsed;
@@ -301,7 +301,7 @@ pub const Yaml = struct {
switch (info.size) {
.Slice => {
var parsed = try self.arena.allocator().alloc(info.child, self.docs.items.len);
- for (self.docs.items) |doc, i| {
+ for (self.docs.items, 0..) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
return parsed;
@@ -393,7 +393,7 @@ pub const Yaml = struct {
}
var parsed = try arena.alloc(ptr_info.child, value.list.len);
- for (value.list) |elem, i| {
+ for (value.list, 0..) |elem, i| {
parsed[i] = try self.parseValue(ptr_info.child, elem);
}
return parsed;
@@ -407,7 +407,7 @@ pub const Yaml = struct {
if (array_info.len != list.len) return error.ArraySizeMismatch;
var parsed: T = undefined;
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
parsed[i] = try self.parseValue(array_info.child, elem);
}
diff --git a/src/main.zig b/src/main.zig
index a0cdfb36b6..e80be06a36 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -3684,10 +3684,10 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, clang_args_len + 1);
new_argv_with_sentinel[clang_args_len] = null;
const new_argv = new_argv_with_sentinel[0..clang_args_len :null];
- for (argv.items) |arg, i| {
+ for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
- for (c_source_file.extra_flags) |arg, i| {
+ for (c_source_file.extra_flags, 0..) |arg, i| {
new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg);
}
@@ -4816,7 +4816,7 @@ extern "c" fn ZigLlvmAr_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
fn argsCopyZ(alloc: Allocator, args: []const []const u8) ![:null]?[*:0]u8 {
var argv = try alloc.allocSentinel(?[*:0]u8, args.len, null);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
argv[i] = try alloc.dupeZ(u8, arg); // TODO If there was an argsAllocZ we could avoid this allocation.
}
return argv;
diff --git a/src/mingw.zig b/src/mingw.zig
index 4f94e26a98..9e9e180945 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -72,7 +72,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.mingw32_lib => {
var c_source_files: [mingw32_lib_deps.len]Compilation.CSourceFile = undefined;
- for (mingw32_lib_deps) |dep, i| {
+ for (mingw32_lib_deps, 0..) |dep, i| {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-DHAVE_CONFIG_H",
@@ -236,7 +236,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}),
});
var c_source_files: [uuid_src.len]Compilation.CSourceFile = undefined;
- for (uuid_src) |dep, i| {
+ for (uuid_src, 0..) |dep, i| {
c_source_files[i] = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "libsrc", dep,
diff --git a/src/objcopy.zig b/src/objcopy.zig
index 72ff8deafd..31e3d60d0d 100644
--- a/src/objcopy.zig
+++ b/src/objcopy.zig
@@ -312,7 +312,7 @@ const BinaryElfOutput = struct {
std.sort.sort(*BinaryElfSegment, self.segments.items, {}, segmentSortCompare);
- for (self.segments.items) |firstSegment, i| {
+ for (self.segments.items, 0..) |firstSegment, i| {
if (firstSegment.firstSection) |firstSection| {
const diff = firstSection.elfOffset - firstSegment.elfOffset;
diff --git a/src/print_air.zig b/src/print_air.zig
index 133e987285..447af5a9c7 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -68,7 +68,7 @@ const Writer = struct {
indent: usize,
fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void {
- for (w.air.instructions.items(.tag)) |tag, i| {
+ for (w.air.instructions.items(.tag), 0..) |tag, i| {
const inst = @intCast(u32, i);
switch (tag) {
.constant, .const_ty => {
@@ -388,7 +388,7 @@ const Writer = struct {
try w.writeType(s, vector_ty);
try s.writeAll(", [");
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (i != 0) try s.writeAll(", ");
try w.writeOperand(s, inst, i, elem);
}
@@ -682,7 +682,7 @@ const Writer = struct {
const args = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]);
try w.writeOperand(s, inst, 0, pl_op.operand);
try s.writeAll(", [");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try s.writeAll(", ");
try w.writeOperand(s, inst, 1 + i, arg);
}
@@ -743,7 +743,7 @@ const Writer = struct {
if (liveness_condbr.then_deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (liveness_condbr.then_deaths) |operand, i| {
+ for (liveness_condbr.then_deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -756,7 +756,7 @@ const Writer = struct {
if (liveness_condbr.else_deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (liveness_condbr.else_deaths) |operand, i| {
+ for (liveness_condbr.else_deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -790,7 +790,7 @@ const Writer = struct {
extra_index = case.end + case.data.items_len + case_body.len;
try s.writeAll(", [");
- for (items) |item, item_i| {
+ for (items, 0..) |item, item_i| {
if (item_i != 0) try s.writeAll(", ");
try w.writeInstRef(s, item, false);
}
@@ -800,7 +800,7 @@ const Writer = struct {
const deaths = liveness.deaths[case_i];
if (deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (deaths) |operand, i| {
+ for (deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -821,7 +821,7 @@ const Writer = struct {
const deaths = liveness.deaths[liveness.deaths.len - 1];
if (deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (deaths) |operand, i| {
+ for (deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
diff --git a/src/print_targets.zig b/src/print_targets.zig
index 64149d6fba..19518a3368 100644
--- a/src/print_targets.zig
+++ b/src/print_targets.zig
@@ -99,7 +99,7 @@ pub fn cmdTargets(
for (arch.allCpuModels()) |model| {
try jws.objectField(model.name);
try jws.beginArray();
- for (arch.allFeaturesList()) |feature, i| {
+ for (arch.allFeaturesList(), 0..) |feature, i| {
if (model.features.isEnabled(@intCast(u8, i))) {
try jws.arrayElem();
try jws.emitString(feature.name);
@@ -145,7 +145,7 @@ pub fn cmdTargets(
{
try jws.objectField("features");
try jws.beginArray();
- for (native_target.cpu.arch.allFeaturesList()) |feature, i_usize| {
+ for (native_target.cpu.arch.allFeaturesList(), 0..) |feature, i_usize| {
const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize);
if (cpu.features.isEnabled(index)) {
try jws.arrayElem();
diff --git a/src/print_zir.zig b/src/print_zir.zig
index e5ce9321f5..1674bd136e 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -875,7 +875,7 @@ const Writer = struct {
const extra = self.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const args = self.code.refSlice(extra.end, extra.data.operands_len);
try stream.writeAll("{");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -1068,7 +1068,7 @@ const Writer = struct {
const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const operands = self.code.refSlice(extra.end, extended.small);
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, operand);
}
@@ -1392,7 +1392,7 @@ const Writer = struct {
try stream.writeAll("{\n");
self.indent += 2;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
try self.writeDocComment(stream, field.doc_comment_index);
try stream.writeByteNTimes(' ', self.indent);
try self.writeFlag(stream, "comptime ", field.is_comptime);
@@ -1959,7 +1959,7 @@ const Writer = struct {
try stream.writeByteNTimes(' ', self.indent);
if (is_inline) try stream.writeAll("inline ");
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
if (item_i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, item_ref);
}
@@ -2275,7 +2275,7 @@ const Writer = struct {
try self.writeBracedBody(stream, body);
try stream.writeAll(",[");
const args = self.code.refSlice(extra.end, extended.small);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2334,7 +2334,7 @@ const Writer = struct {
try self.writeInstRef(stream, args[0]);
try stream.writeAll("{");
- for (args[1..]) |arg, i| {
+ for (args[1..], 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2349,7 +2349,7 @@ const Writer = struct {
const args = self.code.refSlice(extra.end, extra.data.operands_len);
try stream.writeAll("{");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2369,7 +2369,7 @@ const Writer = struct {
try stream.writeAll(", ");
try stream.writeAll(".{");
- for (elems) |elem, i| {
+ for (elems, 0..) |elem, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, elem);
}
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 0ab9d103b8..2fe0cd2b6a 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -82,7 +82,7 @@ pub fn RegisterManager(
comptime registers: []const Register,
reg: Register,
) ?std.math.IntFittingRange(0, registers.len - 1) {
- inline for (tracked_registers) |cpreg, i| {
+ inline for (tracked_registers, 0..) |cpreg, i| {
if (reg.id() == cpreg.id()) return i;
}
return null;
@@ -153,7 +153,7 @@ pub fn RegisterManager(
regs: [count]Register,
) [count]RegisterLock {
var buf: [count]RegisterLock = undefined;
- for (regs) |reg, i| {
+ for (regs, 0..) |reg, i| {
buf[i] = self.lockRegAssumeUnused(reg);
}
return buf;
@@ -207,7 +207,7 @@ pub fn RegisterManager(
}
assert(i == count);
- for (regs) |reg, j| {
+ for (regs, 0..) |reg, j| {
self.markRegAllocated(reg);
if (insts[j]) |inst| {
diff --git a/src/test.zig b/src/test.zig
index b25a6c1e78..acc1bcdc1f 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -664,7 +664,7 @@ pub const TestContext = struct {
errors: []const []const u8,
) void {
var array = self.updates.allocator.alloc(ErrorMsg, errors.len) catch @panic("out of memory");
- for (errors) |err_msg_line, i| {
+ for (errors, 0..) |err_msg_line, i| {
if (std.mem.startsWith(u8, err_msg_line, "error: ")) {
array[i] = .{
.plain = .{
@@ -1558,7 +1558,7 @@ pub const TestContext = struct {
});
defer comp.destroy();
- update: for (case.updates.items) |update, update_index| {
+ update: for (case.updates.items, 0..) |update, update_index| {
var update_node = root_node.start(update.name, 3);
update_node.activate();
defer update_node.end();
@@ -1631,7 +1631,7 @@ pub const TestContext = struct {
defer notes_to_check.deinit();
for (actual_errors.list) |actual_error| {
- for (case_error_list) |case_msg, i| {
+ for (case_error_list, 0..) |case_msg, i| {
if (handled_errors[i]) continue;
const ex_tag: std.meta.Tag(@TypeOf(case_msg)) = case_msg;
@@ -1702,7 +1702,7 @@ pub const TestContext = struct {
}
}
while (notes_to_check.popOrNull()) |note| {
- for (case_error_list) |case_msg, i| {
+ for (case_error_list, 0..) |case_msg, i| {
const ex_tag: std.meta.Tag(@TypeOf(case_msg)) = case_msg;
switch (note.*) {
.src => |actual_msg| {
@@ -1752,7 +1752,7 @@ pub const TestContext = struct {
}
}
- for (handled_errors) |handled, i| {
+ for (handled_errors, 0..) |handled, i| {
if (!handled) {
print(
"\nExpected error not found:\n{s}\n{}\n{s}",
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 7fd4b93821..5b2b1c2df5 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -1423,7 +1423,7 @@ fn transConvertVectorExpr(
}
const init_list = try c.arena.alloc(Node, num_elements);
- for (init_list) |*init, init_index| {
+ for (init_list, 0..) |*init, init_index| {
const tmp_decl = block_scope.statements.items[init_index];
const name = tmp_decl.castTag(.var_simple).?.data.name;
init.* = try Tag.identifier.create(c.arena, name);
@@ -1454,7 +1454,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE
const init_list = try c.arena.alloc(Node, mask_len);
- for (init_list) |*init, i| {
+ for (init_list, 0..) |*init, i| {
const index_expr = try transExprCoercing(c, scope, expr.getExpr(@intCast(c_uint, i + 2)), .used);
const converted_index = try Tag.helpers_shuffle_vector_index.create(c.arena, .{ .lhs = index_expr, .rhs = vector_len });
init.* = converted_index;
@@ -2686,7 +2686,7 @@ fn transInitListExprArray(
const init_node = if (init_count != 0) blk: {
const init_list = try c.arena.alloc(Node, init_count);
- for (init_list) |*init, i| {
+ for (init_list, 0..) |*init, i| {
const elem_expr = expr.getInit(@intCast(c_uint, i));
init.* = try transExprCoercing(c, scope, elem_expr, .used);
}
@@ -2760,7 +2760,7 @@ fn transInitListExprVector(
}
const init_list = try c.arena.alloc(Node, num_elements);
- for (init_list) |*init, init_index| {
+ for (init_list, 0..) |*init, init_index| {
if (init_index < init_count) {
const tmp_decl = block_scope.statements.items[init_index];
const name = tmp_decl.castTag(.var_simple).?.data.name;
@@ -4649,7 +4649,7 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: Node, proto_alias:
const unwrap_expr = try Tag.unwrap.create(c.arena, init);
const args = try c.arena.alloc(Node, fn_params.items.len);
- for (fn_params.items) |param, i| {
+ for (fn_params.items, 0..) |param, i| {
args[i] = try Tag.identifier.create(c.arena, param.name.?);
}
const call_expr = try Tag.call.create(c.arena, .{
@@ -5293,7 +5293,7 @@ const PatternList = struct {
fn init(allocator: mem.Allocator) Error!PatternList {
const patterns = try allocator.alloc(Pattern, templates.len);
- for (templates) |template, i| {
+ for (templates, 0..) |template, i| {
try patterns[i].init(allocator, template);
}
return PatternList{ .patterns = patterns };
@@ -5778,7 +5778,7 @@ fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!Node {
fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
var source = m.slice();
- for (source) |c, i| {
+ for (source, 0..) |c, i| {
if (c == '\"' or c == '\'') {
source = source[i..];
break;
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 78175a611b..81a19eb39d 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -1765,7 +1765,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
_ = try c.addToken(.l_brace, "{");
var cases = try c.gpa.alloc(NodeIndex, payload.cases.len);
defer c.gpa.free(cases);
- for (payload.cases) |case, i| {
+ for (payload.cases, 0..) |case, i| {
cases[i] = try renderNode(c, case);
_ = try c.addToken(.comma, ",");
}
@@ -1800,7 +1800,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
var items = try c.gpa.alloc(NodeIndex, std.math.max(payload.cases.len, 1));
defer c.gpa.free(items);
items[0] = 0;
- for (payload.cases) |item, i| {
+ for (payload.cases, 0..) |item, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
items[i] = try renderNode(c, item);
}
@@ -1950,7 +1950,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
- for (payload) |init, i| {
+ for (payload, 0..) |init, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
inits[i] = try renderNode(c, init);
}
@@ -1984,7 +1984,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
- for (payload) |init, i| {
+ for (payload, 0..) |init, i| {
_ = try c.addToken(.period, ".");
_ = try c.addIdentifier(init.name);
_ = try c.addToken(.equal, "=");
@@ -2022,7 +2022,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.inits.len, 1));
defer c.gpa.free(inits);
inits[0] = 0;
- for (payload.inits) |init, i| {
+ for (payload.inits, 0..) |init, i| {
_ = try c.addToken(.period, ".");
_ = try c.addIdentifier(init.name);
_ = try c.addToken(.equal, "=");
@@ -2080,7 +2080,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
members[0] = 0;
members[1] = 0;
- for (payload.fields) |field, i| {
+ for (payload.fields, 0..) |field, i| {
const name_tok = try c.addTokenFmt(.identifier, "{s}", .{std.zig.fmtId(field.name)});
_ = try c.addToken(.colon, ":");
const type_expr = try renderNode(c, field.type);
@@ -2116,10 +2116,10 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
});
_ = try c.addToken(.comma, ",");
}
- for (payload.variables) |variable, i| {
+ for (payload.variables, 0..) |variable, i| {
members[payload.fields.len + i] = try renderNode(c, variable);
}
- for (payload.functions) |function, i| {
+ for (payload.functions, 0..) |function, i| {
members[payload.fields.len + num_vars + i] = try renderNode(c, function);
}
_ = try c.addToken(.r_brace, "}");
@@ -2171,7 +2171,7 @@ fn renderArrayInit(c: *Context, lhs: NodeIndex, inits: []const Node) !NodeIndex
var rendered = try c.gpa.alloc(NodeIndex, std.math.max(inits.len, 1));
defer c.gpa.free(rendered);
rendered[0] = 0;
- for (inits) |init, i| {
+ for (inits, 0..) |init, i| {
rendered[i] = try renderNode(c, init);
_ = try c.addToken(.comma, ",");
}
@@ -2539,7 +2539,7 @@ fn renderCall(c: *Context, lhs: NodeIndex, args: []const Node) !NodeIndex {
var rendered = try c.gpa.alloc(NodeIndex, args.len);
defer c.gpa.free(rendered);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
rendered[i] = try renderNode(c, arg);
}
@@ -2879,7 +2879,7 @@ fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.Ar
var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1));
errdefer rendered.deinit();
- for (params) |param, i| {
+ for (params, 0..) |param, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
if (param.is_noalias) _ = try c.addToken(.keyword_noalias, "noalias");
if (param.name) |some| {
diff --git a/src/type.zig b/src/type.zig
index 6226a7f2f7..ec4db8689f 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -628,7 +628,7 @@ pub const Type = extern union {
const a_set = a.errorSetNames();
const b_set = b.errorSetNames();
if (a_set.len != b_set.len) return false;
- for (a_set) |a_item, i| {
+ for (a_set, 0..) |a_item, i| {
const b_item = b_set[i];
if (!std.mem.eql(u8, a_item, b_item)) return false;
}
@@ -675,7 +675,7 @@ pub const Type = extern union {
if (a_info.param_types.len != b_info.param_types.len)
return false;
- for (a_info.param_types) |a_param_ty, i| {
+ for (a_info.param_types, 0..) |a_param_ty, i| {
const b_param_ty = b_info.param_types[i];
if (a_info.comptime_params[i] != b_info.comptime_params[i])
return false;
@@ -824,12 +824,12 @@ pub const Type = extern union {
if (a_tuple.types.len != b_tuple.types.len) return false;
- for (a_tuple.types) |a_ty, i| {
+ for (a_tuple.types, 0..) |a_ty, i| {
const b_ty = b_tuple.types[i];
if (!eql(a_ty, b_ty, mod)) return false;
}
- for (a_tuple.values) |a_val, i| {
+ for (a_tuple.values, 0..) |a_val, i| {
const ty = a_tuple.types[i];
const b_val = b_tuple.values[i];
if (a_val.tag() == .unreachable_value) {
@@ -855,17 +855,17 @@ pub const Type = extern union {
if (a_struct_obj.types.len != b_struct_obj.types.len) return false;
- for (a_struct_obj.names) |a_name, i| {
+ for (a_struct_obj.names, 0..) |a_name, i| {
const b_name = b_struct_obj.names[i];
if (!std.mem.eql(u8, a_name, b_name)) return false;
}
- for (a_struct_obj.types) |a_ty, i| {
+ for (a_struct_obj.types, 0..) |a_ty, i| {
const b_ty = b_struct_obj.types[i];
if (!eql(a_ty, b_ty, mod)) return false;
}
- for (a_struct_obj.values) |a_val, i| {
+ for (a_struct_obj.values, 0..) |a_val, i| {
const ty = a_struct_obj.types[i];
const b_val = b_struct_obj.values[i];
if (a_val.tag() == .unreachable_value) {
@@ -1073,7 +1073,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, fn_info.noalias_bits);
std.hash.autoHash(hasher, fn_info.param_types.len);
- for (fn_info.param_types) |param_ty, i| {
+ for (fn_info.param_types, 0..) |param_ty, i| {
std.hash.autoHash(hasher, fn_info.paramIsComptime(i));
if (param_ty.tag() == .generic_poison) continue;
hashWithHasher(param_ty, hasher, mod);
@@ -1175,7 +1175,7 @@ pub const Type = extern union {
const tuple = ty.tupleFields();
std.hash.autoHash(hasher, tuple.types.len);
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
hashWithHasher(field_ty, hasher, mod);
const field_val = tuple.values[i];
if (field_val.tag() == .unreachable_value) continue;
@@ -1187,7 +1187,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, std.builtin.TypeId.Struct);
std.hash.autoHash(hasher, struct_obj.types.len);
- for (struct_obj.types) |field_ty, i| {
+ for (struct_obj.types, 0..) |field_ty, i| {
const field_name = struct_obj.names[i];
const field_val = struct_obj.values[i];
hasher.update(field_name);
@@ -1403,10 +1403,10 @@ pub const Type = extern union {
const payload = self.castTag(.tuple).?.data;
const types = try allocator.alloc(Type, payload.types.len);
const values = try allocator.alloc(Value, payload.values.len);
- for (payload.types) |ty, i| {
+ for (payload.types, 0..) |ty, i| {
types[i] = try ty.copy(allocator);
}
- for (payload.values) |val, i| {
+ for (payload.values, 0..) |val, i| {
values[i] = try val.copy(allocator);
}
return Tag.tuple.create(allocator, .{
@@ -1419,13 +1419,13 @@ pub const Type = extern union {
const names = try allocator.alloc([]const u8, payload.names.len);
const types = try allocator.alloc(Type, payload.types.len);
const values = try allocator.alloc(Value, payload.values.len);
- for (payload.names) |name, i| {
+ for (payload.names, 0..) |name, i| {
names[i] = try allocator.dupe(u8, name);
}
- for (payload.types) |ty, i| {
+ for (payload.types, 0..) |ty, i| {
types[i] = try ty.copy(allocator);
}
- for (payload.values) |val, i| {
+ for (payload.values, 0..) |val, i| {
values[i] = try val.copy(allocator);
}
return Tag.anon_struct.create(allocator, .{
@@ -1437,7 +1437,7 @@ pub const Type = extern union {
.function => {
const payload = self.castTag(.function).?.data;
const param_types = try allocator.alloc(Type, payload.param_types.len);
- for (payload.param_types) |param_ty, i| {
+ for (payload.param_types, 0..) |param_ty, i| {
param_types[i] = try param_ty.copy(allocator);
}
const other_comptime_params = payload.comptime_params[0..payload.param_types.len];
@@ -1678,7 +1678,7 @@ pub const Type = extern union {
.function => {
const payload = ty.castTag(.function).?.data;
try writer.writeAll("fn(");
- for (payload.param_types) |param_type, i| {
+ for (payload.param_types, 0..) |param_type, i| {
if (i != 0) try writer.writeAll(", ");
try param_type.dump("", .{}, writer);
}
@@ -1739,7 +1739,7 @@ pub const Type = extern union {
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
try writer.writeAll("tuple{");
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = tuple.values[i];
if (val.tag() != .unreachable_value) {
@@ -1756,7 +1756,7 @@ pub const Type = extern union {
.anon_struct => {
const anon_struct = ty.castTag(.anon_struct).?.data;
try writer.writeAll("struct{");
- for (anon_struct.types) |field_ty, i| {
+ for (anon_struct.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = anon_struct.values[i];
if (val.tag() != .unreachable_value) {
@@ -1892,7 +1892,7 @@ pub const Type = extern union {
.error_set => {
const names = ty.castTag(.error_set).?.data.names.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -1908,7 +1908,7 @@ pub const Type = extern union {
.error_set_merged => {
const names = ty.castTag(.error_set_merged).?.data.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2063,7 +2063,7 @@ pub const Type = extern union {
.function => {
const fn_info = ty.fnInfo();
try writer.writeAll("fn(");
- for (fn_info.param_types) |param_ty, i| {
+ for (fn_info.param_types, 0..) |param_ty, i| {
if (i != 0) try writer.writeAll(", ");
if (fn_info.paramIsComptime(i)) {
try writer.writeAll("comptime ");
@@ -2137,7 +2137,7 @@ pub const Type = extern union {
const tuple = ty.castTag(.tuple).?.data;
try writer.writeAll("tuple{");
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = tuple.values[i];
if (val.tag() != .unreachable_value) {
@@ -2154,7 +2154,7 @@ pub const Type = extern union {
const anon_struct = ty.castTag(.anon_struct).?.data;
try writer.writeAll("struct{");
- for (anon_struct.types) |field_ty, i| {
+ for (anon_struct.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = anon_struct.values[i];
if (val.tag() != .unreachable_value) {
@@ -2253,7 +2253,7 @@ pub const Type = extern union {
.error_set => {
const names = ty.castTag(.error_set).?.data.names.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2266,7 +2266,7 @@ pub const Type = extern union {
.error_set_merged => {
const names = ty.castTag(.error_set_merged).?.data.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2568,7 +2568,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true;
@@ -3125,7 +3125,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (!(field_ty.hasRuntimeBits())) continue;
@@ -5044,7 +5044,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.values) |val, i| {
+ for (tuple.values, 0..) |val, i| {
const is_comptime = val.tag() != .unreachable_value;
if (is_comptime) continue;
if (tuple.types[i].onePossibleValue() != null) continue;
@@ -5256,7 +5256,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and field_ty.comptimeOnly()) return true;
}
@@ -5753,7 +5753,7 @@ pub const Type = extern union {
var bit_offset: u16 = undefined;
var elem_size_bits: u16 = undefined;
var running_bits: u16 = 0;
- for (struct_obj.fields.values()) |f, i| {
+ for (struct_obj.fields.values(), 0..) |f, i| {
if (!f.ty.hasRuntimeBits()) continue;
const field_bits = @intCast(u16, f.ty.bitSize(target));
@@ -5834,7 +5834,7 @@ pub const Type = extern union {
var offset: u64 = 0;
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) {
// comptime field
diff --git a/src/value.zig b/src/value.zig
index 98842a4ca7..0d80bf7927 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -614,7 +614,7 @@ pub const Value = extern union {
.base = payload.base,
.data = try arena.alloc(Value, payload.data.len),
};
- for (new_payload.data) |*elem, i| {
+ for (new_payload.data, 0..) |*elem, i| {
elem.* = try payload.data[i].copy(arena);
}
return Value{ .ptr_otherwise = &new_payload.base };
@@ -891,7 +891,7 @@ pub const Value = extern union {
fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
const result = try allocator.alloc(u8, @intCast(usize, len));
var elem_value_buf: ElemValueBuffer = undefined;
- for (result) |*elem, i| {
+ for (result, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget()));
}
@@ -1282,7 +1282,7 @@ pub const Value = extern union {
.int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
else => unreachable,
};
- for (buffer[0..byte_count]) |_, i| switch (endian) {
+ for (buffer[0..byte_count], 0..) |_, i| switch (endian) {
.Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
.Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
};
@@ -1324,7 +1324,7 @@ pub const Value = extern union {
.Extern => {
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
}
@@ -1431,7 +1431,7 @@ pub const Value = extern union {
var bits: u16 = 0;
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target));
field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits;
@@ -1529,7 +1529,7 @@ pub const Value = extern union {
.Extern => {
const fields = ty.structFields().values();
const field_vals = try arena.alloc(Value, fields.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
const sz = @intCast(usize, ty.structFieldType(i).abiSize(target));
field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
@@ -1617,7 +1617,7 @@ pub const Value = extern union {
var bits: u16 = 0;
const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
- for (elems) |_, i| {
+ for (elems, 0..) |_, i| {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena);
@@ -1632,7 +1632,7 @@ pub const Value = extern union {
var bits: u16 = 0;
const fields = ty.structFields().values();
const field_vals = try arena.alloc(Value, fields.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target));
field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena);
bits += field_bits;
@@ -2259,7 +2259,7 @@ pub const Value = extern union {
if (ty.isSimpleTupleOrAnonStruct()) {
const types = ty.tupleFields().types;
assert(types.len == a_field_vals.len);
- for (types) |field_ty, i| {
+ for (types, 0..) |field_ty, i| {
if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, opt_sema))) {
return false;
}
@@ -2270,7 +2270,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Struct) {
const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
return false;
}
@@ -2279,7 +2279,7 @@ pub const Value = extern union {
}
const elem_ty = ty.childType();
- for (a_field_vals) |a_elem, i| {
+ for (a_field_vals, 0..) |a_elem, i| {
const b_elem = b_field_vals[i];
if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, opt_sema))) {
@@ -2526,7 +2526,7 @@ pub const Value = extern union {
.empty_struct_value => {},
.aggregate => {
const field_values = val.castTag(.aggregate).?.data;
- for (field_values) |field_val, i| {
+ for (field_values, 0..) |field_val, i| {
const field_ty = ty.structFieldType(i);
field_val.hash(field_ty, hasher, mod);
}
@@ -3228,7 +3228,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (int_ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, int_ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema);
@@ -3341,7 +3341,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3390,7 +3390,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3439,7 +3439,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try arena.alloc(Value, ty.vectorLen());
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3501,7 +3501,7 @@ pub const Value = extern union {
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3546,7 +3546,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3622,7 +3622,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target);
@@ -3661,7 +3661,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3697,7 +3697,7 @@ pub const Value = extern union {
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3728,7 +3728,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3764,7 +3764,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3800,7 +3800,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3841,7 +3841,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3882,7 +3882,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3958,7 +3958,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4005,7 +4005,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4052,7 +4052,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4089,7 +4089,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target);
@@ -4111,7 +4111,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
var bits_buf: Value.ElemValueBuffer = undefined;
@@ -4143,7 +4143,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4185,7 +4185,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try allocator.alloc(Value, ty.vectorLen());
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4243,7 +4243,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4291,7 +4291,7 @@ pub const Value = extern union {
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4320,7 +4320,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4372,7 +4372,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4408,7 +4408,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4467,7 +4467,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4526,7 +4526,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4585,7 +4585,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4638,7 +4638,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4678,7 +4678,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4718,7 +4718,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4758,7 +4758,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4798,7 +4798,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4838,7 +4838,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4878,7 +4878,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4918,7 +4918,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4958,7 +4958,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4998,7 +4998,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5038,7 +5038,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5078,7 +5078,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5118,7 +5118,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5158,7 +5158,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5205,7 +5205,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var mulend1_buf: Value.ElemValueBuffer = undefined;
const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);
var mulend2_buf: Value.ElemValueBuffer = undefined;
diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig
index 426276122c..31dbbb1911 100644
--- a/tools/gen_spirv_spec.zig
+++ b/tools/gen_spirv_spec.zig
@@ -251,7 +251,7 @@ fn renderEnumerant(writer: anytype, enumerant: g.Enumerant) !void {
.int => |int| try writer.print("{}", .{int}),
}
try writer.writeAll(", .parameters = &[_]OperandKind{");
- for (enumerant.parameters) |param, i| {
+ for (enumerant.parameters, 0..) |param, i| {
if (i != 0)
try writer.writeAll(", ");
// Note, param.quantifier will always be one.
@@ -272,7 +272,7 @@ fn renderOpcodes(
var aliases = std.ArrayList(struct { inst: usize, alias: usize }).init(allocator);
try aliases.ensureTotalCapacity(instructions.len);
- for (instructions) |inst, i| {
+ for (instructions, 0..) |inst, i| {
if (std.mem.eql(u8, inst.class.?, "@exclude")) {
continue;
}
@@ -397,7 +397,7 @@ fn renderValueEnum(
var aliases = std.ArrayList(struct { enumerant: usize, alias: usize }).init(allocator);
try aliases.ensureTotalCapacity(enumerants.len);
- for (enumerants) |enumerant, i| {
+ for (enumerants, 0..) |enumerant, i| {
const result = enum_map.getOrPutAssumeCapacity(enumerant.value.int);
if (!result.found_existing) {
result.value_ptr.* = i;
@@ -468,7 +468,7 @@ fn renderBitEnum(
var aliases = std.ArrayList(struct { flag: usize, alias: u5 }).init(allocator);
try aliases.ensureTotalCapacity(enumerants.len);
- for (enumerants) |enumerant, i| {
+ for (enumerants, 0..) |enumerant, i| {
if (enumerant.value != .bitflag) return error.InvalidRegistry;
const value = try parseHexInt(enumerant.value.bitflag);
if (value == 0) {
@@ -494,7 +494,7 @@ fn renderBitEnum(
}
}
- for (flags_by_bitpos) |maybe_flag_index, bitpos| {
+ for (flags_by_bitpos, 0..) |maybe_flag_index, bitpos| {
if (maybe_flag_index) |flag_index| {
try writer.print("{}", .{std.zig.fmtId(enumerants[flag_index].enumerant)});
} else {
@@ -521,7 +521,7 @@ fn renderBitEnum(
try writer.print("\npub const Extended = struct {{\n", .{});
- for (flags_by_bitpos) |maybe_flag_index, bitpos| {
+ for (flags_by_bitpos, 0..) |maybe_flag_index, bitpos| {
const flag_index = maybe_flag_index orelse {
try writer.print("_reserved_bit_{}: bool = false,\n", .{bitpos});
continue;
@@ -570,7 +570,7 @@ fn renderOperand(
try writer.writeAll("struct{");
- for (parameters) |param, j| {
+ for (parameters, 0..) |param, j| {
if (j != 0) {
try writer.writeAll(", ");
}
@@ -642,7 +642,7 @@ fn renderFieldName(writer: anytype, operands: []const g.Operand, field_index: us
// Translate to snake case.
name_buffer.len = 0;
- for (operand.kind) |c, i| {
+ for (operand.kind, 0..) |c, i| {
switch (c) {
'a'...'z', '0'...'9' => try name_buffer.append(c),
'A'...'Z' => if (i > 0 and std.ascii.isLower(operand.kind[i - 1])) {
@@ -658,7 +658,7 @@ fn renderFieldName(writer: anytype, operands: []const g.Operand, field_index: us
// For fields derived from type name, there could be any amount.
// Simply check against all other fields, and if another similar one exists, add a number.
- const need_extra_index = for (operands) |other_operand, i| {
+ const need_extra_index = for (operands, 0..) |other_operand, i| {
if (i != field_index and std.mem.eql(u8, operand.kind, other_operand.kind)) {
break true;
}
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index 89d7a40228..bc2637e197 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -45,7 +45,7 @@ const MultiSym = struct {
visib: elf.STV,
fn allPresent(ms: MultiSym) bool {
- for (arches) |_, i| {
+ for (arches, 0..) |_, i| {
if (!ms.present[i]) {
return false;
}
@@ -65,7 +65,7 @@ const MultiSym = struct {
fn commonSize(ms: MultiSym) ?u64 {
var size: ?u64 = null;
- for (arches) |_, i| {
+ for (arches, 0..) |_, i| {
if (!ms.present[i]) continue;
if (size) |s| {
if (ms.size[i] != s) {
@@ -80,7 +80,7 @@ const MultiSym = struct {
fn commonBinding(ms: MultiSym) ?u4 {
var binding: ?u4 = null;
- for (arches) |_, i| {
+ for (arches, 0..) |_, i| {
if (!ms.present[i]) continue;
if (binding) |b| {
if (ms.binding[i] != b) {
@@ -268,7 +268,7 @@ pub fn main() !void {
var prev_section: u16 = std.math.maxInt(u16);
var prev_pp_state: enum { none, ptr32, special } = .none;
- for (sym_table.values()) |multi_sym, sym_index| {
+ for (sym_table.values(), 0..) |multi_sym, sym_index| {
const name = sym_table.keys()[sym_index];
if (multi_sym.section != prev_section) {
@@ -309,7 +309,7 @@ pub fn main() !void {
var first = true;
try stdout.writeAll("#if ");
- for (arches) |arch, i| {
+ for (arches, 0..) |arch, i| {
if (multi_sym.present[i]) continue;
if (!first) try stdout.writeAll(" && ");
@@ -333,7 +333,7 @@ pub fn main() !void {
} else if (multi_sym.isWeak64()) {
try stdout.print("WEAK64 {s}\n", .{name});
} else {
- for (arches) |arch, i| {
+ for (arches, 0..) |arch, i| {
log.info("symbol '{s}' binding on {s}: {d}", .{
name, @tagName(arch), multi_sym.binding[i],
});
@@ -355,7 +355,7 @@ pub fn main() !void {
} else if (multi_sym.isPtr2Size()) {
try stdout.print(".size {s}, PTR2_SIZE_BYTES\n", .{name});
} else {
- for (arches) |arch, i| {
+ for (arches, 0..) |arch, i| {
log.info("symbol '{s}' size on {s}: {d}", .{
name, @tagName(arch), multi_sym.size[i],
});
@@ -415,7 +415,7 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian)
// Find the offset of the dynamic symbol table.
var dynsym_index: u16 = 0;
- for (shdrs) |shdr, i| {
+ for (shdrs, 0..) |shdr, i| {
const sh_name = try arena.dupe(u8, mem.sliceTo(shstrtab[s(shdr.sh_name)..], 0));
log.debug("found section: {s}", .{sh_name});
if (mem.eql(u8, sh_name, ".dynsym")) {
@@ -566,7 +566,7 @@ fn archIndex(arch: std.Target.Cpu.Arch) u8 {
}
fn archSetName(arch_set: [arches.len]bool) []const u8 {
- for (arches) |arch, i| {
+ for (arches, 0..) |arch, i| {
if (arch_set[i]) {
return @tagName(arch);
}
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 85558576df..a1719c5ab6 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -573,7 +573,7 @@ pub fn main() anyerror!void {
const Feature = @field(cpu_targets, decl.name).Feature;
const all_features = @field(cpu_targets, decl.name).all_features;
- for (all_features) |feat, i| {
+ for (all_features, 0..) |feat, i| {
const llvm_name = feat.llvm_name orelse continue;
const zig_feat = @intToEnum(Feature, i);
const zig_name = @tagName(zig_feat);
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index ee92d63227..3fef468380 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -899,7 +899,7 @@ pub fn main() anyerror!void {
}
} else {
var threads = try arena.alloc(std.Thread, llvm_targets.len);
- for (llvm_targets) |llvm_target, i| {
+ for (llvm_targets, 0..) |llvm_target, i| {
const job = Job{
.llvm_tblgen_exe = llvm_tblgen_exe,
.llvm_src_root = llvm_src_root,
@@ -1226,7 +1226,7 @@ fn processOneTarget(job: Job) anyerror!void {
}
try w.writeAll(
\\ const ti = @typeInfo(Feature);
- \\ for (result) |*elem, i| {
+ \\ for (&result, 0..) |*elem, i| {
\\ elem.index = i;
\\ elem.name = ti.Enum.fields[i].name;
\\ }
diff --git a/tools/update_crc_catalog.zig b/tools/update_crc_catalog.zig
index 8182e8d810..034b7afc9d 100644
--- a/tools/update_crc_catalog.zig
+++ b/tools/update_crc_catalog.zig
@@ -116,7 +116,7 @@ pub fn main() anyerror!void {
defer buf.deinit();
var prev: u8 = 0;
- for (snakecase) |c, i| {
+ for (snakecase, 0..) |c, i| {
if (c == '_') {
// do nothing
} else if (i == 0) {
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index a7eb3c18f9..bb859ed5b4 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -130,7 +130,7 @@ pub fn main() !void {
\\
);
- for (versions) |ver, i| {
+ for (versions, 0..) |ver, i| {
try w.print(
\\ result[@enumToInt(Feature.v{0}_{1})] = .{{
\\ .llvm_name = null,
@@ -203,7 +203,7 @@ pub fn main() !void {
try w.writeAll(
\\ const ti = @typeInfo(Feature);
- \\ for (result) |*elem, i| {
+ \\ for (&result, 0..) |*elem, i| {
\\ elem.index = i;
\\ elem.name = ti.Enum.fields[i].name;
\\ }
From a005b5f198827b15ee069839fb9dbb80e66daa06 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 09:24:25 -0700
Subject: [PATCH 15/36] add zig fmt test for upgrading for loop syntax
---
lib/std/zig/parser_test.zig | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 4346ee6286..1afc0e2e18 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -1,3 +1,23 @@
+// TODO: remove this after zig 0.11.0 is released
+test "zig fmt: transform old for loop syntax to new" {
+ try testTransform(
+ \\fn foo() void {
+ \\ for (a) |b, i| {
+ \\ _ = b; _ = i;
+ \\ }
+ \\}
+ \\
+ ,
+ \\fn foo() void {
+ \\ for (a, 0..) |b, i| {
+ \\ _ = b;
+ \\ _ = i;
+ \\ }
+ \\}
+ \\
+ );
+}
+
test "zig fmt: tuple struct" {
try testCanonical(
\\const T = struct {
From 552e8095ae62654d0ba7dffbc8e0e1dfb6499c9d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 10:26:46 -0700
Subject: [PATCH 16/36] update docgen to new for loop syntax
---
doc/docgen.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 82fafe2b64..fae513f8c3 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -239,7 +239,7 @@ const Tokenizer = struct {
.line_start = 0,
.line_end = 0,
};
- for (self.buffer) |c, i| {
+ for (self.buffer, 0..) |c, i| {
if (i == token.start) {
loc.line_end = i;
while (loc.line_end < self.buffer.len and self.buffer[loc.line_end] != '\n') : (loc.line_end += 1) {}
From f2a6a1756bd3d3e284410c001ee997c4d12a260b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 10:27:08 -0700
Subject: [PATCH 17/36] Sema: fix for loops with comptime-known int ranges
---
src/Sema.zig | 6 +++++-
test/behavior/for.zig | 12 ++++++++++++
2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index fde9072d71..980aee720b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3924,7 +3924,11 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const object_ty = sema.typeOf(object);
// Each arg could be an indexable, or a range, in which case the length
// is passed directly as an integer.
- const arg_len = if (object_ty.zigTypeTag() == .Int) object else l: {
+ const is_int = switch (object_ty.zigTypeTag()) {
+ .Int, .ComptimeInt => true,
+ else => false,
+ };
+ const arg_len = if (is_int) object else l: {
try checkIndexable(sema, block, src, object_ty);
if (!object_ty.indexableHasLen()) continue;
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index c9ae2f4461..20788f7269 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -249,3 +249,15 @@ test "for loop with else branch" {
try expect(q == 4);
}
}
+
+test "count over fixed range" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ var sum: usize = 0;
+ for (0..6) |i| {
+ sum += i;
+ }
+
+ try expect(sum == 15);
+}
From b6a5e52decf4494d9506caa929139b630ce08dea Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 10:30:48 -0700
Subject: [PATCH 18/36] add passing for loop test: two counters
---
test/behavior/for.zig | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 20788f7269..4704df8bd2 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -261,3 +261,16 @@ test "count over fixed range" {
try expect(sum == 15);
}
+
+test "two counters" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ var sum: usize = 0;
+ for (0..10, 10..20) |i, j| {
+ sum += 1;
+ try expect(i + 10 == j);
+ }
+
+ try expect(sum == 10);
+}
From 8c96d0dddddfb4cd597661ff47551fcaf67cbf39 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 11:44:38 -0700
Subject: [PATCH 19/36] update test-cases for new for loop syntax
---
test/cases/compile_errors/invalid_pointer_for_var_type.zig | 2 +-
.../underscore_should_not_be_usable_inside_for.zig | 4 ++--
test/cases/variable_shadowing.3.zig | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/test/cases/compile_errors/invalid_pointer_for_var_type.zig b/test/cases/compile_errors/invalid_pointer_for_var_type.zig
index ee193bab15..d94c7bd3b8 100644
--- a/test/cases/compile_errors/invalid_pointer_for_var_type.zig
+++ b/test/cases/compile_errors/invalid_pointer_for_var_type.zig
@@ -1,7 +1,7 @@
extern fn ext() usize;
var bytes: [ext()]u8 = undefined;
export fn f() void {
- for (bytes) |*b, i| {
+ for (&bytes, 0..) |*b, i| {
b.* = @as(u8, i);
}
}
diff --git a/test/cases/compile_errors/underscore_should_not_be_usable_inside_for.zig b/test/cases/compile_errors/underscore_should_not_be_usable_inside_for.zig
index 1fb79e11bd..b527d0d17e 100644
--- a/test/cases/compile_errors/underscore_should_not_be_usable_inside_for.zig
+++ b/test/cases/compile_errors/underscore_should_not_be_usable_inside_for.zig
@@ -1,6 +1,6 @@
export fn returns() void {
- for ([_]void{}) |_, i| {
- for ([_]void{}) |_, j| {
+ for ([_]void{}, 0..) |_, i| {
+ for ([_]void{}, 0..) |_, j| {
return _;
}
}
diff --git a/test/cases/variable_shadowing.3.zig b/test/cases/variable_shadowing.3.zig
index 1e22ccf123..3d899e72cc 100644
--- a/test/cases/variable_shadowing.3.zig
+++ b/test/cases/variable_shadowing.3.zig
@@ -1,10 +1,10 @@
pub fn main() void {
var i = 0;
- for ("n") |_, i| {
+ for ("n", 0..) |_, i| {
}
}
// error
//
-// :3:19: error: loop index capture 'i' shadows local variable from outer scope
+// :3:24: error: capture 'i' shadows local variable from outer scope
// :2:9: note: previous declaration here
From 209e83f3950e8cb0daca389a4fa7cd79fe60394b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 11:56:05 -0700
Subject: [PATCH 20/36] AstGen: fix ZIR for for loops accessing instruction out
of block
---
src/AstGen.zig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 9b96b16677..b4fda8e274 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -6413,6 +6413,8 @@ fn forExpr(
defer loop_scope.unstack();
defer loop_scope.labeled_breaks.deinit(gpa);
+ const index = try loop_scope.addUnNode(.load, index_ptr, node);
+
var cond_scope = parent_gz.makeSubBlock(&loop_scope.base);
defer cond_scope.unstack();
@@ -6420,7 +6422,6 @@ fn forExpr(
if (!any_len_checks) {
return astgen.failNode(node, "TODO: handle infinite for loop", .{});
}
- const index = try cond_scope.addUnNode(.load, index_ptr, node);
const cond = try cond_scope.addPlNode(.cmp_lt, node, Zir.Inst.Bin{
.lhs = index,
.rhs = len,
@@ -10695,7 +10696,6 @@ const Scope = struct {
@"function parameter",
@"local constant",
@"local variable",
- @"loop index capture",
@"switch tag capture",
capture,
};
From b13745ac03195c87d9efec2b12f564d4d3cbd477 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 11:56:32 -0700
Subject: [PATCH 21/36] disable failing x86_64 backend tests
---
test/behavior/for.zig | 2 ++
test/cases/llvm/for_loop.zig | 2 +-
test/tests.zig | 16 ++++++++--------
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 4704df8bd2..67ea5df808 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -253,6 +253,7 @@ test "for loop with else branch" {
test "count over fixed range" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var sum: usize = 0;
for (0..6) |i| {
@@ -265,6 +266,7 @@ test "count over fixed range" {
test "two counters" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var sum: usize = 0;
for (0..10, 10..20) |i, j| {
diff --git a/test/cases/llvm/for_loop.zig b/test/cases/llvm/for_loop.zig
index e7e701aafa..e48f2edd71 100644
--- a/test/cases/llvm/for_loop.zig
+++ b/test/cases/llvm/for_loop.zig
@@ -11,6 +11,6 @@ pub fn main() void {
}
// run
-// backend=stage2,llvm
+// backend=llvm
// target=x86_64-linux,x86_64-macos
//
diff --git a/test/tests.zig b/test/tests.zig
index 035311372f..851de9f2a6 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -58,14 +58,14 @@ const test_targets = blk: {
.link_libc = true,
.backend = .stage2_c,
},
- .{
- .target = .{
- .cpu_arch = .x86_64,
- .os_tag = .linux,
- .abi = .none,
- },
- .backend = .stage2_x86_64,
- },
+ //.{
+ // .target = .{
+ // .cpu_arch = .x86_64,
+ // .os_tag = .linux,
+ // .abi = .none,
+ // },
+ // .backend = .stage2_x86_64,
+ //},
.{
.target = .{
.cpu_arch = .aarch64,
From 22965e6fcbafbcba207a6da8eb493af2cf7ef924 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 12:26:22 -0700
Subject: [PATCH 22/36] Sema: improve error message for mismatched for loop
lengths
---
src/Module.zig | 16 ++++++++++++++++
src/Sema.zig | 27 +++++++++++++++++++++++----
test/cases/compile_errors/for.zig | 13 +++++++++++++
3 files changed, 52 insertions(+), 4 deletions(-)
create mode 100644 test/cases/compile_errors/for.zig
diff --git a/src/Module.zig b/src/Module.zig
index 4feb04abdd..377ccd2441 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -2462,6 +2462,13 @@ pub const SrcLoc = struct {
};
return nodeToSpan(tree, src_node);
},
+ .for_input => |for_input| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const node = src_loc.declRelativeToNodeIndex(for_input.for_node_offset);
+ const for_full = tree.fullFor(node).?;
+ const src_node = for_full.ast.inputs[for_input.input_index];
+ return nodeToSpan(tree, src_node);
+ },
.node_offset_bin_lhs => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node = src_loc.declRelativeToNodeIndex(node_off);
@@ -3114,6 +3121,14 @@ pub const LazySrcLoc = union(enum) {
/// The source location points to the RHS of an assignment.
/// The Decl is determined contextually.
node_offset_store_operand: i32,
+ /// The source location points to a for loop input.
+ /// The Decl is determined contextually.
+ for_input: struct {
+ /// Points to the for loop AST node.
+ for_node_offset: i32,
+ /// Picks one of the inputs from the condition.
+ input_index: u32,
+ },
pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
@@ -3200,6 +3215,7 @@ pub const LazySrcLoc = union(enum) {
.node_offset_init_ty,
.node_offset_store_ptr,
.node_offset_store_operand,
+ .for_input,
=> .{
.file_scope = decl.getFileScope(),
.parent_decl_node = decl.src_node,
diff --git a/src/Sema.zig b/src/Sema.zig
index 980aee720b..5a185a709c 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3910,14 +3910,15 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
var len: Air.Inst.Ref = .none;
var len_val: ?Value = null;
- var len_idx: usize = undefined;
+ var len_idx: u32 = undefined;
var any_runtime = false;
const runtime_arg_lens = try gpa.alloc(Air.Inst.Ref, args.len);
defer gpa.free(runtime_arg_lens);
// First pass to look for comptime values.
- for (args, 0..) |zir_arg, i| {
+ for (args, 0..) |zir_arg, i_usize| {
+ const i = @intCast(u32, i_usize);
runtime_arg_lens[i] = .none;
if (zir_arg == .none) continue;
const object = try sema.resolveInst(zir_arg);
@@ -3941,8 +3942,26 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| {
if (len_val) |v| {
if (!(try sema.valuesEqual(arg_val, v, Type.usize))) {
- // TODO error notes for each arg stating the differing values
- return sema.fail(block, src, "non-matching for loop lengths", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "non-matching for loop lengths", .{});
+ errdefer msg.destroy(gpa);
+ const a_src: LazySrcLoc = .{ .for_input = .{
+ .for_node_offset = inst_data.src_node,
+ .input_index = len_idx,
+ } };
+ const b_src: LazySrcLoc = .{ .for_input = .{
+ .for_node_offset = inst_data.src_node,
+ .input_index = i,
+ } };
+ try sema.errNote(block, a_src, msg, "length {} here", .{
+ v.fmtValue(Type.usize, sema.mod),
+ });
+ try sema.errNote(block, b_src, msg, "length {} here", .{
+ arg_val.fmtValue(Type.usize, sema.mod),
+ });
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
} else {
len = arg_len;
diff --git a/test/cases/compile_errors/for.zig b/test/cases/compile_errors/for.zig
new file mode 100644
index 0000000000..999782c991
--- /dev/null
+++ b/test/cases/compile_errors/for.zig
@@ -0,0 +1,13 @@
+export fn a() void {
+ for (0..10, 10..21) |i, j| {
+ _ = i; _ = j;
+ }
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:5: error: non-matching for loop lengths
+// :2:11: note: length 10 here
+// :2:19: note: length 11 here
From bcb72401d3cf01c190a346af9c9d8eec4a334b45 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 13:04:27 -0700
Subject: [PATCH 23/36] AstGen: add error for discard of unbounded counter
---
src/AstGen.zig | 7 +-
test/behavior/for.zig | 100 ++++++++++++++++++
.../compile_errors/for_discard_unbounded.zig | 10 ++
test/cases/compile_errors/for_empty.zig | 11 ++
.../compile_errors/for_extra_capture.zig | 12 +++
.../compile_errors/for_extra_condition.zig | 11 ++
6 files changed, 150 insertions(+), 1 deletion(-)
create mode 100644 test/cases/compile_errors/for_discard_unbounded.zig
create mode 100644 test/cases/compile_errors/for_empty.zig
create mode 100644 test/cases/compile_errors/for_extra_capture.zig
create mode 100644 test/cases/compile_errors/for_extra_condition.zig
diff --git a/src/AstGen.zig b/src/AstGen.zig
index b4fda8e274..866dce02e5 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -6346,8 +6346,9 @@ fn forExpr(
const i = @intCast(u32, i_usize);
const capture_is_ref = token_tags[capture_token] == .asterisk;
const ident_tok = capture_token + @boolToInt(capture_is_ref);
+ const is_discard = mem.eql(u8, tree.tokenSlice(ident_tok), "_");
- if (mem.eql(u8, tree.tokenSlice(ident_tok), "_") and capture_is_ref) {
+ if (is_discard and capture_is_ref) {
return astgen.failTok(capture_token, "pointer modifier invalid on discard", .{});
}
// Skip over the comma, and on to the next capture (or the ending pipe character).
@@ -6367,6 +6368,10 @@ fn forExpr(
else
.none;
+ if (end_val == .none and is_discard) {
+ return astgen.failTok(ident_tok, "discard of unbounded counter", .{});
+ }
+
const start_is_zero = nodeIsTriviallyZero(tree, start_node);
const range_len = if (end_val == .none or start_is_zero)
end_val
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 67ea5df808..e3c4a8dcc0 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -276,3 +276,103 @@ test "two counters" {
try expect(sum == 10);
}
+
+test "1-based counter and ptr to array" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var ok: usize = 0;
+
+ for (1..6, "hello") |i, b| {
+ if (i == 1) {
+ try expect(b == 'h');
+ ok += 1;
+ }
+ if (i == 2) {
+ try expect(b == 'e');
+ ok += 1;
+ }
+ if (i == 3) {
+ try expect(b == 'l');
+ ok += 1;
+ }
+ if (i == 4) {
+ try expect(b == 'l');
+ ok += 1;
+ }
+ if (i == 5) {
+ try expect(b == 'o');
+ ok += 1;
+ }
+ }
+
+ try expect(ok == 5);
+}
+
+test "slice and two counters, one is offset and one is runtime" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const slice: []const u8 = "blah";
+ var start: usize = 0;
+
+ for (slice, start..4, 1..5) |a, b, c| {
+ if (a == 'b') {
+ try expect(b == 0);
+ try expect(c == 1);
+ }
+ if (a == 'l') {
+ try expect(b == 1);
+ try expect(c == 2);
+ }
+ if (a == 'a') {
+ try expect(b == 2);
+ try expect(c == 3);
+ }
+ if (a == 'h') {
+ try expect(b == 3);
+ try expect(c == 4);
+ }
+ }
+}
+
+test "two slices, one captured by-ref" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ var buf: [10]u8 = undefined;
+ const slice1: []const u8 = "blah";
+ const slice2: []u8 = buf[0..4];
+
+ for (slice1, slice2) |a, *b| {
+ b.* = a;
+ }
+
+ try expect(slice2[0] == 'b');
+ try expect(slice2[1] == 'l');
+ try expect(slice2[2] == 'a');
+ try expect(slice2[3] == 'h');
+}
+
+test "raw pointer and slice" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ var buf: [10]u8 = undefined;
+ const slice: []const u8 = "blah";
+ const ptr: [*]u8 = buf[0..4];
+
+ for (ptr, slice) |*a, b| {
+ a.* = b;
+ }
+
+ try expect(buf[0] == 'b');
+ try expect(buf[1] == 'l');
+ try expect(buf[2] == 'a');
+ try expect(buf[3] == 'h');
+}
diff --git a/test/cases/compile_errors/for_discard_unbounded.zig b/test/cases/compile_errors/for_discard_unbounded.zig
new file mode 100644
index 0000000000..93434d0c21
--- /dev/null
+++ b/test/cases/compile_errors/for_discard_unbounded.zig
@@ -0,0 +1,10 @@
+export fn a() void {
+ for (0..10, 10..) |i, _| {
+ _ = i;
+ }
+}
+// error
+// backend=stage2
+// target=native
+//
+// :2:27: error: discard of unbounded counter
diff --git a/test/cases/compile_errors/for_empty.zig b/test/cases/compile_errors/for_empty.zig
new file mode 100644
index 0000000000..a4cb5b3c4e
--- /dev/null
+++ b/test/cases/compile_errors/for_empty.zig
@@ -0,0 +1,11 @@
+export fn b() void {
+ for () |i| {
+ _ = i;
+ }
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:10: error: expected expression, found ')'
diff --git a/test/cases/compile_errors/for_extra_capture.zig b/test/cases/compile_errors/for_extra_capture.zig
new file mode 100644
index 0000000000..a137b57d51
--- /dev/null
+++ b/test/cases/compile_errors/for_extra_capture.zig
@@ -0,0 +1,12 @@
+export fn b() void {
+ for (0..10) |i, j| {
+ _ = i; _ = j;
+ }
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:21: error: extra capture in for loop
+// :2:21: note: run 'zig fmt' to upgrade your code automatically
diff --git a/test/cases/compile_errors/for_extra_condition.zig b/test/cases/compile_errors/for_extra_condition.zig
new file mode 100644
index 0000000000..11c013acee
--- /dev/null
+++ b/test/cases/compile_errors/for_extra_condition.zig
@@ -0,0 +1,11 @@
+export fn a() void {
+ for (0..10, 10..20) |i| {
+ _ = i;
+ }
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:19: error: for input is not captured
From 601db3981ce820cfbca6001cbdfa87e24aa35ab5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 13:22:29 -0700
Subject: [PATCH 24/36] fix source location for not-indexable for loop errors
---
src/Sema.zig | 14 +++++++-------
test/cases/compile_errors/for.zig | 9 +++++++++
2 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 5a185a709c..7e8520eaa1 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3929,11 +3929,15 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.Int, .ComptimeInt => true,
else => false,
};
+ const arg_src: LazySrcLoc = .{ .for_input = .{
+ .for_node_offset = inst_data.src_node,
+ .input_index = i,
+ } };
const arg_len = if (is_int) object else l: {
- try checkIndexable(sema, block, src, object_ty);
+ try checkIndexable(sema, block, arg_src, object_ty);
if (!object_ty.indexableHasLen()) continue;
- break :l try sema.fieldVal(block, src, object, "len", src);
+ break :l try sema.fieldVal(block, arg_src, object, "len", arg_src);
};
if (len == .none) {
len = arg_len;
@@ -3949,14 +3953,10 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.for_node_offset = inst_data.src_node,
.input_index = len_idx,
} };
- const b_src: LazySrcLoc = .{ .for_input = .{
- .for_node_offset = inst_data.src_node,
- .input_index = i,
- } };
try sema.errNote(block, a_src, msg, "length {} here", .{
v.fmtValue(Type.usize, sema.mod),
});
- try sema.errNote(block, b_src, msg, "length {} here", .{
+ try sema.errNote(block, arg_src, msg, "length {} here", .{
arg_val.fmtValue(Type.usize, sema.mod),
});
break :msg msg;
diff --git a/test/cases/compile_errors/for.zig b/test/cases/compile_errors/for.zig
index 999782c991..abb87084b4 100644
--- a/test/cases/compile_errors/for.zig
+++ b/test/cases/compile_errors/for.zig
@@ -3,6 +3,13 @@ export fn a() void {
_ = i; _ = j;
}
}
+export fn b() void {
+ const s1 = "hello";
+ const s2 = true;
+ for (s1, s2) |i, j| {
+ _ = i; _ = j;
+ }
+}
// error
// backend=stage2
@@ -11,3 +18,5 @@ export fn a() void {
// :2:5: error: non-matching for loop lengths
// :2:11: note: length 10 here
// :2:19: note: length 11 here
+// :9:14: error: type 'bool' does not support indexing
+// :9:14: note: for loop operand must be an array, slice, tuple, or vector
From 4dd958d585256df3119d5617d22492f41ed02884 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 14:10:56 -0700
Subject: [PATCH 25/36] improve error message for byref capture of byval array
---
lib/std/crypto/aes/soft.zig | 2 +-
src/Module.zig | 49 +++++++++++++++++++++++++++++++
src/Sema.zig | 22 ++++++++++----
test/cases/compile_errors/for.zig | 8 +++++
4 files changed, 74 insertions(+), 7 deletions(-)
diff --git a/lib/std/crypto/aes/soft.zig b/lib/std/crypto/aes/soft.zig
index b57f1746dc..d8bd3d4ac0 100644
--- a/lib/std/crypto/aes/soft.zig
+++ b/lib/std/crypto/aes/soft.zig
@@ -420,7 +420,7 @@ const powx = init: {
var array: [16]u8 = undefined;
var value = 1;
- for (array) |*power| {
+ for (&array) |*power| {
power.* = value;
value = mul(value, 2);
}
diff --git a/src/Module.zig b/src/Module.zig
index 377ccd2441..76777532ab 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -2469,6 +2469,48 @@ pub const SrcLoc = struct {
const src_node = for_full.ast.inputs[for_input.input_index];
return nodeToSpan(tree, src_node);
},
+ .for_capture_from_input => |node_off| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const token_tags = tree.tokens.items(.tag);
+ const input_node = src_loc.declRelativeToNodeIndex(node_off);
+ // We have to actually linear scan the whole AST to find the for loop
+ // that contains this input.
+ const node_tags = tree.nodes.items(.tag);
+ for (node_tags, 0..) |node_tag, node_usize| {
+ const node = @intCast(Ast.Node.Index, node_usize);
+ switch (node_tag) {
+ .for_simple, .@"for" => {
+ const for_full = tree.fullFor(node).?;
+ for (for_full.ast.inputs, 0..) |input, input_index| {
+ if (input_node == input) {
+ var count = input_index;
+ var tok = for_full.payload_token;
+ while (true) {
+ switch (token_tags[tok]) {
+ .comma => {
+ count -= 1;
+ tok += 1;
+ },
+ .identifier => {
+ if (count == 0)
+ return tokensToSpan(tree, tok, tok + 1, tok);
+ tok += 1;
+ },
+ .asterisk => {
+ if (count == 0)
+ return tokensToSpan(tree, tok, tok + 2, tok);
+ tok += 1;
+ },
+ else => unreachable,
+ }
+ }
+ }
+ }
+ },
+ else => continue,
+ }
+ } else unreachable;
+ },
.node_offset_bin_lhs => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node = src_loc.declRelativeToNodeIndex(node_off);
@@ -3129,6 +3171,12 @@ pub const LazySrcLoc = union(enum) {
/// Picks one of the inputs from the condition.
input_index: u32,
},
+ /// The source location points to one of the captures of a for loop, found
+ /// by taking this AST node index offset from the containing
+ /// Decl AST node, which points to one of the input nodes of a for loop.
+ /// Next, navigate to the corresponding capture.
+ /// The Decl is determined contextually.
+ for_capture_from_input: i32,
pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
@@ -3216,6 +3264,7 @@ pub const LazySrcLoc = union(enum) {
.node_offset_store_ptr,
.node_offset_store_operand,
.for_input,
+ .for_capture_from_input,
=> .{
.file_scope = decl.getFileScope(),
.parent_decl_node = decl.src_node,
diff --git a/src/Sema.zig b/src/Sema.zig
index 7e8520eaa1..07176ad1a0 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -9716,6 +9716,21 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
+ const indexable_ty = sema.typeOf(array_ptr);
+ if (indexable_ty.zigTypeTag() != .Pointer) {
+ const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node };
+ const msg = msg: {
+ const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{
+ indexable_ty.fmt(sema.mod),
+ });
+ errdefer msg.destroy(sema.gpa);
+ if (indexable_ty.zigTypeTag() == .Array) {
+ try sema.errNote(block, src, msg, "consider using '&' here", .{});
+ }
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false);
}
@@ -24195,12 +24210,7 @@ fn elemPtrOneLayerOnly(
},
}
},
- else => {
- // TODO add note pointing at corresponding for loop input and suggest using '&'
- return sema.fail(block, indexable_src, "pointer capture of non pointer type '{}'", .{
- indexable_ty.fmt(sema.mod),
- });
- },
+ else => unreachable,
}
}
diff --git a/test/cases/compile_errors/for.zig b/test/cases/compile_errors/for.zig
index abb87084b4..dff46af085 100644
--- a/test/cases/compile_errors/for.zig
+++ b/test/cases/compile_errors/for.zig
@@ -10,6 +10,12 @@ export fn b() void {
_ = i; _ = j;
}
}
+export fn c() void {
+ var buf: [10]u8 = undefined;
+ for (buf) |*byte| {
+ _ = byte;
+ }
+}
// error
// backend=stage2
@@ -20,3 +26,5 @@ export fn b() void {
// :2:19: note: length 11 here
// :9:14: error: type 'bool' does not support indexing
// :9:14: note: for loop operand must be an array, slice, tuple, or vector
+// :15:16: error: pointer capture of non pointer type '[10]u8'
+// :15:10: note: consider using '&' here
From 7abeb52abc9e9e38d8af4e17e25e89083cdec397 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 14:55:02 -0700
Subject: [PATCH 26/36] langref: update to new for loop syntax
---
doc/langref.html.in | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index a74d06ccbf..3c2bdce671 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -2367,7 +2367,7 @@ test "iterate over an array" {
var some_integers: [100]i32 = undefined;
test "modify an array" {
- for (some_integers) |*item, i| {
+ for (&some_integers, 0..) |*item, i| {
item.* = @intCast(i32, i);
}
try expect(some_integers[10] == 10);
@@ -2408,7 +2408,7 @@ comptime {
// use compile-time code to initialize an array
var fancy_array = init: {
var initial_value: [10]Point = undefined;
- for (initial_value) |*pt, i| {
+ for (&initial_value, 0..) |*pt, i| {
pt.* = Point{
.x = @intCast(i32, i),
.y = @intCast(i32, i) * 2,
@@ -2461,8 +2461,8 @@ test "multidimensional arrays" {
try expect(mat4x4[1][1] == 1.0);
// Here we iterate with for loops.
- for (mat4x4) |row, row_index| {
- for (row) |cell, column_index| {
+ for (mat4x4, 0..) |row, row_index| {
+ for (row, 0..) |cell, column_index| {
if (row_index == column_index) {
try expect(cell == 1.0);
}
@@ -3579,7 +3579,7 @@ test "tuple" {
} ++ .{false} ** 2;
try expect(values[0] == 1234);
try expect(values[4] == false);
- inline for (values) |v, i| {
+ inline for (values, 0..) |v, i| {
if (i != 2) continue;
try expect(v);
}
@@ -4659,10 +4659,10 @@ test "for basics" {
}
try expect(sum == 20);
- // To access the index of iteration, specify a second capture value.
- // This is zero-indexed.
+ // To access the index of iteration, specify a second condition as well
+ // as a second capture value.
var sum2: i32 = 0;
- for (items) |_, i| {
+ for (items, 0..) |_, i| {
try expect(@TypeOf(i) == usize);
sum2 += @intCast(i32, i);
}
@@ -4674,7 +4674,7 @@ test "for reference" {
// Iterate over the slice by reference by
// specifying that the capture value is a pointer.
- for (items) |*value| {
+ for (&items) |*value| {
value.* += 1;
}
@@ -5659,7 +5659,7 @@ fn genFoos(allocator: Allocator, num: usize) ![]Foo {
var foos = try allocator.alloc(Foo, num);
errdefer allocator.free(foos);
- for(foos) |*foo, i| {
+ for (foos, 0..) |*foo, i| {
foo.data = try allocator.create(u32);
// This errdefer does not last between iterations
errdefer allocator.destroy(foo.data);
@@ -5700,14 +5700,14 @@ fn genFoos(allocator: Allocator, num: usize) ![]Foo {
// Used to track how many foos have been initialized
// (including their data being allocated)
var num_allocated: usize = 0;
- errdefer for(foos[0..num_allocated]) |foo| {
+ errdefer for (foos[0..num_allocated]) |foo| {
allocator.destroy(foo.data);
};
- for(foos) |*foo, i| {
+ for (foos, 0..) |*foo, i| {
foo.data = try allocator.create(u32);
num_allocated += 1;
- if(i >= 3) return error.TooManyFoos;
+ if (i >= 3) return error.TooManyFoos;
foo.data.* = try getData();
}
@@ -7265,7 +7265,7 @@ const Writer = struct {
comptime var state = State.start;
comptime var next_arg: usize = 0;
- inline for (format) |c, i| {
+ inline for (format, 0..) |c, i| {
switch (state) {
State.start => switch (c) {
'{' => {
@@ -8629,7 +8629,7 @@ test "integer cast panic" {
This function is a low level intrinsic with no safety mechanisms. Most code
should not use this function, instead using something like this:
- {#syntax#}for (source[0..byte_count]) |b, i| dest[i] = b;{#endsyntax#}
+ {#syntax#}for (dest, source[0..byte_count]) |*d, s| d.* = s;{#endsyntax#}
The optimizer is intelligent enough to turn the above snippet into a memcpy.
@@ -11116,7 +11116,7 @@ pub fn main() !void {
const args = try std.process.argsAlloc(gpa);
defer std.process.argsFree(gpa, args);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
std.debug.print("{}: {s}\n", .{ i, arg });
}
}
@@ -11142,7 +11142,7 @@ pub fn main() !void {
const preopens = try fs.wasi.preopensAlloc(arena);
- for (preopens.names) |preopen, i| {
+ for (preopens.names, 0..) |preopen, i| {
std.debug.print("{}: {s}\n", .{ i, preopen });
}
}
From 74db8c2e8348cb7e9bf264294f73d26be956ea61 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 15:34:00 -0700
Subject: [PATCH 27/36] omit safety checks for element access in for loops
One of the main points of for loops is that you can safety check the
length once, before entering the loop, and then safely assume that every
element inside the loop is in bounds.
In master branch, the safety checks are incorrectly intact even inside
for loops. This commit fixes it. It's especially nice with multi-object
loops because the number of elided checks is N * M where N is how many
iterations and M is how many objects.
---
src/Sema.zig | 101 ++++++++++++++++++++++--------------------
src/Zir.zig | 8 +++-
test/behavior/for.zig | 2 +
3 files changed, 60 insertions(+), 51 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 07176ad1a0..5fa754d6cf 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -4544,7 +4544,7 @@ fn zirValidateArrayInit(
// any ZIR instructions at comptime; we need to do that here.
if (array_ty.sentinel()) |sentinel_val| {
const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len);
- const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true);
+ const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
const sentinel = try sema.addConstant(array_ty.childType(), sentinel_val);
try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
}
@@ -9691,7 +9691,7 @@ fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemVal(block, src, array, elem_index, src);
+ return sema.elemVal(block, src, array, elem_index, src, false);
}
fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9704,7 +9704,7 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemVal(block, src, array, elem_index, elem_index_src);
+ return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
}
fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9731,7 +9731,7 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(msg);
}
- return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false);
+ return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false, false);
}
fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9744,7 +9744,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
- return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false);
+ return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true);
}
fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9756,7 +9756,7 @@ fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.ptr);
const elem_index = try sema.addIntUnsigned(Type.usize, extra.index);
- return sema.elemPtr(block, src, array_ptr, elem_index, src, true);
+ return sema.elemPtr(block, src, array_ptr, elem_index, src, true, true);
}
fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -12521,14 +12521,14 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
while (elem_i < lhs_len) : (elem_i += 1) {
const elem_index = try sema.addIntUnsigned(Type.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
- const init = try sema.elemVal(block, lhs_src, lhs, elem_index, src);
+ const init = try sema.elemVal(block, lhs_src, lhs, elem_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
while (elem_i < result_len) : (elem_i += 1) {
const elem_index = try sema.addIntUnsigned(Type.usize, elem_i);
const rhs_index = try sema.addIntUnsigned(Type.usize, elem_i - lhs_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
- const init = try sema.elemVal(block, rhs_src, rhs, rhs_index, src);
+ const init = try sema.elemVal(block, rhs_src, rhs, rhs_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, rhs_src, .store);
}
if (res_sent_val) |sent_val| {
@@ -12546,12 +12546,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var elem_i: usize = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
const index = try sema.addIntUnsigned(Type.usize, elem_i);
- const init = try sema.elemVal(block, lhs_src, lhs, index, src);
+ const init = try sema.elemVal(block, lhs_src, lhs, index, src, true);
element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, lhs_src);
}
while (elem_i < result_len) : (elem_i += 1) {
const index = try sema.addIntUnsigned(Type.usize, elem_i - lhs_len);
- const init = try sema.elemVal(block, rhs_src, rhs, index, src);
+ const init = try sema.elemVal(block, rhs_src, rhs, index, src, true);
element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, rhs_src);
}
}
@@ -12771,7 +12771,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
elem_i += 1;
const lhs_index = try sema.addIntUnsigned(Type.usize, lhs_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
- const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src);
+ const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
}
@@ -12791,7 +12791,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var lhs_i: usize = 0;
while (lhs_i < lhs_len) : (lhs_i += 1) {
const lhs_index = try sema.addIntUnsigned(Type.usize, lhs_i);
- const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src);
+ const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src, true);
element_refs[elem_i] = init;
elem_i += 1;
}
@@ -24145,6 +24145,7 @@ fn elemPtr(
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
init: bool,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const indexable_ptr_src = src; // TODO better source location
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
@@ -24154,7 +24155,7 @@ fn elemPtr(
else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}),
};
switch (indexable_ty.zigTypeTag()) {
- .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init),
+ .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
.Struct => {
// Tuple field access.
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
@@ -24163,11 +24164,12 @@ fn elemPtr(
},
else => {
const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
- return elemPtrOneLayerOnly(sema, block, src, indexable, elem_index, elem_index_src, init);
+ return elemPtrOneLayerOnly(sema, block, src, indexable, elem_index, elem_index_src, init, oob_safety);
},
}
}
+/// Asserts that the type of indexable is pointer.
fn elemPtrOneLayerOnly(
sema: *Sema,
block: *Block,
@@ -24176,6 +24178,7 @@ fn elemPtrOneLayerOnly(
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
init: bool,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
@@ -24184,33 +24187,28 @@ fn elemPtrOneLayerOnly(
}
const target = sema.mod.getTarget();
- switch (indexable_ty.zigTypeTag()) {
- .Pointer => {
- switch (indexable_ty.ptrSize()) {
- .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index),
- .Many, .C => {
- const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
- const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
- const runtime_src = rs: {
- const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
- const index_val = maybe_index_val orelse break :rs elem_index_src;
- const index = @intCast(usize, index_val.toUnsignedInt(target));
- const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
- const result_ty = try sema.elemPtrType(indexable_ty, index);
- return sema.addConstant(result_ty, elem_ptr);
- };
- const result_ty = try sema.elemPtrType(indexable_ty, null);
+ switch (indexable_ty.ptrSize()) {
+ .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
+ .Many, .C => {
+ const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+ const runtime_src = rs: {
+ const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
+ const index_val = maybe_index_val orelse break :rs elem_index_src;
+ const index = @intCast(usize, index_val.toUnsignedInt(target));
+ const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
+ const result_ty = try sema.elemPtrType(indexable_ty, index);
+ return sema.addConstant(result_ty, elem_ptr);
+ };
+ const result_ty = try sema.elemPtrType(indexable_ty, null);
- try sema.requireRuntimeBlock(block, src, runtime_src);
- return block.addPtrElemPtr(indexable, elem_index, result_ty);
- },
- .One => {
- assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
- return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init);
- },
- }
+ try sema.requireRuntimeBlock(block, src, runtime_src);
+ return block.addPtrElemPtr(indexable, elem_index, result_ty);
+ },
+ .One => {
+ assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
+ return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
},
- else => unreachable,
}
}
@@ -24221,6 +24219,7 @@ fn elemVal(
indexable: Air.Inst.Ref,
elem_index_uncasted: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
@@ -24236,7 +24235,7 @@ fn elemVal(
switch (indexable_ty.zigTypeTag()) {
.Pointer => switch (indexable_ty.ptrSize()) {
- .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index),
+ .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Many, .C => {
const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
@@ -24257,14 +24256,14 @@ fn elemVal(
},
.One => {
assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
- const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false);
+ const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
},
},
- .Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index),
+ .Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Vector => {
// TODO: If the index is a vector, the result should be a vector.
- return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index);
+ return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety);
},
.Struct => {
// Tuple field access.
@@ -24409,6 +24408,7 @@ fn elemValArray(
array: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const array_ty = sema.typeOf(array);
const array_sent = array_ty.sentinel();
@@ -24452,7 +24452,7 @@ fn elemValArray(
const runtime_src = if (maybe_undef_array_val != null) elem_index_src else array_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (block.wantSafety()) {
+ if (oob_safety and block.wantSafety()) {
// Runtime check is only needed if unable to comptime check
if (maybe_index_val == null) {
const len_inst = try sema.addIntUnsigned(Type.usize, array_len);
@@ -24472,6 +24472,7 @@ fn elemPtrArray(
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
init: bool,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const target = sema.mod.getTarget();
const array_ptr_ty = sema.typeOf(array_ptr);
@@ -24515,7 +24516,7 @@ fn elemPtrArray(
try sema.requireRuntimeBlock(block, src, runtime_src);
// Runtime check is only needed if unable to comptime check.
- if (block.wantSafety() and offset == null) {
+ if (oob_safety and block.wantSafety() and offset == null) {
const len_inst = try sema.addIntUnsigned(Type.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt;
try sema.panicIndexOutOfBounds(block, elem_index, len_inst, cmp_op);
@@ -24532,6 +24533,7 @@ fn elemValSlice(
slice: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const slice_ty = sema.typeOf(slice);
const slice_sent = slice_ty.sentinel() != null;
@@ -24568,7 +24570,7 @@ fn elemValSlice(
try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, slice_ty, slice_src);
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (block.wantSafety()) {
+ if (oob_safety and block.wantSafety()) {
const len_inst = if (maybe_slice_val) |slice_val|
try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod))
else
@@ -24588,6 +24590,7 @@ fn elemPtrSlice(
slice: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
+ oob_safety: bool,
) CompileError!Air.Inst.Ref {
const target = sema.mod.getTarget();
const slice_ty = sema.typeOf(slice);
@@ -24625,7 +24628,7 @@ fn elemPtrSlice(
const runtime_src = if (maybe_undef_slice_val != null) elem_index_src else slice_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (block.wantSafety()) {
+ if (oob_safety and block.wantSafety()) {
const len_inst = len: {
if (maybe_undef_slice_val) |slice_val|
if (!slice_val.isUndef())
@@ -26330,7 +26333,7 @@ fn storePtr2(
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
const elem_index = try sema.addIntUnsigned(Type.usize, i);
- const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false);
+ const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
return;
@@ -27782,7 +27785,7 @@ fn coerceArrayLike(
);
const src = inst_src; // TODO better source location
const elem_src = inst_src; // TODO better source location
- const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref);
+ const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
element_refs[i] = coerced;
if (runtime_src == null) {
diff --git a/src/Zir.zig b/src/Zir.zig
index e215dfac10..e13140d7ae 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -382,7 +382,9 @@ pub const Inst = struct {
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
elem_ptr_node,
/// Same as `elem_ptr_node` but used only for for loop.
- /// Uses the `pl_node` union field. AST node is the condition of a for loop. Payload is `Bin`.
+ /// Uses the `pl_node` union field. AST node is the condition of a for loop.
+ /// Payload is `Bin`.
+ /// No OOB safety check is emitted.
elem_ptr,
/// Same as `elem_ptr_node` except the index is stored immediately rather than
/// as a reference to another ZIR instruction.
@@ -395,7 +397,9 @@ pub const Inst = struct {
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
elem_val_node,
/// Same as `elem_val_node` but used only for for loop.
- /// Uses the `pl_node` union field. AST node is the condition of a for loop. Payload is `Bin`.
+ /// Uses the `pl_node` union field. AST node is the condition of a for loop.
+ /// Payload is `Bin`.
+ /// No OOB safety check is emitted.
elem_val,
/// Emits a compile error if the operand is not `void`.
/// Uses the `un_node` field.
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index e3c4a8dcc0..20d43c6ff2 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -314,6 +314,7 @@ test "slice and two counters, one is offset and one is runtime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const slice: []const u8 = "blah";
var start: usize = 0;
@@ -342,6 +343,7 @@ test "two slices, one captured by-ref" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var buf: [10]u8 = undefined;
const slice1: []const u8 = "blah";
From e89bfedd8d68a731cb227327a325e16fc7812df9 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 15:56:29 -0700
Subject: [PATCH 28/36] update compare-output test case to new for loop syntax
---
test/compare_output.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test/compare_output.zig b/test/compare_output.zig
index b3a6144729..e2525eb119 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -196,7 +196,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\
\\ c.qsort(@ptrCast(?*anyopaque, &array), @intCast(c_ulong, array.len), @sizeOf(i32), compare_fn);
\\
- \\ for (array) |item, i| {
+ \\ for (array, 0..) |item, i| {
\\ if (item != i) {
\\ c.abort();
\\ }
From 8b05205bb71fca55569a9ff4cab89ec9e09640ba Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 15:58:27 -0700
Subject: [PATCH 29/36] implement error for unbounded for loops
---
src/AstGen.zig | 9 ++++---
src/Sema.zig | 26 ++++++++++++++++++++-
test/behavior/for.zig | 19 +++++++++++++++
test/cases/compile_errors/for.zig | 10 ++++++++
test/cases/compile_errors/for_unbounded.zig | 11 +++++++++
5 files changed, 69 insertions(+), 6 deletions(-)
create mode 100644 test/cases/compile_errors/for_unbounded.zig
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 866dce02e5..bd1bba168b 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -6394,11 +6394,13 @@ fn forExpr(
}
}
+ if (!any_len_checks) {
+ return astgen.failNode(node, "unbounded for loop", .{});
+ }
+
// We use a dedicated ZIR instruction to assert the lengths to assist with
// nicer error reporting as well as fewer ZIR bytes emitted.
const len: Zir.Inst.Ref = len: {
- if (!any_len_checks) break :len .none;
-
const lens_len = @intCast(u32, lens.len);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len);
const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{
@@ -6424,9 +6426,6 @@ fn forExpr(
defer cond_scope.unstack();
// Check the condition.
- if (!any_len_checks) {
- return astgen.failNode(node, "TODO: handle infinite for loop", .{});
- }
const cond = try cond_scope.addPlNode(.cmp_lt, node, Zir.Inst.Bin{
.lhs = index,
.rhs = len,
diff --git a/src/Sema.zig b/src/Sema.zig
index 5fa754d6cf..84185f649c 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3975,7 +3975,31 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
if (len == .none) {
- return sema.fail(block, src, "non-obvious infinite loop", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "unbounded for loop", .{});
+ errdefer msg.destroy(gpa);
+ for (args, 0..) |zir_arg, i_usize| {
+ const i = @intCast(u32, i_usize);
+ if (zir_arg == .none) continue;
+ const object = try sema.resolveInst(zir_arg);
+ const object_ty = sema.typeOf(object);
+ // Each arg could be an indexable, or a range, in which case the length
+ // is passed directly as an integer.
+ switch (object_ty.zigTypeTag()) {
+ .Int, .ComptimeInt => continue,
+ else => {},
+ }
+ const arg_src: LazySrcLoc = .{ .for_input = .{
+ .for_node_offset = inst_data.src_node,
+ .input_index = i,
+ } };
+ try sema.errNote(block, arg_src, msg, "type '{}' has no upper bound", .{
+ object_ty.fmt(sema.mod),
+ });
+ }
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
// Now for the runtime checks.
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 20d43c6ff2..0c5ab392f4 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -378,3 +378,22 @@ test "raw pointer and slice" {
try expect(buf[2] == 'a');
try expect(buf[3] == 'h');
}
+
+test "raw pointer and counter" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ var buf: [10]u8 = undefined;
+ const ptr: [*]u8 = &buf;
+
+ for (ptr, 0..4) |*a, b| {
+ a.* = @intCast(u8, 'A' + b);
+ }
+
+ try expect(buf[0] == 'A');
+ try expect(buf[1] == 'B');
+ try expect(buf[2] == 'C');
+ try expect(buf[3] == 'D');
+}
diff --git a/test/cases/compile_errors/for.zig b/test/cases/compile_errors/for.zig
index dff46af085..5bd3aa0c64 100644
--- a/test/cases/compile_errors/for.zig
+++ b/test/cases/compile_errors/for.zig
@@ -16,6 +16,13 @@ export fn c() void {
_ = byte;
}
}
+export fn d() void {
+ const x: [*]const u8 = "hello";
+ const y: [*]const u8 = "world";
+ for (x, 0.., y) |x1, x2, x3| {
+ _ = x1; _ = x2; _ = x3;
+ }
+}
// error
// backend=stage2
@@ -28,3 +35,6 @@ export fn c() void {
// :9:14: note: for loop operand must be an array, slice, tuple, or vector
// :15:16: error: pointer capture of non pointer type '[10]u8'
// :15:10: note: consider using '&' here
+// :22:5: error: unbounded for loop
+// :22:10: note: type '[*]const u8' has no upper bound
+// :22:18: note: type '[*]const u8' has no upper bound
diff --git a/test/cases/compile_errors/for_unbounded.zig b/test/cases/compile_errors/for_unbounded.zig
new file mode 100644
index 0000000000..5d05b1061f
--- /dev/null
+++ b/test/cases/compile_errors/for_unbounded.zig
@@ -0,0 +1,11 @@
+export fn b() void {
+ for (0..) |i| {
+ _ = i;
+ }
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:5: error: unbounded for loop
From 12a7a0d76f9435c8c538f762daa79a49ca0470af Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 16:28:21 -0700
Subject: [PATCH 30/36] omit safety check when incrementing for loop counter
Since for loops are statically analyzed to have an upper bound, and the
loop counter is a usize, it is impossible for it to overflow.
---
src/AstGen.zig | 3 ++-
src/Sema.zig | 24 +++++++++++++-----------
src/Zir.zig | 5 +++++
src/print_zir.zig | 1 +
4 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index bd1bba168b..0a86fd3d0e 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2400,6 +2400,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.add,
.addwrap,
.add_sat,
+ .add_unsafe,
.param,
.param_comptime,
.param_anytype,
@@ -6440,7 +6441,7 @@ fn forExpr(
try loop_scope.instructions.append(gpa, cond_block);
// Increment the index variable.
- const index_plus_one = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{
+ const index_plus_one = try loop_scope.addPlNode(.add_unsafe, node, Zir.Inst.Bin{
.lhs = index,
.rhs = .one_usize,
});
diff --git a/src/Sema.zig b/src/Sema.zig
index 84185f649c..6e8d77475e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1060,15 +1060,16 @@ fn analyzeBodyInner(
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),
- .add => try sema.zirArithmetic(block, inst, .add),
- .addwrap => try sema.zirArithmetic(block, inst, .addwrap),
- .add_sat => try sema.zirArithmetic(block, inst, .add_sat),
- .mul => try sema.zirArithmetic(block, inst, .mul),
- .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
- .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
- .sub => try sema.zirArithmetic(block, inst, .sub),
- .subwrap => try sema.zirArithmetic(block, inst, .subwrap),
- .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat),
+ .add => try sema.zirArithmetic(block, inst, .add, true),
+ .addwrap => try sema.zirArithmetic(block, inst, .addwrap, true),
+ .add_sat => try sema.zirArithmetic(block, inst, .add_sat, true),
+ .add_unsafe=> try sema.zirArithmetic(block, inst, .add_unsafe, false),
+ .mul => try sema.zirArithmetic(block, inst, .mul, true),
+ .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap, true),
+ .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat, true),
+ .sub => try sema.zirArithmetic(block, inst, .sub, true),
+ .subwrap => try sema.zirArithmetic(block, inst, .subwrap, true),
+ .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat, true),
.div => try sema.zirDiv(block, inst),
.div_exact => try sema.zirDivExact(block, inst),
@@ -12887,6 +12888,7 @@ fn zirArithmetic(
block: *Block,
inst: Zir.Inst.Index,
zir_tag: Zir.Inst.Tag,
+ safety: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -12899,7 +12901,7 @@ fn zirArithmetic(
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
- return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, true);
+ return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, safety);
}
fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -14250,7 +14252,7 @@ fn analyzeArithmetic(
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
switch (zir_tag) {
- .add => {
+ .add, .add_unsafe => {
// For integers:intAddSat
// If either of the operands are zero, then the other operand is
// returned, even if it is undefined.
diff --git a/src/Zir.zig b/src/Zir.zig
index e13140d7ae..4dd2386c51 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -137,6 +137,8 @@ pub const Inst = struct {
/// Saturating addition.
/// Uses the `pl_node` union field. Payload is `Bin`.
add_sat,
+ /// The same as `add` except no safety check.
+ add_unsafe,
/// Arithmetic subtraction. Asserts no integer overflow.
/// Uses the `pl_node` union field. Payload is `Bin`.
sub,
@@ -1023,6 +1025,7 @@ pub const Inst = struct {
.add,
.addwrap,
.add_sat,
+ .add_unsafe,
.alloc,
.alloc_mut,
.alloc_comptime_mut,
@@ -1338,6 +1341,7 @@ pub const Inst = struct {
.add,
.addwrap,
.add_sat,
+ .add_unsafe,
.alloc,
.alloc_mut,
.alloc_comptime_mut,
@@ -1570,6 +1574,7 @@ pub const Inst = struct {
.add = .pl_node,
.addwrap = .pl_node,
.add_sat = .pl_node,
+ .add_unsafe = .pl_node,
.sub = .pl_node,
.subwrap = .pl_node,
.sub_sat = .pl_node,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 1674bd136e..fb9031296d 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -296,6 +296,7 @@ const Writer = struct {
.add,
.addwrap,
.add_sat,
+ .add_unsafe,
.array_cat,
.array_mul,
.mul,
From 40c4c25e2b5e2242c3180cd8564dc22697bf363f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 18:56:37 -0700
Subject: [PATCH 31/36] Sema: add missing coercion when checking for loop len
---
src/Sema.zig | 3 ++-
test/behavior/for.zig | 62 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 64 insertions(+), 1 deletion(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 6e8d77475e..fcdb1ce518 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3934,12 +3934,13 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.for_node_offset = inst_data.src_node,
.input_index = i,
} };
- const arg_len = if (is_int) object else l: {
+ const arg_len_uncoerced = if (is_int) object else l: {
try checkIndexable(sema, block, arg_src, object_ty);
if (!object_ty.indexableHasLen()) continue;
break :l try sema.fieldVal(block, arg_src, object, "len", arg_src);
};
+ const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src);
if (len == .none) {
len = arg_len;
len_idx = i;
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 0c5ab392f4..67a20e4840 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -397,3 +397,65 @@ test "raw pointer and counter" {
try expect(buf[2] == 'C');
try expect(buf[3] == 'D');
}
+
+test "inline for with slice as the comptime-known" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const comptime_slice = "hello";
+ var runtime_i: usize = 3;
+
+ const S = struct {
+ var ok: usize = 0;
+ fn check(comptime a: u8, b: usize) !void {
+ if (a == 'l') {
+ try expect(b == 3);
+ ok += 1;
+ } else if (a == 'o') {
+ try expect(b == 4);
+ ok += 1;
+ } else {
+ @compileError("fail");
+ }
+ }
+ };
+
+ inline for (comptime_slice[3..5], runtime_i..5) |a, b| {
+ try S.check(a, b);
+ }
+
+ try expect(S.ok == 2);
+}
+
+test "inline for with counter as the comptime-known" {
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ var runtime_slice = "hello";
+ var runtime_i: usize = 3;
+
+ const S = struct {
+ var ok: usize = 0;
+ fn check(a: u8, comptime b: usize) !void {
+ if (b == 3) {
+ try expect(a == 'l');
+ ok += 1;
+ } else if (b == 4) {
+ try expect(a == 'o');
+ ok += 1;
+ } else {
+ @compileError("fail");
+ }
+ }
+ };
+
+ inline for (runtime_slice[runtime_i..5], 3..5) |a, b| {
+ try S.check(a, b);
+ }
+
+ try expect(S.ok == 2);
+}
From 15d767c62154859724dbf63dd0fdfa4195e33256 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 19:17:05 -0700
Subject: [PATCH 32/36] add safety test cases for multi-object for loops
---
test/cases/safety/for_len_mismatch.zig | 24 ++++++++++++++++++++
test/cases/safety/for_len_mismatch_three.zig | 24 ++++++++++++++++++++
2 files changed, 48 insertions(+)
create mode 100644 test/cases/safety/for_len_mismatch.zig
create mode 100644 test/cases/safety/for_len_mismatch_three.zig
diff --git a/test/cases/safety/for_len_mismatch.zig b/test/cases/safety/for_len_mismatch.zig
new file mode 100644
index 0000000000..871e203f61
--- /dev/null
+++ b/test/cases/safety/for_len_mismatch.zig
@@ -0,0 +1,24 @@
+const std = @import("std");
+
+pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
+ _ = stack_trace;
+ if (std.mem.eql(u8, message, "for loop over objects with non-equal lengths")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
+}
+
+pub fn main() !void {
+ var runtime_i: usize = 1;
+ var j: usize = 3;
+ var slice = "too long";
+ for (runtime_i..j, slice) |a, b| {
+ _ = a;
+ _ = b;
+ return error.TestFailed;
+ }
+ return error.TestFailed;
+}
+// run
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/for_len_mismatch_three.zig b/test/cases/safety/for_len_mismatch_three.zig
new file mode 100644
index 0000000000..95bc244269
--- /dev/null
+++ b/test/cases/safety/for_len_mismatch_three.zig
@@ -0,0 +1,24 @@
+const std = @import("std");
+
+pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
+ _ = stack_trace;
+ if (std.mem.eql(u8, message, "for loop over objects with non-equal lengths")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
+}
+
+pub fn main() !void {
+ var slice: []const u8 = "hello";
+ for (10..20, slice, 20..30) |a, b, c| {
+ _ = a;
+ _ = b;
+ _ = c;
+ return error.TestFailed;
+ }
+ return error.TestFailed;
+}
+// run
+// backend=llvm
+// target=native
+
From 5fc6bbe71eeecb195d2cda2a2522e7fd04749d5b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 18 Feb 2023 09:49:40 -0700
Subject: [PATCH 33/36] update zig1.wasm
---
stage1/zig1.wasm | Bin 2377691 -> 2388586 bytes
1 file changed, 0 insertions(+), 0 deletions(-)
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index 9a3b25eb30ad036d83caaf3bc78cb2a235ded55e..2fe8728cb3562fe78ff3acede61698bf3a98adf3 100644
GIT binary patch
literal 2388586
zcmeEv3w&Kgo&R~RoiinLU8cYy{7
z5THnu3IXbFz+!{65};~;fwMbU0{J-DdoOABEX;PZve%8-t
zw~#w?=FH@|Dl9QmjI7^g&@UL
zX0`d0Hc#?sh3}AVDn%p{WRSWqzS#OuHcUWDSQ`1zx_vS0yclIK#y>Q0k$usJ>pV!;MW;cW~YmKFfq^%vp|pkhHLUC7Q{^da{`RDL0vV9(OhS2ir|
zAg93(+LS(_cBEM)v@xrTLP$*@4_ZhvU@qdEuR)>UjYI@L(m!^(gnpC&M$yBuEI;I*
zVEZCe&UAw`Q)i0kV6&CwB@ReH^|k?qPyi~@g$0S*3jsX-35^YMfEVBkg`A5>c4oTB
zZ~}@DHy2yZQ|TineD>>hYu!O>8XL>^(R0?GdG1-CI@?N(&}{a+wV(dT
znP-3QoOM>k2w5_y?<1f5^!aC7nGteSpMCBH=dAtoxrflrT$uG>Jsp4ES?fM==110^
zeb&0O&qNo_{>Zvdul?r{`;Z2J$oo+%IRboD$nddqKKc5n%Spp)&lZ&BMyPFEytebt
zUMIb_jvOI(7P-&+?AmkIoo!7WAxp*Dv(EkKr$2R2qK+Th=(=-0b@utzaU)b&VD^u`
zE~``^8!-nQj|d&fZ{QyZy;;H9r_Ve4Tx-b)@Kl0eCk-
z3gta%`7`H!@|<&53x!AI5%vPY111@X3e{3q8JpzYFOR!r-0_a%*iNdVqQZ3&sic!|
z>}1leNTpoIsj!o2HoNuI<>>6$v+CSJ+Ml1#P^^BvBk$
zQ<;RFLUk3kZM&|Gn(ahIMLJ_URw9$AuoEbp!GB)D&5l706%KMCNM}uAM~Y1fH_lbjHm%Xx?`6b_H6?
zBpoM#(oA8pjjHf3h%;6}Xe&@U<=B(>4n0Cswq55qRtkx><+>>+owl5Wm&pMBG&(mX
zgN(qI?WWK+O4t>dWXAPUZZ7X;08s@>TfnZH0gRbsMZ!W3Ckt$$wBuxm|4iP>CelCx
zFkv%=|A2S=1LSClKZyybe*(a$cI@1dDL3W12>^uF?Rq+Q3D$SR&Snz
zq-4S!BdlZFz*HtplCe%VkAL=<1pmv}VQvtRIYZj7Zo{Yes0Cho-Y9M}MI-7M;dFvD)-b!alcCZ3^GzLfo6DL3$
zLYlVlAD~0yV_f1Eg@Ae|gPzuqcZuaB`kf+$GXa?ikHC@;it;t`lW<_3OzoUWc4cKM
zHMb_0Cqr8)m(963VcxX-OQz@6aDfOU-i;=Igllj2flNe^a?ATN~A#!m!OsZn=oj`iDVi-#ZwbUyva(SFOaQN8toOTs*)*a3Opz9UlzP-rBNWANI-R>A5JSuuCrO24GHxn|r@VPixs$>2
z@;1pJ3C)uN-zRJ;X#V4`#6VcEJSdX1Cso=MLTEawaPT#h2f7aZMZU5tTyS3!njIPp
zvX{snVI7f$Vnj~-p|X_atSk~$(LX1jw~lbD&iNx+46@P9Ig
zr(~i~g)STcO)$og9WA3Tg{o{-4uumr(7{P%^HmAIkg)RURd2TJETs*YqsY25X4cd|
zVSx~WJ|ZszZ3D#FIoHi1AG8mYh69XIt%ILXuj6`F7WxY$LL8jzl9iH$bh?0UQ*D8N
zT>zL%+i4$Y1XjVW6RN79H&HuB5{D^H{J8#Tq}m=-
zndMntGL^FOE~Fls(w>&izQqR6nKZaGlPOj~WpNKgSeZlN0?iBls1+{Z#RlZ~4{C6&_96RoqV`k*XA3Jl=
zyDG|kQAwztJW?DB0tzly6eEncjx{eKY?H_;hX%!YG|FIqH7B5>*4w(V
zvC+$=$2zG`!H8Y^&ybKaVTP|=2Td@^wzQqge$~FiZnkeowkB^%zB*=b%zur^JHMJ#
z@y#!-zh&I_DxC@}G|aH{fW5kC&9s!I+rD|Vx5SyEtQFQ$*F!=JnwaC_A*1*{)3C&u
zq#fUJmgA|>ZoqGo-QZdJwb$G=1xtVJsjoftFMd{8bKFUOMevwm?pmkOF1iWCRK-%a
z*=FTZz8Sx+H(6N=>{h$z6daSI4LNF^jkcd;*kb!GO0-LfHvZN+O}1CVQXME2*4xQ?
zJM9MF&O14UBDGGt?b!len5WBT50I}b%!gc!wvVC&t)EqNOGEwmwLPF?f!z~pq?b8*
zgTmTkUA4|G+fOT(mDD=jb}{28+~rE%G}>pZl(y|!$MUAC1hHc+bu4A;1RzxyXdC>5
zax|)hft<@3K>FBBA3$(b(woM_^b%(wg3J==n8b704
zBN=!orhEsCk_221xItxsTH@RyPE`W(W)WGdLG3KL1*(AD!k2CXa05mchB*+XqU#j_
zer%7i_+pHBcp>ac?dp2+})KQP|Ty*o@4LpGghgJl)jT
z>Iv^cJ0k|Pf#kJLrf91aa3<8%S8Z(abk;ixomfdAP*kN&=G_$iynuc8NBC
zgYcY(B9OX(Lu;MAfhJszJ^^%g=Xler6N?GYHOg5C8qt;gqeLV~SC^WFeWer!WD@%1
zt(+AD#haH$cfoo3*Zr4;JrFVk9oQ6hAeqzM*In-E6TPFQg5i{bo71n{eW|Av^XMDy
zL3Ea6M+1Sz1FL%rM0x_nvR-#JZt~_^7Bs3vwOwaP3(gWp0V4>Gca{r&^mm)C@^so;
zX!;L{(%w4leTGWG)f*oDl1Fl&Gc941g!GOHvrA>~Kve)XK
z=OwJ%SV0~dAea~mz2A?`VIH_1gR8f?=wfVjl`T@oaTNRwmFe9J#A~l%W~?|#1+C`Y
zGA=jAcGg2N*vdg(3c7<{7ueNMH6nF2sF$lQH4fG3-99dddG&habG&2GQI5>P_=A*1
z7!`91BG7Dr27a029-ClnvkLlAgj)S
z$ZHDFB$U!Tnh+vR0^xuumvZZPHz$?5!1jts$R7GcQAdym7;Kq83GWezKidIL4gE+*
z@M;Fk4Uuxec%VcwSB3n*6&dM5@0GE+c@}uUSWToqm=17&8OY4!36#Xx;BpKI`GD;K
zA>N&KZgoWG#7xUsuUvK+V^hG1v^&_CH&=5MLC>9)s8CpNjdEA5Em$zi*at%75V0Q&3oP#`{D_P`?96&>0$-=-PpOB0gY&khe>`=m29Nb2W4bCI?ZR+qgJWP{#Y?c2%%iWTvU!h&6c
z>v}K~`k8zFk%aTSy?z>P=Dx?R)kR*wydo?~_KCb54Kx(mtoM42AL)4g~iA0%j__ohtPAB2NJ*bH9d`_>@r
zB#&Fm?41mEqYYrK_)B}K+s=gkbOSRJ{J`KZ?y;9TyQR<)rw3tZO?ESmynP52GPwgm
zrbgI^OmLbI33CqyFI&Txn_@3p5GU3;!k61)FWZBc-Qmk!v6o$Z*_#c4mS*<^1^Qz7
zdSmec#3keYAmd={Wpg%oxe*b<)PgYVL|gcBYw)r?_Odg4xifg##g{-(Pb^bkm}zg2
zsXt6;sxa*}A`IIbWJ%P}!gpO24c>7Gw^p#X!eOBPj#74Kdn~?*ab(+!kXCwE#S*8r
z0@IzFZD;w=|C9?(2jf>_`s~;?6ebdEWYxA`pguqV^>j0|UqWxLXz*|2RApnf!GD;M
zO_0^^Ffv%d5z4_>1O}2JV2QI20O;feNI+6M
zXPAvSy>-v^-U1tHTYo)>5p#{GBeB;13R`UfK#Kt$p(yKRMq9njzrg|4$
ztv=viZmTN)I5pG1!d8FhUuml|{BzX%eXR=qJJb??g{tvSRd4Z|ZMDq5%2reSt8F#Q
zzs6P*{12&j`e&&(`5#vA@;BJ(WdB-Qz1#o1t>*jJ*=myCVyjyJ3$|M7x7uov|3zE9
z$G_fIC;A(0<@x`heE&U~^VCFtjZ*$v^>f?*g{>z0=c}pyI(4M~88z0wKpo?MR^|PVsd4_t
z)qDNVsekhSSz%R%MD}H_O-vMkil&uc-
zOrGpX6_W|fco^TEDyH;bHeEr}3Phwy50+mDJ3*Mf;}KZxHk3A}i^U8qXslE?T=z(N
z$E(BwyYtkdEn`}(({ZX_#VWU->gT~qW{hiPecg|tmV|EYLXbyAST#V=o_Z98@f~Ju
zIT?9cPHs?DD(@{OH)43+#1PZ%R)#Q!0`t)rk23JI;f-$1Vc0T;3Jj_dOjN8oko_gd
zILi^i#OWu^Fe_)3OjH?dIl5vd5*eaW1hWbZX!fdVwY>{q<{_LuwGSCumHv6YJGUm~yL>cS25At$E4gQby&@OIrPxEi
z#e~2yQyQUeN`K{Nu;^34EJu_G#YUUiJksq3oC!`@LbC62ebkJl7uT;w3tf26H`OZ(P#jQ%E~!l&(wT#q
z0H#9XPS;yTR_MwVD+=@ZjwnYE&(5jETo+hemB~U5HotQx%A@6uj571(A3uLu@@Mn0vRdTdTZ&$
zbzh69gBKkJ5Vo=9KJ;N?Sk?B@i|c+^;u~9cmR@XRi7`P5qtCkQsn3=;yAX`P#e0m$PJZ5L>+Ze|}c>kc*7vD74e5w(Ox`aq_Y
zh4~;iXtl>JR_WwweGw*uutOYG#p&Ii?iQKd6`j>Z8+AFS*U;84z=X+vQV;U2tjKXS
z4u-g@S_R>7BnBEAc_~ygH)kvbjz|SFKj1*o%(16xiL(KrZblaCkcI^k7Ndg)z(vb{
z`RhQ-;s{OAyoQ4imPE=&h5%Q)S;i?}p+I)g^2cB@Jl}HsowSD@wez!#=(S#sk=N*1
z5xs#3o1y{sx&B-ta=`VE4~uO7w}{21$Y6Pq{jUF*D2+ka|8CUI^S_K(QwlX=%5Urn
zw4+<{qIDY+M#P#D7~(DORI1QctRFy90W_B25_8;^y#HUKieyx7BeH?E%?!Dq6&sh^
zNr1LPCIC2)1c=7v%Q?)>7?uD-upf-ecOJ&Ld^gLCz}*W&&E4}=Xo$Q~(}B*q>DfqW
z7|q>Uj!!<-I<1M~CLzV9MDeRO#x6X4RT5fcZm=;Jb>;zio1hDNh{dlWvYV*uIb_rg
z<=xMJ5qvmxSk#TkyI*ISnIrNpM5(NY9fo6?1CH4c^>Kfk`WWo$&-%xx3;eO_
zGyaikoj+Bb?@w00u>GIgYOSx-8h@fX&tIrM?N3+d`s3B7{MqW0{?Tf+UsN~R{@<$`
zY=1$S#nRx95sT$Zw(qNd@I8h7z7y5;w*MaWMcZGbS~0g%U%=!}VFPfIy3Y3JtIym1
zyVbR}f3n(O`|na8_TQw=^53aG0X+9GsX7KvcUiS@c)F$TT?zfWVd3c-v0#@J+?mk($_o-xVzymF6<81;
z3PnkI`rm-L$>A(AsV6u1RTxfFdh+Vx1f9U*pDY$&-xABl2`~%CdyF(~j~jb{SuoJ+
zO?)nLz}FbbL({w(Ll7)WOi>Ymf{_J5HDQVK990Vj{9dY)-a}R=LzU*g-vtGD^04Y;
zM5WosG9wK53%Ppp_xM{N$amw5UJgDz}v0V_{@0SK&=6wnohR2eOL`Bdxl1qyZ#
z6s)(Bxs2lN74kfXm0vSV_eaC@<1$S5<^4aFhUo#K2wHd_!y#IDkZ2n?WVBgBhUq_Z
zS7tQR=i4tIy%xTcWkwvPRT{Ifl6E6*um4V@ehYI{7Vnw5g{kA>G`RU8Ov;R83k?R!
zXwYwHFc8pSUr2+-g3zX4Xt0+wfDwPR-BbuorSHig-&pX!U!p;C!O&m>!yz_pY_i#nR
ztRfWVpio8zHNNTIQko;pf!bhN(>{==d`aU^J)mJm>W1Y-9X}YSS*^1_;eSieKbY`4
zBm4J77V}EN)|dndWnE2E%$kz^E<>}V{{t2Z0J#5F6f2*!Kl%Tld;^#a0L`(w{%;sR
zjA6VlE%MYX3z8d>X6#%Qjh$w24k&U5DPjnT%_K6(E)vAhC`h@9d7^a{@<_}7k?4(C
zS^RA|b~$M82^L!WEvKq>x#Z31qhQ|LE~op+~FGV^h*tX~WWE@a6;T
z*>A*5T(M`{W8~oSrP=2Z@e=!5TGam7HmPZI
zGRnTP5Hr2+;7VYi+XGYgb}r5a++?(EN38Du)3%49ZMlVKlV&7HhY)0YIcZ}gxo0Ov
z1C$YOr=IKdddzaf1u%zNybgMCt
zyOU-;aaR(Pm(yq>Z>EWi>7uL+wFegQ)?qB-4qC+Rhiwrz(ZDT^!Xm!oFf8J`SZ3r|
z2?(H{e*ma7yACqH=#8TZTPR8Y6e6-W>CXt4cOQH%Qi4*Xue?Z~&~s1Hdy9WxWZmxI
z%sUm%yi+>0+W#^{T&EiREm5XB56pCm$rH8R`R}MF8`hK5Sf5Dg?CP?%f)(pvuQ&y-
z11wP|wbM{qp?>W9P#Q5~yT7~(dz1eC!aw~<|A7d+d#OW=87KW7C@-`x>Fk3LzHdD!&K}MQxj5gghsneA5
ze<)2iru-j8@P6$sz-#t&q)>Btp$#d&M+!Bk{3oJ9Pj;8L)KXq(W6J-r6lzKNPez6Q
z`1$flYHO^J>2IG|NNN>{-<0xBk4EXJ*FE1r%fZBJy7c-%ro=kX9;+6;-kS2;1eW%c
z|8Eg2_PpRUPGpwx|5vQm7;4KPZI=AhCYetI?=|897t-{TK)wAI=h)^6aHpBZ;ku
zbe31XBjrCOm3OB6pGf6dEQYe+JwHNK1}apZ4pO_A8cY?q2ym<{o-%(E7i^<9X}n?T
zmzZkSY-Q2FEX4B7mv4+HAob6a{OwE)Tg6<~Q(U~2nqG{Sc7o1s{x0f{y2f=@EahO`
z5KCf1sD+9a$wDkE`qM9fZnM&diey1n>VFh1o7!X+{Z?tU?PatP?0s@>J<}|ovcR{0
z7()dMc$C+xZG(nSDX=eU=ZY%4a)$ig|fTOwQ9C=&6n{u*aswB3nldyrUsj=
zB)5{4aOS^}n_nTJkl#rC?Mw}sU_}#;@`TIiEObQ&stZxS0&mT0zG~&GXdNjVt-$l$
zZ=M9{jlm?WGFf2zBU2*?vcbB&+Pe%zhRa7z0uW!qI
zo0tuFU>fA>2-%||&VNeUMBJCg8>YU<)KDIQ^S3BY*ZAg+o#3Vzcho@cmzcdGzJB4S
zC>SvUQ?4eF%Ip&VY3vqbUy0^Pz7~$th%@K!ST?r4NcYM2M>|l=aSb(bBoIt#qt%lO
zSsP@YW|z#p7Q1!4f5Ii{!;U$A`h_UsQJi!
zoXwXqefxI67*Xi1^5&Z(+F!=r=gWHEVw%sR`3;8ldGx+iR9Bj&gM&*ZoeFv
zjMRZb9p%lpM$P|@X1`a~e4A5?(*i_
z(|H`1q<}m|rQln8Li(T+=vzleTvLE(lXaQhMG258inHrjbXy*(Of9#-(Dv@&Z
z4-8Ev2`TKUsfvw5`pWe@eoKIuj7hmYQB-YGf&M
zPd|%2edd4me)KOkTAS3Dm>Q1OfalHP1Hk+baThY`n}s(_{V`L`@(s*ME7)%5o156v
zn5I5W^54VM$Od7)N62Jj11ih$m9*p>>%F!3b9)P1p;!C`hg#C_tAU3&v=V@NkL>HS
z@b`X#?#0j@Aq1kU0BqpWGZ$!_
zFA(i#YOFxuL%{+s5|804@vlII?~+o_bfF)?IL2mbjjIJq{q|l!5O}b%nOZ5(!EQ#4
zM@}0o@WjsnL11pNz)~sjBc=xIi3vRNy+B1(9TL&@ambp<)!tkEKZ&Kor=K!YD9-@w
z8I!3Jnf3?f5Kdh7&>IQ@)ZL#*IzIJV9K9{kmR`%NnVQ@xrdEJCKO$D
zuycxKgVG{TxQb$yeI!U0kUN^$e&Gcn0y+@4gA{4co=5z;lSNc9l#AnH&e6MSS(+
zAAsPYx6j2Tol^_w@%jIsf-xT!&LLBZk#akY_;S}y51^dIhsU6E*2EIf2
zF!Dub&7_7tqMK<-7vO-b)5bB0GldCoOiE*tI>9(5b%u^f$U?`Y?Ip*gG|q|#$`kh^
zK$_r&Ovj``=$HgA&OKCum=pF<3HBYf5`3Co#ow_;rv*11#4}0Uw!34)pyi)n7zyVj
zF#K^C+=by#-?|&BF7#0)e)ag(`jf_&yMhb{ci-Nt%Y3;jj4wB_1z+wyzAf|RPDZ}m
z5IPkMN@JOuzTB9agCc=z7lw~sVi3d0bHrfpVPo*ddw@ZIbO?NaWkzuAf-kp#)lcvM
z=3au=f7syd>OfO!bm0AlWkvu`JkNdx63n+U;7b2L?EK7}O2KO~39rd%D+{#%cR$uS
zjxy&3r!8>?*^{~@&H%&r!BLDo8aU)0Swoz{p4|rMrVrbr5gd|OU~*Xjo?emL8b&&q
zaPvXD?Zx>Mr!<=@4TYK*f{_{#>Nc*H>&D6k?`_dK`H}g#B&s>H*1}w^mCV(`Us#87
z^kiqodpl1nwN>J@(j2C5s)Q+lu(c8kQV3hJML7buAzRFyUN{1$7wPL*c0}IB$tUMD
z9+A)CXgrU|SG^56ooy?oTnt-zg=*fStyNG$u$#FRU+XkyefT3m{;Dv~vw1`YxAJ6h
z_Nhuw`J~3V7idnxfQw6lI)>e`Tvn$F`OP=*aF9KN4}6L)^Ut>(b|V)O+@J&D2t*;Zp4iXM;T&%qD;H_2Zx
z1RBs69-PGltwFtDqN-m)Fz9MDW$P&o!96Mu+w7Tl2BbK-3Nh@AS0?Y$#EF_5};
zV4705l~ii4JXEK-p|<5_vfMFcEN2A=XGXLwds$`#sl#9_r+J~_9J-GkkAWTN`~Z&e
z3i0M*w9%h4%F|VIsXTWD<4brD#w}Bxjo%x35N2+qJm;$8)m&q&@Xmw-mFL_g&UWJI
zcpUB~o;nU2PmkP+O0P{h{4&$
z4Kx+dS!$|I)UUyby|3cNCLETi@n_;!KeiLc0QppfU&*t!P*Bsru3!zEiJ?)Bq)@3uJ&iSZCCxsf|H_mhuAUK!Jmg(*8`%Q;B{Rp`NOf^@aRs&H#Sl
zCJ^LV*bu`U0-OW6;vFsn6(-z^5KBGI9)vvc(_O#Rd5-Zs24sftwcd@8%Q8KDHcGR*
zB0n`Z8kh!e2TVA>F$_pY2BO}<5Td@FAa)$`5#OPr{wSw}#nG|wiwBuT(&K@P{<|3-
z2P`lg;?mnokb4dpWNXL>+I1;F9*tXdKMO$Q3eOmoWZ!T-kPdr$7|AXY
zu`%uc4Je-O`uv2PY!2}_^?%y|D9!UvflW+fB7nj_2`^5_j}wYx1BRnzXE0hW$1#)<
z$&3p(a9k19ARw6?UJlT<6{N24I7p)}SDzNIAF2-%qaA
z_B#EG%~yC03oL7yWs!eyBM$EsLgkjuz6~WEJsqd&QBp5#zzsBbiENxR$s4AZSHfOO
z3GFL;H6G$eR|WD(NW=vN&l2vTBetWLuLkj_jf(i+zX3|+xM7Juy78ff6%BuGeF{;6
zQFvFnxP)eGLhek!@C3tQg>2R1_7}v}=HP_2x*ZW7xz-4uW7y1*TMk}(@6vmo2~U|f
zySM`pHE+OhEEkvr>$6;?!4QPoFkrZ6VL`kbsKs3^Y#pJ6^LD7MvY~inY`3C|78oz&
z*+r&mM2pkC_aL?l?6r`sG`etALD=N_$HG=z_|J1Q+5ieLgMlI4QTo(u1
zBvU+MWg$%~#lr49jTLOVBgkX_0(Es)@=6U%St^UOacjhEomhRGW3A}1IJ@9N{2uGl
zi|}(Hk{Y+*T94evi^dGR1
zt^xkiI0#&H04y)Tni*X6QDMO9uE&--ZVFT8!aG>6iu;~W3X0jyU_*H>%&t_0m1_$&
z%BFF@qdJm@4F%xATL3p7{C-D*~ZH(
zPhevzhDxTwShUp11mM&%uu$_GSO!b5>@UGG
z?_jWq-A_c8!$NI})EM$WFp4brcM0jaM0g`K`&+o;-C9L7weT0ttRN6vTPwZ4m$j0r
zV#P27p$U!UlPcr?+FIq$IG|Zx_@?{N>FM4F5Zez(p}a*4V8IHFaB|Y;;Qljm!~j74
z2P~%`78Th5fJ#N00V#_NphzR3ZmcpzjszEgd`GHzh_HouQVL3;0=bZ~6AjF9yDI4(
zCRKEtx)hIyV_n1TIF+r+MM(?252^|Xrf$N0mi}CJ5v8!zLM25qPoNUYt0Pp=CTNd%FCG(3&3mhi8I_>$ErXcDJuwO$xLj8d9K@iU@Zfi$VhCMIp5dIJgSV
zdlw3IERk`gT_~{C@&1t*k==O-oI!J4&_kb?Z_rH?M*(phy%tvlE7A(3vho!_LqiWoNcCL}zF~>rQE%
zGAq4lJ3)HWDZSYt=VBJvTTduVB|K;rN&$mS+X?2T$pgA4Le5D%vbUcQF0unjWv!s|
z(n%M&2GF2%a(mF&e$&|2sIh^du~M&t1_r|hFiYTc6B>*HD)!2cPioM=zVB+@1T7fX
z-$TM9_s0_ccw&S8Zam>@NO(G)@B+$nwrltBUblzwN}js;^l11rNG0-A_|(p)K0M8_
ztfkhKxPwqX6Fj+%eER7!z=f0ut{1|hy|`*l?jY2^LsCf*UX2p3?ueCm1rGv9^D=!D
zqzqdr3C;dKi7UOLdfDGQzZFYhfA5MXu)p7qCp>}jrT*T5SEc@L3!kLF4~9?D-)=mW
z`ulM3RO;`ONQvNjA}m_!?@y6b-rwirl>1po2K09h`YVG@t6?arZ74Ph0n{mM8%8%7
zeF_fWNKzTS+Ja~(2+fF7pEfc?wkBq4ks2FMk{V5&8>vStSL@F}WgC-td~6)Lm$cdY&?NveLkK*vi>HXa0wntWPK)l5;p!>
z_#|X~DSQ&L4&tdq*53wCC9+=jPz-><7pNeU=
z(qK_%?WUsKaFUD^GA=irWJZkUlgvobcoK&|-CQCfIiZ;>!J$uvzhFTHepKcj(H>lp
z&v`vYGK?aaSz^(^JB_4YU_WVJHe6Rqyy5MpiXawz+ct1#y-^UY3kyqmh
zq{x*I$GUz~*mY4XzYU*+BCmu`LXpc+OpqiLX~9#8B3A`ZC5qfq)<{^iM3LK&R8EmC
zkHo;b9S!DCWA1wD-ovh6>=CoiRD00=O=oKn3vc@r1iWNfQNd6<&$li2}Gbd=drl^yyvtnZS*%W;QjdAfDOfI}ZvmcX$Y%Py)s?Po@fPu5F%6Sl
zQOh%s5Sy%UPK1?jbkxl9Xw@qlus~dn4Zz
zU}GL-){QY;l1oixTF12w2YW1lUOFn9JwO1qk}uFFuHkAb`>1VYuUiYGfC`~jd^*lm
zl#jc<8)Gxd$G!0c%Eu$|gr`DA6#3YOS0(wlJA4xPcsP6#`PhZ0l6*WGJeB0*X{1E*
z@l;r}Bp?5Qq;mQA`Qxz?|A~h{K0rAvy|uz-4_AI$5I4>S&ZcFGt1@H=2mscru%=Hb
zDwthPh*1H%ahyaK48HO0vH~AD>uXuoP~@UAZBvBd5}4j&w!_a1x6oa^@n;)76zGG=
z`k^p2s3Dl#$x=+P_Z}5Y4_mN6O4GkiAPBajcr&^V_Tf!=EOZE!0VUU9WutvDLOPNn
z)P0K^4)aReHhSfZ@>g4NYy(cNSD_Hyib`<8_Y;T+-InWr=N6ezeIA*^XFCAhQc?7d#
zaQJHxO3lD>GD1DL=x{On5S>NGm+$(T-f$v4#N{!#Echn}aWKXH=6bco+^av|dipe+
z@kq?KdQTI54~G9TKK^AUO_(E9@Db{@_lD~a;)>gRBi@!xlD;haF;p<90IlJ7$YPVB
znQ_OJQTaDd+=1!
zIzJ1ZN?PYPNQt!0OJUKH*7*aH%C*j)cEw8k7aq#APJc}6^p&*EUPd8-Mt}4N`lI7i
zP{eEi=%L)YIBXTPwCDvHav3P>4jVu^9KaK+oxv;68V~40L0DXu}
z^-(4c+SHzF(W{bnTy)q?byKk&w5e-E>sPx{o{DQEdy)7!2>lKjNtN_(9Gkh`rGJcxeYGTz{%sQ>?ASFo#uUoa&Wjjv|Kh
z`PPr>qow5{uc_UF^^r)=W4Ph9U{|IdCvuTvS3S<)CN#!R
zZ1X4d1F;@*eVZF+`bV)ucJDrPkAo#Dv>sfAEp9A^Bil~G7cP|xdXSGJ<9uwI;p8&H
zCsyIc8H7Jxg`=SepIpUV`T1B9^ye^qY88Aq5bj##zm?&RRXDl_Xt%DyDQonjW0n7V
z=~(9~oWnCq?3lWYD;sf(*iF^Xg7^{rept=Dj$HKoJv?JiHhO*x&v}&zp9%Xjr!BLa
zWe~>KgHBpz@4|;>kPZ@q%t3tcM0{fnas3^X$M#jMJn*y3?vi|SuywH1`j&|USJ=j9
znXjV^7AxardY3J;xAI+k6%p^0di7N#VEZ?oxfz)^)6d{EIC^7ImG!?FTaFBE$@ny*
z0C`vSGV3W;yJ<{=dbiAY^+rSoCr
zZxHW8{MRO~UPQ6|Z1Q(V=w;;Z=yZ==q^($7P`}0N&5YDzR}P`R3r$5N5!3?CZOrkW
zI;(>@?nRD>NX)SlIqqhTw@aV982vq3>ty6j*bHN-M^GwGN|0fh^)Pe1tm~4gP&vclsfBKhD#;sr;IMD
zvz}pix+L`?s{aF3W=kafIX?A6cRsU+_|7`3ZyfU1;)_dl*7J;_zniO;S^tSR=NhPe
zhB~FD_!iW;L@lZ*j`7ZdHTa&DW>K6Ipw*R*Wn+26AbV#WW59M7A}{i^4=96Q{}mgg
zXXFny=+4NiHi*v1FKv*S5gfePE0Gs$5SS@&%2=-67PdwL#2VB+0Faa*K0%0Gb+U}Z
zD8Sf)3xZz35uQ;vb2VI$QjC^yGls+>7{K2vE1+3YP)=
z9s?k^_{RdmE%2>-3BO|j+ZH(0{Tjb{^;TJA>H`2!U)2q@7OXt->2*8hOJI=o5X*WG
zc1D%AO{r}w)D=x;A-s!e`EBZGlO`;_pJ{=l>}CAeZEAX~OfS;{(dcG;+%`2YR(>DT
zj@hQh$IA3DZR|F6WGNq+@ZM?&3um&BU0U3szDo&_3z`;mG}FJs#(e?j$Rkt~WRf~?0^a?eUIf%YdZuyb
znQ!f>!;r!+qAq*)#Ioeu3@UfVw@B{W_4xSb(6`YgO4I>%df+Z
zo^}!7L(@2W3bf(752A%^eI%;JSA=ziPnD?qxm8#?H3%X}f}AZhLmc+PkVDQL8kOb%
zQ}FZVroT8<(9=O58F)9{1E-N}40&QZB4=Fl(5i#xDK{OtitKriqzU+dD@PlE~14AT~vgHIB#2#+CHp{wj(rB5ys#6
zZN+`D@-#%Dmx>q_j@(xKL#YgOM-cnZDIs^jBYY2E9}+5*()TB2Feda+9KukBY8C1?sk+A(;GotQPEaK
zQe08d1dR{vP*m+~4~l9lLn^8cMvS8BWE6^O2SX^TT?_#b?TkBuI2F}>5;uzKLBz!(
zMleKCJ;7{HR8KO5qI!yUX4iptV-vUKNr?^m2{Qz-szk{@j1z1v5v}KVtL{;^|
zZVyxyo$5kW^~`CIJ6m%E{E4xFYbc~p1SjL(16G=|kb}-_I#27QVPyL!)F}
zv>=
z?IcPeTj@C1l0xUQx(Hi{52&Xfdt8xJ*MO)Uy{YFkdbKv6jQ)*oDsdXVt|G+-B}
zl`9c~x&RGG6W5={;~VsrU!&WF2o2bOal*dB+bim;zK<(&20gM>6>MW!Z`YUM0Lg{Q
zzF>_ZM`bI
z+eu`ysciHwoi@=-CYvFeOGN7;MB}_qCc>7bred3xhs
zq*tMyr0hACgC2j9u|hkmE9)>kR~8C|O8HlTf0emhK3`dxN@o%ox3V&u%_Nb*ystDd
zc`OX+!=r3_GTI(f4%1WY^Hii)m#2U6r6AoaPw!(r9`aX}r~lz6NGI-a_BNW{8rl5;
z(Q!9^46~#tVEPCRA*=B|5fC7pGUjo7IZl$_vN?rn^X2KcGkrSJW#lfA@aF`Bq^gL(
z$f8211MC)1b#VBuhj=IJ={Bv$K#=}($CVx=RZl{EAL7`BjNl6AJzxop)s9F*B)p=E
zK&7N@L1tx`cSNTcA$5?y`-v+LQ%HYBU6lyMW7Y^l3dd%2kIFA5SueAw9Tn9rL{>9?
z;3?K+iKvd#u%d<&ReDN~M#BveiQki*^1%NPk!T|ikZnSd2sJun0WDHF2P)n;IF%^+
z?cET*2`DLMYAOBl>#%5BK)TqO@e0J6j3uyvFXEHQrkRpv4pU3kHFIv|Kr$H-*izj5
zmnqqHCKizhiz`ap!Nl1}gk6ReEt$M=t%sgUn>idjj0^~1cts}HuT2sxugGG>u-&m_
zne_roylh@`#Jeo;L05G$vwn$V85yq5+RG>vU7gj>D8~>?02qZ|Z5u~e37jvo-W~LnAQu(!*r802}Pcoo|HR|ZK(QIqI3=v*5ZouLkb7+J95q{@!Vxk8RT&{c<<@KqpiooR
z#QGj~TIEX_yi$f@)E^u#i)!#o86fH*^eS7*VTU;c?EZ0_HaYsa@C-xuMEEubkMTi<
zarGfCRt`+mC$G^<>gkYpGaM^;{Q{gn(AixR`Q+kI8qTb!)8a-o`&m*HS
zOZB9e@db$C3u|TG=%7s@JFv%lAJV{!t=KM2J9Pi;A
zvT@$z60`Np1TRjNW3gYBk>b)PXt1AcwR#mu4+8>8*EgV$6{M_obO@4n)BXdDcmez=@
z)(oT`;l>Pa_Oa9yo?ybwKF}i<;6yg&r4VHJ`QRWd)Q;TjW2-3$S*Rt$b1!NNdZScM
zO+lNS3`_^ct8*66$<8aLnbh!tp0v53hbM19Uo0H3{lGywpv|{hC*hd&Y?xZ=NL^`c
z9oP5umr#VZ4z2ne=pDSq8Ut>M*7rTl3JsTz1qlL6zI!ruM1upHBvu3l=DKn`Tz|6(
zY-@XGmNI$h%<#p@NyEQcq9ThV-ekb`=(*?f0~I*LG#L`<9S7(^lXf5o8!eN)bHS6Z
zy@r*0Ae9973!sZ}v~l~Xl?En@Wb-#xOpagnpUE_v1-^`oQIr@%_79Zd$DRt
z_hO+Tp$cG=QXwCtk{~-ygyMtun2`7x45|1WCpbcgqbqq1PY8>*R8~n!OvE|l8xGBe
zZ(un?|BwShjxcS4QEP;kZ17}&(-y!s9I;AX5%++~uqAZj~nNSzM;4y8_4&9Kzz38>RqgM)Y_>U19tb!<=v63AnnIN2?>
zrmzAC_E9tFovrQjp@wki9BF1xgA#0u1ZQm9&Y*m_9GE7hc>ock{Mj-lx&vN-O~>k9`o3`$PbzujS`2#(3u;fycWbGcbxfPb}um0uWaUx&E;NwQYs~>DzD(OJL~&
zGuuxrLS>tEpjw=(ELL*OLp)1i#lwm-6C`g9(b#JjAzGCf6!7qzjiWWOFRF
zsBht1wGO3_ua{T4P@%v}Q14(~8+oDXT22O|tiVOouT{N|*HO^s+>E_)`gjFvC2*tw
z|7`Ifa#-kGh}$TU4~M@Onl*Vaf}B_+BaH+kdI}CNff+2xSwo=>*4@CVrmF24Gv$^=&pKt=u1H46T{b392HERyfkX@
zu=fT*XXf-Tsi01X=^Dz7(KU3A)bMx=c#n%F!5e!0X}I#4Hze$H)VE_1!EPP2^Im>o
zIOm;5OM`N|z-~SX=h7U@wV9hcJfU{paVipwc9wI#5S^xzis5NrJT;zFgyyC@DF1_T
z4mJ{jm9xL1q1#16^Jq2lZ?%hAeH%YIWu`!)q1%R(Xp1!T_Xz6z7y`hWK{x4fsNqJa
zunGnZjd3>62%Cr32y`3N@C6L=V?7I7Wp9H~Mi&h|A5&t+a8|=&bS$(14(cGw>R9VB
z*{5ruhQD7>9ap^$L5V*^#XMV-cs4Zfx<^przY3H%Ni;-_lMN0tu870kkgcvAEX70edQ4ZUr$TlrFuE4yttN
zD5>H77$BaSJE!mU0|_sX76QZaJHz6pXGv?iZ@yBvuu8c
z4KEIaoi^v2x&v=K?{iGqZqwRFkO42C@U&7-FcgB5
zOB-vzU^#CD9g9jv{To`6o?fL8WYJR^+!N{`kHDukP;#&
z1Zu<~gc{DQdVx|_35k>z(1CwOg+uMBKSrWBqA2%s`Fr{kodp@c&`rQC-?n*GFj
zL6l`6EQV2rvv@b$W-MNgLL;fm-Tz$_3IL7J9SL_CV-H8D0PLuwVe(N?3)8WZ&ZG~=
zFw_d2qDP1S&l9KFL&N{guaE!#J_!i`jj(v=_z%F2O8)f)g{~!Xn@dS68AbLrDzf}=
z(!#xdxu&I5yxI9PJi@ri;z85{};cLmKlyT
z;v&dE@aeb4`i4EJ5b&9ihY&vq>>VO<2qI8M!xkFF68BuKftZwf8>6
z58N%^pZ3pS`XIgxtSxjbatBApmZ&qhXI6>(ON0IH1c2wZH`eL1Sl>x=GZ)6P4X-J=
z;WyZunrrTu@KGbZwgJf=v@ScATpouVUjrHr2D_CRvs;O6c&_`!cPp{ZQ4hxu%m~8W
z%EFM{$_coRLSD;mWzOtYX0T|E%6ym+PQku6b}L1-7}D&i!ER;V>{ezpe8jPs1O^JHrmbX~wkfi6!Cz=M
zp(u*UZe{zFU_T5U3tO@3tWFf;WTtHjoQ0?GCfWvkGdSF|?3hy8v@}=ynbI@3ssXZf
zqjbnXU93qK4xL?yl9yPSC}KEYIOf_8$^zEbkt*bdcMw=0xV!_GNs#zmQ$YM)>?Cum
znpC$yb#Rd72b?e$yV22(b(TDUUC@vRxLfVd)o!?BjkuMA+7Ge_9~2B9;C6hu>qOLn
zX_l*u070%uxSJc)rAWW9CBT1spS^c#LC)yb07xjM?HD
z$T%cLj4HsM9=HTw1jLGVcoQ;&G@eJ+Ye4UQQfvx@kKs7ngMm=@;fq0swqd|wi~9z^
z7{@RDES3JmVK)qhT-5*{0*y=kUtxiovI3yf!M{#*eG|$Y_%(&_!xI6zFbJ%MLnXIo
zBlM8t9vttd0dC;1gza=Z;C}Trl?ltXEiO&-6EytYkX6(0y{HCUe2GhKa(eJ5SHme&
zeH*F_NV4m!rl~;MAV%-~#NOAioHI!D4FY}QiXq>d|!FM=aoSdvw0X2?lA9TP3^MR%_Jj5#?|QD_YTIy5JZ>~P;Rc4bTeepD2_kevnZqF@&l3$N5cLy=YnNC3va(c}6+q{-ST
z@<%A!9OfH}EB{b&g^Z)bRXK=3cZ(xniZnCKaUj%TQQ=@5fSy1eh8avpCG|9JxKbA~KU;K6m$Ule^12>(cDzrsN;?FrTTclp|s%2_}cO064HG`GJ
zsXjsAN0C}7X*01~k5IQwC8NS+PWM{Dk)*5f$&a%Pl#Pf|7s>viMQ~B{!*GbGv#!Fe
zwnp&r`8vjF%;U4;th7Zw8Up?Y1Q&C&s&yJzUl}aGm1~+Yf?B4*bIS5P{7o@F$SqjD
zk9}(jAGH?PZ49A;0~xjA-9}?^OwKjcG>6rk$l}d*(c|UT)I$73E;@}%zZ_Ttn4$nu
z2bSp+4<1`b+gqoJVM(L`D0CKHFw=@T7^2)PqHf>Lu5$Z~A^j3#8?2p3(;d?eSKn{D
z9IANuFPIIl?|WIm9i6`a50)80-wTg+Pb1fWUva>N9|;#EbWihXTneRxqq4YOpAI^d
z&_1~SGy|(F}lmHvu*`x(q1F<4~w=!@eMG;z#)&4L$}p0=a-YxWk`Tw
z!5f-J-d4YkWw?i%(_M|5&?|Dgt!2=YtE#&qBm@ZP8iD}24bUi*I;4pb>y)
zRG~O`;Id1nFd3KrB`2H9cLyOpm}bHm7LtMiQOxU}`eNF<1Qb5n-ZZ^f$pL=8)s@B&
z&8XZv4NqcT6xPA8a)=NiO?OOh@GiAOH-*mhQnziGtnJL&HvMqr?dEGB3d@G&oDt-W
z1x71xEHi?<0iD>p6%+!I;R)HpVx=E`4TJLEE-MA#m9vuc-Mb71RT)q9PbW|H^1Hvo
z^3(Cx_TpjV$W@UJ)jo}cUYDb!GQ*oZ^O~Em0n5?bL+M*7Zr+$1j$bHf%c}b$c9@xx(i(fN7;;@3xQ+{+VvX$!xNcw=t{WuR4HOSo
zW8J#}MBX_XjrB*C7$qw;9)y+Tvo0-y9v~@XCEY%Q2y0F^=%;Rn<&+5Q)m<}+aPfC&
zu(p&?RBoX5%|OY%8Hb9Jq4SkreI6YgewK%c_7F85+`$@L4?E&~MW#|PlKY0CU_U9?
zci0rXiQqmm8VWwo5~HNxFIZ*-Wx)L{cCZ;8>;dkv)KWI(FDx+1{x!YcnA#}(2(C07YGgM24c1p>!IS>JXK^_Qi>d0vYg7%+~
zFXs~F)Fm(S&Z+0kwY<6mJdT|O%gj;R)3P=+-|9$9fy0_?a3g_>p+=|vjm=sE;6jb+
z=J2_1FbEKAG%9p~i5!ZKtub@mno)U(zjPB;7k)UruQ{iD_~KW+xhfwEida4m6*P%b
zTGhs}An=6jlP2`5!X8*YoFCr#Hn+~&M`6Gg;s8S|P3>ifrKx@j1Xk1=XW|))ZyTmA
zvtB_OXzesH4QmjCGsM{vyY6YTrQeRjk|@?PlX|fo14$b9o|sAe_L)$MoVx3N0Rwpq
zmr51-hIb57)!sj`-
z!AnhBDNf4WK$xYbsiVAQ#C6ZiU{??Y44WMu<4rY;N7jV{#ms1M7&hTt7en#IzR){aIB^!1fnjy9oWD#K5V5T}lc&L$Iv{;PnO<-l-A~X;6J9K^
z2meZFRi;mH{Dwvt0y|qo+XLWNc(%+m5H|s0!~4h{fc!=gJKX>Q%On$t)f*vHsXh_y
zjl7S9b5({F!!60S;*D-Wf2>0~$TU?T_L&|A>u2aeWD>v##
z*=A?Z=Jue?4%22w*rvdK1U8_sqCZ^Yv>#04xWnT%+2?V_;SLkHMcl?kcIhikjN5FE
zF#uOA4la$apo5Eg(CDhJS=5ye+6o5%MO$`5x!WWd4k7)f!(|w06
zK7HJQnKKH5bHf#*7oRV(%n0HmI~)Vheu2;ImFP1xUVx?nK@&WbW{K258PCdrk3+W<
zXTxk@h98S>LPR$@%j}JefJGV^w$A22xflZ_=zgbsNCO)S$A&oP>9@2{8ZT8Bz9#J9
zUc?1<@3F;YMqZCad9(TWYecX4LG1en`%3R#MfAq_yT(OPxyum!O6(nNTplzbZ|*gr
zJII44S0NMc60~06)0e`hSNL>m_;kr4JZ&h|-dc)oE=9MLqSqmMSFCw`3lbiPC5Rn{
zS_xN+37t)B1(4j%a9h~+wS2lSe7b27_avb%aHW-1&x;l0=!}D}#86{U#%!LWJNO(u)9|0y4B>L%4-)!BCU}~AI~!@mT@=`lL6p3(``F?%_+bW5J;xT0g5qN9
zC*tn}{LR2$5r2#EcM|?)JLGkWnh+$GrpuyW@H=cFBw8G2t-^B`dE-IeN
z5M>bJW^vq{jF022Vj9XVZ!mV*Cyd4ONt{o<<{MZ|Fn7m@R;|Mvi5-8Rg4c@BjI!hQ
zvN0ZY#zu!6cwXgQC5BG_qT(`HfRJnh%r<}wN2zJN7DZo%dko7&;yNUh$-ym1h%>*M
z8hkxwviW+9TCl`vMpbGSzIMrQ67Ei6I3E@Rn}~$s5L-mEtxm7Tm=R~hR4Gzf#FHuH-?MBg$#?5wS!T71!p_M
z6ENLicp`4#L|77<+xZj;&D~^CClVUCL-PAM>J11DuL8J|WUi81gM`e-g3RRgdF5p8
zA}Bm;4hJ4NavqYoXEDj_0({{HX6r?Ukj%dZs>;axDiX@beA#gal6eoRQ6O_4Ly&oZ
zA;`R!A;`R+A;>(4uteq;_!N=(6+T5|zJx9kAoD(!tQH#dGiu1($hTVnNT9E;L$r+I
zw;*8{if=$UQoNZVDBi>n6mMY&if?3SDBj8_D87jyDBi{p6yJOsXM2N`e{DM`Z@zXdls^WPr^b74Ipy043J-KcXUVbdkn)|!k@C>8LisLc>q3T*
z@=pR~Wt4v&31yW3H4=sjxO_~r285*iwG1Q5zk-4#%3ngSqH@aP>mqNI^8Zem&zEF=At=vrFJDgi
z76QgI?wDfXw^D_aZ(BmjC(#Ycd^@wXBST2}2Y|9N%6}ILWt4vs31yTw64{BGsO7ga
z1QW=`$c9cknFN05VhDcN!O#eA52Ij>T@1k*-3UvpaUY)|)_9ap5oAokBA
zT1Na|BOy+F^3iiBM9TLv1m$-#1m*h}g7SM98p`)G3d-+g2+9vI1m*W3B;{XW7*YOb
ze2QfLg##(iud=^EncqsvdnL*r4a(E{IkKGcg9L@Q6<~-dqkQvHQa*)lkn$}&D%gT-
zA?3fabO`0|LP8nkA3(w|ly5^#r2Hm^puAi-Y$(5#NuYc?Lr{J*LqqvaMnU=Q3_9AnY%@SyJ{H+c8T@9okQMQpf(qG3-
z&<%tB!z472{#FvkA$4CXs%nLP&2{ZadPiRuZJ|RS4-A*3Hr2-x$#R#glfbwyA{GQ
zcA}f~l^ib?$8;-%WA>;y2;8f0IA@hYIH!lO#yP)Hu8ec8)NdK*+@P;}03l(MPY`Wj
zliNuchfUU~CD^1-A#Ac*A#BpG5H?w>(6PySMPZW-3SpB0g|Nw8gkqDs6=rPmF@0rh
z@@bi>8U>0Dw!Z{6k$L2$vx!@^0O`T8$wqZ@BhO={0_9P#+`uN^B%yzE8qvHqq0|
zVw24ZVUs~s3!7}yH*7Mb5H{JGqz~&GHhD%NY@$c5YHaebzA`p>N?#e9Jg={-J2E!;
zZ=h*lli!h0&nDuh7bqki*{Kj7*`W{~*`*L3*{#s=$cUow$R368NH-f9;*phv;*l#A
zW<2s!a@Ba`H)DB3Z?nAw9uZ70ok!gI21pN%NA%KR+{1+dDUW;<^f&OxIuaUqO|`?bA2hu|^@>(XVtG
zT5BQuKMFJMxL#iwcYH((&&DZ6x4^yx?s(&VxFcJiJg@c18I6}-CUz`=xSkt?)4_iq
z*l8f|LnJhi_el~)lXoN4h`e_xguDZ)7JO~iH{=~u2zfUp=|lR4yjvAQ-YrVc$a}B8
zGV(s4uZ+A;=<7K^XrSqTkuaL3yr5?^-L4RtZc_+NhZRE89fX2$r@o=-3ksp>t|Wc8
z!i<*B>MNt=kH^wd*D${XTAnmfTCNo>UpXEv?bYFtI$B=Gy^;;I{4fa(w7ivs(X{Mg
zO)OfjR0u6~>{@tQt#4@As}NeQO49rF4K3FwgqD3u&uDp#zA{?gq_2#Yx9IC3KuCt_
z=ZH3t^*$0tla<#FjjZbxLe{kkA?tub$a)u{z}={C$ht`(WF1V>H!EbOe?VbI-aGV_
zk@sG0iZ%{a-Q#-j8s>RYMBls;z_~->|;ns%I3!Roj)Gan-~6%DCz&ePvwryuPkJCF8392AT%0
z`W*?Qx#|T9nF^^8uG*mxuG*y#uG&o~utxL^SM5;VqN#1^r`sXI~P&rBUuO-?I^*ld0=Iu!yp4{iFlRP=O&-W&GdEuCs
zCV%3EW8RqLfyw&UCC^6UF9U&ij%4pY>b={BcOv;w@7>z!
zxyeG~Gm<=F+3kg%ko_3$FXHHUFWkZGrlMr5=h($oQblJ(nfDq
z_w7Ci?i<-V_oY__XWVxy*UL1r-{+Llz<>8CWi$i!GaxmWu2BdB+MP8}bc4QO!1W4Y
zz_o;;^hSNdfOjc`0SAQC
zW)^!y0A{%h7dSDCJz@mvH|rZ_8B_?f*t-KFiN0Z$tqNh5Erc~@xmRBqvpk@$j9H%0
z*RKUb!Yt1z-oPyXrIgXkVvp0qEZZqBX4$3?W*Jrpv+Pj4K(EM|qJ_E;ez0_(u$U8+Zy+f1I3wXESP#gXY0#{)O
zrWKwHrWMY{6s2$hlu^hroB@TWFi$9a1H)M1%NfQBPhwtCxR_zD@Qn=n6YYsm-ZeV4
zdQ60=k~{8BtsWh{(Qdop+{tOx`4P^rI6$6OJs~>Dp4a1I{nM+{Bh*x`)<3;EFM30A
zj$_TjYDjwZCo8#oQT5NF*@;*23yZ3+i2g>&I~P@tk5b=u
zruvUNXYt_^Q=j3TC(2Lz<6hYB1=pFEOtVBne2mB{or@CjSo$8sb
zPJO})Z%I3~YPOwOfX&)fNzd$Px=?tedW7%kAGf2i7qtV)bm)N#Tx!pTjh_8ces3{33TN6;W2$by!C|eRx1jH~vw9(IaCqEv=IS2J``|K)h
z5P4>HH6z^%%IT+++bPQNR>{67mz{P{^@r-5)*q?C+99exQiF8|obY__3{MRg1z6MC
z{j;m5nzMAf|C(r#osRiqA-yIM8kkPwe!9;3XwXD+Ag;Y2CpYo<^Dl#e+-XcP{hj*M
zb#CNzhUP7nvi(?l-8ip2LzXgJm$Kbbb{;DSf1S8tnRAlr*%;PJ^;pVaT}rQ|Yzebc
zIM
z&g}wH+;5VF?FUrB`Z=B9kq-T9iS~1RK%`hhoRbBL_7S}$N$w@i$uA|ZBFgC^MSq1L
z(-Dpbn=%fVlRqQ+vFh(`XPV$JM|$xdsQ5UQ^}|q187v$X;dD}uLU?L(Q-`-(;w-s?
zkEkXLt#sz(N1CG%i}1SZ_f`7_fI#X@tGn{A2e#ryK*4qY%TT{Ma*A#u_05hqHxhlF
z%WxObg)TZkl!M61yMburQr8pZII>dviEc{J&_`SsHpjVeT7r%*t1VZ42fu>i@-56y}#%nEQAgTen#!}YTrEIg59>?d_nP)7ew=QLe
zrSu(Ns~JpeQb+!Faqd1sVdjhacDFw+&fTkzu8!P23jcwf-^IDR6@|wA(IS6!GGe-q
zU*vBk9&fU-wY@om6ie13f5Tjg{F=yGMSdltnIgX=vRaYdL{=&C6Cx`W*-2!xB0naw
zRS|nqQjvFbc!L7r-#WanDqPXwZB}?;hxeeu|7}oyS<$H${gR@GS@eEIfqkf%Ge}ex
zKL^_1#t_VafYeS3%ZjKa5dc;8m|PxjMxMc>=u
zJ+1J94sV;nf3%;zq3FMNctZ-`)!{v+@SPprR)rULc#kN&sKeW$@CRV_8R1|AnjPWB
z&Mu-`ld-dt_&rH{bvsPF&<0(9duR9^777ft@Hz+oFIC;OF1}mwYh3&niod5L_cMj>
z?#TVO!nbzheyZ@FJ97V}unjx4$oh4fdO?wmM0P0hdz@T*&`ZN_(_AkRfpCKo{=hGN
zN?2Qi>y}z{01%&XAjY4sCEt}r!}ASYzf$`zQrCJ_YWDyOA6)J^!Y>*NzgCX_)8YMA
z;Uyj39)))S;L3SKR=I9oIiGl+i?3At1{c3V@m?3dT=5UM_+^T}-^IHXzuv`POd9=z
z!a@45JwfyD0CA(byE*GY{FPdCh^*gAvhE@47G>R5`~8Vp^xFBE@XIaquA=dH)%?!z
z!wIsG!Pg6*4>^FY)9-!rI(2AJlC?LBDy0V$#Qx&k8s$PU=u^oLx{@DOd`?_gwmA18
zMY$4|L3ER%SGpXl6<_J%H!6OGi+@n@%Ut{h#V>d9Ud89c?twdgLg4kuZEMAn_ky1VxK&$6g`yk50$q{*f^
zd5034>t>@l=$9=G=ous_yqmf|Awc1!kE;jwdiI9+#}u~}Br}u8%rvA5wo~<|6KK9f
zZ?-A%J0$9KVIrjXV)ETR+h)t~KRUdAReu{qeAbnWZzcXf65mDkZ#ewKJBbe^@nPbR
zC2?y0l)(8eW!5UP*}?cp#h)b83+l-eM0P0hIFV-r*vgZ|7&g=Fv7Rfcl->HZ4-xuQ
z(n2rs=UqH}nsBv7(f5d~5~p{g8a$lz`nS+vL^N1AClffAWzq2OJ96vP-F_;3Bq{PJ
z^{i1nx1OBw_AQD!eS%!Kt0QL_2YyD;n;b`dS~YFRIw8o{s-6MHhz|J^?cq(1-eifG
zE)(yXuT!u75>%z(oNj{j;>GS-t8rfY;N*pN(|Q+7^xlKfH#a@0tI|)%HWPHoY;J!y
z&Hip5*Q@Ko?RX;`;*_!WJuMRC_AK*c(w?_pZaBO%p6z~#S1stQ=J{pQ;hpU{HwA=~
z?fq-EJB7_cd+y%D7v`>^pQ>o%;nij?XI8JDU{6mbFo}*Q>*7z<;vqlVZW3j*!4&5g
z=e91hTK$dltEVShZd%#!{)hISwkg*jHAH!6&>WYSLx5Lx$^>6LH`(^tnl+F|ohARu
z1*yiPuI!m_cT4+rZ$jSjurgXq_<5T0)pDGRuW%1e#?sVT@aI?wa>$$D#1)a$~V3R{H%m@F1A;t(17cNYGU~!;Y
zJ$rnEABQkILhbm{i&nVFSxX50jPT(W_;r#!T<`t^Z=JH-fr7L&>{-XOuE3I(hF7h0
zl6!b`(Rh&BRndfs?;KNn%K;QOg-aLL8^{9_09hB*$Gd41R-2VD;%X5kL(Vhv8*!t}
zV#AHScTPj`W~1_c`)Be?PuVt>-g*F~SB)vXZQ`Xt4o?~-$niJL-#@^+L5?n|<=`*l
zZR;`_I_-h(=D2ua{AhZk`ptGSNL%gM*m2SO>XMjV*uL)Y>|hu1
zPZPJ}eZ=o2uJfMra%-21%jdB9ZRH#9gWcmHzcP|Pc$8$Zp-voNK
z$OKu8=jEPU9)$|Ov0Sg@%wvB#LW<8r!kAe^n|j}P~vNby2qddMNe^anQE(jJ6YCuNAe(ItZNYHOBwt4jpw
z)w9yX5Rtd))ik9A&Pqj?(7F{aKDma_oDm-p+?tXk-%4ALYp)$uIZ
zdxd>-wE*#&4NTm^+{79XFXj$wC^sLH8nVUIVFQ}iE~jpx*<3b2bDt7Gv$<)2VRMcU
zh6nTm7~Y^T!La#s5Eq6&oumuH_Yya*rOO$H#o%EBZa1k2xZSD{+&-lcv}!^VTAx%1
zMr|4dqvl)zGHrTHka@rI0D_QN??};rOifm!kg2^^V`a2;qV`4z96uJLzh+;Q-(lsH
zE#lv~Bp5kMy6!8M3pP%Z#KgPRh_L&DB4Br?La;lcP)v+o5EB)GyA=V!d-Pj^;9n+j
zA^0~*TnN5~yzy#lvxeYLI2d5z33ao$m}mkHT1eRVXowfTT9pS=3R
zxDkAH{L3dJIDk#t`Rq^p3qz%?kote=twTRiuIuG7AemoDhi2bffzVF#X
zHZecMesmu1gT!HT^ilY#DOl}QTO6B;N!3>pZ=$F3z4c4j9J5sBeAbKnR=;HLTf)NZ
zOts|xk!8)9QYf{wi+AXmsHtJoyj)+=?#A@F02Sz3LIX=zqUH-_aW?#;aiyUI(#Z4!
zD{5WlY4SEUC8>}MhWfU6vngkLb<)u2f?SEB;WG1uKN^mwxo|3R&42&-1rCT8S&ig^
z4|zvhZGr?tg1wl+r`{_mlZ&76)oZwbb>()y
z66NAAdi1l6J0tcAr31&ns>y_}-GJxF;mJBx&Ud*`tOdBiDfKU&xVLgPGT$3u)m*&Z
z69~4#k$mdc>?_SH7jN*~8$ePFO)3xwk?HCj=k?&jMs8mJaOBo%&mM3f*&YGSY11xd
zG8)#I3;wowy&B?ryFN=)-{Z0kLa9
z3!^Bg=zmFHRRoO^)iBi5PT4my)h(Q})xaf-mqq1P4&
z$sha`y0$%8^j8t=LIqwI)*1CMub#%Dp>;91*<(Sm^hN%g{Ko9=AM1y)q)~_^p>a2e%-U@=_&C+)*@*N5qUVpJaKz&O&TPJapVpUE?O
zsnBiom~fbyia||SRb;MR$UvB}b-eL1>dG(B_(~E@`tQ)3tsC!B3ct8iCR_B@sl=|ocoNwWcc&iwF%;u$P5l2?#dvxr^5t4D
zc(-WK?$4|;Z)%p)$8@oPsUUi`b1~$z=_CGZIyAg=ZB+M^EOR}prtFu5nb_OcF;6D@
z_zPo7WBzpvp|T7tS9sLG^3p+aoU@E%Bf#lM^+Wf$bXk^EWIPvihA&qyQ9$%(TDnZr
zl$!xOYMtiVWd2Ci6q6RgCp8gjs>R@hibC~&s=4EEC-4rh>`h0-C9ne(i4L#skcK1`
zT7z0){p-LlY+yotd2M%v9+F8?W7MTNH350c9TXXLQlphehzndS%Z}ZNL~bWCq)5kj
z7h!Mbmw4#JiEao+IGK;Hqa`q
zJM>1CN#6eWlFIDmCSS0QUe`KRT?VjTQ};9!EA3ubbn=k8F-ojz#+8v)o|A&a!t2vsc&F!XRlU&1rf>3}1}hAexE(V%4+DMq*J25Q>+
zG6|To+stnXb9P(w8GGXbS```!Z!1L4Tj8NX^gU0DJ}(+p;ru`=rtV7kAJI|aB^c-b
z4@PN$`Cg3A?}`2{d^h(sz7;RGf5x};-?4?i3nTSAF=SsH9TQ#@9gP+)Q~HHb7_Nwp
z3@^Zp{%@GK-+{^dU!xPlw`1n-imKt?SFo#p*u#zaLtZ7CAO2J1hdN&JkBm$fq2XD0
ztls2BQTTsH0nRJYEAd*1UeCUKbSgdy_{-jamn~ii(RADqqIvisME@5K2+^5%GemzK
z{zLTY@c)VaDm*_r0`CqCO4sW3(rdh^180@!FTy{MW`s*CJdGMhZwk+=pve9U@1K{J^ko^vw$YRxdbD?9to8UT|3Nqr
zSbehivaz-jFkO{HA4nt5{hFJ#x3jl7lO?oEx4ht{V!idip)P6ae|h=3roccs@)B!?
zTCy7K?Onm)o(lzg5-EU{K7`6=a9X4>qAQ?9n94t9_2y^g@=HwZ<033pCXDlDTInxm^NW0g
z?WZaiKbwv#u@N(mgvG9^Zr&`OGb1V{K&*8@n3C)Hta{R_0;*!JuCIyZdiv{he#MRO
zHdi_<20i7ZxwgDC=rmtQzOXJ|?7gclt8528om}X*ldpYLYqeh&@GEAftTp*hHdf^5
z#1XS~Ys@OzSi4}#Ck
zU?igpVhE2%(NCxxrZPk^hnZa~xOrOTWt5A%yZc1tD%_}9ZKmeC_*2*QXkwdtGz^T&
zm#cJpULj$0V-+#)X^_nW0~B~2T8=j_Cq>8b?8eRS0i)e-6$5Et_(BKe=7vQs?dO$e
zdcbc&55k$b%_g!D4$ohrTyLs+
z7z9dl^3$v3#5BaHm}n<@dw#I`q+Q1mKL|vsU#iS18t<=p~qp&N(=AeR&D)Ozf+)
zJtxnV=d>)pa(5Imgzr#`WlngBEoYSb0YqV37@c>3tXQ
zO2|aQFm(Bt_nORn0ULC9s5TC}pwhv|^WMmvO(m~MbRI?~AUuJJ?D2QEa5UEsF85^<
z$;H%a$)8?8B)+WE_k5pDX8>B#_nrpk#!j!@N>in@)0S)J{Up_Lf$@d@s~NT$kqfEZUz*&N
ze};zkLjN=ZIBWq~kW@ayv6|__hfDmD$tD0vbP;6FI(p^eQUj@z
z-`lMH+6m3te?Ux5VAlRs-Y?dqUBS)Uq7G7%c{`r3H))s|l4D@ys&JYMi;Q`R=7J~)
z1&~XcbrHc;=nbo^Byqksz1l=}iks3Bk{v!OLcG(cqbaFFKRWYu;NFp%RxJnJi0>6v
zws}!ru8FWC2f-+q)~3A~a{yCw94|~7(cPb%;jl&w!jKApK@(xpvKBLvSUd@VIm^y*
zXf6R^3;ov+FkPHY0o|@bFT`+6UQbzv-c;Xzm1+0Sa~*$uwNwYR>+t!MhL}ma`2MNJ
z1d|uk*Cb4;bzx1-cPpgsw^#YN#KqqxYstHTS1|E17o$xuCEe~(Y7b(Zsf^7NKtp`H
zZrWN8k|CHyFejPc?=b7nYFVA|bgnCYceD6Sjw`0aNiA
zc`yi*wPTgoV_rtPVBHw{Gnm_qp;aor6i%vWUCD0|9j26~3i3gQ%C#QaqiVB8t)aYY
z2$f5q7-SJMbd)!8S{kH>Hp{qbJs%dqNEdKOKTvNBKETM+G5gs;ldl3WaG2UMJ}j3N
zohI84++{{0^KqhdmnD?S!95<3mNmLs=zLj{shK^$0{RN@0i*DrmJ0BGLL`bz&`@qQu6faN7<;1pBIumh6z#%g{RuT}}sb_&IV>mF=yUNaR8z|r?
z%|^`5L(v=x(B7J<1M+OObQ-dnU;#q7Y}{WqVe7^hK)=b_N!vJ|Ok(aP8_tr!=`U~N
z7qPl}yYJ?)Vu%Nt#hi4q-G4o_A~+8>Q}C%erTSD`*v~7QKGR~gUYdLX)FpO4_~@*0
zhE6$_K4SwROJ;nOZG1RzQ&4-A%!qKX?iEuaNZiLOJ05^w0Py6qwm3Hz$kDc12^gER
z!e$v;roqy73bomEK-WK!Kx>+2n^r9i-Pl}T;g}YUhL#Qw6vX|eIQ_=+%T6q_L`Evx
z>MY8~D<8Ni&_j}@uWR74Na|vCWY`98s8-g2wY<3SQgL8DE_9h-W;j%;%`2i@SXugt
zo^??RBZ#7%a~bHRv%=z8++jeSEzILD=2k2x$o-{UYp7M!pmo!*Q2|k4VN<&w-z=lT
zLMzC#&J`FPQIoh=8~AW9x?N|ecHMzZw)P9C+j-thQ%!n*EcP#!PAkP5(PV4YsYnFX
z7XqTXb+1>s-OrW_PKyMcr?EB}wOoK*5KaN>1eArGDY`@oEO3!RDq_>CVsin4H3PV_
zfnB$b54)-pSjQu>0ucd-;}^QI+J!YspGd7XKnnP%S^DVfLe@k%Eh#^6**KQI9Frp^
z1ZQ-pgR-h=6>JhnxUp!dt!!-gm~5AHHvBE~U;;=+A@T)}c*t^FB7+{6j6{{a4*&%!
z-ruaAoER3$VNO`5@?f52P+rR*P*ODA-G{TRQ}JNmjX|>V$i*A8517th0$ym)G<&ov
zpvs?<%n-D@fMthf3jq~g$QaGXU&(bTdwldcGZd0(#2ow!@qa6nY6gto%PnUY-bcho
z{2>WrE0z~An@da*V$^Z*eq&-F%4?E}3YxMwfX1s@#$HSxnlzGx{2fbQ^sb1S7#~bT
z-F{e7;l=M~z=}#zxq`xk*uA7$dJ_;a(Da+lpdS6BADWoyQT>FDce(l~{@NQUGzP^^
zl{1;=Ga@>mtmAa!ODvMrjhvt=vb08J(;I2f-xUsXl{cB102mn+MMG-X7Y#*0@-)yu
zB2F_sfw9wy5?~C!0
zrjEhTd
zb%|+gwRXm+DVMAWW`!;Dy>@#-GHlUGO8Nxn6l}WSv8k|Z!;+f9R${bIovj>-(A2@n
zh;TAJ(NTz&u=x)40^qyXRawaeKxnT|ng+sdfVzAbWIfEy0ns$gV69!4xM{Ns>-J&U
z%azl)Ldl+yi;l_*ac$u=4UV?(Fij9~?rf@Q3#XgXcQnsJve8YaklqDM>JsA7BuX`F
zJ`jA>DX?sHMmQ;KWlB0`-3ru+V{F=)$*@u%HSJ72E1Z%{J2N>jYSYftWZI!*csMly
zms*;tD1U0xPK%};kg3Txln79h80ecc-^`#vH{S>v!7`;DmIQ|CRO{PciuJA76cy7j
zo9duNhqzGQU3Gv=(akIP&$_3Yk2DB7!=r7dB^P4h>caSsVxTiBvfbL_+--b2uQ1>>
zNTX*a)3WRAG*2rSH^Hamqq#qAXzbMp{*0m;S3M| S{;(#JMx0%KwciH*t(B#DL!f?FJlF6Dkgc5r?Q
zHr60%M*R{eSe0B_(wgdFC~Z(cm)9hs(@Q);@}8g1X~{+-nh4E_2@NFnE<~lOPxn_~
zH)l$|kcR5h3oB^Q-Bt}mq@(p3;8CD#92R)XNw~uC&A+`m(DGG21So!(#;U~`a+f6+HhVN!lKqLw=l%MC#^_wPLDdk^}){v*e`9btq;Jmp?Y8IgO7cFth)nrtY05Uvo4FXIoVug
zh))7z(v{HJhJ3HgK`X`pgQtvsDPdC0pQ}+774obO)x&qObw>@dUWK3wwB=t*m-#9Y
zk|TeyY2NZ;^;R&LSb}IUV(QTFf_b28v8}+Sj#S5HI4gf~46nw(04QXo*(ntS6)tGD
zJ*WJ6Tpq1JKKAQ#=B5-;(ikSO$JU~hH#C*cz|_)rSgpMawpN9}M1YkAIi81srBEQI
zxy{85Tm~t&A0)P|Ma4-#7Q759qeam@dZt;|Ri#e~)D}awEUY4oYMQF{gl_hI&uN=1
zhVmd8ATX4{JboA3+pzd*Y3Sc>KYI2wxRVG=fZc~}j
zJ}C{HxX1Tfz|9zb^Buoot%Kh{^1EcKgWoim3h*1qFn+_9FMea`)%w0_tDU0FiPu`!
zF%pf`*gNR+Xoiz0V{B{C?vLSOt(&QeZToSUuGqkIMKPTnKt>q6!*LtagoC6Rrek5q
z7{ObiMT(n@A4Mi!dS9H%fT1
zG*{hniAd^3UO8SDXP)n*BWx)v5p%PCV##8Ld0SQG1I*`I=Gi7CMg*o1*%GEDmijpW
z(_kW|e%7GWVCzR)dVvuTSBPf@8>UrSLAMQ1l+MaK-PFr6&YU9zD&+%Rik^cmh}hO@
zzDtv*?civ+ZAM(rUjijzL77irb8n1p*
z2su}3d`w}{(w#6jtbsCTixIEGdIWONnkHNZj{=0w#k4|ET9+j&okW#!8icS}$$Y37
zDUv?Y7jTMzsC#J#H$;BnQs1_)OO7yJeEte2GQ;BQB&LK)><*M;M9#^TuUz__Cx))P
z`PXc;S6+E#RI+)Wk{E{w}KXqc|*-lC-SfUD!Q3cv+G6;|}RI{{v)yh%0
z9~TsMHj}E(Q&k);@XaJoRi{r37|irZ4d<(d8LGj24TKK(iE1Va3|mr3RTro#rg@*m
zjP>i(w5niY{bC<0sjAykT}P=dS?LW&hUx-fIq;@mwQ@qp!hD}p+G~|^e!!QtU6r1m
z0?5@36G1$ur;@t*t*#?g*FXy3*uk`~25eNt8?zb*tVVoW{dh1%-@(+lC984JYCK#u
z4y7<+(mFL)cC;Ne^7jwy_N?k5s~Y1HjF+PFV1V73)i`W59;O<1rNFj|LI(@%NLKYO
zs~Q(t7!&2q_$aB`81sPf(Br3U(Cv#)r>N>)SM@1qGQrt}y8wGBpP5_A`37B*8yQCWTn&m=
z?nN))UPHiR2(ey+0S^^)1FoV|a%{>1;)Id7F~iz`l$I^o=U{?Fc5LCqLj~rLtLW4e
zOp`_vp!N0)t|5WBGy5D)z-%^_#E}=b2D+MQLZYZI1FcuqwDs9%e*!5MQ~2~yK|0_nIwd##%B7=~
z>Kv<;M9Pl`e20L2tFtd
z*`7fdc!1#hl=)UR`Hl7qcW
zDOiPU(^BS5Sz5H6j0;5W(Vrf3;$XaMsG#uNNC7B{-@e8#K#
zquqB>i?y?aV>x3F4Vp~I7HAhv?9~={0>JFU7Pzu3`&B;5$G`vj73@pBG#kyqmxg3q
zJc)IuV}I72FsdFu>x2_$x6gUmNpt6&Jb%F{FF*CP(-$r};}wfLDqL%m=Si9?=}GJ2
zN{oMh{Oen_-`4sH^9YY`>9*g_a9__oZoe(6#5W9D`oc>5*zK1q|LLx-+n*6uT2FId
z>s-yJR&@Lz{>}Ps{q}PA^?`3$&Qn~gPd;E@3o0>g+>zSv&LH*A55K$*$Z6e9Nh2
zp8LA}_RI7&SGQQ@*aPjp{jTWdK_aIoC+WBYBEYyFyE+i%CaIvx(Q87q*;wZ%tLe+Vj`)!i@?Q8D0Hdo7U?=r4wb&y@=e#0kO@IU*gwcg^s
zzUg3Tc3;;#X+15w7CzQzec;Tl+I#jVhB;2|iWk28b*rW5zHWCd6e{r~etX=}h7Eju
zJ?3b`7B^mX@p@M$TkNXy_RXqA)@7NM3D~SCRAx$$`|(=sYliV<3TDLL|J=1fFfh%@
zc{comFHEcSdAJbz@8jPRCW1;={O@=IV%ML_7kjqdPxZPz+q~Peotp01;vo9=c!Pd5
zUvJ!4W%CYw?b+JyDb{ZsL$zmHSmL(e*)P(DIL=-2^`rt>n`_&+OOdzCwjl=qRT?TsdDk^yt@RID+?lov=+_yXO@=%es1{+kihl?
za;S99ioG4xSY{qJ`#WIyF@T@IQH|U3Zqq|<4s7-~@d+%g+69(LpDhmXU=xZ?SF$x8
z%4bx&Kb)>C>2xLgFr;Wcv%MjD75u&Pk4=9$n_n6l>V|e|zjA4ftovydY(F*BVm=2+MajXDF(B2DwuH(s^*1!dNI`Xx=K?l-
zSgBaq<~z^E1#YL9+%QR14r_ax@W_$xnsjtiuHsjkD_*fwZmHD$SFrO^FlF5R@f!no
zGjFRIAYeWp7Bkk5*Eam9(NLd+CBTxl)N--dwxkzoNvvuusrM6ELo6IE>8@H5-L<6c
zwIo(Omb9mqWaAJZ`unrG5G9stb1eyF*phZ;Nw%3K=-m~>Iuwgq^a_v~@A=e?$RiOT
z-8lP8ygIdArg4Z!x$8S_ORQ*Farq7Y5f4^h&Or$|)R^`G;mx!GL#dBu^E`2umdS~W9!;QtWoW=cCH<5#-aVMV2dr%js3If5Zv#a~xvtq97vcEAM0
zw3~;%%7&@-DDgkV4}t{SY=@2U7Ya^b@Tv?sB?S`aH8%47PORyjhVJ-kTR^uXq~
z!RCA~SNR{3JpZ(J$@5Q#M)IW9>{asI!m)Peab_pFQ=u|O^3?LxWJ{go$=a#?I-caI
zt7Y$!r!lNjvdN((45^GUuWNCR-<0JtyNkJUozKxf^YCYE
z;Nj0N({Tla*I2k9Q60C#hMcO5y!;Zs#)SGLdx4=8w=ctSio$r|s2C1hFjZu`aJ^K!3`a!#HA^d!mpRO?7COHi>7J;{Mn6*I>vCYxM5
z>9y!tG<9+OhhcBU3_IRaa1Y5NXl$OghvZ+^X2}MWw>=lEt24j6NHM6Ihktahun*DsSZ&=8r}~e
zCqGr2kh0Z^1d#?4Uw*YouDZpr(&0&M#Xq{M!xOVnvG3g5$7_3KW7|T6#{S-0=clUq
z_R=~BDKUZQURdddDjW5oL)2wm%)FCtz$pVT@&o-U>T<+Wm(WL1$!0Vw#^GTtc
z4Mfhj1uLQ6#b{)bOZl`bCWF;ePblAeeG;z>u=@J0C<=q2uhLn4b*P~^N0uwcphjy3
zOh3ps%v@s$lUju!%%z4I&A5gcJghaqk&BSUjqwaKh4@l_i*xlIK@*fvMrQ^~wx;1Y
ztw*bXtiIrK$wxW3(r&WQD$6=)u45c$%2i-t-rPVY2VeD&Oa8^OT;U$6D`gd(UMV^(Gc_5>qaHK=
zfGWm^NXimf)_NS$q98Y_dDSBVjMwUh^1Q$`(=+`gdAgJS<4n>t9d@uKcffHdL?WC4
zr@(4F9Hn9}_Fu*EqwPe~Qwu}H)kW@&*`gfKhqXUvM*mvPwxWX(lp|bRrAqP_vMNdC
zxO3Q(A1Tj(c$_{_0wpEIx`M~}ojYXwHg0(0_86Z_&j`DHZfGNXn>x|%pIU$~M~kFF#6dQAmf9rt#w@g!Qtwl?esq7d4}RLM?4WMhU4d3Og$&a7M2L@^v(P7SdY1PR{7HCA%lHMxW7>Tm1}bLif=;IrfYmjK{5})JaVo
zu;bwaH%lLDP=bswQfeuzijI>7%VbU9;sp`PPU~@oOuIZ%aqM|f5mD#WHUPef2Ccib
zTq%Y}6H!fEJ>aAVUvDvq5uHFG#?^Y?iKY{H!_La%Z57T8-W(o-ZkHa>(@@~hk03Y;
z8N#`RF3zj9hAl=Iww0PB$~7n0wiib=b$A{=)wWl;M!3nKj2j`0aFw&zD7&z&0Ldp~
zbhm9UB)V;TQM2)ELO30&)5*HpwwDoZZ9=$y!nmK4*3EXpSxAFBL_jrd1Z+60C;QmM
zWGX0f(sGwXk6itZ2oJBj-ep9|`0gPCrv=CAk!{0CbOI<5&d|gRFFFZW2~V(-fGKjI
zVpmm(oCE|XLg`@yxNT2L*jy(8iwVZ6ZOKVMa7LYmw9zL4g|)+kD#z{`WEyGXp9Dn8
z)J_75P$|wTDfik*K$&Xk$=k@V(0^NK2U*%+tn#^>@v5B!RA&(qI&iCNa+0&UuIyW&
zgt(6F$&iy;i~*!qF>Dc|bArP0Wp5_Cg+V8r`%Fs!0`6=z+dhKWWczCS0{%|O%NBv{
z$)d)KOMOs71U^Lr5K^1#`BXL5_!J#;d_J85)^Q?Fw`bG4CvH#9qptM`rD
zv!etI+%gECni;1a=NX?$;bB+VG;u0ROe01D&Dx37H=LNXTTe9uHYMA-
z4loHuo+!@gSaN4H#Yq#JJdfhecV$a?e^^XDJcjpoDjAp9g!iQo
z#Gxi&^F24!L^f|}Ftlz|mb_W8C~q?@hX@nt;s`cV1hY|~dn>h$gmlqMN$5Y5-eNknq~x3IM5gnEQrQ&2?mHpfP}5=R7Z+?
zafUTxR+pmLT)0UA76dEPI|Eb{J0rE{i@iF)D-`A8-F&!pi``F~$RBNZ$p7ALdoYf;
zRL73o@G@wo8#3|F$)BecPg1>UX-N$mMXrrws5q$PL9eso$S5MhJ{f2`civb7ZI}-S
znq~84^8S|Rm9TE>zz+NZCZ%ubsGio>rpzLo-4R!_w%Iw?;0YEOrYA<=)fVBN0{u@(
zEN-9!dBkR)Dq&4gS}q$pk)rf)S6-{rJc6DYx+p_gjV$;uFOE)Dr0Sj)uiQQ$7FT6X
z1Zak)M(E5y;?!G&@P+}62Z67b_W#(`iXyE1aqY
zg{l|46Z?x4Vb_o?$lQL{*J)B`>1=VbK$iKOGg5~BGI530UaZ3HbRMgst~)Ll&)c2Ze7mpabLWAryYT;a{%T9HE)agKNNl0U7%BepcXiMj2ID_koxLw8_NzE^}
zQ~LbYaQu`LmKyDiCs46#f^g$AUAQ1gDOI08SbFopOx=VMRP!?FG14#zS&Qg-S_m;V
zv=Gv@NxJf-30sj(*NG8?VIYT~x+AC3j@_YCrecgY@UM1TT-Ey2$k9ZhR``$#&75i}
zw)F(}GTMsJP~*dGxqI=$SLpy}{51u0k)-e~pJ-PP|cye0@|t9QjtfQIzxkFXOq8Z^d+{
z-=22mA_vP8t{jT7TG;Ns%p1jzzkj8{Y2GM)eCMyMPP|c6=ZL&*@|?1=I)BP_j(nL%
z3eFIpvoG^V@#DvSZ#nTu;lN;g`E%A=JW>oYmlLlP{q{ZAKfF@(^;3E2);I&zi_bNm7-eqI5^EK#gA9HzO$bwTyJ^a;KVCM
zEj)+8o?7rq5hg}{Y^|GDiXUG$Y+q~(D(41Q4zCoo@MDKFyi)Y_epe2!loq}of7r_5
zl~UpB^YVzw>ppqqT;=F)UMYV3bJttEQq=mxKePU!a;m+rJYwarLy3M{KWV*1YU=CO
zA<-xqRGK0*ZYsYlXcWlP>XvQti#c@spYi>y3GuxMl#hQPlK5grIaS@}+;L#XGGC86
zcO2MJ4nKDbDM&Dv^ONs4rykf*ZaiC@GZ5^ka%)^Iu%mw4xZT{FV8^ex8em1G54aj&
zMb-X`4?$v|6Lj&<*(D+d8>+7RT^ZQ0g|7{+3~Z<}Pq|iM!yk%4{MN)V>bs9x4X~iv
z`GW?BFBVjpA?MNr3#z}*4?rTn6&6&^$6Z^nV3DsqnPZfGdrrf_7YhoiO&_-}Snyh|
zwu&=}^>KkHWpM|XT
z`CIM__Upf%;p%I(BN%@4L#rLu`{4^#+H*Bue!ITJcD6(^kzS~5^s)oks4`3t?#&(S
z!#nI6YxM0-{1Z&%@5sHQnj`(~+(Cj4qb>Fyti6gYcDr;7g#%?vDvjQc%c^PSx>po1
zYB^5k(*>O}G;2Rnb8}id)5++%CU;h(HevYcjHiUTF82h97hFu5ZAb>1BwNztZIc@B
z0e{4++~zqg&3R5(*XsJjTkxu&u1{k?Xy{I@A3Szm>ql7YhiV+#kK^k45sm7H?c6r@
zT5UeS9uh4M052Ui!XZMA#EJ>wj*Iit#UIKxiL
z8Ci4yAghZ(21Kkce70OtyA^hcc_m9$KC6jreU-;v9Pi{IuIgu_4^$%gG5$
z>Sh2m7N@BISq@JXQ1f&5t|d!_dASuTDziKX(6nViTfBg!H3;v?a+kh?tlAV@9NQ(<
zx>=d4YGMmZII6`-K60qHI>~nMU7eF3_AxruhV-s(*40kqEm
zZb%0oBXxKYuJ(c(4lItsgf8@jICV`z9}P-L6_GZcH1@;E9iV|n=1HIjYtyk89KhJK1F`G}(JvtZX)Ih}p61+%
zi!wN6x@)DiRV3RtbcC&lL
zU@t;VmoH9$l1&=Q`=cbNtplw=sAWuQxUW$-qhyBT1l$bAPS`xEcp&II5psq$>VJe@)(=0$zx2%CXcr|v|>a))bdzWX7YIQ0ie~c0Qe)z
zWA-|cK6R1{F;s1-ABy_GS5k625xEjOeR?fKB3HXYlF4Mqn14FLPdbPv)r;wd^lWe=;!RMdolge0_FMC@
ziX>v1%k)SvF`Y^F1SMG!iRtl+bf=O%K~z?R-tDDGhWG@1S&?y6EG>%vq=Ni9(Z3Y@
zDFt~0t?LAQ9pN&RCg6@j=|luMS7n$W$A88I`F1C9wj&-6u^?BKnILaF0Ewf0mvL8;
zoV{sSN$yDcFszV4hSSeZ*B$mB5A|YFRc4FH1D%)Z<`Kjd6Zs+0?e{dyB_qNDy8SMN
z%-y>kDsV$M)N_fd%;u5XxGc`Wf>05Dzm|nGG|o
ziA)M$ |