std: fix typos (#20560)

This commit is contained in:
Jora Troosh 2024-07-10 00:25:42 +03:00 committed by GitHub
parent 49f2cca872
commit 13070448f5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
54 changed files with 95 additions and 95 deletions

View file

@ -3498,7 +3498,7 @@ void do_a_thing(struct Foo *foo) {
<p> <p>
As a motivating example, consider the statement {#syntax#}const x: u32 = 42;{#endsyntax#}. The type As a motivating example, consider the statement {#syntax#}const x: u32 = 42;{#endsyntax#}. The type
annotation here provides a result type of {#syntax#}u32{#endsyntax#} to the initialization expression annotation here provides a result type of {#syntax#}u32{#endsyntax#} to the initialization expression
{#syntax#}42{#endsyntax#}, instructing the compiler to coerce this integer (initally of type {#syntax#}42{#endsyntax#}, instructing the compiler to coerce this integer (initially of type
{#syntax#}comptime_int{#endsyntax#}) to this type. We will see more examples shortly. {#syntax#}comptime_int{#endsyntax#}) to this type. We will see more examples shortly.
</p> </p>
<p> <p>
@ -6833,7 +6833,7 @@ coding style.
<li>utils, misc, or somebody's initials</li> <li>utils, misc, or somebody's initials</li>
</ul> </ul>
<p>Everything is a value, all types are data, everything is context, all logic manages state. <p>Everything is a value, all types are data, everything is context, all logic manages state.
Nothing is communicated by using a word that applies to all types.</p> Nothing is communicated by using a word that applies to all types.</p>
<p>Temptation to use "utilities", "miscellaneous", or somebody's initials <p>Temptation to use "utilities", "miscellaneous", or somebody's initials
is a failure to categorize, or more commonly, overcategorization. Such is a failure to categorize, or more commonly, overcategorization. Such
declarations can live at the root of a module that needs them with no declarations can live at the root of a module that needs them with no

View file

@ -2,7 +2,7 @@ const std = @import("std");
const expect = std.testing.expect; const expect = std.testing.expect;
const Tuple = struct { u8, u8 }; const Tuple = struct { u8, u8 };
test "coercion from homogenous tuple to array" { test "coercion from homogeneous tuple to array" {
const tuple: Tuple = .{ 5, 6 }; const tuple: Tuple = .{ 5, 6 };
const array: [2]u8 = tuple; const array: [2]u8 = tuple;
_ = array; _ = array;

View file

@ -20,8 +20,8 @@ pub fn deinit(self: *@This()) void {
self.* = undefined; self.* = undefined;
} }
pub fn ensureTotalCapacity(self: *@This(), bit_capcity: usize) Allocator.Error!void { pub fn ensureTotalCapacity(self: *@This(), bit_capacity: usize) Allocator.Error!void {
const byte_capacity = (bit_capcity + 7) >> 3; const byte_capacity = (bit_capacity + 7) >> 3;
try self.bytes.ensureTotalCapacity(byte_capacity); try self.bytes.ensureTotalCapacity(byte_capacity);
} }

View file

@ -2055,7 +2055,7 @@ pub fn dependencyFromBuildZig(
} }
const full_path = b.pathFromRoot("build.zig.zon"); const full_path = b.pathFromRoot("build.zig.zon");
debug.panic("'{}' is not a build.zig struct of a dependecy in '{s}'", .{ build_zig, full_path }); debug.panic("'{}' is not a build.zig struct of a dependency in '{s}'", .{ build_zig, full_path });
} }
fn userValuesAreSame(lhs: UserValue, rhs: UserValue) bool { fn userValuesAreSame(lhs: UserValue, rhs: UserValue) bool {

View file

@ -887,7 +887,7 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.nv)] = .{ result[@intFromEnum(Feature.nv)] = .{
.llvm_name = "nv", .llvm_name = "nv",
.description = "Enable v8.4-A Nested Virtualization Enchancement (FEAT_NV, FEAT_NV2)", .description = "Enable v8.4-A Nested Virtualization Enhancement (FEAT_NV, FEAT_NV2)",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.outline_atomics)] = .{ result[@intFromEnum(Feature.outline_atomics)] = .{

View file

@ -446,7 +446,7 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.fix_cmse_cve_2021_35465)] = .{ result[@intFromEnum(Feature.fix_cmse_cve_2021_35465)] = .{
.llvm_name = "fix-cmse-cve-2021-35465", .llvm_name = "fix-cmse-cve-2021-35465",
.description = "Mitigate against the cve-2021-35465 security vulnurability", .description = "Mitigate against the cve-2021-35465 security vulnerability",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.fix_cortex_a57_aes_1742098)] = .{ result[@intFromEnum(Feature.fix_cortex_a57_aes_1742098)] = .{

View file

@ -214,7 +214,7 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.dsp_silan)] = .{ result[@intFromEnum(Feature.dsp_silan)] = .{
.llvm_name = "dsp_silan", .llvm_name = "dsp_silan",
.description = "Enable DSP Silan instrutions", .description = "Enable DSP Silan instructions",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.dspe60)] = .{ result[@intFromEnum(Feature.dspe60)] = .{
@ -224,7 +224,7 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.dspv2)] = .{ result[@intFromEnum(Feature.dspv2)] = .{
.llvm_name = "dspv2", .llvm_name = "dspv2",
.description = "Enable DSP V2.0 instrutions", .description = "Enable DSP V2.0 instructions",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.e1)] = .{ result[@intFromEnum(Feature.e1)] = .{
@ -243,7 +243,7 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.edsp)] = .{ result[@intFromEnum(Feature.edsp)] = .{
.llvm_name = "edsp", .llvm_name = "edsp",
.description = "Enable DSP instrutions", .description = "Enable DSP instructions",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.elrw)] = .{ result[@intFromEnum(Feature.elrw)] = .{
@ -333,12 +333,12 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.hwdiv)] = .{ result[@intFromEnum(Feature.hwdiv)] = .{
.llvm_name = "hwdiv", .llvm_name = "hwdiv",
.description = "Enable divide instrutions", .description = "Enable divide instructions",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.istack)] = .{ result[@intFromEnum(Feature.istack)] = .{
.llvm_name = "istack", .llvm_name = "istack",
.description = "Enable interrput attribute", .description = "Enable interrupt attribute",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.java)] = .{ result[@intFromEnum(Feature.java)] = .{
@ -362,7 +362,7 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.multiple_stld)] = .{ result[@intFromEnum(Feature.multiple_stld)] = .{
.llvm_name = "multiple_stld", .llvm_name = "multiple_stld",
.description = "Enable multiple load/store instrutions", .description = "Enable multiple load/store instructions",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.nvic)] = .{ result[@intFromEnum(Feature.nvic)] = .{
@ -372,7 +372,7 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.pushpop)] = .{ result[@intFromEnum(Feature.pushpop)] = .{
.llvm_name = "pushpop", .llvm_name = "pushpop",
.description = "Enable push/pop instrutions", .description = "Enable push/pop instructions",
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
result[@intFromEnum(Feature.smart)] = .{ result[@intFromEnum(Feature.smart)] = .{

View file

@ -823,14 +823,14 @@ pub const all_features = blk: {
}; };
result[@intFromEnum(Feature.zcmp)] = .{ result[@intFromEnum(Feature.zcmp)] = .{
.llvm_name = "zcmp", .llvm_name = "zcmp",
.description = "'Zcmp' (sequenced instuctions for code-size reduction)", .description = "'Zcmp' (sequenced instructions for code-size reduction)",
.dependencies = featureSet(&[_]Feature{ .dependencies = featureSet(&[_]Feature{
.zca, .zca,
}), }),
}; };
result[@intFromEnum(Feature.zcmt)] = .{ result[@intFromEnum(Feature.zcmt)] = .{
.llvm_name = "zcmt", .llvm_name = "zcmt",
.description = "'Zcmt' (table jump instuctions for code-size reduction)", .description = "'Zcmt' (table jump instructions for code-size reduction)",
.dependencies = featureSet(&[_]Feature{ .dependencies = featureSet(&[_]Feature{
.zca, .zca,
.zicsr, .zicsr,

View file

@ -170,7 +170,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// operations. /// operations.
/// Invalidates pre-existing pointers to elements at and after `index`. /// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be /// Invalidates all pre-existing element pointers if capacity must be
/// increased to accomodate the new elements. /// increased to accommodate the new elements.
/// Asserts that the index is in bounds or equal to the length. /// Asserts that the index is in bounds or equal to the length.
pub fn addManyAt(self: *Self, index: usize, count: usize) Allocator.Error![]T { pub fn addManyAt(self: *Self, index: usize, count: usize) Allocator.Error![]T {
const new_len = try addOrOom(self.items.len, count); const new_len = try addOrOom(self.items.len, count);
@ -227,7 +227,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// This operation is O(N). /// This operation is O(N).
/// Invalidates pre-existing pointers to elements at and after `index`. /// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be /// Invalidates all pre-existing element pointers if capacity must be
/// increased to accomodate the new elements. /// increased to accommodate the new elements.
/// Asserts that the index is in bounds or equal to the length. /// Asserts that the index is in bounds or equal to the length.
pub fn insertSlice( pub fn insertSlice(
self: *Self, self: *Self,
@ -740,7 +740,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// operations. /// operations.
/// Invalidates pre-existing pointers to elements at and after `index`. /// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be /// Invalidates all pre-existing element pointers if capacity must be
/// increased to accomodate the new elements. /// increased to accommodate the new elements.
/// Asserts that the index is in bounds or equal to the length. /// Asserts that the index is in bounds or equal to the length.
pub fn addManyAt( pub fn addManyAt(
self: *Self, self: *Self,
@ -776,7 +776,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// This operation is O(N). /// This operation is O(N).
/// Invalidates pre-existing pointers to elements at and after `index`. /// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be /// Invalidates all pre-existing element pointers if capacity must be
/// increased to accomodate the new elements. /// increased to accommodate the new elements.
/// Asserts that the index is in bounds or equal to the length. /// Asserts that the index is in bounds or equal to the length.
pub fn insertSlice( pub fn insertSlice(
self: *Self, self: *Self,

View file

@ -2523,7 +2523,7 @@ pub const F = struct {
/// add signature from same file (used by dyld for shared libs) /// add signature from same file (used by dyld for shared libs)
pub const ADDFILESIGS = 61; pub const ADDFILESIGS = 61;
/// used in conjunction with F.NOCACHE to indicate that DIRECT, synchronous writes /// used in conjunction with F.NOCACHE to indicate that DIRECT, synchronous writes
/// should not be used (i.e. its ok to temporaily create cached pages) /// should not be used (i.e. its ok to temporarily create cached pages)
pub const NODIRECT = 62; pub const NODIRECT = 62;
///Get the protection class of a file from the EA, returns int ///Get the protection class of a file from the EA, returns int
pub const GETPROTECTIONCLASS = 63; pub const GETPROTECTIONCLASS = 63;

View file

@ -581,7 +581,7 @@ pub const SectionHeaderFlags = packed struct {
/// This is valid for object files only. /// This is valid for object files only.
LNK_INFO: u1 = 0, LNK_INFO: u1 = 0,
_reserverd_2: u1 = 0, _reserved_2: u1 = 0,
/// The section will not become part of the image. /// The section will not become part of the image.
/// This is valid only for object files. /// This is valid only for object files.

View file

@ -70,8 +70,8 @@ pub const store = struct {
} }
}; };
/// Container defines header/footer arround deflate bit stream. Gzip and zlib /// Container defines header/footer around deflate bit stream. Gzip and zlib
/// compression algorithms are containers arround deflate bit stream body. /// compression algorithms are containers around deflate bit stream body.
const Container = @import("flate/container.zig").Container; const Container = @import("flate/container.zig").Container;
const std = @import("std"); const std = @import("std");
const testing = std.testing; const testing = std.testing;

View file

@ -109,7 +109,7 @@ const ReadBlock = struct {
len: usize, len: usize,
}; };
/// Returns position of continous read block data. /// Returns position of continuous read block data.
fn readBlock(self: *Self, max: usize) ReadBlock { fn readBlock(self: *Self, max: usize) ReadBlock {
const r = self.rp & mask; const r = self.rp & mask;
const w = self.wp & mask; const w = self.wp & mask;

View file

@ -26,7 +26,7 @@ pub fn add(self: *Self, data: []const u8, pos: u16) u16 {
return self.set(h, pos); return self.set(h, pos);
} }
// Retruns previous location with the same hash value given the current // Returns previous location with the same hash value given the current
// position. // position.
pub fn prev(self: *Self, pos: u16) u16 { pub fn prev(self: *Self, pos: u16) u16 {
return self.chain[pos]; return self.chain[pos];

View file

@ -84,7 +84,7 @@ pub fn match(self: *Self, prev_pos: u16, curr_pos: u16, min_len: u16) u16 {
const prev_lh = self.buffer[prev_pos..][0..max_len]; const prev_lh = self.buffer[prev_pos..][0..max_len];
const curr_lh = self.buffer[curr_pos..][0..max_len]; const curr_lh = self.buffer[curr_pos..][0..max_len];
// If we alread have match (min_len > 0), // If we already have match (min_len > 0),
// test the first byte above previous len a[min_len] != b[min_len] // test the first byte above previous len a[min_len] != b[min_len]
// and then all the bytes from that position to zero. // and then all the bytes from that position to zero.
// That is likely positions to find difference than looping from first bytes. // That is likely positions to find difference than looping from first bytes.

View file

@ -110,7 +110,7 @@ pub fn show(t: Token) void {
} }
} }
// Retruns index in match_lengths table for each length in range 0-255. // Returns index in match_lengths table for each length in range 0-255.
const match_lengths_index = [_]u8{ const match_lengths_index = [_]u8{
0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12,

View file

@ -57,7 +57,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type {
/// it may be some extra zero bits in buffer. /// it may be some extra zero bits in buffer.
pub inline fn fill(self: *Self, nice: u6) !void { pub inline fn fill(self: *Self, nice: u6) !void {
if (self.nbits >= nice and nice != 0) { if (self.nbits >= nice and nice != 0) {
return; // We have enought bits return; // We have enough bits
} }
// Read more bits from forward reader // Read more bits from forward reader
@ -96,7 +96,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type {
pub const flag = struct { pub const flag = struct {
pub const peek: u3 = 0b001; // dont advance internal buffer, just get bits, leave them in buffer pub const peek: u3 = 0b001; // dont advance internal buffer, just get bits, leave them in buffer
pub const buffered: u3 = 0b010; // assume that there is no need to fill, fill should be called before pub const buffered: u3 = 0b010; // assume that there is no need to fill, fill should be called before
pub const reverse: u3 = 0b100; // bit reverse readed bits pub const reverse: u3 = 0b100; // bit reverse read bits
}; };
/// Alias for readF(U, 0). /// Alias for readF(U, 0).
@ -133,7 +133,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type {
try self.fill(n); try self.fill(n);
return @truncate(self.bits); return @truncate(self.bits);
}, },
flag.buffered => { // no fill, assume that buffer has enought bits flag.buffered => { // no fill, assume that buffer has enough bits
const u: U = @truncate(self.bits); const u: U = @truncate(self.bits);
try self.shift(n); try self.shift(n);
return u; return u;
@ -212,7 +212,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type {
} }
/// Read deflate fixed fixed code. /// Read deflate fixed fixed code.
/// Reads first 7 bits, and then mybe 1 or 2 more to get full 7,8 or 9 bit code. /// Reads first 7 bits, and then maybe 1 or 2 more to get full 7,8 or 9 bit code.
/// ref: https://datatracker.ietf.org/doc/html/rfc1951#page-12 /// ref: https://datatracker.ietf.org/doc/html/rfc1951#page-12
/// Lit Value Bits Codes /// Lit Value Bits Codes
/// --------- ---- ----- /// --------- ---- -----

View file

@ -48,7 +48,7 @@ pub fn BlockWriter(comptime WriterType: type) type {
/// Should be called only when bit stream is at byte boundary. /// Should be called only when bit stream is at byte boundary.
/// ///
/// That is after final block; when last byte could be incomplete or /// That is after final block; when last byte could be incomplete or
/// after stored block; which is aligned to the byte bounday (it has x /// after stored block; which is aligned to the byte boundary (it has x
/// padding bits after first 3 bits). /// padding bits after first 3 bits).
pub fn flush(self: *Self) Error!void { pub fn flush(self: *Self) Error!void {
try self.bit_writer.flush(); try self.bit_writer.flush();

View file

@ -1,5 +1,5 @@
pub const deflate = struct { pub const deflate = struct {
// Number of tokens to accumlate in deflate before starting block encoding. // Number of tokens to accumulate in deflate before starting block encoding.
// //
// In zlib this depends on memlevel: 6 + memlevel, where default memlevel is // In zlib this depends on memlevel: 6 + memlevel, where default memlevel is
// 8 and max 9 that gives 14 or 15 bits. // 8 and max 9 that gives 14 or 15 bits.

View file

@ -183,7 +183,7 @@ fn Deflate(comptime container: Container, comptime WriterType: type, comptime Bl
// Write match from previous position. // Write match from previous position.
step = try self.addMatch(m) - 1; // we already advanced 1 from previous position step = try self.addMatch(m) - 1; // we already advanced 1 from previous position
} else { } else {
// No match at previous postition. // No match at previous position.
// Write previous literal if any, and remember this literal. // Write previous literal if any, and remember this literal.
try self.addPrevLiteral(); try self.addPrevLiteral();
self.prev_literal = literal; self.prev_literal = literal;
@ -268,9 +268,9 @@ fn Deflate(comptime container: Container, comptime WriterType: type, comptime Bl
fn flushTokens(self: *Self, flush_opt: FlushOption) !void { fn flushTokens(self: *Self, flush_opt: FlushOption) !void {
// Pass tokens to the token writer // Pass tokens to the token writer
try self.block_writer.write(self.tokens.tokens(), flush_opt == .final, self.win.tokensBuffer()); try self.block_writer.write(self.tokens.tokens(), flush_opt == .final, self.win.tokensBuffer());
// Stored block ensures byte aligment. // Stored block ensures byte alignment.
// It has 3 bits (final, block_type) and then padding until byte boundary. // It has 3 bits (final, block_type) and then padding until byte boundary.
// After that everyting is aligned to the boundary in the stored block. // After that everything is aligned to the boundary in the stored block.
// Empty stored block is Ob000 + (0-7) bits of padding + 0x00 0x00 0xFF 0xFF. // Empty stored block is Ob000 + (0-7) bits of padding + 0x00 0x00 0xFF 0xFF.
// Last 4 bytes are byte aligned. // Last 4 bytes are byte aligned.
if (flush_opt == .flush) { if (flush_opt == .flush) {
@ -572,7 +572,7 @@ test "tokenization" {
} }
} }
// Tests that tokens writen are equal to expected token list. // Tests that tokens written are equal to expected token list.
const TestTokenWriter = struct { const TestTokenWriter = struct {
const Self = @This(); const Self = @This();
@ -655,7 +655,7 @@ test "file tokenization" {
const TokenWriter = TokenDecoder(@TypeOf(writer)); const TokenWriter = TokenDecoder(@TypeOf(writer));
var cmp = try Deflate(.raw, WriterType, TokenWriter).init(writer, .{ .level = level }); var cmp = try Deflate(.raw, WriterType, TokenWriter).init(writer, .{ .level = level });
// Stream uncompressed `orignal` data to the compressor. It will // Stream uncompressed `original` data to the compressor. It will
// produce tokens list and pass that list to the TokenDecoder. This // produce tokens list and pass that list to the TokenDecoder. This
// TokenDecoder uses CircularBuffer from inflate to convert list of // TokenDecoder uses CircularBuffer from inflate to convert list of
// tokens back to the uncompressed stream. // tokens back to the uncompressed stream.

View file

@ -132,7 +132,7 @@ fn HuffmanDecoder(
if (n > max) max = n; if (n > max) max = n;
count[n] += 1; count[n] += 1;
} }
if (max == 0) // emtpy tree if (max == 0) // empty tree
return; return;
// check for an over-subscribed or incomplete set of lengths // check for an over-subscribed or incomplete set of lengths
@ -255,7 +255,7 @@ test "encode/decode literals" {
for (1..286) |j| { // for all different number of codes for (1..286) |j| { // for all different number of codes
var enc: LiteralEncoder = .{}; var enc: LiteralEncoder = .{};
// create freqencies // create frequencies
var freq = [_]u16{0} ** 286; var freq = [_]u16{0} ** 286;
freq[256] = 1; // ensure we have end of block code freq[256] = 1; // ensure we have end of block code
for (&freq, 1..) |*f, i| { for (&freq, 1..) |*f, i| {
@ -263,7 +263,7 @@ test "encode/decode literals" {
f.* = @intCast(i); f.* = @intCast(i);
} }
// encoder from freqencies // encoder from frequencies
enc.generate(&freq, 15); enc.generate(&freq, 15);
// get code_lens from encoder // get code_lens from encoder

View file

@ -168,7 +168,7 @@ pub fn HuffmanEncoder(comptime size: usize) type {
while (true) { while (true) {
var l = &levels[level]; var l = &levels[level];
if (l.next_pair_freq == math.maxInt(i32) and l.next_char_freq == math.maxInt(i32)) { if (l.next_pair_freq == math.maxInt(i32) and l.next_char_freq == math.maxInt(i32)) {
// We've run out of both leafs and pairs. // We've run out of both leaves and pairs.
// End all calculations for this level. // End all calculations for this level.
// To make sure we never come back to this level or any lower level, // To make sure we never come back to this level or any lower level,
// set next_pair_freq impossibly large. // set next_pair_freq impossibly large.

View file

@ -99,7 +99,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp
fn storedBlock(self: *Self) !bool { fn storedBlock(self: *Self) !bool {
self.bits.alignToByte(); // skip padding until byte boundary self.bits.alignToByte(); // skip padding until byte boundary
// everyting after this is byte aligned in stored block // everything after this is byte aligned in stored block
var len = try self.bits.read(u16); var len = try self.bits.read(u16);
const nlen = try self.bits.read(u16); const nlen = try self.bits.read(u16);
if (len != ~nlen) return error.WrongStoredBlockNlen; if (len != ~nlen) return error.WrongStoredBlockNlen;
@ -155,7 +155,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp
fn dynamicBlockHeader(self: *Self) !void { fn dynamicBlockHeader(self: *Self) !void {
const hlit: u16 = @as(u16, try self.bits.read(u5)) + 257; // number of ll code entries present - 257 const hlit: u16 = @as(u16, try self.bits.read(u5)) + 257; // number of ll code entries present - 257
const hdist: u16 = @as(u16, try self.bits.read(u5)) + 1; // number of distance code entries - 1 const hdist: u16 = @as(u16, try self.bits.read(u5)) + 1; // number of distance code entries - 1
const hclen: u8 = @as(u8, try self.bits.read(u4)) + 4; // hclen + 4 code lenths are encoded const hclen: u8 = @as(u8, try self.bits.read(u4)) + 4; // hclen + 4 code lengths are encoded
if (hlit > 286 or hdist > 30) if (hlit > 286 or hdist > 30)
return error.InvalidDynamicBlockHeader; return error.InvalidDynamicBlockHeader;
@ -180,7 +180,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp
return error.InvalidDynamicBlockHeader; return error.InvalidDynamicBlockHeader;
} }
// literal code lengts to literal decoder // literal code lengths to literal decoder
try self.lit_dec.generate(dec_lens[0..hlit]); try self.lit_dec.generate(dec_lens[0..hlit]);
// distance code lengths to distance decoder // distance code lengths to distance decoder

View file

@ -977,7 +977,7 @@ pub const rsa = struct {
// the hash function (2^61 - 1 octets for SHA-1), output // the hash function (2^61 - 1 octets for SHA-1), output
// "inconsistent" and stop. // "inconsistent" and stop.
// All the cryptographic hash functions in the standard library have a limit of >= 2^61 - 1. // All the cryptographic hash functions in the standard library have a limit of >= 2^61 - 1.
// Even then, this check is only there for paranoia. In the context of TLS certifcates, emBit cannot exceed 4096. // Even then, this check is only there for paranoia. In the context of TLS certificates, emBit cannot exceed 4096.
if (emBit >= 1 << 61) return error.InvalidSignature; if (emBit >= 1 << 61) return error.InvalidSignature;
// emLen = \ceil(emBits/8) // emLen = \ceil(emBits/8)

View file

@ -41,7 +41,7 @@ pub const OverflowError = error{Overflow};
/// Invalid modulus. Modulus must be odd. /// Invalid modulus. Modulus must be odd.
pub const InvalidModulusError = error{ EvenModulus, ModulusTooSmall }; pub const InvalidModulusError = error{ EvenModulus, ModulusTooSmall };
/// Exponentation with a null exponent. /// Exponentiation with a null exponent.
/// Exponentiation in cryptographic protocols is almost always a sign of a bug which can lead to trivial attacks. /// Exponentiation in cryptographic protocols is almost always a sign of a bug which can lead to trivial attacks.
/// Therefore, this module returns an error when a null exponent is encountered, encouraging applications to handle this case explicitly. /// Therefore, this module returns an error when a null exponent is encountered, encouraging applications to handle this case explicitly.
pub const NullExponentError = error{NullExponent}; pub const NullExponentError = error{NullExponent};

View file

@ -379,7 +379,7 @@ fn Kyber(comptime p: Params) type {
/// Create a new key pair. /// Create a new key pair.
/// If seed is null, a random seed will be generated. /// If seed is null, a random seed will be generated.
/// If a seed is provided, the key pair will be determinsitic. /// If a seed is provided, the key pair will be deterministic.
pub fn create(seed_: ?[seed_length]u8) !KeyPair { pub fn create(seed_: ?[seed_length]u8) !KeyPair {
const seed = seed_ orelse sk: { const seed = seed_ orelse sk: {
var random_seed: [seed_length]u8 = undefined; var random_seed: [seed_length]u8 = undefined;
@ -1253,7 +1253,7 @@ const Poly = struct {
t |= @as(T, buf[batch_bytes * i + j]) << (8 * j); t |= @as(T, buf[batch_bytes * i + j]) << (8 * j);
} }
// Accumelate `a's and `b's together by masking them out, shifting // Accumulate `a's and `b's together by masking them out, shifting
// and adding. For η=3, we have d = a + a + a + 8(b + b + b) + // and adding. For η=3, we have d = a + a + a + 8(b + b + b) +
var d: T = 0; var d: T = 0;
inline for (0..eta) |j| { inline for (0..eta) |j| {

View file

@ -487,7 +487,7 @@ pub const Box = struct {
/// A key pair. /// A key pair.
pub const KeyPair = X25519.KeyPair; pub const KeyPair = X25519.KeyPair;
/// Compute a secret suitable for `secretbox` given a recipent's public key and a sender's secret key. /// Compute a secret suitable for `secretbox` given a recipient's public key and a sender's secret key.
pub fn createSharedSecret(public_key: [public_length]u8, secret_key: [secret_length]u8) (IdentityElementError || WeakPublicKeyError)![shared_length]u8 { pub fn createSharedSecret(public_key: [public_length]u8, secret_key: [secret_length]u8) (IdentityElementError || WeakPublicKeyError)![shared_length]u8 {
const p = try X25519.scalarmult(secret_key, public_key); const p = try X25519.scalarmult(secret_key, public_key);
const zero = [_]u8{0} ** 16; const zero = [_]u8{0} ** 16;

View file

@ -1,6 +1,6 @@
//! Secure Hashing Algorithm 2 (SHA2) //! Secure Hashing Algorithm 2 (SHA2)
//! //!
//! Published by the National Institue of Standards and Technology (NIST) [1] [2]. //! Published by the National Institute of Standards and Technology (NIST) [1] [2].
//! //!
//! Truncation mitigates length-extension attacks but increases vulnerability to collision //! Truncation mitigates length-extension attacks but increases vulnerability to collision
//! attacks. Collision attacks remain impractical for all types defined here. //! attacks. Collision attacks remain impractical for all types defined here.

View file

@ -40,8 +40,8 @@ const assert = std.debug.assert;
pub const Client = @import("tls/Client.zig"); pub const Client = @import("tls/Client.zig");
pub const record_header_len = 5; pub const record_header_len = 5;
pub const max_cipertext_inner_record_len = 1 << 14; pub const max_ciphertext_inner_record_len = 1 << 14;
pub const max_ciphertext_len = max_cipertext_inner_record_len + 256; pub const max_ciphertext_len = max_ciphertext_inner_record_len + 256;
pub const max_ciphertext_record_len = max_ciphertext_len + record_header_len; pub const max_ciphertext_record_len = max_ciphertext_len + record_header_len;
pub const hello_retry_request_sequence = [32]u8{ pub const hello_retry_request_sequence = [32]u8{
0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11, 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91, 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11, 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91,

View file

@ -819,7 +819,7 @@ fn prepareCiphertextRecord(
const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len; const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len;
while (true) { while (true) {
const encrypted_content_len: u16 = @intCast(@min( const encrypted_content_len: u16 = @intCast(@min(
@min(bytes.len - bytes_i, tls.max_cipertext_inner_record_len), @min(bytes.len - bytes_i, tls.max_ciphertext_inner_record_len),
ciphertext_buf.len -| ciphertext_buf.len -|
(close_notify_alert_reserved + overhead_len + ciphertext_end), (close_notify_alert_reserved + overhead_len + ciphertext_end),
)); ));

View file

@ -38,7 +38,7 @@ pub fn ipRegNum() u8 {
pub fn fpRegNum(reg_context: RegisterContext) u8 { pub fn fpRegNum(reg_context: RegisterContext) u8 {
return switch (builtin.cpu.arch) { return switch (builtin.cpu.arch) {
// GCC on OS X historicaly did the opposite of ELF for these registers (only in .eh_frame), and that is now the convention for MachO // GCC on OS X historically did the opposite of ELF for these registers (only in .eh_frame), and that is now the convention for MachO
.x86 => if (reg_context.eh_frame and reg_context.is_macho) 4 else 5, .x86 => if (reg_context.eh_frame and reg_context.is_macho) 4 else 5,
.x86_64 => 6, .x86_64 => 6,
.arm => 11, .arm => 11,

View file

@ -15,7 +15,7 @@ pub const ExpressionContext = struct {
/// The dwarf format of the section this expression is in /// The dwarf format of the section this expression is in
format: dwarf.Format = .@"32", format: dwarf.Format = .@"32",
/// If specified, any addresses will pass through this function before being acccessed /// If specified, any addresses will pass through this function before being accessed
isValidMemory: ?*const fn (address: usize) bool = null, isValidMemory: ?*const fn (address: usize) bool = null,
/// The compilation unit this expression relates to, if any /// The compilation unit this expression relates to, if any
@ -42,14 +42,14 @@ pub const ExpressionOptions = struct {
/// The address size of the target architecture /// The address size of the target architecture
addr_size: u8 = @sizeOf(usize), addr_size: u8 = @sizeOf(usize),
/// Endianess of the target architecture /// Endianness of the target architecture
endian: std.builtin.Endian = builtin.target.cpu.arch.endian(), endian: std.builtin.Endian = builtin.target.cpu.arch.endian(),
/// Restrict the stack machine to a subset of opcodes used in call frame instructions /// Restrict the stack machine to a subset of opcodes used in call frame instructions
call_frame_context: bool = false, call_frame_context: bool = false,
}; };
// Explcitly defined to support executing sub-expressions // Explicitly defined to support executing sub-expressions
pub const ExpressionError = error{ pub const ExpressionError = error{
UnimplementedExpressionCall, UnimplementedExpressionCall,
UnimplementedOpcode, UnimplementedOpcode,

View file

@ -1,4 +1,4 @@
// There is a generic CRC implementation "Crc()" which can be paramterized via // There is a generic CRC implementation "Crc()" which can be parameterized via
// the Algorithm struct for a plethora of uses. // the Algorithm struct for a plethora of uses.
// //
// The primary interface for all of the standard CRC algorithms is the // The primary interface for all of the standard CRC algorithms is the

View file

@ -79,7 +79,7 @@ pub const Wyhash = struct {
@memcpy(scratch[0..rem], self.buf[self.buf.len - rem ..][0..rem]); @memcpy(scratch[0..rem], self.buf[self.buf.len - rem ..][0..rem]);
@memcpy(scratch[rem..][0..self.buf_len], self.buf[0..self.buf_len]); @memcpy(scratch[rem..][0..self.buf_len], self.buf[0..self.buf_len]);
// Same as input but with additional bytes preceeding start in case of a short buffer // Same as input but with additional bytes preceding start in case of a short buffer
input = &scratch; input = &scratch;
offset = rem; offset = rem;
} }

View file

@ -902,7 +902,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa
return error.InvalidEnumTag; return error.InvalidEnumTag;
} }
// We don't direcly iterate over the fields of EnumTag, as that // We don't directly iterate over the fields of EnumTag, as that
// would require an inline loop. Instead, we create an array of // would require an inline loop. Instead, we create an array of
// values that is comptime-known, but can be iterated at runtime // values that is comptime-known, but can be iterated at runtime
// without requiring an inline loop. This generates better // without requiring an inline loop. This generates better

View file

@ -570,7 +570,7 @@ pub fn futex_wake(uaddr: *const i32, futex_op: u32, val: i32) usize {
/// Returns the array index of one of the woken futexes. /// Returns the array index of one of the woken futexes.
/// No further information is provided: any number of other futexes may also /// No further information is provided: any number of other futexes may also
/// have been woken by the same event, and if more than one futex was woken, /// have been woken by the same event, and if more than one futex was woken,
/// the retrned index may refer to any one of them. /// the returned index may refer to any one of them.
/// (It is not necessarily the futex with the smallest index, nor the one /// (It is not necessarily the futex with the smallest index, nor the one
/// most recently woken, nor...) /// most recently woken, nor...)
pub fn futex2_waitv( pub fn futex2_waitv(
@ -648,7 +648,7 @@ pub fn futex2_wake(
pub fn futex2_requeue( pub fn futex2_requeue(
/// Array describing the source and destination futex. /// Array describing the source and destination futex.
waiters: [*]futex_waitv, waiters: [*]futex_waitv,
/// Unsed. /// Unused.
flags: u32, flags: u32,
/// Number of futexes to wake. /// Number of futexes to wake.
nr_wake: i32, nr_wake: i32,
@ -6009,7 +6009,7 @@ else
/// values of this resource limit. /// values of this resource limit.
NICE, NICE,
/// Maximum realtime priority allowed for non-priviledged /// Maximum realtime priority allowed for non-privileged
/// processes. /// processes.
RTPRIO, RTPRIO,
@ -7228,7 +7228,7 @@ pub const futex_waitv = extern struct {
uaddr: u64, uaddr: u64,
/// Flags for this waiter. /// Flags for this waiter.
flags: u32, flags: u32,
/// Reserved memeber to preserve alignment. /// Reserved member to preserve alignment.
/// Should be 0. /// Should be 0.
__reserved: u32, __reserved: u32,
}; };

View file

@ -3938,7 +3938,7 @@ test BufferGroup {
// Server uses buffer group receive // Server uses buffer group receive
{ {
// Submit recv operation, buffer will be choosen from buffer group // Submit recv operation, buffer will be chosen from buffer group
_ = try buf_grp.recv(2, fds.server, 0); _ = try buf_grp.recv(2, fds.server, 0);
const submitted = try ring.submit(); const submitted = try ring.submit();
try testing.expectEqual(1, submitted); try testing.expectEqual(1, submitted);
@ -3956,7 +3956,7 @@ test BufferGroup {
// Get buffer from pool // Get buffer from pool
const buf = buf_grp.get(buffer_id)[0..len]; const buf = buf_grp.get(buffer_id)[0..len];
try testing.expectEqualSlices(u8, &data, buf); try testing.expectEqualSlices(u8, &data, buf);
// Releaase buffer to the kernel when application is done with it // Release buffer to the kernel when application is done with it
buf_grp.put(buffer_id); buf_grp.put(buffer_id);
} }
} }

View file

@ -140,7 +140,7 @@ pub const F_STRICT_ALIGNMENT = 0x1;
/// If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the verifier will /// If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the verifier will
/// allow any alignment whatsoever. On platforms with strict alignment /// allow any alignment whatsoever. On platforms with strict alignment
/// requirements for loads ands stores (such as sparc and mips) the verifier /// requirements for loads and stores (such as sparc and mips) the verifier
/// validates that all loads and stores provably follow this requirement. This /// validates that all loads and stores provably follow this requirement. This
/// flag turns that checking and enforcement off. /// flag turns that checking and enforcement off.
/// ///

View file

@ -385,7 +385,7 @@ pub const rlimit_resource = enum(c_int) {
/// values of this resource limit. /// values of this resource limit.
NICE, NICE,
/// Maximum realtime priority allowed for non-priviledged /// Maximum realtime priority allowed for non-privileged
/// processes. /// processes.
RTPRIO, RTPRIO,

View file

@ -370,7 +370,7 @@ pub const rlimit_resource = enum(c_int) {
/// values of this resource limit. /// values of this resource limit.
NICE, NICE,
/// Maximum realtime priority allowed for non-priviledged /// Maximum realtime priority allowed for non-privileged
/// processes. /// processes.
RTPRIO, RTPRIO,

View file

@ -460,7 +460,7 @@ pub const rlimit_resource = enum(c_int) {
/// values of this resource limit. /// values of this resource limit.
NICE, NICE,
/// Maximum realtime priority allowed for non-priviledged /// Maximum realtime priority allowed for non-privileged
/// processes. /// processes.
RTPRIO, RTPRIO,

View file

@ -184,7 +184,7 @@ pub const SIG = struct {
pub const sigset_t = c_long; pub const sigset_t = c_long;
pub const empty_sigset = 0; pub const empty_sigset = 0;
pub const siginfo_t = c_long; pub const siginfo_t = c_long;
// TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we incude it here to be compatible. // TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we include it here to be compatible.
pub const Sigaction = extern struct { pub const Sigaction = extern struct {
pub const handler_fn = *const fn (i32) callconv(.C) void; pub const handler_fn = *const fn (i32) callconv(.C) void;
pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void; pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;

View file

@ -3474,7 +3474,7 @@ pub const SocketError = error{
pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t { pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t {
if (native_os == .windows) { if (native_os == .windows) {
// NOTE: windows translates the SOCK.NONBLOCK/SOCK.CLOEXEC flags into // NOTE: windows translates the SOCK.NONBLOCK/SOCK.CLOEXEC flags into
// windows-analagous operations // windows-analogous operations
const filtered_sock_type = socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC); const filtered_sock_type = socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC);
const flags: u32 = if ((socket_type & SOCK.CLOEXEC) != 0) const flags: u32 = if ((socket_type & SOCK.CLOEXEC) != 0)
windows.ws2_32.WSA_FLAG_NO_HANDLE_INHERIT windows.ws2_32.WSA_FLAG_NO_HANDLE_INHERIT

View file

@ -617,7 +617,7 @@ fn contextLessThan(context: []const u32, a: usize, b: usize) Order {
const CPQlt = PriorityQueue(usize, []const u32, contextLessThan); const CPQlt = PriorityQueue(usize, []const u32, contextLessThan);
test "add and remove min heap with contextful comparator" { test "add and remove min heap with context comparator" {
const context = [_]u32{ 5, 3, 4, 2, 2, 8, 0 }; const context = [_]u32{ 5, 3, 4, 2, 2, 8, 0 };
var queue = CPQlt.init(testing.allocator, context[0..]); var queue = CPQlt.init(testing.allocator, context[0..]);

View file

@ -1818,7 +1818,7 @@ pub const CreateEnvironOptions = struct {
zig_progress_fd: ?i32 = null, zig_progress_fd: ?i32 = null,
}; };
/// Creates a null-deliminated environment variable block in the format /// Creates a null-delimited environment variable block in the format
/// expected by POSIX, from a hash map plus options. /// expected by POSIX, from a hash map plus options.
pub fn createEnvironFromMap( pub fn createEnvironFromMap(
arena: Allocator, arena: Allocator,
@ -1880,7 +1880,7 @@ pub fn createEnvironFromMap(
return envp_buf; return envp_buf;
} }
/// Creates a null-deliminated environment variable block in the format /// Creates a null-delimited environment variable block in the format
/// expected by POSIX, from a hash map plus options. /// expected by POSIX, from a hash map plus options.
pub fn createEnvironFromExisting( pub fn createEnvironFromExisting(
arena: Allocator, arena: Allocator,

View file

@ -268,7 +268,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void {
} }
} }
/// choses a pivot in `items[a..b]`. /// chooses a pivot in `items[a..b]`.
/// swaps likely_sorted when `items[a..b]` seems to be already sorted. /// swaps likely_sorted when `items[a..b]` seems to be already sorted.
fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint { fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint {
// minimum length for using the Tukey's ninther method // minimum length for using the Tukey's ninther method

View file

@ -315,7 +315,7 @@ pub const FileKind = enum {
file, file,
}; };
/// Iteartor over entries in the tar file represented by reader. /// Iterator over entries in the tar file represented by reader.
pub fn Iterator(comptime ReaderType: type) type { pub fn Iterator(comptime ReaderType: type) type {
return struct { return struct {
reader: ReaderType, reader: ReaderType,
@ -423,7 +423,7 @@ pub fn Iterator(comptime ReaderType: type) type {
self.padding = blockPadding(size); self.padding = blockPadding(size);
switch (kind) { switch (kind) {
// File types to retrun upstream // File types to return upstream
.directory, .normal, .symbolic_link => { .directory, .normal, .symbolic_link => {
file.kind = switch (kind) { file.kind = switch (kind) {
.directory => .directory, .directory => .directory,

View file

@ -9,7 +9,7 @@ const Case = struct {
mode: u32 = 0, mode: u32 = 0,
link_name: []const u8 = &[0]u8{}, link_name: []const u8 = &[0]u8{},
kind: tar.FileKind = .file, kind: tar.FileKind = .file,
truncated: bool = false, // when there is no file body, just header, usefull for huge files truncated: bool = false, // when there is no file body, just header, useful for huge files
}; };
data: []const u8, // testdata file content data: []const u8, // testdata file content

View file

@ -54,7 +54,7 @@ pub fn startInstrumentation() void {
/// Stop full callgrind instrumentation if not already switched off. /// Stop full callgrind instrumentation if not already switched off.
/// This flushes Valgrinds translation cache, and does no additional /// This flushes Valgrinds translation cache, and does no additional
/// instrumentation afterwards, which effectivly will run at the same /// instrumentation afterwards, which effectively will run at the same
/// speed as the "none" tool (ie. at minimal slowdown). /// speed as the "none" tool (ie. at minimal slowdown).
/// Use this to bypass Callgrind aggregation for uninteresting code parts. /// Use this to bypass Callgrind aggregation for uninteresting code parts.
/// To start Callgrind in this mode to ignore the setup phase, use /// To start Callgrind in this mode to ignore the setup phase, use

View file

@ -5085,7 +5085,7 @@ fn structDeclInner(
any_default_inits = true; any_default_inits = true;
// The decl_inst is used as here so that we can easily reconstruct a mapping // The decl_inst is used as here so that we can easily reconstruct a mapping
// between it and the field type when the fields inits are analzyed. // between it and the field type when the fields inits are analyzed.
const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = decl_inst.toRef() } }; const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = decl_inst.toRef() } };
const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr); const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr);
@ -11559,7 +11559,7 @@ fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !Zir.NullTerminat
} }
/// Adds a doc comment block to `string_bytes` by walking backwards from `end_token`. /// Adds a doc comment block to `string_bytes` by walking backwards from `end_token`.
/// `end_token` must point at the first token after the last doc coment line. /// `end_token` must point at the first token after the last doc comment line.
/// Returns 0 if no doc comment is present. /// Returns 0 if no doc comment is present.
fn docCommentAsString(astgen: *AstGen, end_token: Ast.TokenIndex) !Zir.NullTerminatedString { fn docCommentAsString(astgen: *AstGen, end_token: Ast.TokenIndex) !Zir.NullTerminatedString {
if (end_token == 0) return .empty; if (end_token == 0) return .empty;
@ -11780,7 +11780,7 @@ const Scope = struct {
inst: Zir.Inst.Ref, inst: Zir.Inst.Ref,
/// Source location of the corresponding variable declaration. /// Source location of the corresponding variable declaration.
token_src: Ast.TokenIndex, token_src: Ast.TokenIndex,
/// Track the first identifer where it is referenced. /// Track the first identifier where it is referenced.
/// 0 means never referenced. /// 0 means never referenced.
used: Ast.TokenIndex = 0, used: Ast.TokenIndex = 0,
/// Track the identifier where it is discarded, like this `_ = foo;`. /// Track the identifier where it is discarded, like this `_ = foo;`.
@ -11803,13 +11803,13 @@ const Scope = struct {
ptr: Zir.Inst.Ref, ptr: Zir.Inst.Ref,
/// Source location of the corresponding variable declaration. /// Source location of the corresponding variable declaration.
token_src: Ast.TokenIndex, token_src: Ast.TokenIndex,
/// Track the first identifer where it is referenced. /// Track the first identifier where it is referenced.
/// 0 means never referenced. /// 0 means never referenced.
used: Ast.TokenIndex = 0, used: Ast.TokenIndex = 0,
/// Track the identifier where it is discarded, like this `_ = foo;`. /// Track the identifier where it is discarded, like this `_ = foo;`.
/// 0 means never discarded. /// 0 means never discarded.
discarded: Ast.TokenIndex = 0, discarded: Ast.TokenIndex = 0,
/// Whether this value is used as an lvalue after inititialization. /// Whether this value is used as an lvalue after initialization.
/// If not, we know it can be `const`, so will emit a compile error if it is `var`. /// If not, we know it can be `const`, so will emit a compile error if it is `var`.
used_as_lvalue: bool = false, used_as_lvalue: bool = false,
/// String table index. /// String table index.

View file

@ -7,8 +7,8 @@
//! occur. Thus, we want to provide a real result pointer (from an alloc) only //! occur. Thus, we want to provide a real result pointer (from an alloc) only
//! when necessary. //! when necessary.
//! //!
//! To achive this, we need to determine which expressions require a result //! To achieve this, we need to determine which expressions require a result
//! pointer. This pass is reponsible for analyzing all syntax forms which may //! pointer. This pass is responsible for analyzing all syntax forms which may
//! provide a result location and, if sub-expressions consume this result //! provide a result location and, if sub-expressions consume this result
//! pointer non-trivially (e.g. writing through field pointers), marking the //! pointer non-trivially (e.g. writing through field pointers), marking the
//! node as requiring a result location. //! node as requiring a result location.

View file

@ -878,7 +878,7 @@ const MsvcLibDir = struct {
error.OutOfMemory => return error.OutOfMemory, error.OutOfMemory => return error.OutOfMemory,
else => continue, else => continue,
}; };
if (source_directories_value.len > (std.fs.max_path_bytes * 30)) { // note(bratishkaerik): guessing from the fact that on my computer it has 15 pathes and at least some of them are not of max length if (source_directories_value.len > (std.fs.max_path_bytes * 30)) { // note(bratishkaerik): guessing from the fact that on my computer it has 15 paths and at least some of them are not of max length
allocator.free(source_directories_value); allocator.free(source_directories_value);
continue; continue;
} }
@ -887,10 +887,10 @@ const MsvcLibDir = struct {
} else return error.PathNotFound; } else return error.PathNotFound;
defer allocator.free(source_directories); defer allocator.free(source_directories);
var source_directories_splitted = std.mem.splitScalar(u8, source_directories, ';'); var source_directories_split = std.mem.splitScalar(u8, source_directories, ';');
const msvc_dir: []const u8 = msvc_dir: { const msvc_dir: []const u8 = msvc_dir: {
const msvc_include_dir_maybe_with_trailing_slash = try allocator.dupe(u8, source_directories_splitted.first()); const msvc_include_dir_maybe_with_trailing_slash = try allocator.dupe(u8, source_directories_split.first());
if (msvc_include_dir_maybe_with_trailing_slash.len > std.fs.max_path_bytes or !std.fs.path.isAbsolute(msvc_include_dir_maybe_with_trailing_slash)) { if (msvc_include_dir_maybe_with_trailing_slash.len > std.fs.max_path_bytes or !std.fs.path.isAbsolute(msvc_include_dir_maybe_with_trailing_slash)) {
allocator.free(msvc_include_dir_maybe_with_trailing_slash); allocator.free(msvc_include_dir_maybe_with_trailing_slash);

View file

@ -4540,7 +4540,7 @@ test "zig fmt: decimal float literals with underscore separators" {
); );
} }
test "zig fmt: hexadeciaml float literals with underscore separators" { test "zig fmt: hexadecimal float literals with underscore separators" {
try testTransform( try testTransform(
\\pub fn main() void { \\pub fn main() void {
\\ const a: f64 = (0x10.0p-0+(0x10.0p+0))+0x10_00.00_00p-8+0x00_00.00_10p+16; \\ const a: f64 = (0x10.0p-0+(0x10.0p+0))+0x10_00.00_00p-8+0x00_00.00_10p+16;

View file

@ -1044,7 +1044,7 @@ fn detectAbiAndDynamicLinker(
defer if (is_elf_file == false) file.close(); defer if (is_elf_file == false) file.close();
// Shortest working interpreter path is "#!/i" (4) // Shortest working interpreter path is "#!/i" (4)
// (interpreter is "/i", assuming all pathes are absolute, like in above comment). // (interpreter is "/i", assuming all paths are absolute, like in above comment).
// ELF magic number length is also 4. // ELF magic number length is also 4.
// //
// If file is shorter than that, it is definitely not ELF file // If file is shorter than that, it is definitely not ELF file