std.mem: remove deprecated functions

Author: D-Berg
Date: 2025-11-30 13:33:30 +01:00
Parent: bfe3317059
Commit: a98d2540ae
137 changed files with 422 additions and 514 deletions

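Every hunk in this commit is the same mechanical migration: the deprecated `std.mem.indexOf*` family becomes `find*` (`indexOf` → `find`, `indexOfScalar` → `findScalar`, `indexOfAny` → `findAny`, `indexOfNone` → `findNone`, the `*Pos` variants keep their start-index argument, `lastIndexOf` → `findLast`, `lastIndexOfScalar` → `findScalarLast`, `indexOfDiff` → `findDiff`), `trimRight` becomes `trimEnd`, and `copyForwards` is replaced by the `@memmove` builtin. A minimal migration sketch, assuming the new names are drop-in replacements with the same signatures and `?usize` results as the deprecated ones:

```zig
const std = @import("std");

// Hypothetical before/after pairs; the find* behavior is assumed to match the
// old indexOf* functions (first match from the left, null when absent).
test "std.mem rename: indexOf* -> find*" {
    const s = "a,b,c";
    // indexOfScalar -> findScalar
    try std.testing.expectEqual(@as(?usize, 1), std.mem.findScalar(u8, s, ','));
    // indexOf -> find
    try std.testing.expectEqual(@as(?usize, 2), std.mem.find(u8, s, "b"));
    // indexOfAny -> findAny
    try std.testing.expectEqual(@as(?usize, 1), std.mem.findAny(u8, s, ",c"));
    // indexOfScalarPos -> findScalarPos (the start index stays in the same position)
    try std.testing.expectEqual(@as(?usize, 3), std.mem.findScalarPos(u8, s, 2, ','));
    // lastIndexOfScalar -> findScalarLast
    try std.testing.expectEqual(@as(?usize, 3), std.mem.findScalarLast(u8, s, ','));
    // trimRight -> trimEnd
    try std.testing.expectEqualStrings("a,b,c", std.mem.trimEnd(u8, "a,b,c  ", " "));
}
```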

@@ -84,7 +84,7 @@ pub fn compileResultMessage(msg_bytes: []u8) error{ OutOfMemory, WriteFailed }!v
 defer gpa.free(slowest_decls);
 for (slowest_files) |*file_out| {
-const i = std.mem.indexOfScalar(u8, trailing, 0) orelse @panic("malformed CompileResult message");
+const i = std.mem.findScalar(u8, trailing, 0) orelse @panic("malformed CompileResult message");
 file_out.* = .{
 .name = trailing[0..i],
 .ns_sema = 0,
@@ -95,7 +95,7 @@ pub fn compileResultMessage(msg_bytes: []u8) error{ OutOfMemory, WriteFailed }!v
 }
 for (slowest_decls) |*decl_out| {
-const i = std.mem.indexOfScalar(u8, trailing, 0) orelse @panic("malformed CompileResult message");
+const i = std.mem.findScalar(u8, trailing, 0) orelse @panic("malformed CompileResult message");
 const file_idx = std.mem.readInt(u32, trailing[i..][1..5], .little);
 const sema_count = std.mem.readInt(u32, trailing[i..][5..9], .little);
 const sema_ns = std.mem.readInt(u64, trailing[i..][9..17], .little);
@@ -258,7 +258,7 @@ pub fn runTestResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
 defer table_html.deinit(gpa);
 for (durations) |test_ns| {
-const test_name_len = std.mem.indexOfScalar(u8, trailing[offset..], 0) orelse @panic("malformed RunTestResult message");
+const test_name_len = std.mem.findScalar(u8, trailing[offset..], 0) orelse @panic("malformed RunTestResult message");
 const test_name = trailing[offset..][0..test_name_len];
 offset += test_name_len + 1;
 try table_html.print(gpa, "<tr><th scope=\"row\"><code>{f}</code></th>", .{fmtEscapeHtml(test_name)});


@@ -1634,7 +1634,7 @@ pub fn addSourceFromPath(comp: *Compilation, path: []const u8) !Source {
 fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kind) !Source {
 if (comp.sources.get(path)) |some| return some;
-if (mem.indexOfScalar(u8, path, 0) != null) {
+if (mem.findScalar(u8, path, 0) != null) {
 return error.FileNotFound;
 }
@@ -1913,7 +1913,7 @@ const FindInclude = struct {
 }
 // For an include like 'Foo/Bar.h', search in '<framework_dir>/Foo.framework/Headers/Bar.h'.
 const framework_name: []const u8, const header_sub_path: []const u8 = f: {
-const i = std.mem.indexOfScalar(u8, find.include_path, '/') orelse return null;
+const i = std.mem.findScalar(u8, find.include_path, '/') orelse return null;
 break :f .{ find.include_path[0..i], find.include_path[i + 1 ..] };
 };
 return find.check("{s}{c}{s}.framework{c}Headers{c}{s}", .{
@@ -1966,7 +1966,7 @@ pub const IncludeType = enum {
 };
 fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8 {
-if (mem.indexOfScalar(u8, path, 0) != null) {
+if (mem.findScalar(u8, path, 0) != null) {
 return error.FileNotFound;
 }
@@ -2316,15 +2316,15 @@ test "addSourceFromBuffer - exhaustive check for carriage return elimination" {
 while (true) {
 const source = try comp.addSourceFromBuffer(&buf, &buf);
 source_count += 1;
-try std.testing.expect(std.mem.indexOfScalar(u8, source.buf, '\r') == null);
+try std.testing.expect(std.mem.findScalar(u8, source.buf, '\r') == null);
 if (std.mem.allEqual(u8, &buf, alphabet[alen - 1])) break;
-var idx = std.mem.indexOfScalar(u8, &alphabet, buf[buf.len - 1]).?;
+var idx = std.mem.findScalar(u8, &alphabet, buf[buf.len - 1]).?;
 buf[buf.len - 1] = alphabet[(idx + 1) % alen];
 var j = buf.len - 1;
 while (j > 0) : (j -= 1) {
-idx = std.mem.indexOfScalar(u8, &alphabet, buf[j - 1]).?;
+idx = std.mem.findScalar(u8, &alphabet, buf[j - 1]).?;
 if (buf[j] == alphabet[0]) buf[j - 1] = alphabet[(idx + 1) % alen] else break;
 }
 }


@@ -71,7 +71,7 @@ fn writeTarget(path: []const u8, w: *std.Io.Writer) !void {
 fn writePath(d: *const DepFile, path: []const u8, w: *std.Io.Writer) !void {
 switch (d.format) {
 .nmake => {
-if (std.mem.indexOfAny(u8, path, " #${}^!")) |_|
+if (std.mem.findAny(u8, path, " #${}^!")) |_|
 try w.print("\"{s}\"", .{path})
 else
 try w.writeAll(path);


@@ -507,7 +507,7 @@ pub fn formatArgs(w: *std.Io.Writer, fmt: []const u8, args: anytype) std.Io.Writ
 }
 pub fn templateIndex(w: *std.Io.Writer, fmt: []const u8, template: []const u8) std.Io.Writer.Error!usize {
-const i = std.mem.indexOf(u8, fmt, template) orelse {
+const i = std.mem.find(u8, fmt, template) orelse {
 if (@import("builtin").mode == .Debug) {
 std.debug.panic("template `{s}` not found in format string `{s}`", .{ template, fmt });
 }


@@ -322,7 +322,7 @@ pub fn parseArgs(
 macro = args[i];
 }
 var value: []const u8 = "1";
-if (mem.indexOfScalar(u8, macro, '=')) |some| {
+if (mem.findScalar(u8, macro, '=')) |some| {
 value = macro[some + 1 ..];
 macro = macro[0..some];
 }


@@ -226,9 +226,9 @@ fn detectLSBRelease(tc: *const Toolchain) ?Tag {
 fn scanForRedHat(buf: []const u8) Tag {
 if (mem.startsWith(u8, buf, "Fedora release")) return .fedora;
 if (mem.startsWith(u8, buf, "Red Hat Enterprise Linux") or mem.startsWith(u8, buf, "CentOS") or mem.startsWith(u8, buf, "Scientific Linux")) {
-if (mem.indexOfPos(u8, buf, 0, "release 7") != null) return .rhel7;
-if (mem.indexOfPos(u8, buf, 0, "release 6") != null) return .rhel6;
-if (mem.indexOfPos(u8, buf, 0, "release 5") != null) return .rhel5;
+if (mem.findPos(u8, buf, 0, "release 7") != null) return .rhel7;
+if (mem.findPos(u8, buf, 0, "release 6") != null) return .rhel6;
+if (mem.findPos(u8, buf, 0, "release 5") != null) return .rhel5;
 }
 return .unknown;


@@ -17,7 +17,7 @@ fn readFileFake(entries: []const Filesystem.Entry, path: []const u8, buf: []u8)
 fn findProgramByNameFake(entries: []const Filesystem.Entry, name: []const u8, path: ?[]const u8, buf: []u8) ?[]const u8 {
 @branchHint(.cold);
-if (mem.indexOfScalar(u8, name, '/') != null) {
+if (mem.findScalar(u8, name, '/') != null) {
 @memcpy(buf[0..name.len], name);
 return buf[0..name.len];
 }
@@ -78,7 +78,7 @@ fn findProgramByNameWindows(allocator: std.mem.Allocator, name: []const u8, path
 /// TODO: does WASI need special handling?
 fn findProgramByNamePosix(name: []const u8, path: ?[]const u8, buf: []u8) ?[]const u8 {
-if (mem.indexOfScalar(u8, name, '/') != null) {
+if (mem.findScalar(u8, name, '/') != null) {
 @memcpy(buf[0..name.len], name);
 return buf[0..name.len];
 }
@@ -128,7 +128,7 @@ pub const Filesystem = union(enum) {
 if (entry.path.len == self.base.len) continue;
 if (std.mem.startsWith(u8, entry.path, self.base)) {
 const remaining = entry.path[self.base.len + 1 ..];
-if (std.mem.indexOfScalar(u8, remaining, std.fs.path.sep) != null) continue;
+if (std.mem.findScalar(u8, remaining, std.fs.path.sep) != null) continue;
 const extension = std.fs.path.extension(remaining);
 const kind: std.fs.Dir.Entry.Kind = if (extension.len == 0) .directory else .file;
 return .{ .name = remaining, .kind = kind };


@@ -69,7 +69,7 @@ pub fn parse(text: []const u8) GCCVersion {
 var minor_str = second;
 if (rest.len == 0) {
-const end = mem.indexOfNone(u8, minor_str, "0123456789") orelse minor_str.len;
+const end = mem.findNone(u8, minor_str, "0123456789") orelse minor_str.len;
 if (end > 0) {
 good.suffix = minor_str[end..];
 minor_str = minor_str[0..end];
@@ -80,7 +80,7 @@ pub fn parse(text: []const u8) GCCVersion {
 good.minor_str = minor_str;
 if (rest.len > 0) {
-const end = mem.indexOfNone(u8, rest, "0123456789") orelse rest.len;
+const end = mem.findNone(u8, rest, "0123456789") orelse rest.len;
 if (end > 0) {
 const patch_num_text = rest[0..end];
 good.patch = std.fmt.parseInt(i32, patch_num_text, 10) catch return bad;


@@ -747,7 +747,7 @@ fn pragma(p: *Parser) Compilation.Error!bool {
 const name_tok = p.tok_i;
 const name = p.tokSlice(name_tok);
-const end_idx = mem.indexOfScalarPos(Token.Id, p.tok_ids, p.tok_i, .nl).?;
+const end_idx = mem.findScalarPos(Token.Id, p.tok_ids, p.tok_i, .nl).?;
 const pragma_len = @as(TokenIndex, @intCast(end_idx)) - p.tok_i;
 defer p.tok_i += pragma_len + 1; // skip past .nl as well
 if (p.comp.getPragma(name)) |prag| {
@@ -10395,7 +10395,7 @@ fn getExponent(p: *Parser, buf: []const u8, prefix: NumberPrefix, tok_i: TokenIn
 }
 } else buf.len;
 const exponent = buf[0..end];
-if (std.mem.indexOfAny(u8, exponent, "0123456789") == null) {
+if (std.mem.findAny(u8, exponent, "0123456789") == null) {
 try p.err(tok_i, .exponent_has_no_digits, .{});
 return error.ParsingFailed;
 }


@@ -2743,7 +2743,7 @@ fn unescapeUcn(pp: *Preprocessor, tok: TokenWithExpansionLocs) !TokenWithExpansi
 @branchHint(.cold);
 const identifier = pp.expandedSlice(tok);
 const gpa = pp.comp.gpa;
-if (mem.indexOfScalar(u8, identifier, '\\') != null) {
+if (mem.findScalar(u8, identifier, '\\') != null) {
 @branchHint(.cold);
 const start = pp.comp.generated_buf.items.len;
 try pp.comp.generated_buf.ensureUnusedCapacity(gpa, identifier.len + 1);
@@ -3791,7 +3791,7 @@ pub fn prettyPrintTokens(pp: *Preprocessor, w: *std.Io.Writer, macro_dump_mode:
 },
 .keyword_pragma => {
 const pragma_name = pp.expandedSlice(pp.tokens.get(i + 1));
-const end_idx = mem.indexOfScalarPos(Token.Id, tok_ids, i, .nl) orelse i + 1;
+const end_idx = mem.findScalarPos(Token.Id, tok_ids, i, .nl) orelse i + 1;
 const pragma_len = @as(u32, @intCast(end_idx)) - i;
 if (pp.comp.getPragma(pragma_name)) |prag| {
@@ -3818,7 +3818,7 @@ pub fn prettyPrintTokens(pp: *Preprocessor, w: *std.Io.Writer, macro_dump_mode:
 },
 .whitespace => {
 var slice = pp.expandedSlice(cur);
-while (mem.indexOfScalar(u8, slice, '\n')) |some| {
+while (mem.findScalar(u8, slice, '\n')) |some| {
 if (pp.linemarkers != .none) try w.writeByte('\n');
 slice = slice[some + 1 ..];
 }


@@ -61,7 +61,7 @@ kind: Kind,
 pub fn lineCol(source: Source, loc: Location) ExpandedLocation {
 var start: usize = 0;
 // find the start of the line which is either a newline or a splice
-if (std.mem.lastIndexOfScalar(u8, source.buf[0..loc.byte_offset], '\n')) |some| start = some + 1;
+if (std.mem.findScalarLast(u8, source.buf[0..loc.byte_offset], '\n')) |some| start = some + 1;
 const splice_index: u32 = for (source.splice_locs, 0..) |splice_offset, i| {
 if (splice_offset > start) {
 if (splice_offset < loc.byte_offset) {
@@ -101,7 +101,7 @@ pub fn lineCol(source: Source, loc: Location) ExpandedLocation {
 // find the end of the line which is either a newline, EOF or a splice
 var nl = source.buf.len;
 var end_with_splice = false;
-if (std.mem.indexOfScalar(u8, source.buf[start..], '\n')) |some| nl = some + start;
+if (std.mem.findScalar(u8, source.buf[start..], '\n')) |some| nl = some + start;
 if (source.splice_locs.len > splice_index and nl > source.splice_locs[splice_index] and source.splice_locs[splice_index] > start) {
 end_with_splice = true;
 nl = source.splice_locs[splice_index];


@@ -127,7 +127,7 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
 // for the linker flavor is brittle. In addition, prepending "ld." or "ld64."
 // to a relative path is surprising. This is more complex due to priorities
 // among -B, COMPILER_PATH and PATH. --ld-path= should be used instead.
-if (mem.indexOfScalar(u8, use_linker, '/') != null) {
+if (mem.findScalar(u8, use_linker, '/') != null) {
 try tc.driver.comp.diagnostics.add(.{
 .text = "'-fuse-ld=' taking a path is deprecated; use '--ld-path=' instead",
 .kind = .off,
@@ -563,7 +563,7 @@ pub fn findProgramByName(tc: *const Toolchain, name: []const u8, buf: []u8) ?[]c
 const comp = tc.driver.comp;
 // TODO: does WASI need special handling?
-if (mem.indexOfScalar(u8, name, '/') != null) {
+if (mem.findScalar(u8, name, '/') != null) {
 @memcpy(buf[0..name.len], name);
 return buf[0..name.len];
 }


@@ -29,7 +29,7 @@ pub const Prefix = enum(u8) {
 'x', 'X' => return if (buf.len == 2) .decimal else .hex,
 'b', 'B' => return if (buf.len == 2) .decimal else .binary,
 else => {
-if (mem.indexOfAny(u8, buf, "eE.")) |_| {
+if (mem.findAny(u8, buf, "eE.")) |_| {
 // This is a decimal floating point number that happens to start with zero
 return .decimal;
 } else if (Suffix.fromString(buf[1..], .int)) |_| {
@@ -193,7 +193,7 @@ pub const Suffix = enum {
 for (parts) |part| {
 const lower = std.ascii.lowerString(&scratch, part);
-if (mem.indexOf(u8, buf, part) == null and mem.indexOf(u8, buf, lower) == null) continue :top;
+if (mem.find(u8, buf, part) == null and mem.find(u8, buf, lower) == null) continue :top;
 }
 return tag;
 }


@@ -354,7 +354,7 @@ pub const Parser = struct {
 const start = p.i;
 if (p.literal[start] != '\\') {
-p.i = mem.indexOfScalarPos(u8, p.literal, start + 1, '\\') orelse p.literal.len;
+p.i = mem.findScalarPos(u8, p.literal, start + 1, '\\') orelse p.literal.len;
 const unescaped_slice = p.literal[start..p.i];
 const view = std.unicode.Utf8View.init(unescaped_slice) catch {


@@ -145,7 +145,7 @@ pub fn main() !void {
 const option_contents = arg[2..];
 if (option_contents.len == 0)
 fatalWithHint("expected option name after '-D'", .{});
-if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
+if (mem.findScalar(u8, option_contents, '=')) |name_end| {
 const option_name = option_contents[0..name_end];
 const option_value = option_contents[name_end + 1 ..];
 if (try builder.addUserInputOption(option_name, option_value))
@@ -1498,7 +1498,7 @@ pub fn printErrorMessages(
 try ttyconf.setColor(stderr, .red);
 try stderr.writeAll("error:");
 try ttyconf.setColor(stderr, .reset);
-if (std.mem.indexOfScalar(u8, msg, '\n') == null) {
+if (std.mem.findScalar(u8, msg, '\n') == null) {
 try stderr.print(" {s}\n", .{msg});
 } else switch (multiline_errors) {
 .indent => {


@@ -538,7 +538,7 @@ pub const Compiler = struct {
 // This currently only checks for NUL bytes, but it should probably also check for
 // platform-specific invalid characters like '*', '?', '"', '<', '>', '|' (Windows)
 // Related: https://github.com/ziglang/zig/pull/14533#issuecomment-1416888193
-if (std.mem.indexOfScalar(u8, filename_utf8, 0) != null) {
+if (std.mem.findScalar(u8, filename_utf8, 0) != null) {
 return self.addErrorDetailsAndFail(.{
 .err = .invalid_filename,
 .token = node.filename.getFirstToken(),
@@ -2917,11 +2917,11 @@ fn validateSearchPath(path: []const u8) error{BadPathName}!void {
 var component_iterator = std.fs.path.componentIterator(path);
 while (component_iterator.next()) |component| {
 // https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file
-if (std.mem.indexOfAny(u8, component.name, "\x00<>:\"|?*") != null) return error.BadPathName;
+if (std.mem.findAny(u8, component.name, "\x00<>:\"|?*") != null) return error.BadPathName;
 }
 },
 else => {
-if (std.mem.indexOfScalar(u8, path, 0) != null) return error.BadPathName;
+if (std.mem.findScalar(u8, path, 0) != null) return error.BadPathName;
 },
 }
 }


@@ -1055,7 +1055,7 @@ pub const supported_targets = struct {
 };
 comptime {
 for (@typeInfo(Arch).@"enum".fields) |enum_field| {
-_ = std.mem.indexOfScalar(Arch, ordered_for_display, @enumFromInt(enum_field.value)) orelse {
+_ = std.mem.findScalar(Arch, ordered_for_display, @enumFromInt(enum_field.value)) orelse {
 @compileError(std.fmt.comptimePrint("'{s}' missing from ordered_for_display", .{enum_field.name}));
 };
 }


@@ -509,7 +509,7 @@ pub const ErrorDetails = struct {
 // We know that the token slice is a well-formed #pragma code_page(N), so
 // we can skip to the first ( and then get the number that follows
 const token_slice = self.token.slice(source);
-var number_start = std.mem.indexOfScalar(u8, token_slice, '(').? + 1;
+var number_start = std.mem.findScalar(u8, token_slice, '(').? + 1;
 while (std.ascii.isWhitespace(token_slice[number_start])) {
 number_start += 1;
 }


@@ -538,7 +538,7 @@ pub fn handleLineCommand(allocator: Allocator, line_command: []const u8, current
 defer allocator.free(filename);
 // \x00 bytes in the filename is incompatible with how StringTable works
-if (std.mem.indexOfScalar(u8, filename, '\x00') != null) return error.InvalidLineCommand;
+if (std.mem.findScalar(u8, filename, '\x00') != null) return error.InvalidLineCommand;
 current_mapping.line_num = linenum;
 current_mapping.filename.clearRetainingCapacity();


@@ -152,11 +152,11 @@ fn query_exec_fallible(query: []const u8, ignore_case: bool) !void {
 continue;
 }
 // substring, case insensitive match of full decl path
-if (std.mem.indexOf(u8, g.full_path_search_text_lower.items, term) != null) {
+if (std.mem.find(u8, g.full_path_search_text_lower.items, term) != null) {
 points += 2;
 continue;
 }
-if (std.mem.indexOf(u8, g.doc_search_text.items, term) != null) {
+if (std.mem.find(u8, g.doc_search_text.items, term) != null) {
 points += 1;
 continue;
 }
@@ -792,7 +792,7 @@ fn unpackInner(tar_bytes: []u8) !void {
 if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
 log.debug("found file: '{s}'", .{tar_file.name});
 const file_name = try gpa.dupe(u8, tar_file.name);
-if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
+if (std.mem.findScalar(u8, file_name, '/')) |pkg_name_end| {
 const pkg_name = file_name[0..pkg_name_end];
 const gop = try Walk.modules.getOrPut(gpa, pkg_name);
 const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);


@@ -159,7 +159,7 @@ const Block = struct {
 .heading => null,
 .code_block => code_block: {
 const trimmed = mem.trimEnd(u8, unindented, " \t");
-if (mem.indexOfNone(u8, trimmed, "`") != null or trimmed.len != b.data.code_block.fence_len) {
+if (mem.findNone(u8, trimmed, "`") != null or trimmed.len != b.data.code_block.fence_len) {
 const effective_indent = @min(indent, b.data.code_block.indent);
 break :code_block line[effective_indent..];
 } else {
@@ -598,7 +598,7 @@ fn startListItem(unindented_line: []const u8) ?ListItemStart {
 };
 }
-const number_end = mem.indexOfNone(u8, unindented_line, "0123456789") orelse return null;
+const number_end = mem.findNone(u8, unindented_line, "0123456789") orelse return null;
 const after_number = unindented_line[number_end..];
 const marker: Block.Data.ListMarker = if (mem.startsWith(u8, after_number, ". "))
 .number_dot
@@ -643,10 +643,10 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
 // Ignoring pipes in code spans allows table cells to contain
 // code using ||, for example.
 const open_start = i;
-i = mem.indexOfNonePos(u8, table_row_content, i, "`") orelse return null;
+i = mem.findNonePos(u8, table_row_content, i, "`") orelse return null;
 const open_len = i - open_start;
-while (mem.indexOfScalarPos(u8, table_row_content, i, '`')) |close_start| {
-i = mem.indexOfNonePos(u8, table_row_content, close_start, "`") orelse return null;
+while (mem.findScalarPos(u8, table_row_content, i, '`')) |close_start| {
+i = mem.findNonePos(u8, table_row_content, close_start, "`") orelse return null;
 const close_len = i - close_start;
 if (close_len == open_len) break;
 } else return null;
@@ -798,7 +798,7 @@ fn startCodeBlock(p: *Parser, unindented_line: []const u8) !?CodeBlockStart {
 } else "";
 // Code block tags may not contain backticks, since that would create
 // potential confusion with inline code spans.
-if (fence_len < 3 or mem.indexOfScalar(u8, tag_bytes, '`') != null) return null;
+if (fence_len < 3 or mem.findScalar(u8, tag_bytes, '`') != null) return null;
 return .{
 .tag = try p.addString(mem.trim(u8, tag_bytes, " ")),
 .fence_len = fence_len,
@@ -1386,12 +1386,12 @@ const InlineParser = struct {
 /// parsing.
 fn parseCodeSpan(ip: *InlineParser) !void {
 const opener_start = ip.pos;
-ip.pos = mem.indexOfNonePos(u8, ip.content, ip.pos, "`") orelse ip.content.len;
+ip.pos = mem.findNonePos(u8, ip.content, ip.pos, "`") orelse ip.content.len;
 const opener_len = ip.pos - opener_start;
 const start = ip.pos;
-const end = while (mem.indexOfScalarPos(u8, ip.content, ip.pos, '`')) |closer_start| {
-ip.pos = mem.indexOfNonePos(u8, ip.content, closer_start, "`") orelse ip.content.len;
+const end = while (mem.findScalarPos(u8, ip.content, ip.pos, '`')) |closer_start| {
+ip.pos = mem.findNonePos(u8, ip.content, closer_start, "`") orelse ip.content.len;
 const closer_len = ip.pos - closer_start;
 if (closer_len == opener_len) break closer_start;
@@ -1631,7 +1631,7 @@ fn addScratchStringLine(p: *Parser, line: []const u8) !void {
 }
 fn isBlank(line: []const u8) bool {
-return mem.indexOfNone(u8, line, " \t") == null;
+return mem.findNone(u8, line, " \t") == null;
 }
 fn isPunctuation(c: u8) bool {


@@ -153,7 +153,7 @@ const Executable = struct {
 "incompatible existing coverage file '{s}' (differing pcs length: {} != {})",
 .{ &coverage_file_name, seen_pcs_header.pcs_len, pcs.len },
 );
-if (mem.indexOfDiff(usize, seen_pcs_header.pcAddrs(), pcs)) |i| panic(
+if (mem.findDiff(usize, seen_pcs_header.pcAddrs(), pcs)) |i| panic(
 "incompatible existing coverage file '{s}' (differing pc at index {d}: {x} != {x})",
 .{ &coverage_file_name, i, seen_pcs_header.pcAddrs()[i], pcs[i] },
 );


@@ -60,7 +60,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
 };
 for (check_file.expected_matches) |expected_match| {
-if (mem.indexOf(u8, contents, expected_match) == null) {
+if (mem.find(u8, contents, expected_match) == null) {
 return step.fail(
 \\
 \\========= expected to find: ===================


@@ -88,7 +88,7 @@ const Action = struct {
 while (needle_it.next()) |needle_tok| {
 const hay_tok = hay_it.next() orelse break;
 if (mem.startsWith(u8, needle_tok, "{")) {
-const closing_brace = mem.indexOf(u8, needle_tok, "}") orelse return error.MissingClosingBrace;
+const closing_brace = mem.find(u8, needle_tok, "}") orelse return error.MissingClosingBrace;
 if (closing_brace != needle_tok.len - 1) return error.ClosingBraceNotLast;
 const name = needle_tok[1..closing_brace];
@@ -133,7 +133,7 @@ const Action = struct {
 assert(act.tag == .contains);
 const hay = mem.trim(u8, haystack, " ");
 const phrase = mem.trim(u8, act.phrase.resolve(b, step), " ");
-return mem.indexOf(u8, hay, phrase) != null;
+return mem.find(u8, hay, phrase) != null;
 }
 /// Returns true if the `phrase` does not exist within the haystack.
@@ -1142,7 +1142,7 @@ const MachODumper = struct {
 const full_path = ctx.imports.items[@as(u16, @bitCast(ordinal)) - 1];
 const basename = fs.path.basename(full_path);
 assert(basename.len > 0);
-const ext = mem.lastIndexOfScalar(u8, basename, '.') orelse basename.len;
+const ext = mem.findScalarLast(u8, basename, '.') orelse basename.len;
 break :blk basename[0..ext];
 };
 try writer.writeAll("(undefined)");
@@ -1662,7 +1662,7 @@ const MachODumper = struct {
 .dump_section => {
 const name = mem.sliceTo(@as([*:0]const u8, @ptrCast(check.data.items.ptr + check.payload.dump_section)), 0);
-const sep_index = mem.indexOfScalar(u8, name, ',') orelse
+const sep_index = mem.findScalar(u8, name, ',') orelse
 return step.fail("invalid section name: {s}", .{name});
 const segname = name[0..sep_index];
 const sectname = name[sep_index + 1 ..];


@@ -372,7 +372,7 @@ pub const TestRunner = struct {
 pub fn create(owner: *std.Build, options: Options) *Compile {
 const name = owner.dupe(options.name);
-if (mem.indexOf(u8, name, "/") != null or mem.indexOf(u8, name, "\\") != null) {
+if (mem.find(u8, name, "/") != null or mem.find(u8, name, "\\") != null) {
 panic("invalid name: '{s}'. It looks like a file path, but it is supposed to be the library or application name.", .{name});
 }
@@ -2129,7 +2129,7 @@ fn matchCompileError(actual: []const u8, expected: []const u8) bool {
 // We scan for /?/ in expected line and if there is a match, we match everything
 // up to and after /?/.
 const expected_trim = mem.trim(u8, expected, " ");
-if (mem.indexOf(u8, expected_trim, "/?/")) |index| {
+if (mem.find(u8, expected_trim, "/?/")) |index| {
 const actual_trim = mem.trim(u8, actual, " ");
 const lhs = expected_trim[0..index];
 const rhs = expected_trim[index + "/?/".len ..];


@@ -581,12 +581,12 @@ fn expand_variables_autoconf_at(
 var source_offset: usize = 0;
 while (curr < contents.len) : (curr += 1) {
 if (contents[curr] != '@') continue;
-if (std.mem.indexOfScalarPos(u8, contents, curr + 1, '@')) |close_pos| {
+if (std.mem.findScalarPos(u8, contents, curr + 1, '@')) |close_pos| {
 if (close_pos == curr + 1) {
 // closed immediately, preserve as a literal
 continue;
 }
-const valid_varname_end = std.mem.indexOfNonePos(u8, contents, curr + 1, valid_varname_chars) orelse 0;
+const valid_varname_end = std.mem.findNonePos(u8, contents, curr + 1, valid_varname_chars) orelse 0;
 if (valid_varname_end != close_pos) {
 // contains invalid characters, preserve as a literal
 continue;
@@ -638,12 +638,12 @@ fn expand_variables_cmake(
 loop: while (curr < contents.len) : (curr += 1) {
 switch (contents[curr]) {
 '@' => blk: {
-if (std.mem.indexOfScalarPos(u8, contents, curr + 1, '@')) |close_pos| {
+if (std.mem.findScalarPos(u8, contents, curr + 1, '@')) |close_pos| {
 if (close_pos == curr + 1) {
 // closed immediately, preserve as a literal
 break :blk;
 }
-const valid_varname_end = std.mem.indexOfNonePos(u8, contents, curr + 1, valid_varname_chars) orelse 0;
+const valid_varname_end = std.mem.findNonePos(u8, contents, curr + 1, valid_varname_chars) orelse 0;
 if (valid_varname_end != close_pos) {
 // contains invalid characters, preserve as a literal
 break :blk;
@@ -734,7 +734,7 @@ fn expand_variables_cmake(
 else => {},
 }
-if (var_stack.items.len > 0 and std.mem.indexOfScalar(u8, valid_varname_chars, contents[curr]) == null) {
+if (var_stack.items.len > 0 and std.mem.findScalar(u8, valid_varname_chars, contents[curr]) == null) {
 return error.InvalidCharacter;
 }
 }


@@ -1509,7 +1509,7 @@ fn runCommand(
 }
 },
 .expect_stderr_match => |match| {
-if (mem.indexOf(u8, generic_result.stderr.?, match) == null) {
+if (mem.find(u8, generic_result.stderr.?, match) == null) {
 return step.fail(
 \\========= expected to find in stderr: =========
 \\{s}
@@ -1535,7 +1535,7 @@ fn runCommand(
 }
 },
 .expect_stdout_match => |match| {
-if (mem.indexOf(u8, generic_result.stdout.?, match) == null) {
+if (mem.find(u8, generic_result.stdout.?, match) == null) {
 return step.fail(
 \\========= expected to find in stdout: =========
 \\{s}


@@ -993,7 +993,7 @@ pub fn streamDelimiterLimit(
 error.ReadFailed => return error.ReadFailed,
 error.EndOfStream => return @intFromEnum(limit) - remaining,
 });
-if (std.mem.indexOfScalar(u8, available, delimiter)) |delimiter_index| {
+if (std.mem.findScalar(u8, available, delimiter)) |delimiter_index| {
 try w.writeAll(available[0..delimiter_index]);
 r.toss(delimiter_index);
 remaining -= delimiter_index;
@@ -1064,7 +1064,7 @@ pub fn discardDelimiterLimit(r: *Reader, delimiter: u8, limit: Limit) DiscardDel
 error.ReadFailed => return error.ReadFailed,
 error.EndOfStream => return @intFromEnum(limit) - remaining,
 });
-if (std.mem.indexOfScalar(u8, available, delimiter)) |delimiter_index| {
+if (std.mem.findScalar(u8, available, delimiter)) |delimiter_index| {
 r.toss(delimiter_index);
 remaining -= delimiter_index;
 return @intFromEnum(limit) - remaining;


@@ -1702,7 +1702,7 @@ pub fn printFloatHex(w: *Writer, value: anytype, case: std.fmt.Case, opt_precisi
 try w.writeAll("0x");
 try w.writeByte(buf[0]);
-const trimmed = std.mem.trimRight(u8, buf[1..], "0");
+const trimmed = std.mem.trimEnd(u8, buf[1..], "0");
 if (opt_precision) |precision| {
 if (precision > 0) try w.writeAll(".");
 } else if (trimmed.len > 0) {


@@ -257,7 +257,7 @@ pub const Node = struct {
 const index = n.index.unwrap() orelse return;
 const storage = storageByIndex(index);
-const name_len = @min(max_name_len, std.mem.indexOfScalar(u8, new_name, 0) orelse new_name.len);
+const name_len = @min(max_name_len, std.mem.findScalar(u8, new_name, 0) orelse new_name.len);
 copyAtomicStore(storage.name[0..name_len], new_name[0..name_len]);
 if (name_len < storage.name.len)
@@ -1347,7 +1347,7 @@ fn computeNode(
 const storage = &serialized.storage[@intFromEnum(node_index)];
 const estimated_total = storage.estimated_total_count;
 const completed_items = storage.completed_count;
-const name = if (std.mem.indexOfScalar(u8, &storage.name, 0)) |end| storage.name[0..end] else &storage.name;
+const name = if (std.mem.findScalar(u8, &storage.name, 0)) |end| storage.name[0..end] else &storage.name;
 const parent = serialized.parents[@intFromEnum(node_index)];
 if (parent != .none) p: {


@@ -180,7 +180,7 @@ pub fn main() !void {
 if (bench_prngs) {
 if (bench_long) {
 inline for (prngs) |R| {
-if (filter == null or std.mem.indexOf(u8, R.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, R.name, filter.?) != null) {
 try stdout.print("{s} (long outputs)\n", .{R.name});
 try stdout.flush();
@@ -191,7 +191,7 @@ pub fn main() !void {
 }
 if (bench_short) {
 inline for (prngs) |R| {
-if (filter == null or std.mem.indexOf(u8, R.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, R.name, filter.?) != null) {
 try stdout.print("{s} (short outputs)\n", .{R.name});
 try stdout.flush();
@@ -204,7 +204,7 @@ pub fn main() !void {
 if (bench_csprngs) {
 if (bench_long) {
 inline for (csprngs) |R| {
-if (filter == null or std.mem.indexOf(u8, R.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, R.name, filter.?) != null) {
 try stdout.print("{s} (cryptographic, long outputs)\n", .{R.name});
 try stdout.flush();
@@ -215,7 +215,7 @@ pub fn main() !void {
 }
 if (bench_short) {
 inline for (csprngs) |R| {
-if (filter == null or std.mem.indexOf(u8, R.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, R.name, filter.?) != null) {
 try stdout.print("{s} (cryptographic, short outputs)\n", .{R.name});
 try stdout.flush();


@@ -84,7 +84,7 @@ pub fn order(lhs: Version, rhs: Version) std.math.Order {
 pub fn parse(text: []const u8) !Version {
 // Parse the required major, minor, and patch numbers.
-const extra_index = std.mem.indexOfAny(u8, text, "-+");
+const extra_index = std.mem.findAny(u8, text, "-+");
 const required = text[0..(extra_index orelse text.len)];
 var it = std.mem.splitScalar(u8, required, '.');
 var ver = Version{
@@ -98,7 +98,7 @@ pub fn parse(text: []const u8) !Version {
 // Slice optional pre-release or build metadata components.
 const extra: []const u8 = text[extra_index.?..text.len];
 if (extra[0] == '-') {
-const build_index = std.mem.indexOfScalar(u8, extra, '+');
+const build_index = std.mem.findScalar(u8, extra, '+');
 ver.pre = extra[1..(build_index orelse extra.len)];
 if (build_index) |idx| ver.build = extra[(idx + 1)..];
 } else {


@@ -65,7 +65,7 @@ pub const Component = union(enum) {
 pub fn toRaw(component: Component, buffer: []u8) error{NoSpaceLeft}![]const u8 {
 return switch (component) {
 .raw => |raw| raw,
-.percent_encoded => |percent_encoded| if (std.mem.indexOfScalar(u8, percent_encoded, '%')) |_|
+.percent_encoded => |percent_encoded| if (std.mem.findScalar(u8, percent_encoded, '%')) |_|
 try std.fmt.bufPrint(buffer, "{f}", .{std.fmt.alt(component, .formatRaw)})
 else
 percent_encoded,
@@ -76,7 +76,7 @@ pub const Component = union(enum) {
 pub fn toRawMaybeAlloc(component: Component, arena: Allocator) Allocator.Error![]const u8 {
 return switch (component) {
 .raw => |raw| raw,
-.percent_encoded => |percent_encoded| if (std.mem.indexOfScalar(u8, percent_encoded, '%')) |_|
+.percent_encoded => |percent_encoded| if (std.mem.findScalar(u8, percent_encoded, '%')) |_|
 try std.fmt.allocPrint(arena, "{f}", .{std.fmt.alt(component, .formatRaw)})
 else
 percent_encoded,
@@ -89,7 +89,7 @@ pub const Component = union(enum) {
 .percent_encoded => |percent_encoded| {
 var start: usize = 0;
 var index: usize = 0;
-while (std.mem.indexOfScalarPos(u8, percent_encoded, index, '%')) |percent| {
+while (std.mem.findScalarPos(u8, percent_encoded, index, '%')) |percent| {
 index = percent + 1;
 if (percent_encoded.len - index < 2) continue;
 const percent_encoded_char =
@@ -213,7 +213,7 @@ pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
 var i: usize = 0;
 if (std.mem.startsWith(u8, text, "//")) a: {
-i = std.mem.indexOfAnyPos(u8, text, 2, &authority_sep) orelse text.len;
+i = std.mem.findAnyPos(u8, text, 2, &authority_sep) orelse text.len;
 const authority = text[2..i];
 if (authority.len == 0) {
 if (!std.mem.startsWith(u8, text[2..], "/")) return error.InvalidFormat;
@@ -221,11 +221,11 @@ pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
 }
 var start_of_host: usize = 0;
-if (std.mem.indexOf(u8, authority, "@")) |index| {
+if (std.mem.find(u8, authority, "@")) |index| {
 start_of_host = index + 1;
 const user_info = authority[0..index];
-if (std.mem.indexOf(u8, user_info, ":")) |idx| {
+if (std.mem.find(u8, user_info, ":")) |idx| {
 uri.user = .{ .percent_encoded = user_info[0..idx] };
 if (idx < user_info.len - 1) { // empty password is also "no password"
 uri.password = .{ .percent_encoded = user_info[idx + 1 ..] };
@@ -247,16 +247,16 @@ pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
 }
 if (authority.len > start_of_host and authority[start_of_host] == '[') { // IPv6
-end_of_host = std.mem.lastIndexOf(u8, authority, "]") orelse return error.InvalidFormat;
+end_of_host = std.mem.findLast(u8, authority, "]") orelse return error.InvalidFormat;
 end_of_host += 1;
-if (std.mem.lastIndexOf(u8, authority, ":")) |index| {
+if (std.mem.findLast(u8, authority, ":")) |index| {
 if (index >= end_of_host) { // if not part of the V6 address field
 end_of_host = @min(end_of_host, index);
 uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort;
 }
 }
-} else if (std.mem.lastIndexOf(u8, authority, ":")) |index| {
+} else if (std.mem.findLast(u8, authority, ":")) |index| {
 if (index >= start_of_host) { // if not part of the userinfo field
 end_of_host = @min(end_of_host, index);
 uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort;
@@ -268,12 +268,12 @@ pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
 }
 const path_start = i;
-i = std.mem.indexOfAnyPos(u8, text, path_start, &path_sep) orelse text.len;
+i = std.mem.findAnyPos(u8, text, path_start, &path_sep) orelse text.len;
 uri.path = .{ .percent_encoded = text[path_start..i] };
 if (std.mem.startsWith(u8, text[i..], "?")) {
 const query_start = i + 1;
-i = std.mem.indexOfScalarPos(u8, text, query_start, '#') orelse text.len;
+i = std.mem.findScalarPos(u8, text, query_start, '#') orelse text.len;
 uri.query = .{ .percent_encoded = text[query_start..i] };
 }
@@ -513,7 +513,7 @@ fn merge_paths(base: Component, new: []u8, aux_buf: *[]u8) error{NoSpaceLeft}!Co
 var aux: Writer = .fixed(aux_buf.*);
 if (!base.isEmpty()) {
 base.formatPath(&aux) catch return error.NoSpaceLeft;
-aux.end = std.mem.lastIndexOfScalar(u8, aux.buffered(), '/') orelse return remove_dot_segments(new);
+aux.end = std.mem.findScalarLast(u8, aux.buffered(), '/') orelse return remove_dot_segments(new);
 }
 aux.print("/{s}", .{new}) catch return error.NoSpaceLeft;
 const merged_path = remove_dot_segments(aux.buffered());


@@ -156,7 +156,7 @@ test whitespace {
 var i: u8 = 0;
 while (isAscii(i)) : (i += 1) {
-if (isWhitespace(i)) try std.testing.expect(std.mem.indexOfScalar(u8, &whitespace, i) != null);
+if (isWhitespace(i)) try std.testing.expect(std.mem.findScalar(u8, &whitespace, i) != null);
 }
 }


@@ -466,13 +466,13 @@ pub const SectionHeader = extern struct {
 pub fn getName(self: *align(1) const SectionHeader) ?[]const u8 {
 if (self.name[0] == '/') return null;
-const len = std.mem.indexOfScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
+const len = std.mem.findScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
 return self.name[0..len];
 }
 pub fn getNameOffset(self: SectionHeader) ?u32 {
 if (self.name[0] != '/') return null;
-const len = std.mem.indexOfScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
+const len = std.mem.findScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
 const offset = std.fmt.parseInt(u32, self.name[1..len], 10) catch unreachable;
 return offset;
 }
@@ -628,7 +628,7 @@ pub const Symbol = struct {
 pub fn getName(self: *const Symbol) ?[]const u8 {
 if (std.mem.eql(u8, self.name[0..4], "\x00\x00\x00\x00")) return null;
-const len = std.mem.indexOfScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
+const len = std.mem.findScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
 return self.name[0..len];
 }
@@ -869,7 +869,7 @@ pub const FileDefinition = struct {
 file_name: [18]u8,
 pub fn getFileName(self: *const FileDefinition) []const u8 {
-const len = std.mem.indexOfScalar(u8, &self.file_name, @as(u8, 0)) orelse self.file_name.len;
+const len = std.mem.findScalar(u8, &self.file_name, @as(u8, 0)) orelse self.file_name.len;
 return self.file_name[0..len];
 }
 };
@@ -1044,7 +1044,7 @@ pub const Coff = struct {
 // Finally read the null-terminated string.
 const start = reader.seek;
-const len = std.mem.indexOfScalar(u8, self.data[start..], 0) orelse return null;
+const len = std.mem.findScalar(u8, self.data[start..], 0) orelse return null;
 return self.data[start .. start + len];
 }


@@ -598,7 +598,7 @@ fn testFuzzedMatchLen(_: void, input: []const u8) !void {
 const bytes = w.buffered()[bytes_off..];
 old = @min(old, bytes.len - 1, token.max_length - 1);
-const diff_index = mem.indexOfDiff(u8, prev, bytes).?; // unwrap since lengths are not same
+const diff_index = mem.findDiff(u8, prev, bytes).?; // unwrap since lengths are not same
 const expected_len = @min(diff_index, 258);
 errdefer std.debug.print(
 \\prev : '{any}'


@@ -358,10 +358,10 @@ pub const Parsed = struct {
 const wildcard_suffix = dns_name[2..];
 // No additional wildcards allowed in the suffix
-if (mem.indexOf(u8, wildcard_suffix, "*") != null) return false;
+if (mem.find(u8, wildcard_suffix, "*") != null) return false;
 // Find the first dot in hostname to split first label from rest
-const dot_pos = mem.indexOf(u8, host_name, ".") orelse return false;
+const dot_pos = mem.find(u8, host_name, ".") orelse return false;
 // Wildcard matches exactly one label, so compare the rest
 const host_suffix = host_name[dot_pos + 1 ..];
@@ -1060,9 +1060,9 @@ pub const rsa = struct {
 }
 var m_p_buf: [8 + Hash.digest_length + Hash.digest_length]u8 = undefined;
 var m_p = m_p_buf[0 .. 8 + Hash.digest_length + sLen];
-std.mem.copyForwards(u8, m_p, &([_]u8{0} ** 8));
-std.mem.copyForwards(u8, m_p[8..], &mHash);
-std.mem.copyForwards(u8, m_p[(8 + Hash.digest_length)..], salt);
+@memmove(m_p, &([_]u8{0} ** 8));
+@memmove(m_p[8..], &mHash);
+@memmove(m_p[(8 + Hash.digest_length)..], salt);
 // 13. Let H' = Hash(M'), an octet string of length hLen.
 var h_p: [Hash.digest_length]u8 = undefined;

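The rsa hunk above also swaps `std.mem.copyForwards` for the `@memmove` builtin. A minimal sketch of that pattern, assuming `@memmove` takes `(dest, source)` slices like `@memcpy` but tolerates overlapping arguments; here it shifts bytes left within one buffer, which is the overlap direction `copyForwards` handled:

```zig
const std = @import("std");

test "copyForwards -> @memmove for overlapping copies" {
    var buf = "abcdef".*;
    // Old: std.mem.copyForwards(u8, buf[0..5], buf[1..6]);
    // New: the builtin copies between equal-length, possibly overlapping slices.
    @memmove(buf[0..5], buf[1..6]);
    try std.testing.expectEqualStrings("bcdeff", &buf);
}
```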

@@ -269,9 +269,9 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file_reader: *Io.File.Reade
 const end_marker = "-----END CERTIFICATE-----";
 var start_index: usize = 0;
-while (mem.indexOfPos(u8, encoded_bytes, start_index, begin_marker)) |begin_marker_start| {
+while (mem.findPos(u8, encoded_bytes, start_index, begin_marker)) |begin_marker_start| {
 const cert_start = begin_marker_start + begin_marker.len;
-const cert_end = mem.indexOfPos(u8, encoded_bytes, cert_start, end_marker) orelse
+const cert_end = mem.findPos(u8, encoded_bytes, cert_start, end_marker) orelse
 return error.MissingEndCertificateMarker;
 start_index = cert_end + end_marker.len;
 const encoded_cert = mem.trim(u8, encoded_bytes[cert_start..cert_end], " \t\r\n");


@@ -538,7 +538,7 @@ pub fn main() !void {
 }
 inline for (hashes) |H| {
-if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, H.name, filter.?) != null) {
 const throughput = try benchmarkHash(H.ty, mode(128 * MiB));
 try stdout.print("{s:>17}: {:10} MiB/s\n", .{ H.name, throughput / (1 * MiB) });
 try stdout.flush();
@@ -550,7 +550,7 @@ pub fn main() !void {
 const io = io_threaded.io();
 inline for (parallel_hashes) |H| {
-if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, H.name, filter.?) != null) {
 const throughput = try benchmarkHashParallel(H.ty, mode(128 * MiB), arena_allocator, io);
 try stdout.print("{s:>17}: {:10} MiB/s\n", .{ H.name, throughput / (1 * MiB) });
 try stdout.flush();
@@ -558,7 +558,7 @@ pub fn main() !void {
 }
 inline for (macs) |M| {
-if (filter == null or std.mem.indexOf(u8, M.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, M.name, filter.?) != null) {
 const throughput = try benchmarkMac(M.ty, mode(128 * MiB));
 try stdout.print("{s:>17}: {:10} MiB/s\n", .{ M.name, throughput / (1 * MiB) });
 try stdout.flush();
@@ -566,7 +566,7 @@ pub fn main() !void {
 }
 inline for (exchanges) |E| {
-if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
 const throughput = try benchmarkKeyExchange(E.ty, mode(1000));
 try stdout.print("{s:>17}: {:10} exchanges/s\n", .{ E.name, throughput });
 try stdout.flush();
@@ -574,7 +574,7 @@ pub fn main() !void {
 }
 inline for (signatures) |E| {
-if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
 const throughput = try benchmarkSignature(E.ty, mode(1000));
 try stdout.print("{s:>17}: {:10} signatures/s\n", .{ E.name, throughput });
 try stdout.flush();
@@ -582,7 +582,7 @@ pub fn main() !void {
 }
 inline for (signature_verifications) |E| {
-if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
 const throughput = try benchmarkSignatureVerification(E.ty, mode(1000));
 try stdout.print("{s:>17}: {:10} verifications/s\n", .{ E.name, throughput });
 try stdout.flush();
@@ -590,7 +590,7 @@ pub fn main() !void {
 }
 inline for (batch_signature_verifications) |E| {
-if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
+if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
 const throughput = try benchmarkBatchSignatureVerification(E.ty, mode(1000));
try stdout.print("{s:>17}: {:10} verifications/s (batch)\n", .{ E.name, throughput }); try stdout.print("{s:>17}: {:10} verifications/s (batch)\n", .{ E.name, throughput });
try stdout.flush(); try stdout.flush();
@ -598,7 +598,7 @@ pub fn main() !void {
} }
inline for (aeads) |E| { inline for (aeads) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
const throughput = try benchmarkAead(E.ty, mode(128 * MiB)); const throughput = try benchmarkAead(E.ty, mode(128 * MiB));
try stdout.print("{s:>17}: {:10} MiB/s\n", .{ E.name, throughput / (1 * MiB) }); try stdout.print("{s:>17}: {:10} MiB/s\n", .{ E.name, throughput / (1 * MiB) });
try stdout.flush(); try stdout.flush();
@ -606,7 +606,7 @@ pub fn main() !void {
} }
inline for (aes) |E| { inline for (aes) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
const throughput = try benchmarkAes(E.ty, mode(100000000)); const throughput = try benchmarkAes(E.ty, mode(100000000));
try stdout.print("{s:>17}: {:10} ops/s\n", .{ E.name, throughput }); try stdout.print("{s:>17}: {:10} ops/s\n", .{ E.name, throughput });
try stdout.flush(); try stdout.flush();
@ -614,7 +614,7 @@ pub fn main() !void {
} }
inline for (aes8) |E| { inline for (aes8) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
const throughput = try benchmarkAes8(E.ty, mode(10000000)); const throughput = try benchmarkAes8(E.ty, mode(10000000));
try stdout.print("{s:>17}: {:10} ops/s\n", .{ E.name, throughput }); try stdout.print("{s:>17}: {:10} ops/s\n", .{ E.name, throughput });
try stdout.flush(); try stdout.flush();
@ -622,7 +622,7 @@ pub fn main() !void {
} }
inline for (pwhashes) |H| { inline for (pwhashes) |H| {
if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) { if (filter == null or std.mem.find(u8, H.name, filter.?) != null) {
const throughput = try benchmarkPwhash(arena_allocator, H.ty, H.params, mode(64)); const throughput = try benchmarkPwhash(arena_allocator, H.ty, H.params, mode(64));
try stdout.print("{s:>17}: {d:10.3} s/ops\n", .{ H.name, throughput }); try stdout.print("{s:>17}: {d:10.3} s/ops\n", .{ H.name, throughput });
try stdout.flush(); try stdout.flush();
@ -630,7 +630,7 @@ pub fn main() !void {
} }
inline for (kems) |E| { inline for (kems) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
const throughput = try benchmarkKem(E.ty, mode(1000)); const throughput = try benchmarkKem(E.ty, mode(1000));
try stdout.print("{s:>17}: {:10} encaps/s\n", .{ E.name, throughput }); try stdout.print("{s:>17}: {:10} encaps/s\n", .{ E.name, throughput });
try stdout.flush(); try stdout.flush();
@ -638,7 +638,7 @@ pub fn main() !void {
} }
inline for (kems) |E| { inline for (kems) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
const throughput = try benchmarkKemDecaps(E.ty, mode(25000)); const throughput = try benchmarkKemDecaps(E.ty, mode(25000));
try stdout.print("{s:>17}: {:10} decaps/s\n", .{ E.name, throughput }); try stdout.print("{s:>17}: {:10} decaps/s\n", .{ E.name, throughput });
try stdout.flush(); try stdout.flush();
@ -646,7 +646,7 @@ pub fn main() !void {
} }
inline for (kems) |E| { inline for (kems) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { if (filter == null or std.mem.find(u8, E.name, filter.?) != null) {
const throughput = try benchmarkKemKeyGen(E.ty, mode(25000)); const throughput = try benchmarkKemKeyGen(E.ty, mode(25000));
try stdout.print("{s:>17}: {:10} keygen/s\n", .{ E.name, throughput }); try stdout.print("{s:>17}: {:10} keygen/s\n", .{ E.name, throughput });
try stdout.flush(); try stdout.flush();

View file

@ -135,8 +135,8 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
/// The function returns a slice that can be shorter than der_encoded_length_max. /// The function returns a slice that can be shorter than der_encoded_length_max.
pub fn toDer(sig: Signature, buf: *[der_encoded_length_max]u8) []u8 { pub fn toDer(sig: Signature, buf: *[der_encoded_length_max]u8) []u8 {
var w: std.Io.Writer = .fixed(buf); var w: std.Io.Writer = .fixed(buf);
const sig_r = mem.trimLeft(u8, &sig.r, &.{0}); const sig_r = mem.trimStart(u8, &sig.r, &.{0});
const sig_s = mem.trimLeft(u8, &sig.s, &.{0}); const sig_s = mem.trimStart(u8, &sig.s, &.{0});
const r_len = @as(u8, @intCast(sig_r.len + (sig_r[0] >> 7))); const r_len = @as(u8, @intCast(sig_r.len + (sig_r[0] >> 7)));
const s_len = @as(u8, @intCast(sig_s.len + (sig_s[0] >> 7))); const s_len = @as(u8, @intCast(sig_s.len + (sig_s[0] >> 7)));
const seq_len = @as(u8, @intCast(2 + r_len + 2 + s_len)); const seq_len = @as(u8, @intCast(2 + r_len + 2 + s_len));

View file

@ -358,7 +358,7 @@ const crypt_format = struct {
fn intDecode(comptime T: type, src: *const [(@bitSizeOf(T) + 5) / 6]u8) !T { fn intDecode(comptime T: type, src: *const [(@bitSizeOf(T) + 5) / 6]u8) !T {
var v: T = 0; var v: T = 0;
for (src, 0..) |x, i| { for (src, 0..) |x, i| {
const vi = mem.indexOfScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding; const vi = mem.findScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding;
v |= @as(T, @intCast(vi)) << @as(math.Log2Int(T), @intCast(i * 6)); v |= @as(T, @intCast(vi)) << @as(math.Log2Int(T), @intCast(i * 6));
} }
return v; return v;
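The decode above is a reverse table lookup: a character's position in the alphabet is its numeric value. A generic sketch with a made-up hex alphabet (not the real map64):

const std = @import("std");

test "digit lookup via findScalar (sketch)" {
    const digits = "0123456789abcdef";
    try std.testing.expectEqual(@as(usize, 12), std.mem.findScalar(u8, digits, 'c').?);
    // Characters outside the alphabet report null, which maps to InvalidEncoding above.
    try std.testing.expect(std.mem.findScalar(u8, digits, 'g') == null);
}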

View file

@ -1158,7 +1158,7 @@ fn readIndirect(c: *Client) Reader.Error!usize {
P.AEAD.decrypt(cleartext, ciphertext, auth_tag, ad, nonce, pv.server_key) catch P.AEAD.decrypt(cleartext, ciphertext, auth_tag, ad, nonce, pv.server_key) catch
return failRead(c, error.TlsBadRecordMac); return failRead(c, error.TlsBadRecordMac);
// TODO use scalar, non-slice version // TODO use scalar, non-slice version
const msg = mem.trimRight(u8, cleartext, "\x00"); const msg = mem.trimEnd(u8, cleartext, "\x00");
break :cleartext .{ msg.len - 1, @enumFromInt(msg[msg.len - 1]) }; break :cleartext .{ msg.len - 1, @enumFromInt(msg[msg.len - 1]) };
}, },
.tls_1_2 => { .tls_1_2 => {

View file

@ -1196,7 +1196,7 @@ fn printLineFromFile(writer: *Writer, source_location: SourceLocation) !void {
var next_line: usize = 1; var next_line: usize = 1;
while (next_line != source_location.line) { while (next_line != source_location.line) {
const slice = buf[current_line_start..amt_read]; const slice = buf[current_line_start..amt_read];
if (mem.indexOfScalar(u8, slice, '\n')) |pos| { if (mem.findScalar(u8, slice, '\n')) |pos| {
next_line += 1; next_line += 1;
if (pos == slice.len - 1) { if (pos == slice.len - 1) {
amt_read = try f.read(buf[0..]); amt_read = try f.read(buf[0..]);
@ -1212,7 +1212,7 @@ fn printLineFromFile(writer: *Writer, source_location: SourceLocation) !void {
break :seek current_line_start; break :seek current_line_start;
}; };
const slice = buf[line_start..amt_read]; const slice = buf[line_start..amt_read];
if (mem.indexOfScalar(u8, slice, '\n')) |pos| { if (mem.findScalar(u8, slice, '\n')) |pos| {
const line = slice[0 .. pos + 1]; const line = slice[0 .. pos + 1];
mem.replaceScalar(u8, line, '\t', ' '); mem.replaceScalar(u8, line, '\t', ' ');
return writer.writeAll(line); return writer.writeAll(line);
@ -1221,7 +1221,7 @@ fn printLineFromFile(writer: *Writer, source_location: SourceLocation) !void {
try writer.writeAll(slice); try writer.writeAll(slice);
while (amt_read == buf.len) { while (amt_read == buf.len) {
amt_read = try f.read(buf[0..]); amt_read = try f.read(buf[0..]);
if (mem.indexOfScalar(u8, buf[0..amt_read], '\n')) |pos| { if (mem.findScalar(u8, buf[0..amt_read], '\n')) |pos| {
const line = buf[0 .. pos + 1]; const line = buf[0 .. pos + 1];
mem.replaceScalar(u8, line, '\t', ' '); mem.replaceScalar(u8, line, '\t', ' ');
return writer.writeAll(line); return writer.writeAll(line);

View file

@ -437,7 +437,7 @@ fn scanAllFunctions(di: *Dwarf, gpa: Allocator, endian: Endian) ScanError!void {
}; };
while (true) { while (true) {
fr.seek = std.mem.indexOfNonePos(u8, fr.buffer, fr.seek, &.{ fr.seek = std.mem.findNonePos(u8, fr.buffer, fr.seek, &.{
zig_padding_abbrev_code, 0, zig_padding_abbrev_code, 0,
}) orelse fr.buffer.len; }) orelse fr.buffer.len;
if (fr.seek >= next_unit_pos) break; if (fr.seek >= next_unit_pos) break;
@ -1539,7 +1539,7 @@ fn getStringGeneric(opt_str: ?[]const u8, offset: u64) ![:0]const u8 {
if (offset > str.len) return bad(); if (offset > str.len) return bad();
const casted_offset = cast(usize, offset) orelse return bad(); const casted_offset = cast(usize, offset) orelse return bad();
// Valid strings always have a terminating zero byte // Valid strings always have a terminating zero byte
const last = std.mem.indexOfScalarPos(u8, str, casted_offset, 0) orelse return bad(); const last = std.mem.findScalarPos(u8, str, casted_offset, 0) orelse return bad();
return str[casted_offset..last :0]; return str[casted_offset..last :0];
} }

View file

@ -197,7 +197,7 @@ pub const ElfDynLib = struct {
// - /etc/ld.so.cache is not read // - /etc/ld.so.cache is not read
fn resolveFromName(path_or_name: []const u8) !posix.fd_t { fn resolveFromName(path_or_name: []const u8) !posix.fd_t {
// If filename contains a slash ("/"), then it is interpreted as a (relative or absolute) pathname // If filename contains a slash ("/"), then it is interpreted as a (relative or absolute) pathname
if (std.mem.indexOfScalarPos(u8, path_or_name, 0, '/')) |_| { if (std.mem.findScalarPos(u8, path_or_name, 0, '/')) |_| {
return posix.open(path_or_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0); return posix.open(path_or_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
} }

View file

@ -3039,7 +3039,7 @@ pub const ar_hdr = extern struct {
pub fn name(self: *const ar_hdr) ?[]const u8 { pub fn name(self: *const ar_hdr) ?[]const u8 {
const value = &self.ar_name; const value = &self.ar_name;
if (value[0] == '/') return null; if (value[0] == '/') return null;
const sentinel = mem.indexOfScalar(u8, value, '/') orelse value.len; const sentinel = mem.findScalar(u8, value, '/') orelse value.len;
return value[0..sentinel]; return value[0..sentinel];
} }

View file

@ -182,7 +182,7 @@ pub const Parser = struct {
pub fn until(self: *@This(), delimiter: u8) []const u8 { pub fn until(self: *@This(), delimiter: u8) []const u8 {
const start = self.i; const start = self.i;
self.i = std.mem.indexOfScalarPos(u8, self.bytes, self.i, delimiter) orelse self.bytes.len; self.i = std.mem.findScalarPos(u8, self.bytes, self.i, delimiter) orelse self.bytes.len;
return self.bytes[start..self.i]; return self.bytes[start..self.i];
} }
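The cursor logic above is a bounded scan; a self-contained equivalent (example input and names are mine, not the Parser API):

const std = @import("std");

test "until-style scanning with findScalarPos (sketch)" {
    const bytes = "name=zig;rest";
    var i: usize = 0;
    // Scan from the cursor to the delimiter, or to the end of input if it is absent.
    const key_end = std.mem.findScalarPos(u8, bytes, i, '=') orelse bytes.len;
    try std.testing.expectEqualStrings("name", bytes[i..key_end]);
    i = key_end + 1; // step past the '='
    const value_end = std.mem.findScalarPos(u8, bytes, i, ';') orelse bytes.len;
    try std.testing.expectEqualStrings("zig", bytes[i..value_end]);
}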

View file

@ -469,7 +469,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
return error.FileNotFound; return error.FileNotFound;
const argv0 = mem.span(std.os.argv[0]); const argv0 = mem.span(std.os.argv[0]);
if (mem.indexOf(u8, argv0, "/") != null) { if (mem.find(u8, argv0, "/") != null) {
// argv[0] is a path (relative or absolute): use realpath(3) directly // argv[0] is a path (relative or absolute): use realpath(3) directly
var real_path_buf: [max_path_bytes]u8 = undefined; var real_path_buf: [max_path_bytes]u8 = undefined;
const real_path = posix.realpathZ(std.os.argv[0], &real_path_buf) catch |err| switch (err) { const real_path = posix.realpathZ(std.os.argv[0], &real_path_buf) catch |err| switch (err) {

View file

@ -179,7 +179,7 @@ pub fn isCygwinPty(file: File) bool {
// The name we get from NtQueryInformationFile will be prefixed with a '\', e.g. \msys-1888ae32e00d56aa-pty0-to-master // The name we get from NtQueryInformationFile will be prefixed with a '\', e.g. \msys-1888ae32e00d56aa-pty0-to-master
return (std.mem.startsWith(u16, name_wide, &[_]u16{ '\\', 'm', 's', 'y', 's', '-' }) or return (std.mem.startsWith(u16, name_wide, &[_]u16{ '\\', 'm', 's', 'y', 's', '-' }) or
std.mem.startsWith(u16, name_wide, &[_]u16{ '\\', 'c', 'y', 'g', 'w', 'i', 'n', '-' })) and std.mem.startsWith(u16, name_wide, &[_]u16{ '\\', 'c', 'y', 'g', 'w', 'i', 'n', '-' })) and
std.mem.indexOf(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null; std.mem.find(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null;
} }
/// Returns whether or not ANSI escape codes will be treated as such, /// Returns whether or not ANSI escape codes will be treated as such,

View file

@ -402,9 +402,9 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
if (path.len >= 2 and PathType.windows.isSep(u8, path[0]) and PathType.windows.isSep(u8, path[1])) { if (path.len >= 2 and PathType.windows.isSep(u8, path[0]) and PathType.windows.isSep(u8, path[1])) {
const root_end = root_end: { const root_end = root_end: {
var server_end = mem.indexOfAnyPos(u8, path, 2, "/\\") orelse break :root_end path.len; var server_end = mem.findAnyPos(u8, path, 2, "/\\") orelse break :root_end path.len;
while (server_end < path.len and PathType.windows.isSep(u8, path[server_end])) server_end += 1; while (server_end < path.len and PathType.windows.isSep(u8, path[server_end])) server_end += 1;
break :root_end mem.indexOfAnyPos(u8, path, server_end, "/\\") orelse path.len; break :root_end mem.findAnyPos(u8, path, server_end, "/\\") orelse path.len;
}; };
return WindowsPath{ return WindowsPath{
.is_abs = true, .is_abs = true,
@ -722,7 +722,7 @@ fn parseUNC(comptime T: type, path: []const T) WindowsUNC(T) {
// For the server, the first path separator after the initial two is always // For the server, the first path separator after the initial two is always
// the terminator of the server name, even if that means the server name is // the terminator of the server name, even if that means the server name is
// zero-length. // zero-length.
const server_end = mem.indexOfAnyPos(T, path, 2, any_sep) orelse return .{ const server_end = mem.findAnyPos(T, path, 2, any_sep) orelse return .{
.server = path[2..path.len], .server = path[2..path.len],
.sep_after_server = false, .sep_after_server = false,
.share = path[path.len..path.len], .share = path[path.len..path.len],
@ -1819,7 +1819,7 @@ fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []cons
/// pointer address range of `path`, even if it is length zero. /// pointer address range of `path`, even if it is length zero.
pub fn extension(path: []const u8) []const u8 { pub fn extension(path: []const u8) []const u8 {
const filename = basename(path); const filename = basename(path);
const index = mem.lastIndexOfScalar(u8, filename, '.') orelse return path[path.len..]; const index = mem.findScalarLast(u8, filename, '.') orelse return path[path.len..];
if (index == 0) return path[path.len..]; if (index == 0) return path[path.len..];
return filename[index..]; return filename[index..];
} }
@ -1876,7 +1876,7 @@ test extension {
/// - "hello/world/lib" "lib" /// - "hello/world/lib" "lib"
pub fn stem(path: []const u8) []const u8 { pub fn stem(path: []const u8) []const u8 {
const filename = basename(path); const filename = basename(path);
const index = mem.lastIndexOfScalar(u8, filename, '.') orelse return filename[0..]; const index = mem.findScalarLast(u8, filename, '.') orelse return filename[0..];
if (index == 0) return path; if (index == 0) return path;
return filename[0..index]; return filename[0..index];
} }

View file

@ -443,7 +443,7 @@ pub fn main() !void {
const allocator = gpa.allocator(); const allocator = gpa.allocator();
inline for (hashes) |H| { inline for (hashes) |H| {
if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) hash: { if (filter == null or std.mem.find(u8, H.name, filter.?) != null) hash: {
if (!test_iterative_only or H.has_iterative_api) { if (!test_iterative_only or H.has_iterative_api) {
try stdout.print("{s}\n", .{H.name}); try stdout.print("{s}\n", .{H.name});
try stdout.flush(); try stdout.flush();

View file

@ -110,7 +110,7 @@ pub const StringIndexAdapter = struct {
} }
pub fn hash(_: @This(), adapted_key: []const u8) u64 { pub fn hash(_: @This(), adapted_key: []const u8) u64 {
assert(mem.indexOfScalar(u8, adapted_key, 0) == null); assert(mem.findScalar(u8, adapted_key, 0) == null);
return hashString(adapted_key); return hashString(adapted_key);
} }
}; };

View file

@ -529,7 +529,7 @@ pub const Response = struct {
}; };
if (first_line[8] != ' ') return error.HttpHeadersInvalid; if (first_line[8] != ' ') return error.HttpHeadersInvalid;
const status: http.Status = @enumFromInt(parseInt3(first_line[9..12])); const status: http.Status = @enumFromInt(parseInt3(first_line[9..12]));
const reason = mem.trimLeft(u8, first_line[12..], " "); const reason = mem.trimStart(u8, first_line[12..], " ");
res.version = version; res.version = version;
res.status = status; res.status = status;
@ -1674,14 +1674,14 @@ pub fn request(
if (std.debug.runtime_safety) { if (std.debug.runtime_safety) {
for (options.extra_headers) |header| { for (options.extra_headers) |header| {
assert(header.name.len != 0); assert(header.name.len != 0);
assert(std.mem.indexOfScalar(u8, header.name, ':') == null); assert(std.mem.findScalar(u8, header.name, ':') == null);
assert(std.mem.indexOfPosLinear(u8, header.name, 0, "\r\n") == null); assert(std.mem.findPosLinear(u8, header.name, 0, "\r\n") == null);
assert(std.mem.indexOfPosLinear(u8, header.value, 0, "\r\n") == null); assert(std.mem.findPosLinear(u8, header.value, 0, "\r\n") == null);
} }
for (options.privileged_headers) |header| { for (options.privileged_headers) |header| {
assert(header.name.len != 0); assert(header.name.len != 0);
assert(std.mem.indexOfPosLinear(u8, header.name, 0, "\r\n") == null); assert(std.mem.findPosLinear(u8, header.name, 0, "\r\n") == null);
assert(std.mem.indexOfPosLinear(u8, header.value, 0, "\r\n") == null); assert(std.mem.findPosLinear(u8, header.value, 0, "\r\n") == null);
} }
} }

View file

@ -5,17 +5,17 @@ is_trailer: bool,
pub fn init(bytes: []const u8) HeaderIterator { pub fn init(bytes: []const u8) HeaderIterator {
return .{ return .{
.bytes = bytes, .bytes = bytes,
.index = std.mem.indexOfPosLinear(u8, bytes, 0, "\r\n").? + 2, .index = std.mem.findPosLinear(u8, bytes, 0, "\r\n").? + 2,
.is_trailer = false, .is_trailer = false,
}; };
} }
pub fn next(it: *HeaderIterator) ?std.http.Header { pub fn next(it: *HeaderIterator) ?std.http.Header {
const end = std.mem.indexOfPosLinear(u8, it.bytes, it.index, "\r\n").?; const end = std.mem.findPosLinear(u8, it.bytes, it.index, "\r\n").?;
if (it.index == end) { // found the trailer boundary (\r\n\r\n) if (it.index == end) { // found the trailer boundary (\r\n\r\n)
if (it.is_trailer) return null; if (it.is_trailer) return null;
const next_end = std.mem.indexOfPosLinear(u8, it.bytes, end + 2, "\r\n") orelse const next_end = std.mem.findPosLinear(u8, it.bytes, end + 2, "\r\n") orelse
return null; return null;
var kv_it = std.mem.splitScalar(u8, it.bytes[end + 2 .. next_end], ':'); var kv_it = std.mem.splitScalar(u8, it.bytes[end + 2 .. next_end], ':');
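A usage sketch, assuming the init/next API shown here is reachable as std.http.HeaderIterator (example input is mine):

const std = @import("std");

test "HeaderIterator walk (sketch)" {
    // init skips the first line (status or request line); the blank "\r\n\r\n" ends the headers.
    var it = std.http.HeaderIterator.init("HTTP/1.1 200 OK\r\nHost: example.com\r\nAccept: */*\r\n\r\n");
    try std.testing.expectEqualStrings("Host", it.next().?.name);
    try std.testing.expectEqualStrings("Accept", it.next().?.name);
    try std.testing.expect(it.next() == null);
}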

View file

@ -96,13 +96,13 @@ pub const Request = struct {
if (first_line.len < 10) if (first_line.len < 10)
return error.HttpHeadersInvalid; return error.HttpHeadersInvalid;
const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse const method_end = mem.findScalar(u8, first_line, ' ') orelse
return error.HttpHeadersInvalid; return error.HttpHeadersInvalid;
const method = std.meta.stringToEnum(http.Method, first_line[0..method_end]) orelse const method = std.meta.stringToEnum(http.Method, first_line[0..method_end]) orelse
return error.UnknownHttpMethod; return error.UnknownHttpMethod;
const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse const version_start = mem.findScalarLast(u8, first_line, ' ') orelse
return error.HttpHeadersInvalid; return error.HttpHeadersInvalid;
if (version_start == method_end) return error.HttpHeadersInvalid; if (version_start == method_end) return error.HttpHeadersInvalid;
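A worked example of that first-line split (the request line and names are mine):

const std = @import("std");

test "request line split (sketch)" {
    const first_line = "GET /index.html HTTP/1.1";
    const method_end = std.mem.findScalar(u8, first_line, ' ').?; // 3
    const version_start = std.mem.findScalarLast(u8, first_line, ' ').?; // 15
    try std.testing.expectEqualStrings("GET", first_line[0..method_end]);
    try std.testing.expectEqualStrings("/index.html", first_line[method_end + 1 .. version_start]);
    try std.testing.expectEqualStrings("HTTP/1.1", first_line[version_start + 1 ..]);
}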
@ -338,9 +338,9 @@ pub const Request = struct {
if (std.debug.runtime_safety) { if (std.debug.runtime_safety) {
for (options.extra_headers) |header| { for (options.extra_headers) |header| {
assert(header.name.len != 0); assert(header.name.len != 0);
assert(std.mem.indexOfScalar(u8, header.name, ':') == null); assert(std.mem.findScalar(u8, header.name, ':') == null);
assert(std.mem.indexOfPosLinear(u8, header.name, 0, "\r\n") == null); assert(std.mem.findPosLinear(u8, header.name, 0, "\r\n") == null);
assert(std.mem.indexOfPosLinear(u8, header.value, 0, "\r\n") == null); assert(std.mem.findPosLinear(u8, header.value, 0, "\r\n") == null);
} }
} }
try writeExpectContinue(request); try writeExpectContinue(request);

View file

@ -435,7 +435,7 @@ test "general client/server API coverage" {
if (mem.startsWith(u8, target, "/get")) { if (mem.startsWith(u8, target, "/get")) {
var response = try request.respondStreaming(&.{}, .{ var response = try request.respondStreaming(&.{}, .{
.content_length = if (mem.indexOf(u8, target, "?chunked") == null) .content_length = if (mem.find(u8, target, "?chunked") == null)
14 14
else else
null, null,

View file

@ -1758,7 +1758,7 @@ fn appendSlice(list: *std.array_list.Managed(u8), buf: []const u8, max_value_len
/// This function will not give meaningful results on non-numeric input. /// This function will not give meaningful results on non-numeric input.
pub fn isNumberFormattedLikeAnInteger(value: []const u8) bool { pub fn isNumberFormattedLikeAnInteger(value: []const u8) bool {
if (std.mem.eql(u8, value, "-0")) return false; if (std.mem.eql(u8, value, "-0")) return false;
return std.mem.indexOfAny(u8, value, ".eE") == null; return std.mem.findAny(u8, value, ".eE") == null;
} }
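Behavior sketch for the check above, assuming the function stays reachable as std.json.isNumberFormattedLikeAnInteger (examples are mine):

const std = @import("std");

test "integer-looking JSON numbers (sketch)" {
    try std.testing.expect(std.json.isNumberFormattedLikeAnInteger("123"));
    try std.testing.expect(!std.json.isNumberFormattedLikeAnInteger("1.0")); // has '.'
    try std.testing.expect(!std.json.isNumberFormattedLikeAnInteger("1e9")); // has 'e'
    try std.testing.expect(!std.json.isNumberFormattedLikeAnInteger("-0")); // special-cased
}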
test { test {

View file

@ -825,7 +825,7 @@ pub const section_64 = extern struct {
}; };
fn parseName(name: *const [16]u8) []const u8 { fn parseName(name: *const [16]u8) []const u8 {
const len = mem.indexOfScalar(u8, name, @as(u8, 0)) orelse name.len; const len = mem.findScalar(u8, name, @as(u8, 0)) orelse name.len;
return name[0..len]; return name[0..len];
} }

View file

@ -1658,8 +1658,8 @@ pub const Mutable = struct {
// Handle trailing zero-words of divisor/dividend. These are not handled in the following // Handle trailing zero-words of divisor/dividend. These are not handled in the following
// algorithms. // algorithms.
// Note, there must be a non-zero limb for either. // Note, there must be a non-zero limb for either.
// const x_trailing = std.mem.indexOfScalar(Limb, x.limbs[0..x.len], 0).?; // const x_trailing = std.mem.findScalar(Limb, x.limbs[0..x.len], 0).?;
// const y_trailing = std.mem.indexOfScalar(Limb, y.limbs[0..y.len], 0).?; // const y_trailing = std.mem.findScalar(Limb, y.limbs[0..y.len], 0).?;
const x_trailing = for (x.limbs[0..x.len], 0..) |xi, i| { const x_trailing = for (x.limbs[0..x.len], 0..) |xi, i| {
if (xi != 0) break i; if (xi != 0) break i;

View file

@ -231,31 +231,6 @@ test "Allocator alloc and remap with zero-bit type" {
try testing.expectEqual(200, values.len); try testing.expectEqual(200, values.len);
} }
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
/// If the slices overlap, dest.ptr must be <= src.ptr.
/// This function is deprecated; use @memmove instead.
pub fn copyForwards(comptime T: type, dest: []T, source: []const T) void {
for (dest[0..source.len], source) |*d, s| d.* = s;
}
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
/// If the slices overlap, dest.ptr must be >= src.ptr.
/// This function is deprecated; use @memmove instead.
pub fn copyBackwards(comptime T: type, dest: []T, source: []const T) void {
// TODO instead of manually doing this check for the whole array
// and turning off runtime safety, the compiler should detect loops like
// this and automatically omit safety checks for loops
@setRuntimeSafety(false);
assert(dest.len >= source.len);
var i = source.len;
while (i > 0) {
i -= 1;
dest[i] = source[i];
}
}
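For code that used the removed helpers, the replacement is @memmove with the destination sliced to the source length, since the builtin (like @memcpy) expects both lengths to match. A migration sketch, not taken from this diff:

const std = @import("std");

test "copyForwards/copyBackwards replaced by @memmove (sketch)" {
    var buf = "abcdefgh".*;
    // was: std.mem.copyForwards(u8, buf[0..], buf[2..]); // dest was allowed to be longer
    @memmove(buf[0..6], buf[2..8]); // overlapping copy with dest below source
    try std.testing.expectEqualStrings("cdefghgh", &buf);
    // was: std.mem.copyBackwards(u8, buf[2..], buf[0..6]); // dest at or above source
    @memmove(buf[2..8], buf[0..6]); // overlapping copy with dest above source
    try std.testing.expectEqualStrings("cdcdefgh", &buf);
}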
/// Generally, Zig users are encouraged to initialize all fields of a struct explicitly rather than using this function. /// Generally, Zig users are encouraged to initialize all fields of a struct explicitly rather than using this function.
/// However, it is recognized that there are sometimes use cases for initializing all fields to a "zero" value. For example, when /// However, it is recognized that there are sometimes use cases for initializing all fields to a "zero" value. For example, when
/// interfacing with a C API where this practice is more common and relied upon. If you are performing code review and see this /// interfacing with a C API where this practice is more common and relied upon. If you are performing code review and see this
@ -816,9 +791,6 @@ fn eqlBytes(a: []const u8, b: []const u8) bool {
return !Scan.isNotEqual(last_a_chunk, last_b_chunk); return !Scan.isNotEqual(last_a_chunk, last_b_chunk);
} }
/// Deprecated in favor of `findDiff`.
pub const indexOfDiff = findDiff;
/// Compares two slices and returns the index of the first inequality. /// Compares two slices and returns the index of the first inequality.
/// Returns null if the slices are equal. /// Returns null if the slices are equal.
pub fn findDiff(comptime T: type, a: []const T, b: []const T) ?usize { pub fn findDiff(comptime T: type, a: []const T, b: []const T) ?usize {
@ -998,7 +970,7 @@ fn lenSliceTo(ptr: anytype, comptime end: std.meta.Elem(@TypeOf(ptr))) usize {
.array => |array_info| { .array => |array_info| {
if (array_info.sentinel()) |s| { if (array_info.sentinel()) |s| {
if (s == end) { if (s == end) {
return indexOfSentinel(array_info.child, end, ptr); return findSentinel(array_info.child, end, ptr);
} }
} }
return findScalar(array_info.child, ptr, end) orelse array_info.len; return findScalar(array_info.child, ptr, end) orelse array_info.len;
@ -1007,7 +979,7 @@ fn lenSliceTo(ptr: anytype, comptime end: std.meta.Elem(@TypeOf(ptr))) usize {
}, },
.many => if (ptr_info.sentinel()) |s| { .many => if (ptr_info.sentinel()) |s| {
if (s == end) { if (s == end) {
return indexOfSentinel(ptr_info.child, end, ptr); return findSentinel(ptr_info.child, end, ptr);
} }
// We're looking for something other than the sentinel, // We're looking for something other than the sentinel,
// but iterating past the sentinel would be a bug so we need // but iterating past the sentinel would be a bug so we need
@ -1018,12 +990,12 @@ fn lenSliceTo(ptr: anytype, comptime end: std.meta.Elem(@TypeOf(ptr))) usize {
}, },
.c => { .c => {
assert(ptr != null); assert(ptr != null);
return indexOfSentinel(ptr_info.child, end, ptr); return findSentinel(ptr_info.child, end, ptr);
}, },
.slice => { .slice => {
if (ptr_info.sentinel()) |s| { if (ptr_info.sentinel()) |s| {
if (s == end) { if (s == end) {
return indexOfSentinel(ptr_info.child, s, ptr); return findSentinel(ptr_info.child, s, ptr);
} }
} }
return findScalar(ptr_info.child, ptr, end) orelse ptr.len; return findScalar(ptr_info.child, ptr, end) orelse ptr.len;
@ -1076,11 +1048,11 @@ pub fn len(value: anytype) usize {
.many => { .many => {
const sentinel = info.sentinel() orelse const sentinel = info.sentinel() orelse
@compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value))); @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value)));
return indexOfSentinel(info.child, sentinel, value); return findSentinel(info.child, sentinel, value);
}, },
.c => { .c => {
assert(value != null); assert(value != null);
return indexOfSentinel(info.child, 0, value); return findSentinel(info.child, 0, value);
}, },
else => @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value))), else => @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value))),
}, },
@ -1096,9 +1068,6 @@ test len {
try testing.expect(len(c_ptr) == 2); try testing.expect(len(c_ptr) == 2);
} }
/// Deprecated in favor of `findSentinel`.
pub const indexOfSentinel = findSentinel;
/// Returns the index of the sentinel value in a sentinel-terminated pointer. /// Returns the index of the sentinel value in a sentinel-terminated pointer.
/// Linear search through memory until the sentinel is found. /// Linear search through memory until the sentinel is found.
pub fn findSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]const T) usize { pub fn findSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]const T) usize {
@ -1166,7 +1135,7 @@ pub fn findSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]const
return i; return i;
} }
test "indexOfSentinel vector paths" { test "findSentinel vector paths" {
const Types = [_]type{ u8, u16, u32, u64 }; const Types = [_]type{ u8, u16, u32, u64 };
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const page_size = std.heap.page_size_min; const page_size = std.heap.page_size_min;
@ -1189,7 +1158,7 @@ test "indexOfSentinel vector paths" {
const search_len = page_size / @sizeOf(T); const search_len = page_size / @sizeOf(T);
memory[start + search_len] = 0; memory[start + search_len] = 0;
for (0..block_len) |offset| { for (0..block_len) |offset| {
try testing.expectEqual(search_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start + offset]))); try testing.expectEqual(search_len - offset, findSentinel(T, 0, @ptrCast(&memory[start + offset])));
} }
memory[start + search_len] = 0xaa; memory[start + search_len] = 0xaa;
@ -1197,7 +1166,7 @@ test "indexOfSentinel vector paths" {
const start_page_boundary = start + (page_size / @sizeOf(T)); const start_page_boundary = start + (page_size / @sizeOf(T));
memory[start_page_boundary + block_len] = 0; memory[start_page_boundary + block_len] = 0;
for (0..block_len) |offset| { for (0..block_len) |offset| {
try testing.expectEqual(2 * block_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start_page_boundary - block_len + offset]))); try testing.expectEqual(2 * block_len - offset, findSentinel(T, 0, @ptrCast(&memory[start_page_boundary - block_len + offset])));
} }
} }
} }
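A minimal usage sketch for the renamed findSentinel (example is mine):

const std = @import("std");

test "findSentinel finds the zero terminator (sketch)" {
    const msg: [*:0]const u8 = "hello";
    // Linear scan until the sentinel; std.mem.len builds on this for [*:0] and C pointers.
    try std.testing.expectEqual(@as(usize, 5), std.mem.findSentinel(u8, 0, msg));
    try std.testing.expectEqual(@as(usize, 5), std.mem.len(msg));
}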
@ -1221,9 +1190,6 @@ test trimStart {
try testing.expectEqualSlices(u8, "foo\n ", trimStart(u8, " foo\n ", " \n")); try testing.expectEqualSlices(u8, "foo\n ", trimStart(u8, " foo\n ", " \n"));
} }
/// Deprecated: use `trimStart` instead.
pub const trimLeft = trimStart;
/// Remove a set of values from the end of a slice. /// Remove a set of values from the end of a slice.
pub fn trimEnd(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { pub fn trimEnd(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var end: usize = slice.len; var end: usize = slice.len;
@ -1235,9 +1201,6 @@ test trimEnd {
try testing.expectEqualSlices(u8, " foo", trimEnd(u8, " foo\n ", " \n")); try testing.expectEqualSlices(u8, " foo", trimEnd(u8, " foo\n ", " \n"));
} }
/// Deprecated: use `trimEnd` instead.
pub const trimRight = trimEnd;
/// Remove a set of values from the beginning and end of a slice. /// Remove a set of values from the beginning and end of a slice.
pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var begin: usize = 0; var begin: usize = 0;
@ -1252,17 +1215,11 @@ test trim {
try testing.expectEqualSlices(u8, "foo", trim(u8, "foo", " \n")); try testing.expectEqualSlices(u8, "foo", trim(u8, "foo", " \n"));
} }
/// Deprecated in favor of `findScalar`.
pub const indexOfScalar = findScalar;
/// Linear search for the index of a scalar value inside a slice. /// Linear search for the index of a scalar value inside a slice.
pub fn findScalar(comptime T: type, slice: []const T, value: T) ?usize { pub fn findScalar(comptime T: type, slice: []const T, value: T) ?usize {
return indexOfScalarPos(T, slice, 0, value); return findScalarPos(T, slice, 0, value);
} }
/// Deprecated in favor of `findScalarLast`.
pub const lastIndexOfScalar = findScalarLast;
/// Linear search for the last index of a scalar value inside a slice. /// Linear search for the last index of a scalar value inside a slice.
pub fn findScalarLast(comptime T: type, slice: []const T, value: T) ?usize { pub fn findScalarLast(comptime T: type, slice: []const T, value: T) ?usize {
var i: usize = slice.len; var i: usize = slice.len;
@ -1273,9 +1230,6 @@ pub fn findScalarLast(comptime T: type, slice: []const T, value: T) ?usize {
return null; return null;
} }
/// Deprecated in favor of `findScalarPos`.
pub const indexOfScalarPos = findScalarPos;
/// Linear search for the index of a scalar value inside a slice, starting from a given position. /// Linear search for the index of a scalar value inside a slice, starting from a given position.
/// Returns null if the value is not found. /// Returns null if the value is not found.
pub fn findScalarPos(comptime T: type, slice: []const T, start_index: usize, value: T) ?usize { pub fn findScalarPos(comptime T: type, slice: []const T, start_index: usize, value: T) ?usize {
@ -1340,7 +1294,7 @@ pub fn findScalarPos(comptime T: type, slice: []const T, start_index: usize, val
return null; return null;
} }
test indexOfScalarPos { test findScalarPos {
const Types = [_]type{ u8, u16, u32, u64 }; const Types = [_]type{ u8, u16, u32, u64 };
inline for (Types) |T| { inline for (Types) |T| {
@ -1349,23 +1303,17 @@ test indexOfScalarPos {
memory[memory.len - 1] = 0; memory[memory.len - 1] = 0;
for (0..memory.len) |i| { for (0..memory.len) |i| {
try testing.expectEqual(memory.len - i - 1, indexOfScalarPos(T, memory[i..], 0, 0).?); try testing.expectEqual(memory.len - i - 1, findScalarPos(T, memory[i..], 0, 0).?);
} }
} }
} }
/// Deprecated in favor of `findAny`.
pub const indexOfAny = findAny;
/// Linear search for the index of any value in the provided list inside a slice. /// Linear search for the index of any value in the provided list inside a slice.
/// Returns null if no values are found. /// Returns null if no values are found.
pub fn findAny(comptime T: type, slice: []const T, values: []const T) ?usize { pub fn findAny(comptime T: type, slice: []const T, values: []const T) ?usize {
return indexOfAnyPos(T, slice, 0, values); return findAnyPos(T, slice, 0, values);
} }
/// Deprecated in favor of `findLastAny`.
pub const lastIndexOfAny = findLastAny;
/// Linear search for the last index of any value in the provided list inside a slice. /// Linear search for the last index of any value in the provided list inside a slice.
/// Returns null if no values are found. /// Returns null if no values are found.
pub fn findLastAny(comptime T: type, slice: []const T, values: []const T) ?usize { pub fn findLastAny(comptime T: type, slice: []const T, values: []const T) ?usize {
@ -1379,9 +1327,6 @@ pub fn findLastAny(comptime T: type, slice: []const T, values: []const T) ?usize
return null; return null;
} }
/// Deprecated in favor of `findAnyPos`.
pub const indexOfAnyPos = findAnyPos;
/// Linear search for the index of any value in the provided list inside a slice, starting from a given position. /// Linear search for the index of any value in the provided list inside a slice, starting from a given position.
/// Returns null if no values are found. /// Returns null if no values are found.
pub fn findAnyPos(comptime T: type, slice: []const T, start_index: usize, values: []const T) ?usize { pub fn findAnyPos(comptime T: type, slice: []const T, start_index: usize, values: []const T) ?usize {
@ -1394,14 +1339,11 @@ pub fn findAnyPos(comptime T: type, slice: []const T, start_index: usize, values
return null; return null;
} }
/// Deprecated in favor of `findNone`.
pub const indexOfNone = findNone;
/// Find the first item in `slice` which is not contained in `values`. /// Find the first item in `slice` which is not contained in `values`.
/// ///
/// Comparable to `strspn` in the C standard library. /// Comparable to `strspn` in the C standard library.
pub fn findNone(comptime T: type, slice: []const T, values: []const T) ?usize { pub fn findNone(comptime T: type, slice: []const T, values: []const T) ?usize {
return indexOfNonePos(T, slice, 0, values); return findNonePos(T, slice, 0, values);
} }
test findNone { test findNone {
@ -1412,12 +1354,9 @@ test findNone {
try testing.expect(findNone(u8, "123123", "123") == null); try testing.expect(findNone(u8, "123123", "123") == null);
try testing.expect(findNone(u8, "333333", "123") == null); try testing.expect(findNone(u8, "333333", "123") == null);
try testing.expect(indexOfNonePos(u8, "abc123", 3, "321") == null); try testing.expect(findNonePos(u8, "abc123", 3, "321") == null);
} }
/// Deprecated in favor of `findLastNone`.
pub const lastIndexOfNone = findLastNone;
/// Find the last item in `slice` which is not contained in `values`. /// Find the last item in `slice` which is not contained in `values`.
/// ///
/// Like `strspn` in the C standard library, but searches from the end. /// Like `strspn` in the C standard library, but searches from the end.
@ -1433,8 +1372,6 @@ pub fn findLastNone(comptime T: type, slice: []const T, values: []const T) ?usiz
return null; return null;
} }
pub const indexOfNonePos = findNonePos;
/// Find the first item in `slice[start_index..]` which is not contained in `values`. /// Find the first item in `slice[start_index..]` which is not contained in `values`.
/// The returned index will be relative to the start of `slice`, and never less than `start_index`. /// The returned index will be relative to the start of `slice`, and never less than `start_index`.
/// ///
@ -1450,22 +1387,16 @@ pub fn findNonePos(comptime T: type, slice: []const T, start_index: usize, value
return null; return null;
} }
/// Deprecated in favor of `find`.
pub const indexOf = find;
/// Search for needle in haystack and return the index of the first occurrence. /// Search for needle in haystack and return the index of the first occurrence.
/// Uses Boyer-Moore-Horspool algorithm on large inputs; linear search on small inputs. /// Uses Boyer-Moore-Horspool algorithm on large inputs; linear search on small inputs.
/// Returns null if needle is not found. /// Returns null if needle is not found.
pub fn find(comptime T: type, haystack: []const T, needle: []const T) ?usize { pub fn find(comptime T: type, haystack: []const T, needle: []const T) ?usize {
return indexOfPos(T, haystack, 0, needle); return findPos(T, haystack, 0, needle);
} }
/// Deprecated in favor of `findLastLinear`.
pub const lastIndexOfLinear = findLastLinear;
/// Find the index in a slice of a sub-slice, searching from the end backwards. /// Find the index in a slice of a sub-slice, searching from the end backwards.
/// To start looking at a different index, slice the haystack first. /// To start looking at a different index, slice the haystack first.
/// Consider using `lastIndexOf` instead of this, which will automatically use a /// Consider using `findLast` instead of this, which will automatically use a
/// more sophisticated algorithm on larger inputs. /// more sophisticated algorithm on larger inputs.
pub fn findLastLinear(comptime T: type, haystack: []const T, needle: []const T) ?usize { pub fn findLastLinear(comptime T: type, haystack: []const T, needle: []const T) ?usize {
if (needle.len > haystack.len) return null; if (needle.len > haystack.len) return null;
@ -1476,9 +1407,7 @@ pub fn findLastLinear(comptime T: type, haystack: []const T, needle: []const T)
} }
} }
pub const indexOfPosLinear = findPosLinear; /// Consider using `findPos` instead of this, which will automatically use a
/// Consider using `indexOfPos` instead of this, which will automatically use a
/// more sophisticated algorithm on larger inputs. /// more sophisticated algorithm on larger inputs.
pub fn findPosLinear(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize { pub fn findPosLinear(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize {
if (needle.len > haystack.len) return null; if (needle.len > haystack.len) return null;
@ -1536,19 +1465,16 @@ fn boyerMooreHorspoolPreprocess(pattern: []const u8, table: *[256]usize) void {
} }
} }
/// Deprecated in favor of `find`.
pub const lastIndexOf = findLast;
/// Find the index in a slice of a sub-slice, searching from the end backwards. /// Find the index in a slice of a sub-slice, searching from the end backwards.
/// To start looking at a different index, slice the haystack first. /// To start looking at a different index, slice the haystack first.
/// Uses the Reverse Boyer-Moore-Horspool algorithm on large inputs; /// Uses the Reverse Boyer-Moore-Horspool algorithm on large inputs;
/// `lastIndexOfLinear` on small inputs. /// `findLastLinear` on small inputs.
pub fn findLast(comptime T: type, haystack: []const T, needle: []const T) ?usize { pub fn findLast(comptime T: type, haystack: []const T, needle: []const T) ?usize {
if (needle.len > haystack.len) return null; if (needle.len > haystack.len) return null;
if (needle.len == 0) return haystack.len; if (needle.len == 0) return haystack.len;
if (!std.meta.hasUniqueRepresentation(T) or haystack.len < 52 or needle.len <= 4) if (!std.meta.hasUniqueRepresentation(T) or haystack.len < 52 or needle.len <= 4)
return lastIndexOfLinear(T, haystack, needle); return findLastLinear(T, haystack, needle);
const haystack_bytes = sliceAsBytes(haystack); const haystack_bytes = sliceAsBytes(haystack);
const needle_bytes = sliceAsBytes(needle); const needle_bytes = sliceAsBytes(needle);
@ -1569,20 +1495,17 @@ pub fn findLast(comptime T: type, haystack: []const T, needle: []const T) ?usize
return null; return null;
} }
/// Deprecated in favor of `findPos`. /// Uses Boyer-Moore-Horspool algorithm on large inputs; `findPosLinear` on small inputs.
pub const indexOfPos = findPos;
/// Uses Boyer-Moore-Horspool algorithm on large inputs; `indexOfPosLinear` on small inputs.
pub fn findPos(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize { pub fn findPos(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize {
if (needle.len > haystack.len) return null; if (needle.len > haystack.len) return null;
if (needle.len < 2) { if (needle.len < 2) {
if (needle.len == 0) return start_index; if (needle.len == 0) return start_index;
// indexOfScalarPos is significantly faster than indexOfPosLinear // findScalarPos is significantly faster than findPosLinear
return indexOfScalarPos(T, haystack, start_index, needle[0]); return findScalarPos(T, haystack, start_index, needle[0]);
} }
if (!std.meta.hasUniqueRepresentation(T) or haystack.len < 52 or needle.len <= 4) if (!std.meta.hasUniqueRepresentation(T) or haystack.len < 52 or needle.len <= 4)
return indexOfPosLinear(T, haystack, start_index, needle); return findPosLinear(T, haystack, start_index, needle);
const haystack_bytes = sliceAsBytes(haystack); const haystack_bytes = sliceAsBytes(haystack);
const needle_bytes = sliceAsBytes(needle); const needle_bytes = sliceAsBytes(needle);
@ -1601,61 +1524,61 @@ pub fn findPos(comptime T: type, haystack: []const T, start_index: usize, needle
return null; return null;
} }
test indexOf { test find {
try testing.expect(indexOf(u8, "one two three four five six seven eight nine ten eleven", "three four").? == 8); try testing.expect(find(u8, "one two three four five six seven eight nine ten eleven", "three four").? == 8);
try testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten eleven", "three four").? == 8); try testing.expect(findLast(u8, "one two three four five six seven eight nine ten eleven", "three four").? == 8);
try testing.expect(indexOf(u8, "one two three four five six seven eight nine ten eleven", "two two") == null); try testing.expect(find(u8, "one two three four five six seven eight nine ten eleven", "two two") == null);
try testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten eleven", "two two") == null); try testing.expect(findLast(u8, "one two three four five six seven eight nine ten eleven", "two two") == null);
try testing.expect(indexOf(u8, "one two three four five six seven eight nine ten", "").? == 0); try testing.expect(find(u8, "one two three four five six seven eight nine ten", "").? == 0);
try testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten", "").? == 48); try testing.expect(findLast(u8, "one two three four five six seven eight nine ten", "").? == 48);
try testing.expect(indexOf(u8, "one two three four", "four").? == 14); try testing.expect(find(u8, "one two three four", "four").? == 14);
try testing.expect(lastIndexOf(u8, "one two three two four", "two").? == 14); try testing.expect(findLast(u8, "one two three two four", "two").? == 14);
try testing.expect(indexOf(u8, "one two three four", "gour") == null); try testing.expect(find(u8, "one two three four", "gour") == null);
try testing.expect(lastIndexOf(u8, "one two three four", "gour") == null); try testing.expect(findLast(u8, "one two three four", "gour") == null);
try testing.expect(indexOf(u8, "foo", "foo").? == 0); try testing.expect(find(u8, "foo", "foo").? == 0);
try testing.expect(lastIndexOf(u8, "foo", "foo").? == 0); try testing.expect(findLast(u8, "foo", "foo").? == 0);
try testing.expect(indexOf(u8, "foo", "fool") == null); try testing.expect(find(u8, "foo", "fool") == null);
try testing.expect(lastIndexOf(u8, "foo", "lfoo") == null); try testing.expect(findLast(u8, "foo", "lfoo") == null);
try testing.expect(lastIndexOf(u8, "foo", "fool") == null); try testing.expect(findLast(u8, "foo", "fool") == null);
try testing.expect(indexOf(u8, "foo foo", "foo").? == 0); try testing.expect(find(u8, "foo foo", "foo").? == 0);
try testing.expect(lastIndexOf(u8, "foo foo", "foo").? == 4); try testing.expect(findLast(u8, "foo foo", "foo").? == 4);
try testing.expect(lastIndexOfAny(u8, "boo, cat", "abo").? == 6); try testing.expect(findLastAny(u8, "boo, cat", "abo").? == 6);
try testing.expect(findScalarLast(u8, "boo", 'o').? == 2); try testing.expect(findScalarLast(u8, "boo", 'o').? == 2);
} }
test "indexOf multibyte" { test "find multibyte" {
{ {
// make haystack and needle long enough to trigger Boyer-Moore-Horspool algorithm // make haystack and needle long enough to trigger Boyer-Moore-Horspool algorithm
const haystack = [1]u16{0} ** 100 ++ [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff }; const haystack = [1]u16{0} ** 100 ++ [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff };
const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee }; const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee };
try testing.expectEqual(indexOfPos(u16, &haystack, 0, &needle), 100); try testing.expectEqual(findPos(u16, &haystack, 0, &needle), 100);
// check for misaligned false positives (little and big endian) // check for misaligned false positives (little and big endian)
const needleLE = [_]u16{ 0xbbbb, 0xcccc, 0xdddd, 0xeeee, 0xffff }; const needleLE = [_]u16{ 0xbbbb, 0xcccc, 0xdddd, 0xeeee, 0xffff };
try testing.expectEqual(indexOfPos(u16, &haystack, 0, &needleLE), null); try testing.expectEqual(findPos(u16, &haystack, 0, &needleLE), null);
const needleBE = [_]u16{ 0xaacc, 0xbbdd, 0xccee, 0xddff, 0xee00 }; const needleBE = [_]u16{ 0xaacc, 0xbbdd, 0xccee, 0xddff, 0xee00 };
try testing.expectEqual(indexOfPos(u16, &haystack, 0, &needleBE), null); try testing.expectEqual(findPos(u16, &haystack, 0, &needleBE), null);
} }
{ {
// make haystack and needle long enough to trigger Boyer-Moore-Horspool algorithm // make haystack and needle long enough to trigger Boyer-Moore-Horspool algorithm
const haystack = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff } ++ [1]u16{0} ** 100; const haystack = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff } ++ [1]u16{0} ** 100;
const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee }; const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee };
try testing.expectEqual(lastIndexOf(u16, &haystack, &needle), 0); try testing.expectEqual(findLast(u16, &haystack, &needle), 0);
// check for misaligned false positives (little and big endian) // check for misaligned false positives (little and big endian)
const needleLE = [_]u16{ 0xbbbb, 0xcccc, 0xdddd, 0xeeee, 0xffff }; const needleLE = [_]u16{ 0xbbbb, 0xcccc, 0xdddd, 0xeeee, 0xffff };
try testing.expectEqual(lastIndexOf(u16, &haystack, &needleLE), null); try testing.expectEqual(findLast(u16, &haystack, &needleLE), null);
const needleBE = [_]u16{ 0xaacc, 0xbbdd, 0xccee, 0xddff, 0xee00 }; const needleBE = [_]u16{ 0xaacc, 0xbbdd, 0xccee, 0xddff, 0xee00 };
try testing.expectEqual(lastIndexOf(u16, &haystack, &needleBE), null); try testing.expectEqual(findLast(u16, &haystack, &needleBE), null);
} }
} }
test "indexOfPos empty needle" { test "findPos empty needle" {
try testing.expectEqual(indexOfPos(u8, "abracadabra", 5, ""), 5); try testing.expectEqual(findPos(u8, "abracadabra", 5, ""), 5);
} }
/// Returns the number of needles inside the haystack /// Returns the number of needles inside the haystack
@ -1667,7 +1590,7 @@ pub fn count(comptime T: type, haystack: []const T, needle: []const T) usize {
var i: usize = 0; var i: usize = 0;
var found: usize = 0; var found: usize = 0;
while (indexOfPos(T, haystack, i, needle)) |idx| { while (findPos(T, haystack, i, needle)) |idx| {
i = idx + needle.len; i = idx + needle.len;
found += 1; found += 1;
} }
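Usage sketch for count, matching the non-overlapping scan above (examples are mine):

const std = @import("std");

test "count skips overlapping needles (sketch)" {
    try std.testing.expectEqual(@as(usize, 2), std.mem.count(u8, "one two three two", "two"));
    // After a hit the scan resumes past the whole needle, so "aaa" contains "aa" only once.
    try std.testing.expectEqual(@as(usize, 1), std.mem.count(u8, "aaa", "aa"));
}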
@ -1727,17 +1650,15 @@ test countScalar {
/// Returns true if the haystack contains expected_count or more needles /// Returns true if the haystack contains expected_count or more needles
/// needle.len must be > 0 /// needle.len must be > 0
/// does not count overlapping needles /// does not count overlapping needles
//
/// See also: `containsAtLeastScalar`
pub fn containsAtLeast(comptime T: type, haystack: []const T, expected_count: usize, needle: []const T) bool { pub fn containsAtLeast(comptime T: type, haystack: []const T, expected_count: usize, needle: []const T) bool {
if (needle.len == 1) return containsAtLeastScalar(T, haystack, expected_count, needle[0]); if (needle.len == 1) return containsAtLeastScalar2(T, haystack, needle[0], expected_count);
assert(needle.len > 0); assert(needle.len > 0);
if (expected_count == 0) return true; if (expected_count == 0) return true;
var i: usize = 0; var i: usize = 0;
var found: usize = 0; var found: usize = 0;
while (indexOfPos(T, haystack, i, needle)) |idx| { while (findPos(T, haystack, i, needle)) |idx| {
i = idx + needle.len; i = idx + needle.len;
found += 1; found += 1;
if (found == expected_count) return true; if (found == expected_count) return true;
@ -1761,11 +1682,6 @@ test containsAtLeast {
try testing.expect(!containsAtLeast(u8, " radar radar ", 3, "radar")); try testing.expect(!containsAtLeast(u8, " radar radar ", 3, "radar"));
} }
/// Deprecated in favor of `containsAtLeastScalar2`.
pub fn containsAtLeastScalar(comptime T: type, list: []const T, minimum: usize, element: T) bool {
return containsAtLeastScalar2(T, list, element, minimum);
}
/// Returns true if `element` appears at least `minimum` number of times in `list`. /// Returns true if `element` appears at least `minimum` number of times in `list`.
// //
/// Related: /// Related:
@ -3362,9 +3278,9 @@ pub fn SplitIterator(comptime T: type, comptime delimiter_type: DelimiterType) t
pub fn next(self: *Self) ?[]const T { pub fn next(self: *Self) ?[]const T {
const start = self.index orelse return null; const start = self.index orelse return null;
const end = if (switch (delimiter_type) { const end = if (switch (delimiter_type) {
.sequence => indexOfPos(T, self.buffer, start, self.delimiter), .sequence => findPos(T, self.buffer, start, self.delimiter),
.any => indexOfAnyPos(T, self.buffer, start, self.delimiter), .any => findAnyPos(T, self.buffer, start, self.delimiter),
.scalar => indexOfScalarPos(T, self.buffer, start, self.delimiter), .scalar => findScalarPos(T, self.buffer, start, self.delimiter),
}) |delim_start| blk: { }) |delim_start| blk: {
self.index = delim_start + switch (delimiter_type) { self.index = delim_start + switch (delimiter_type) {
.sequence => self.delimiter.len, .sequence => self.delimiter.len,
@ -3383,9 +3299,9 @@ pub fn SplitIterator(comptime T: type, comptime delimiter_type: DelimiterType) t
pub fn peek(self: *Self) ?[]const T { pub fn peek(self: *Self) ?[]const T {
const start = self.index orelse return null; const start = self.index orelse return null;
const end = if (switch (delimiter_type) { const end = if (switch (delimiter_type) {
.sequence => indexOfPos(T, self.buffer, start, self.delimiter), .sequence => findPos(T, self.buffer, start, self.delimiter),
.any => indexOfAnyPos(T, self.buffer, start, self.delimiter), .any => findAnyPos(T, self.buffer, start, self.delimiter),
.scalar => indexOfScalarPos(T, self.buffer, start, self.delimiter), .scalar => findScalarPos(T, self.buffer, start, self.delimiter),
}) |delim_start| delim_start else self.buffer.len; }) |delim_start| delim_start else self.buffer.len;
return self.buffer[start..end]; return self.buffer[start..end];
} }
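The same scan drives std.mem.splitScalar; a short behavior sketch (example input is mine):

const std = @import("std");

test "splitScalar next and peek (sketch)" {
    var it = std.mem.splitScalar(u8, "a,b,,c", ',');
    try std.testing.expectEqualStrings("a", it.next().?);
    try std.testing.expectEqualStrings("b", it.peek().?); // peek does not advance the iterator
    try std.testing.expectEqualStrings("b", it.next().?);
    try std.testing.expectEqualStrings("", it.next().?); // adjacent delimiters yield an empty field
    try std.testing.expectEqualStrings("c", it.next().?);
    try std.testing.expect(it.next() == null);
}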
@ -3428,8 +3344,8 @@ pub fn SplitBackwardsIterator(comptime T: type, comptime delimiter_type: Delimit
pub fn next(self: *Self) ?[]const T { pub fn next(self: *Self) ?[]const T {
const end = self.index orelse return null; const end = self.index orelse return null;
const start = if (switch (delimiter_type) { const start = if (switch (delimiter_type) {
.sequence => lastIndexOf(T, self.buffer[0..end], self.delimiter), .sequence => findLast(T, self.buffer[0..end], self.delimiter),
.any => lastIndexOfAny(T, self.buffer[0..end], self.delimiter), .any => findAny(T, self.buffer[0..end], self.delimiter),
.scalar => findScalarLast(T, self.buffer[0..end], self.delimiter), .scalar => findScalarLast(T, self.buffer[0..end], self.delimiter),
}) |delim_start| blk: { }) |delim_start| blk: {
self.index = delim_start; self.index = delim_start;
@@ -3744,9 +3660,6 @@ test minMax {
} }
} }
/// Deprecated in favor of `findMin`.
pub const indexOfMin = findMin;
/// Returns the index of the smallest number in a slice. O(n). /// Returns the index of the smallest number in a slice. O(n).
/// `slice` must not be empty. /// `slice` must not be empty.
pub fn findMin(comptime T: type, slice: []const T) usize { pub fn findMin(comptime T: type, slice: []const T) usize {
@@ -3768,8 +3681,6 @@ test findMin {
try testing.expectEqual(findMin(u8, "a"), 0); try testing.expectEqual(findMin(u8, "a"), 0);
} }
pub const indexOfMax = findMax;
/// Returns the index of the largest number in a slice. O(n). /// Returns the index of the largest number in a slice. O(n).
/// `slice` must not be empty. /// `slice` must not be empty.
pub fn findMax(comptime T: type, slice: []const T) usize { pub fn findMax(comptime T: type, slice: []const T) usize {
@@ -3791,9 +3702,6 @@ test findMax {
try testing.expectEqual(findMax(u8, "a"), 0); try testing.expectEqual(findMax(u8, "a"), 0);
} }
/// Deprecated in favor of `findMinMax`.
pub const indexOfMinMax = findMinMax;
/// Finds the indices of the smallest and largest number in a slice. O(n). /// Finds the indices of the smallest and largest number in a slice. O(n).
/// Returns the indices of the smallest and largest numbers in that order. /// Returns the indices of the smallest and largest numbers in that order.
/// `slice` must not be empty. /// `slice` must not be empty.
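A short sketch of the three renamed helpers kept here, mirroring the style of the tests above:

    test "find min/max sketch" {
        const values = [_]u8{ 3, 1, 4, 2, 5 };
        try testing.expectEqual(findMin(u8, &values), 1); // index of the smallest value
        try testing.expectEqual(findMax(u8, &values), 4); // index of the largest value
        _ = findMinMax(u8, &values); // both indices in one pass, smallest first
    }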

View file

@@ -113,7 +113,7 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.
// errno values to expect when command is F.GETPATH... // errno values to expect when command is F.GETPATH...
else => |err| return posix.unexpectedErrno(err), else => |err| return posix.unexpectedErrno(err),
} }
const len = mem.indexOfScalar(u8, out_buffer[0..], 0) orelse max_path_bytes; const len = mem.findScalar(u8, out_buffer[0..], 0) orelse max_path_bytes;
return out_buffer[0..len]; return out_buffer[0..len];
}, },
.linux, .serenity => { .linux, .serenity => {
@@ -150,7 +150,7 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.
.BADF => return error.FileNotFound, .BADF => return error.FileNotFound,
else => |err| return posix.unexpectedErrno(err), else => |err| return posix.unexpectedErrno(err),
} }
const len = mem.indexOfScalar(u8, &kfile.path, 0) orelse max_path_bytes; const len = mem.findScalar(u8, &kfile.path, 0) orelse max_path_bytes;
if (len == 0) return error.NameTooLong; if (len == 0) return error.NameTooLong;
const result = out_buffer[0..len]; const result = out_buffer[0..len];
@memcpy(result, kfile.path[0..len]); @memcpy(result, kfile.path[0..len]);
@@ -164,7 +164,7 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.
.RANGE => return error.NameTooLong, .RANGE => return error.NameTooLong,
else => |err| return posix.unexpectedErrno(err), else => |err| return posix.unexpectedErrno(err),
} }
const len = mem.indexOfScalar(u8, out_buffer[0..], 0) orelse max_path_bytes; const len = mem.findScalar(u8, out_buffer[0..], 0) orelse max_path_bytes;
return out_buffer[0..len]; return out_buffer[0..len];
}, },
.netbsd => { .netbsd => {
@@ -178,7 +178,7 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.
.RANGE => return error.NameTooLong, .RANGE => return error.NameTooLong,
else => |err| return posix.unexpectedErrno(err), else => |err| return posix.unexpectedErrno(err),
} }
const len = mem.indexOfScalar(u8, out_buffer[0..], 0) orelse max_path_bytes; const len = mem.findScalar(u8, out_buffer[0..], 0) orelse max_path_bytes;
return out_buffer[0..len]; return out_buffer[0..len];
}, },
else => unreachable, // made unreachable by isGetFdPathSupportedOnTarget above else => unreachable, // made unreachable by isGetFdPathSupportedOnTarget above

View file

@@ -4092,7 +4092,7 @@ inline fn skipKernelLessThan(required: std.SemanticVersion) !void {
const release = mem.sliceTo(&uts.release, 0); const release = mem.sliceTo(&uts.release, 0);
// Strips potential extra, as kernel version might not be semver compliant, example "6.8.9-300.fc40.x86_64" // Strips potential extra, as kernel version might not be semver compliant, example "6.8.9-300.fc40.x86_64"
const extra_index = std.mem.indexOfAny(u8, release, "-+"); const extra_index = std.mem.findAny(u8, release, "-+");
const stripped = release[0..(extra_index orelse release.len)]; const stripped = release[0..(extra_index orelse release.len)];
// Make sure the input don't rely on the extra we just stripped // Make sure the input don't rely on the extra we just stripped
try testing.expect(required.pre == null and required.build == null); try testing.expect(required.pre == null and required.build == null);

View file

@@ -1412,7 +1412,7 @@ pub fn GetFinalPathNameByHandle(
}; };
} }
const file_path_begin_index = mem.indexOfPos(u16, final_path, device_prefix.len, &[_]u16{'\\'}) orelse unreachable; const file_path_begin_index = mem.findPos(u16, final_path, device_prefix.len, &[_]u16{'\\'}) orelse unreachable;
const volume_name_u16 = final_path[0..file_path_begin_index]; const volume_name_u16 = final_path[0..file_path_begin_index];
const device_name_u16 = volume_name_u16[device_prefix.len..]; const device_name_u16 = volume_name_u16[device_prefix.len..];
const file_name_u16 = final_path[file_path_begin_index..]; const file_name_u16 = final_path[file_path_begin_index..];
@@ -1494,7 +1494,7 @@ pub fn GetFinalPathNameByHandle(
const total_len = drive_letter.len + file_name_u16.len; const total_len = drive_letter.len + file_name_u16.len;
// Validate that DOS does not contain any spurious nul bytes. // Validate that DOS does not contain any spurious nul bytes.
if (mem.indexOfScalar(u16, out_buffer[0..total_len], 0)) |_| { if (mem.findScalar(u16, out_buffer[0..total_len], 0)) |_| {
return error.BadPathName; return error.BadPathName;
} }
@@ -1544,7 +1544,7 @@ pub fn GetFinalPathNameByHandle(
const total_len = volume_path.len + file_name_u16.len; const total_len = volume_path.len + file_name_u16.len;
// Validate that DOS does not contain any spurious nul bytes. // Validate that DOS does not contain any spurious nul bytes.
if (mem.indexOfScalar(u16, out_buffer[0..total_len], 0)) |_| { if (mem.findScalar(u16, out_buffer[0..total_len], 0)) |_| {
return error.BadPathName; return error.BadPathName;
} }

View file

@@ -1772,7 +1772,7 @@ pub fn execvpeZ_expandArg0(
envp: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8,
) ExecveError { ) ExecveError {
const file_slice = mem.sliceTo(file, 0); const file_slice = mem.sliceTo(file, 0);
if (mem.indexOfScalar(u8, file_slice, '/') != null) return execveZ(file, child_argv, envp); if (mem.findScalar(u8, file_slice, '/') != null) return execveZ(file, child_argv, envp);
const PATH = getenvZ("PATH") orelse "/usr/local/bin:/bin/:/usr/bin"; const PATH = getenvZ("PATH") orelse "/usr/local/bin:/bin/:/usr/bin";
// Use of PATH_MAX here is valid as the path_buf will be passed // Use of PATH_MAX here is valid as the path_buf will be passed
@@ -1828,7 +1828,7 @@ pub fn getenv(key: []const u8) ?[:0]const u8 {
if (native_os == .windows) { if (native_os == .windows) {
@compileError("std.posix.getenv is unavailable for Windows because environment strings are in WTF-16 format. See std.process.getEnvVarOwned for a cross-platform API or std.process.getenvW for a Windows-specific API."); @compileError("std.posix.getenv is unavailable for Windows because environment strings are in WTF-16 format. See std.process.getEnvVarOwned for a cross-platform API or std.process.getenvW for a Windows-specific API.");
} }
if (mem.indexOfScalar(u8, key, '=') != null) { if (mem.findScalar(u8, key, '=') != null) {
return null; return null;
} }
if (builtin.link_libc) { if (builtin.link_libc) {
@@ -6662,7 +6662,7 @@ pub fn unexpectedErrno(err: E) UnexpectedError {
/// Used to convert a slice to a null terminated slice on the stack. /// Used to convert a slice to a null terminated slice on the stack.
pub fn toPosixPath(file_path: []const u8) error{NameTooLong}![PATH_MAX - 1:0]u8 { pub fn toPosixPath(file_path: []const u8) error{NameTooLong}![PATH_MAX - 1:0]u8 {
if (std.debug.runtime_safety) assert(mem.indexOfScalar(u8, file_path, 0) == null); if (std.debug.runtime_safety) assert(mem.findScalar(u8, file_path, 0) == null);
var path_with_null: [PATH_MAX - 1:0]u8 = undefined; var path_with_null: [PATH_MAX - 1:0]u8 = undefined;
// >= rather than > to make room for the null byte // >= rather than > to make room for the null byte
if (file_path.len >= PATH_MAX) return error.NameTooLong; if (file_path.len >= PATH_MAX) return error.NameTooLong;

View file

@@ -619,7 +619,7 @@ test "siftUp in remove" {
try queue.addSlice(&.{ 0, 1, 100, 2, 3, 101, 102, 4, 5, 6, 7, 103, 104, 105, 106, 8 }); try queue.addSlice(&.{ 0, 1, 100, 2, 3, 101, 102, 4, 5, 6, 7, 103, 104, 105, 106, 8 });
_ = queue.removeIndex(std.mem.indexOfScalar(u32, queue.items[0..queue.count()], 102).?); _ = queue.removeIndex(std.mem.findScalar(u32, queue.items[0..queue.count()], 102).?);
const sorted_items = [_]u32{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 100, 101, 103, 104, 105, 106 }; const sorted_items = [_]u32{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 100, 101, 103, 104, 105, 106 };
for (sorted_items) |e| { for (sorted_items) |e| {
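The unwrap above relies on the element being present; a minimal sketch of the optional that findScalar returns:

    const maybe_index = std.mem.findScalar(u32, &[_]u32{ 10, 20, 30 }, 20);
    std.debug.assert(maybe_index.? == 1); // null would mean the element is absent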

View file

@@ -546,7 +546,7 @@ pub fn getenvW(key: [*:0]const u16) ?[:0]const u16 {
} }
const key_slice = mem.sliceTo(key, 0); const key_slice = mem.sliceTo(key, 0);
// '=' anywhere but the start makes this an invalid environment variable name // '=' anywhere but the start makes this an invalid environment variable name
if (key_slice.len > 0 and std.mem.indexOfScalar(u16, key_slice[1..], '=') != null) { if (key_slice.len > 0 and std.mem.findScalar(u16, key_slice[1..], '=') != null) {
return null; return null;
} }
const ptr = windows.peb().ProcessParameters.Environment; const ptr = windows.peb().ProcessParameters.Environment;
@@ -559,7 +559,7 @@ pub fn getenvW(key: [*:0]const u16) ?[:0]const u16 {
// if it's the first character. // if it's the first character.
// https://devblogs.microsoft.com/oldnewthing/20100506-00/?p=14133 // https://devblogs.microsoft.com/oldnewthing/20100506-00/?p=14133
const equal_search_start: usize = if (key_value[0] == '=') 1 else 0; const equal_search_start: usize = if (key_value[0] == '=') 1 else 0;
const equal_index = std.mem.indexOfScalarPos(u16, key_value, equal_search_start, '=') orelse { const equal_index = std.mem.findScalarPos(u16, key_value, equal_search_start, '=') orelse {
// This is enforced by CreateProcess. // This is enforced by CreateProcess.
// If violated, CreateProcess will fail with INVALID_PARAMETER. // If violated, CreateProcess will fail with INVALID_PARAMETER.
unreachable; // must contain a = unreachable; // must contain a =

View file

@@ -1782,7 +1782,7 @@ fn argvToScriptCommandLineWindows(
// //
// If the script path does not have a path separator, then we know its relative to CWD and // If the script path does not have a path separator, then we know its relative to CWD and
// we can just put `.\` in the front. // we can just put `.\` in the front.
if (mem.indexOfAny(u16, script_path, &[_]u16{ mem.nativeToLittle(u16, '\\'), mem.nativeToLittle(u16, '/') }) == null) { if (mem.findAny(u16, script_path, &[_]u16{ mem.nativeToLittle(u16, '\\'), mem.nativeToLittle(u16, '/') }) == null) {
try buf.appendSlice(".\\"); try buf.appendSlice(".\\");
} }
// Note that we don't do any escaping/mitigations for this argument, since the relevant // Note that we don't do any escaping/mitigations for this argument, since the relevant
@@ -1797,7 +1797,7 @@ fn argvToScriptCommandLineWindows(
// always a mistake to include these characters in argv, so it's // always a mistake to include these characters in argv, so it's
// an error condition in order to ensure that the return of this // an error condition in order to ensure that the return of this
// function can always roundtrip through cmd.exe. // function can always roundtrip through cmd.exe.
if (std.mem.indexOfAny(u8, arg, "\x00\r\n") != null) { if (std.mem.findAny(u8, arg, "\x00\r\n") != null) {
return error.InvalidBatchScriptArg; return error.InvalidBatchScriptArg;
} }

View file

@@ -71,7 +71,7 @@ pub const Diagnostics = struct {
const start_index: usize = if (path[0] == '/') 1 else 0; const start_index: usize = if (path[0] == '/') 1 else 0;
const end_index: usize = if (path[path.len - 1] == '/') path.len - 1 else path.len; const end_index: usize = if (path[path.len - 1] == '/') path.len - 1 else path.len;
const buf = path[start_index..end_index]; const buf = path[start_index..end_index];
if (std.mem.indexOfScalarPos(u8, buf, 0, '/')) |idx| { if (std.mem.findScalarPos(u8, buf, 0, '/')) |idx| {
return buf[0..idx]; return buf[0..idx];
} }
@@ -569,7 +569,7 @@ pub const PaxIterator = struct {
} }
fn hasNull(str: []const u8) bool { fn hasNull(str: []const u8) bool {
return (std.mem.indexOfScalar(u8, str, 0)) != null; return (std.mem.findScalar(u8, str, 0)) != null;
} }
// Checks that each record ends with new line. // Checks that each record ends with new line.
@@ -667,7 +667,7 @@ fn stripComponents(path: []const u8, count: u32) []const u8 {
var i: usize = 0; var i: usize = 0;
var c = count; var c = count;
while (c > 0) : (c -= 1) { while (c > 0) : (c -= 1) {
if (std.mem.indexOfScalarPos(u8, path, i, '/')) |pos| { if (std.mem.findScalarPos(u8, path, i, '/')) |pos| {
i = pos + 1; i = pos + 1;
} else { } else {
i = path.len; i = path.len;
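A small sketch of the positional variant used above, which starts searching at the given index:

    const path = "a/b/c";
    const second_sep = std.mem.findScalarPos(u8, path, 2, '/'); // resumes past the first '/', yielding index 3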

View file

@@ -301,7 +301,7 @@ pub const Header = extern struct {
// add as much to prefix as you can, must split at / // add as much to prefix as you can, must split at /
const prefix_remaining = max_prefix - prefix_pos; const prefix_remaining = max_prefix - prefix_pos;
if (std.mem.lastIndexOf(u8, sub_path[0..@min(prefix_remaining, sub_path.len)], &.{'/'})) |sep_pos| { if (std.mem.findLast(u8, sub_path[0..@min(prefix_remaining, sub_path.len)], &.{'/'})) |sep_pos| {
@memcpy(w.prefix[prefix_pos..][0..sep_pos], sub_path[0..sep_pos]); @memcpy(w.prefix[prefix_pos..][0..sep_pos], sub_path[0..sep_pos]);
if ((sub_path.len - sep_pos - 1) > max_name) return error.NameTooLong; if ((sub_path.len - sep_pos - 1) > max_name) return error.NameTooLong;
@memcpy(w.name[0..][0 .. sub_path.len - sep_pos - 1], sub_path[sep_pos + 1 ..]); @memcpy(w.name[0..][0 .. sub_path.len - sep_pos - 1], sub_path[sep_pos + 1 ..]);

View file

@@ -643,7 +643,7 @@ pub fn tmpDir(opts: std.fs.Dir.OpenOptions) TmpDir {
} }
pub fn expectEqualStrings(expected: []const u8, actual: []const u8) !void { pub fn expectEqualStrings(expected: []const u8, actual: []const u8) !void {
if (std.mem.indexOfDiff(u8, actual, expected)) |diff_index| { if (std.mem.findDiff(u8, actual, expected)) |diff_index| {
if (@inComptime()) { if (@inComptime()) {
@compileError(std.fmt.comptimePrint("\nexpected:\n{s}\nfound:\n{s}\ndifference starts at index {d}", .{ @compileError(std.fmt.comptimePrint("\nexpected:\n{s}\nfound:\n{s}\ndifference starts at index {d}", .{
expected, actual, diff_index, expected, actual, diff_index,
@@ -988,11 +988,11 @@ test "expectEqualDeep composite type" {
} }
fn printIndicatorLine(source: []const u8, indicator_index: usize) void { fn printIndicatorLine(source: []const u8, indicator_index: usize) void {
const line_begin_index = if (std.mem.lastIndexOfScalar(u8, source[0..indicator_index], '\n')) |line_begin| const line_begin_index = if (std.mem.findScalarLast(u8, source[0..indicator_index], '\n')) |line_begin|
line_begin + 1 line_begin + 1
else else
0; 0;
const line_end_index = if (std.mem.indexOfScalar(u8, source[indicator_index..], '\n')) |line_end| const line_end_index = if (std.mem.findScalar(u8, source[indicator_index..], '\n')) |line_end|
(indicator_index + line_end) (indicator_index + line_end)
else else
source.len; source.len;
@@ -1008,7 +1008,7 @@ fn printIndicatorLine(source: []const u8, indicator_index: usize) void {
fn printWithVisibleNewlines(source: []const u8) void { fn printWithVisibleNewlines(source: []const u8) void {
var i: usize = 0; var i: usize = 0;
while (std.mem.indexOfScalar(u8, source[i..], '\n')) |nl| : (i += nl + 1) { while (std.mem.findScalar(u8, source[i..], '\n')) |nl| : (i += nl + 1) {
printLine(source[i..][0..nl]); printLine(source[i..][0..nl]);
} }
print("{s}␃\n", .{source[i..]}); // End of Text symbol (ETX) print("{s}␃\n", .{source[i..]}); // End of Text symbol (ETX)

View file

@@ -234,7 +234,7 @@ pub fn tokenLocation(self: Ast, start_offset: ByteOffset, token_index: TokenInde
const token_start = self.tokenStart(token_index); const token_start = self.tokenStart(token_index);
// Scan to by line until we go past the token start // Scan to by line until we go past the token start
while (std.mem.indexOfScalarPos(u8, self.source, loc.line_start, '\n')) |i| { while (std.mem.findScalarPos(u8, self.source, loc.line_start, '\n')) |i| {
if (i >= token_start) { if (i >= token_start) {
break; // Went past break; // Went past
} }
@@ -1312,7 +1312,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
pub fn tokensOnSameLine(tree: Ast, token1: TokenIndex, token2: TokenIndex) bool { pub fn tokensOnSameLine(tree: Ast, token1: TokenIndex, token2: TokenIndex) bool {
const source = tree.source[tree.tokenStart(token1)..tree.tokenStart(token2)]; const source = tree.source[tree.tokenStart(token1)..tree.tokenStart(token2)];
return mem.indexOfScalar(u8, source, '\n') == null; return mem.findScalar(u8, source, '\n') == null;
} }
pub fn getNodeSource(tree: Ast, node: Node.Index) []const u8 { pub fn getNodeSource(tree: Ast, node: Node.Index) []const u8 {

View file

@@ -1420,7 +1420,7 @@ fn renderFor(r: *Render, for_node: Ast.full.For, space: Space) Error!void {
try renderParamList(r, lparen, for_node.ast.inputs, .space); try renderParamList(r, lparen, for_node.ast.inputs, .space);
var cur = for_node.payload_token; var cur = for_node.payload_token;
const pipe = std.mem.indexOfScalarPos(std.zig.Token.Tag, token_tags, cur, .pipe).?; const pipe = std.mem.findScalarPos(std.zig.Token.Tag, token_tags, cur, .pipe).?;
if (tree.tokenTag(@intCast(pipe - 1)) == .comma) { if (tree.tokenTag(@intCast(pipe - 1)) == .comma) {
try ais.pushIndent(.normal); try ais.pushIndent(.normal);
try renderToken(r, cur - 1, .newline); // | try renderToken(r, cur - 1, .newline); // |
@@ -2197,7 +2197,7 @@ fn renderArrayInit(
try renderExpression(&sub_render, expr, .none); try renderExpression(&sub_render, expr, .none);
const written = sub_expr_buffer.written(); const written = sub_expr_buffer.written();
const width = written.len - start; const width = written.len - start;
const this_contains_newline = mem.indexOfScalar(u8, written[start..], '\n') != null; const this_contains_newline = mem.findScalar(u8, written[start..], '\n') != null;
contains_newline = contains_newline or this_contains_newline; contains_newline = contains_newline or this_contains_newline;
expr_widths[i] = width; expr_widths[i] = width;
expr_newlines[i] = this_contains_newline; expr_newlines[i] = this_contains_newline;
@@ -2221,7 +2221,7 @@ fn renderArrayInit(
const written = sub_expr_buffer.written(); const written = sub_expr_buffer.written();
const width = written.len - start - 2; const width = written.len - start - 2;
const this_contains_newline = mem.indexOfScalar(u8, written[start .. written.len - 1], '\n') != null; const this_contains_newline = mem.findScalar(u8, written[start .. written.len - 1], '\n') != null;
contains_newline = contains_newline or this_contains_newline; contains_newline = contains_newline or this_contains_newline;
expr_widths[i] = width; expr_widths[i] = width;
expr_newlines[i] = contains_newline; expr_newlines[i] = contains_newline;
@@ -3092,7 +3092,7 @@ fn hasComment(tree: Ast, start_token: Ast.TokenIndex, end_token: Ast.TokenIndex)
const token: Ast.TokenIndex = @intCast(i); const token: Ast.TokenIndex = @intCast(i);
const start = tree.tokenStart(token) + tree.tokenSlice(token).len; const start = tree.tokenStart(token) + tree.tokenSlice(token).len;
const end = tree.tokenStart(token + 1); const end = tree.tokenStart(token + 1);
if (mem.indexOf(u8, tree.source[start..end], "//") != null) return true; if (mem.find(u8, tree.source[start..end], "//") != null) return true;
} }
return false; return false;
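A brief sketch of the plain substring search used above, renamed from indexOf by this commit:

    const src = "code // trailing comment";
    const comment_start = std.mem.find(u8, src, "//"); // ?usize, 5 for this input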
@@ -3101,7 +3101,7 @@ fn hasComment(tree: Ast, start_token: Ast.TokenIndex, end_token: Ast.TokenIndex)
/// Returns true if there exists a multiline string literal between the start /// Returns true if there exists a multiline string literal between the start
/// of token `start_token` and the start of token `end_token`. /// of token `start_token` and the start of token `end_token`.
fn hasMultilineString(tree: Ast, start_token: Ast.TokenIndex, end_token: Ast.TokenIndex) bool { fn hasMultilineString(tree: Ast, start_token: Ast.TokenIndex, end_token: Ast.TokenIndex) bool {
return std.mem.indexOfScalar( return std.mem.findScalar(
Token.Tag, Token.Tag,
tree.tokens.items(.tag)[start_token..end_token], tree.tokens.items(.tag)[start_token..end_token],
.multiline_string_literal_line, .multiline_string_literal_line,
@@ -3115,11 +3115,11 @@ fn renderComments(r: *Render, start: usize, end: usize) Error!bool {
const ais = r.ais; const ais = r.ais;
var index: usize = start; var index: usize = start;
while (mem.indexOf(u8, tree.source[index..end], "//")) |offset| { while (mem.find(u8, tree.source[index..end], "//")) |offset| {
const comment_start = index + offset; const comment_start = index + offset;
// If there is no newline, the comment ends with EOF // If there is no newline, the comment ends with EOF
const newline_index = mem.indexOfScalar(u8, tree.source[comment_start..end], '\n'); const newline_index = mem.findScalar(u8, tree.source[comment_start..end], '\n');
const newline = if (newline_index) |i| comment_start + i else null; const newline = if (newline_index) |i| comment_start + i else null;
const untrimmed_comment = tree.source[comment_start .. newline orelse tree.source.len]; const untrimmed_comment = tree.source[comment_start .. newline orelse tree.source.len];
@@ -3131,7 +3131,7 @@ fn renderComments(r: *Render, start: usize, end: usize) Error!bool {
// Leave up to one empty line before the first comment // Leave up to one empty line before the first comment
try ais.insertNewline(); try ais.insertNewline();
try ais.insertNewline(); try ais.insertNewline();
} else if (mem.indexOfScalar(u8, tree.source[index..comment_start], '\n') != null) { } else if (mem.findScalar(u8, tree.source[index..comment_start], '\n') != null) {
// Respect the newline directly before the comment. // Respect the newline directly before the comment.
// Note: This allows an empty line between comments // Note: This allows an empty line between comments
try ais.insertNewline(); try ais.insertNewline();
@@ -3190,7 +3190,7 @@ fn renderExtraNewlineToken(r: *Render, token_index: Ast.TokenIndex) Error!void {
// If there is a immediately preceding comment or doc_comment, // If there is a immediately preceding comment or doc_comment,
// skip it because required extra newline has already been rendered. // skip it because required extra newline has already been rendered.
if (mem.indexOf(u8, tree.source[prev_token_end..token_start], "//") != null) return; if (mem.find(u8, tree.source[prev_token_end..token_start], "//") != null) return;
if (tree.isTokenPrecededByTags(token_index, &.{.doc_comment})) return; if (tree.isTokenPrecededByTags(token_index, &.{.doc_comment})) return;
// Iterate backwards to the end of the previous token, stopping if a // Iterate backwards to the end of the previous token, stopping if a

View file

@@ -4131,7 +4131,7 @@ fn fnDecl(
const lib_name = if (fn_proto.lib_name) |lib_name_token| blk: { const lib_name = if (fn_proto.lib_name) |lib_name_token| blk: {
const lib_name_str = try astgen.strLitAsString(lib_name_token); const lib_name_str = try astgen.strLitAsString(lib_name_token);
const lib_name_slice = astgen.string_bytes.items[@intFromEnum(lib_name_str.index)..][0..lib_name_str.len]; const lib_name_slice = astgen.string_bytes.items[@intFromEnum(lib_name_str.index)..][0..lib_name_str.len];
if (mem.indexOfScalar(u8, lib_name_slice, 0) != null) { if (mem.findScalar(u8, lib_name_slice, 0) != null) {
return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{}); return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{});
} else if (lib_name_str.len == 0) { } else if (lib_name_str.len == 0) {
return astgen.failTok(lib_name_token, "library name cannot be empty", .{}); return astgen.failTok(lib_name_token, "library name cannot be empty", .{});
@@ -4547,7 +4547,7 @@ fn globalVarDecl(
const lib_name = if (var_decl.lib_name) |lib_name_token| blk: { const lib_name = if (var_decl.lib_name) |lib_name_token| blk: {
const lib_name_str = try astgen.strLitAsString(lib_name_token); const lib_name_str = try astgen.strLitAsString(lib_name_token);
const lib_name_slice = astgen.string_bytes.items[@intFromEnum(lib_name_str.index)..][0..lib_name_str.len]; const lib_name_slice = astgen.string_bytes.items[@intFromEnum(lib_name_str.index)..][0..lib_name_str.len];
if (mem.indexOfScalar(u8, lib_name_slice, 0) != null) { if (mem.findScalar(u8, lib_name_slice, 0) != null) {
return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{}); return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{});
} else if (lib_name_str.len == 0) { } else if (lib_name_str.len == 0) {
return astgen.failTok(lib_name_token, "library name cannot be empty", .{}); return astgen.failTok(lib_name_token, "library name cannot be empty", .{});
@@ -4769,7 +4769,7 @@ fn testDecl(
.string_literal => name: { .string_literal => name: {
const name = try astgen.strLitAsString(test_name_token); const name = try astgen.strLitAsString(test_name_token);
const slice = astgen.string_bytes.items[@intFromEnum(name.index)..][0..name.len]; const slice = astgen.string_bytes.items[@intFromEnum(name.index)..][0..name.len];
if (mem.indexOfScalar(u8, slice, 0) != null) { if (mem.findScalar(u8, slice, 0) != null) {
return astgen.failTok(test_name_token, "test name cannot contain null bytes", .{}); return astgen.failTok(test_name_token, "test name cannot contain null bytes", .{});
} else if (slice.len == 0) { } else if (slice.len == 0) {
return astgen.failTok(test_name_token, "empty test name must be omitted", .{}); return astgen.failTok(test_name_token, "empty test name must be omitted", .{});
@@ -8779,7 +8779,7 @@ fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node:
} }
fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token: Ast.TokenIndex, bytes: []const u8) InnerError { fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token: Ast.TokenIndex, bytes: []const u8) InnerError {
const is_float = std.mem.indexOfScalar(u8, bytes, '.') != null; const is_float = std.mem.findScalar(u8, bytes, '.') != null;
switch (err) { switch (err) {
.leading_zero => if (is_float) { .leading_zero => if (is_float) {
return astgen.failTok(token, "number '{s}' has leading zero", .{bytes}); return astgen.failTok(token, "number '{s}' has leading zero", .{bytes});
@@ -9272,7 +9272,7 @@ fn builtinCall(
const str_lit_token = tree.nodeMainToken(operand_node); const str_lit_token = tree.nodeMainToken(operand_node);
const str = try astgen.strLitAsString(str_lit_token); const str = try astgen.strLitAsString(str_lit_token);
const str_slice = astgen.string_bytes.items[@intFromEnum(str.index)..][0..str.len]; const str_slice = astgen.string_bytes.items[@intFromEnum(str.index)..][0..str.len];
if (mem.indexOfScalar(u8, str_slice, 0) != null) { if (mem.findScalar(u8, str_slice, 0) != null) {
return astgen.failTok(str_lit_token, "import path cannot contain null bytes", .{}); return astgen.failTok(str_lit_token, "import path cannot contain null bytes", .{});
} else if (str.len == 0) { } else if (str.len == 0) {
return astgen.failTok(str_lit_token, "import path cannot be empty", .{}); return astgen.failTok(str_lit_token, "import path cannot be empty", .{});
@@ -11418,7 +11418,7 @@ fn identifierTokenString(astgen: *AstGen, token: Ast.TokenIndex) InnerError![]co
var buf: ArrayList(u8) = .empty; var buf: ArrayList(u8) = .empty;
defer buf.deinit(astgen.gpa); defer buf.deinit(astgen.gpa);
try astgen.parseStrLit(token, &buf, ident_name, 1); try astgen.parseStrLit(token, &buf, ident_name, 1);
if (mem.indexOfScalar(u8, buf.items, 0) != null) { if (mem.findScalar(u8, buf.items, 0) != null) {
return astgen.failTok(token, "identifier cannot contain null bytes", .{}); return astgen.failTok(token, "identifier cannot contain null bytes", .{});
} else if (buf.items.len == 0) { } else if (buf.items.len == 0) {
return astgen.failTok(token, "identifier cannot be empty", .{}); return astgen.failTok(token, "identifier cannot be empty", .{});
@@ -11444,7 +11444,7 @@ fn appendIdentStr(
const start = buf.items.len; const start = buf.items.len;
try astgen.parseStrLit(token, buf, ident_name, 1); try astgen.parseStrLit(token, buf, ident_name, 1);
const slice = buf.items[start..]; const slice = buf.items[start..];
if (mem.indexOfScalar(u8, slice, 0) != null) { if (mem.findScalar(u8, slice, 0) != null) {
return astgen.failTok(token, "identifier cannot contain null bytes", .{}); return astgen.failTok(token, "identifier cannot contain null bytes", .{});
} else if (slice.len == 0) { } else if (slice.len == 0) {
return astgen.failTok(token, "identifier cannot be empty", .{}); return astgen.failTok(token, "identifier cannot be empty", .{});
@@ -11701,7 +11701,7 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice {
const token_bytes = astgen.tree.tokenSlice(str_lit_token); const token_bytes = astgen.tree.tokenSlice(str_lit_token);
try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0);
const key: []const u8 = string_bytes.items[str_index..]; const key: []const u8 = string_bytes.items[str_index..];
if (std.mem.indexOfScalar(u8, key, 0)) |_| return .{ if (std.mem.findScalar(u8, key, 0)) |_| return .{
.index = @enumFromInt(str_index), .index = @enumFromInt(str_index),
.len = @intCast(key.len), .len = @intCast(key.len),
}; };

View file

@@ -3686,7 +3686,7 @@ fn eatDocComments(p: *Parse) Allocator.Error!?TokenIndex {
} }
fn tokensOnSameLine(p: *Parse, token1: TokenIndex, token2: TokenIndex) bool { fn tokensOnSameLine(p: *Parse, token1: TokenIndex, token2: TokenIndex) bool {
return std.mem.indexOfScalar(u8, p.source[p.tokenStart(token1)..p.tokenStart(token2)], '\n') == null; return std.mem.findScalar(u8, p.source[p.tokenStart(token1)..p.tokenStart(token2)], '\n') == null;
} }
fn eatToken(p: *Parse, tag: Token.Tag) ?TokenIndex { fn eatToken(p: *Parse, tag: Token.Tag) ?TokenIndex {

View file

@@ -109,7 +109,7 @@ fn iterateAndFilterByVersion(
.build = "", .build = "",
}; };
const suffix = entry.name[prefix.len..]; const suffix = entry.name[prefix.len..];
const underscore = std.mem.indexOfScalar(u8, entry.name, '_'); const underscore = std.mem.findScalar(u8, entry.name, '_');
var num_it = std.mem.splitScalar(u8, suffix[0 .. underscore orelse suffix.len], '.'); var num_it = std.mem.splitScalar(u8, suffix[0 .. underscore orelse suffix.len], '.');
version.nums[0] = Version.parseNum(num_it.first()) orelse continue; version.nums[0] = Version.parseNum(num_it.first()) orelse continue;
for (version.nums[1..]) |*num| for (version.nums[1..]) |*num|

View file

@@ -120,7 +120,7 @@ pub const NullTerminatedString = enum(u32) {
/// Given an index into `string_bytes` returns the null-terminated string found there. /// Given an index into `string_bytes` returns the null-terminated string found there.
pub fn nullTerminatedString(code: Zir, index: NullTerminatedString) [:0]const u8 { pub fn nullTerminatedString(code: Zir, index: NullTerminatedString) [:0]const u8 {
const slice = code.string_bytes[@intFromEnum(index)..]; const slice = code.string_bytes[@intFromEnum(index)..];
return slice[0..std.mem.indexOfScalar(u8, slice, 0).? :0]; return slice[0..std.mem.findScalar(u8, slice, 0).? :0];
} }
pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref { pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref {

View file

@@ -221,7 +221,7 @@ pub const Node = union(enum) {
pub const NullTerminatedString = enum(u32) { pub const NullTerminatedString = enum(u32) {
_, _,
pub fn get(nts: NullTerminatedString, zoir: Zoir) [:0]const u8 { pub fn get(nts: NullTerminatedString, zoir: Zoir) [:0]const u8 {
const idx = std.mem.indexOfScalar(u8, zoir.string_bytes[@intFromEnum(nts)..], 0).?; const idx = std.mem.findScalar(u8, zoir.string_bytes[@intFromEnum(nts)..], 0).?;
return zoir.string_bytes[@intFromEnum(nts)..][0..idx :0]; return zoir.string_bytes[@intFromEnum(nts)..][0..idx :0];
} }
}; };

View file

@@ -487,7 +487,7 @@ fn appendIdentStr(zg: *ZonGen, ident_token: Ast.TokenIndex) error{ OutOfMemory,
} }
const slice = zg.string_bytes.items[start..]; const slice = zg.string_bytes.items[start..];
if (mem.indexOfScalar(u8, slice, 0) != null) { if (mem.findScalar(u8, slice, 0) != null) {
try zg.addErrorTok(ident_token, "identifier cannot contain null bytes", .{}); try zg.addErrorTok(ident_token, "identifier cannot contain null bytes", .{});
return error.BadString; return error.BadString;
} else if (slice.len == 0) { } else if (slice.len == 0) {
@@ -586,7 +586,7 @@ fn strLitAsString(zg: *ZonGen, str_node: Ast.Node.Index) error{ OutOfMemory, Bad
}, },
} }
const key: []const u8 = string_bytes.items[str_index..]; const key: []const u8 = string_bytes.items[str_index..];
if (std.mem.indexOfScalar(u8, key, 0) != null) return .{ .slice = .{ if (std.mem.findScalar(u8, key, 0) != null) return .{ .slice = .{
.start = str_index, .start = str_index,
.len = @intCast(key.len), .len = @intCast(key.len),
} }; } };
@@ -785,7 +785,7 @@ fn lowerStrLitError(
} }
fn lowerNumberError(zg: *ZonGen, err: std.zig.number_literal.Error, token: Ast.TokenIndex, bytes: []const u8) Allocator.Error!void { fn lowerNumberError(zg: *ZonGen, err: std.zig.number_literal.Error, token: Ast.TokenIndex, bytes: []const u8) Allocator.Error!void {
const is_float = std.mem.indexOfScalar(u8, bytes, '.') != null; const is_float = std.mem.findScalar(u8, bytes, '.') != null;
switch (err) { switch (err) {
.leading_zero => if (is_float) { .leading_zero => if (is_float) {
try zg.addErrorTok(token, "number '{s}' has leading zero", .{bytes}); try zg.addErrorTok(token, "number '{s}' has leading zero", .{bytes});

View file

@@ -115,7 +115,7 @@ fn PromoteIntLiteralReturnType(comptime SuffixType: type, comptime number: compt
else else
&signed_oct_hex; &signed_oct_hex;
var pos = std.mem.indexOfScalar(type, list, SuffixType).?; var pos = std.mem.findScalar(type, list, SuffixType).?;
while (pos < list.len) : (pos += 1) { while (pos < list.len) : (pos += 1) {
if (number >= std.math.minInt(list[pos]) and number <= std.math.maxInt(list[pos])) { if (number >= std.math.minInt(list[pos]) and number <= std.math.maxInt(list[pos])) {
return list[pos]; return list[pos];

View file

@@ -9000,7 +9000,7 @@ pub fn attrs(self: *Builder, attributes: []Attribute.Index) Allocator.Error!Attr
pub fn fnAttrs(self: *Builder, fn_attributes: []const Attributes) Allocator.Error!FunctionAttributes { pub fn fnAttrs(self: *Builder, fn_attributes: []const Attributes) Allocator.Error!FunctionAttributes {
try self.function_attributes_set.ensureUnusedCapacity(self.gpa, 1); try self.function_attributes_set.ensureUnusedCapacity(self.gpa, 1);
const function_attributes: FunctionAttributes = @enumFromInt(try self.attrGeneric(@ptrCast( const function_attributes: FunctionAttributes = @enumFromInt(try self.attrGeneric(@ptrCast(
fn_attributes[0..if (std.mem.lastIndexOfNone(Attributes, fn_attributes, &.{.none})) |last| fn_attributes[0..if (std.mem.findLastNone(Attributes, fn_attributes, &.{.none})) |last|
last + 1 last + 1
else else
0], 0],

View file

@@ -26,7 +26,7 @@ pub fn BitcodeWriter(comptime types: []const type) type {
widths: [types.len]u16, widths: [types.len]u16,
pub fn getTypeWidth(self: BcWriter, comptime Type: type) u16 { pub fn getTypeWidth(self: BcWriter, comptime Type: type) u16 {
return self.widths[comptime std.mem.indexOfScalar(type, types, Type).?]; return self.widths[comptime std.mem.findScalar(type, types, Type).?];
} }
pub fn init(allocator: std.mem.Allocator, widths: [types.len]u16) BcWriter { pub fn init(allocator: std.mem.Allocator, widths: [types.len]u16) BcWriter {

View file

@@ -1076,7 +1076,7 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ
const path_maybe_args = mem.trimEnd(u8, trimmed_line, "\n"); const path_maybe_args = mem.trimEnd(u8, trimmed_line, "\n");
// Separate path and args. // Separate path and args.
const path_end = mem.indexOfAny(u8, path_maybe_args, &.{ ' ', '\t', 0 }) orelse path_maybe_args.len; const path_end = mem.findAny(u8, path_maybe_args, &.{ ' ', '\t', 0 }) orelse path_maybe_args.len;
const unvalidated_path = path_maybe_args[0..path_end]; const unvalidated_path = path_maybe_args[0..path_end];
file_name = if (fs.path.isAbsolute(unvalidated_path)) unvalidated_path else return error.RelativeShebang; file_name = if (fs.path.isAbsolute(unvalidated_path)) unvalidated_path else return error.RelativeShebang;
continue; continue;

View file

@@ -35,7 +35,7 @@ const SparcCpuinfoImpl = struct {
fn line_hook(self: *SparcCpuinfoImpl, key: []const u8, value: []const u8) !bool { fn line_hook(self: *SparcCpuinfoImpl, key: []const u8, value: []const u8) !bool {
if (mem.eql(u8, key, "cpu")) { if (mem.eql(u8, key, "cpu")) {
inline for (cpu_names) |pair| { inline for (cpu_names) |pair| {
if (mem.indexOfPos(u8, value, 0, pair[0]) != null) { if (mem.findPos(u8, value, 0, pair[0]) != null) {
self.model = pair[1]; self.model = pair[1];
break; break;
} }
@@ -147,7 +147,7 @@ const PowerpcCpuinfoImpl = struct {
// The model name is often followed by a comma or space and extra // The model name is often followed by a comma or space and extra
// info. // info.
inline for (cpu_names) |pair| { inline for (cpu_names) |pair| {
const end_index = mem.indexOfAny(u8, value, ", ") orelse value.len; const end_index = mem.findAny(u8, value, ", ") orelse value.len;
if (mem.eql(u8, value[0..end_index], pair[0])) { if (mem.eql(u8, value[0..end_index], pair[0])) {
self.model = pair[1]; self.model = pair[1];
break; break;
@@ -318,7 +318,7 @@ const ArmCpuinfoImpl = struct {
self.have_fields += 1; self.have_fields += 1;
} else if (mem.eql(u8, key, "model name")) { } else if (mem.eql(u8, key, "model name")) {
// ARMv6 cores report "CPU architecture" equal to 7. // ARMv6 cores report "CPU architecture" equal to 7.
if (mem.indexOf(u8, value, "(v6l)")) |_| { if (mem.find(u8, value, "(v6l)")) |_| {
info.is_really_v6 = true; info.is_really_v6 = true;
} }
} else if (mem.eql(u8, key, "CPU revision")) { } else if (mem.eql(u8, key, "CPU revision")) {
@@ -427,7 +427,7 @@ fn CpuinfoParser(comptime impl: anytype) type {
fn parse(arch: Target.Cpu.Arch, reader: *Io.Reader) !?Target.Cpu { fn parse(arch: Target.Cpu.Arch, reader: *Io.Reader) !?Target.Cpu {
var obj: impl = .{}; var obj: impl = .{};
while (try reader.takeDelimiter('\n')) |line| { while (try reader.takeDelimiter('\n')) |line| {
const colon_pos = mem.indexOfScalar(u8, line, ':') orelse continue; const colon_pos = mem.findScalar(u8, line, ':') orelse continue;
const key = mem.trimEnd(u8, line[0..colon_pos], " \t"); const key = mem.trimEnd(u8, line[0..colon_pos], " \t");
const value = mem.trimStart(u8, line[colon_pos + 1 ..], " \t"); const value = mem.trimStart(u8, line[colon_pos + 1 ..], " \t");
if (!try obj.line_hook(key, value)) break; if (!try obj.line_hook(key, value)) break;

View file

@@ -107,7 +107,7 @@ pub const EndRecord = extern struct {
/// TODO audit this logic /// TODO audit this logic
pub fn findBuffer(buffer: []const u8) FindBufferError!EndRecord { pub fn findBuffer(buffer: []const u8) FindBufferError!EndRecord {
const pos = std.mem.lastIndexOf(u8, buffer, &end_record_sig) orelse return error.ZipNoEndRecord; const pos = std.mem.findLast(u8, buffer, &end_record_sig) orelse return error.ZipNoEndRecord;
if (pos + @sizeOf(EndRecord) > buffer.len) return error.EndOfStream; if (pos + @sizeOf(EndRecord) > buffer.len) return error.EndOfStream;
const record_ptr: *EndRecord = @ptrCast(buffer[pos..][0..@sizeOf(EndRecord)]); const record_ptr: *EndRecord = @ptrCast(buffer[pos..][0..@sizeOf(EndRecord)]);
var record = record_ptr.*; var record = record_ptr.*;
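A minimal sketch of the backwards search used to locate the end record, assuming the findLast signature shown above:

    const data = "ab<sig>cd<sig>ef";
    const last = std.mem.findLast(u8, data, "<sig>"); // start of the final occurrence, index 9 here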
@@ -539,7 +539,7 @@ pub const Iterator = struct {
if (options.allow_backslashes) { if (options.allow_backslashes) {
std.mem.replaceScalar(u8, filename, '\\', '/'); std.mem.replaceScalar(u8, filename, '\\', '/');
} else { } else {
if (std.mem.indexOfScalar(u8, filename, '\\')) |_| if (std.mem.findScalar(u8, filename, '\\')) |_|
return error.ZipFilenameHasBackslash; return error.ZipFilenameHasBackslash;
} }
@@ -626,7 +626,7 @@ pub const Diagnostics = struct {
if (!self.saw_first_file) { if (!self.saw_first_file) {
self.saw_first_file = true; self.saw_first_file = true;
std.debug.assert(self.root_dir.len == 0); std.debug.assert(self.root_dir.len == 0);
const root_len = std.mem.indexOfScalar(u8, name, '/') orelse return; const root_len = std.mem.findScalar(u8, name, '/') orelse return;
std.debug.assert(root_len > 0); std.debug.assert(root_len > 0);
self.root_dir = try self.allocator.dupe(u8, name[0..root_len]); self.root_dir = try self.allocator.dupe(u8, name[0..root_len]);
} else if (self.root_dir.len > 0) { } else if (self.root_dir.len > 0) {

View file

@@ -1831,7 +1831,7 @@ pub const NullTerminatedString = enum(u32) {
pub fn toSlice(nts: NullTerminatedString, air: Air) [:0]const u8 { pub fn toSlice(nts: NullTerminatedString, air: Air) [:0]const u8 {
if (nts == .none) return ""; if (nts == .none) return "";
const bytes = std.mem.sliceAsBytes(air.extra.items[@intFromEnum(nts)..]); const bytes = std.mem.sliceAsBytes(air.extra.items[@intFromEnum(nts)..]);
return bytes[0..std.mem.indexOfScalar(u8, bytes, 0).? :0]; return bytes[0..std.mem.findScalar(u8, bytes, 0).? :0];
} }
}; };

View file

@@ -66,7 +66,7 @@ fn runThread(ids: *IncrementalDebugServer) void {
else => @panic("IncrementalDebugServer: failed to read command"), else => @panic("IncrementalDebugServer: failed to read command"),
}; };
const cmd_and_arg = std.mem.trim(u8, untrimmed, " \t\r\n"); const cmd_and_arg = std.mem.trim(u8, untrimmed, " \t\r\n");
const cmd: []const u8, const arg: []const u8 = if (std.mem.indexOfScalar(u8, cmd_and_arg, ' ')) |i| const cmd: []const u8, const arg: []const u8 = if (std.mem.findScalar(u8, cmd_and_arg, ' ')) |i|
.{ cmd_and_arg[0..i], cmd_and_arg[i + 1 ..] } .{ cmd_and_arg[0..i], cmd_and_arg[i + 1 ..] }
else else
.{ cmd_and_arg, "" }; .{ cmd_and_arg, "" };
@@ -174,7 +174,7 @@ fn handleCommand(zcu: *Zcu, w: *std.Io.Writer, cmd_str: []const u8, arg_str: []c
const ty: Type = .fromInterned(type_ip_index); const ty: Type = .fromInterned(type_ip_index);
const ty_name = ty.containerTypeName(ip).toSlice(ip); const ty_name = ty.containerTypeName(ip).toSlice(ip);
const success = switch (@as(u2, @intFromBool(anchor_start)) << 1 | @intFromBool(anchor_end)) { const success = switch (@as(u2, @intFromBool(anchor_start)) << 1 | @intFromBool(anchor_end)) {
0b00 => std.mem.indexOf(u8, ty_name, query) != null, 0b00 => std.mem.find(u8, ty_name, query) != null,
0b01 => std.mem.endsWith(u8, ty_name, query), 0b01 => std.mem.endsWith(u8, ty_name, query),
0b10 => std.mem.startsWith(u8, ty_name, query), 0b10 => std.mem.startsWith(u8, ty_name, query),
0b11 => std.mem.eql(u8, ty_name, query), 0b11 => std.mem.eql(u8, ty_name, query),
@@ -195,7 +195,7 @@ fn handleCommand(zcu: *Zcu, w: *std.Io.Writer, cmd_str: []const u8, arg_str: []c
const nav = ip.getNav(nav_index); const nav = ip.getNav(nav_index);
const nav_fqn = nav.fqn.toSlice(ip); const nav_fqn = nav.fqn.toSlice(ip);
const success = switch (@as(u2, @intFromBool(anchor_start)) << 1 | @intFromBool(anchor_end)) { const success = switch (@as(u2, @intFromBool(anchor_start)) << 1 | @intFromBool(anchor_end)) {
0b00 => std.mem.indexOf(u8, nav_fqn, query) != null, 0b00 => std.mem.find(u8, nav_fqn, query) != null,
0b01 => std.mem.endsWith(u8, nav_fqn, query), 0b01 => std.mem.endsWith(u8, nav_fqn, query),
0b10 => std.mem.startsWith(u8, nav_fqn, query), 0b10 => std.mem.startsWith(u8, nav_fqn, query),
0b11 => std.mem.eql(u8, nav_fqn, query), 0b11 => std.mem.eql(u8, nav_fqn, query),
@@ -299,7 +299,7 @@ fn parseIndex(str: []const u8) ?u32 {
return std.fmt.parseInt(u32, str, 10) catch null; return std.fmt.parseInt(u32, str, 10) catch null;
} }
fn parseAnalUnit(str: []const u8) ?AnalUnit { fn parseAnalUnit(str: []const u8) ?AnalUnit {
const split_idx = std.mem.indexOfScalar(u8, str, ' ') orelse return null; const split_idx = std.mem.findScalar(u8, str, ' ') orelse return null;
const kind = str[0..split_idx]; const kind = str[0..split_idx];
const idx_str = str[split_idx + 1 ..]; const idx_str = str[split_idx + 1 ..];
if (std.mem.eql(u8, kind, "comptime")) { if (std.mem.eql(u8, kind, "comptime")) {

View file

@@ -1760,7 +1760,7 @@ pub const String = enum(u32) {
} }
pub fn toNullTerminatedString(string: String, len: u64, ip: *const InternPool) NullTerminatedString { pub fn toNullTerminatedString(string: String, len: u64, ip: *const InternPool) NullTerminatedString {
assert(std.mem.indexOfScalar(u8, string.toSlice(len, ip), 0) == null); assert(std.mem.findScalar(u8, string.toSlice(len, ip), 0) == null);
assert(string.at(len, ip) == 0); assert(string.at(len, ip) == 0);
return @enumFromInt(@intFromEnum(string)); return @enumFromInt(@intFromEnum(string));
} }
@@ -1887,7 +1887,7 @@ pub const NullTerminatedString = enum(u32) {
pub fn toUnsigned(string: NullTerminatedString, ip: *const InternPool) ?u32 { pub fn toUnsigned(string: NullTerminatedString, ip: *const InternPool) ?u32 {
const slice = string.toSlice(ip); const slice = string.toSlice(ip);
if (slice.len > 1 and slice[0] == '0') return null; if (slice.len > 1 and slice[0] == '0') return null;
if (std.mem.indexOfScalar(u8, slice, '_')) |_| return null; if (std.mem.findScalar(u8, slice, '_')) |_| return null;
return std.fmt.parseUnsigned(u32, slice, 10) catch null; return std.fmt.parseUnsigned(u32, slice, 10) catch null;
} }
@@ -11844,7 +11844,7 @@ pub fn getOrPutTrailingString(
.tid = tid, .tid = tid,
.index = strings.mutate.len - 1, .index = strings.mutate.len - 1,
}).wrap(ip))); }).wrap(ip)));
const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null; const has_embedded_null = std.mem.findScalar(u8, key, 0) != null;
switch (embedded_nulls) { switch (embedded_nulls) {
.no_embedded_nulls => assert(!has_embedded_null), .no_embedded_nulls => assert(!has_embedded_null),
.maybe_embedded_nulls => if (has_embedded_null) { .maybe_embedded_nulls => if (has_embedded_null) {

View file

@@ -83,7 +83,7 @@ pub const Hash = struct {
const their_multihash_func = std.fmt.parseInt(u8, h.bytes[0..2], 16) catch return false; const their_multihash_func = std.fmt.parseInt(u8, h.bytes[0..2], 16) catch return false;
if (@as(MultihashFunction, @enumFromInt(their_multihash_func)) != multihash_function) return false; if (@as(MultihashFunction, @enumFromInt(their_multihash_func)) != multihash_function) return false;
if (h.toSlice().len != multihash_hex_digest_len) return false; if (h.toSlice().len != multihash_hex_digest_len) return false;
return std.mem.indexOfScalar(u8, &h.bytes, '-') == null; return std.mem.findScalar(u8, &h.bytes, '-') == null;
} }
test isOld { test isOld {

View file

@@ -359,7 +359,7 @@ pub fn run(f: *Fetch) RunError!void {
const parent_sub_path = f.parent_package_root.sub_path; const parent_sub_path = f.parent_package_root.sub_path;
const end = find_end: { const end = find_end: {
if (parent_sub_path.len > prefix_len) { if (parent_sub_path.len > prefix_len) {
// Use `isSep` instead of `indexOfScalarPos` to account for // Use `isSep` instead of `findScalarPos` to account for
// Windows accepting both `\` and `/` as path separators. // Windows accepting both `\` and `/` as path separators.
for (parent_sub_path[prefix_len..], prefix_len..) |c, i| { for (parent_sub_path[prefix_len..], prefix_len..) |c, i| {
if (std.fs.path.isSep(c)) break :find_end i; if (std.fs.path.isSep(c)) break :find_end i;
@@ -962,7 +962,7 @@ const FileType = enum {
if (cd_header[value_start] != '=') return null; if (cd_header[value_start] != '=') return null;
value_start += 1; value_start += 1;
var value_end = std.mem.indexOfPos(u8, cd_header, value_start, ";") orelse cd_header.len; var value_end = std.mem.findPos(u8, cd_header, value_start, ";") orelse cd_header.len;
if (cd_header[value_end - 1] == '\"') { if (cd_header[value_end - 1] == '\"') {
value_end -= 1; value_end -= 1;
} }
@@ -1142,7 +1142,7 @@ fn unpackResource(
return f.fail(f.location_tok, try eb.addString("missing 'Content-Type' header")); return f.fail(f.location_tok, try eb.addString("missing 'Content-Type' header"));
// Extract the MIME type, ignoring charset and boundary directives // Extract the MIME type, ignoring charset and boundary directives
const mime_type_end = std.mem.indexOf(u8, content_type, ";") orelse content_type.len; const mime_type_end = std.mem.find(u8, content_type, ";") orelse content_type.len;
const mime_type = content_type[0..mime_type_end]; const mime_type = content_type[0..mime_type_end];
if (ascii.eqlIgnoreCase(mime_type, "application/x-tar")) if (ascii.eqlIgnoreCase(mime_type, "application/x-tar"))

View file

@@ -334,7 +334,7 @@ pub const Repository = struct {
fn next(iterator: *TreeIterator) !?Entry { fn next(iterator: *TreeIterator) !?Entry {
if (iterator.pos == iterator.data.len) return null; if (iterator.pos == iterator.data.len) return null;
const mode_end = mem.indexOfScalarPos(u8, iterator.data, iterator.pos, ' ') orelse return error.InvalidTree; const mode_end = mem.findScalarPos(u8, iterator.data, iterator.pos, ' ') orelse return error.InvalidTree;
const mode: packed struct { const mode: packed struct {
permission: u9, permission: u9,
unused: u3, unused: u3,
@@ -349,7 +349,7 @@ pub const Repository = struct {
}; };
iterator.pos = mode_end + 1; iterator.pos = mode_end + 1;
const name_end = mem.indexOfScalarPos(u8, iterator.data, iterator.pos, 0) orelse return error.InvalidTree; const name_end = mem.findScalarPos(u8, iterator.data, iterator.pos, 0) orelse return error.InvalidTree;
const name = iterator.data[iterator.pos..name_end :0]; const name = iterator.data[iterator.pos..name_end :0];
iterator.pos = name_end + 1; iterator.pos = name_end + 1;
@@ -821,7 +821,7 @@ pub const Session = struct {
value: ?[]const u8 = null, value: ?[]const u8 = null,
fn parse(data: []const u8) Capability { fn parse(data: []const u8) Capability {
return if (mem.indexOfScalar(u8, data, '=')) |separator_pos| return if (mem.findScalar(u8, data, '=')) |separator_pos|
.{ .key = data[0..separator_pos], .value = data[separator_pos + 1 ..] } .{ .key = data[0..separator_pos], .value = data[separator_pos + 1 ..] }
else else
.{ .key = data }; .{ .key = data };
@@ -939,17 +939,17 @@ pub const Session = struct {
.flush => return null, .flush => return null,
.data => |data| { .data => |data| {
const ref_data = Packet.normalizeText(data); const ref_data = Packet.normalizeText(data);
const oid_sep_pos = mem.indexOfScalar(u8, ref_data, ' ') orelse return error.InvalidRefPacket; const oid_sep_pos = mem.findScalar(u8, ref_data, ' ') orelse return error.InvalidRefPacket;
const oid = Oid.parse(it.format, data[0..oid_sep_pos]) catch return error.InvalidRefPacket; const oid = Oid.parse(it.format, data[0..oid_sep_pos]) catch return error.InvalidRefPacket;
const name_sep_pos = mem.indexOfScalarPos(u8, ref_data, oid_sep_pos + 1, ' ') orelse ref_data.len; const name_sep_pos = mem.findScalarPos(u8, ref_data, oid_sep_pos + 1, ' ') orelse ref_data.len;
const name = ref_data[oid_sep_pos + 1 .. name_sep_pos]; const name = ref_data[oid_sep_pos + 1 .. name_sep_pos];
var symref_target: ?[]const u8 = null; var symref_target: ?[]const u8 = null;
var peeled: ?Oid = null; var peeled: ?Oid = null;
var last_sep_pos = name_sep_pos; var last_sep_pos = name_sep_pos;
while (last_sep_pos < ref_data.len) { while (last_sep_pos < ref_data.len) {
const next_sep_pos = mem.indexOfScalarPos(u8, ref_data, last_sep_pos + 1, ' ') orelse ref_data.len; const next_sep_pos = mem.findScalarPos(u8, ref_data, last_sep_pos + 1, ' ') orelse ref_data.len;
const attribute = ref_data[last_sep_pos + 1 .. next_sep_pos]; const attribute = ref_data[last_sep_pos + 1 .. next_sep_pos];
if (mem.startsWith(u8, attribute, "symref-target:")) { if (mem.startsWith(u8, attribute, "symref-target:")) {
symref_target = attribute["symref-target:".len..]; symref_target = attribute["symref-target:".len..];

View file

@@ -37571,7 +37571,7 @@ pub fn resolveNavPtrModifiers(
const linksection_body = zir_decl.linksection_body orelse break :ls .none; const linksection_body = zir_decl.linksection_body orelse break :ls .none;
const linksection_ref = try sema.resolveInlineBody(block, linksection_body, decl_inst); const linksection_ref = try sema.resolveInlineBody(block, linksection_body, decl_inst);
const bytes = try sema.toConstString(block, section_src, linksection_ref, .{ .simple = .@"linksection" }); const bytes = try sema.toConstString(block, section_src, linksection_ref, .{ .simple = .@"linksection" });
if (std.mem.indexOfScalar(u8, bytes, 0) != null) { if (std.mem.findScalar(u8, bytes, 0) != null) {
return sema.fail(block, section_src, "linksection cannot contain null bytes", .{}); return sema.fail(block, section_src, "linksection cannot contain null bytes", .{});
} else if (bytes.len == 0) { } else if (bytes.len == 0) {
return sema.fail(block, section_src, "linksection cannot be empty", .{}); return sema.fail(block, section_src, "linksection cannot be empty", .{});

View file

@@ -1244,7 +1244,7 @@ pub fn anyScalarIsZero(val: Value, zcu: *Zcu) bool {
.bytes => |str| { .bytes => |str| {
const len = Type.fromInterned(agg.ty).vectorLen(zcu); const len = Type.fromInterned(agg.ty).vectorLen(zcu);
const slice = str.toSlice(len, &zcu.intern_pool); const slice = str.toSlice(len, &zcu.intern_pool);
return std.mem.indexOfScalar(u8, slice, 0) != null; return std.mem.findScalar(u8, slice, 0) != null;
}, },
.elems => |elems| { .elems => |elems| {
for (elems) |elem| { for (elems) |elem| {

View file

@@ -597,7 +597,7 @@ pub const BuiltinDecl = enum {
return switch (decl) { return switch (decl) {
inline else => |tag| { inline else => |tag| {
const name = @tagName(tag); const name = @tagName(tag);
const split = (comptime std.mem.lastIndexOfScalar(u8, name, '.')) orelse return .{ .direct = name }; const split = (comptime std.mem.findScalarLast(u8, name, '.')) orelse return .{ .direct = name };
const parent = @field(BuiltinDecl, name[0..split]); const parent = @field(BuiltinDecl, name[0..split]);
comptime assert(@intFromEnum(parent) < @intFromEnum(tag)); // dependencies ordered correctly comptime assert(@intFromEnum(parent) < @intFromEnum(tag)); // dependencies ordered correctly
return .{ .nested = .{ parent, name[split + 1 ..] } }; return .{ .nested = .{ parent, name[split + 1 ..] } };
@@ -4111,7 +4111,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const fqn_slice = nav.fqn.toSlice(ip); const fqn_slice = nav.fqn.toSlice(ip);
if (comp.test_filters.len > 0) { if (comp.test_filters.len > 0) {
for (comp.test_filters) |test_filter| { for (comp.test_filters) |test_filter| {
if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break; if (std.mem.find(u8, fqn_slice, test_filter) != null) break;
} else break :a false; } else break :a false;
} }
break :a true; break :a true;

View file

@@ -2764,7 +2764,7 @@ const ScanDeclIter = struct {
if (is_named and comp.test_filters.len > 0) { if (is_named and comp.test_filters.len > 0) {
const fqn_slice = fqn.toSlice(ip); const fqn_slice = fqn.toSlice(ip);
for (comp.test_filters) |test_filter| { for (comp.test_filters) |test_filter| {
if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break; if (std.mem.find(u8, fqn_slice, test_filter) != null) break;
} else break :a false; } else break :a false;
} }
try zcu.test_functions.put(gpa, nav, {}); try zcu.test_functions.put(gpa, nav, {});

@@ -162,7 +162,7 @@ const matchers = matchers: {
            arg.* = zonCast(param.type.?, instruction.encode[encode_index], symbols);
            return @call(.auto, encode, args);
        } else if (pattern_token[0] == '<') {
-            const symbol_name = comptime pattern_token[1 .. std.mem.indexOfScalarPos(u8, pattern_token, 1, '|') orelse
+            const symbol_name = comptime pattern_token[1 .. std.mem.findScalarPos(u8, pattern_token, 1, '|') orelse
                pattern_token.len - 1];
            const symbol = @field(Symbol, symbol_name);
            const symbol_ptr = &@field(symbols, symbol_name);
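findScalarPos is the positional variant used here. A sketch assuming it keeps indexOfScalarPos's (type, slice, start_index, value) shape, with a made-up pattern token:

const std = @import("std");

test "findScalarPos searches from an offset" {
    const pattern_token = "<reg|x>";
    // Skip the leading '<' and locate the '|' separator, as in the matcher above.
    const bar = std.mem.findScalarPos(u8, pattern_token, 1, '|') orelse pattern_token.len - 1;
    try std.testing.expectEqualStrings("reg", pattern_token[1..bar]);
}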

@@ -2865,7 +2865,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
             const remaining_source = std.mem.span(as.source);
             return isel.fail("unable to assemble: '{s}'", .{std.mem.trim(
                 u8,
-                as.source[0 .. std.mem.indexOfScalar(u8, remaining_source, '\n') orelse remaining_source.len],
+                as.source[0 .. std.mem.findScalar(u8, remaining_source, '\n') orelse remaining_source.len],
                 &std.ascii.whitespace,
             )});
         },

@@ -5513,7 +5513,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
            // for the string, we still use the next u32 for the null terminator.
            extra_i += (constraint.len + name.len + (2 + 3)) / 4;
-            if (constraint.len < 1 or mem.indexOfScalar(u8, "=+&%", constraint[0]) != null or
+            if (constraint.len < 1 or mem.findScalar(u8, "=+&%", constraint[0]) != null or
                (constraint[0] == '{' and constraint[constraint.len - 1] != '}'))
            {
                return f.fail("CBE: constraint not supported: '{s}'", .{constraint});
@@ -5577,7 +5577,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
            }
            const desc = mem.sliceTo(asm_source[src_i..], ']');
-            if (mem.indexOfScalar(u8, desc, ':')) |colon| {
+            if (mem.findScalar(u8, desc, ':')) |colon| {
                const name = desc[0..colon];
                const modifier = desc[colon + 1 ..];

@@ -12229,7 +12229,7 @@ fn lowerSystemVFnRetTy(o: *Object, pt: Zcu.PerThread, fn_info: InternPool.Key.Fu
                .win_i128 => unreachable, // windows only
            }
        }
-        const first_non_integer = std.mem.indexOfNone(x86_64_abi.Class, &classes, &.{.integer});
+        const first_non_integer = std.mem.findNone(x86_64_abi.Class, &classes, &.{.integer});
        if (first_non_integer == null or classes[first_non_integer.?] == .none) {
            assert(first_non_integer orelse classes.len == types_index);
            switch (ip.indexToKey(return_type.toIntern())) {
@@ -12527,7 +12527,7 @@ const ParamTypeIterator = struct {
                    },
                }
            }
-            const first_non_integer = std.mem.indexOfNone(x86_64_abi.Class, &classes, &.{.integer});
+            const first_non_integer = std.mem.findNone(x86_64_abi.Class, &classes, &.{.integer});
            if (first_non_integer == null or classes[first_non_integer.?] == .none) {
                assert(first_non_integer orelse classes.len == types_index);
                if (types_index == 1) {
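findNone reports the first element that is not in the given set (null if every element is), which is how the classifier above checks that only .integer entries appear. A hedged sketch using a stand-in enum, since abi.Class itself is not shown in this diff:

const std = @import("std");

test "findNone finds the first element outside a set" {
    const Class = enum { integer, sse, none }; // stand-in, not the real abi.Class
    const classes = [_]Class{ .integer, .integer, .sse };
    try std.testing.expectEqual(@as(?usize, 2), std.mem.findNone(Class, &classes, &.{.integer}));
    try std.testing.expectEqual(@as(?usize, null), std.mem.findNone(Class, classes[0..2], &.{.integer}));
}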

@@ -6280,8 +6280,8 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
    next_op: for (&ops) |*op| {
        const op_str = while (!last_op) {
            const full_str = op_it.next() orelse break :next_op;
-            const code_str = if (mem.indexOfScalar(u8, full_str, '#') orelse
-                mem.indexOf(u8, full_str, "//")) |comment|
+            const code_str = if (mem.findScalar(u8, full_str, '#') orelse
+                mem.find(u8, full_str, "//")) |comment|
            code: {
                last_op = true;
                break :code full_str[0..comment];
@@ -6295,7 +6295,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
        } else if (std.fmt.parseInt(i12, op_str, 10)) |int| {
            op.* = .{ .imm = Immediate.s(int) };
        } else |_| if (mem.startsWith(u8, op_str, "%[")) {
-            const mod_index = mem.indexOf(u8, op_str, "]@");
+            const mod_index = mem.find(u8, op_str, "]@");
            const modifier = if (mod_index) |index|
                op_str[index + "]@".len ..]
            else
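The assembly parsers above strip an optional trailing comment by taking the position of '#' if present and falling back to "//". A sketch of that combined pattern under the same signature assumptions, with an invented instruction string:

const std = @import("std");

test "findScalar orelse find strips a trailing comment" {
    const full_str = "mv a0, a1 # move";
    // Position of '#' if present, else of "//", else the full length.
    const comment = std.mem.findScalar(u8, full_str, '#') orelse
        std.mem.find(u8, full_str, "//") orelse full_str.len;
    const code_str = std.mem.trim(u8, full_str[0..comment], &std.ascii.whitespace);
    try std.testing.expectEqualStrings("mv a0, a1", code_str);
}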

@@ -177421,7 +177421,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
        else if (std.mem.endsWith(u8, mnem_str, "l"))
            .dword
        else if (std.mem.endsWith(u8, mnem_str, "q") and
-            (std.mem.indexOfScalar(u8, "vp", mnem_str[0]) == null or !std.mem.endsWith(u8, mnem_str, "dq")))
+            (std.mem.findScalar(u8, "vp", mnem_str[0]) == null or !std.mem.endsWith(u8, mnem_str, "dq")))
            .qword
        else if (std.mem.endsWith(u8, mnem_str, "t"))
            .tbyte
@@ -177466,8 +177466,8 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
    next_op: for (&ops, 0..) |*op, op_index| {
        const op_str = while (!last_op) {
            const full_str = op_it.next() orelse break :next_op;
-            const code_str = if (std.mem.indexOfScalar(u8, full_str, '#') orelse
-                std.mem.indexOf(u8, full_str, "//")) |comment|
+            const code_str = if (std.mem.findScalar(u8, full_str, '#') orelse
+                std.mem.find(u8, full_str, "//")) |comment|
            code: {
                last_op = true;
                break :code full_str[0..comment];
@@ -177476,7 +177476,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
            if (trim_str.len > 0) break trim_str;
        } else break;
        if (std.mem.startsWith(u8, op_str, "%%")) {
-            const colon = std.mem.indexOfScalarPos(u8, op_str, "%%".len + 2, ':');
+            const colon = std.mem.findScalarPos(u8, op_str, "%%".len + 2, ':');
            const reg = parseRegName(op_str["%%".len .. colon orelse op_str.len]) orelse
                return self.fail("invalid register: '{s}'", .{op_str});
            if (colon) |colon_pos| {
@@ -177496,7 +177496,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
                op.* = .{ .reg = reg };
            }
        } else if (std.mem.startsWith(u8, op_str, "%[") and std.mem.endsWith(u8, op_str, "]")) {
-            const colon = std.mem.indexOfScalarPos(u8, op_str, "%[".len, ':');
+            const colon = std.mem.findScalarPos(u8, op_str, "%[".len, ':');
            const modifier = if (colon) |colon_pos|
                op_str[colon_pos + ":".len .. op_str.len - "]".len]
            else
@@ -177572,7 +177572,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
            else |_|
                return self.fail("invalid immediate: '{s}'", .{op_str});
        } else if (std.mem.endsWith(u8, op_str, ")")) {
-            const open = std.mem.indexOfScalar(u8, op_str, '(') orelse
+            const open = std.mem.findScalar(u8, op_str, '(') orelse
                return self.fail("invalid operand: '{s}'", .{op_str});
            var sib_it = std.mem.splitScalar(u8, op_str[open + "(".len .. op_str.len - ")".len], ',');
            const base_str = sib_it.next() orelse
@@ -177631,7 +177631,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
                .disp = if (std.mem.startsWith(u8, op_str[0..open], "%[") and
                    std.mem.endsWith(u8, op_str[0..open], "]"))
                disp: {
-                    const colon = std.mem.indexOfScalarPos(u8, op_str[0..open], "%[".len, ':');
+                    const colon = std.mem.findScalarPos(u8, op_str[0..open], "%[".len, ':');
                    const modifier = if (colon) |colon_pos|
                        op_str[colon_pos + ":".len .. open - "]".len]
                    else
@@ -177697,14 +177697,14 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
            .{ ._, .pseudo }
        else for (std.enums.values(Mir.Inst.Fixes)) |fixes| {
            const fixes_name = @tagName(fixes);
-            const space_index = std.mem.indexOfScalar(u8, fixes_name, ' ');
+            const space_index = std.mem.findScalar(u8, fixes_name, ' ');
            const fixes_prefix = if (space_index) |index|
                std.meta.stringToEnum(encoder.Instruction.Prefix, fixes_name[0..index]).?
            else
                .none;
            if (fixes_prefix != prefix) continue;
            const pattern = fixes_name[if (space_index) |index| index + " ".len else 0..];
-            const wildcard_index = std.mem.indexOfScalar(u8, pattern, '_').?;
+            const wildcard_index = std.mem.findScalar(u8, pattern, '_').?;
            const mnem_prefix = pattern[0..wildcard_index];
            const mnem_suffix = pattern[wildcard_index + "_".len ..];
            if (!std.mem.startsWith(u8, mnem_name, mnem_prefix)) continue;
@@ -177956,11 +177956,11 @@ fn moveStrategy(cg: *CodeGen, ty: Type, class: Register.Class, aligned: bool) !M
        .sse => switch (ty.zigTypeTag(zcu)) {
            else => {
                const classes = std.mem.sliceTo(&abi.classifySystemV(ty, zcu, cg.target, .other), .none);
-                assert(std.mem.indexOfNone(abi.Class, classes, &.{
+                assert(std.mem.findNone(abi.Class, classes, &.{
                    .integer, .sse, .sseup, .memory, .float, .float_combine,
                }) == null);
                const abi_size = ty.abiSize(zcu);
-                if (abi_size < 4 or std.mem.indexOfScalar(abi.Class, classes, .integer) != null) switch (abi_size) {
+                if (abi_size < 4 or std.mem.findScalar(abi.Class, classes, .integer) != null) switch (abi_size) {
                    1 => return if (cg.hasFeature(.avx)) .{ .vex_insert_extract = .{
                        .insert = .{ .vp_b, .insr },
                        .extract = .{ .vp_b, .extr },
@@ -182757,8 +182757,8 @@ const Temp = struct {
            const class = classes[class_index];
            next_class_index = @intCast(switch (class) {
                .integer, .memory, .float, .float_combine => class_index + 1,
-                .sse => std.mem.indexOfNonePos(abi.Class, classes, class_index + 1, &.{.sseup}) orelse classes.len,
-                .x87 => std.mem.indexOfNonePos(abi.Class, classes, class_index + 1, &.{.x87up}) orelse classes.len,
+                .sse => std.mem.findNonePos(abi.Class, classes, class_index + 1, &.{.sseup}) orelse classes.len,
+                .x87 => std.mem.findNonePos(abi.Class, classes, class_index + 1, &.{.x87up}) orelse classes.len,
                .sseup, .x87up, .none, .win_i128, .integer_per_element => unreachable,
            });
            const part_size = switch (class) {
@@ -187641,7 +187641,7 @@ const Select = struct {
            s.cg.asmOps(mir_tag, mir_ops) catch |err| switch (err) {
                error.InvalidInstruction => {
                    const fixes = @tagName(mir_tag[0]);
-                    const fixes_blank = std.mem.indexOfScalar(u8, fixes, '_').?;
+                    const fixes_blank = std.mem.findScalar(u8, fixes, '_').?;
                    return s.cg.fail("invalid instruction: '{s}{s}{s} {s} {s} {s} {s}'", .{
                        fixes[0..fixes_blank],
                        @tagName(mir_tag[1]),
@@ -187721,7 +187721,7 @@ const Select = struct {
            .add, .com, .comi, .div, .divr, .mul, .st, .sub, .subr, .ucom, .ucomi => s.top +%= 1,
            else => {
                const fixes = @tagName(mir_tag[0]);
-                const fixes_blank = std.mem.indexOfScalar(u8, fixes, '_').?;
+                const fixes_blank = std.mem.findScalar(u8, fixes, '_').?;
                std.debug.panic("{s}: {s}{s}{s}\n", .{
                    @src().fn_name,
                    fixes[0..fixes_blank],
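The one variant not sketched above is findNonePos, the positional form of findNone used in the Temp splitting code; below it, a summary of the renames exercised by the hunks shown here (signatures assumed unchanged apart from the names):

const std = @import("std");

test "findNonePos skips a run starting at an offset" {
    const Class = enum { sse, sseup, x87 }; // stand-in, not the real abi.Class
    const classes = [_]Class{ .sse, .sseup, .sseup, .x87 };
    // First entry at or after index 1 that is not .sseup.
    try std.testing.expectEqual(@as(?usize, 3), std.mem.findNonePos(Class, &classes, 1, &.{.sseup}));
}

// Renames seen in the hunks above (old, deprecated -> new):
//   std.mem.indexOf           -> std.mem.find
//   std.mem.indexOfScalar     -> std.mem.findScalar
//   std.mem.indexOfScalarPos  -> std.mem.findScalarPos
//   std.mem.lastIndexOfScalar -> std.mem.findScalarLast
//   std.mem.indexOfNone       -> std.mem.findNone
//   std.mem.indexOfNonePos    -> std.mem.findNonePos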
