langref: remove line numbers from code samples

It's unnecessary, more complicated, bloated, and it messes up the table
of operators.
Author: Andrew Kelley
Date:   2024-01-21 17:13:59 -07:00
Parent: a054c01f5c
Commit: 9be831e15a
2 changed files with 6 additions and 43 deletions
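For orientation: before this change, docgen wrapped every rendered source line in a <span class="line"> element (the start_line/end_line constants removed below) and the stylesheet painted the number in front of each line with a CSS counter. A simplified sketch of the markup difference, with the tok-* highlighting spans omitted:

    Before:  <pre><code><span class="line">const x = 1;</span>
             <span class="line">const y = 2;</span></code></pre>

    After:   <pre><code>const x = 1;
             const y = 2;</code></pre>

The surrounding markup here is an approximation; the diff below only shows the <code> open/close writes and the per-line span constants.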


@@ -200,24 +200,9 @@
       visibility: visible;
     }
-    pre {
-      counter-reset: line;
-    }
-    pre .line:before {
-      counter-increment: line;
-      content: counter(line);
-      display: inline-block;
-      padding-right: 1em;
-      width: 2em;
-      text-align: right;
-      color: #999;
-    }
     th pre code {
       background: none;
     }
-    th .line:before {
-      display: none;
-    }
     @media (prefers-color-scheme: dark) {
       body{
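The last removed rule is also what the commit message means by the table of operators: highlighted snippets embedded in table headers had to opt back out of the counter via th .line:before { display: none; }. A hypothetical, simplified sketch of the markup that rule existed for:

    <!-- illustration only: a header cell in the operator table holds a
         highlighted snippet, which must not receive a line number -->
    <th><pre><code><span class="line">a +% b</span></code></pre></th>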


@@ -947,19 +947,8 @@ fn isType(name: []const u8) bool {
     return false;
 }
 
-const start_line = "<span class=\"line\">";
-const end_line = "</span>";
-
 fn writeEscapedLines(out: anytype, text: []const u8) !void {
-    for (text) |char| {
-        if (char == '\n') {
-            try out.writeAll(end_line);
-            try out.writeAll("\n");
-            try out.writeAll(start_line);
-        } else {
-            try writeEscaped(out, &[_]u8{char});
-        }
-    }
+    return writeEscaped(out, text);
 }
 
 fn tokenizeAndPrintRaw(
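The old writeEscapedLines had to walk the text byte by byte so it could close and reopen a line span at every '\n'; with the spans gone it simply forwards the whole slice to writeEscaped. That helper is not part of this diff; the following is only a minimal sketch of what such an HTML-escaping routine typically looks like (the exact escape set used by docgen is an assumption):

    // Hypothetical sketch, not the actual docgen implementation:
    // escape the characters that are significant in HTML text content.
    fn writeEscaped(out: anytype, input: []const u8) !void {
        for (input) |c| {
            switch (c) {
                '&' => try out.writeAll("&amp;"),
                '<' => try out.writeAll("&lt;"),
                '>' => try out.writeAll("&gt;"),
                '"' => try out.writeAll("&quot;"),
                else => try out.writeByte(c),
            }
        }
    }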
@@ -972,7 +961,7 @@ fn tokenizeAndPrintRaw(
     const src_non_terminated = mem.trim(u8, raw_src, " \r\n");
     const src = try allocator.dupeZ(u8, src_non_terminated);
 
-    try out.writeAll("<code>" ++ start_line);
+    try out.writeAll("<code>");
     var tokenizer = std.zig.Tokenizer.init(src);
     var index: usize = 0;
     var next_tok_is_fn = false;
@@ -1062,6 +1051,7 @@ fn tokenizeAndPrintRaw(
             },
 
             .string_literal,
+            .multiline_string_literal_line,
             .char_literal,
             => {
                 try out.writeAll("<span class=\"tok-str\">");
@@ -1069,18 +1059,6 @@ fn tokenizeAndPrintRaw(
                 try out.writeAll("</span>");
             },
 
-            .multiline_string_literal_line => {
-                if (src[token.loc.end - 1] == '\n') {
-                    try out.writeAll("<span class=\"tok-str\">");
-                    try writeEscaped(out, src[token.loc.start .. token.loc.end - 1]);
-                    try out.writeAll("</span>" ++ end_line ++ "\n" ++ start_line);
-                } else {
-                    try out.writeAll("<span class=\"tok-str\">");
-                    try writeEscaped(out, src[token.loc.start..token.loc.end]);
-                    try out.writeAll("</span>");
-                }
-            },
-
             .builtin => {
                 try out.writeAll("<span class=\"tok-builtin\">");
                 try writeEscaped(out, src[token.loc.start..token.loc.end]);
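With the per-line spans gone, a multiline string literal no longer has to break its highlight span at every newline, so the dedicated prong above disappears and the token falls into the ordinary string-literal prong. Reassembled from the hunks above, the merged prong now reads roughly:

    .string_literal,
    .multiline_string_literal_line,
    .char_literal,
    => {
        try out.writeAll("<span class=\"tok-str\">");
        try writeEscaped(out, src[token.loc.start..token.loc.end]);
        try out.writeAll("</span>");
    },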
@@ -1211,7 +1189,7 @@ fn tokenizeAndPrintRaw(
         }
         index = token.loc.end;
     }
-    try out.writeAll(end_line ++ "</code>");
+    try out.writeAll("</code>");
 }
 
 fn tokenizeAndPrint(
@@ -1234,9 +1212,9 @@ fn printSourceBlock(allocator: Allocator, docgen_tokenizer: *Tokenizer, out: any
             const raw_source = docgen_tokenizer.buffer[syntax_block.source_token.start..syntax_block.source_token.end];
             const trimmed_raw_source = mem.trim(u8, raw_source, " \r\n");
 
-            try out.writeAll("<code>" ++ start_line);
+            try out.writeAll("<code>");
             try writeEscapedLines(out, trimmed_raw_source);
-            try out.writeAll(end_line ++ "</code>");
+            try out.writeAll("</code>");
         },
     }
     try out.writeAll("</pre></figure>");