diff --git a/CMakeLists.txt b/CMakeLists.txt index be9931f6dd..b761650385 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -569,6 +569,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/src/clang_options_data.zig" "${CMAKE_SOURCE_DIR}/src/codegen.zig" "${CMAKE_SOURCE_DIR}/src/codegen/c.zig" + "${CMAKE_SOURCE_DIR}/src/codegen/c/type.zig" "${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig" "${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig" "${CMAKE_SOURCE_DIR}/src/glibc.zig" @@ -784,7 +785,7 @@ set_target_properties(zig2 PROPERTIES COMPILE_FLAGS ${ZIG2_COMPILE_FLAGS} LINK_FLAGS ${ZIG2_LINK_FLAGS} ) -target_include_directories(zig2 PUBLIC "${CMAKE_SOURCE_DIR}/lib") +target_include_directories(zig2 PUBLIC "${CMAKE_SOURCE_DIR}/stage1") target_link_libraries(zig2 LINK_PUBLIC zigcpp) if(MSVC) diff --git a/build.zig b/build.zig index f75efeb8b4..28109d8fed 100644 --- a/build.zig +++ b/build.zig @@ -509,8 +509,39 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void { run_opt.addArg("-o"); run_opt.addFileSourceArg(.{ .path = "stage1/zig1.wasm" }); + const CopyFileStep = struct { + const Step = std.Build.Step; + const FileSource = std.Build.FileSource; + const CopyFileStep = @This(); + + step: Step, + builder: *std.Build, + source: FileSource, + dest_rel_path: []const u8, + + pub fn init(builder: *std.Build, source: FileSource, dest_rel_path: []const u8) CopyFileStep { + return CopyFileStep{ + .builder = builder, + .step = Step.init(.custom, builder.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }), builder.allocator, make), + .source = source.dupe(builder), + .dest_rel_path = builder.dupePath(dest_rel_path), + }; + } + + fn make(step: *Step) !void { + const self = @fieldParentPtr(CopyFileStep, "step", step); + const full_src_path = self.source.getPath(self.builder); + const full_dest_path = self.builder.pathFromRoot(self.dest_rel_path); + try self.builder.updateFile(full_src_path, full_dest_path); + } + }; + + const copy_zig_h = try 
b.allocator.create(CopyFileStep); + copy_zig_h.* = CopyFileStep.init(b, .{ .path = "lib/zig.h" }, "stage1/zig.h"); + const update_zig1_step = b.step("update-zig1", "Update stage1/zig1.wasm"); update_zig1_step.dependOn(&run_opt.step); + update_zig1_step.dependOn(©_zig_h.step); } fn addCompilerStep( diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 78fcf68b56..164b81d651 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -508,7 +508,7 @@ pub fn HashMap( /// If a new entry needs to be stored, this function asserts there /// is enough capacity to store it. pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult { - return self.unmanaged.getOrPutAssumeCapacityAdapted(self.allocator, key, ctx); + return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx); } pub fn getOrPutValue(self: *Self, key: K, value: V) Allocator.Error!Entry { @@ -2130,7 +2130,7 @@ test "std.hash_map getOrPutAdapted" { try testing.expectEqual(map.count(), keys.len); inline for (keys, 0..) 
|key_str, i| { - const result = try map.getOrPutAdapted(key_str, AdaptedContext{}); + const result = map.getOrPutAssumeCapacityAdapted(key_str, AdaptedContext{}); try testing.expect(result.found_existing); try testing.expectEqual(real_keys[i], result.key_ptr.*); try testing.expectEqual(@as(u64, i) * 2, result.value_ptr.*); diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index afdd6a5a8d..56b36aaa81 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -433,15 +433,9 @@ pub fn MultiArrayList(comptime S: type) type { } fn capacityInBytes(capacity: usize) usize { - if (builtin.zig_backend == .stage2_c) { - var bytes: usize = 0; - for (sizes.bytes) |size| bytes += size * capacity; - return bytes; - } else { - const sizes_vector: @Vector(sizes.bytes.len, usize) = sizes.bytes; - const capacity_vector = @splat(sizes.bytes.len, capacity); - return @reduce(.Add, capacity_vector * sizes_vector); - } + comptime var elem_bytes: usize = 0; + inline for (sizes.bytes) |size| elem_bytes += size; + return elem_bytes * capacity; } fn allocatedBytes(self: Self) []align(@alignOf(S)) u8 { diff --git a/lib/zig.h b/lib/zig.h index 0756d9f731..d336ecb2e2 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -1,6 +1,8 @@ #undef linux +#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__ #define __STDC_WANT_IEC_60559_TYPES_EXT__ +#endif #include #include #include @@ -286,701 +288,802 @@ typedef char bool; #endif #if __STDC_VERSION__ >= 201112L -#define zig_noreturn _Noreturn void +#define zig_noreturn _Noreturn #elif zig_has_attribute(noreturn) || defined(zig_gnuc) -#define zig_noreturn __attribute__((noreturn)) void +#define zig_noreturn __attribute__((noreturn)) #elif _MSC_VER -#define zig_noreturn __declspec(noreturn) void +#define zig_noreturn __declspec(noreturn) #else -#define zig_noreturn void +#define zig_noreturn #endif #define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T)) -typedef uintptr_t zig_usize; -typedef intptr_t zig_isize; -typedef signed short int 
zig_c_short; -typedef unsigned short int zig_c_ushort; -typedef signed int zig_c_int; -typedef unsigned int zig_c_uint; -typedef signed long int zig_c_long; -typedef unsigned long int zig_c_ulong; -typedef signed long long int zig_c_longlong; -typedef unsigned long long int zig_c_ulonglong; +#define zig_compiler_rt_abbrev_uint32_t si +#define zig_compiler_rt_abbrev_int32_t si +#define zig_compiler_rt_abbrev_uint64_t di +#define zig_compiler_rt_abbrev_int64_t di +#define zig_compiler_rt_abbrev_zig_u128 ti +#define zig_compiler_rt_abbrev_zig_i128 ti +#define zig_compiler_rt_abbrev_zig_f16 hf +#define zig_compiler_rt_abbrev_zig_f32 sf +#define zig_compiler_rt_abbrev_zig_f64 df +#define zig_compiler_rt_abbrev_zig_f80 xf +#define zig_compiler_rt_abbrev_zig_f128 tf -typedef uint8_t zig_u8; -typedef int8_t zig_i8; -typedef uint16_t zig_u16; -typedef int16_t zig_i16; -typedef uint32_t zig_u32; -typedef int32_t zig_i32; -typedef uint64_t zig_u64; -typedef int64_t zig_i64; +zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t); +zig_extern void *memset (void *, int, size_t); -#define zig_as_u8(val) UINT8_C(val) -#define zig_as_i8(val) INT8_C(val) -#define zig_as_u16(val) UINT16_C(val) -#define zig_as_i16(val) INT16_C(val) -#define zig_as_u32(val) UINT32_C(val) -#define zig_as_i32(val) INT32_C(val) -#define zig_as_u64(val) UINT64_C(val) -#define zig_as_i64(val) INT64_C(val) +/* ===================== 8/16/32/64-bit Integer Support ===================== */ + +#if __STDC_VERSION__ >= 199901L || _MSC_VER +#include +#else + +#if SCHAR_MIN == ~0x7F && SCHAR_MAX == 0x7F && UCHAR_MAX == 0xFF +typedef unsigned char uint8_t; +typedef signed char int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif SHRT_MIN == ~0x7F && SHRT_MAX == 0x7F && USHRT_MAX == 0xFF +typedef unsigned short uint8_t; +typedef signed short int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif INT_MIN == ~0x7F && INT_MAX == 0x7F && UINT_MAX == 0xFF +typedef unsigned int 
uint8_t; +typedef signed int int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif LONG_MIN == ~0x7F && LONG_MAX == 0x7F && ULONG_MAX == 0xFF +typedef unsigned long uint8_t; +typedef signed long int8_t; +#define INT8_C(c) c##L +#define UINT8_C(c) c##LU +#elif LLONG_MIN == ~0x7F && LLONG_MAX == 0x7F && ULLONG_MAX == 0xFF +typedef unsigned long long uint8_t; +typedef signed long long int8_t; +#define INT8_C(c) c##LL +#define UINT8_C(c) c##LLU +#endif +#define INT8_MIN (~INT8_C(0x7F)) +#define INT8_MAX ( INT8_C(0x7F)) +#define UINT8_MAX ( INT8_C(0xFF)) + +#if SCHAR_MIN == ~0x7FFF && SCHAR_MAX == 0x7FFF && UCHAR_MAX == 0xFFFF +typedef unsigned char uint16_t; +typedef signed char int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif SHRT_MIN == ~0x7FFF && SHRT_MAX == 0x7FFF && USHRT_MAX == 0xFFFF +typedef unsigned short uint16_t; +typedef signed short int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif INT_MIN == ~0x7FFF && INT_MAX == 0x7FFF && UINT_MAX == 0xFFFF +typedef unsigned int uint16_t; +typedef signed int int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif LONG_MIN == ~0x7FFF && LONG_MAX == 0x7FFF && ULONG_MAX == 0xFFFF +typedef unsigned long uint16_t; +typedef signed long int16_t; +#define INT16_C(c) c##L +#define UINT16_C(c) c##LU +#elif LLONG_MIN == ~0x7FFF && LLONG_MAX == 0x7FFF && ULLONG_MAX == 0xFFFF +typedef unsigned long long uint16_t; +typedef signed long long int16_t; +#define INT16_C(c) c##LL +#define UINT16_C(c) c##LLU +#endif +#define INT16_MIN (~INT16_C(0x7FFF)) +#define INT16_MAX ( INT16_C(0x7FFF)) +#define UINT16_MAX ( INT16_C(0xFFFF)) + +#if SCHAR_MIN == ~0x7FFFFFFF && SCHAR_MAX == 0x7FFFFFFF && UCHAR_MAX == 0xFFFFFFFF +typedef unsigned char uint32_t; +typedef signed char int32_t; +#define INT32_C(c) c +#define UINT32_C(c) c##U +#elif SHRT_MIN == ~0x7FFFFFFF && SHRT_MAX == 0x7FFFFFFF && USHRT_MAX == 0xFFFFFFFF +typedef unsigned short uint32_t; +typedef signed short int32_t; +#define INT32_C(c) c +#define 
UINT32_C(c) c##U +#elif INT_MIN == ~0x7FFFFFFF && INT_MAX == 0x7FFFFFFF && UINT_MAX == 0xFFFFFFFF +typedef unsigned int uint32_t; +typedef signed int int32_t; +#define INT32_C(c) c +#define UINT32_C(c) c##U +#elif LONG_MIN == ~0x7FFFFFFF && LONG_MAX == 0x7FFFFFFF && ULONG_MAX == 0xFFFFFFFF +typedef unsigned long uint32_t; +typedef signed long int32_t; +#define INT32_C(c) c##L +#define UINT32_C(c) c##LU +#elif LLONG_MIN == ~0x7FFFFFFF && LLONG_MAX == 0x7FFFFFFF && ULLONG_MAX == 0xFFFFFFFF +typedef unsigned long long uint32_t; +typedef signed long long int32_t; +#define INT32_C(c) c##LL +#define UINT32_C(c) c##LLU +#endif +#define INT32_MIN (~INT32_C(0x7FFFFFFF)) +#define INT32_MAX ( INT32_C(0x7FFFFFFF)) +#define UINT32_MAX ( INT32_C(0xFFFFFFFF)) + +#if SCHAR_MIN == ~0x7FFFFFFFFFFFFFFF && SCHAR_MAX == 0x7FFFFFFFFFFFFFFF && UCHAR_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned char uint64_t; +typedef signed char int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif SHRT_MIN == ~0x7FFFFFFFFFFFFFFF && SHRT_MAX == 0x7FFFFFFFFFFFFFFF && USHRT_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned short uint64_t; +typedef signed short int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif INT_MIN == ~0x7FFFFFFFFFFFFFFF && INT_MAX == 0x7FFFFFFFFFFFFFFF && UINT_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned int uint64_t; +typedef signed int int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif LONG_MIN == ~0x7FFFFFFFFFFFFFFF && LONG_MAX == 0x7FFFFFFFFFFFFFFF && ULONG_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned long uint64_t; +typedef signed long int64_t; +#define INT64_C(c) c##L +#define UINT64_C(c) c##LU +#elif LLONG_MIN == ~0x7FFFFFFFFFFFFFFF && LLONG_MAX == 0x7FFFFFFFFFFFFFFF && ULLONG_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned long long uint64_t; +typedef signed long long int64_t; +#define INT64_C(c) c##LL +#define UINT64_C(c) c##LLU +#endif +#define INT64_MIN (~INT64_C(0x7FFFFFFFFFFFFFFF)) +#define INT64_MAX ( INT64_C(0x7FFFFFFFFFFFFFFF)) +#define UINT64_MAX 
( INT64_C(0xFFFFFFFFFFFFFFFF)) + +typedef size_t uintptr_t; +typedef ptrdiff_t intptr_t; + +#endif -#define zig_minInt_u8 zig_as_u8(0) -#define zig_maxInt_u8 UINT8_MAX #define zig_minInt_i8 INT8_MIN #define zig_maxInt_i8 INT8_MAX -#define zig_minInt_u16 zig_as_u16(0) -#define zig_maxInt_u16 UINT16_MAX +#define zig_minInt_u8 UINT8_C(0) +#define zig_maxInt_u8 UINT8_MAX #define zig_minInt_i16 INT16_MIN #define zig_maxInt_i16 INT16_MAX -#define zig_minInt_u32 zig_as_u32(0) -#define zig_maxInt_u32 UINT32_MAX +#define zig_minInt_u16 UINT16_C(0) +#define zig_maxInt_u16 UINT16_MAX #define zig_minInt_i32 INT32_MIN #define zig_maxInt_i32 INT32_MAX -#define zig_minInt_u64 zig_as_u64(0) -#define zig_maxInt_u64 UINT64_MAX +#define zig_minInt_u32 UINT32_C(0) +#define zig_maxInt_u32 UINT32_MAX #define zig_minInt_i64 INT64_MIN #define zig_maxInt_i64 INT64_MAX +#define zig_minInt_u64 UINT64_C(0) +#define zig_maxInt_u64 UINT64_MAX -#define zig_compiler_rt_abbrev_u32 si -#define zig_compiler_rt_abbrev_i32 si -#define zig_compiler_rt_abbrev_u64 di -#define zig_compiler_rt_abbrev_i64 di -#define zig_compiler_rt_abbrev_u128 ti -#define zig_compiler_rt_abbrev_i128 ti -#define zig_compiler_rt_abbrev_f16 hf -#define zig_compiler_rt_abbrev_f32 sf -#define zig_compiler_rt_abbrev_f64 df -#define zig_compiler_rt_abbrev_f80 xf -#define zig_compiler_rt_abbrev_f128 tf - -zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize); -zig_extern void *memset (void *, int, zig_usize); - -/* ==================== 8/16/32/64-bit Integer Routines ===================== */ - -#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits)) -#define zig_expand_maxInt(Type, bits) zig_maxInt(Type, bits) -#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits) -#define zig_expand_minInt(Type, bits) zig_minInt(Type, bits) +#define zig_intLimit(s, w, limit, bits) zig_shr_##s##w(zig_##limit##Int_##s##w, w - (bits)) +#define 
zig_minInt_i(w, bits) zig_intLimit(i, w, min, bits) +#define zig_maxInt_i(w, bits) zig_intLimit(i, w, max, bits) +#define zig_minInt_u(w, bits) zig_intLimit(u, w, min, bits) +#define zig_maxInt_u(w, bits) zig_intLimit(u, w, max, bits) #define zig_int_operator(Type, RhsType, operation, operator) \ - static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##RhsType rhs) { \ + static inline Type zig_##operation(Type lhs, RhsType rhs) { \ return lhs operator rhs; \ } #define zig_int_basic_operator(Type, operation, operator) \ - zig_int_operator(Type, Type, operation, operator) + zig_int_operator(Type, Type, operation, operator) #define zig_int_shift_operator(Type, operation, operator) \ - zig_int_operator(Type, u8, operation, operator) + zig_int_operator(Type, uint8_t, operation, operator) #define zig_int_helpers(w) \ - zig_int_basic_operator(u##w, and, &) \ - zig_int_basic_operator(i##w, and, &) \ - zig_int_basic_operator(u##w, or, |) \ - zig_int_basic_operator(i##w, or, |) \ - zig_int_basic_operator(u##w, xor, ^) \ - zig_int_basic_operator(i##w, xor, ^) \ - zig_int_shift_operator(u##w, shl, <<) \ - zig_int_shift_operator(i##w, shl, <<) \ - zig_int_shift_operator(u##w, shr, >>) \ + zig_int_basic_operator(uint##w##_t, and_u##w, &) \ + zig_int_basic_operator( int##w##_t, and_i##w, &) \ + zig_int_basic_operator(uint##w##_t, or_u##w, |) \ + zig_int_basic_operator( int##w##_t, or_i##w, |) \ + zig_int_basic_operator(uint##w##_t, xor_u##w, ^) \ + zig_int_basic_operator( int##w##_t, xor_i##w, ^) \ + zig_int_shift_operator(uint##w##_t, shl_u##w, <<) \ + zig_int_shift_operator( int##w##_t, shl_i##w, <<) \ + zig_int_shift_operator(uint##w##_t, shr_u##w, >>) \ \ - static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \ - zig_i##w sign_mask = lhs < zig_as_i##w(0) ? -zig_as_i##w(1) : zig_as_i##w(0); \ + static inline int##w##_t zig_shr_i##w(int##w##_t lhs, uint8_t rhs) { \ + int##w##_t sign_mask = lhs < INT##w##_C(0) ? 
-INT##w##_C(1) : INT##w##_C(0); \ return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \ } \ \ - static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \ - return val ^ zig_maxInt(u##w, bits); \ + static inline uint##w##_t zig_not_u##w(uint##w##_t val, uint8_t bits) { \ + return val ^ zig_maxInt_u(w, bits); \ } \ \ - static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \ + static inline int##w##_t zig_not_i##w(int##w##_t val, uint8_t bits) { \ (void)bits; \ return ~val; \ } \ \ - static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \ - return val & zig_maxInt(u##w, bits); \ + static inline uint##w##_t zig_wrap_u##w(uint##w##_t val, uint8_t bits) { \ + return val & zig_maxInt_u(w, bits); \ } \ \ - static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \ - return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \ - ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \ + static inline int##w##_t zig_wrap_i##w(int##w##_t val, uint8_t bits) { \ + return (val & UINT##w##_C(1) << (bits - UINT8_C(1))) != 0 \ + ? val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \ } \ \ - zig_int_basic_operator(u##w, div_floor, /) \ + zig_int_basic_operator(uint##w##_t, div_floor_u##w, /) \ \ - static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \ - return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \ + static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \ + return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < INT##w##_C(0)); \ } \ \ - zig_int_basic_operator(u##w, mod, %) \ + zig_int_basic_operator(uint##w##_t, mod_u##w, %) \ \ - static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \ - zig_i##w rem = lhs % rhs; \ - return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? rhs : zig_as_i##w(0)); \ + static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \ + int##w##_t rem = lhs % rhs; \ + return rem + (((lhs ^ rhs) & rem) < INT##w##_C(0) ? 
rhs : INT##w##_C(0)); \ } \ \ - static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \ return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \ } \ \ - static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ - return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \ + static inline int##w##_t zig_shlw_i##w(int##w##_t lhs, uint8_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)zig_shl_u##w((uint##w##_t)lhs, (uint##w##_t)rhs), bits); \ } \ \ - static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + static inline uint##w##_t zig_addw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ return zig_wrap_u##w(lhs + rhs, bits); \ } \ \ - static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \ + static inline int##w##_t zig_addw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs + (uint##w##_t)rhs), bits); \ } \ \ - static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + static inline uint##w##_t zig_subw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ return zig_wrap_u##w(lhs - rhs, bits); \ } \ \ - static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \ + static inline int##w##_t zig_subw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs - (uint##w##_t)rhs), bits); \ } \ \ - static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + static inline uint##w##_t zig_mulw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ return zig_wrap_u##w(lhs * rhs, bits); \ } \ \ - static inline zig_i##w 
zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \ + static inline int##w##_t zig_mulw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs * (uint##w##_t)rhs), bits); \ } zig_int_helpers(8) zig_int_helpers(16) zig_int_helpers(32) zig_int_helpers(64) -static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_u32 full_res; + uint32_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); - return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); #else *res = zig_addw_u32(lhs, rhs, bits); return *res < lhs; #endif } -static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n, - const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +static inline void zig_vaddo_u32(uint8_t *ov, uint32_t *res, int n, + const uint32_t *lhs, const uint32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_i32 full_res; + int32_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i32 full_res = __addosi4(lhs, rhs, &overflow_int); + int overflow_int; + int32_t full_res = __addosi4(lhs, rhs, 
&overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); - return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); } -static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n, - const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +static inline void zig_vaddo_i32(uint8_t *ov, int32_t *res, int n, + const int32_t *lhs, const int32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_u64 full_res; + uint64_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); - return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); #else *res = zig_addw_u64(lhs, rhs, bits); return *res < lhs; #endif } -static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n, - const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +static inline void zig_vaddo_u64(uint8_t *ov, uint64_t *res, int n, + const uint64_t *lhs, const uint64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_i64 full_res; + int64_t full_res; 
bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i64 full_res = __addodi4(lhs, rhs, &overflow_int); + int overflow_int; + int64_t full_res = __addodi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); - return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); + return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); } -static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n, - const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +static inline void zig_vaddo_i64(uint8_t *ov, int64_t *res, int n, + const int64_t *lhs, const int64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_u8 full_res; + uint8_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); - return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); + return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u8)full_res; + *res = (uint8_t)full_res; return overflow; #endif } -static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n, - const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +static inline void zig_vaddo_u8(uint8_t *ov, uint8_t *res, int n, + const uint8_t *lhs, const uint8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_addo_i8(int8_t 
*res, int8_t lhs, int8_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_i8 full_res; + int8_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_i8(full_res, bits); - return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); + return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i8)full_res; + *res = (int8_t)full_res; return overflow; #endif } -static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n, - const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +static inline void zig_vaddo_i8(uint8_t *ov, int8_t *res, int n, + const int8_t *lhs, const int8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_u16 full_res; + uint16_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); - return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u16)full_res; + *res = (uint16_t)full_res; return overflow; #endif } -static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n, - const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +static inline void zig_vaddo_u16(uint8_t *ov, uint16_t *res, int n, + const uint16_t *lhs, const uint16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits); } 
-static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) || defined(zig_gnuc) - zig_i16 full_res; + int16_t full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); - return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i16)full_res; + *res = (int16_t)full_res; return overflow; #endif } -static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n, - const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +static inline void zig_vaddo_i16(uint8_t *ov, int16_t *res, int n, + const int16_t *lhs, const int16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_u32 full_res; + uint32_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); - return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); #else *res = zig_subw_u32(lhs, rhs, bits); return *res > lhs; #endif } -static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n, - const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +static inline void zig_vsubo_u32(uint8_t *ov, uint32_t *res, int n, + const uint32_t *lhs, const uint32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = 
zig_subo_u32(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_i32 full_res; + int32_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i32 full_res = __subosi4(lhs, rhs, &overflow_int); + int overflow_int; + int32_t full_res = __subosi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); - return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); } -static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n, - const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +static inline void zig_vsubo_i32(uint8_t *ov, int32_t *res, int n, + const int32_t *lhs, const int32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_u64 full_res; + uint64_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); - return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); #else *res = zig_subw_u64(lhs, rhs, bits); return *res > lhs; #endif } -static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n, - const 
zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +static inline void zig_vsubo_u64(uint8_t *ov, uint64_t *res, int n, + const uint64_t *lhs, const uint64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_i64 full_res; + int64_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i64 full_res = __subodi4(lhs, rhs, &overflow_int); + int overflow_int; + int64_t full_res = __subodi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); - return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); + return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); } -static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n, - const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +static inline void zig_vsubo_i64(uint8_t *ov, int64_t *res, int n, + const int64_t *lhs, const int64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_u8 full_res; + uint8_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); - return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); + return overflow || full_res < 
zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u8)full_res; + *res = (uint8_t)full_res; return overflow; #endif } -static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n, - const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +static inline void zig_vsubo_u8(uint8_t *ov, uint8_t *res, int n, + const uint8_t *lhs, const uint8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_i8 full_res; + int8_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_i8(full_res, bits); - return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); + return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i8)full_res; + *res = (int8_t)full_res; return overflow; #endif } -static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n, - const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +static inline void zig_vsubo_i8(uint8_t *ov, int8_t *res, int n, + const int8_t *lhs, const int8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_u16 full_res; + uint16_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); - return 
overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u16)full_res; + *res = (uint16_t)full_res; return overflow; #endif } -static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n, - const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +static inline void zig_vsubo_u16(uint8_t *ov, uint16_t *res, int n, + const uint16_t *lhs, const uint16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) || defined(zig_gnuc) - zig_i16 full_res; + int16_t full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); - return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i16)full_res; + *res = (int16_t)full_res; return overflow; #endif } -static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n, - const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +static inline void zig_vsubo_i16(uint8_t *ov, int16_t *res, int n, + const int16_t *lhs, const int16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || 
defined(zig_gnuc) - zig_u32 full_res; + uint32_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); - return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); #else *res = zig_mulw_u32(lhs, rhs, bits); - return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs; + return rhs != UINT32_C(0) && lhs > zig_maxInt_u(32, bits) / rhs; #endif } -static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n, - const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +static inline void zig_vmulo_u32(uint8_t *ov, uint32_t *res, int n, + const uint32_t *lhs, const uint32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_i32 full_res; + int32_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i32 full_res = __mulosi4(lhs, rhs, &overflow_int); + int overflow_int; + int32_t full_res = __mulosi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); - return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); } -static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n, - const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +static inline void zig_vmulo_i32(uint8_t *ov, int32_t *res, int n, + const int32_t *lhs, const 
int32_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_u64 full_res; + uint64_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); - return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); #else *res = zig_mulw_u64(lhs, rhs, bits); - return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs; + return rhs != UINT64_C(0) && lhs > zig_maxInt_u(64, bits) / rhs; #endif } -static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n, - const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +static inline void zig_vmulo_u64(uint8_t *ov, uint64_t *res, int n, + const uint64_t *lhs, const uint64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits); } -zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_i64 full_res; + int64_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; - zig_i64 full_res = __mulodi4(lhs, rhs, &overflow_int); + int overflow_int; + int64_t full_res = __mulodi4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); - return overflow || full_res < zig_minInt(i64, bits) || full_res > 
zig_maxInt(i64, bits); + return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); } -static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n, - const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +static inline void zig_vmulo_i64(uint8_t *ov, int64_t *res, int n, + const int64_t *lhs, const int64_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_u8 full_res; + uint8_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); - return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); + return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u8)full_res; + *res = (uint8_t)full_res; return overflow; #endif } -static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n, - const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +static inline void zig_vmulo_u8(uint8_t *ov, uint8_t *res, int n, + const uint8_t *lhs, const uint8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_i8 full_res; + int8_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_i8(full_res, bits); - return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); + return overflow || full_res < zig_minInt_i(8, 
bits) || full_res > zig_maxInt_i(8, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i8)full_res; + *res = (int8_t)full_res; return overflow; #endif } -static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n, - const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +static inline void zig_vmulo_i8(uint8_t *ov, int8_t *res, int n, + const int8_t *lhs, const int8_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_u16 full_res; + uint16_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); - return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); #else - zig_u32 full_res; + uint32_t full_res; bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); - *res = (zig_u16)full_res; + *res = (uint16_t)full_res; return overflow; #endif } -static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n, - const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +static inline void zig_vmulo_u16(uint8_t *ov, uint16_t *res, int n, + const uint16_t *lhs, const uint16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits); } -static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) || defined(zig_gnuc) - zig_i16 full_res; + int16_t full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); - 
return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); #else - zig_i32 full_res; + int32_t full_res; bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); - *res = (zig_i16)full_res; + *res = (int16_t)full_res; return overflow; #endif } -static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n, - const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +static inline void zig_vmulo_i16(uint8_t *ov, int16_t *res, int n, + const int16_t *lhs, const int16_t *rhs, uint8_t bits) { for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits); } #define zig_int_builtins(w) \ - static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \ *res = zig_shlw_u##w(lhs, rhs, bits); \ - return lhs > zig_maxInt(u##w, bits) >> rhs; \ + return lhs > zig_maxInt_u(w, bits) >> rhs; \ } \ \ - static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ + static inline bool zig_shlo_i##w(int##w##_t *res, int##w##_t lhs, uint8_t rhs, uint8_t bits) { \ *res = zig_shlw_i##w(lhs, rhs, bits); \ - zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \ - return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \ + int##w##_t mask = (int##w##_t)(UINT##w##_MAX << (bits - rhs - 1)); \ + return (lhs & mask) != INT##w##_C(0) && (lhs & mask) != mask; \ } \ \ - static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ - zig_u##w res; \ - if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \ - return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? 
zig_maxInt(u##w, bits) : res; \ + static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \ + return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \ } \ \ - static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - zig_i##w res; \ - if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \ - return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ + if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \ + return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } \ \ - static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ - zig_u##w res; \ - return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \ } \ \ - static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - zig_i##w res; \ + static inline int##w##_t zig_adds_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \ - return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } \ \ - static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ - zig_u##w res; \ - return zig_subo_u##w(&res, lhs, rhs, bits) ? 
zig_minInt(u##w, bits) : res; \ + static inline uint##w##_t zig_subs_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt_u(w, bits) : res; \ } \ \ - static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - zig_i##w res; \ + static inline int##w##_t zig_subs_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \ - return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } \ \ - static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ - zig_u##w res; \ - return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + static inline uint##w##_t zig_muls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \ } \ \ - static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ - zig_i##w res; \ + static inline int##w##_t zig_muls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \ - return (lhs ^ rhs) < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + return (lhs ^ rhs) < INT##w##_C(0) ? 
zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ } zig_int_builtins(8) zig_int_builtins(16) @@ -988,89 +1091,89 @@ zig_int_builtins(32) zig_int_builtins(64) #define zig_builtin8(name, val) __builtin_##name(val) -typedef zig_c_uint zig_Builtin8; +typedef unsigned int zig_Builtin8; #define zig_builtin16(name, val) __builtin_##name(val) -typedef zig_c_uint zig_Builtin16; +typedef unsigned int zig_Builtin16; #if INT_MIN <= INT32_MIN #define zig_builtin32(name, val) __builtin_##name(val) -typedef zig_c_uint zig_Builtin32; +typedef unsigned int zig_Builtin32; #elif LONG_MIN <= INT32_MIN #define zig_builtin32(name, val) __builtin_##name##l(val) -typedef zig_c_ulong zig_Builtin32; +typedef unsigned long zig_Builtin32; #endif #if INT_MIN <= INT64_MIN #define zig_builtin64(name, val) __builtin_##name(val) -typedef zig_c_uint zig_Builtin64; +typedef unsigned int zig_Builtin64; #elif LONG_MIN <= INT64_MIN #define zig_builtin64(name, val) __builtin_##name##l(val) -typedef zig_c_ulong zig_Builtin64; +typedef unsigned long zig_Builtin64; #elif LLONG_MIN <= INT64_MIN #define zig_builtin64(name, val) __builtin_##name##ll(val) -typedef zig_c_ulonglong zig_Builtin64; +typedef unsigned long long zig_Builtin64; #endif -static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) { +static inline uint8_t zig_byte_swap_u8(uint8_t val, uint8_t bits) { return zig_wrap_u8(val >> (8 - bits), bits); } -static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) { - return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits); +static inline int8_t zig_byte_swap_i8(int8_t val, uint8_t bits) { + return zig_wrap_i8((int8_t)zig_byte_swap_u8((uint8_t)val, bits), bits); } -static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) { - zig_u16 full_res; +static inline uint16_t zig_byte_swap_u16(uint16_t val, uint8_t bits) { + uint16_t full_res; #if zig_has_builtin(bswap16) || defined(zig_gnuc) full_res = __builtin_bswap16(val); #else - full_res = 
(zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 | - (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0; + full_res = (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 0), 8) << 8 | + (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 8), 8) >> 0; #endif return zig_wrap_u16(full_res >> (16 - bits), bits); } -static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) { - return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits); +static inline int16_t zig_byte_swap_i16(int16_t val, uint8_t bits) { + return zig_wrap_i16((int16_t)zig_byte_swap_u16((uint16_t)val, bits), bits); } -static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) { - zig_u32 full_res; +static inline uint32_t zig_byte_swap_u32(uint32_t val, uint8_t bits) { + uint32_t full_res; #if zig_has_builtin(bswap32) || defined(zig_gnuc) full_res = __builtin_bswap32(val); #else - full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 | - (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0; + full_res = (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 0), 16) << 16 | + (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 16), 16) >> 0; #endif return zig_wrap_u32(full_res >> (32 - bits), bits); } -static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) { - return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits); +static inline int32_t zig_byte_swap_i32(int32_t val, uint8_t bits) { + return zig_wrap_i32((int32_t)zig_byte_swap_u32((uint32_t)val, bits), bits); } -static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) { - zig_u64 full_res; +static inline uint64_t zig_byte_swap_u64(uint64_t val, uint8_t bits) { + uint64_t full_res; #if zig_has_builtin(bswap64) || defined(zig_gnuc) full_res = __builtin_bswap64(val); #else - full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 | - (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0; + full_res = (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 0), 32) << 32 | + 
(uint64_t)zig_byte_swap_u32((uint32_t)(val >> 32), 32) >> 0; #endif return zig_wrap_u64(full_res >> (64 - bits), bits); } -static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) { - return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits); +static inline int64_t zig_byte_swap_i64(int64_t val, uint8_t bits) { + return zig_wrap_i64((int64_t)zig_byte_swap_u64((uint64_t)val, bits), bits); } -static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) { - zig_u8 full_res; +static inline uint8_t zig_bit_reverse_u8(uint8_t val, uint8_t bits) { + uint8_t full_res; #if zig_has_builtin(bitreverse8) full_res = __builtin_bitreverse8(val); #else - static zig_u8 const lut[0x10] = { + static uint8_t const lut[0x10] = { 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf }; @@ -1079,62 +1182,62 @@ static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) { return zig_wrap_u8(full_res >> (8 - bits), bits); } -static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) { - return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits); +static inline int8_t zig_bit_reverse_i8(int8_t val, uint8_t bits) { + return zig_wrap_i8((int8_t)zig_bit_reverse_u8((uint8_t)val, bits), bits); } -static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) { - zig_u16 full_res; +static inline uint16_t zig_bit_reverse_u16(uint16_t val, uint8_t bits) { + uint16_t full_res; #if zig_has_builtin(bitreverse16) full_res = __builtin_bitreverse16(val); #else - full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 | - (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0; + full_res = (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 0), 8) << 8 | + (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 8), 8) >> 0; #endif return zig_wrap_u16(full_res >> (16 - bits), bits); } -static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) { - return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), 
bits); +static inline int16_t zig_bit_reverse_i16(int16_t val, uint8_t bits) { + return zig_wrap_i16((int16_t)zig_bit_reverse_u16((uint16_t)val, bits), bits); } -static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) { - zig_u32 full_res; +static inline uint32_t zig_bit_reverse_u32(uint32_t val, uint8_t bits) { + uint32_t full_res; #if zig_has_builtin(bitreverse32) full_res = __builtin_bitreverse32(val); #else - full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 | - (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0; + full_res = (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 0), 16) << 16 | + (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 16), 16) >> 0; #endif return zig_wrap_u32(full_res >> (32 - bits), bits); } -static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) { - return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits); +static inline int32_t zig_bit_reverse_i32(int32_t val, uint8_t bits) { + return zig_wrap_i32((int32_t)zig_bit_reverse_u32((uint32_t)val, bits), bits); } -static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) { - zig_u64 full_res; +static inline uint64_t zig_bit_reverse_u64(uint64_t val, uint8_t bits) { + uint64_t full_res; #if zig_has_builtin(bitreverse64) full_res = __builtin_bitreverse64(val); #else - full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 | - (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0; + full_res = (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 0), 32) << 32 | + (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 32), 32) >> 0; #endif return zig_wrap_u64(full_res >> (64 - bits), bits); } -static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) { - return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits); +static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) { + return zig_wrap_i64((int64_t)zig_bit_reverse_u64((uint64_t)val, bits), bits); } #define 
zig_builtin_popcount_common(w) \ - static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \ - return zig_popcount_u##w((zig_u##w)val, bits); \ + static inline uint8_t zig_popcount_i##w(int##w##_t val, uint8_t bits) { \ + return zig_popcount_u##w((uint##w##_t)val, bits); \ } #if zig_has_builtin(popcount) || defined(zig_gnuc) #define zig_builtin_popcount(w) \ - static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \ (void)bits; \ return zig_builtin##w(popcount, val); \ } \ @@ -1142,12 +1245,12 @@ static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) { zig_builtin_popcount_common(w) #else #define zig_builtin_popcount(w) \ - static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \ (void)bits; \ - zig_u##w temp = val - ((val >> 1) & (zig_maxInt_u##w / 3)); \ - temp = (temp & (zig_maxInt_u##w / 5)) + ((temp >> 2) & (zig_maxInt_u##w / 5)); \ - temp = (temp + (temp >> 4)) & (zig_maxInt_u##w / 17); \ - return temp * (zig_maxInt_u##w / 255) >> (w - 8); \ + uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \ + temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \ + temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \ + return temp * (UINT##w##_MAX / 255) >> (w - 8); \ } \ \ zig_builtin_popcount_common(w) @@ -1158,12 +1261,12 @@ zig_builtin_popcount(32) zig_builtin_popcount(64) #define zig_builtin_ctz_common(w) \ - static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \ - return zig_ctz_u##w((zig_u##w)val, bits); \ + static inline uint8_t zig_ctz_i##w(int##w##_t val, uint8_t bits) { \ + return zig_ctz_u##w((uint##w##_t)val, bits); \ } #if zig_has_builtin(ctz) || defined(zig_gnuc) #define zig_builtin_ctz(w) \ - static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_ctz_u##w(uint##w##_t val, 
uint8_t bits) { \ if (val == 0) return bits; \ return zig_builtin##w(ctz, val); \ } \ @@ -1171,7 +1274,7 @@ zig_builtin_popcount(64) zig_builtin_ctz_common(w) #else #define zig_builtin_ctz(w) \ - static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \ return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \ } \ \ @@ -1183,12 +1286,12 @@ zig_builtin_ctz(32) zig_builtin_ctz(64) #define zig_builtin_clz_common(w) \ - static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \ - return zig_clz_u##w((zig_u##w)val, bits); \ + static inline uint8_t zig_clz_i##w(int##w##_t val, uint8_t bits) { \ + return zig_clz_u##w((uint##w##_t)val, bits); \ } #if zig_has_builtin(clz) || defined(zig_gnuc) #define zig_builtin_clz(w) \ - static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \ if (val == 0) return bits; \ return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \ } \ @@ -1196,7 +1299,7 @@ zig_builtin_ctz(64) zig_builtin_clz_common(w) #else #define zig_builtin_clz(w) \ - static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \ + static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \ return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \ } \ \ @@ -1207,7 +1310,7 @@ zig_builtin_clz(16) zig_builtin_clz(32) zig_builtin_clz(64) -/* ======================== 128-bit Integer Routines ======================== */ +/* ======================== 128-bit Integer Support ========================= */ #if !defined(zig_has_int128) # if defined(__SIZEOF_INT128__) @@ -1222,18 +1325,18 @@ zig_builtin_clz(64) typedef unsigned __int128 zig_u128; typedef signed __int128 zig_i128; -#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo)) -#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo)) -#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) -#define 
zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo) -#define zig_hi_u128(val) ((zig_u64)((val) >> 64)) -#define zig_lo_u128(val) ((zig_u64)((val) >> 0)) -#define zig_hi_i128(val) ((zig_i64)((val) >> 64)) -#define zig_lo_i128(val) ((zig_u64)((val) >> 0)) +#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo)) +#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo)) +#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo) +#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo) +#define zig_hi_u128(val) ((uint64_t)((val) >> 64)) +#define zig_lo_u128(val) ((uint64_t)((val) >> 0)) +#define zig_hi_i128(val) (( int64_t)((val) >> 64)) +#define zig_lo_i128(val) ((uint64_t)((val) >> 0)) #define zig_bitcast_u128(val) ((zig_u128)(val)) #define zig_bitcast_i128(val) ((zig_i128)(val)) #define zig_cmp_int128(Type) \ - static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ return (lhs > rhs) - (lhs < rhs); \ } #define zig_bit_int128(Type, operation, operator) \ @@ -1244,31 +1347,31 @@ typedef signed __int128 zig_i128; #else /* zig_has_int128 */ #if __LITTLE_ENDIAN__ || _MSC_VER -typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128; -typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128; +typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128; +typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128; #else -typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128; -typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128; +typedef struct { zig_align(16) uint64_t hi; uint64_t lo; } zig_u128; +typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128; #endif -#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) }) -#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) }) +#define zig_make_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) }) +#define zig_make_i128(hi, 
lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) }) -#if _MSC_VER -#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) } -#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) } -#else -#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) -#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo) +#if _MSC_VER /* MSVC doesn't allow struct literals in constant expressions */ +#define zig_make_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#define zig_make_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#else /* But non-MSVC doesn't like the unprotected commas */ +#define zig_make_constant_u128(hi, lo) zig_make_u128(hi, lo) +#define zig_make_constant_i128(hi, lo) zig_make_i128(hi, lo) #endif #define zig_hi_u128(val) ((val).hi) #define zig_lo_u128(val) ((val).lo) #define zig_hi_i128(val) ((val).hi) #define zig_lo_i128(val) ((val).lo) -#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo) -#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo) +#define zig_bitcast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo) +#define zig_bitcast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo) #define zig_cmp_int128(Type) \ - static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ return (lhs.hi == rhs.hi) \ ? 
(lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \ : (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \ @@ -1280,10 +1383,10 @@ typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128; #endif /* zig_has_int128 */ -#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64) -#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64) -#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64) -#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64) +#define zig_minInt_u128 zig_make_u128(zig_minInt_u64, zig_minInt_u64) +#define zig_maxInt_u128 zig_make_u128(zig_maxInt_u64, zig_maxInt_u64) +#define zig_minInt_i128 zig_make_i128(zig_minInt_i64, zig_minInt_u64) +#define zig_maxInt_i128 zig_make_i128(zig_maxInt_i64, zig_maxInt_u64) zig_cmp_int128(u128) zig_cmp_int128(i128) @@ -1297,28 +1400,33 @@ zig_bit_int128(i128, or, |) zig_bit_int128(u128, xor, ^) zig_bit_int128(i128, xor, ^) -static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs); +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs); #if zig_has_int128 -static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) { - return val ^ zig_maxInt(u128, bits); +static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) { + return val ^ zig_maxInt_u(128, bits); } -static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { +static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) { (void)bits; return ~val; } -static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) { return lhs >> rhs; } -static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { +static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) { return lhs << rhs; } -static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { +static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) { + zig_i128 sign_mask = lhs < zig_make_i128(0, 0) ? 
-zig_make_i128(0, 1) : zig_make_i128(0, 0); + return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; +} + +static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) { return lhs << rhs; } @@ -1363,40 +1471,46 @@ static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { } static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_as_i128(0, 0)); + return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_make_i128(0, 0)); } static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { zig_i128 rem = zig_rem_i128(lhs, rhs); - return rem + (((lhs ^ rhs) & rem) < zig_as_i128(0, 0) ? rhs : zig_as_i128(0, 0)); + return rem + (((lhs ^ rhs) & rem) < zig_make_i128(0, 0) ? rhs : zig_make_i128(0, 0)); } #else /* zig_has_int128 */ -static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) { - return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) }; +static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) { + return (zig_u128){ .hi = zig_not_u64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) }; } -static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { - return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) }; +static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) { + return (zig_i128){ .hi = zig_not_i64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) }; } -static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { - if (rhs == zig_as_u8(0)) return lhs; - if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) }; - return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs }; +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + 
if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - UINT8_C(64)) }; + return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (UINT8_C(64) - rhs) | lhs.lo >> rhs }; } -static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { - if (rhs == zig_as_u8(0)) return lhs; - if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; - return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; +static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 }; + return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs }; } -static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { - if (rhs == zig_as_u8(0)) return lhs; - if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; - return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; +static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = zig_shr_i64(lhs.hi, 63), .lo = zig_shr_i64(lhs.hi, (rhs - UINT8_C(64))) }; + return (zig_i128){ .hi = zig_shr_i64(lhs.hi, rhs), .lo = lhs.lo >> rhs | (uint64_t)lhs.hi << (UINT8_C(64) - rhs) }; +} + +static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 }; + return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs }; } static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { @@ -1424,14 +1538,14 @@ static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) { } zig_extern zig_i128 __multi3(zig_i128 lhs, 
zig_i128 rhs); -static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { - return zig_bitcast_u128(__multi3(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs))); -} - static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { return __multi3(lhs, rhs); } +static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_bitcast_u128(zig_mul_i128(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs))); +} + zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs); static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { return __udivti3(lhs, rhs); @@ -1454,11 +1568,11 @@ static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { zig_i128 rem = zig_rem_i128(lhs, rhs); - return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? rhs : zig_as_i128(0, 0))); + return zig_add_i128(rem, ((lhs.hi ^ rhs.hi) & rem.hi) < INT64_C(0) ? rhs : zig_make_i128(0, 0)); } static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0))); + return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_make_i128(0, 0)) < INT32_C(0))); } #endif /* zig_has_int128 */ @@ -1471,323 +1585,294 @@ static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) { } static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) { - return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; + return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs; } static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_cmp_i128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; + return zig_cmp_i128(lhs, rhs) < INT32_C(0) ? lhs : rhs; } static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) { - return zig_cmp_u128(lhs, rhs) > zig_as_i32(0) ? 
lhs : rhs; + return zig_cmp_u128(lhs, rhs) > INT32_C(0) ? lhs : rhs; } static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) { - return zig_cmp_i128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs; + return zig_cmp_i128(lhs, rhs) > INT32_C(0) ? lhs : rhs; } -static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) { - zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0); - return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask); +static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) { + return zig_and_u128(val, zig_maxInt_u(128, bits)); } -static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) { - return zig_and_u128(val, zig_maxInt(u128, bits)); +static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) { + return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val)); } -static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) { - return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val)); -} - -static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) { return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits); } -static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline zig_i128 zig_shlw_i128(zig_i128 lhs, uint8_t rhs, uint8_t bits) { return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits); } -static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { return zig_wrap_u128(zig_add_u128(lhs, rhs), bits); } -static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t 
bits) { return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); } -static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits); } -static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); } -static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits); } -static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); } #if zig_has_int128 -static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) zig_u128 full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); - return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); #else *res = zig_addw_u128(lhs, rhs, bits); return *res < lhs; #endif } -zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_addo_i128(zig_i128 
*res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { #if zig_has_builtin(add_overflow) zig_i128 full_res; bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; + int overflow_int; zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); - return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); } -static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) zig_u128 full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); - return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); #else *res = zig_subw_u128(lhs, rhs, bits); return *res > lhs; #endif } -zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { #if zig_has_builtin(sub_overflow) zig_i128 full_res; bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; + int overflow_int; zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); - return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); } -static inline bool 
zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) zig_u128 full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); - return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); #else *res = zig_mulw_u128(lhs, rhs, bits); - return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs; + return rhs != zig_make_u128(0, 0) && lhs > zig_maxInt_u(128, bits) / rhs; #endif } -zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { #if zig_has_builtin(mul_overflow) zig_i128 full_res; bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else - zig_c_int overflow_int; + int overflow_int; zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); - return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); } #else /* zig_has_int128 */ -static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) { - return overflow || - zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) || - zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0); +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + uint64_t hi; + bool overflow = zig_addo_u64(&hi, lhs.hi, rhs.hi, bits - 64); + return overflow 
^ zig_addo_u64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64); } -static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) { - return overflow || - zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) || - zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0); +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int64_t hi; + bool overflow = zig_addo_i64(&hi, lhs.hi, rhs.hi, bits - 64); + return overflow ^ zig_addo_i64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64); } -static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { - zig_u128 full_res; - bool overflow = - zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | - zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64); - *res = zig_wrap_u128(full_res, bits); - return zig_overflow_u128(overflow, full_res, bits); +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + uint64_t hi; + bool overflow = zig_subo_u64(&hi, lhs.hi, rhs.hi, bits - 64); + return overflow ^ zig_subo_u64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64); } -zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { - zig_c_int overflow_int; - zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); - *res = zig_wrap_i128(full_res, bits); - return zig_overflow_i128(overflow_int, full_res, bits); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int64_t hi; + bool overflow = zig_subo_i64(&hi, lhs.hi, rhs.hi, bits - 64); + return overflow ^ zig_subo_i64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64); } -static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { - zig_u128 full_res; - 
bool overflow = - zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | - zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64); - *res = zig_wrap_u128(full_res, bits); - return zig_overflow_u128(overflow, full_res, bits); -} - -zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { - zig_c_int overflow_int; - zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); - *res = zig_wrap_i128(full_res, bits); - return zig_overflow_i128(overflow_int, full_res, bits); -} - -static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { *res = zig_mulw_u128(lhs, rhs, bits); - return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) && - zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); + return zig_cmp_u128(*res, zig_make_u128(0, 0)) != INT32_C(0) && + zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0); } -zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { - zig_c_int overflow_int; +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int overflow_int; zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0 || + zig_cmp_i128(full_res, zig_minInt_i(128, bits)) < INT32_C(0) || + zig_cmp_i128(full_res, zig_maxInt_i(128, bits)) > INT32_C(0); *res = zig_wrap_i128(full_res, bits); - return zig_overflow_i128(overflow_int, full_res, bits); + return overflow; } #endif /* zig_has_int128 */ -static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { 
+static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, uint8_t rhs, uint8_t bits) { *res = zig_shlw_u128(lhs, rhs, bits); - return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); + return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0); } -static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8_t bits) { *res = zig_shlw_i128(lhs, rhs, bits); - zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1))); - return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) && - zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0); + zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1))); + return zig_cmp_i128(zig_and_i128(lhs, mask), zig_make_i128(0, 0)) != INT32_C(0) && + zig_cmp_i128(zig_and_i128(lhs, mask), mask) != INT32_C(0); } -static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 res; - if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0)) - return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs; - -#if zig_has_int128 - return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res; -#else - return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res; -#endif + if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0)) + return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs; + return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? 
zig_maxInt_u(128, bits) : res; } -static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; - if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res; - return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); + if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res; + return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } -static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 res; - return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res; + return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res; } -static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; if (!zig_addo_i128(&res, lhs, rhs, bits)) return res; - return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); + return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } -static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 res; - return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res; + return zig_subo_u128(&res, lhs, rhs, bits) ? 
zig_minInt_u(128, bits) : res; } -static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; if (!zig_subo_i128(&res, lhs, rhs, bits)) return res; - return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); + return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } -static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { zig_u128 res; - return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res; + return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res; } -static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { zig_i128 res; if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res; - return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); + return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_make_i128(0, 0)) < INT32_C(0) ? 
zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); } -static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) { - if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits); - if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64)); - return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64)); +static inline uint8_t zig_clz_u128(zig_u128 val, uint8_t bits) { + if (bits <= UINT8_C(64)) return zig_clz_u64(zig_lo_u128(val), bits); + if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - UINT8_C(64)); + return zig_clz_u64(zig_lo_u128(val), UINT8_C(64)) + (bits - UINT8_C(64)); } -static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) { +static inline uint8_t zig_clz_i128(zig_i128 val, uint8_t bits) { return zig_clz_u128(zig_bitcast_u128(val), bits); } -static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) { - if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64)); - return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64); +static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) { + if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), UINT8_C(64)); + return zig_ctz_u64(zig_hi_u128(val), bits - UINT8_C(64)) + UINT8_C(64); } -static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) { +static inline uint8_t zig_ctz_i128(zig_i128 val, uint8_t bits) { return zig_ctz_u128(zig_bitcast_u128(val), bits); } -static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) { - return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + - zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64)); +static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) { + return zig_popcount_u64(zig_hi_u128(val), bits - UINT8_C(64)) + + zig_popcount_u64(zig_lo_u128(val), UINT8_C(64)); } -static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) { +static inline uint8_t zig_popcount_i128(zig_i128 val, uint8_t bits) { return 
zig_popcount_u128(zig_bitcast_u128(val), bits); } -static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) { +static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) { zig_u128 full_res; #if zig_has_builtin(bswap128) full_res = __builtin_bswap128(val); #else - full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)), - zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64))); + full_res = zig_make_u128(zig_byte_swap_u64(zig_lo_u128(val), UINT8_C(64)), + zig_byte_swap_u64(zig_hi_u128(val), UINT8_C(64))); #endif - return zig_shr_u128(full_res, zig_as_u8(128) - bits); + return zig_shr_u128(full_res, UINT8_C(128) - bits); } -static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) { +static inline zig_i128 zig_byte_swap_i128(zig_i128 val, uint8_t bits) { return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits)); } -static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) { - return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)), - zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))), - zig_as_u8(128) - bits); +static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) { + return zig_shr_u128(zig_make_u128(zig_bit_reverse_u64(zig_lo_u128(val), UINT8_C(64)), + zig_bit_reverse_u64(zig_hi_u128(val), UINT8_C(64))), + UINT8_C(128) - bits); } -static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) { +static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) { return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits)); } @@ -1810,85 +1895,87 @@ static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) { #if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc) #define zig_has_float_builtins 1 -#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg) -#define zig_as_special_f32(sign, name, arg, repr) sign 
zig_as_f32(__builtin_##name, )(arg) -#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg) -#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg) -#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg) -#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg) +#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16(__builtin_##name, )(arg) +#define zig_make_special_f32(sign, name, arg, repr) sign zig_make_f32(__builtin_##name, )(arg) +#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64(__builtin_##name, )(arg) +#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80(__builtin_##name, )(arg) +#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg) +#define zig_make_special_c_longdouble(sign, name, arg, repr) sign zig_make_c_longdouble(__builtin_##name, )(arg) #else #define zig_has_float_builtins 0 -#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr) -#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr) -#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr) -#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr) -#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr) -#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr) +#define zig_make_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr) +#define zig_make_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr) +#define zig_make_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr) +#define zig_make_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr) +#define zig_make_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr) +#define 
zig_make_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr) #endif #define zig_has_f16 1 #define zig_bitSizeOf_f16 16 #define zig_libc_name_f16(name) __##name##h -#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr) +#define zig_make_special_constant_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr) #if FLT_MANT_DIG == 11 typedef float zig_f16; -#define zig_as_f16(fp, repr) fp##f +#define zig_make_f16(fp, repr) fp##f #elif DBL_MANT_DIG == 11 typedef double zig_f16; -#define zig_as_f16(fp, repr) fp +#define zig_make_f16(fp, repr) fp #elif LDBL_MANT_DIG == 11 #define zig_bitSizeOf_c_longdouble 16 +typedef uint16_t zig_repr_c_longdouble; typedef long double zig_f16; -#define zig_as_f16(fp, repr) fp##l +#define zig_make_f16(fp, repr) fp##l #elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc)) typedef _Float16 zig_f16; -#define zig_as_f16(fp, repr) fp##f16 +#define zig_make_f16(fp, repr) fp##f16 #elif defined(__SIZEOF_FP16__) typedef __fp16 zig_f16; -#define zig_as_f16(fp, repr) fp##f16 +#define zig_make_f16(fp, repr) fp##f16 #else #undef zig_has_f16 #define zig_has_f16 0 -#define zig_repr_f16 i16 -typedef zig_i16 zig_f16; -#define zig_as_f16(fp, repr) repr -#undef zig_as_special_f16 -#define zig_as_special_f16(sign, name, arg, repr) repr -#undef zig_as_special_constant_f16 -#define zig_as_special_constant_f16(sign, name, arg, repr) repr +#define zig_bitSizeOf_repr_f16 16 +typedef int16_t zig_f16; +#define zig_make_f16(fp, repr) repr +#undef zig_make_special_f16 +#define zig_make_special_f16(sign, name, arg, repr) repr +#undef zig_make_special_constant_f16 +#define zig_make_special_constant_f16(sign, name, arg, repr) repr #endif #define zig_has_f32 1 #define zig_bitSizeOf_f32 32 #define zig_libc_name_f32(name) name##f #if _MSC_VER -#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, ) +#define 
zig_make_special_constant_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, ) #else -#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr) +#define zig_make_special_constant_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr) #endif #if FLT_MANT_DIG == 24 typedef float zig_f32; -#define zig_as_f32(fp, repr) fp##f +#define zig_make_f32(fp, repr) fp##f #elif DBL_MANT_DIG == 24 typedef double zig_f32; -#define zig_as_f32(fp, repr) fp +#define zig_make_f32(fp, repr) fp #elif LDBL_MANT_DIG == 24 #define zig_bitSizeOf_c_longdouble 32 +typedef uint32_t zig_repr_c_longdouble; typedef long double zig_f32; -#define zig_as_f32(fp, repr) fp##l +#define zig_make_f32(fp, repr) fp##l #elif FLT32_MANT_DIG == 24 typedef _Float32 zig_f32; -#define zig_as_f32(fp, repr) fp##f32 +#define zig_make_f32(fp, repr) fp##f32 #else #undef zig_has_f32 #define zig_has_f32 0 -#define zig_repr_f32 i32 -typedef zig_i32 zig_f32; -#define zig_as_f32(fp, repr) repr -#undef zig_as_special_f32 -#define zig_as_special_f32(sign, name, arg, repr) repr -#undef zig_as_special_constant_f32 -#define zig_as_special_constant_f32(sign, name, arg, repr) repr +#define zig_bitSizeOf_repr_f32 32 +typedef int32_t zig_f32; +#define zig_make_f32(fp, repr) repr +#undef zig_make_special_f32 +#define zig_make_special_f32(sign, name, arg, repr) repr +#undef zig_make_special_constant_f32 +#define zig_make_special_constant_f32(sign, name, arg, repr) repr #endif #define zig_has_f64 1 @@ -1897,109 +1984,113 @@ typedef zig_i32 zig_f32; #if _MSC_VER #ifdef ZIG_TARGET_ABI_MSVC #define zig_bitSizeOf_c_longdouble 64 +typedef uint64_t zig_repr_c_longdouble; #endif -#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, ) +#define zig_make_special_constant_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, ) #else /* _MSC_VER */ -#define zig_as_special_constant_f64(sign, name, arg, repr) 
zig_as_special_f64(sign, name, arg, repr) +#define zig_make_special_constant_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr) #endif /* _MSC_VER */ #if FLT_MANT_DIG == 53 typedef float zig_f64; -#define zig_as_f64(fp, repr) fp##f +#define zig_make_f64(fp, repr) fp##f #elif DBL_MANT_DIG == 53 typedef double zig_f64; -#define zig_as_f64(fp, repr) fp +#define zig_make_f64(fp, repr) fp #elif LDBL_MANT_DIG == 53 #define zig_bitSizeOf_c_longdouble 64 +typedef uint64_t zig_repr_c_longdouble; typedef long double zig_f64; -#define zig_as_f64(fp, repr) fp##l +#define zig_make_f64(fp, repr) fp##l #elif FLT64_MANT_DIG == 53 typedef _Float64 zig_f64; -#define zig_as_f64(fp, repr) fp##f64 +#define zig_make_f64(fp, repr) fp##f64 #elif FLT32X_MANT_DIG == 53 typedef _Float32x zig_f64; -#define zig_as_f64(fp, repr) fp##f32x +#define zig_make_f64(fp, repr) fp##f32x #else #undef zig_has_f64 #define zig_has_f64 0 -#define zig_repr_f64 i64 -typedef zig_i64 zig_f64; -#define zig_as_f64(fp, repr) repr -#undef zig_as_special_f64 -#define zig_as_special_f64(sign, name, arg, repr) repr -#undef zig_as_special_constant_f64 -#define zig_as_special_constant_f64(sign, name, arg, repr) repr +#define zig_bitSizeOf_repr_f64 64 +typedef int64_t zig_f64; +#define zig_make_f64(fp, repr) repr +#undef zig_make_special_f64 +#define zig_make_special_f64(sign, name, arg, repr) repr +#undef zig_make_special_constant_f64 +#define zig_make_special_constant_f64(sign, name, arg, repr) repr #endif #define zig_has_f80 1 #define zig_bitSizeOf_f80 80 #define zig_libc_name_f80(name) __##name##x -#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr) +#define zig_make_special_constant_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr) #if FLT_MANT_DIG == 64 typedef float zig_f80; -#define zig_as_f80(fp, repr) fp##f +#define zig_make_f80(fp, repr) fp##f #elif DBL_MANT_DIG == 64 typedef double zig_f80; -#define zig_as_f80(fp, repr) 
fp +#define zig_make_f80(fp, repr) fp #elif LDBL_MANT_DIG == 64 #define zig_bitSizeOf_c_longdouble 80 +typedef zig_u128 zig_repr_c_longdouble; typedef long double zig_f80; -#define zig_as_f80(fp, repr) fp##l +#define zig_make_f80(fp, repr) fp##l #elif FLT80_MANT_DIG == 64 typedef _Float80 zig_f80; -#define zig_as_f80(fp, repr) fp##f80 +#define zig_make_f80(fp, repr) fp##f80 #elif FLT64X_MANT_DIG == 64 typedef _Float64x zig_f80; -#define zig_as_f80(fp, repr) fp##f64x +#define zig_make_f80(fp, repr) fp##f64x #elif defined(__SIZEOF_FLOAT80__) typedef __float80 zig_f80; -#define zig_as_f80(fp, repr) fp##l +#define zig_make_f80(fp, repr) fp##l #else #undef zig_has_f80 #define zig_has_f80 0 -#define zig_repr_f80 i128 +#define zig_bitSizeOf_repr_f80 128 typedef zig_i128 zig_f80; -#define zig_as_f80(fp, repr) repr -#undef zig_as_special_f80 -#define zig_as_special_f80(sign, name, arg, repr) repr -#undef zig_as_special_constant_f80 -#define zig_as_special_constant_f80(sign, name, arg, repr) repr +#define zig_make_f80(fp, repr) repr +#undef zig_make_special_f80 +#define zig_make_special_f80(sign, name, arg, repr) repr +#undef zig_make_special_constant_f80 +#define zig_make_special_constant_f80(sign, name, arg, repr) repr #endif #define zig_has_f128 1 #define zig_bitSizeOf_f128 128 #define zig_libc_name_f128(name) name##q -#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr) +#define zig_make_special_constant_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr) #if FLT_MANT_DIG == 113 typedef float zig_f128; -#define zig_as_f128(fp, repr) fp##f +#define zig_make_f128(fp, repr) fp##f #elif DBL_MANT_DIG == 113 typedef double zig_f128; -#define zig_as_f128(fp, repr) fp +#define zig_make_f128(fp, repr) fp #elif LDBL_MANT_DIG == 113 #define zig_bitSizeOf_c_longdouble 128 +typedef zig_u128 zig_repr_c_longdouble; typedef long double zig_f128; -#define zig_as_f128(fp, repr) fp##l +#define zig_make_f128(fp, repr) 
fp##l #elif FLT128_MANT_DIG == 113 typedef _Float128 zig_f128; -#define zig_as_f128(fp, repr) fp##f128 +#define zig_make_f128(fp, repr) fp##f128 #elif FLT64X_MANT_DIG == 113 typedef _Float64x zig_f128; -#define zig_as_f128(fp, repr) fp##f64x +#define zig_make_f128(fp, repr) fp##f64x #elif defined(__SIZEOF_FLOAT128__) typedef __float128 zig_f128; -#define zig_as_f128(fp, repr) fp##q -#undef zig_as_special_f128 -#define zig_as_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg) +#define zig_make_f128(fp, repr) fp##q +#undef zig_make_special_f128 +#define zig_make_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg) #else #undef zig_has_f128 #define zig_has_f128 0 -#define zig_repr_f128 i128 +#define zig_bitSizeOf_repr_f128 128 typedef zig_i128 zig_f128; -#define zig_as_f128(fp, repr) repr -#undef zig_as_special_f128 -#define zig_as_special_f128(sign, name, arg, repr) repr -#undef zig_as_special_constant_f128 -#define zig_as_special_constant_f128(sign, name, arg, repr) repr +#define zig_make_f128(fp, repr) repr +#undef zig_make_special_f128 +#define zig_make_special_f128(sign, name, arg, repr) repr +#undef zig_make_special_constant_f128 +#define zig_make_special_constant_f128(sign, name, arg, repr) repr #endif #define zig_has_c_longdouble 1 @@ -2010,17 +2101,18 @@ typedef zig_i128 zig_f128; #define zig_libc_name_c_longdouble(name) name##l #endif -#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr) +#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) zig_make_special_c_longdouble(sign, name, arg, repr) #ifdef zig_bitSizeOf_c_longdouble #ifdef ZIG_TARGET_ABI_MSVC -typedef double zig_c_longdouble; #undef zig_bitSizeOf_c_longdouble #define zig_bitSizeOf_c_longdouble 64 -#define zig_as_c_longdouble(fp, repr) fp +typedef uint64_t zig_repr_c_longdouble; +typedef zig_f64 zig_c_longdouble; +#define zig_make_c_longdouble(fp, repr) fp #else typedef long double 
zig_c_longdouble; -#define zig_as_c_longdouble(fp, repr) fp##l +#define zig_make_c_longdouble(fp, repr) fp##l #endif #else /* zig_bitSizeOf_c_longdouble */ @@ -2028,34 +2120,32 @@ typedef long double zig_c_longdouble; #undef zig_has_c_longdouble #define zig_has_c_longdouble 0 #define zig_bitSizeOf_c_longdouble 80 +typedef zig_u128 zig_repr_c_longdouble; #define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80 -#define zig_repr_c_longdouble i128 +#define zig_bitSizeOf_repr_c_longdouble 128 typedef zig_i128 zig_c_longdouble; -#define zig_as_c_longdouble(fp, repr) repr -#undef zig_as_special_c_longdouble -#define zig_as_special_c_longdouble(sign, name, arg, repr) repr -#undef zig_as_special_constant_c_longdouble -#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr +#define zig_make_c_longdouble(fp, repr) repr +#undef zig_make_special_c_longdouble +#define zig_make_special_c_longdouble(sign, name, arg, repr) repr +#undef zig_make_special_constant_c_longdouble +#define zig_make_special_constant_c_longdouble(sign, name, arg, repr) repr #endif /* zig_bitSizeOf_c_longdouble */ #if !zig_has_float_builtins #define zig_float_from_repr(Type, ReprType) \ - static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \ - return *((zig_##Type*)&repr); \ + static inline zig_##Type zig_float_from_repr_##Type(ReprType repr) { \ + zig_##Type result; \ + memcpy(&result, &repr, sizeof(result)); \ + return result; \ } -zig_float_from_repr(f16, u16) -zig_float_from_repr(f32, u32) -zig_float_from_repr(f64, u64) -zig_float_from_repr(f80, u128) -zig_float_from_repr(f128, u128) -#if zig_bitSizeOf_c_longdouble == 80 -zig_float_from_repr(c_longdouble, u128) -#else -#define zig_expand_float_from_repr(Type, ReprType) zig_float_from_repr(Type, ReprType) -zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_longdouble)) -#endif +zig_float_from_repr(f16, uint16_t) +zig_float_from_repr(f32, uint32_t) +zig_float_from_repr(f64, 
uint64_t) +zig_float_from_repr(f80, zig_u128) +zig_float_from_repr(f128, zig_u128) +zig_float_from_repr(c_longdouble, zig_repr_c_longdouble) #endif #define zig_cast_f16 (zig_f16) @@ -2073,32 +2163,35 @@ zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_lo #endif #define zig_convert_builtin(ResType, operation, ArgType, version) \ - zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ - zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(zig_##ArgType); -zig_convert_builtin(f16, trunc, f32, 2) -zig_convert_builtin(f16, trunc, f64, 2) -zig_convert_builtin(f16, trunc, f80, 2) -zig_convert_builtin(f16, trunc, f128, 2) -zig_convert_builtin(f32, extend, f16, 2) -zig_convert_builtin(f32, trunc, f64, 2) -zig_convert_builtin(f32, trunc, f80, 2) -zig_convert_builtin(f32, trunc, f128, 2) -zig_convert_builtin(f64, extend, f16, 2) -zig_convert_builtin(f64, extend, f32, 2) -zig_convert_builtin(f64, trunc, f80, 2) -zig_convert_builtin(f64, trunc, f128, 2) -zig_convert_builtin(f80, extend, f16, 2) -zig_convert_builtin(f80, extend, f32, 2) -zig_convert_builtin(f80, extend, f64, 2) -zig_convert_builtin(f80, trunc, f128, 2) -zig_convert_builtin(f128, extend, f16, 2) -zig_convert_builtin(f128, extend, f32, 2) -zig_convert_builtin(f128, extend, f64, 2) -zig_convert_builtin(f128, extend, f80, 2) + zig_extern ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ArgType); +zig_convert_builtin(zig_f16, trunc, zig_f32, 2) +zig_convert_builtin(zig_f16, trunc, zig_f64, 2) +zig_convert_builtin(zig_f16, trunc, zig_f80, 2) +zig_convert_builtin(zig_f16, trunc, zig_f128, 2) +zig_convert_builtin(zig_f32, extend, zig_f16, 2) +zig_convert_builtin(zig_f32, trunc, zig_f64, 2) +zig_convert_builtin(zig_f32, trunc, zig_f80, 2) +zig_convert_builtin(zig_f32, trunc, zig_f128, 2) +zig_convert_builtin(zig_f64, 
extend, zig_f16, 2) +zig_convert_builtin(zig_f64, extend, zig_f32, 2) +zig_convert_builtin(zig_f64, trunc, zig_f80, 2) +zig_convert_builtin(zig_f64, trunc, zig_f128, 2) +zig_convert_builtin(zig_f80, extend, zig_f16, 2) +zig_convert_builtin(zig_f80, extend, zig_f32, 2) +zig_convert_builtin(zig_f80, extend, zig_f64, 2) +zig_convert_builtin(zig_f80, trunc, zig_f128, 2) +zig_convert_builtin(zig_f128, extend, zig_f16, 2) +zig_convert_builtin(zig_f128, extend, zig_f32, 2) +zig_convert_builtin(zig_f128, extend, zig_f64, 2) +zig_convert_builtin(zig_f128, extend, zig_f80, 2) #define zig_float_negate_builtin_0(Type) \ static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \ - return zig_expand_concat(zig_xor_, zig_repr_##Type)(arg, zig_expand_minInt(zig_repr_##Type, zig_bitSizeOf_##Type)); \ + return zig_expand_concat(zig_xor_i, zig_bitSizeOf_repr_##Type)( \ + arg, \ + zig_minInt_i(zig_bitSizeOf_repr_##Type, zig_bitSizeOf_##Type) \ + ); \ } #define zig_float_negate_builtin_1(Type) \ static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \ @@ -2106,28 +2199,28 @@ zig_convert_builtin(f128, extend, f80, 2) } #define zig_float_less_builtin_0(Type, operation) \ - zig_extern zig_i32 zig_expand_concat(zig_expand_concat(__##operation, \ - zig_compiler_rt_abbrev_##Type), 2)(zig_##Type, zig_##Type); \ - static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ - return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 2)(lhs, rhs); \ + zig_extern int32_t zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_zig_##Type), 2)(zig_##Type, zig_##Type); \ + static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 2)(lhs, rhs); \ } #define zig_float_less_builtin_1(Type, operation) \ - static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + static inline 
int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ return (!(lhs <= rhs) - (lhs < rhs)); \ } #define zig_float_greater_builtin_0(Type, operation) \ zig_float_less_builtin_0(Type, operation) #define zig_float_greater_builtin_1(Type, operation) \ - static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ return ((lhs > rhs) - !(lhs >= rhs)); \ } #define zig_float_binary_builtin_0(Type, operation, operator) \ zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \ - zig_compiler_rt_abbrev_##Type), 3)(zig_##Type, zig_##Type); \ + zig_compiler_rt_abbrev_zig_##Type), 3)(zig_##Type, zig_##Type); \ static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ - return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 3)(lhs, rhs); \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 3)(lhs, rhs); \ } #define zig_float_binary_builtin_1(Type, operation, operator) \ static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ @@ -2135,18 +2228,18 @@ zig_convert_builtin(f128, extend, f80, 2) } #define zig_float_builtins(Type) \ - zig_convert_builtin(i32, fix, Type, ) \ - zig_convert_builtin(u32, fixuns, Type, ) \ - zig_convert_builtin(i64, fix, Type, ) \ - zig_convert_builtin(u64, fixuns, Type, ) \ - zig_convert_builtin(i128, fix, Type, ) \ - zig_convert_builtin(u128, fixuns, Type, ) \ - zig_convert_builtin(Type, float, i32, ) \ - zig_convert_builtin(Type, floatun, u32, ) \ - zig_convert_builtin(Type, float, i64, ) \ - zig_convert_builtin(Type, floatun, u64, ) \ - zig_convert_builtin(Type, float, i128, ) \ - zig_convert_builtin(Type, floatun, u128, ) \ + zig_convert_builtin( int32_t, fix, zig_##Type, ) \ + zig_convert_builtin(uint32_t, fixuns, zig_##Type, ) \ + zig_convert_builtin( int64_t, fix, zig_##Type, 
) \ + zig_convert_builtin(uint64_t, fixuns, zig_##Type, ) \ + zig_convert_builtin(zig_i128, fix, zig_##Type, ) \ + zig_convert_builtin(zig_u128, fixuns, zig_##Type, ) \ + zig_convert_builtin(zig_##Type, float, int32_t, ) \ + zig_convert_builtin(zig_##Type, floatun, uint32_t, ) \ + zig_convert_builtin(zig_##Type, float, int64_t, ) \ + zig_convert_builtin(zig_##Type, floatun, uint64_t, ) \ + zig_convert_builtin(zig_##Type, float, zig_i128, ) \ + zig_convert_builtin(zig_##Type, floatun, zig_u128, ) \ zig_expand_concat(zig_float_negate_builtin_, zig_has_##Type)(Type) \ zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, cmp) \ zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, ne) \ @@ -2200,98 +2293,98 @@ zig_float_builtins(c_longdouble) // TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 -#define zig_msvc_atomics(Type, suffix) \ - static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ - zig_##Type comparand = *expected; \ - zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \ +#define zig_msvc_atomics(ZigType, Type, suffix) \ + static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \ + Type comparand = *expected; \ + Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \ bool exchanged = initial == comparand; \ if (!exchanged) { \ *expected = initial; \ } \ return exchanged; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_xchg_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedExchange##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_add_##ZigType(Type volatile* obj, Type value) { 
\ return _InterlockedExchangeAdd##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_sub_##ZigType(Type volatile* obj, Type value) { \ bool success = false; \ - zig_##Type new; \ - zig_##Type prev; \ + Type new; \ + Type prev; \ while (!success) { \ prev = *obj; \ new = prev - value; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ } \ return prev; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_or_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedOr##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_xor_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedXor##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_and_##ZigType(Type volatile* obj, Type value) { \ return _InterlockedAnd##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_nand_##ZigType(Type volatile* obj, Type value) { \ bool success = false; \ - zig_##Type new; \ - zig_##Type prev; \ + Type new; \ + Type prev; \ while (!success) { \ prev = *obj; \ new = ~(prev & value); \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ } \ return prev; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_min_##ZigType(Type volatile* obj, Type value) { \ bool success = false; \ - zig_##Type 
new; \ - zig_##Type prev; \ + Type new; \ + Type prev; \ while (!success) { \ prev = *obj; \ new = value < prev ? value : prev; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ } \ return prev; \ } \ - static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline Type zig_msvc_atomicrmw_max_##ZigType(Type volatile* obj, Type value) { \ bool success = false; \ - zig_##Type new; \ - zig_##Type prev; \ + Type new; \ + Type prev; \ while (!success) { \ prev = *obj; \ new = value > prev ? value : prev; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ } \ return prev; \ } \ - static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \ _InterlockedExchange##suffix(obj, value); \ } \ - static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \ + static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \ return _InterlockedOr##suffix(obj, 0); \ } -zig_msvc_atomics(u8, 8) -zig_msvc_atomics(i8, 8) -zig_msvc_atomics(u16, 16) -zig_msvc_atomics(i16, 16) -zig_msvc_atomics(u32, ) -zig_msvc_atomics(i32, ) +zig_msvc_atomics( u8, uint8_t, 8) +zig_msvc_atomics( i8, int8_t, 8) +zig_msvc_atomics(u16, uint16_t, 16) +zig_msvc_atomics(i16, int16_t, 16) +zig_msvc_atomics(u32, uint32_t, ) +zig_msvc_atomics(i32, int32_t, ) #if _M_X64 -zig_msvc_atomics(u64, 64) -zig_msvc_atomics(i64, 64) +zig_msvc_atomics(u64, uint64_t, 64) +zig_msvc_atomics(i64, int64_t, 64) #endif #define zig_msvc_flt_atomics(Type, ReprType, suffix) \ static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ - zig_##ReprType comparand = *((zig_##ReprType*)expected); \ - zig_##ReprType initial = 
_InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \ + ReprType comparand = *((ReprType*)expected); \ + ReprType initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, *((ReprType*)&desired), comparand); \ bool exchanged = initial == comparand; \ if (!exchanged) { \ *expected = *((zig_##Type*)&initial); \ @@ -2299,50 +2392,50 @@ zig_msvc_atomics(i64, 64) return exchanged; \ } \ static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ - zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \ + ReprType initial = _InterlockedExchange##suffix((ReprType volatile*)obj, *((ReprType*)&value)); \ return *((zig_##Type*)&initial); \ } \ static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ bool success = false; \ - zig_##ReprType new; \ + ReprType new; \ zig_##Type prev; \ while (!success) { \ prev = *obj; \ new = prev + value; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, *((ReprType*)&new)); \ } \ return prev; \ } \ static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ bool success = false; \ - zig_##ReprType new; \ + ReprType new; \ zig_##Type prev; \ while (!success) { \ prev = *obj; \ new = prev - value; \ - success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, *((ReprType*)&new)); \ } \ return prev; \ } -zig_msvc_flt_atomics(f32, u32, ) +zig_msvc_flt_atomics(f32, uint32_t, ) #if _M_X64 -zig_msvc_flt_atomics(f64, u64, 64) +zig_msvc_flt_atomics(f64, uint64_t, 64) #endif #if _M_IX86 static inline void zig_msvc_atomic_barrier() { - zig_i32 barrier; + int32_t barrier; __asm { xchg barrier, eax } } -static inline void* zig_msvc_atomicrmw_xchg_p32(void** 
obj, zig_u32* arg) { +static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, void* arg) { return _InterlockedExchangePointer(obj, arg); } -static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) { +static inline void zig_msvc_atomic_store_p32(void** obj, void* arg) { _InterlockedExchangePointer(obj, arg); } @@ -2360,11 +2453,11 @@ static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desir return exchanged; } #else /* _M_IX86 */ -static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) { +static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, void* arg) { return _InterlockedExchangePointer(obj, arg); } -static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) { +static inline void zig_msvc_atomic_store_p64(void** obj, void* arg) { _InterlockedExchangePointer(obj, arg); } @@ -2383,11 +2476,11 @@ static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desir } static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) { - return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected); + return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (int64_t*)expected); } static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) { - return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected); + return _InterlockedCompareExchange128((int64_t volatile*)obj, desired.hi, desired.lo, (uint64_t*)expected); } #define zig_msvc_atomics_128xchg(Type) \ @@ -2429,7 +2522,7 @@ zig_msvc_atomics_128op(u128, max) #endif /* _MSC_VER && (_M_IX86 || _M_X64) */ -/* ========================= Special Case Intrinsics ========================= */ +/* ======================== Special Case Intrinsics ========================= */ #if (_MSC_VER && _M_X64) || defined(__x86_64__) @@ -2459,8 +2552,8 @@ 
static inline void* zig_x86_windows_teb(void) { #if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__) -static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) { - zig_u32 cpu_info[4]; +static inline void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) { + uint32_t cpu_info[4]; #if _MSC_VER __cpuidex(cpu_info, leaf_id, subid); #else @@ -2472,12 +2565,12 @@ static inline void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, z *edx = cpu_info[3]; } -static inline zig_u32 zig_x86_get_xcr0(void) { +static inline uint32_t zig_x86_get_xcr0(void) { #if _MSC_VER - return (zig_u32)_xgetbv(0); + return (uint32_t)_xgetbv(0); #else - zig_u32 eax; - zig_u32 edx; + uint32_t eax; + uint32_t edx; __asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0)); return eax; #endif diff --git a/src/Compilation.zig b/src/Compilation.zig index 717a396870..de433a6800 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3325,24 +3325,20 @@ fn processOneJob(comp: *Compilation, job: Job) !void { const decl_emit_h = emit_h.declPtr(decl_index); const fwd_decl = &decl_emit_h.fwd_decl; fwd_decl.shrinkRetainingCapacity(0); - var typedefs_arena = std.heap.ArenaAllocator.init(gpa); - defer typedefs_arena.deinit(); + var ctypes_arena = std.heap.ArenaAllocator.init(gpa); + defer ctypes_arena.deinit(); var dg: c_codegen.DeclGen = .{ .gpa = gpa, .module = module, .error_msg = null, - .decl_index = decl_index, + .decl_index = decl_index.toOptional(), .decl = decl, .fwd_decl = fwd_decl.toManaged(gpa), - .typedefs = c_codegen.TypedefMap.initContext(gpa, .{ .mod = module }), - .typedefs_arena = typedefs_arena.allocator(), + .ctypes = .{}, }; defer { - for (dg.typedefs.values()) |typedef| { - module.gpa.free(typedef.rendered); - } - dg.typedefs.deinit(); + dg.ctypes.deinit(gpa); dg.fwd_decl.deinit(); } diff --git a/src/codegen/c.zig 
b/src/codegen/c.zig index 399549da3a..bfc6e6451b 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -23,12 +23,15 @@ const libcFloatSuffix = target_util.libcFloatSuffix; const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev; const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; -const Mutability = enum { Const, ConstArgument, Mut }; +const Mutability = enum { @"const", mut }; const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; +pub const CType = @import("c/type.zig").CType; + pub const CValue = union(enum) { none: void, + new_local: LocalIndex, local: LocalIndex, /// Address of a local. local_ref: LocalIndex, @@ -36,6 +39,8 @@ pub const CValue = union(enum) { constant: Air.Inst.Ref, /// Index into the parameters arg: usize, + /// The payload field of a parameter + arg_array: usize, /// Index into a tuple's fields field: usize, /// By-value @@ -61,12 +66,17 @@ const TypedefKind = enum { }; pub const CValueMap = std.AutoHashMap(Air.Inst.Ref, CValue); -pub const TypedefMap = std.ArrayHashMap( - Type, - struct { name: []const u8, rendered: []u8 }, - Type.HashContext32, - true, -); + +pub const LazyFnKey = union(enum) { + tag_name: Decl.Index, +}; +pub const LazyFnValue = struct { + fn_name: []const u8, + data: union { + tag_name: Type, + }, +}; +pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue); const LoopDepth = u16; const Local = struct { @@ -81,11 +91,6 @@ const LocalsList = std.ArrayListUnmanaged(LocalIndex); const LocalsMap = std.ArrayHashMapUnmanaged(Type, LocalsList, Type.HashContext32, true); const LocalsStack = std.ArrayListUnmanaged(LocalsMap); -const FormatTypeAsCIdentContext = struct { - ty: Type, - mod: *Module, -}; - const ValueRenderLocation = enum { FunctionArgument, Initializer, @@ -106,26 +111,6 @@ const BuiltinInfo = enum { Bits, }; -fn formatTypeAsCIdentifier( - data: FormatTypeAsCIdentContext, - comptime fmt: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, -) 
!void { - var stack = std.heap.stackFallback(128, data.mod.gpa); - const allocator = stack.get(); - const str = std.fmt.allocPrint(allocator, "{}", .{data.ty.fmt(data.mod)}) catch ""; - defer allocator.free(str); - return formatIdent(str, fmt, options, writer); -} - -pub fn typeToCIdentifier(ty: Type, mod: *Module) std.fmt.Formatter(formatTypeAsCIdentifier) { - return .{ .data = .{ - .ty = ty, - .mod = mod, - } }; -} - const reserved_idents = std.ComptimeStringMap(void, .{ // C language .{ "alignas", { @@ -281,6 +266,7 @@ pub const Function = struct { next_arg_index: usize = 0, next_block_index: usize = 0, object: Object, + lazy_fns: LazyFnMap, func: *Module.Fn, /// All the locals, to be emitted at the top of the function. locals: std.ArrayListUnmanaged(Local) = .{}, @@ -315,9 +301,9 @@ pub const Function = struct { const alignment = 0; const decl_c_value = try f.allocLocalValue(ty, alignment); const gpa = f.object.dg.gpa; - try f.allocs.put(gpa, decl_c_value.local, true); + try f.allocs.put(gpa, decl_c_value.new_local, true); try writer.writeAll("static "); - try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, .Const, alignment, .Complete); + try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, .@"const", alignment, .Complete); try writer.writeAll(" = "); try f.object.dg.renderValue(writer, ty, val, .StaticInitializer); try writer.writeAll(";\n "); @@ -347,12 +333,12 @@ pub const Function = struct { .alignment = alignment, .loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1), }); - return CValue{ .local = @intCast(LocalIndex, f.locals.items.len - 1) }; + return CValue{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; } fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue { - const result = try f.allocAlignedLocal(ty, .Mut, 0); - log.debug("%{d}: allocating t{d}", .{ inst, result.local }); + const result = try f.allocAlignedLocal(ty, .mut, 0); + log.debug("%{d}: allocating t{d}", .{ inst, result.new_local 
}); return result; } @@ -366,7 +352,7 @@ pub const Function = struct { if (local.alignment >= alignment) { local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1); _ = locals_list.swapRemove(i); - return CValue{ .local = local_index }; + return CValue{ .new_local = local_index }; } } } @@ -446,7 +432,31 @@ pub const Function = struct { return f.object.dg.fmtIntLiteral(ty, val); } - pub fn deinit(f: *Function, gpa: mem.Allocator) void { + fn getTagNameFn(f: *Function, enum_ty: Type) ![]const u8 { + const gpa = f.object.dg.gpa; + const owner_decl = enum_ty.getOwnerDecl(); + + const gop = try f.lazy_fns.getOrPut(gpa, .{ .tag_name = owner_decl }); + if (!gop.found_existing) { + errdefer _ = f.lazy_fns.pop(); + + var promoted = f.object.dg.ctypes.promote(gpa); + defer f.object.dg.ctypes.demote(promoted); + const arena = promoted.arena.allocator(); + + gop.value_ptr.* = .{ + .fn_name = try std.fmt.allocPrint(arena, "zig_tagName_{}__{d}", .{ + fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)), + @enumToInt(owner_decl), + }), + .data = .{ .tag_name = try enum_ty.copy(arena) }, + }; + } + return gop.value_ptr.fn_name; + } + + pub fn deinit(f: *Function) void { + const gpa = f.object.dg.gpa; f.allocs.deinit(gpa); f.locals.deinit(gpa); for (f.free_locals_stack.items) |*free_locals| { @@ -455,11 +465,9 @@ pub const Function = struct { f.free_locals_stack.deinit(gpa); f.blocks.deinit(gpa); f.value_map.deinit(); + f.lazy_fns.deinit(gpa); f.object.code.deinit(); - for (f.object.dg.typedefs.values()) |typedef| { - gpa.free(typedef.rendered); - } - f.object.dg.typedefs.deinit(); + f.object.dg.ctypes.deinit(gpa); f.object.dg.fwd_decl.deinit(); f.arena.deinit(); } @@ -483,30 +491,20 @@ pub const Object = struct { pub const DeclGen = struct { gpa: std.mem.Allocator, module: *Module, - decl: *Decl, - decl_index: Decl.Index, + decl: ?*Decl, + decl_index: Decl.OptionalIndex, fwd_decl: std.ArrayList(u8), error_msg: ?*Module.ErrorMsg, - /// The key of this 
map is Type which has references to typedefs_arena. - typedefs: TypedefMap, - typedefs_arena: std.mem.Allocator, + ctypes: CType.Store, fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(dg.decl); + const src_loc = src.toSrcLoc(dg.decl.?); dg.error_msg = try Module.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } - fn getTypedefName(dg: *DeclGen, t: Type) ?[]const u8 { - if (dg.typedefs.get(t)) |typedef| { - return typedef.name; - } else { - return null; - } - } - fn renderDeclValue( dg: *DeclGen, writer: anytype, @@ -747,7 +745,7 @@ pub const DeclGen = struct { try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeAll(" zig_as_"); + try writer.writeAll(" zig_make_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { @@ -821,7 +819,7 @@ pub const DeclGen = struct { empty = false; } - if (empty) try writer.print("{x}", .{try dg.fmtIntLiteral(Type.u8, Value.undef)}); + return writer.writeByte('}'); }, .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef)}), @@ -957,7 +955,7 @@ pub const DeclGen = struct { try writer.writeByte(' '); var empty = true; if (std.math.isFinite(f128_val)) { - try writer.writeAll("zig_as_"); + try writer.writeAll("zig_make_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { @@ -992,7 +990,7 @@ pub const DeclGen = struct { // return dg.fail("Only quiet nans are supported in global variable initializers", .{}); } - try writer.writeAll("zig_as_special_"); + try writer.writeAll("zig_make_special_"); if (location == .StaticInitializer) try writer.writeAll("constant_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); @@ -1292,7 +1290,6 @@ pub const DeclGen = struct { empty = false; } - if (empty) try 
writer.print("{}", .{try dg.fmtIntLiteral(Type.u8, Value.zero)}); try writer.writeByte('}'); }, .Packed => { @@ -1309,7 +1306,7 @@ pub const DeclGen = struct { const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); var eff_num_fields: usize = 0; - for (field_vals, 0..) |_, index| { + for (0..field_vals.len) |index| { const field_ty = ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; @@ -1413,6 +1410,7 @@ pub const DeclGen = struct { return; } + var has_payload_init = false; try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { const layout = ty.unionGetLayout(target); @@ -1421,7 +1419,10 @@ pub const DeclGen = struct { try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); try writer.writeAll(", "); } - try writer.writeAll(".payload = {"); + if (!ty.unionHasAllZeroBitFieldTypes()) { + try writer.writeAll(".payload = {"); + has_payload_init = true; + } } var it = ty.unionFields().iterator(); @@ -1433,8 +1434,8 @@ pub const DeclGen = struct { try writer.print(".{ } = ", .{fmtIdent(field.key_ptr.*)}); try dg.renderValue(writer, field.value_ptr.ty, Value.undef, initializer_type); break; - } else try writer.writeAll(".empty_union = 0"); - if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); + } + if (has_payload_init) try writer.writeByte('}'); try writer.writeByte('}'); }, @@ -1457,496 +1458,62 @@ pub const DeclGen = struct { } fn renderFunctionSignature(dg: *DeclGen, w: anytype, kind: TypedefKind, export_index: u32) !void { - const fn_info = dg.decl.ty.fnInfo(); + const store = &dg.ctypes.set; + const module = dg.module; + + const fn_ty = dg.decl.?.ty; + const fn_cty_idx = try dg.typeToIndex(fn_ty, switch (kind) { + .Forward => .forward, + .Complete => .complete, + }); + + const fn_info = fn_ty.fnInfo(); if (fn_info.cc == .Naked) { switch (kind) { .Forward => try w.writeAll("zig_naked_decl "), .Complete => try w.writeAll("zig_naked "), } } - if (dg.decl.val.castTag(.function)) 
|func_payload| + if (dg.decl.?.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); + if (fn_info.return_type.tag() == .noreturn) try w.writeAll("zig_noreturn "); - const target = dg.module.getTarget(); - var ret_buf: LowerFnRetTyBuffer = undefined; - const ret_ty = lowerFnRetTy(fn_info.return_type, &ret_buf, target); - - try dg.renderType(w, ret_ty, kind); - try w.writeByte(' '); + const trailing = try renderTypePrefix( + dg.decl_index, + store.*, + module, + w, + fn_cty_idx, + .suffix, + CQualifiers.init(.{}), + ); + try w.print("{}", .{trailing}); if (toCallingConvention(fn_info.cc)) |call_conv| { try w.print("zig_callconv({s}) ", .{call_conv}); } - if (fn_info.alignment > 0 and kind == .Complete) try w.print(" zig_align_fn({})", .{fn_info.alignment}); - - try dg.renderDeclName(w, dg.decl_index, export_index); - try w.writeByte('('); - - var index: usize = 0; - for (fn_info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - if (index > 0) try w.writeAll(", "); - const name = CValue{ .arg = index }; - try dg.renderTypeAndName(w, param_type, name, .ConstArgument, 0, kind); - index += 1; + if (fn_info.alignment > 0 and kind == .Complete) { + try w.print(" zig_align_fn({})", .{fn_info.alignment}); } - if (fn_info.is_var_args) { - if (index > 0) try w.writeAll(", "); - try w.writeAll("..."); - } else if (index == 0) { - try dg.renderType(w, Type.void, kind); + try dg.renderDeclName(w, dg.decl_index.unwrap().?, export_index); + + try renderTypeSuffix(dg.decl_index, store.*, module, w, fn_cty_idx, .suffix); + + if (fn_info.alignment > 0 and kind == .Forward) { + try w.print(" zig_align_fn({})", .{fn_info.alignment}); } - try w.writeByte(')'); - if (fn_info.alignment > 0 and kind == .Forward) try w.print(" zig_align_fn({})", .{fn_info.alignment}); } - fn renderPtrToFnTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var buffer = 
std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - const fn_info = t.fnInfo(); - - const target = dg.module.getTarget(); - var ret_buf: LowerFnRetTyBuffer = undefined; - const ret_ty = lowerFnRetTy(fn_info.return_type, &ret_buf, target); - - try bw.writeAll("typedef "); - try dg.renderType(bw, ret_ty, .Forward); - try bw.writeAll(" (*"); - const name_begin = buffer.items.len; - try bw.print("zig_F_{}", .{typeToCIdentifier(t, dg.module)}); - const name_end = buffer.items.len; - try bw.writeAll(")("); - - const param_len = fn_info.param_types.len; - - var params_written: usize = 0; - var index: usize = 0; - while (index < param_len) : (index += 1) { - const param_ty = fn_info.param_types[index]; - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; - if (params_written > 0) { - try bw.writeAll(", "); - } - try dg.renderTypeAndName(bw, param_ty, .{ .bytes = "" }, .Mut, 0, .Forward); - params_written += 1; - } - - if (fn_info.is_var_args) { - if (params_written != 0) try bw.writeAll(", "); - try bw.writeAll("..."); - } else if (params_written == 0) { - try dg.renderType(bw, Type.void, .Forward); - } - try bw.writeAll(");\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; + fn indexToCType(dg: *DeclGen, idx: CType.Index) CType { + return dg.ctypes.indexToCType(idx); } - - fn renderSliceTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - std.debug.assert(t.sentinel() == null); // expected canonical type - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - var ptr_ty_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = 
t.slicePtrFieldType(&ptr_ty_buf); - const ptr_name = CValue{ .identifier = "ptr" }; - const len_ty = Type.usize; - const len_name = CValue{ .identifier = "len" }; - - try bw.writeAll("typedef struct {\n "); - try dg.renderTypeAndName(bw, ptr_ty, ptr_name, .Mut, 0, .Complete); - try bw.writeAll(";\n "); - try dg.renderTypeAndName(bw, len_ty, len_name, .Mut, 0, .Complete); - - try bw.writeAll(";\n} "); - const name_begin = buffer.items.len; - try bw.print("zig_{c}_{}", .{ - @as(u8, if (t.isConstPtr()) 'L' else 'M'), - typeToCIdentifier(t.childType(), dg.module), - }); - const name_end = buffer.items.len; - try bw.writeAll(";\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; + fn typeToIndex(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType.Index { + return dg.ctypes.typeToIndex(dg.gpa, ty, dg.module, kind); } - - fn renderFwdTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - // The forward declaration for T is stored with a key of *const T. 
- const child_ty = t.childType(); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - const tag = switch (child_ty.zigTypeTag()) { - .Struct, .ErrorUnion, .Optional => "struct", - .Union => if (child_ty.unionTagTypeSafety()) |_| "struct" else "union", - else => unreachable, - }; - try bw.writeAll("typedef "); - try bw.writeAll(tag); - const name_begin = buffer.items.len + " ".len; - try bw.writeAll(" zig_"); - switch (child_ty.zigTypeTag()) { - .Struct, .Union => { - var fqn_buf = std.ArrayList(u8).init(dg.typedefs.allocator); - defer fqn_buf.deinit(); - - const owner_decl_index = child_ty.getOwnerDecl(); - const owner_decl = dg.module.declPtr(owner_decl_index); - try owner_decl.renderFullyQualifiedName(dg.module, fqn_buf.writer()); - - try bw.print("S_{}__{d}", .{ fmtIdent(fqn_buf.items), @enumToInt(owner_decl_index) }); - }, - .ErrorUnion => { - try bw.print("E_{}", .{typeToCIdentifier(child_ty.errorUnionPayload(), dg.module)}); - }, - .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - try bw.print("Q_{}", .{typeToCIdentifier(child_ty.optionalChild(&opt_buf), dg.module)}); - }, - else => unreachable, - } - const name_end = buffer.items.len; - try buffer.ensureUnusedCapacity(" ".len + (name_end - name_begin) + ";\n".len); - buffer.appendAssumeCapacity(' '); - buffer.appendSliceAssumeCapacity(buffer.items[name_begin..name_end]); - buffer.appendSliceAssumeCapacity(";\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderStructTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var ptr_pl = Type.Payload.ElemType{ .base = .{ .tag = .single_const_pointer }, .data 
= t }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - - try buffer.appendSlice("struct "); - - var needs_pack_attr = false; - { - var it = t.structFields().iterator(); - while (it.next()) |field| { - const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; - const alignment = field.value_ptr.abi_align; - if (alignment != 0 and alignment < field_ty.abiAlignment(dg.module.getTarget())) { - needs_pack_attr = true; - try buffer.appendSlice("zig_packed("); - break; - } - } - } - - try buffer.appendSlice(name); - try buffer.appendSlice(" {\n"); - { - var it = t.structFields().iterator(); - var empty = true; - while (it.next()) |field| { - const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; - - const alignment = field.value_ptr.alignment(dg.module.getTarget(), t.containerLayout()); - const field_name = CValue{ .identifier = field.key_ptr.* }; - try buffer.append(' '); - try dg.renderTypeAndName(buffer.writer(), field_ty, field_name, .Mut, alignment, .Complete); - try buffer.appendSlice(";\n"); - - empty = false; - } - if (empty) try buffer.appendSlice(" char empty_struct;\n"); - } - if (needs_pack_attr) try buffer.appendSlice("});\n") else try buffer.appendSlice("};\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderTupleTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - - try buffer.appendSlice("typedef struct {\n"); - { - const fields = t.tupleFields(); - var field_id: usize = 0; - 
for (fields.types, 0..) |field_ty, i| { - if (!field_ty.hasRuntimeBits() or fields.values[i].tag() != .unreachable_value) continue; - - try buffer.append(' '); - try dg.renderTypeAndName(buffer.writer(), field_ty, .{ .field = field_id }, .Mut, 0, .Complete); - try buffer.appendSlice(";\n"); - - field_id += 1; - } - if (field_id == 0) try buffer.appendSlice(" char empty_tuple;\n"); - } - const name_begin = buffer.items.len + "} ".len; - try buffer.writer().print("}} zig_T_{}_{d};\n", .{ typeToCIdentifier(t, dg.module), @truncate(u16, t.hash(dg.module)) }); - const name_end = buffer.items.len - ";\n".len; - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var ptr_pl = Type.Payload.ElemType{ .base = .{ .tag = .single_const_pointer }, .data = t }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - - try buffer.appendSlice(if (t.unionTagTypeSafety()) |_| "struct " else "union "); - try buffer.appendSlice(name); - try buffer.appendSlice(" {\n"); - - const indent = if (t.unionTagTypeSafety()) |tag_ty| indent: { - const target = dg.module.getTarget(); - const layout = t.unionGetLayout(target); - if (layout.tag_size != 0) { - try buffer.append(' '); - try dg.renderTypeAndName(buffer.writer(), tag_ty, .{ .identifier = "tag" }, .Mut, 0, .Complete); - try buffer.appendSlice(";\n"); - } - try buffer.appendSlice(" union {\n"); - break :indent " "; - } else " "; - - { - var it = t.unionFields().iterator(); - var empty = true; - while 
(it.next()) |field| { - const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; - - const alignment = field.value_ptr.abi_align; - const field_name = CValue{ .identifier = field.key_ptr.* }; - try buffer.appendSlice(indent); - try dg.renderTypeAndName(buffer.writer(), field_ty, field_name, .Mut, alignment, .Complete); - try buffer.appendSlice(";\n"); - - empty = false; - } - if (empty) { - try buffer.appendSlice(indent); - try buffer.appendSlice("char empty_union;\n"); - } - } - - if (t.unionTagTypeSafety()) |_| try buffer.appendSlice(" } payload;\n"); - try buffer.appendSlice("};\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderErrorUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - assert(t.errorUnionSet().tag() == .anyerror); - - var ptr_pl = Type.Payload.ElemType{ .base = .{ .tag = .single_const_pointer }, .data = t }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - const payload_ty = t.errorUnionPayload(); - const payload_name = CValue{ .identifier = "payload" }; - const error_ty = t.errorUnionSet(); - const error_name = CValue{ .identifier = "error" }; - - const target = dg.module.getTarget(); - const payload_align = payload_ty.abiAlignment(target); - const error_align = error_ty.abiAlignment(target); - try bw.writeAll("struct "); - try bw.writeAll(name); - try bw.writeAll(" {\n "); - if (error_align > payload_align) { - try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0, .Complete); - try bw.writeAll(";\n "); - try 
dg.renderTypeAndName(bw, error_ty, error_name, .Mut, 0, .Complete); - } else { - try dg.renderTypeAndName(bw, error_ty, error_name, .Mut, 0, .Complete); - try bw.writeAll(";\n "); - try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0, .Complete); - } - try bw.writeAll(";\n};\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderArrayTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - const info = t.arrayInfo(); - std.debug.assert(info.sentinel == null); // expected canonical type - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - try bw.writeAll("typedef "); - try dg.renderType(bw, info.elem_type, .Complete); - - const name_begin = buffer.items.len + " ".len; - try bw.print(" zig_A_{}_{d}", .{ typeToCIdentifier(info.elem_type, dg.module), info.len }); - const name_end = buffer.items.len; - - const c_len = if (info.len > 0) info.len else 1; - var c_len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = c_len }; - const c_len_val = Value.initPayload(&c_len_pl.base); - try bw.print("[{}];\n", .{try dg.fmtIntLiteral(Type.usize, c_len_val)}); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderOptionalTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var ptr_pl = Type.Payload.ElemType{ .base = .{ .tag = .single_const_pointer }, .data = t }; - const ptr_ty = 
Type.initPayload(&ptr_pl.base); - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - - var opt_buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&opt_buf); - - try bw.writeAll("struct "); - try bw.writeAll(name); - try bw.writeAll(" {\n"); - try dg.renderTypeAndName(bw, child_ty, .{ .identifier = "payload" }, .Mut, 0, .Complete); - try bw.writeAll(";\n "); - try dg.renderTypeAndName(bw, Type.bool, .{ .identifier = "is_null" }, .Mut, 0, .Complete); - try bw.writeAll(";\n};\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn renderOpaqueTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - const opaque_ty = t.cast(Type.Payload.Opaque).?.data; - const unqualified_name = dg.module.declPtr(opaque_ty.owner_decl).name; - const fqn = try opaque_ty.getFullyQualifiedName(dg.module); - defer dg.typedefs.allocator.free(fqn); - - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - - try buffer.writer().print("typedef struct { } ", .{fmtIdent(std.mem.span(unqualified_name))}); - - const name_begin = buffer.items.len; - try buffer.writer().print("zig_O_{}", .{fmtIdent(fqn)}); - const name_end = buffer.items.len; - try buffer.appendSlice(";\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try t.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; + fn typeToCType(dg: *DeclGen, ty: 
Type, kind: CType.Kind) !CType { + return dg.ctypes.typeToCType(dg.gpa, ty, dg.module, kind); } /// Renders a type as a single identifier, generating intermediate typedefs @@ -1959,275 +1526,27 @@ pub const DeclGen = struct { /// |---------------------|-----------------|---------------------| /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "zig_A_uint8_t_10" | + /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | /// fn renderType( dg: *DeclGen, w: anytype, t: Type, - kind: TypedefKind, + _: TypedefKind, ) error{ OutOfMemory, AnalysisFail }!void { - const target = dg.module.getTarget(); - - switch (t.zigTypeTag()) { - .Void => try w.writeAll("void"), - .Bool => try w.writeAll("bool"), - .NoReturn, .Float => { - try w.writeAll("zig_"); - try t.print(w, dg.module); - }, - .Int => { - if (t.isNamedInt()) { - try w.writeAll("zig_"); - try t.print(w, dg.module); - } else { - return renderTypeUnnamed(dg, w, t, kind); - } - }, - .ErrorSet => { - return renderTypeUnnamed(dg, w, t, kind); - }, - .Pointer => { - const ptr_info = t.ptrInfo().data; - if (ptr_info.size == .Slice) { - var slice_pl = Type.Payload.ElemType{ - .base = .{ .tag = if (t.ptrIsMutable()) .mut_slice else .const_slice }, - .data = ptr_info.pointee_type, - }; - const slice_ty = Type.initPayload(&slice_pl.base); - - const name = dg.getTypedefName(slice_ty) orelse - try dg.renderSliceTypedef(slice_ty); - - return w.writeAll(name); - } - - if (ptr_info.pointee_type.zigTypeTag() == .Fn) { - const name = dg.getTypedefName(ptr_info.pointee_type) orelse - try dg.renderPtrToFnTypedef(ptr_info.pointee_type); - - return w.writeAll(name); - } - - if (ptr_info.host_size != 0) { - var host_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const host_ty = Type.initPayload(&host_pl.base); - - try dg.renderType(w, host_ty, .Forward); - } else if (t.isCPtr() 
and ptr_info.pointee_type.eql(Type.u8, dg.module) and - (dg.decl.val.tag() == .extern_fn or - std.mem.eql(u8, std.mem.span(dg.decl.name), "main"))) - { - // This is a hack, since the c compiler expects a lot of external - // library functions to have char pointers in their signatures, but - // u8 and i8 produce unsigned char and signed char respectively, - // which in C are (not very usefully) different than char. - try w.writeAll("char"); - } else try dg.renderType(w, switch (ptr_info.pointee_type.tag()) { - .anyopaque => Type.void, - else => ptr_info.pointee_type, - }, .Forward); - if (t.isConstPtr()) try w.writeAll(" const"); - if (t.isVolatilePtr()) try w.writeAll(" volatile"); - return w.writeAll(" *"); - }, - .Array, .Vector => { - var array_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{ - .len = t.arrayLenIncludingSentinel(), - .elem_type = t.childType(), - } }; - const array_ty = Type.initPayload(&array_pl.base); - - const name = dg.getTypedefName(array_ty) orelse - try dg.renderArrayTypedef(array_ty); - - return w.writeAll(name); - }, - .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&opt_buf); - - if (!child_ty.hasRuntimeBitsIgnoreComptime()) - return dg.renderType(w, Type.bool, kind); - - if (t.optionalReprIsPayload()) - return dg.renderType(w, child_ty, kind); - - switch (kind) { - .Complete => { - const name = dg.getTypedefName(t) orelse - try dg.renderOptionalTypedef(t); - - try w.writeAll(name); - }, - .Forward => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = t, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - try w.writeAll(name); - }, - } - }, - .ErrorUnion => { - const payload_ty = t.errorUnionPayload(); - - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) - return dg.renderType(w, Type.anyerror, kind); - - var error_union_pl = 
Type.Payload.ErrorUnion{ - .data = .{ .error_set = Type.anyerror, .payload = payload_ty }, - }; - const error_union_ty = Type.initPayload(&error_union_pl.base); - - switch (kind) { - .Complete => { - const name = dg.getTypedefName(error_union_ty) orelse - try dg.renderErrorUnionTypedef(error_union_ty); - - try w.writeAll(name); - }, - .Forward => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = error_union_ty, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - try w.writeAll(name); - }, - } - }, - .Struct, .Union => |tag| if (t.containerLayout() == .Packed) { - if (t.castTag(.@"struct")) |struct_obj| { - try dg.renderType(w, struct_obj.data.backing_int_ty, kind); - } else { - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, t.bitSize(target)), - }; - try dg.renderType(w, Type.initPayload(&buf.base), kind); - } - } else if (t.isSimpleTupleOrAnonStruct()) { - const ExpectedContents = struct { types: [8]Type, values: [8]Value }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), dg.gpa); - const allocator = stack.get(); - - var tuple_storage = std.MultiArrayList(struct { type: Type, value: Value }){}; - defer tuple_storage.deinit(allocator); - try tuple_storage.ensureTotalCapacity(allocator, t.structFieldCount()); - - const fields = t.tupleFields(); - for (fields.values, 0..) 
|value, index| - if (value.tag() == .unreachable_value) - tuple_storage.appendAssumeCapacity(.{ - .type = fields.types[index], - .value = value, - }); - - const tuple_slice = tuple_storage.slice(); - var tuple_pl = Type.Payload.Tuple{ .data = .{ - .types = tuple_slice.items(.type), - .values = tuple_slice.items(.value), - } }; - const tuple_ty = Type.initPayload(&tuple_pl.base); - - const name = dg.getTypedefName(tuple_ty) orelse - try dg.renderTupleTypedef(tuple_ty); - - try w.writeAll(name); - } else switch (kind) { - .Complete => { - const name = dg.getTypedefName(t) orelse switch (tag) { - .Struct => try dg.renderStructTypedef(t), - .Union => try dg.renderUnionTypedef(t), - else => unreachable, - }; - - try w.writeAll(name); - }, - .Forward => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = t, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); - - const name = dg.getTypedefName(ptr_ty) orelse - try dg.renderFwdTypedef(ptr_ty); - - try w.writeAll(name); - }, - }, - .Enum => { - // For enums, we simply use the integer tag type. - var int_tag_buf: Type.Payload.Bits = undefined; - const int_tag_ty = t.intTagType(&int_tag_buf); - - try dg.renderType(w, int_tag_ty, kind); - }, - .Opaque => switch (t.tag()) { - .@"opaque" => { - const name = dg.getTypedefName(t) orelse - try dg.renderOpaqueTypedef(t); - - try w.writeAll(name); - }, - else => unreachable, - }, - - .Frame, - .AnyFrame, - => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ - @tagName(tag), - }), - - .Fn => unreachable, // This is a function body, not a function pointer. 
- - .Null, - .Undefined, - .EnumLiteral, - .ComptimeFloat, - .ComptimeInt, - .Type, - => unreachable, // must be const or comptime - } - } - - fn renderTypeUnnamed( - dg: *DeclGen, - w: anytype, - t: Type, - kind: TypedefKind, - ) error{ OutOfMemory, AnalysisFail }!void { - const target = dg.module.getTarget(); - const int_info = t.intInfo(target); - if (toCIntBits(int_info.bits)) |c_bits| - return w.print("zig_{c}{d}", .{ signAbbrev(int_info.signedness), c_bits }) - else if (loweredArrayInfo(t, target)) |array_info| { - assert(array_info.sentinel == null); - var array_pl = Type.Payload.Array{ - .base = .{ .tag = .array }, - .data = .{ .len = array_info.len, .elem_type = array_info.elem_type }, - }; - const array_ty = Type.initPayload(&array_pl.base); - - return dg.renderType(w, array_ty, kind); - } else return dg.fail("C backend: Unable to lower unnamed integer type {}", .{ - t.fmt(dg.module), - }); + const store = &dg.ctypes.set; + const module = dg.module; + const idx = try dg.typeToIndex(t, .complete); + _ = try renderTypePrefix( + dg.decl_index, + store.*, + module, + w, + idx, + .suffix, + CQualifiers.init(.{}), + ); + try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix); } const IntCastContext = union(enum) { @@ -2254,16 +1573,16 @@ pub const DeclGen = struct { /// Renders a cast to an int type, from either an int or a pointer. /// /// Some platforms don't have 128 bit integers, so we need to use - /// the zig_as_ and zig_lo_ macros in those cases. + /// the zig_make_ and zig_lo_ macros in those cases. 
/// /// | Dest type bits | Src type | Result /// |------------------|------------------|---------------------------| /// | < 64 bit integer | pointer | (zig_)(zig_size)src /// | < 64 bit integer | < 64 bit integer | (zig_)src /// | < 64 bit integer | > 64 bit integer | zig_lo(src) - /// | > 64 bit integer | pointer | zig_as_(0, (zig_size)src) - /// | > 64 bit integer | < 64 bit integer | zig_as_(0, src) - /// | > 64 bit integer | > 64 bit integer | zig_as_(zig_hi_(src), zig_lo_(src)) + /// | > 64 bit integer | pointer | zig_make_(0, (zig_size)src) + /// | > 64 bit integer | < 64 bit integer | zig_make_(0, src) + /// | > 64 bit integer | > 64 bit integer | zig_make_(zig_hi_(src), zig_lo_(src)) fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void { const target = dg.module.getTarget(); const dest_bits = dest_ty.bitSize(target); @@ -2301,7 +1620,7 @@ pub const DeclGen = struct { try context.writeValue(dg, w, src_ty, .FunctionArgument); try w.writeByte(')'); } else if (dest_bits > 64 and src_bits <= 64) { - try w.writeAll("zig_as_"); + try w.writeAll("zig_make_"); try dg.renderTypeForBuiltinFnName(w, dest_ty); try w.writeAll("(0, "); // TODO: Should the 0 go through fmtIntLiteral? 
if (src_is_ptr) { @@ -2313,7 +1632,7 @@ pub const DeclGen = struct { try w.writeByte(')'); } else { assert(!src_is_ptr); - try w.writeAll("zig_as_"); + try w.writeAll("zig_make_"); try dg.renderTypeForBuiltinFnName(w, dest_ty); try w.writeAll("(zig_hi_"); try dg.renderTypeForBuiltinFnName(w, src_eff_ty); @@ -2337,10 +1656,10 @@ pub const DeclGen = struct { /// |---------------------|-----------------|---------------------| /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "zig_A_uint8_t_10" | + /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | /// fn renderTypecast(dg: *DeclGen, w: anytype, ty: Type) error{ OutOfMemory, AnalysisFail }!void { - return renderTypeAndName(dg, w, ty, .{ .bytes = "" }, .Mut, 0, .Complete); + try dg.renderType(w, ty, undefined); } /// Renders a type and name in field declaration/definition format. @@ -2350,7 +1669,7 @@ pub const DeclGen = struct { /// |---------------------|-----------------|---------------------| /// | `renderTypecast` | "uint8_t *" | "uint8_t *[10]" | /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "zig_A_uint8_t_10" | + /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | /// fn renderTypeAndName( dg: *DeclGen, @@ -2359,65 +1678,45 @@ pub const DeclGen = struct { name: CValue, mutability: Mutability, alignment: u32, - kind: TypedefKind, + _: TypedefKind, ) error{ OutOfMemory, AnalysisFail }!void { - var suffix = std.ArrayList(u8).init(dg.gpa); - defer suffix.deinit(); - const suffix_writer = suffix.writer(); + const store = &dg.ctypes.set; + const module = dg.module; - // Any top-level array types are rendered here as a suffix, which - // avoids creating typedefs for every array type - const target = dg.module.getTarget(); - var render_ty = ty; - var depth: u32 = 0; - while (loweredArrayInfo(render_ty, target)) |array_info| { - const c_len = 
array_info.len + @boolToInt(array_info.sentinel != null); - var c_len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = c_len }; - const c_len_val = Value.initPayload(&c_len_pl.base); - - try suffix_writer.writeByte('['); - if (mutability == .ConstArgument and depth == 0) try suffix_writer.writeAll("zig_const_arr "); - try suffix.writer().print("{}]", .{try dg.fmtIntLiteral(Type.usize, c_len_val)}); - render_ty = array_info.elem_type; - depth += 1; - } - - if (alignment != 0) { - const abi_alignment = ty.abiAlignment(target); - if (alignment < abi_alignment) { - try w.print("zig_under_align({}) ", .{alignment}); - } else if (alignment > abi_alignment) { - try w.print("zig_align({}) ", .{alignment}); - } - } - try dg.renderType(w, render_ty, kind); - - const const_prefix = switch (mutability) { - .Const, .ConstArgument => "const ", - .Mut => "", + if (alignment != 0) switch (std.math.order(alignment, ty.abiAlignment(dg.module.getTarget()))) { + .lt => try w.print("zig_under_align({}) ", .{alignment}), + .eq => {}, + .gt => try w.print("zig_align({}) ", .{alignment}), }; - try w.print(" {s}", .{const_prefix}); + + const idx = try dg.typeToIndex(ty, .complete); + const trailing = try renderTypePrefix( + dg.decl_index, + store.*, + module, + w, + idx, + .suffix, + CQualifiers.init(.{ .@"const" = mutability == .@"const" }), + ); + try w.print("{}", .{trailing}); try dg.writeCValue(w, name); - try w.writeAll(suffix.items); + try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix); } - fn renderTagNameFn(dg: *DeclGen, enum_ty: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); - defer buffer.deinit(); - const bw = buffer.writer(); - + fn renderTagNameFn(dg: *DeclGen, w: anytype, fn_name: []const u8, enum_ty: Type) !void { const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - try buffer.appendSlice("static "); - try dg.renderType(bw, name_slice_ty, .Complete); - const 
name_begin = buffer.items.len + " ".len; - try bw.print(" zig_tagName_{}_{d}(", .{ typeToCIdentifier(enum_ty, dg.module), @enumToInt(enum_ty.getOwnerDecl()) }); - const name_end = buffer.items.len - "(".len; - try dg.renderTypeAndName(bw, enum_ty, .{ .identifier = "tag" }, .Const, 0, .Complete); - try buffer.appendSlice(") {\n switch (tag) {\n"); + try w.writeAll("static "); + try dg.renderType(w, name_slice_ty, .Complete); + try w.writeByte(' '); + try w.writeAll(fn_name); + try w.writeByte('('); + try dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, .@"const", 0, .Complete); + try w.writeAll(") {\n switch (tag) {\n"); for (enum_ty.enumFields().keys(), 0..) |name, index| { - const name_z = try dg.typedefs.allocator.dupeZ(u8, name); - defer dg.typedefs.allocator.free(name_z); + const name_z = try dg.gpa.dupeZ(u8, name); + defer dg.gpa.free(name_z); const name_bytes = name_z[0 .. name_z.len + 1]; var tag_pl: Value.Payload.U32 = .{ @@ -2438,40 +1737,23 @@ pub const DeclGen = struct { var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; const len_val = Value.initPayload(&len_pl.base); - try bw.print(" case {}: {{\n static ", .{try dg.fmtIntLiteral(enum_ty, int_val)}); - try dg.renderTypeAndName(bw, name_ty, .{ .identifier = "name" }, .Const, 0, .Complete); - try buffer.appendSlice(" = "); - try dg.renderValue(bw, name_ty, name_val, .Initializer); - try buffer.appendSlice(";\n return ("); - try dg.renderTypecast(bw, name_slice_ty); - try bw.print("){{{}, {}}};\n", .{ + try w.print(" case {}: {{\n static ", .{try dg.fmtIntLiteral(enum_ty, int_val)}); + try dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, .@"const", 0, .Complete); + try w.writeAll(" = "); + try dg.renderValue(w, name_ty, name_val, .Initializer); + try w.writeAll(";\n return ("); + try dg.renderTypecast(w, name_slice_ty); + try w.print("){{{}, {}}};\n", .{ fmtIdent("name"), try dg.fmtIntLiteral(Type.usize, len_val), }); - try buffer.appendSlice(" }\n"); + 
try w.writeAll(" }\n"); } - try buffer.appendSlice(" }\n while ("); - try dg.renderValue(bw, Type.bool, Value.true, .Other); - try buffer.appendSlice(") "); - _ = try airBreakpoint(bw); - try buffer.appendSlice("}\n"); - - const rendered = try buffer.toOwnedSlice(); - errdefer dg.typedefs.allocator.free(rendered); - const name = rendered[name_begin..name_end]; - - try dg.typedefs.ensureUnusedCapacity(1); - dg.typedefs.putAssumeCapacityNoClobber( - try enum_ty.copy(dg.typedefs_arena), - .{ .name = name, .rendered = rendered }, - ); - - return name; - } - - fn getTagNameFn(dg: *DeclGen, enum_ty: Type) ![]const u8 { - return dg.getTypedefName(enum_ty) orelse - try dg.renderTagNameFn(enum_ty); + try w.writeAll(" }\n while ("); + try dg.renderValue(w, Type.bool, Value.true, .Other); + try w.writeAll(") "); + _ = try airBreakpoint(w); + try w.writeAll("}\n"); } fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { @@ -2492,10 +1774,11 @@ pub const DeclGen = struct { fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .none => unreachable, - .local => |i| return w.print("t{d}", .{i}), + .local, .new_local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), .constant => unreachable, .arg => |i| return w.print("a{d}", .{i}), + .arg_array => |i| return dg.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" }), .field => |i| return w.print("f{d}", .{i}), .decl => |decl| return dg.renderDeclName(w, decl, 0), .decl_ref => |decl| { @@ -2511,10 +1794,15 @@ pub const DeclGen = struct { fn writeCValueDeref(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .none => unreachable, - .local => |i| return w.print("(*t{d})", .{i}), + .local, .new_local => |i| return w.print("(*t{d})", .{i}), .local_ref => |i| return w.print("t{d}", .{i}), .constant => unreachable, .arg => |i| return w.print("(*a{d})", .{i}), + .arg_array => |i| { + try w.writeAll("(*"); + try dg.writeCValueMember(w, .{ .arg = i 
}, .{ .identifier = "array" }); + return w.writeByte(')'); + }, .field => |i| return w.print("f{d}", .{i}), .decl => |decl| { try w.writeAll("(*"); @@ -2541,7 +1829,7 @@ pub const DeclGen = struct { fn writeCValueDerefMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .none, .constant, .field, .undef => unreachable, - .local, .arg, .decl, .identifier, .bytes => { + .new_local, .local, .arg, .arg_array, .decl, .identifier, .bytes => { try dg.writeCValue(writer, c_value); try writer.writeAll("->"); }, @@ -2668,10 +1956,493 @@ pub const DeclGen = struct { } }; -pub fn genGlobalAsm(mod: *Module, code: *std.ArrayList(u8)) !void { +const CTypeFix = enum { prefix, suffix }; +const CQualifiers = std.enums.EnumSet(enum { @"const", @"volatile", restrict }); +const CTypeRenderTrailing = enum { + no_space, + maybe_space, + + pub fn format( + self: @This(), + comptime fmt: []const u8, + _: std.fmt.FormatOptions, + w: anytype, + ) @TypeOf(w).Error!void { + if (fmt.len != 0) + @compileError("invalid format string '" ++ fmt ++ "' for type '" ++ + @typeName(@This()) ++ "'"); + comptime assert(fmt.len == 0); + switch (self) { + .no_space => {}, + .maybe_space => try w.writeByte(' '), + } + } +}; +fn renderTypeName( + mod: *Module, + w: anytype, + idx: CType.Index, + cty: CType, + attributes: []const u8, +) !void { + switch (cty.tag()) { + else => unreachable, + + .fwd_anon_struct, + .fwd_anon_union, + => |tag| try w.print("{s} {s}anon__lazy_{d}", .{ + @tagName(tag)["fwd_anon_".len..], + attributes, + idx, + }), + + .fwd_struct, + .fwd_union, + => |tag| { + const owner_decl = cty.cast(CType.Payload.FwdDecl).?.data; + try w.print("{s} {s}{}__{d}", .{ + @tagName(tag)["fwd_".len..], + attributes, + fmtIdent(mem.span(mod.declPtr(owner_decl).name)), + @enumToInt(owner_decl), + }); + }, + } +} +fn renderTypePrefix( + decl: Decl.OptionalIndex, + store: CType.Store.Set, + mod: *Module, + w: anytype, + idx: CType.Index, + parent_fix: CTypeFix, + 
qualifiers: CQualifiers, +) @TypeOf(w).Error!CTypeRenderTrailing { + var trailing = CTypeRenderTrailing.maybe_space; + + const cty = store.indexToCType(idx); + switch (cty.tag()) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + .zig_c_longdouble, + => |tag| try w.writeAll(@tagName(tag)), + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => |tag| { + const child_idx = cty.cast(CType.Payload.Child).?.data; + const child_trailing = try renderTypePrefix( + decl, + store, + mod, + w, + child_idx, + .prefix, + CQualifiers.init(.{ .@"const" = switch (tag) { + .pointer, .pointer_volatile => false, + .pointer_const, .pointer_const_volatile => true, + else => unreachable, + }, .@"volatile" = switch (tag) { + .pointer, .pointer_const => false, + .pointer_volatile, .pointer_const_volatile => true, + else => unreachable, + } }), + ); + try w.print("{}*", .{child_trailing}); + trailing = .no_space; + }, + + .array, + .vector, + => { + const child_idx = cty.cast(CType.Payload.Sequence).?.data.elem_type; + const child_trailing = try renderTypePrefix( + decl, + store, + mod, + w, + child_idx, + .suffix, + qualifiers, + ); + switch (parent_fix) { + .prefix => { + try w.print("{}(", .{child_trailing}); + return .no_space; + }, + .suffix => return child_trailing, + } + }, + + .fwd_anon_struct, + .fwd_anon_union, + => if (decl.unwrap()) |decl_index| + try w.print("anon__{d}_{d}", .{ @enumToInt(decl_index), idx }) + else + try renderTypeName(mod, w, idx, cty, ""), + + .fwd_struct, + .fwd_union, + => try 
renderTypeName(mod, w, idx, cty, ""), + + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => |tag| { + try w.print("{s} {s}", .{ + @tagName(tag)["unnamed_".len..], + if (cty.isPacked()) "zig_packed(" else "", + }); + try renderAggregateFields(mod, w, store, cty, 1); + if (cty.isPacked()) try w.writeByte(')'); + }, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => return renderTypePrefix( + decl, + store, + mod, + w, + cty.cast(CType.Payload.Aggregate).?.data.fwd_decl, + parent_fix, + qualifiers, + ), + + .function, + .varargs_function, + => { + const child_trailing = try renderTypePrefix( + decl, + store, + mod, + w, + cty.cast(CType.Payload.Function).?.data.return_type, + .suffix, + CQualifiers.init(.{}), + ); + switch (parent_fix) { + .prefix => { + try w.print("{}(", .{child_trailing}); + return .no_space; + }, + .suffix => return child_trailing, + } + }, + } + + var qualifier_it = qualifiers.iterator(); + while (qualifier_it.next()) |qualifier| { + try w.print("{}{s}", .{ trailing, @tagName(qualifier) }); + trailing = .maybe_space; + } + + return trailing; +} +fn renderTypeSuffix( + decl: Decl.OptionalIndex, + store: CType.Store.Set, + mod: *Module, + w: anytype, + idx: CType.Index, + parent_fix: CTypeFix, +) @TypeOf(w).Error!void { + const cty = store.indexToCType(idx); + switch (cty.tag()) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + .zig_c_longdouble, + => {}, + + .pointer, + .pointer_const, + .pointer_volatile, + 
.pointer_const_volatile, + => try renderTypeSuffix(decl, store, mod, w, cty.cast(CType.Payload.Child).?.data, .prefix), + + .array, + .vector, + => { + switch (parent_fix) { + .prefix => try w.writeByte(')'), + .suffix => {}, + } + + try w.print("[{}]", .{cty.cast(CType.Payload.Sequence).?.data.len}); + try renderTypeSuffix( + decl, + store, + mod, + w, + cty.cast(CType.Payload.Sequence).?.data.elem_type, + .suffix, + ); + }, + + .fwd_anon_struct, + .fwd_anon_union, + .fwd_struct, + .fwd_union, + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => {}, + + .function, + .varargs_function, + => |tag| { + switch (parent_fix) { + .prefix => try w.writeByte(')'), + .suffix => {}, + } + + const data = cty.cast(CType.Payload.Function).?.data; + + try w.writeByte('('); + var need_comma = false; + for (data.param_types, 0..) |param_type, param_i| { + if (need_comma) try w.writeAll(", "); + need_comma = true; + const trailing = try renderTypePrefix( + decl, + store, + mod, + w, + param_type, + .suffix, + CQualifiers.init(.{ .@"const" = true }), + ); + try w.print("{}a{d}", .{ trailing, param_i }); + try renderTypeSuffix(decl, store, mod, w, param_type, .suffix); + } + switch (tag) { + .function => {}, + .varargs_function => { + if (need_comma) try w.writeAll(", "); + need_comma = true; + try w.writeAll("..."); + }, + else => unreachable, + } + if (!need_comma) try w.writeAll("void"); + try w.writeByte(')'); + + try renderTypeSuffix(decl, store, mod, w, data.return_type, .suffix); + }, + } +} +fn renderAggregateFields( + mod: *Module, + writer: anytype, + store: CType.Store.Set, + cty: CType, + indent: usize, +) !void { + try writer.writeAll("{\n"); + const fields = cty.fields(); + for (fields) |field| { + try writer.writeByteNTimes(' ', indent + 1); + switch (std.math.order(field.alignas.@"align", field.alignas.abi)) { + .lt => try 
writer.print("zig_under_align({}) ", .{field.alignas.getAlign()}), + .eq => {}, + .gt => try writer.print("zig_align({}) ", .{field.alignas.getAlign()}), + } + const trailing = try renderTypePrefix( + .none, + store, + mod, + writer, + field.type, + .suffix, + CQualifiers.init(.{}), + ); + try writer.print("{}{ }", .{ trailing, fmtIdent(mem.span(field.name)) }); + try renderTypeSuffix(.none, store, mod, writer, field.type, .suffix); + try writer.writeAll(";\n"); + } + try writer.writeByteNTimes(' ', indent); + try writer.writeByte('}'); +} + +pub fn genTypeDecl( + mod: *Module, + writer: anytype, + global_store: CType.Store.Set, + global_idx: CType.Index, + decl: Decl.OptionalIndex, + decl_store: CType.Store.Set, + decl_idx: CType.Index, + found_existing: bool, +) !void { + const global_cty = global_store.indexToCType(global_idx); + switch (global_cty.tag()) { + .fwd_anon_struct => if (decl != .none) { + try writer.writeAll("typedef "); + _ = try renderTypePrefix( + .none, + global_store, + mod, + writer, + global_idx, + .suffix, + CQualifiers.init(.{}), + ); + try writer.writeByte(' '); + _ = try renderTypePrefix( + decl, + decl_store, + mod, + writer, + decl_idx, + .suffix, + CQualifiers.init(.{}), + ); + try writer.writeAll(";\n"); + }, + + .fwd_struct, + .fwd_union, + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => |tag| if (!found_existing) { + switch (tag) { + .fwd_struct, + .fwd_union, + => { + const owner_decl = global_cty.cast(CType.Payload.FwdDecl).?.data; + _ = try renderTypePrefix( + .none, + global_store, + mod, + writer, + global_idx, + .suffix, + CQualifiers.init(.{}), + ); + try writer.writeAll("; // "); + try mod.declPtr(owner_decl).renderFullyQualifiedName(mod, writer); + try writer.writeByte('\n'); + }, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => { + const fwd_idx = global_cty.cast(CType.Payload.Aggregate).?.data.fwd_decl; + try renderTypeName( 
+ mod, + writer, + fwd_idx, + global_store.indexToCType(fwd_idx), + if (global_cty.isPacked()) "zig_packed(" else "", + ); + try writer.writeByte(' '); + try renderAggregateFields(mod, writer, global_store, global_cty, 0); + if (global_cty.isPacked()) try writer.writeByte(')'); + try writer.writeAll(";\n"); + }, + + else => unreachable, + } + }, + + else => {}, + } +} + +pub fn genGlobalAsm(mod: *Module, writer: anytype) !void { var it = mod.global_assembly.valueIterator(); while (it.next()) |asm_source| { - try code.writer().print("__asm({s});\n", .{fmtStringLiteral(asm_source.*)}); + try writer.print("__asm({s});\n", .{fmtStringLiteral(asm_source.*)}); } } @@ -2709,7 +2480,7 @@ pub fn genErrDecls(o: *Object) !void { const name_val = Value.initPayload(&name_pl.base); try writer.writeAll("static "); - try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, .Const, 0, .Complete); + try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, .@"const", 0, .Complete); try writer.writeAll(" = "); try o.dg.renderValue(writer, name_ty, name_val, .StaticInitializer); try writer.writeAll(";\n"); @@ -2722,7 +2493,7 @@ pub fn genErrDecls(o: *Object) !void { const name_array_ty = Type.initPayload(&name_array_ty_pl.base); try writer.writeAll("static "); - try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .Const, 0, .Complete); + try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .@"const", 0, .Complete); try writer.writeAll(" = {"); for (o.dg.module.error_name_list.items, 0..) |name, value| { if (value != 0) try writer.writeByte(','); @@ -2742,14 +2513,27 @@ fn genExports(o: *Object) !void { defer tracy.end(); const fwd_decl_writer = o.dg.fwd_decl.writer(); - if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..], 0..) 
|@"export", i| { - try fwd_decl_writer.writeAll("zig_export("); - try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, 1 + i)); - try fwd_decl_writer.print(", {s}, {s});\n", .{ - fmtStringLiteral(exports.items[0].options.name), - fmtStringLiteral(@"export".options.name), - }); - }; + if (o.dg.module.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { + for (exports.items[1..], 1..) |@"export", i| { + try fwd_decl_writer.writeAll("zig_export("); + try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, i)); + try fwd_decl_writer.print(", {s}, {s});\n", .{ + fmtStringLiteral(exports.items[0].options.name), + fmtStringLiteral(@"export".options.name), + }); + } + } +} + +pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { + const writer = o.writer(); + switch (lazy_fn.key_ptr.*) { + .tag_name => _ = try o.dg.renderTagNameFn( + writer, + lazy_fn.value_ptr.fn_name, + lazy_fn.value_ptr.data.tag_name, + ), + } } pub fn genFunc(f: *Function) !void { @@ -2759,8 +2543,8 @@ pub fn genFunc(f: *Function) !void { const o = &f.object; const gpa = o.dg.gpa; const tv: TypedValue = .{ - .ty = o.dg.decl.ty, - .val = o.dg.decl.val, + .ty = o.dg.decl.?.ty, + .val = o.dg.decl.?.val, }; o.code_header = std.ArrayList(u8).init(gpa); @@ -2799,9 +2583,8 @@ pub fn genFunc(f: *Function) !void { // missing. These are added now to complete the map. Then we can sort by // alignment, descending. const free_locals = f.getFreeLocals(); - const values = f.allocs.values(); - for (f.allocs.keys(), 0..) 
|local_index, i| { - if (values[i]) continue; // static + for (f.allocs.keys(), f.allocs.values()) |local_index, value| { + if (value) continue; // static const local = f.locals.items[local_index]; log.debug("inserting local {d} into free_locals", .{local_index}); const gop = try free_locals.getOrPutContext(gpa, local.ty, f.tyHashCtx()); @@ -2830,7 +2613,7 @@ pub fn genFunc(f: *Function) !void { w, local.ty, .{ .local = local_index }, - .Mut, + .mut, local.alignment, .Complete, ); @@ -2850,10 +2633,10 @@ pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); - const tv: TypedValue = .{ - .ty = o.dg.decl.ty, - .val = o.dg.decl.val, - }; + const decl = o.dg.decl.?; + const decl_c_value: CValue = .{ .decl = o.dg.decl_index.unwrap().? }; + const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; + if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return; if (tv.val.tag() == .extern_fn) { const fwd_decl_writer = o.dg.fwd_decl.writer(); @@ -2867,11 +2650,9 @@ pub fn genDecl(o: *Object) !void { const is_global = o.dg.declIsGlobal(tv) or variable.is_extern; const fwd_decl_writer = o.dg.fwd_decl.writer(); - const decl_c_value = CValue{ .decl = o.dg.decl_index }; - try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); if (variable.is_threadlocal) try fwd_decl_writer.writeAll("zig_threadlocal "); - try o.dg.renderTypeAndName(fwd_decl_writer, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete); + try o.dg.renderTypeAndName(fwd_decl_writer, decl.ty, decl_c_value, .mut, decl.@"align", .Complete); try fwd_decl_writer.writeAll(";\n"); try genExports(o); @@ -2880,27 +2661,26 @@ pub fn genDecl(o: *Object) !void { const w = o.writer(); if (!is_global) try w.writeAll("static "); if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); - if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); - try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", 
.Complete); - if (o.dg.decl.@"linksection" != null) try w.writeAll(", read, write)"); + if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .mut, decl.@"align", .Complete); + if (decl.@"linksection" != null) try w.writeAll(", read, write)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { - const is_global = o.dg.module.decl_exports.contains(o.dg.decl_index); + const is_global = o.dg.module.decl_exports.contains(decl_c_value.decl); const fwd_decl_writer = o.dg.fwd_decl.writer(); - const decl_c_value: CValue = .{ .decl = o.dg.decl_index }; try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static "); - try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete); + try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .@"const", decl.@"align", .Complete); try fwd_decl_writer.writeAll(";\n"); const w = o.writer(); if (!is_global) try w.writeAll("static "); - if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); - try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete); - if (o.dg.decl.@"linksection" != null) try w.writeAll(", read)"); + if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .@"const", decl.@"align", .Complete); + if (decl.@"linksection" != null) try w.writeAll(", read)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer); try w.writeAll(";\n"); @@ -2912,8 +2692,8 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { defer tracy.end(); const tv: TypedValue = .{ - .ty = dg.decl.ty, - .val = dg.decl.val, + .ty = dg.decl.?.ty, + .val = dg.decl.?.val, }; const writer = 
dg.fwd_decl.writer(); @@ -2951,7 +2731,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, // zig fmt: off .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies - .arg => airArg(f), + .arg => try airArg(f, inst), .breakpoint => try airBreakpoint(f.object.writer()), .ret_addr => try airRetAddr(f, inst), @@ -3200,13 +2980,14 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .c_va_start => return f.fail("TODO implement c_va_start", .{}), // zig fmt: on }; - if (result_value == .local) { - log.debug("map %{d} to t{d}", .{ inst, result_value.local }); - } - switch (result_value) { - .none => {}, - else => try f.value_map.putNoClobber(Air.indexToRef(inst), result_value), + if (result_value == .new_local) { + log.debug("map %{d} to t{d}", .{ inst, result_value.new_local }); } + try f.value_map.putNoClobber(Air.indexToRef(inst), switch (result_value) { + .none => continue, + .new_local => |i| .{ .local = i }, + else => result_value, + }); } } @@ -3283,6 +3064,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { return CValue.none; } + const inst_ty = f.air.typeOfIndex(inst); const ptr_ty = f.air.typeOf(bin_op.lhs); const child_ty = ptr_ty.childType(); @@ -3297,7 +3079,9 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = &("); + try writer.writeAll(" = ("); + try f.renderTypecast(writer, inst_ty); + try writer.writeAll(")&("); if (ptr_ty.ptrSize() == .One) { // It's a pointer to an array, so we need to de-reference. 
try f.writeCValueDeref(writer, ptr); @@ -3428,13 +3212,13 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { return CValue{ .undef = inst_ty }; } - const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut; + const mutability: Mutability = if (inst_ty.isConstPtr()) .@"const" else .mut; const target = f.object.dg.module.getTarget(); const local = try f.allocAlignedLocal(elem_type, mutability, inst_ty.ptrAlignment(target)); - log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.local }); + log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; - try f.allocs.put(gpa, local.local, false); - return CValue{ .local_ref = local.local }; + try f.allocs.put(gpa, local.new_local, false); + return CValue{ .local_ref = local.new_local }; } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { @@ -3445,19 +3229,25 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { return CValue{ .undef = inst_ty }; } - const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut; + const mutability: Mutability = if (inst_ty.isConstPtr()) .@"const" else .mut; const target = f.object.dg.module.getTarget(); const local = try f.allocAlignedLocal(elem_ty, mutability, inst_ty.ptrAlignment(target)); - log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.local }); + log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; - try f.allocs.put(gpa, local.local, false); - return CValue{ .local_ref = local.local }; + try f.allocs.put(gpa, local.new_local, false); + return CValue{ .local_ref = local.new_local }; } -fn airArg(f: *Function) CValue { +fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { + const inst_ty = f.air.typeOfIndex(inst); + const inst_cty = try f.object.dg.typeToIndex(inst_ty, .parameter); + const i = f.next_arg_index; f.next_arg_index += 1; - return .{ .arg = i }; + return if (inst_cty != try 
f.object.dg.typeToIndex(inst_ty, .complete)) + .{ .arg_array = i } + else + .{ .arg = i }; } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { @@ -3567,7 +3357,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const ret_val = if (is_array) ret_val: { const array_local = try f.allocLocal(inst, try lowered_ret_ty.copy(f.arena.allocator())); try writer.writeAll("memcpy("); - try f.writeCValueMember(writer, array_local, .{ .field = 0 }); + try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); try writer.writeAll(", "); if (deref) try f.writeCValueDeref(writer, operand) @@ -3587,14 +3377,13 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { try f.writeCValue(writer, ret_val, .Other); try writer.writeAll(";\n"); if (is_array) { - try freeLocal(f, inst, ret_val.local, 0); + try freeLocal(f, inst, ret_val.new_local, 0); } } else { try reap(f, inst, &.{un_op}); - if (f.object.dg.decl.ty.fnCallingConvention() != .Naked) { + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() != .Naked) // Not even allowed to return void in a naked function. 
try writer.writeAll("return;\n"); - } } return CValue.none; } @@ -3796,7 +3585,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderTypecast(writer, src_ty); try writer.writeAll("))"); if (src_val == .constant) { - try freeLocal(f, inst, array_src.local, 0); + try freeLocal(f, inst, array_src.new_local, 0); } } else if (ptr_info.host_size != 0) { const host_bits = ptr_info.host_size * 8; @@ -3847,7 +3636,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; if (cant_cast) { if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); - try writer.writeAll("zig_as_"); + try writer.writeAll("zig_make_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeAll("(0, "); } else { @@ -4118,32 +3907,31 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const elem_ty = switch (inst_ty.ptrSize()) { - .One => blk: { - const array_ty = inst_ty.childType(); - break :blk array_ty.childType(); - }, - else => inst_ty.childType(), - }; + const elem_ty = inst_ty.elemType2(); - // We must convert to and from integer types to prevent UB if the operation - // results in a NULL pointer, or if LHS is NULL. The operation is only UB - // if the result is NULL and then dereferenced. 
const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); try f.writeCValue(writer, local, .Other); - try writer.writeAll(" = ("); - try f.renderTypecast(writer, inst_ty); - try writer.writeAll(")(((uintptr_t)"); - try f.writeCValue(writer, lhs, .Other); - try writer.writeAll(") "); - try writer.writeByte(operator); - try writer.writeAll(" ("); - try f.writeCValue(writer, rhs, .Other); - try writer.writeAll("*sizeof("); - try f.renderTypecast(writer, elem_ty); - try writer.writeAll(")));\n"); + try writer.writeAll(" = "); + if (elem_ty.hasRuntimeBitsIgnoreComptime()) { + // We must convert to and from integer types to prevent UB if the operation + // results in a NULL pointer, or if LHS is NULL. The operation is only UB + // if the result is NULL and then dereferenced. + try writer.writeByte('('); + try f.renderTypecast(writer, inst_ty); + try writer.writeAll(")(((uintptr_t)"); + try f.writeCValue(writer, lhs, .Other); + try writer.writeAll(") "); + try writer.writeByte(operator); + try writer.writeAll(" ("); + try f.writeCValue(writer, rhs, .Other); + try writer.writeAll("*sizeof("); + try f.renderTypecast(writer, elem_ty); + try writer.writeAll(")))"); + } else try f.writeCValue(writer, lhs, .Initializer); + + try writer.writeAll(";\n"); return local; } @@ -4222,8 +4010,12 @@ fn airCall( modifier: std.builtin.CallModifier, ) !CValue { // Not even allowed to call panic in a naked function. - if (f.object.dg.decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + const gpa = f.object.dg.gpa; + const module = f.object.dg.module; + const target = module.getTarget(); + const writer = f.object.writer(); switch (modifier) { .auto => {}, @@ -4238,8 +4030,28 @@ fn airCall( const resolved_args = try gpa.alloc(CValue, args.len); defer gpa.free(resolved_args); - for (args, 0..) 
|arg, i| { - resolved_args[i] = try f.resolveInst(arg); + for (resolved_args, args) |*resolved_arg, arg| { + const arg_ty = f.air.typeOf(arg); + const arg_cty = try f.object.dg.typeToIndex(arg_ty, .parameter); + if (f.object.dg.indexToCType(arg_cty).tag() == .void) { + resolved_arg.* = .none; + continue; + } + resolved_arg.* = try f.resolveInst(arg); + if (arg_cty != try f.object.dg.typeToIndex(arg_ty, .complete)) { + var lowered_arg_buf: LowerFnRetTyBuffer = undefined; + const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target); + + const array_local = try f.allocLocal(inst, try lowered_arg_ty.copy(f.arena.allocator())); + try writer.writeAll("memcpy("); + try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); + try writer.writeAll(", "); + try f.writeCValue(writer, resolved_arg.*, .FunctionArgument); + try writer.writeAll(", sizeof("); + try f.renderTypecast(writer, lowered_arg_ty); + try writer.writeAll("));\n"); + resolved_arg.* = array_local; + } } const callee = try f.resolveInst(pl_op.operand); @@ -4256,9 +4068,7 @@ fn airCall( .Pointer => callee_ty.childType(), else => unreachable, }; - const writer = f.object.writer(); - const target = f.object.dg.module.getTarget(); const ret_ty = fn_ty.fnReturnType(); var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); @@ -4293,7 +4103,7 @@ fn airCall( else => break :known, }; }; - name = f.object.dg.module.declPtr(fn_decl).name; + name = module.declPtr(fn_decl).name; try f.object.dg.renderDeclName(writer, fn_decl, 0); break :callee; } @@ -4303,22 +4113,11 @@ fn airCall( try writer.writeByte('('); var args_written: usize = 0; - for (args, 0..) 
|arg, arg_i| { - const ty = f.air.typeOf(arg); - if (!ty.hasRuntimeBitsIgnoreComptime()) continue; - if (args_written != 0) { - try writer.writeAll(", "); - } - if ((is_extern or std.mem.eql(u8, std.mem.span(name), "main")) and - ty.isCPtr() and ty.childType().tag() == .u8) - { - // Corresponds with hack in renderType .Pointer case. - try writer.writeAll("(char"); - if (ty.isConstPtr()) try writer.writeAll(" const"); - if (ty.isVolatilePtr()) try writer.writeAll(" volatile"); - try writer.writeAll(" *)"); - } - try f.writeCValue(writer, resolved_args[arg_i], .FunctionArgument); + for (resolved_args) |resolved_arg| { + if (resolved_arg == .none) continue; + if (args_written != 0) try writer.writeAll(", "); + try f.writeCValue(writer, resolved_arg, .FunctionArgument); + if (resolved_arg == .new_local) try freeLocal(f, inst, resolved_arg.new_local, 0); args_written += 1; } try writer.writeAll(");\n"); @@ -4331,11 +4130,11 @@ fn airCall( try writer.writeAll("memcpy("); try f.writeCValue(writer, array_local, .FunctionArgument); try writer.writeAll(", "); - try f.writeCValueMember(writer, result_local, .{ .field = 0 }); + try f.writeCValueMember(writer, result_local, .{ .identifier = "array" }); try writer.writeAll(", sizeof("); try f.renderTypecast(writer, ret_ty); try writer.writeAll("));\n"); - try freeLocal(f, inst, result_local.local, 0); + try freeLocal(f, inst, result_local.new_local, 0); break :r array_local; }; @@ -4599,7 +4398,7 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { } if (operand == .constant) { - try freeLocal(f, inst, operand_lval.local, 0); + try freeLocal(f, inst, operand_lval.new_local, 0); } return local; @@ -4645,7 +4444,7 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { fn airUnreach(f: *Function) !CValue { // Not even allowed to call unreachable in a naked function. 
- if (f.object.dg.decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; try f.object.writer().writeAll("zig_unreachable();\n"); return CValue.none; @@ -4922,7 +4721,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { writer, output_ty, local_value, - .Mut, + .mut, alignment, .Complete, ); @@ -4961,7 +4760,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { writer, input_ty, local_value, - .Const, + .@"const", alignment, .Complete, ); @@ -5119,7 +4918,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { try f.writeCValueDeref(writer, if (output == .none) - CValue{ .local_ref = local.local } + CValue{ .local_ref = local.new_local } else try f.resolveInst(output)); try writer.writeAll(" = "); @@ -5425,18 +5224,20 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc else => .none, }; - const FieldLoc = union(enum) { + const field_loc: union(enum) { begin: void, field: CValue, end: void, - }; - const field_loc = switch (struct_ty.tag()) { - .@"struct" => switch (struct_ty.containerLayout()) { - .Auto, .Extern => for (struct_ty.structFields().values()[index..], 0..) 
|field, offset| { - if (field.ty.hasRuntimeBitsIgnoreComptime()) break FieldLoc{ .field = .{ - .identifier = struct_ty.structFieldName(index + offset), - } }; - } else @as(FieldLoc, .end), + } = switch (struct_ty.tag()) { + .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { + .Auto, .Extern => for (index..struct_ty.structFieldCount()) |field_i| { + if (!struct_ty.structFieldIsComptime(field_i) and + struct_ty.structFieldType(field_i).hasRuntimeBitsIgnoreComptime()) + break .{ .field = if (struct_ty.isSimpleTuple()) + .{ .field = field_i } + else + .{ .identifier = struct_ty.structFieldName(field_i) } }; + } else .end, .Packed => if (field_ptr_info.data.host_size == 0) { const target = f.object.dg.module.getTarget(); @@ -5461,45 +5262,33 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc try f.writeCValue(writer, struct_ptr, .Other); try writer.print(")[{}];\n", .{try f.fmtIntLiteral(Type.usize, byte_offset_val)}); return local; - } else @as(FieldLoc, .begin), + } else .begin, }, .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) { try f.writeCValue(writer, struct_ptr, .Other); try writer.writeAll(";\n"); return local; - } else if (field_ty.hasRuntimeBitsIgnoreComptime()) FieldLoc{ .field = .{ + } else if (field_ty.hasRuntimeBitsIgnoreComptime()) .{ .field = .{ .identifier = struct_ty.unionFields().keys()[index], - } } else @as(FieldLoc, .end), - .tuple, .anon_struct => field_name: { - const tuple = struct_ty.tupleFields(); - if (tuple.values[index].tag() != .unreachable_value) return CValue.none; - - var id: usize = 0; - break :field_name for (tuple.values, 0..) 
|value, i| { - if (value.tag() != .unreachable_value) continue; - if (!tuple.types[i].hasRuntimeBitsIgnoreComptime()) continue; - if (i >= index) break FieldLoc{ .field = .{ .field = id } }; - id += 1; - } else @as(FieldLoc, .end); - }, + } } else .end, else => unreachable, }; - try writer.writeByte('&'); - switch (field_loc) { - .begin, .end => { - try writer.writeByte('('); - try f.writeCValue(writer, struct_ptr, .Other); - try writer.print(")[{}]", .{ - @boolToInt(field_loc == .end and struct_ty.hasRuntimeBitsIgnoreComptime()), - }); - }, - .field => |field| if (extra_name != .none) { - try f.writeCValueDerefMember(writer, struct_ptr, extra_name); - try writer.writeByte('.'); - try f.writeCValue(writer, field, .Other); - } else try f.writeCValueDerefMember(writer, struct_ptr, field), - } + if (struct_ty.hasRuntimeBitsIgnoreComptime()) { + try writer.writeByte('&'); + switch (field_loc) { + .begin, .end => { + try writer.writeByte('('); + try f.writeCValue(writer, struct_ptr, .Other); + try writer.print(")[{}]", .{@boolToInt(field_loc == .end)}); + }, + .field => |field| if (extra_name != .none) { + try f.writeCValueDerefMember(writer, struct_ptr, extra_name); + try writer.writeByte('.'); + try f.writeCValue(writer, field, .Other); + } else try f.writeCValueDerefMember(writer, struct_ptr, field), + } + } else try f.writeCValue(writer, struct_ptr, .Other); try writer.writeAll(";\n"); return local; } @@ -5534,8 +5323,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { }; const field_name: CValue = switch (struct_ty.tag()) { - .@"struct" => switch (struct_ty.containerLayout()) { - .Auto, .Extern => .{ .identifier = struct_ty.structFieldName(extra.field_index) }, + .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { + .Auto, .Extern => if (struct_ty.isSimpleTuple()) + .{ .field = extra.field_index } + else + .{ .identifier = struct_ty.structFieldName(extra.field_index) }, .Packed => { const struct_obj = 
struct_ty.castTag(.@"struct").?.data; const int_info = struct_ty.intInfo(target); @@ -5593,13 +5385,13 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); try writer.writeAll("memcpy("); - try f.writeCValue(writer, .{ .local_ref = local.local }, .FunctionArgument); + try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); try writer.writeAll(", "); - try f.writeCValue(writer, .{ .local_ref = temp_local.local }, .FunctionArgument); + try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); try writer.writeAll(", sizeof("); try f.renderTypecast(writer, inst_ty); try writer.writeAll("));\n"); - try freeLocal(f, inst, temp_local.local, 0); + try freeLocal(f, inst, temp_local.new_local, 0); return local; }, }, @@ -5623,22 +5415,13 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("));\n"); if (struct_byval == .constant) { - try freeLocal(f, inst, operand_lval.local, 0); + try freeLocal(f, inst, operand_lval.new_local, 0); } return local; } else .{ .identifier = struct_ty.unionFields().keys()[extra.field_index], }, - .tuple, .anon_struct => blk: { - const tuple = struct_ty.tupleFields(); - if (tuple.values[extra.field_index].tag() != .unreachable_value) return CValue.none; - - var id: usize = 0; - for (tuple.values[0..extra.field_index]) |value| - id += @boolToInt(value.tag() == .unreachable_value); - break :blk .{ .field = id }; - }, else => unreachable, }; @@ -5965,26 +5748,28 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(writer, local, .Other); - const array_len = f.air.typeOf(ty_op.operand).elemType().arrayLen(); + const array_ty = f.air.typeOf(ty_op.operand).childType(); - try writer.writeAll(".ptr = "); + try f.writeCValueMember(writer, local, .{ 
.identifier = "ptr" }); + try writer.writeAll(" = "); + // Unfortunately, C does not support any equivalent to + // &(*(void *)p)[0], although LLVM does via GetElementPtr if (operand == .undef) { - // Unfortunately, C does not support any equivalent to - // &(*(void *)p)[0], although LLVM does via GetElementPtr var buf: Type.SlicePtrFieldTypeBuffer = undefined; try f.writeCValue(writer, CValue{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer); - } else { + } else if (array_ty.hasRuntimeBitsIgnoreComptime()) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); - } + } else try f.writeCValue(writer, operand, .Initializer); + try writer.writeAll("; "); + const array_len = array_ty.arrayLen(); var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len }; const len_val = Value.initPayload(&len_pl.base); - try writer.writeAll("; "); - try f.writeCValue(writer, local, .Other); - try writer.print(".len = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)}); + try f.writeCValueMember(writer, local, .{ .identifier = "len" }); + try writer.print(" = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)}); + return local; } @@ -6223,7 +6008,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue } if (f.liveness.isUnused(inst)) { - try freeLocal(f, inst, local.local, 0); + try freeLocal(f, inst, local.new_local, 0); return CValue.none; } @@ -6266,7 +6051,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(");\n"); if (f.liveness.isUnused(inst)) { - try freeLocal(f, inst, local.local, 0); + try freeLocal(f, inst, local.new_local, 0); return CValue.none; } @@ -6363,7 +6148,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs }); - try freeLocal(f, inst, index.local, 0); + try freeLocal(f, inst, 
index.new_local, 0); return CValue.none; } @@ -6465,7 +6250,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); - try writer.print(" = {s}(", .{try f.object.dg.getTagNameFn(enum_ty)}); + try writer.print(" = {s}(", .{try f.getTagNameFn(enum_ty)}); try f.writeCValue(writer, operand, .Other); try writer.writeAll(");\n"); @@ -6680,7 +6465,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); - try freeLocal(f, inst, it.local, 0); + try freeLocal(f, inst, it.new_local, 0); return accum; } @@ -6693,8 +6478,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const gpa = f.object.dg.gpa; const resolved_elements = try gpa.alloc(CValue, elements.len); defer gpa.free(resolved_elements); - for (elements, 0..) |element, i| { - resolved_elements[i] = try f.resolveInst(element); + for (resolved_elements, elements) |*resolved_element, element| { + resolved_element.* = try f.resolveInst(element); } { var bt = iterateBigTomb(f, inst); @@ -6733,46 +6518,47 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(")"); try writer.writeByte('{'); var empty = true; - for (elements, 0..) |element, index| { - if (inst_ty.structFieldValueComptime(index)) |_| continue; + for (elements, resolved_elements, 0..) 
|element, resolved_element, field_i| { + if (inst_ty.structFieldValueComptime(field_i)) |_| continue; if (!empty) try writer.writeAll(", "); - if (!inst_ty.isTupleOrAnonStruct()) { - try writer.print(".{ } = ", .{fmtIdent(inst_ty.structFieldName(index))}); - } + + const field_name: CValue = if (inst_ty.isSimpleTuple()) + .{ .field = field_i } + else + .{ .identifier = inst_ty.structFieldName(field_i) }; + try writer.writeByte('.'); + try f.object.dg.writeCValue(writer, field_name); + try writer.writeAll(" = "); const element_ty = f.air.typeOf(element); try f.writeCValue(writer, switch (element_ty.zigTypeTag()) { .Array => CValue{ .undef = element_ty }, - else => resolved_elements[index], + else => resolved_element, }, .Initializer); empty = false; } - if (empty) try writer.print("{}", .{try f.fmtIntLiteral(Type.u8, Value.zero)}); try writer.writeAll("};\n"); - var field_id: usize = 0; - for (elements, 0..) |element, index| { - if (inst_ty.structFieldValueComptime(index)) |_| continue; + for (elements, resolved_elements, 0..) 
|element, resolved_element, field_i| { + if (inst_ty.structFieldValueComptime(field_i)) |_| continue; const element_ty = f.air.typeOf(element); if (element_ty.zigTypeTag() != .Array) continue; - const field_name = if (inst_ty.isTupleOrAnonStruct()) - CValue{ .field = field_id } + const field_name: CValue = if (inst_ty.isSimpleTuple()) + .{ .field = field_i } else - CValue{ .identifier = inst_ty.structFieldName(index) }; + .{ .identifier = inst_ty.structFieldName(field_i) }; try writer.writeAll(";\n"); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, local, field_name); try writer.writeAll(", "); - try f.writeCValue(writer, resolved_elements[index], .FunctionArgument); + try f.writeCValue(writer, resolved_element, .FunctionArgument); try writer.writeAll(", sizeof("); try f.renderTypecast(writer, element_ty); try writer.writeAll("));\n"); - - field_id += 1; } }, .Packed => { @@ -6790,7 +6576,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); var empty = true; - for (elements, 0..) |_, index| { + for (0..elements.len) |index| { const field_ty = inst_ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; @@ -6839,13 +6625,6 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { empty = false; } - if (empty) { - try writer.writeByte('('); - try f.renderTypecast(writer, inst_ty); - try writer.writeByte(')'); - try f.writeCValue(writer, .{ .undef = inst_ty }, .Initializer); - } - try writer.writeAll(";\n"); }, }, @@ -7350,7 +7129,7 @@ fn formatIntLiteral( use_twos_comp = true; } else { // TODO: Use fmtIntLiteral for 0? 
- try writer.print("zig_sub_{c}{d}(zig_as_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits }); + try writer.print("zig_sub_{c}{d}(zig_make_{c}{d}(0, 0), ", .{ signAbbrev(int_info.signedness), c_bits, signAbbrev(int_info.signedness), c_bits }); } } else { try writer.writeByte('-'); @@ -7360,11 +7139,16 @@ fn formatIntLiteral( switch (data.ty.tag()) { .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {}, else => { - if (int_info.bits > 64 and data.location != null and data.location.? == .StaticInitializer) { + if (int_info.bits <= 64) { + try writer.print("{s}INT{d}_C(", .{ switch (int_info.signedness) { + .signed => "", + .unsigned => "U", + }, c_bits }); + } else if (data.location != null and data.location.? == .StaticInitializer) { // MSVC treats casting the struct initializer as not constant (C2099), so an alternate form is used in global initializers - try writer.print("zig_as_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); + try writer.print("zig_make_constant_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); } else { - try writer.print("zig_as_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); + try writer.print("zig_make_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }); } }, } @@ -7473,17 +7257,20 @@ fn isByRef(ty: Type) bool { } const LowerFnRetTyBuffer = struct { + names: [1][]const u8, types: [1]Type, values: [1]Value, - payload: Type.Payload.Tuple, + payload: Type.Payload.AnonStruct, }; fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) Type { if (ret_ty.zigTypeTag() == .NoReturn) return Type.initTag(.noreturn); if (lowersToArray(ret_ty, target)) { + buffer.names = [1][]const u8{"array"}; buffer.types = [1]Type{ret_ty}; buffer.values = [1]Value{Value.initTag(.unreachable_value)}; buffer.payload = .{ .data = .{ + .names = &buffer.names, .types = &buffer.types, .values = &buffer.values, } }; @@ -7539,7 
+7326,7 @@ fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void { if (f.air.instructions.items(.tag)[ref_inst] == .constant) return; const c_value = (f.value_map.fetchRemove(ref) orelse return).value; const local_index = switch (c_value) { - .local => |l| l, + .local, .new_local => |l| l, else => return, }; try freeLocal(f, inst, local_index, ref_inst); @@ -7614,8 +7401,8 @@ fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void { } fn noticeBranchFrees(f: *Function, pre_locals_len: LocalIndex, inst: Air.Inst.Index) !void { - for (f.locals.items[pre_locals_len..], 0..) |*local, local_offset| { - const local_index = pre_locals_len + @intCast(LocalIndex, local_offset); + for (f.locals.items[pre_locals_len..], pre_locals_len..) |*local, local_i| { + const local_index = @intCast(LocalIndex, local_i); if (f.allocs.contains(local_index)) continue; // allocs are not freeable // free more deeply nested locals from other branches at current depth diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig new file mode 100644 index 0000000000..bf148a8b87 --- /dev/null +++ b/src/codegen/c/type.zig @@ -0,0 +1,1919 @@ +const std = @import("std"); +const cstr = std.cstr; +const mem = std.mem; +const Allocator = mem.Allocator; +const assert = std.debug.assert; +const autoHash = std.hash.autoHash; +const Target = std.Target; + +const Module = @import("../../Module.zig"); +const Type = @import("../../type.zig").Type; + +pub const CType = extern union { + /// If the tag value is less than Tag.no_payload_count, then no pointer + /// dereference is needed. 
+ tag_if_small_enough: Tag, + ptr_otherwise: *const Payload, + + pub fn initTag(small_tag: Tag) CType { + assert(!small_tag.hasPayload()); + return .{ .tag_if_small_enough = small_tag }; + } + + pub fn initPayload(pl: anytype) CType { + const T = @typeInfo(@TypeOf(pl)).Pointer.child; + return switch (pl.base.tag) { + inline else => |t| if (comptime t.hasPayload() and t.Type() == T) .{ + .ptr_otherwise = &pl.base, + } else unreachable, + }; + } + + pub fn hasPayload(self: CType) bool { + return self.tag_if_small_enough.hasPayload(); + } + + pub fn tag(self: CType) Tag { + return if (self.hasPayload()) self.ptr_otherwise.tag else self.tag_if_small_enough; + } + + pub fn cast(self: CType, comptime T: type) ?*const T { + if (!self.hasPayload()) return null; + const pl = self.ptr_otherwise; + return switch (pl.tag) { + inline else => |t| if (comptime t.hasPayload() and t.Type() == T) + @fieldParentPtr(T, "base", pl) + else + null, + }; + } + + pub fn castTag(self: CType, comptime t: Tag) ?*const t.Type() { + return if (self.tag() == t) @fieldParentPtr(t.Type(), "base", self.ptr_otherwise) else null; + } + + pub const Tag = enum(usize) { + // The first section of this enum are tags that require no payload. + void, + + // C basic types + char, + + @"signed char", + short, + int, + long, + @"long long", + + _Bool, + @"unsigned char", + @"unsigned short", + @"unsigned int", + @"unsigned long", + @"unsigned long long", + + float, + double, + @"long double", + + // C header types + // - stdbool.h + bool, + // - stddef.h + size_t, + ptrdiff_t, + // - stdint.h + uint8_t, + int8_t, + uint16_t, + int16_t, + uint32_t, + int32_t, + uint64_t, + int64_t, + uintptr_t, + intptr_t, + + // zig.h types + zig_u128, + zig_i128, + zig_f16, + zig_f32, + zig_f64, + zig_f80, + zig_f128, + zig_c_longdouble, // Keep last_no_payload_tag updated! + + // After this, the tag requires a payload. 
+ pointer, + pointer_const, + pointer_volatile, + pointer_const_volatile, + array, + vector, + fwd_anon_struct, + fwd_anon_union, + fwd_struct, + fwd_union, + unnamed_struct, + unnamed_union, + packed_unnamed_struct, + packed_unnamed_union, + anon_struct, + anon_union, + @"struct", + @"union", + packed_struct, + packed_union, + function, + varargs_function, + + pub const last_no_payload_tag = Tag.zig_c_longdouble; + pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; + + pub fn hasPayload(self: Tag) bool { + return @enumToInt(self) >= no_payload_count; + } + + pub fn toIndex(self: Tag) Index { + assert(!self.hasPayload()); + return @intCast(Index, @enumToInt(self)); + } + + pub fn Type(comptime self: Tag) type { + return switch (self) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + .zig_c_longdouble, + => @compileError("Type Tag " ++ @tagName(self) ++ " has no payload"), + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => Payload.Child, + + .array, + .vector, + => Payload.Sequence, + + .fwd_anon_struct, + .fwd_anon_union, + => Payload.Fields, + + .fwd_struct, + .fwd_union, + => Payload.FwdDecl, + + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => Payload.Unnamed, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => Payload.Aggregate, + + .function, + .varargs_function, + => Payload.Function, + }; + } + }; + + pub const Payload = struct { + tag: Tag, + + pub const Child = struct { + base: Payload, 
+ data: Index, + }; + + pub const Sequence = struct { + base: Payload, + data: struct { + len: u64, + elem_type: Index, + }, + }; + + pub const FwdDecl = struct { + base: Payload, + data: Module.Decl.Index, + }; + + pub const Fields = struct { + base: Payload, + data: Data, + + pub const Data = []const Field; + pub const Field = struct { + name: [*:0]const u8, + type: Index, + alignas: AlignAs, + }; + pub const AlignAs = struct { + @"align": std.math.Log2Int(u32), + abi: std.math.Log2Int(u32), + + pub fn init(alignment: u32, abi_alignment: u32) AlignAs { + assert(std.math.isPowerOfTwo(alignment)); + assert(std.math.isPowerOfTwo(abi_alignment)); + return .{ + .@"align" = std.math.log2_int(u32, alignment), + .abi = std.math.log2_int(u32, abi_alignment), + }; + } + pub fn abiAlign(ty: Type, target: Target) AlignAs { + const abi_align = ty.abiAlignment(target); + return init(abi_align, abi_align); + } + pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs { + return init( + struct_ty.structFieldAlign(field_i, target), + struct_ty.structFieldType(field_i).abiAlignment(target), + ); + } + pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs { + const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_payload_align = union_obj.abiAlignment(target, false); + return init(union_payload_align, union_payload_align); + } + + pub fn getAlign(self: AlignAs) u32 { + return @as(u32, 1) << self.@"align"; + } + }; + }; + + pub const Unnamed = struct { + base: Payload, + data: struct { + fields: Fields.Data, + owner_decl: Module.Decl.Index, + id: u32, + }, + }; + + pub const Aggregate = struct { + base: Payload, + data: struct { + fields: Fields.Data, + fwd_decl: Index, + }, + }; + + pub const Function = struct { + base: Payload, + data: struct { + return_type: Index, + param_types: []const Index, + }, + }; + }; + + pub const Index = u32; + pub const Store = struct { + arena: std.heap.ArenaAllocator.State = .{}, + set: Set = .{}, + + 
pub const Set = struct { + pub const Map = std.ArrayHashMapUnmanaged(CType, void, HashContext32, true); + + map: Map = .{}, + + pub fn indexToCType(self: Set, index: Index) CType { + if (index < Tag.no_payload_count) return initTag(@intToEnum(Tag, index)); + return self.map.keys()[index - Tag.no_payload_count]; + } + + pub fn indexToHash(self: Set, index: Index) Map.Hash { + if (index < Tag.no_payload_count) + return (HashContext32{ .store = &self }).hash(self.indexToCType(index)); + return self.map.entries.items(.hash)[index - Tag.no_payload_count]; + } + + pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index { + const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } }; + + var convert: Convert = undefined; + convert.initType(ty, kind, lookup) catch unreachable; + + const t = convert.tag(); + if (!t.hasPayload()) return t.toIndex(); + + return if (self.map.getIndexAdapted( + ty, + TypeAdapter32{ .kind = kind, .lookup = lookup, .convert = &convert }, + )) |idx| @intCast(Index, Tag.no_payload_count + idx) else null; + } + }; + + pub const Promoted = struct { + arena: std.heap.ArenaAllocator, + set: Set, + + pub fn gpa(self: *Promoted) Allocator { + return self.arena.child_allocator; + } + + pub fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index { + const t = cty.tag(); + if (@enumToInt(t) < Tag.no_payload_count) return @intCast(Index, @enumToInt(t)); + + const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set }); + if (!gop.found_existing) gop.key_ptr.* = cty; + if (std.debug.runtime_safety) { + const key = &self.set.map.entries.items(.key)[gop.index]; + assert(key == gop.key_ptr); + assert(cty.eql(key.*)); + assert(cty.hash(self.set) == key.hash(self.set)); + } + return @intCast(Index, Tag.no_payload_count + gop.index); + } + + pub fn typeToIndex( + self: *Promoted, + ty: Type, + mod: *Module, + kind: Kind, + ) Allocator.Error!Index { + const lookup = Convert.Lookup{ .mut = .{ 
.promoted = self, .mod = mod } }; + + var convert: Convert = undefined; + try convert.initType(ty, kind, lookup); + + const t = convert.tag(); + if (!t.hasPayload()) return t.toIndex(); + + const gop = try self.set.map.getOrPutContextAdapted( + self.gpa(), + ty, + TypeAdapter32{ .kind = kind, .lookup = lookup.freeze(), .convert = &convert }, + .{ .store = &self.set }, + ); + if (!gop.found_existing) { + errdefer _ = self.set.map.pop(); + gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert); + } + if (std.debug.runtime_safety) { + const adapter = TypeAdapter64{ + .kind = kind, + .lookup = lookup.freeze(), + .convert = &convert, + }; + const cty = &self.set.map.entries.items(.key)[gop.index]; + assert(cty == gop.key_ptr); + assert(adapter.eql(ty, cty.*)); + assert(adapter.hash(ty) == cty.hash(self.set)); + } + return @intCast(Index, Tag.no_payload_count + gop.index); + } + }; + + pub fn promote(self: Store, gpa: Allocator) Promoted { + return .{ .arena = self.arena.promote(gpa), .set = self.set }; + } + + pub fn demote(self: *Store, promoted: Promoted) void { + self.arena = promoted.arena.state; + self.set = promoted.set; + } + + pub fn indexToCType(self: Store, index: Index) CType { + return self.set.indexToCType(index); + } + + pub fn indexToHash(self: Store, index: Index) Set.Map.Hash { + return self.set.indexToHash(index); + } + + pub fn cTypeToIndex(self: *Store, gpa: Allocator, cty: CType) !Index { + var promoted = self.promote(gpa); + defer self.demote(promoted); + return promoted.cTypeToIndex(cty); + } + + pub fn typeToCType(self: *Store, gpa: Allocator, ty: Type, mod: *Module, kind: Kind) !CType { + const idx = try self.typeToIndex(gpa, ty, mod, kind); + return self.indexToCType(idx); + } + + pub fn typeToIndex(self: *Store, gpa: Allocator, ty: Type, mod: *Module, kind: Kind) !Index { + var promoted = self.promote(gpa); + defer self.demote(promoted); + return promoted.typeToIndex(ty, mod, kind); + } + + pub fn 
clearRetainingCapacity(self: *Store, gpa: Allocator) void {
    var promoted = self.promote(gpa);
    defer self.demote(promoted);
    promoted.set.map.clearRetainingCapacity();
    // Result of reset is discarded; capacity is retained for reuse.
    _ = promoted.arena.reset(.retain_capacity);
}

/// Empty the set and release all backing memory.
pub fn clearAndFree(self: *Store, gpa: Allocator) void {
    var promoted = self.promote(gpa);
    defer self.demote(promoted);
    promoted.set.map.clearAndFree(gpa);
    _ = promoted.arena.reset(.free_all);
}

pub fn shrinkRetainingCapacity(self: *Store, gpa: Allocator, new_len: usize) void {
    self.set.map.shrinkRetainingCapacity(gpa, new_len);
}

pub fn shrinkAndFree(self: *Store, gpa: Allocator, new_len: usize) void {
    self.set.map.shrinkAndFree(gpa, new_len);
}

/// Number of interned C types with payloads (excludes implicit tag-only
/// indices).
pub fn count(self: Store) usize {
    return self.set.map.count();
}

/// Take ownership of this store's contents, leaving `self` empty.
pub fn move(self: *Store) Store {
    const moved = self.*;
    self.* = .{};
    return moved;
}

pub fn deinit(self: *Store, gpa: Allocator) void {
    var promoted = self.promote(gpa);
    promoted.set.map.deinit(gpa);
    _ = promoted.arena.deinit();
    self.* = undefined;
}
};

/// Whether this C type lowers with packed layout.
pub fn isPacked(self: CType) bool {
    return switch (self.tag()) {
        else => false,
        .packed_unnamed_struct,
        .packed_unnamed_union,
        .packed_struct,
        .packed_union,
        => true,
    };
}

/// The field list of an aggregate-like payload. Asserts (via `unreachable`)
/// that the tag actually carries fields.
pub fn fields(self: CType) Payload.Fields.Data {
    return if (self.cast(Payload.Aggregate)) |pl|
        pl.data.fields
    else if (self.cast(Payload.Unnamed)) |pl|
        pl.data.fields
    else if (self.cast(Payload.Fields)) |pl|
        pl.data
    else
        unreachable;
}

/// Structural equality where child indices compare by identity.
pub fn eql(lhs: CType, rhs: CType) bool {
    return lhs.eqlContext(rhs, struct {
        pub fn eqlIndex(_: @This(), lhs_idx: Index, rhs_idx: Index) bool {
            return lhs_idx == rhs_idx;
        }
    }{});
}

/// Structural equality with a pluggable `ctx.eqlIndex` for comparing child
/// type indices.
pub fn eqlContext(lhs: CType, rhs: CType, ctx: anytype) bool {
    // As a shortcut, if the small tags / addresses match, we're done.
if (lhs.tag_if_small_enough == rhs.tag_if_small_enough) return true;

    const lhs_tag = lhs.tag();
    const rhs_tag = rhs.tag();
    // Different tags can never be structurally equal.
    if (lhs_tag != rhs_tag) return false;

    return switch (lhs_tag) {
        // Payload-free tags could only be equal via the small-tag shortcut
        // above, which already failed.
        .void,
        .char,
        .@"signed char",
        .short,
        .int,
        .long,
        .@"long long",
        ._Bool,
        .@"unsigned char",
        .@"unsigned short",
        .@"unsigned int",
        .@"unsigned long",
        .@"unsigned long long",
        .float,
        .double,
        .@"long double",
        .bool,
        .size_t,
        .ptrdiff_t,
        .uint8_t,
        .int8_t,
        .uint16_t,
        .int16_t,
        .uint32_t,
        .int32_t,
        .uint64_t,
        .int64_t,
        .uintptr_t,
        .intptr_t,
        .zig_u128,
        .zig_i128,
        .zig_f16,
        .zig_f32,
        .zig_f64,
        .zig_f80,
        .zig_f128,
        .zig_c_longdouble,
        => false,

        // Pointers compare by their pointee index.
        .pointer,
        .pointer_const,
        .pointer_volatile,
        .pointer_const_volatile,
        => ctx.eqlIndex(lhs.cast(Payload.Child).?.data, rhs.cast(Payload.Child).?.data),

        // Sequences compare by length and element type.
        .array,
        .vector,
        => {
            const lhs_data = lhs.cast(Payload.Sequence).?.data;
            const rhs_data = rhs.cast(Payload.Sequence).?.data;
            return lhs_data.len == rhs_data.len and
                ctx.eqlIndex(lhs_data.elem_type, rhs_data.elem_type);
        },

        // Anonymous forward decls compare field-by-field: type, alignment,
        // and (C string) name must all match.
        .fwd_anon_struct,
        .fwd_anon_union,
        => {
            const lhs_data = lhs.cast(Payload.Fields).?.data;
            const rhs_data = rhs.cast(Payload.Fields).?.data;
            if (lhs_data.len != rhs_data.len) return false;
            for (lhs_data, rhs_data) |lhs_field, rhs_field| {
                if (!ctx.eqlIndex(lhs_field.type, rhs_field.type)) return false;
                if (lhs_field.alignas.@"align" != rhs_field.alignas.@"align") return false;
                if (cstr.cmp(lhs_field.name, rhs_field.name) != 0) return false;
            }
            return true;
        },

        // Named forward decls compare by owner decl.
        .fwd_struct,
        .fwd_union,
        => lhs.cast(Payload.FwdDecl).?.data == rhs.cast(Payload.FwdDecl).?.data,

        // Unnamed aggregates compare by owner decl and per-owner id.
        .unnamed_struct,
        .unnamed_union,
        .packed_unnamed_struct,
        .packed_unnamed_union,
        => {
            const lhs_data = lhs.cast(Payload.Unnamed).?.data;
            const rhs_data = rhs.cast(Payload.Unnamed).?.data;
            return lhs_data.owner_decl == rhs_data.owner_decl and lhs_data.id == rhs_data.id;
        },

        // Complete aggregates compare by their forward declaration index.
        .anon_struct,
        .anon_union,
        .@"struct",
        .@"union",
        .packed_struct,
        .packed_union,
        => ctx.eqlIndex(
            lhs.cast(Payload.Aggregate).?.data.fwd_decl,
            rhs.cast(Payload.Aggregate).?.data.fwd_decl,
        ),

        // Functions compare return type and each parameter type.
        .function,
        .varargs_function,
        => {
            const lhs_data = lhs.cast(Payload.Function).?.data;
            const rhs_data = rhs.cast(Payload.Function).?.data;
            if (lhs_data.param_types.len != rhs_data.param_types.len) return false;
            if (!ctx.eqlIndex(lhs_data.return_type, rhs_data.return_type)) return false;
            for (lhs_data.param_types, rhs_data.param_types) |lhs_param_idx, rhs_param_idx| {
                if (!ctx.eqlIndex(lhs_param_idx, rhs_param_idx)) return false;
            }
            return true;
        },
    };
}

/// 64-bit structural hash of `self`, resolving child indices through
/// `store`. Must remain consistent with `eql`/`eqlContext`.
pub fn hash(self: CType, store: Store.Set) u64 {
    var hasher = std.hash.Wyhash.init(0);
    self.updateHasher(&hasher, store);
    return hasher.final();
}

/// Feed `self` (recursively, via `store`) into `hasher`. The cases mirror
/// `eqlContext`: whatever participates in equality participates in the hash.
pub fn updateHasher(self: CType, hasher: anytype, store: Store.Set) void {
    const t = self.tag();
    autoHash(hasher, t);
    switch (t) {
        // Payload-free tags contribute only the tag value hashed above.
        .void,
        .char,
        .@"signed char",
        .short,
        .int,
        .long,
        .@"long long",
        ._Bool,
        .@"unsigned char",
        .@"unsigned short",
        .@"unsigned int",
        .@"unsigned long",
        .@"unsigned long long",
        .float,
        .double,
        .@"long double",
        .bool,
        .size_t,
        .ptrdiff_t,
        .uint8_t,
        .int8_t,
        .uint16_t,
        .int16_t,
        .uint32_t,
        .int32_t,
        .uint64_t,
        .int64_t,
        .uintptr_t,
        .intptr_t,
        .zig_u128,
        .zig_i128,
        .zig_f16,
        .zig_f32,
        .zig_f64,
        .zig_f80,
        .zig_f128,
        .zig_c_longdouble,
        => {},

        .pointer,
        .pointer_const,
        .pointer_volatile,
        .pointer_const_volatile,
        => store.indexToCType(self.cast(Payload.Child).?.data).updateHasher(hasher, store),

        .array,
        .vector,
        => {
            const data = self.cast(Payload.Sequence).?.data;
            autoHash(hasher, data.len);
            store.indexToCType(data.elem_type).updateHasher(hasher, store);
        },

        .fwd_anon_struct,
        .fwd_anon_union,
        => for (self.cast(Payload.Fields).?.data) |field| {
store.indexToCType(field.type).updateHasher(hasher, store); + hasher.update(mem.span(field.name)); + autoHash(hasher, field.alignas.@"align"); + }, + + .fwd_struct, + .fwd_union, + => autoHash(hasher, self.cast(Payload.FwdDecl).?.data), + + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => { + const data = self.cast(Payload.Unnamed).?.data; + autoHash(hasher, data.owner_decl); + autoHash(hasher, data.id); + }, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => store.indexToCType(self.cast(Payload.Aggregate).?.data.fwd_decl) + .updateHasher(hasher, store), + + .function, + .varargs_function, + => { + const data = self.cast(Payload.Function).?.data; + store.indexToCType(data.return_type).updateHasher(hasher, store); + for (data.param_types) |param_ty| { + store.indexToCType(param_ty).updateHasher(hasher, store); + } + }, + } + } + + pub const Kind = enum { forward, forward_parameter, complete, global, parameter, payload }; + + const Convert = struct { + storage: union { + none: void, + child: Payload.Child, + seq: Payload.Sequence, + fwd: Payload.FwdDecl, + anon: struct { + fields: [2]Payload.Fields.Field, + pl: union { + forward: Payload.Fields, + complete: Payload.Aggregate, + }, + }, + }, + value: union(enum) { + tag: Tag, + cty: CType, + }, + + pub fn init(self: *@This(), t: Tag) void { + self.* = if (t.hasPayload()) .{ + .storage = .{ .none = {} }, + .value = .{ .tag = t }, + } else .{ + .storage = .{ .none = {} }, + .value = .{ .cty = initTag(t) }, + }; + } + + pub fn tag(self: @This()) Tag { + return switch (self.value) { + .tag => |t| t, + .cty => |c| c.tag(), + }; + } + + fn tagFromIntInfo(signedness: std.builtin.Signedness, bits: u16) Tag { + return switch (bits) { + 0 => .void, + 1...8 => switch (signedness) { + .unsigned => .uint8_t, + .signed => .int8_t, + }, + 9...16 => switch (signedness) { + .unsigned => .uint16_t, + .signed => .int16_t, + }, + 17...32 => switch 
(signedness) { + .unsigned => .uint32_t, + .signed => .int32_t, + }, + 33...64 => switch (signedness) { + .unsigned => .uint64_t, + .signed => .int64_t, + }, + 65...128 => switch (signedness) { + .unsigned => .zig_u128, + .signed => .zig_i128, + }, + else => .array, + }; + } + + pub const Lookup = union(enum) { + fail: Target, + imm: struct { + set: *const Store.Set, + target: Target, + }, + mut: struct { + promoted: *Store.Promoted, + mod: *Module, + }, + + pub fn isMutable(self: @This()) bool { + return switch (self) { + .fail, .imm => false, + .mut => true, + }; + } + + pub fn getTarget(self: @This()) Target { + return switch (self) { + .fail => |target| target, + .imm => |imm| imm.target, + .mut => |mut| mut.mod.getTarget(), + }; + } + + pub fn getSet(self: @This()) ?*const Store.Set { + return switch (self) { + .fail => null, + .imm => |imm| imm.set, + .mut => |mut| &mut.promoted.set, + }; + } + + pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index { + return switch (self) { + .fail => null, + .imm => |imm| imm.set.typeToIndex(ty, imm.target, kind), + .mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind), + }; + } + + pub fn indexToCType(self: @This(), index: Index) ?CType { + return if (self.getSet()) |set| set.indexToCType(index) else null; + } + + pub fn freeze(self: @This()) @This() { + return switch (self) { + .fail, .imm => self, + .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } }, + }; + } + }; + + fn sortFields(self: *@This(), fields_len: usize) []Payload.Fields.Field { + const Field = Payload.Fields.Field; + const slice = self.storage.anon.fields[0..fields_len]; + std.sort.sort(Field, slice, {}, struct { + fn before(_: void, lhs: Field, rhs: Field) bool { + return lhs.alignas.@"align" > rhs.alignas.@"align"; + } + }.before); + return slice; + } + + fn initAnon(self: *@This(), kind: Kind, fwd_idx: Index, fields_len: usize) void { + switch (kind) { + .forward, .forward_parameter => { + 
self.storage.anon.pl = .{ .forward = .{ + .base = .{ .tag = .fwd_anon_struct }, + .data = self.sortFields(fields_len), + } }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl.forward) }; + }, + .complete, .parameter, .global => { + self.storage.anon.pl = .{ .complete = .{ + .base = .{ .tag = .anon_struct }, + .data = .{ + .fields = self.sortFields(fields_len), + .fwd_decl = fwd_idx, + }, + } }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; + }, + .payload => unreachable, + } + } + + fn initArrayParameter(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { + if (switch (kind) { + .forward_parameter => @as(Index, undefined), + .parameter => try lookup.typeToIndex(ty, .forward_parameter), + .forward, .complete, .global, .payload => unreachable, + }) |fwd_idx| { + if (try lookup.typeToIndex(ty, switch (kind) { + .forward_parameter => .forward, + .parameter => .complete, + .forward, .complete, .global, .payload => unreachable, + })) |array_idx| { + self.storage = .{ .anon = undefined }; + self.storage.anon.fields[0] = .{ + .name = "array", + .type = array_idx, + .alignas = Payload.Fields.AlignAs.abiAlign(ty, lookup.getTarget()), + }; + self.initAnon(kind, fwd_idx, 1); + } else self.init(switch (kind) { + .forward_parameter => .fwd_anon_struct, + .parameter => .anon_struct, + .forward, .complete, .global, .payload => unreachable, + }); + } else self.init(.anon_struct); + } + + pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { + const target = lookup.getTarget(); + + self.* = undefined; + if (!ty.isFnOrHasRuntimeBitsIgnoreComptime()) + self.init(.void) + else if (ty.isAbiInt()) switch (ty.tag()) { + .usize => self.init(.uintptr_t), + .isize => self.init(.intptr_t), + .c_short => self.init(.short), + .c_ushort => self.init(.@"unsigned short"), + .c_int => self.init(.int), + .c_uint => self.init(.@"unsigned int"), + .c_long => self.init(.long), + .c_ulong => self.init(.@"unsigned long"), + .c_longlong 
=> self.init(.@"long long"), + .c_ulonglong => self.init(.@"unsigned long long"), + else => { + const info = ty.intInfo(target); + const t = tagFromIntInfo(info.signedness, info.bits); + switch (t) { + .void => unreachable, + else => self.init(t), + .array => switch (kind) { + .forward, .complete, .global => { + const abi_size = ty.abiSize(target); + const abi_align = ty.abiAlignment(target); + self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{ + .len = @divExact(abi_size, abi_align), + .elem_type = tagFromIntInfo( + .unsigned, + @intCast(u16, abi_align * 8), + ).toIndex(), + } } }; + self.value = .{ .cty = initPayload(&self.storage.seq) }; + }, + .forward_parameter, + .parameter, + => try self.initArrayParameter(ty, kind, lookup), + .payload => unreachable, + }, + } + }, + } else switch (ty.zigTypeTag()) { + .Frame => unreachable, + .AnyFrame => unreachable, + + .Int, + .Enum, + .ErrorSet, + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .EnumLiteral, + => unreachable, + + .Bool => self.init(.bool), + + .Float => self.init(switch (ty.tag()) { + .f16 => .zig_f16, + .f32 => .zig_f32, + .f64 => .zig_f64, + .f80 => .zig_f80, + .f128 => .zig_f128, + .c_longdouble => .zig_c_longdouble, + else => unreachable, + }), + + .Pointer => { + const info = ty.ptrInfo().data; + switch (info.size) { + .Slice => { + if (switch (kind) { + .forward, .forward_parameter => @as(Index, undefined), + .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), + .payload => unreachable, + }) |fwd_idx| { + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const ptr_ty = ty.slicePtrFieldType(&buf); + if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { + self.storage = .{ .anon = undefined }; + self.storage.anon.fields[0] = .{ + .name = "ptr", + .type = ptr_idx, + .alignas = Payload.Fields.AlignAs.abiAlign(ptr_ty, target), + }; + self.storage.anon.fields[1] = .{ + .name = "len", + .type = Tag.uintptr_t.toIndex(), + 
.alignas = Payload.Fields.AlignAs.abiAlign(Type.usize, target), + }; + self.initAnon(kind, fwd_idx, 2); + } else self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); + } else self.init(.anon_struct); + }, + + .One, .Many, .C => { + const t: Tag = switch (info.@"volatile") { + false => switch (info.mutable) { + true => .pointer, + false => .pointer_const, + }, + true => switch (info.mutable) { + true => .pointer_volatile, + false => .pointer_const_volatile, + }, + }; + + var host_int_pl = Type.Payload.Bits{ + .base = .{ .tag = .int_unsigned }, + .data = info.host_size * 8, + }; + const pointee_ty = if (info.host_size > 0) + Type.initPayload(&host_int_pl.base) + else + info.pointee_type; + + if (if (info.size == .C and pointee_ty.tag() == .u8) + Tag.char.toIndex() + else + try lookup.typeToIndex(pointee_ty, .forward)) |child_idx| + { + self.storage = .{ .child = .{ + .base = .{ .tag = t }, + .data = child_idx, + } }; + self.value = .{ .cty = initPayload(&self.storage.child) }; + } else self.init(t); + }, + } + }, + + .Struct, .Union => |zig_tag| if (ty.containerLayout() == .Packed) { + if (ty.castTag(.@"struct")) |struct_obj| { + try self.initType(struct_obj.data.backing_int_ty, kind, lookup); + } else { + var buf: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, ty.bitSize(target)), + }; + try self.initType(Type.initPayload(&buf.base), kind, lookup); + } + } else if (ty.isTupleOrAnonStruct()) { + if (lookup.isMutable()) { + for (0..ty.structFieldCount()) |field_i| { + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + _ = try lookup.typeToIndex(field_ty, switch (kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + }); + } + switch (kind) { + 
.forward, .forward_parameter => {}, + .complete, .parameter, .global => _ = try lookup.typeToIndex(ty, .forward), + .payload => unreachable, + } + } + self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); + } else { + const tag_ty = ty.unionTagTypeSafety(); + const is_tagged_union_wrapper = kind != .payload and tag_ty != null; + const is_struct = zig_tag == .Struct or is_tagged_union_wrapper; + switch (kind) { + .forward, .forward_parameter => { + self.storage = .{ .fwd = .{ + .base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union }, + .data = ty.getOwnerDecl(), + } }; + self.value = .{ .cty = initPayload(&self.storage.fwd) }; + }, + .complete, .parameter, .global, .payload => if (is_tagged_union_wrapper) { + const fwd_idx = try lookup.typeToIndex(ty, .forward); + const payload_idx = try lookup.typeToIndex(ty, .payload); + const tag_idx = try lookup.typeToIndex(tag_ty.?, kind); + if (fwd_idx != null and payload_idx != null and tag_idx != null) { + self.storage = .{ .anon = undefined }; + var field_count: usize = 0; + if (payload_idx != Tag.void.toIndex()) { + self.storage.anon.fields[field_count] = .{ + .name = "payload", + .type = payload_idx.?, + .alignas = Payload.Fields.AlignAs.unionPayloadAlign(ty, target), + }; + field_count += 1; + } + if (tag_idx != Tag.void.toIndex()) { + self.storage.anon.fields[field_count] = .{ + .name = "tag", + .type = tag_idx.?, + .alignas = Payload.Fields.AlignAs.abiAlign(tag_ty.?, target), + }; + field_count += 1; + } + self.storage.anon.pl = .{ .complete = .{ + .base = .{ .tag = .@"struct" }, + .data = .{ + .fields = self.sortFields(field_count), + .fwd_decl = fwd_idx.?, + }, + } }; + self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; + } else self.init(.@"struct"); + } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) { + self.init(.void); + } else { + var is_packed = false; + for 
(0..switch (zig_tag) { + .Struct => ty.structFieldCount(), + .Union => ty.unionFields().count(), + else => unreachable, + }) |field_i| { + const field_ty = ty.structFieldType(field_i); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + + const field_align = Payload.Fields.AlignAs.fieldAlign( + ty, + field_i, + target, + ); + if (field_align.@"align" < field_align.abi) { + is_packed = true; + if (!lookup.isMutable()) break; + } + + if (lookup.isMutable()) { + _ = try lookup.typeToIndex(field_ty, switch (kind) { + .forward, .forward_parameter => unreachable, + .complete, .parameter, .payload => .complete, + .global => .global, + }); + } + } + switch (kind) { + .forward, .forward_parameter => unreachable, + .complete, .parameter, .global => { + _ = try lookup.typeToIndex(ty, .forward); + self.init(if (is_struct) + if (is_packed) .packed_struct else .@"struct" + else if (is_packed) .packed_union else .@"union"); + }, + .payload => self.init(if (is_packed) + .packed_unnamed_union + else + .unnamed_union), + } + }, + } + }, + + .Array, .Vector => |zig_tag| { + switch (kind) { + .forward, .complete, .global => { + const t: Tag = switch (zig_tag) { + .Array => .array, + .Vector => .vector, + else => unreachable, + }; + if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| { + self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{ + .len = ty.arrayLenIncludingSentinel(), + .elem_type = child_idx, + } } }; + self.value = .{ .cty = initPayload(&self.storage.seq) }; + } else self.init(t); + }, + .forward_parameter, .parameter => try self.initArrayParameter(ty, kind, lookup), + .payload => unreachable, + } + }, + + .Optional => { + var buf: Type.Payload.ElemType = undefined; + const payload_ty = ty.optionalChild(&buf); + if (payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (ty.optionalReprIsPayload()) { + try self.initType(payload_ty, kind, lookup); + } else if (switch (kind) { + .forward, .forward_parameter => @as(Index, undefined), + .complete, 
.parameter, .global => try lookup.typeToIndex(ty, .forward), + .payload => unreachable, + }) |fwd_idx| { + if (try lookup.typeToIndex(payload_ty, switch (kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + })) |payload_idx| { + self.storage = .{ .anon = undefined }; + self.storage.anon.fields[0] = .{ + .name = "payload", + .type = payload_idx, + .alignas = Payload.Fields.AlignAs.abiAlign(payload_ty, target), + }; + self.storage.anon.fields[1] = .{ + .name = "is_null", + .type = Tag.bool.toIndex(), + .alignas = Payload.Fields.AlignAs.abiAlign(Type.bool, target), + }; + self.initAnon(kind, fwd_idx, 2); + } else self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); + } else self.init(.anon_struct); + } else self.init(.bool); + }, + + .ErrorUnion => { + if (switch (kind) { + .forward, .forward_parameter => @as(Index, undefined), + .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), + .payload => unreachable, + }) |fwd_idx| { + const payload_ty = ty.errorUnionPayload(); + if (try lookup.typeToIndex(payload_ty, switch (kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + })) |payload_idx| { + const error_ty = ty.errorUnionSet(); + if (payload_idx == Tag.void.toIndex()) { + try self.initType(error_ty, kind, lookup); + } else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| { + self.storage = .{ .anon = undefined }; + self.storage.anon.fields[0] = .{ + .name = "payload", + .type = payload_idx, + .alignas = Payload.Fields.AlignAs.abiAlign(payload_ty, target), + }; + self.storage.anon.fields[1] = .{ + .name = "error", + .type = error_idx, + .alignas = Payload.Fields.AlignAs.abiAlign(error_ty, target), + }; + self.initAnon(kind, fwd_idx, 2); + } else self.init(switch 
(kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); + } else self.init(switch (kind) { + .forward, .forward_parameter => .fwd_anon_struct, + .complete, .parameter, .global => .anon_struct, + .payload => unreachable, + }); + } else self.init(.anon_struct); + }, + + .Opaque => switch (ty.tag()) { + .anyopaque => self.init(.void), + .@"opaque" => { + self.storage = .{ .fwd = .{ + .base = .{ .tag = .fwd_struct }, + .data = ty.getOwnerDecl(), + } }; + self.value = .{ .cty = initPayload(&self.storage.fwd) }; + }, + else => unreachable, + }, + + .Fn => { + const info = ty.fnInfo(); + if (lookup.isMutable()) { + const param_kind: Kind = switch (kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, + }; + _ = try lookup.typeToIndex(info.return_type, param_kind); + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + _ = try lookup.typeToIndex(param_type, param_kind); + } + } + self.init(if (info.is_var_args) .varargs_function else .function); + }, + } + } + }; + + pub fn copy(self: CType, arena: Allocator) !CType { + return self.copyContext(struct { + arena: Allocator, + pub fn copyIndex(_: @This(), idx: Index) Index { + return idx; + } + }{ .arena = arena }); + } + + fn copyFields(ctx: anytype, old_fields: Payload.Fields.Data) !Payload.Fields.Data { + const new_fields = try ctx.arena.alloc(Payload.Fields.Field, old_fields.len); + for (new_fields, old_fields) |*new_field, old_field| { + new_field.name = try ctx.arena.dupeZ(u8, mem.span(old_field.name)); + new_field.type = ctx.copyIndex(old_field.type); + new_field.alignas = old_field.alignas; + } + return new_fields; + } + + fn copyParams(ctx: anytype, old_param_types: []const Index) ![]const Index { + const new_param_types = try ctx.arena.alloc(Index, old_param_types.len); + for (new_param_types, 
old_param_types) |*new_param_type, old_param_type| + new_param_type.* = ctx.copyIndex(old_param_type); + return new_param_types; + } + + pub fn copyContext(self: CType, ctx: anytype) !CType { + switch (self.tag()) { + .void, + .char, + .@"signed char", + .short, + .int, + .long, + .@"long long", + ._Bool, + .@"unsigned char", + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .float, + .double, + .@"long double", + .bool, + .size_t, + .ptrdiff_t, + .uint8_t, + .int8_t, + .uint16_t, + .int16_t, + .uint32_t, + .int32_t, + .uint64_t, + .int64_t, + .uintptr_t, + .intptr_t, + .zig_u128, + .zig_i128, + .zig_f16, + .zig_f32, + .zig_f64, + .zig_f80, + .zig_f128, + .zig_c_longdouble, + => return self, + + .pointer, + .pointer_const, + .pointer_volatile, + .pointer_const_volatile, + => { + const pl = self.cast(Payload.Child).?; + const new_pl = try ctx.arena.create(Payload.Child); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = ctx.copyIndex(pl.data) }; + return initPayload(new_pl); + }, + + .array, + .vector, + => { + const pl = self.cast(Payload.Sequence).?; + const new_pl = try ctx.arena.create(Payload.Sequence); + new_pl.* = .{ + .base = .{ .tag = pl.base.tag }, + .data = .{ .len = pl.data.len, .elem_type = ctx.copyIndex(pl.data.elem_type) }, + }; + return initPayload(new_pl); + }, + + .fwd_anon_struct, + .fwd_anon_union, + => { + const pl = self.cast(Payload.Fields).?; + const new_pl = try ctx.arena.create(Payload.Fields); + new_pl.* = .{ + .base = .{ .tag = pl.base.tag }, + .data = try copyFields(ctx, pl.data), + }; + return initPayload(new_pl); + }, + + .fwd_struct, + .fwd_union, + => { + const pl = self.cast(Payload.FwdDecl).?; + const new_pl = try ctx.arena.create(Payload.FwdDecl); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = pl.data }; + return initPayload(new_pl); + }, + + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => { + const pl = 
self.cast(Payload.Unnamed).?; + const new_pl = try ctx.arena.create(Payload.Unnamed); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ + .fields = try copyFields(ctx, pl.data.fields), + .owner_decl = pl.data.owner_decl, + .id = pl.data.id, + } }; + return initPayload(new_pl); + }, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => { + const pl = self.cast(Payload.Aggregate).?; + const new_pl = try ctx.arena.create(Payload.Aggregate); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ + .fields = try copyFields(ctx, pl.data.fields), + .fwd_decl = ctx.copyIndex(pl.data.fwd_decl), + } }; + return initPayload(new_pl); + }, + + .function, + .varargs_function, + => { + const pl = self.cast(Payload.Function).?; + const new_pl = try ctx.arena.create(Payload.Function); + new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ + .return_type = ctx.copyIndex(pl.data.return_type), + .param_types = try copyParams(ctx, pl.data.param_types), + } }; + return initPayload(new_pl); + }, + } + } + + fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType { + var convert: Convert = undefined; + try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } }); + return createFromConvert(store, ty, target, kind, &convert); + } + + fn createFromConvert( + store: *Store.Promoted, + ty: Type, + target: Target, + kind: Kind, + convert: Convert, + ) !CType { + const arena = store.arena.allocator(); + switch (convert.value) { + .cty => |c| return c.copy(arena), + .tag => |t| switch (t) { + .fwd_anon_struct, + .fwd_anon_union, + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => switch (ty.zigTypeTag()) { + .Struct => { + const fields_len = ty.structFieldCount(); + + var c_fields_len: usize = 0; + for (0..fields_len) |field_i| { + const 
field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + c_fields_len += 1; + } + + const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); + var c_field_i: usize = 0; + for (0..fields_len) |field_i| { + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + + fields_pl[c_field_i] = .{ + .name = try if (ty.isSimpleTuple()) + std.fmt.allocPrintZ(arena, "f{}", .{field_i}) + else + arena.dupeZ(u8, ty.structFieldName(field_i)), + .type = store.set.typeToIndex(field_ty, target, switch (kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + }).?, + .alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target), + }; + c_field_i += 1; + } + + switch (t) { + .fwd_anon_struct => { + const anon_pl = try arena.create(Payload.Fields); + anon_pl.* = .{ .base = .{ .tag = t }, .data = fields_pl }; + return initPayload(anon_pl); + }, + + .anon_struct, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => { + const struct_pl = try arena.create(Payload.Aggregate); + struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + } }; + return initPayload(struct_pl); + }, + + else => unreachable, + } + }, + + .Union => { + const union_fields = ty.unionFields(); + const fields_len = union_fields.count(); + + var c_fields_len: usize = 0; + for (0..fields_len) |field_i| { + const field_ty = ty.structFieldType(field_i); + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + c_fields_len += 1; + } + + const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); + var field_i: usize = 0; + var c_field_i: usize = 0; + var field_it = union_fields.iterator(); + while (field_it.next()) |field| { + defer field_i += 1; + if 
(!field.value_ptr.ty.hasRuntimeBitsIgnoreComptime()) continue; + + fields_pl[c_field_i] = .{ + .name = try arena.dupeZ(u8, field.key_ptr.*), + .type = store.set.typeToIndex(field.value_ptr.ty, target, switch (kind) { + .forward, .forward_parameter => unreachable, + .complete, .parameter, .payload => .complete, + .global => .global, + }).?, + .alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target), + }; + c_field_i += 1; + } + + switch (kind) { + .forward, .forward_parameter => unreachable, + .complete, .parameter, .global => { + const union_pl = try arena.create(Payload.Aggregate); + union_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + } }; + return initPayload(union_pl); + }, + .payload => if (ty.unionTagTypeSafety()) |_| { + const union_pl = try arena.create(Payload.Unnamed); + union_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .fields = fields_pl, + .owner_decl = ty.getOwnerDecl(), + .id = 0, + } }; + return initPayload(union_pl); + } else unreachable, + } + }, + + else => unreachable, + }, + + .function, + .varargs_function, + => { + const info = ty.fnInfo(); + const param_kind: Kind = switch (kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, + }; + + var c_params_len: usize = 0; + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + c_params_len += 1; + } + + const params_pl = try arena.alloc(Index, c_params_len); + var c_param_i: usize = 0; + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?; + c_param_i += 1; + } + + const fn_pl = try arena.create(Payload.Function); + fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ + .return_type = store.set.typeToIndex(info.return_type, target, param_kind).?, + .param_types 
= params_pl, + } }; + return initPayload(fn_pl); + }, + + else => unreachable, + }, + } + } + + pub const HashContext64 = struct { + store: *const Store.Set, + + pub fn hash(self: @This(), cty: CType) u64 { + return cty.hash(self.store.*); + } + pub fn eql(_: @This(), lhs: CType, rhs: CType) bool { + return lhs.eql(rhs); + } + }; + + pub const HashContext32 = struct { + store: *const Store.Set, + + pub fn hash(self: @This(), cty: CType) u32 { + return @truncate(u32, cty.hash(self.store.*)); + } + pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool { + return lhs.eql(rhs); + } + }; + + pub const TypeAdapter64 = struct { + kind: Kind, + lookup: Convert.Lookup, + convert: *const Convert, + + fn eqlRecurse(self: @This(), ty: Type, cty: Index, kind: Kind) bool { + assert(!self.lookup.isMutable()); + + var convert: Convert = undefined; + convert.initType(ty, kind, self.lookup) catch unreachable; + + const self_recurse = @This(){ .kind = kind, .lookup = self.lookup, .convert = &convert }; + return self_recurse.eql(ty, self.lookup.indexToCType(cty).?); + } + + pub fn eql(self: @This(), ty: Type, cty: CType) bool { + switch (self.convert.value) { + .cty => |c| return c.eql(cty), + .tag => |t| { + if (t != cty.tag()) return false; + + const target = self.lookup.getTarget(); + switch (t) { + .fwd_anon_struct, + .fwd_anon_union, + => { + if (!ty.isTupleOrAnonStruct()) return false; + + var name_buf: [ + std.fmt.count("f{}", .{std.math.maxInt(usize)}) + ]u8 = undefined; + const c_fields = cty.cast(Payload.Fields).?.data; + + var c_field_i: usize = 0; + for (0..ty.structFieldCount()) |field_i| { + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + + const c_field = &c_fields[c_field_i]; + c_field_i += 1; + + if (!self.eqlRecurse(field_ty, c_field.type, switch (self.kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + 
.payload => unreachable, + }) or !mem.eql( + u8, + if (ty.isSimpleTuple()) + std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable + else + ty.structFieldName(field_i), + mem.span(c_field.name), + ) or Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align" != + c_field.alignas.@"align") return false; + } + return true; + }, + + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => switch (self.kind) { + .forward, .forward_parameter, .complete, .parameter, .global => unreachable, + .payload => if (ty.unionTagTypeSafety()) |_| { + const data = cty.cast(Payload.Unnamed).?.data; + return ty.getOwnerDecl() == data.owner_decl and data.id == 0; + } else unreachable, + }, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => return self.eqlRecurse( + ty, + cty.cast(Payload.Aggregate).?.data.fwd_decl, + .forward, + ), + + .function, + .varargs_function, + => { + if (ty.zigTypeTag() != .Fn) return false; + + const info = ty.fnInfo(); + const data = cty.cast(Payload.Function).?.data; + const param_kind: Kind = switch (self.kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, + }; + + if (!self.eqlRecurse(info.return_type, data.return_type, param_kind)) + return false; + + var c_param_i: usize = 0; + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + + if (c_param_i >= data.param_types.len) return false; + const param_cty = data.param_types[c_param_i]; + c_param_i += 1; + + if (!self.eqlRecurse(param_type, param_cty, param_kind)) + return false; + } + return c_param_i == data.param_types.len; + }, + + else => unreachable, + } + }, + } + } + + pub fn hash(self: @This(), ty: Type) u64 { + var hasher = std.hash.Wyhash.init(0); + self.updateHasher(&hasher, ty); + return hasher.final(); + } + + fn updateHasherRecurse(self: @This(), hasher: anytype, 
ty: Type, kind: Kind) void { + assert(!self.lookup.isMutable()); + + var convert: Convert = undefined; + convert.initType(ty, kind, self.lookup) catch unreachable; + + const self_recurse = @This(){ .kind = kind, .lookup = self.lookup, .convert = &convert }; + self_recurse.updateHasher(hasher, ty); + } + + pub fn updateHasher(self: @This(), hasher: anytype, ty: Type) void { + switch (self.convert.value) { + .cty => |c| return c.updateHasher(hasher, self.lookup.getSet().?.*), + .tag => |t| { + autoHash(hasher, t); + + const target = self.lookup.getTarget(); + switch (t) { + .fwd_anon_struct, + .fwd_anon_union, + => { + var name_buf: [ + std.fmt.count("f{}", .{std.math.maxInt(usize)}) + ]u8 = undefined; + for (0..switch (ty.zigTypeTag()) { + .Struct => ty.structFieldCount(), + .Union => ty.unionFields().count(), + else => unreachable, + }) |field_i| { + const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i) or + !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + + self.updateHasherRecurse( + hasher, + ty.structFieldType(field_i), + switch (self.kind) { + .forward, .forward_parameter => .forward, + .complete, .parameter => .complete, + .global => .global, + .payload => unreachable, + }, + ); + hasher.update(if (ty.isSimpleTuple()) + std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable + else + ty.structFieldName(field_i)); + autoHash( + hasher, + Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align", + ); + } + }, + + .unnamed_struct, + .unnamed_union, + .packed_unnamed_struct, + .packed_unnamed_union, + => switch (self.kind) { + .forward, .forward_parameter, .complete, .parameter, .global => unreachable, + .payload => if (ty.unionTagTypeSafety()) |_| { + autoHash(hasher, ty.getOwnerDecl()); + autoHash(hasher, @as(u32, 0)); + } else unreachable, + }, + + .anon_struct, + .anon_union, + .@"struct", + .@"union", + .packed_struct, + .packed_union, + => self.updateHasherRecurse(hasher, ty, .forward), + + .function, + 
.varargs_function, + => { + const info = ty.fnInfo(); + const param_kind: Kind = switch (self.kind) { + .forward, .forward_parameter => .forward_parameter, + .complete, .parameter, .global => .parameter, + .payload => unreachable, + }; + + self.updateHasherRecurse(hasher, info.return_type, param_kind); + for (info.param_types) |param_type| { + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + self.updateHasherRecurse(hasher, param_type, param_kind); + } + }, + + else => unreachable, + } + }, + } + } + }; + + pub const TypeAdapter32 = struct { + kind: Kind, + lookup: Convert.Lookup, + convert: *const Convert, + + fn to64(self: @This()) TypeAdapter64 { + return .{ .kind = self.kind, .lookup = self.lookup, .convert = self.convert }; + } + + pub fn eql(self: @This(), ty: Type, cty: CType, cty_index: usize) bool { + _ = cty_index; + return self.to64().eql(ty, cty); + } + + pub fn hash(self: @This(), ty: Type) u32 { + return @truncate(u32, self.to64().hash(ty)); + } + }; +}; diff --git a/src/link/C.zig b/src/link/C.zig index 02e5cadfbc..262e4e4923 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -22,27 +22,22 @@ base: link.File, /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function, stitching pre-rendered pieces of C code together. decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclBlock) = .{}, -/// Stores Type/Value data for `typedefs` to reference. -/// Accumulates allocations and then there is a periodic garbage collection after flush(). -arena: std.heap.ArenaAllocator, /// Per-declaration data. const DeclBlock = struct { code: std.ArrayListUnmanaged(u8) = .{}, fwd_decl: std.ArrayListUnmanaged(u8) = .{}, - /// Each Decl stores a mapping of Zig Types to corresponding C types, for every - /// Zig Type used by the Decl. In flush(), we iterate over each Decl - /// and emit the typedef code for all types, making sure to not emit the same thing twice. 
- /// Any arena memory the Type points to lives in the `arena` field of `C`. - typedefs: codegen.TypedefMap.Unmanaged = .{}, + /// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate + /// over each `Decl` and generate the definition for each used `CType` once. + ctypes: codegen.CType.Store = .{}, + /// Key and Value storage use the ctype arena. + lazy_fns: codegen.LazyFnMap = .{}, fn deinit(db: *DeclBlock, gpa: Allocator) void { - db.code.deinit(gpa); + db.lazy_fns.deinit(gpa); + db.ctypes.deinit(gpa); db.fwd_decl.deinit(gpa); - for (db.typedefs.values()) |typedef| { - gpa.free(typedef.rendered); - } - db.typedefs.deinit(gpa); + db.code.deinit(gpa); db.* = undefined; } }; @@ -64,7 +59,6 @@ pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C errdefer gpa.destroy(c_file); c_file.* = C{ - .arena = std.heap.ArenaAllocator.init(gpa), .base = .{ .tag = .c, .options = options, @@ -83,8 +77,6 @@ pub fn deinit(self: *C) void { db.deinit(gpa); } self.decl_table.deinit(gpa); - - self.arena.deinit(); } pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void { @@ -99,124 +91,122 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes const tracy = trace(@src()); defer tracy.end(); + const gpa = self.base.allocator; + const decl_index = func.owner_decl; - const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index); + const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } + const ctypes = &gop.value_ptr.ctypes; + const lazy_fns = &gop.value_ptr.lazy_fns; const fwd_decl = &gop.value_ptr.fwd_decl; - const typedefs = &gop.value_ptr.typedefs; const code = &gop.value_ptr.code; + ctypes.clearRetainingCapacity(gpa); + lazy_fns.clearRetainingCapacity(); fwd_decl.shrinkRetainingCapacity(0); - for (typedefs.values()) |typedef| { - module.gpa.free(typedef.rendered); - } - typedefs.clearRetainingCapacity(); code.shrinkRetainingCapacity(0); var 
function: codegen.Function = .{ - .value_map = codegen.CValueMap.init(module.gpa), + .value_map = codegen.CValueMap.init(gpa), .air = air, .liveness = liveness, .func = func, .object = .{ .dg = .{ - .gpa = module.gpa, + .gpa = gpa, .module = module, .error_msg = null, - .decl_index = decl_index, + .decl_index = decl_index.toOptional(), .decl = module.declPtr(decl_index), - .fwd_decl = fwd_decl.toManaged(module.gpa), - .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), - .typedefs_arena = self.arena.allocator(), + .fwd_decl = fwd_decl.toManaged(gpa), + .ctypes = ctypes.*, }, - .code = code.toManaged(module.gpa), + .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }, - .arena = std.heap.ArenaAllocator.init(module.gpa), + .lazy_fns = lazy_fns.*, + .arena = std.heap.ArenaAllocator.init(gpa), }; function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() }; - defer function.deinit(module.gpa); + defer function.deinit(); codegen.genFunc(&function) catch |err| switch (err) { error.AnalysisFail => { - try module.failed_decls.put(module.gpa, decl_index, function.object.dg.error_msg.?); + try module.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?); return; }, else => |e| return e, }; + ctypes.* = function.object.dg.ctypes.move(); + lazy_fns.* = function.lazy_fns.move(); fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged(); - typedefs.* = function.object.dg.typedefs.unmanaged; - function.object.dg.typedefs.unmanaged = .{}; code.* = function.object.code.moveToUnmanaged(); // Free excess allocated memory for this Decl. 
- fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); - code.shrinkAndFree(module.gpa, code.items.len); + ctypes.shrinkAndFree(gpa, ctypes.count()); + lazy_fns.shrinkAndFree(gpa, lazy_fns.count()); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); + code.shrinkAndFree(gpa, code.items.len); } pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !void { const tracy = trace(@src()); defer tracy.end(); - const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index); + const gpa = self.base.allocator; + + const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } + const ctypes = &gop.value_ptr.ctypes; const fwd_decl = &gop.value_ptr.fwd_decl; - const typedefs = &gop.value_ptr.typedefs; const code = &gop.value_ptr.code; + ctypes.clearRetainingCapacity(gpa); fwd_decl.shrinkRetainingCapacity(0); - for (typedefs.values()) |value| { - module.gpa.free(value.rendered); - } - typedefs.clearRetainingCapacity(); code.shrinkRetainingCapacity(0); const decl = module.declPtr(decl_index); var object: codegen.Object = .{ .dg = .{ - .gpa = module.gpa, + .gpa = gpa, .module = module, .error_msg = null, - .decl_index = decl_index, + .decl_index = decl_index.toOptional(), .decl = decl, - .fwd_decl = fwd_decl.toManaged(module.gpa), - .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), - .typedefs_arena = self.arena.allocator(), + .fwd_decl = fwd_decl.toManaged(gpa), + .ctypes = ctypes.*, }, - .code = code.toManaged(module.gpa), + .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { object.code.deinit(); - for (object.dg.typedefs.values()) |typedef| { - module.gpa.free(typedef.rendered); - } - object.dg.typedefs.deinit(); + object.dg.ctypes.deinit(object.dg.gpa); object.dg.fwd_decl.deinit(); } codegen.genDecl(&object) catch |err| switch (err) { 
error.AnalysisFail => { - try module.failed_decls.put(module.gpa, decl_index, object.dg.error_msg.?); + try module.failed_decls.put(gpa, decl_index, object.dg.error_msg.?); return; }, else => |e| return e, }; + ctypes.* = object.dg.ctypes.move(); fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); - typedefs.* = object.dg.typedefs.unmanaged; - object.dg.typedefs.unmanaged = .{}; code.* = object.code.moveToUnmanaged(); // Free excess allocated memory for this Decl. - fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); - code.shrinkAndFree(module.gpa, code.items.len); + ctypes.shrinkAndFree(gpa, ctypes.count()); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); + code.shrinkAndFree(gpa, code.items.len); } pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void { @@ -246,7 +236,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) sub_prog_node.activate(); defer sub_prog_node.end(); - const gpa = comp.gpa; + const gpa = self.base.allocator; const module = self.base.options.module.?; // This code path happens exclusively with -ofmt=c. The flush logic for @@ -257,30 +247,28 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) const abi_define = abiDefine(comp); - // Covers defines, zig.h, typedef, and asm. - var buf_count: usize = 2; - if (abi_define != null) buf_count += 1; - try f.all_buffers.ensureUnusedCapacity(gpa, buf_count); + // Covers defines, zig.h, ctypes, asm, lazy fwd, lazy code. 
+ try f.all_buffers.ensureUnusedCapacity(gpa, 6); if (abi_define) |buf| f.appendBufAssumeCapacity(buf); f.appendBufAssumeCapacity(zig_h); - const typedef_index = f.all_buffers.items.len; + const ctypes_index = f.all_buffers.items.len; f.all_buffers.items.len += 1; { - var asm_buf = f.asm_buf.toManaged(module.gpa); - defer asm_buf.deinit(); - - try codegen.genGlobalAsm(module, &asm_buf); - - f.asm_buf = asm_buf.moveToUnmanaged(); - f.appendBufAssumeCapacity(f.asm_buf.items); + var asm_buf = f.asm_buf.toManaged(gpa); + defer f.asm_buf = asm_buf.moveToUnmanaged(); + try codegen.genGlobalAsm(module, asm_buf.writer()); + f.appendBufAssumeCapacity(asm_buf.items); } - try self.flushErrDecls(&f); + const lazy_indices = f.all_buffers.items.len; + f.all_buffers.items.len += 2; - // Typedefs, forward decls, and non-functions first. + try self.flushErrDecls(&f.lazy_db); + + // `CType`s, forward decls, and non-functions first. // Unlike other backends, the .c code we are emitting is order-dependent. Therefore // we must traverse the set of Decls that we are emitting according to their dependencies. // Our strategy is to populate a set of remaining decls, pop Decls one by one, @@ -307,11 +295,35 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) } } - f.all_buffers.items[typedef_index] = .{ - .iov_base = if (f.typedef_buf.items.len > 0) f.typedef_buf.items.ptr else "", - .iov_len = f.typedef_buf.items.len, + { + // We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes. 
+ assert(f.ctypes.count() == 0); + try self.flushCTypes(&f, .none, f.lazy_db.ctypes); + + var it = self.decl_table.iterator(); + while (it.next()) |entry| + try self.flushCTypes(&f, entry.key_ptr.toOptional(), entry.value_ptr.ctypes); + } + + { + f.all_buffers.items[lazy_indices + 0] = .{ + .iov_base = if (f.lazy_db.fwd_decl.items.len > 0) f.lazy_db.fwd_decl.items.ptr else "", + .iov_len = f.lazy_db.fwd_decl.items.len, + }; + f.file_size += f.lazy_db.fwd_decl.items.len; + + f.all_buffers.items[lazy_indices + 1] = .{ + .iov_base = if (f.lazy_db.code.items.len > 0) f.lazy_db.code.items.ptr else "", + .iov_len = f.lazy_db.code.items.len, + }; + f.file_size += f.lazy_db.code.items.len; + } + + f.all_buffers.items[ctypes_index] = .{ + .iov_base = if (f.ctypes_buf.items.len > 0) f.ctypes_buf.items.ptr else "", + .iov_len = f.ctypes_buf.items.len, }; - f.file_size += f.typedef_buf.items.len; + f.file_size += f.ctypes_buf.items.len; // Now the code. try f.all_buffers.ensureUnusedCapacity(gpa, decl_values.len); @@ -324,22 +336,23 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) } const Flush = struct { - err_decls: DeclBlock = .{}, remaining_decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void) = .{}, - typedefs: Typedefs = .{}, - typedef_buf: std.ArrayListUnmanaged(u8) = .{}, + + ctypes: codegen.CType.Store = .{}, + ctypes_map: std.ArrayListUnmanaged(codegen.CType.Index) = .{}, + ctypes_buf: std.ArrayListUnmanaged(u8) = .{}, + + lazy_db: DeclBlock = .{}, + lazy_fns: LazyFns = .{}, + asm_buf: std.ArrayListUnmanaged(u8) = .{}, + /// We collect a list of buffers to write, and write them all at once with pwritev 😎 all_buffers: std.ArrayListUnmanaged(std.os.iovec_const) = .{}, /// Keeps track of the total bytes of `all_buffers`. 
file_size: u64 = 0, - const Typedefs = std.HashMapUnmanaged( - Type, - void, - Type.HashContext64, - std.hash_map.default_max_load_percentage, - ); + const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, void); fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void { if (buf.len == 0) return; @@ -349,10 +362,13 @@ const Flush = struct { fn deinit(f: *Flush, gpa: Allocator) void { f.all_buffers.deinit(gpa); - f.typedef_buf.deinit(gpa); - f.typedefs.deinit(gpa); + f.asm_buf.deinit(gpa); + f.lazy_fns.deinit(gpa); + f.lazy_db.deinit(gpa); + f.ctypes_buf.deinit(gpa); + f.ctypes_map.deinit(gpa); + f.ctypes.deinit(gpa); f.remaining_decls.deinit(gpa); - f.err_decls.deinit(gpa); } }; @@ -360,53 +376,116 @@ const FlushDeclError = error{ OutOfMemory, }; -fn flushTypedefs(self: *C, f: *Flush, typedefs: codegen.TypedefMap.Unmanaged) FlushDeclError!void { - if (typedefs.count() == 0) return; +fn flushCTypes( + self: *C, + f: *Flush, + decl_index: Module.Decl.OptionalIndex, + decl_ctypes: codegen.CType.Store, +) FlushDeclError!void { const gpa = self.base.allocator; - const module = self.base.options.module.?; + const mod = self.base.options.module.?; - try f.typedefs.ensureUnusedCapacityContext(gpa, @intCast(u32, typedefs.count()), .{ - .mod = module, - }); - var it = typedefs.iterator(); - while (it.next()) |new| { - const gop = f.typedefs.getOrPutAssumeCapacityContext(new.key_ptr.*, .{ - .mod = module, + const decl_ctypes_len = decl_ctypes.count(); + f.ctypes_map.clearRetainingCapacity(); + try f.ctypes_map.ensureTotalCapacity(gpa, decl_ctypes_len); + + var global_ctypes = f.ctypes.promote(gpa); + defer f.ctypes.demote(global_ctypes); + + var ctypes_buf = f.ctypes_buf.toManaged(gpa); + defer f.ctypes_buf = ctypes_buf.moveToUnmanaged(); + const writer = ctypes_buf.writer(); + + const slice = decl_ctypes.set.map.entries.slice(); + for (slice.items(.key), 0..) 
|decl_cty, decl_i| { + const Context = struct { + arena: Allocator, + ctypes_map: []codegen.CType.Index, + cached_hash: codegen.CType.Store.Set.Map.Hash, + idx: codegen.CType.Index, + + pub fn hash(ctx: @This(), _: codegen.CType) codegen.CType.Store.Set.Map.Hash { + return ctx.cached_hash; + } + pub fn eql(ctx: @This(), lhs: codegen.CType, rhs: codegen.CType, _: usize) bool { + return lhs.eqlContext(rhs, ctx); + } + pub fn eqlIndex( + ctx: @This(), + lhs_idx: codegen.CType.Index, + rhs_idx: codegen.CType.Index, + ) bool { + if (lhs_idx < codegen.CType.Tag.no_payload_count or + rhs_idx < codegen.CType.Tag.no_payload_count) return lhs_idx == rhs_idx; + const lhs_i = lhs_idx - codegen.CType.Tag.no_payload_count; + if (lhs_i >= ctx.ctypes_map.len) return false; + return ctx.ctypes_map[lhs_i] == rhs_idx; + } + pub fn copyIndex(ctx: @This(), idx: codegen.CType.Index) codegen.CType.Index { + if (idx < codegen.CType.Tag.no_payload_count) return idx; + return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count]; + } + }; + const decl_idx = @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + decl_i); + const ctx = Context{ + .arena = global_ctypes.arena.allocator(), + .ctypes_map = f.ctypes_map.items, + .cached_hash = decl_ctypes.indexToHash(decl_idx), + .idx = decl_idx, + }; + const gop = try global_ctypes.set.map.getOrPutContextAdapted(gpa, decl_cty, ctx, .{ + .store = &global_ctypes.set, }); + const global_idx = + @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + gop.index); + f.ctypes_map.appendAssumeCapacity(global_idx); if (!gop.found_existing) { - try f.typedef_buf.appendSlice(gpa, new.value_ptr.rendered); + errdefer _ = global_ctypes.set.map.pop(); + gop.key_ptr.* = try decl_cty.copyContext(ctx); } + if (std.debug.runtime_safety) { + const global_cty = &global_ctypes.set.map.entries.items(.key)[gop.index]; + assert(global_cty == gop.key_ptr); + assert(decl_cty.eqlContext(global_cty.*, ctx)); + assert(decl_cty.hash(decl_ctypes.set) 
== global_cty.hash(global_ctypes.set)); + } + try codegen.genTypeDecl( + mod, + writer, + global_ctypes.set, + global_idx, + decl_index, + decl_ctypes.set, + decl_idx, + gop.found_existing, + ); } } -fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void { - const module = self.base.options.module.?; +fn flushErrDecls(self: *C, db: *DeclBlock) FlushDeclError!void { + const gpa = self.base.allocator; - const fwd_decl = &f.err_decls.fwd_decl; - const typedefs = &f.err_decls.typedefs; - const code = &f.err_decls.code; + const fwd_decl = &db.fwd_decl; + const ctypes = &db.ctypes; + const code = &db.code; var object = codegen.Object{ .dg = .{ - .gpa = module.gpa, - .module = module, + .gpa = gpa, + .module = self.base.options.module.?, .error_msg = null, - .decl_index = undefined, - .decl = undefined, - .fwd_decl = fwd_decl.toManaged(module.gpa), - .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), - .typedefs_arena = self.arena.allocator(), + .decl_index = .none, + .decl = null, + .fwd_decl = fwd_decl.toManaged(gpa), + .ctypes = ctypes.*, }, - .code = code.toManaged(module.gpa), + .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { object.code.deinit(); - for (object.dg.typedefs.values()) |typedef| { - module.gpa.free(typedef.rendered); - } - object.dg.typedefs.deinit(); + object.dg.ctypes.deinit(gpa); object.dg.fwd_decl.deinit(); } @@ -416,14 +495,58 @@ fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void { }; fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); - typedefs.* = object.dg.typedefs.unmanaged; - object.dg.typedefs.unmanaged = .{}; + ctypes.* = object.dg.ctypes.move(); code.* = object.code.moveToUnmanaged(); +} - try self.flushTypedefs(f, typedefs.*); - try f.all_buffers.ensureUnusedCapacity(self.base.allocator, 1); - f.appendBufAssumeCapacity(fwd_decl.items); - 
f.appendBufAssumeCapacity(code.items); +fn flushLazyFn(self: *C, db: *DeclBlock, lazy_fn: codegen.LazyFnMap.Entry) FlushDeclError!void { + const gpa = self.base.allocator; + + const fwd_decl = &db.fwd_decl; + const ctypes = &db.ctypes; + const code = &db.code; + + var object = codegen.Object{ + .dg = .{ + .gpa = gpa, + .module = self.base.options.module.?, + .error_msg = null, + .decl_index = .none, + .decl = null, + .fwd_decl = fwd_decl.toManaged(gpa), + .ctypes = ctypes.*, + }, + .code = code.toManaged(gpa), + .indent_writer = undefined, // set later so we can get a pointer to object.code + }; + object.indent_writer = .{ .underlying_writer = object.code.writer() }; + defer { + object.code.deinit(); + object.dg.ctypes.deinit(gpa); + object.dg.fwd_decl.deinit(); + } + + codegen.genLazyFn(&object, lazy_fn) catch |err| switch (err) { + error.AnalysisFail => unreachable, + else => |e| return e, + }; + + fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); + ctypes.* = object.dg.ctypes.move(); + code.* = object.code.moveToUnmanaged(); +} + +fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void { + const gpa = self.base.allocator; + try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(Flush.LazyFns.Size, lazy_fns.count())); + + var it = lazy_fns.iterator(); + while (it.next()) |entry| { + const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*); + if (gop.found_existing) continue; + gop.value_ptr.* = {}; + try self.flushLazyFn(&f.lazy_db, entry); + } } /// Assumes `decl` was in the `remaining_decls` set, and has already been removed. 
@@ -433,8 +556,8 @@ fn flushDecl( decl_index: Module.Decl.Index, export_names: std.StringHashMapUnmanaged(void), ) FlushDeclError!void { - const module = self.base.options.module.?; - const decl = module.declPtr(decl_index); + const gpa = self.base.allocator; + const decl = self.base.options.module.?.declPtr(decl_index); // Before flushing any particular Decl we must ensure its // dependencies are already flushed, so that the order in the .c // file comes out correctly. @@ -445,10 +568,9 @@ fn flushDecl( } const decl_block = self.decl_table.getPtr(decl_index).?; - const gpa = self.base.allocator; - try self.flushTypedefs(f, decl_block.typedefs); - try f.all_buffers.ensureUnusedCapacity(gpa, 2); + try self.flushLazyFns(f, decl_block.lazy_fns); + try f.all_buffers.ensureUnusedCapacity(gpa, 1); if (!(decl.isExtern() and export_names.contains(mem.span(decl.name)))) f.appendBufAssumeCapacity(decl_block.fwd_decl.items); } diff --git a/stage1/zig.h b/stage1/zig.h new file mode 100644 index 0000000000..0756d9f731 --- /dev/null +++ b/stage1/zig.h @@ -0,0 +1,2486 @@ +#undef linux + +#define __STDC_WANT_IEC_60559_TYPES_EXT__ +#include <float.h> +#include <limits.h> +#include <stdarg.h> +#include <stddef.h> + +#if _MSC_VER +#include <intrin.h> +#elif defined(__i386__) || defined(__x86_64__) +#include <cpuid.h> +#endif + +#if !defined(__cplusplus) && __STDC_VERSION__ <= 201710L +#if __STDC_VERSION__ >= 199901L +#include <stdbool.h> +#else +typedef char bool; +#define false 0 +#define true 1 +#endif +#endif + +#if defined(__has_builtin) +#define zig_has_builtin(builtin) __has_builtin(__builtin_##builtin) +#else +#define zig_has_builtin(builtin) 0 +#endif + +#if defined(__has_attribute) +#define zig_has_attribute(attribute) __has_attribute(attribute) +#else +#define zig_has_attribute(attribute) 0 +#endif + +#if __STDC_VERSION__ >= 201112L +#define zig_threadlocal _Thread_local +#elif defined(__GNUC__) +#define zig_threadlocal __thread +#elif _MSC_VER +#define zig_threadlocal __declspec(thread) +#else +#define zig_threadlocal zig_threadlocal_unavailable 
+#endif + +#if defined(__clang__) +#define zig_clang +#elif defined(__GNUC__) +#define zig_gnuc +#endif + +#if _MSC_VER +#define zig_const_arr +#define zig_callconv(c) __##c +#else +#define zig_const_arr static const +#define zig_callconv(c) __attribute__((c)) +#endif + +#if zig_has_attribute(naked) || defined(zig_gnuc) +#define zig_naked_decl __attribute__((naked)) +#define zig_naked __attribute__((naked)) +#elif defined(_MSC_VER) +#define zig_naked_decl +#define zig_naked __declspec(naked) +#else +#define zig_naked_decl zig_naked_unavailable +#define zig_naked zig_naked_unavailable +#endif + +#if zig_has_attribute(cold) +#define zig_cold __attribute__((cold)) +#else +#define zig_cold +#endif + +#if __STDC_VERSION__ >= 199901L +#define zig_restrict restrict +#elif defined(__GNUC__) +#define zig_restrict __restrict +#else +#define zig_restrict +#endif + +#if __STDC_VERSION__ >= 201112L +#define zig_align(alignment) _Alignas(alignment) +#elif zig_has_attribute(aligned) +#define zig_align(alignment) __attribute__((aligned(alignment))) +#elif _MSC_VER +#define zig_align(alignment) __declspec(align(alignment)) +#else +#define zig_align zig_align_unavailable +#endif + +#if zig_has_attribute(aligned) +#define zig_under_align(alignment) __attribute__((aligned(alignment))) +#elif _MSC_VER +#define zig_under_align(alignment) zig_align(alignment) +#else +#define zig_align zig_align_unavailable +#endif + +#if zig_has_attribute(aligned) +#define zig_align_fn(alignment) __attribute__((aligned(alignment))) +#elif _MSC_VER +#define zig_align_fn(alignment) +#else +#define zig_align_fn zig_align_fn_unavailable +#endif + +#if zig_has_attribute(packed) +#define zig_packed(definition) __attribute__((packed)) definition +#elif _MSC_VER +#define zig_packed(definition) __pragma(pack(1)) definition __pragma(pack()) +#else +#define zig_packed(definition) zig_packed_unavailable +#endif + +#if zig_has_attribute(section) +#define zig_linksection(name, def, ...) 
def __attribute__((section(name))) +#elif _MSC_VER +#define zig_linksection(name, def, ...) __pragma(section(name, __VA_ARGS__)) __declspec(allocate(name)) def +#else +#define zig_linksection(name, def, ...) zig_linksection_unavailable +#endif + +#if zig_has_builtin(unreachable) || defined(zig_gnuc) +#define zig_unreachable() __builtin_unreachable() +#else +#define zig_unreachable() +#endif + +#if defined(__cplusplus) +#define zig_extern extern "C" +#else +#define zig_extern extern +#endif + +#if zig_has_attribute(alias) +#define zig_export(sig, symbol, name) zig_extern sig __attribute__((alias(symbol))) +#elif _MSC_VER +#if _M_X64 +#define zig_export(sig, symbol, name) sig;\ + __pragma(comment(linker, "/alternatename:" name "=" symbol )) +#else /*_M_X64 */ +#define zig_export(sig, symbol, name) sig;\ + __pragma(comment(linker, "/alternatename:_" name "=_" symbol )) +#endif /*_M_X64 */ +#else +#define zig_export(sig, symbol, name) __asm(name " = " symbol) +#endif + +#if zig_has_builtin(debugtrap) +#define zig_breakpoint() __builtin_debugtrap() +#elif zig_has_builtin(trap) || defined(zig_gnuc) +#define zig_breakpoint() __builtin_trap() +#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) +#define zig_breakpoint() __debugbreak() +#elif defined(__i386__) || defined(__x86_64__) +#define zig_breakpoint() __asm__ volatile("int $0x03"); +#else +#define zig_breakpoint() raise(SIGTRAP) +#endif + +#if zig_has_builtin(return_address) || defined(zig_gnuc) +#define zig_return_address() __builtin_extract_return_addr(__builtin_return_address(0)) +#elif defined(_MSC_VER) +#define zig_return_address() _ReturnAddress() +#else +#define zig_return_address() 0 +#endif + +#if zig_has_builtin(frame_address) || defined(zig_gnuc) +#define zig_frame_address() __builtin_frame_address(0) +#else +#define zig_frame_address() 0 +#endif + +#if zig_has_builtin(prefetch) || defined(zig_gnuc) +#define zig_prefetch(addr, rw, locality) __builtin_prefetch(addr, rw, locality) +#else 
+#define zig_prefetch(addr, rw, locality) +#endif + +#if zig_has_builtin(wasm_memory_size) && zig_has_builtin(wasm_memory_grow) +#define zig_wasm_memory_size(index) __builtin_wasm_memory_size(index) +#define zig_wasm_memory_grow(index, delta) __builtin_wasm_memory_grow(index, delta) +#else +#define zig_wasm_memory_size(index) zig_unimplemented() +#define zig_wasm_memory_grow(index, delta) zig_unimplemented() +#endif + +#define zig_concat(lhs, rhs) lhs##rhs +#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs) + +#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) +#include <stdatomic.h> +#define zig_atomic(type) _Atomic(type) +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail) +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail) +#define zig_atomicrmw_xchg(obj, arg, order, type) atomic_exchange_explicit (obj, arg, order) +#define zig_atomicrmw_add(obj, arg, order, type) atomic_fetch_add_explicit (obj, arg, order) +#define zig_atomicrmw_sub(obj, arg, order, type) atomic_fetch_sub_explicit (obj, arg, order) +#define zig_atomicrmw_or(obj, arg, order, type) atomic_fetch_or_explicit (obj, arg, order) +#define zig_atomicrmw_xor(obj, arg, order, type) atomic_fetch_xor_explicit (obj, arg, order) +#define zig_atomicrmw_and(obj, arg, order, type) atomic_fetch_and_explicit (obj, arg, order) +#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand (obj, arg, order) +#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store(obj, arg, order, type) atomic_store_explicit (obj, arg, order) +#define zig_atomic_load(obj, order, type) atomic_load_explicit (obj, order) +#define zig_fence(order) atomic_thread_fence(order) +#elif defined(__GNUC__) +#define
memory_order_relaxed __ATOMIC_RELAXED +#define memory_order_consume __ATOMIC_CONSUME +#define memory_order_acquire __ATOMIC_ACQUIRE +#define memory_order_release __ATOMIC_RELEASE +#define memory_order_acq_rel __ATOMIC_ACQ_REL +#define memory_order_seq_cst __ATOMIC_SEQ_CST +#define zig_atomic(type) type +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail) +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail) +#define zig_atomicrmw_xchg(obj, arg, order, type) __atomic_exchange_n(obj, arg, order) +#define zig_atomicrmw_add(obj, arg, order, type) __atomic_fetch_add (obj, arg, order) +#define zig_atomicrmw_sub(obj, arg, order, type) __atomic_fetch_sub (obj, arg, order) +#define zig_atomicrmw_or(obj, arg, order, type) __atomic_fetch_or (obj, arg, order) +#define zig_atomicrmw_xor(obj, arg, order, type) __atomic_fetch_xor (obj, arg, order) +#define zig_atomicrmw_and(obj, arg, order, type) __atomic_fetch_and (obj, arg, order) +#define zig_atomicrmw_nand(obj, arg, order, type) __atomic_fetch_nand(obj, arg, order) +#define zig_atomicrmw_min(obj, arg, order, type) __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(obj, arg, order, type) __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store(obj, arg, order, type) __atomic_store_n (obj, arg, order) +#define zig_atomic_load(obj, order, type) __atomic_load_n (obj, order) +#define zig_fence(order) __atomic_thread_fence(order) +#elif _MSC_VER && (_M_IX86 || _M_X64) +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 +#define zig_atomic(type) type +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_expand_concat(zig_msvc_cmpxchg_, type)(obj, &(expected), desired) +#define 
zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) +#define zig_atomicrmw_xchg(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xchg_, type)(obj, arg) +#define zig_atomicrmw_add(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_add_, type)(obj, arg) +#define zig_atomicrmw_sub(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_sub_, type)(obj, arg) +#define zig_atomicrmw_or(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_or_, type)(obj, arg) +#define zig_atomicrmw_xor(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_xor_, type)(obj, arg) +#define zig_atomicrmw_and(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_and_, type)(obj, arg) +#define zig_atomicrmw_nand(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_nand_, type)(obj, arg) +#define zig_atomicrmw_min(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_min_, type)(obj, arg) +#define zig_atomicrmw_max(obj, arg, order, type) zig_expand_concat(zig_msvc_atomicrmw_max_, type)(obj, arg) +#define zig_atomic_store(obj, arg, order, type) zig_expand_concat(zig_msvc_atomic_store_, type)(obj, arg) +#define zig_atomic_load(obj, order, type) zig_expand_concat(zig_msvc_atomic_load_, type)(obj) +#if _M_X64 +#define zig_fence(order) __faststorefence() +#else +#define zig_fence(order) zig_msvc_atomic_barrier() +#endif + +// TODO: _MSC_VER && (_M_ARM || _M_ARM64) +#else +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 +#define zig_atomic(type) type +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail, type) zig_unimplemented() +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail, type) zig_unimplemented() +#define zig_atomicrmw_xchg(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_add(obj, arg, order, 
type) zig_unimplemented() +#define zig_atomicrmw_sub(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_or(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_xor(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_and(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_nand(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_min(obj, arg, order, type) zig_unimplemented() +#define zig_atomicrmw_max(obj, arg, order, type) zig_unimplemented() +#define zig_atomic_store(obj, arg, order, type) zig_unimplemented() +#define zig_atomic_load(obj, order, type) zig_unimplemented() +#define zig_fence(order) zig_unimplemented() +#endif + +#if __STDC_VERSION__ >= 201112L +#define zig_noreturn _Noreturn void +#elif zig_has_attribute(noreturn) || defined(zig_gnuc) +#define zig_noreturn __attribute__((noreturn)) void +#elif _MSC_VER +#define zig_noreturn __declspec(noreturn) void +#else +#define zig_noreturn void +#endif + +#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T)) + +typedef uintptr_t zig_usize; +typedef intptr_t zig_isize; +typedef signed short int zig_c_short; +typedef unsigned short int zig_c_ushort; +typedef signed int zig_c_int; +typedef unsigned int zig_c_uint; +typedef signed long int zig_c_long; +typedef unsigned long int zig_c_ulong; +typedef signed long long int zig_c_longlong; +typedef unsigned long long int zig_c_ulonglong; + +typedef uint8_t zig_u8; +typedef int8_t zig_i8; +typedef uint16_t zig_u16; +typedef int16_t zig_i16; +typedef uint32_t zig_u32; +typedef int32_t zig_i32; +typedef uint64_t zig_u64; +typedef int64_t zig_i64; + +#define zig_as_u8(val) UINT8_C(val) +#define zig_as_i8(val) INT8_C(val) +#define zig_as_u16(val) UINT16_C(val) +#define zig_as_i16(val) INT16_C(val) +#define zig_as_u32(val) UINT32_C(val) +#define zig_as_i32(val) INT32_C(val) +#define zig_as_u64(val) UINT64_C(val) +#define zig_as_i64(val) INT64_C(val) + +#define zig_minInt_u8 zig_as_u8(0) +#define zig_maxInt_u8 
UINT8_MAX +#define zig_minInt_i8 INT8_MIN +#define zig_maxInt_i8 INT8_MAX +#define zig_minInt_u16 zig_as_u16(0) +#define zig_maxInt_u16 UINT16_MAX +#define zig_minInt_i16 INT16_MIN +#define zig_maxInt_i16 INT16_MAX +#define zig_minInt_u32 zig_as_u32(0) +#define zig_maxInt_u32 UINT32_MAX +#define zig_minInt_i32 INT32_MIN +#define zig_maxInt_i32 INT32_MAX +#define zig_minInt_u64 zig_as_u64(0) +#define zig_maxInt_u64 UINT64_MAX +#define zig_minInt_i64 INT64_MIN +#define zig_maxInt_i64 INT64_MAX + +#define zig_compiler_rt_abbrev_u32 si +#define zig_compiler_rt_abbrev_i32 si +#define zig_compiler_rt_abbrev_u64 di +#define zig_compiler_rt_abbrev_i64 di +#define zig_compiler_rt_abbrev_u128 ti +#define zig_compiler_rt_abbrev_i128 ti +#define zig_compiler_rt_abbrev_f16 hf +#define zig_compiler_rt_abbrev_f32 sf +#define zig_compiler_rt_abbrev_f64 df +#define zig_compiler_rt_abbrev_f80 xf +#define zig_compiler_rt_abbrev_f128 tf + +zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize); +zig_extern void *memset (void *, int, zig_usize); + +/* ==================== 8/16/32/64-bit Integer Routines ===================== */ + +#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits)) +#define zig_expand_maxInt(Type, bits) zig_maxInt(Type, bits) +#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits) +#define zig_expand_minInt(Type, bits) zig_minInt(Type, bits) + +#define zig_int_operator(Type, RhsType, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##RhsType rhs) { \ + return lhs operator rhs; \ + } +#define zig_int_basic_operator(Type, operation, operator) \ + zig_int_operator(Type, Type, operation, operator) +#define zig_int_shift_operator(Type, operation, operator) \ + zig_int_operator(Type, u8, operation, operator) +#define zig_int_helpers(w) \ + zig_int_basic_operator(u##w, and, &) \ + zig_int_basic_operator(i##w, and, &) \ + 
zig_int_basic_operator(u##w, or, |) \ + zig_int_basic_operator(i##w, or, |) \ + zig_int_basic_operator(u##w, xor, ^) \ + zig_int_basic_operator(i##w, xor, ^) \ + zig_int_shift_operator(u##w, shl, <<) \ + zig_int_shift_operator(i##w, shl, <<) \ + zig_int_shift_operator(u##w, shr, >>) \ +\ + static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \ + zig_i##w sign_mask = lhs < zig_as_i##w(0) ? -zig_as_i##w(1) : zig_as_i##w(0); \ + return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \ + } \ +\ + static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \ + return val ^ zig_maxInt(u##w, bits); \ + } \ +\ + static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \ + (void)bits; \ + return ~val; \ + } \ +\ + static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \ + return val & zig_maxInt(u##w, bits); \ + } \ +\ + static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \ + return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \ + ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \ + } \ +\ + zig_int_basic_operator(u##w, div_floor, /) \ +\ + static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \ + return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \ + } \ +\ + zig_int_basic_operator(u##w, mod, %) \ +\ + static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \ + zig_i##w rem = lhs % rhs; \ + return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? 
rhs : zig_as_i##w(0)); \ + } \ +\ + static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \ + } \ +\ + static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ + return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \ + } \ +\ + static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + return zig_wrap_u##w(lhs + rhs, bits); \ + } \ +\ + static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \ + } \ +\ + static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + return zig_wrap_u##w(lhs - rhs, bits); \ + } \ +\ + static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \ + } \ +\ + static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + return zig_wrap_u##w(lhs * rhs, bits); \ + } \ +\ + static inline zig_i##w zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \ + } +zig_int_helpers(8) +zig_int_helpers(16) +zig_int_helpers(32) +zig_int_helpers(64) + +static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_u32 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); +#else + *res = zig_addw_u32(lhs, rhs, bits); + return *res < lhs; +#endif +} + +static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits); +} + 
+zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); +static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_i32 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i32 full_res = __addosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); +} + +static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_u64 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); +#else + *res = zig_addw_u64(lhs, rhs, bits); + return *res < lhs; +#endif +} + +static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); +static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_i64 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i64 full_res = __addodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt(i64, bits) || full_res > 
zig_maxInt(i64, bits); +} + +static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_u8 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); +#else + zig_u32 full_res; + bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u8)full_res; + return overflow; +#endif +} + +static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_i8 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); +#else + zig_i32 full_res; + bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i8)full_res; + return overflow; +#endif +} + +static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_u16 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, 
bits); +#else + zig_u32 full_res; + bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u16)full_res; + return overflow; +#endif +} + +static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gnuc) + zig_i16 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); +#else + zig_i32 full_res; + bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i16)full_res; + return overflow; +#endif +} + +static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_u32 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); +#else + *res = zig_subw_u32(lhs, rhs, bits); + return *res > lhs; +#endif +} + +static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); +static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_i32 full_res; + bool overflow = 
__builtin_sub_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i32 full_res = __subosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); +} + +static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_u64 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); +#else + *res = zig_subw_u64(lhs, rhs, bits); + return *res > lhs; +#endif +} + +static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); +static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_i64 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i64 full_res = __subodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); +} + +static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool 
zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_u8 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); +#else + zig_u32 full_res; + bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u8)full_res; + return overflow; +#endif +} + +static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_i8 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); +#else + zig_i32 full_res; + bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i8)full_res; + return overflow; +#endif +} + +static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits); +} + + +static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_u16 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); +#else + zig_u32 full_res; + bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u16)full_res; + return overflow; +#endif +} + +static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, const zig_u16 *rhs, 
zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits); +} + + +static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gnuc) + zig_i16 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); +#else + zig_i32 full_res; + bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i16)full_res; + return overflow; +#endif +} + +static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_u32 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); +#else + *res = zig_mulw_u32(lhs, rhs, bits); + return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs; +#endif +} + +static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_i32 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i32 full_res = __mulosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + 
return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); +} + +static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_u64 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); +#else + *res = zig_mulw_u64(lhs, rhs, bits); + return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs; +#endif +} + +static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits); +} + +zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_i64 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i64 full_res = __mulodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); +} + +static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_u8 full_res; + bool overflow = __builtin_mul_overflow(lhs, 
rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); +#else + zig_u32 full_res; + bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u8)full_res; + return overflow; +#endif +} + +static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_i8 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); +#else + zig_i32 full_res; + bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i8)full_res; + return overflow; +#endif +} + +static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_u16 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); +#else + zig_u32 full_res; + bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); + *res = (zig_u16)full_res; + return overflow; +#endif +} + +static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits); +} + +static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 
bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gnuc) + zig_i16 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); +#else + zig_i32 full_res; + bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); + *res = (zig_i16)full_res; + return overflow; +#endif +} + +static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits); +} + +#define zig_int_builtins(w) \ + static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + *res = zig_shlw_u##w(lhs, rhs, bits); \ + return lhs > zig_maxInt(u##w, bits) >> rhs; \ + } \ +\ + static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ + *res = zig_shlw_i##w(lhs, rhs, bits); \ + zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \ + return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \ + } \ +\ + static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + zig_u##w res; \ + if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \ + return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + } \ +\ + static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + zig_i##w res; \ + if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \ + return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + } \ +\ + static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + zig_u##w res; \ + return zig_addo_u##w(&res, lhs, rhs, bits) ? 
zig_maxInt(u##w, bits) : res; \ + } \ +\ + static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + zig_i##w res; \ + if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \ + return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + } \ +\ + static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + zig_u##w res; \ + return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt(u##w, bits) : res; \ + } \ +\ + static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + zig_i##w res; \ + if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \ + return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + } \ +\ + static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \ + zig_u##w res; \ + return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \ + } \ +\ + static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \ + zig_i##w res; \ + if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \ + return (lhs ^ rhs) < zig_as_i##w(0) ? 
zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \ + } +zig_int_builtins(8) +zig_int_builtins(16) +zig_int_builtins(32) +zig_int_builtins(64) + +#define zig_builtin8(name, val) __builtin_##name(val) +typedef zig_c_uint zig_Builtin8; + +#define zig_builtin16(name, val) __builtin_##name(val) +typedef zig_c_uint zig_Builtin16; + +#if INT_MIN <= INT32_MIN +#define zig_builtin32(name, val) __builtin_##name(val) +typedef zig_c_uint zig_Builtin32; +#elif LONG_MIN <= INT32_MIN +#define zig_builtin32(name, val) __builtin_##name##l(val) +typedef zig_c_ulong zig_Builtin32; +#endif + +#if INT_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name(val) +typedef zig_c_uint zig_Builtin64; +#elif LONG_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name##l(val) +typedef zig_c_ulong zig_Builtin64; +#elif LLONG_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name##ll(val) +typedef zig_c_ulonglong zig_Builtin64; +#endif + +static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) { + return zig_wrap_u8(val >> (8 - bits), bits); +} + +static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) { + return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits); +} + +static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) { + zig_u16 full_res; +#if zig_has_builtin(bswap16) || defined(zig_gnuc) + full_res = __builtin_bswap16(val); +#else + full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 | + (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0; +#endif + return zig_wrap_u16(full_res >> (16 - bits), bits); +} + +static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) { + return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits); +} + +static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) { + zig_u32 full_res; +#if zig_has_builtin(bswap32) || defined(zig_gnuc) + full_res = __builtin_bswap32(val); +#else + full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) 
<< 16 | + (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0; +#endif + return zig_wrap_u32(full_res >> (32 - bits), bits); +} + +static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) { + return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits); +} + +static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) { + zig_u64 full_res; +#if zig_has_builtin(bswap64) || defined(zig_gnuc) + full_res = __builtin_bswap64(val); +#else + full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 | + (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0; +#endif + return zig_wrap_u64(full_res >> (64 - bits), bits); +} + +static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) { + return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits); +} + +static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) { + zig_u8 full_res; +#if zig_has_builtin(bitreverse8) + full_res = __builtin_bitreverse8(val); +#else + static zig_u8 const lut[0x10] = { + 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, + 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf + }; + full_res = lut[val >> 0 & 0xF] << 4 | lut[val >> 4 & 0xF] << 0; +#endif + return zig_wrap_u8(full_res >> (8 - bits), bits); +} + +static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) { + return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits); +} + +static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) { + zig_u16 full_res; +#if zig_has_builtin(bitreverse16) + full_res = __builtin_bitreverse16(val); +#else + full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 | + (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0; +#endif + return zig_wrap_u16(full_res >> (16 - bits), bits); +} + +static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) { + return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), bits); +} + +static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) { + 
zig_u32 full_res; +#if zig_has_builtin(bitreverse32) + full_res = __builtin_bitreverse32(val); +#else + full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 | + (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0; +#endif + return zig_wrap_u32(full_res >> (32 - bits), bits); +} + +static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) { + return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits); +} + +static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) { + zig_u64 full_res; +#if zig_has_builtin(bitreverse64) + full_res = __builtin_bitreverse64(val); +#else + full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 | + (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0; +#endif + return zig_wrap_u64(full_res >> (64 - bits), bits); +} + +static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) { + return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits); +} + +#define zig_builtin_popcount_common(w) \ + static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \ + return zig_popcount_u##w((zig_u##w)val, bits); \ + } +#if zig_has_builtin(popcount) || defined(zig_gnuc) +#define zig_builtin_popcount(w) \ + static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \ + (void)bits; \ + return zig_builtin##w(popcount, val); \ + } \ +\ + zig_builtin_popcount_common(w) +#else +#define zig_builtin_popcount(w) \ + static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \ + (void)bits; \ + zig_u##w temp = val - ((val >> 1) & (zig_maxInt_u##w / 3)); \ + temp = (temp & (zig_maxInt_u##w / 5)) + ((temp >> 2) & (zig_maxInt_u##w / 5)); \ + temp = (temp + (temp >> 4)) & (zig_maxInt_u##w / 17); \ + return temp * (zig_maxInt_u##w / 255) >> (w - 8); \ + } \ +\ + zig_builtin_popcount_common(w) +#endif +zig_builtin_popcount(8) +zig_builtin_popcount(16) +zig_builtin_popcount(32) +zig_builtin_popcount(64) + +#define 
zig_builtin_ctz_common(w) \ + static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \ + return zig_ctz_u##w((zig_u##w)val, bits); \ + } +#if zig_has_builtin(ctz) || defined(zig_gnuc) +#define zig_builtin_ctz(w) \ + static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \ + if (val == 0) return bits; \ + return zig_builtin##w(ctz, val); \ + } \ +\ + zig_builtin_ctz_common(w) +#else +#define zig_builtin_ctz(w) \ + static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \ + return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \ + } \ +\ + zig_builtin_ctz_common(w) +#endif +zig_builtin_ctz(8) +zig_builtin_ctz(16) +zig_builtin_ctz(32) +zig_builtin_ctz(64) + +#define zig_builtin_clz_common(w) \ + static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \ + return zig_clz_u##w((zig_u##w)val, bits); \ + } +#if zig_has_builtin(clz) || defined(zig_gnuc) +#define zig_builtin_clz(w) \ + static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \ + if (val == 0) return bits; \ + return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \ + } \ +\ + zig_builtin_clz_common(w) +#else +#define zig_builtin_clz(w) \ + static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \ + return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \ + } \ +\ + zig_builtin_clz_common(w) +#endif +zig_builtin_clz(8) +zig_builtin_clz(16) +zig_builtin_clz(32) +zig_builtin_clz(64) + +/* ======================== 128-bit Integer Routines ======================== */ + +#if !defined(zig_has_int128) +# if defined(__SIZEOF_INT128__) +# define zig_has_int128 1 +# else +# define zig_has_int128 0 +# endif +#endif + +#if zig_has_int128 + +typedef unsigned __int128 zig_u128; +typedef signed __int128 zig_i128; + +#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo)) +#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo)) +#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) +#define zig_as_constant_i128(hi, lo) 
zig_as_i128(hi, lo) +#define zig_hi_u128(val) ((zig_u64)((val) >> 64)) +#define zig_lo_u128(val) ((zig_u64)((val) >> 0)) +#define zig_hi_i128(val) ((zig_i64)((val) >> 64)) +#define zig_lo_i128(val) ((zig_u64)((val) >> 0)) +#define zig_bitcast_u128(val) ((zig_u128)(val)) +#define zig_bitcast_i128(val) ((zig_i128)(val)) +#define zig_cmp_int128(Type) \ + static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (lhs > rhs) - (lhs < rhs); \ + } +#define zig_bit_int128(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return lhs operator rhs; \ + } + +#else /* zig_has_int128 */ + +#if __LITTLE_ENDIAN__ || _MSC_VER +typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128; +typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128; +#else +typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128; +typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128; +#endif + +#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) }) +#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) }) + +#if _MSC_VER +#define zig_as_constant_u128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#define zig_as_constant_i128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#else +#define zig_as_constant_u128(hi, lo) zig_as_u128(hi, lo) +#define zig_as_constant_i128(hi, lo) zig_as_i128(hi, lo) +#endif +#define zig_hi_u128(val) ((val).hi) +#define zig_lo_u128(val) ((val).lo) +#define zig_hi_i128(val) ((val).hi) +#define zig_lo_i128(val) ((val).lo) +#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo) +#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo) +#define zig_cmp_int128(Type) \ + static inline zig_i32 zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (lhs.hi == rhs.hi) \ + ? 
(lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \ + : (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \ + } +#define zig_bit_int128(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (zig_##Type){ .hi = lhs.hi operator rhs.hi, .lo = lhs.lo operator rhs.lo }; \ + } + +#endif /* zig_has_int128 */ + +#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64) +#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64) +#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64) +#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64) + +zig_cmp_int128(u128) +zig_cmp_int128(i128) + +zig_bit_int128(u128, and, &) +zig_bit_int128(i128, and, &) + +zig_bit_int128(u128, or, |) +zig_bit_int128(i128, or, |) + +zig_bit_int128(u128, xor, ^) +zig_bit_int128(i128, xor, ^) + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs); + +#if zig_has_int128 + +static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) { + return val ^ zig_maxInt(u128, bits); +} + +static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { + (void)bits; + return ~val; +} + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { + return lhs >> rhs; +} + +static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { + return lhs << rhs; +} + +static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { + return lhs << rhs; +} + +static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs + rhs; +} + +static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs + rhs; +} + +static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs - rhs; +} + +static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs - rhs; +} + +static inline zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs * rhs; +} + +static inline zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs * rhs; +} + +static inline 
zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs / rhs; +} + +static inline zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs / rhs; +} + +static inline zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs % rhs; +} + +static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs % rhs; +} + +static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_div_trunc_i128(lhs, rhs) - (((lhs ^ rhs) & zig_rem_i128(lhs, rhs)) < zig_as_i128(0, 0)); +} + +static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 rem = zig_rem_i128(lhs, rhs); + return rem + (((lhs ^ rhs) & rem) < zig_as_i128(0, 0) ? rhs : zig_as_i128(0, 0)); +} + +#else /* zig_has_int128 */ + +static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) { + return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) }; +} + +static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) { + return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) }; +} + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) { + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) }; + return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs }; +} + +static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) { + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; + return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; +} + +static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) { + if (rhs == zig_as_u8(0)) return lhs; + if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 }; + 
return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs }; +} + +static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { + zig_u128 res; + res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 res; + res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) { + zig_u128 res; + res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 res; + res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs); +static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_bitcast_u128(__multi3(zig_bitcast_i128(lhs), zig_bitcast_i128(rhs))); +} + +static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { + return __multi3(lhs, rhs); +} + +zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs); +static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { + return __udivti3(lhs, rhs); +}; + +zig_extern zig_i128 __divti3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) { + return __divti3(lhs, rhs); +}; + +zig_extern zig_u128 __umodti3(zig_u128 lhs, zig_u128 rhs); +static zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) { + return __umodti3(lhs, rhs); +} + +zig_extern zig_i128 __modti3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { + return __modti3(lhs, rhs); +} + +static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 rem = zig_rem_i128(lhs, rhs); + return zig_add_i128(rem, (((lhs.hi ^ rhs.hi) & rem.hi) < zig_as_i64(0) ? 
rhs : zig_as_i128(0, 0))); +} + +static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), zig_as_i128(0, zig_cmp_i128(zig_and_i128(zig_xor_i128(lhs, rhs), zig_rem_i128(lhs, rhs)), zig_as_i128(0, 0)) < zig_as_i32(0))); +} + +#endif /* zig_has_int128 */ + +#define zig_div_floor_u128 zig_div_trunc_u128 +#define zig_mod_u128 zig_rem_u128 + +static inline zig_u128 zig_nand_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_not_u128(zig_and_u128(lhs, rhs), 128); +} + +static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_cmp_u128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; +} + +static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_cmp_i128(lhs, rhs) < zig_as_i32(0) ? lhs : rhs; +} + +static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_cmp_u128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs; +} + +static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_cmp_i128(lhs, rhs) > zig_as_i32(0) ? lhs : rhs; +} + +static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) { + zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? 
zig_sub_i128(zig_as_i128(0, 0), zig_as_i128(0, 1)) : zig_as_i128(0, 0); + return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask); +} + +static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) { + return zig_and_u128(val, zig_maxInt(u128, bits)); +} + +static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) { + return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val)); +} + +static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { + return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { + return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits); +} + +static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + return zig_wrap_u128(zig_add_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); +} + +static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); +} + +static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits); +} + +#if zig_has_int128 + +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) + zig_u128 full_res; + bool overflow = 
__builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); +#else + *res = zig_addw_u128(lhs, rhs, bits); + return *res < lhs; +#endif +} + +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +#if zig_has_builtin(add_overflow) + zig_i128 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); +} + +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) + zig_u128 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); +#else + *res = zig_subw_u128(lhs, rhs, bits); + return *res > lhs; +#endif +} + +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +#if zig_has_builtin(sub_overflow) + zig_i128 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); +} + +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) + zig_u128 full_res; + bool overflow = 
__builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); +#else + *res = zig_mulw_u128(lhs, rhs, bits); + return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs; +#endif +} + +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +#if zig_has_builtin(mul_overflow) + zig_i128 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + zig_c_int overflow_int; + zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); +} + +#else /* zig_has_int128 */ + +static inline bool zig_overflow_u128(bool overflow, zig_u128 full_res, zig_u8 bits) { + return overflow || + zig_cmp_u128(full_res, zig_minInt(u128, bits)) < zig_as_i32(0) || + zig_cmp_u128(full_res, zig_maxInt(u128, bits)) > zig_as_i32(0); +} + +static inline bool zig_overflow_i128(bool overflow, zig_i128 full_res, zig_u8 bits) { + return overflow || + zig_cmp_i128(full_res, zig_minInt(i128, bits)) < zig_as_i32(0) || + zig_cmp_i128(full_res, zig_maxInt(i128, bits)) > zig_as_i32(0); +} + +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 full_res; + bool overflow = + zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | + zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64); + *res = zig_wrap_u128(full_res, bits); + return zig_overflow_u128(overflow, full_res, bits); +} + +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = 
__addoti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return zig_overflow_i128(overflow_int, full_res, bits); +} + +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 full_res; + bool overflow = + zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, 64) | + zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, 64), 64); + *res = zig_wrap_u128(full_res, bits); + return zig_overflow_u128(overflow, full_res, bits); +} + +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return zig_overflow_i128(overflow_int, full_res, bits); +} + +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + *res = zig_mulw_u128(lhs, rhs, bits); + return zig_cmp_u128(*res, zig_as_u128(0, 0)) != zig_as_i32(0) && + zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); +} + +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_c_int overflow_int; + zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); + *res = zig_wrap_i128(full_res, bits); + return zig_overflow_i128(overflow_int, full_res, bits); +} + +#endif /* zig_has_int128 */ + +static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { + *res = zig_shlw_u128(lhs, rhs, bits); + return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt(u128, bits), rhs)) > zig_as_i32(0); +} + +static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { + *res = zig_shlw_i128(lhs, rhs, bits); + zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - 
zig_as_u8(1))); + return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) && + zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0); +} + +static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 res; + if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= zig_as_i32(0)) + return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != zig_as_i32(0) ? zig_maxInt(u128, bits) : lhs; + +#if zig_has_int128 + return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res; +#else + return zig_shlo_u128(&res, lhs, (zig_u8)rhs.lo, bits) ? zig_maxInt(u128, bits) : res; +#endif +} + +static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_i128 res; + if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < zig_as_i32(0) && !zig_shlo_i128(&res, lhs, zig_lo_i128(rhs), bits)) return res; + return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); +} + +static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 res; + return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res; +} + +static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_i128 res; + if (!zig_addo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); +} + +static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 res; + return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res; +} + +static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_i128 res; + if (!zig_subo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(res, zig_as_i128(0, 0)) >= zig_as_i32(0) ? 
zig_minInt(i128, bits) : zig_maxInt(i128, bits); +} + +static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { + zig_u128 res; + return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res; +} + +static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { + zig_i128 res; + if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < zig_as_i32(0) ? zig_minInt(i128, bits) : zig_maxInt(i128, bits); +} + +static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) { + if (bits <= zig_as_u8(64)) return zig_clz_u64(zig_lo_u128(val), bits); + if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64)); + return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64)); +} + +static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) { + return zig_clz_u128(zig_bitcast_u128(val), bits); +} + +static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) { + if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64)); + return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64); +} + +static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) { + return zig_ctz_u128(zig_bitcast_u128(val), bits); +} + +static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) { + return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + + zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64)); +} + +static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) { + return zig_popcount_u128(zig_bitcast_u128(val), bits); +} + +static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) { + zig_u128 full_res; +#if zig_has_builtin(bswap128) + full_res = __builtin_bswap128(val); +#else + full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)), + zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64))); +#endif + return zig_shr_u128(full_res, zig_as_u8(128) - bits); +} 
+ +static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) { + return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits)); +} + +static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) { + return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)), + zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))), + zig_as_u8(128) - bits); +} + +static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) { + return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits)); +} + +/* ========================= Floating Point Support ========================= */ + +#if _MSC_VER +#define zig_msvc_flt_inf ((double)(1e+300 * 1e+300)) +#define zig_msvc_flt_inff ((float)(1e+300 * 1e+300)) +#define zig_msvc_flt_infl ((long double)(1e+300 * 1e+300)) +#define zig_msvc_flt_nan ((double)(zig_msvc_flt_inf * 0.f)) +#define zig_msvc_flt_nanf ((float)(zig_msvc_flt_inf * 0.f)) +#define zig_msvc_flt_nanl ((long double)(zig_msvc_flt_inf * 0.f)) +#define __builtin_nan(str) nan(str) +#define __builtin_nanf(str) nanf(str) +#define __builtin_nanl(str) nanl(str) +#define __builtin_inf() zig_msvc_flt_inf +#define __builtin_inff() zig_msvc_flt_inff +#define __builtin_infl() zig_msvc_flt_infl +#endif + +#if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gnuc) +#define zig_has_float_builtins 1 +#define zig_as_special_f16(sign, name, arg, repr) sign zig_as_f16(__builtin_##name, )(arg) +#define zig_as_special_f32(sign, name, arg, repr) sign zig_as_f32(__builtin_##name, )(arg) +#define zig_as_special_f64(sign, name, arg, repr) sign zig_as_f64(__builtin_##name, )(arg) +#define zig_as_special_f80(sign, name, arg, repr) sign zig_as_f80(__builtin_##name, )(arg) +#define zig_as_special_f128(sign, name, arg, repr) sign zig_as_f128(__builtin_##name, )(arg) +#define zig_as_special_c_longdouble(sign, name, arg, repr) sign zig_as_c_longdouble(__builtin_##name, )(arg) +#else +#define 
zig_has_float_builtins 0 +#define zig_as_special_f16(sign, name, arg, repr) zig_float_from_repr_f16(repr) +#define zig_as_special_f32(sign, name, arg, repr) zig_float_from_repr_f32(repr) +#define zig_as_special_f64(sign, name, arg, repr) zig_float_from_repr_f64(repr) +#define zig_as_special_f80(sign, name, arg, repr) zig_float_from_repr_f80(repr) +#define zig_as_special_f128(sign, name, arg, repr) zig_float_from_repr_f128(repr) +#define zig_as_special_c_longdouble(sign, name, arg, repr) zig_float_from_repr_c_longdouble(repr) +#endif + +#define zig_has_f16 1 +#define zig_bitSizeOf_f16 16 +#define zig_libc_name_f16(name) __##name##h +#define zig_as_special_constant_f16(sign, name, arg, repr) zig_as_special_f16(sign, name, arg, repr) +#if FLT_MANT_DIG == 11 +typedef float zig_f16; +#define zig_as_f16(fp, repr) fp##f +#elif DBL_MANT_DIG == 11 +typedef double zig_f16; +#define zig_as_f16(fp, repr) fp +#elif LDBL_MANT_DIG == 11 +#define zig_bitSizeOf_c_longdouble 16 +typedef long double zig_f16; +#define zig_as_f16(fp, repr) fp##l +#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gnuc)) +typedef _Float16 zig_f16; +#define zig_as_f16(fp, repr) fp##f16 +#elif defined(__SIZEOF_FP16__) +typedef __fp16 zig_f16; +#define zig_as_f16(fp, repr) fp##f16 +#else +#undef zig_has_f16 +#define zig_has_f16 0 +#define zig_repr_f16 i16 +typedef zig_i16 zig_f16; +#define zig_as_f16(fp, repr) repr +#undef zig_as_special_f16 +#define zig_as_special_f16(sign, name, arg, repr) repr +#undef zig_as_special_constant_f16 +#define zig_as_special_constant_f16(sign, name, arg, repr) repr +#endif + +#define zig_has_f32 1 +#define zig_bitSizeOf_f32 32 +#define zig_libc_name_f32(name) name##f +#if _MSC_VER +#define zig_as_special_constant_f32(sign, name, arg, repr) sign zig_as_f32(zig_msvc_flt_##name, ) +#else +#define zig_as_special_constant_f32(sign, name, arg, repr) zig_as_special_f32(sign, name, arg, repr) +#endif +#if FLT_MANT_DIG == 24 +typedef float zig_f32; +#define 
zig_as_f32(fp, repr) fp##f +#elif DBL_MANT_DIG == 24 +typedef double zig_f32; +#define zig_as_f32(fp, repr) fp +#elif LDBL_MANT_DIG == 24 +#define zig_bitSizeOf_c_longdouble 32 +typedef long double zig_f32; +#define zig_as_f32(fp, repr) fp##l +#elif FLT32_MANT_DIG == 24 +typedef _Float32 zig_f32; +#define zig_as_f32(fp, repr) fp##f32 +#else +#undef zig_has_f32 +#define zig_has_f32 0 +#define zig_repr_f32 i32 +typedef zig_i32 zig_f32; +#define zig_as_f32(fp, repr) repr +#undef zig_as_special_f32 +#define zig_as_special_f32(sign, name, arg, repr) repr +#undef zig_as_special_constant_f32 +#define zig_as_special_constant_f32(sign, name, arg, repr) repr +#endif + +#define zig_has_f64 1 +#define zig_bitSizeOf_f64 64 +#define zig_libc_name_f64(name) name +#if _MSC_VER +#ifdef ZIG_TARGET_ABI_MSVC +#define zig_bitSizeOf_c_longdouble 64 +#endif +#define zig_as_special_constant_f64(sign, name, arg, repr) sign zig_as_f64(zig_msvc_flt_##name, ) +#else /* _MSC_VER */ +#define zig_as_special_constant_f64(sign, name, arg, repr) zig_as_special_f64(sign, name, arg, repr) +#endif /* _MSC_VER */ +#if FLT_MANT_DIG == 53 +typedef float zig_f64; +#define zig_as_f64(fp, repr) fp##f +#elif DBL_MANT_DIG == 53 +typedef double zig_f64; +#define zig_as_f64(fp, repr) fp +#elif LDBL_MANT_DIG == 53 +#define zig_bitSizeOf_c_longdouble 64 +typedef long double zig_f64; +#define zig_as_f64(fp, repr) fp##l +#elif FLT64_MANT_DIG == 53 +typedef _Float64 zig_f64; +#define zig_as_f64(fp, repr) fp##f64 +#elif FLT32X_MANT_DIG == 53 +typedef _Float32x zig_f64; +#define zig_as_f64(fp, repr) fp##f32x +#else +#undef zig_has_f64 +#define zig_has_f64 0 +#define zig_repr_f64 i64 +typedef zig_i64 zig_f64; +#define zig_as_f64(fp, repr) repr +#undef zig_as_special_f64 +#define zig_as_special_f64(sign, name, arg, repr) repr +#undef zig_as_special_constant_f64 +#define zig_as_special_constant_f64(sign, name, arg, repr) repr +#endif + +#define zig_has_f80 1 +#define zig_bitSizeOf_f80 80 +#define zig_libc_name_f80(name) 
__##name##x +#define zig_as_special_constant_f80(sign, name, arg, repr) zig_as_special_f80(sign, name, arg, repr) +#if FLT_MANT_DIG == 64 +typedef float zig_f80; +#define zig_as_f80(fp, repr) fp##f +#elif DBL_MANT_DIG == 64 +typedef double zig_f80; +#define zig_as_f80(fp, repr) fp +#elif LDBL_MANT_DIG == 64 +#define zig_bitSizeOf_c_longdouble 80 +typedef long double zig_f80; +#define zig_as_f80(fp, repr) fp##l +#elif FLT80_MANT_DIG == 64 +typedef _Float80 zig_f80; +#define zig_as_f80(fp, repr) fp##f80 +#elif FLT64X_MANT_DIG == 64 +typedef _Float64x zig_f80; +#define zig_as_f80(fp, repr) fp##f64x +#elif defined(__SIZEOF_FLOAT80__) +typedef __float80 zig_f80; +#define zig_as_f80(fp, repr) fp##l +#else +#undef zig_has_f80 +#define zig_has_f80 0 +#define zig_repr_f80 i128 +typedef zig_i128 zig_f80; +#define zig_as_f80(fp, repr) repr +#undef zig_as_special_f80 +#define zig_as_special_f80(sign, name, arg, repr) repr +#undef zig_as_special_constant_f80 +#define zig_as_special_constant_f80(sign, name, arg, repr) repr +#endif + +#define zig_has_f128 1 +#define zig_bitSizeOf_f128 128 +#define zig_libc_name_f128(name) name##q +#define zig_as_special_constant_f128(sign, name, arg, repr) zig_as_special_f128(sign, name, arg, repr) +#if FLT_MANT_DIG == 113 +typedef float zig_f128; +#define zig_as_f128(fp, repr) fp##f +#elif DBL_MANT_DIG == 113 +typedef double zig_f128; +#define zig_as_f128(fp, repr) fp +#elif LDBL_MANT_DIG == 113 +#define zig_bitSizeOf_c_longdouble 128 +typedef long double zig_f128; +#define zig_as_f128(fp, repr) fp##l +#elif FLT128_MANT_DIG == 113 +typedef _Float128 zig_f128; +#define zig_as_f128(fp, repr) fp##f128 +#elif FLT64X_MANT_DIG == 113 +typedef _Float64x zig_f128; +#define zig_as_f128(fp, repr) fp##f64x +#elif defined(__SIZEOF_FLOAT128__) +typedef __float128 zig_f128; +#define zig_as_f128(fp, repr) fp##q +#undef zig_as_special_f128 +#define zig_as_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg) +#else +#undef zig_has_f128 +#define 
zig_has_f128 0 +#define zig_repr_f128 i128 +typedef zig_i128 zig_f128; +#define zig_as_f128(fp, repr) repr +#undef zig_as_special_f128 +#define zig_as_special_f128(sign, name, arg, repr) repr +#undef zig_as_special_constant_f128 +#define zig_as_special_constant_f128(sign, name, arg, repr) repr +#endif + +#define zig_has_c_longdouble 1 + +#ifdef ZIG_TARGET_ABI_MSVC +#define zig_libc_name_c_longdouble(name) name +#else +#define zig_libc_name_c_longdouble(name) name##l +#endif + +#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) zig_as_special_c_longdouble(sign, name, arg, repr) +#ifdef zig_bitSizeOf_c_longdouble + +#ifdef ZIG_TARGET_ABI_MSVC +typedef double zig_c_longdouble; +#undef zig_bitSizeOf_c_longdouble +#define zig_bitSizeOf_c_longdouble 64 +#define zig_as_c_longdouble(fp, repr) fp +#else +typedef long double zig_c_longdouble; +#define zig_as_c_longdouble(fp, repr) fp##l +#endif + +#else /* zig_bitSizeOf_c_longdouble */ + +#undef zig_has_c_longdouble +#define zig_has_c_longdouble 0 +#define zig_bitSizeOf_c_longdouble 80 +#define zig_compiler_rt_abbrev_c_longdouble zig_compiler_rt_abbrev_f80 +#define zig_repr_c_longdouble i128 +typedef zig_i128 zig_c_longdouble; +#define zig_as_c_longdouble(fp, repr) repr +#undef zig_as_special_c_longdouble +#define zig_as_special_c_longdouble(sign, name, arg, repr) repr +#undef zig_as_special_constant_c_longdouble +#define zig_as_special_constant_c_longdouble(sign, name, arg, repr) repr + +#endif /* zig_bitSizeOf_c_longdouble */ + +#if !zig_has_float_builtins +#define zig_float_from_repr(Type, ReprType) \ + static inline zig_##Type zig_float_from_repr_##Type(zig_##ReprType repr) { \ + return *((zig_##Type*)&repr); \ + } + +zig_float_from_repr(f16, u16) +zig_float_from_repr(f32, u32) +zig_float_from_repr(f64, u64) +zig_float_from_repr(f80, u128) +zig_float_from_repr(f128, u128) +#if zig_bitSizeOf_c_longdouble == 80 +zig_float_from_repr(c_longdouble, u128) +#else +#define zig_expand_float_from_repr(Type, 
ReprType) zig_float_from_repr(Type, ReprType) +zig_expand_float_from_repr(c_longdouble, zig_expand_concat(u, zig_bitSizeOf_c_longdouble)) +#endif +#endif + +#define zig_cast_f16 (zig_f16) +#define zig_cast_f32 (zig_f32) +#define zig_cast_f64 (zig_f64) + +#if _MSC_VER && !zig_has_f128 +#define zig_cast_f80 +#define zig_cast_c_longdouble +#define zig_cast_f128 +#else +#define zig_cast_f80 (zig_f80) +#define zig_cast_c_longdouble (zig_c_longdouble) +#define zig_cast_f128 (zig_f128) +#endif + +#define zig_convert_builtin(ResType, operation, ArgType, version) \ + zig_extern zig_##ResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(zig_##ArgType); +zig_convert_builtin(f16, trunc, f32, 2) +zig_convert_builtin(f16, trunc, f64, 2) +zig_convert_builtin(f16, trunc, f80, 2) +zig_convert_builtin(f16, trunc, f128, 2) +zig_convert_builtin(f32, extend, f16, 2) +zig_convert_builtin(f32, trunc, f64, 2) +zig_convert_builtin(f32, trunc, f80, 2) +zig_convert_builtin(f32, trunc, f128, 2) +zig_convert_builtin(f64, extend, f16, 2) +zig_convert_builtin(f64, extend, f32, 2) +zig_convert_builtin(f64, trunc, f80, 2) +zig_convert_builtin(f64, trunc, f128, 2) +zig_convert_builtin(f80, extend, f16, 2) +zig_convert_builtin(f80, extend, f32, 2) +zig_convert_builtin(f80, extend, f64, 2) +zig_convert_builtin(f80, trunc, f128, 2) +zig_convert_builtin(f128, extend, f16, 2) +zig_convert_builtin(f128, extend, f32, 2) +zig_convert_builtin(f128, extend, f64, 2) +zig_convert_builtin(f128, extend, f80, 2) + +#define zig_float_negate_builtin_0(Type) \ + static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \ + return zig_expand_concat(zig_xor_, zig_repr_##Type)(arg, zig_expand_minInt(zig_repr_##Type, zig_bitSizeOf_##Type)); \ + } +#define zig_float_negate_builtin_1(Type) \ + static inline zig_##Type zig_neg_##Type(zig_##Type arg) { \ + return -arg; \ + } + +#define zig_float_less_builtin_0(Type, 
operation) \ + zig_extern zig_i32 zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##Type), 2)(zig_##Type, zig_##Type); \ + static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 2)(lhs, rhs); \ + } +#define zig_float_less_builtin_1(Type, operation) \ + static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (!(lhs <= rhs) - (lhs < rhs)); \ + } + +#define zig_float_greater_builtin_0(Type, operation) \ + zig_float_less_builtin_0(Type, operation) +#define zig_float_greater_builtin_1(Type, operation) \ + static inline zig_i32 zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return ((lhs > rhs) - !(lhs >= rhs)); \ + } + +#define zig_float_binary_builtin_0(Type, operation, operator) \ + zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##Type), 3)(zig_##Type, zig_##Type); \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_##Type), 3)(lhs, rhs); \ + } +#define zig_float_binary_builtin_1(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return lhs operator rhs; \ + } + +#define zig_float_builtins(Type) \ + zig_convert_builtin(i32, fix, Type, ) \ + zig_convert_builtin(u32, fixuns, Type, ) \ + zig_convert_builtin(i64, fix, Type, ) \ + zig_convert_builtin(u64, fixuns, Type, ) \ + zig_convert_builtin(i128, fix, Type, ) \ + zig_convert_builtin(u128, fixuns, Type, ) \ + zig_convert_builtin(Type, float, i32, ) \ + zig_convert_builtin(Type, floatun, u32, ) \ + zig_convert_builtin(Type, float, i64, ) \ + zig_convert_builtin(Type, floatun, u64, ) \ + zig_convert_builtin(Type, float, i128, ) \ + zig_convert_builtin(Type, floatun, u128, ) \ + 
zig_expand_concat(zig_float_negate_builtin_, zig_has_##Type)(Type) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, cmp) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, ne) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, eq) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, lt) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_##Type)(Type, le) \ + zig_expand_concat(zig_float_greater_builtin_, zig_has_##Type)(Type, gt) \ + zig_expand_concat(zig_float_greater_builtin_, zig_has_##Type)(Type, ge) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_##Type)(Type, add, +) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_##Type)(Type, sub, -) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_##Type)(Type, mul, *) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_##Type)(Type, div, /) \ + zig_extern zig_##Type zig_libc_name_##Type(sqrt)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(sin)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(cos)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(tan)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(exp)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(exp2)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(log)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(log2)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(log10)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fabs)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(floor)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(ceil)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(round)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(trunc)(zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fmod)(zig_##Type, zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fmin)(zig_##Type, zig_##Type); \ + zig_extern 
zig_##Type zig_libc_name_##Type(fmax)(zig_##Type, zig_##Type); \ + zig_extern zig_##Type zig_libc_name_##Type(fma)(zig_##Type, zig_##Type, zig_##Type); \ +\ + static inline zig_##Type zig_div_trunc_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_libc_name_##Type(trunc)(zig_div_##Type(lhs, rhs)); \ + } \ +\ + static inline zig_##Type zig_div_floor_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_libc_name_##Type(floor)(zig_div_##Type(lhs, rhs)); \ + } \ +\ + static inline zig_##Type zig_mod_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_sub_##Type(lhs, zig_mul_##Type(zig_div_floor_##Type(lhs, rhs), rhs)); \ + } +zig_float_builtins(f16) +zig_float_builtins(f32) +zig_float_builtins(f64) +zig_float_builtins(f80) +zig_float_builtins(f128) +zig_float_builtins(c_longdouble) + +#if _MSC_VER && (_M_IX86 || _M_X64) + +// TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 + +#define zig_msvc_atomics(Type, suffix) \ + static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ + zig_##Type comparand = *expected; \ + zig_##Type initial = _InterlockedCompareExchange##suffix(obj, desired, comparand); \ + bool exchanged = initial == comparand; \ + if (!exchanged) { \ + *expected = initial; \ + } \ + return exchanged; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedExchange##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedExchangeAdd##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev - value; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + 
} \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_or_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedOr##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xor_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedXor##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_and_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + return _InterlockedAnd##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_nand_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = ~(prev & value); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_min_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = value < prev ? value : prev; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_max_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = value > prev ? 
value : prev; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + _InterlockedExchange##suffix(obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \ + return _InterlockedOr##suffix(obj, 0); \ + } + +zig_msvc_atomics(u8, 8) +zig_msvc_atomics(i8, 8) +zig_msvc_atomics(u16, 16) +zig_msvc_atomics(i16, 16) +zig_msvc_atomics(u32, ) +zig_msvc_atomics(i32, ) + +#if _M_X64 +zig_msvc_atomics(u64, 64) +zig_msvc_atomics(i64, 64) +#endif + +#define zig_msvc_flt_atomics(Type, ReprType, suffix) \ + static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ + zig_##ReprType comparand = *((zig_##ReprType*)expected); \ + zig_##ReprType initial = _InterlockedCompareExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&desired), comparand); \ + bool exchanged = initial == comparand; \ + if (!exchanged) { \ + *expected = *((zig_##Type*)&initial); \ + } \ + return exchanged; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + zig_##ReprType initial = _InterlockedExchange##suffix((zig_##ReprType volatile*)obj, *((zig_##ReprType*)&value)); \ + return *((zig_##Type*)&initial); \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##ReprType new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev + value; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + } \ + return prev; \ + } \ + static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##ReprType new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev - value; \ + success = 
zig_msvc_cmpxchg_##Type(obj, &prev, *((zig_##ReprType*)&new)); \ + } \ + return prev; \ + } + +zig_msvc_flt_atomics(f32, u32, ) +#if _M_X64 +zig_msvc_flt_atomics(f64, u64, 64) +#endif + +#if _M_IX86 +static inline void zig_msvc_atomic_barrier() { + zig_i32 barrier; + __asm { + xchg barrier, eax + } +} + +static inline void* zig_msvc_atomicrmw_xchg_p32(void** obj, zig_u32* arg) { + return _InterlockedExchangePointer(obj, arg); +} + +static inline void zig_msvc_atomic_store_p32(void** obj, zig_u32* arg) { + _InterlockedExchangePointer(obj, arg); +} + +static inline void* zig_msvc_atomic_load_p32(void** obj) { + return (void*)_InterlockedOr((void*)obj, 0); +} + +static inline bool zig_msvc_cmpxchg_p32(void** obj, void** expected, void* desired) { + void* comparand = *expected; + void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand); + bool exchanged = initial == comparand; + if (!exchanged) { + *expected = initial; + } + return exchanged; +} +#else /* _M_IX86 */ +static inline void* zig_msvc_atomicrmw_xchg_p64(void** obj, zig_u64* arg) { + return _InterlockedExchangePointer(obj, arg); +} + +static inline void zig_msvc_atomic_store_p64(void** obj, zig_u64* arg) { + _InterlockedExchangePointer(obj, arg); +} + +static inline void* zig_msvc_atomic_load_p64(void** obj) { + return (void*)_InterlockedOr64((void*)obj, 0); +} + +static inline bool zig_msvc_cmpxchg_p64(void** obj, void** expected, void* desired) { + void* comparand = *expected; + void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand); + bool exchanged = initial == comparand; + if (!exchanged) { + *expected = initial; + } + return exchanged; +} + +static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) { + return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_i64*)expected); +} + +static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) { + 
return _InterlockedCompareExchange128((zig_i64 volatile*)obj, desired.hi, desired.lo, (zig_u64*)expected); +} + +#define zig_msvc_atomics_128xchg(Type) \ + static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, value); \ + } \ + return prev; \ + } + +zig_msvc_atomics_128xchg(u128) +zig_msvc_atomics_128xchg(i128) + +#define zig_msvc_atomics_128op(Type, operation) \ + static inline zig_##Type zig_msvc_atomicrmw_##operation##_##Type(zig_##Type volatile* obj, zig_##Type value) { \ + bool success = false; \ + zig_##Type new; \ + zig_##Type prev; \ + while (!success) { \ + prev = *obj; \ + new = zig_##operation##_##Type(prev, value); \ + success = zig_msvc_cmpxchg_##Type(obj, &prev, new); \ + } \ + return prev; \ + } + +zig_msvc_atomics_128op(u128, add) +zig_msvc_atomics_128op(u128, sub) +zig_msvc_atomics_128op(u128, or) +zig_msvc_atomics_128op(u128, xor) +zig_msvc_atomics_128op(u128, and) +zig_msvc_atomics_128op(u128, nand) +zig_msvc_atomics_128op(u128, min) +zig_msvc_atomics_128op(u128, max) +#endif /* _M_IX86 */ + +#endif /* _MSC_VER && (_M_IX86 || _M_X64) */ + +/* ========================= Special Case Intrinsics ========================= */ + +#if (_MSC_VER && _M_X64) || defined(__x86_64__) + +static inline void* zig_x86_64_windows_teb(void) { +#if _MSC_VER + return (void*)__readgsqword(0x30); +#else + void* teb; + __asm volatile(" movq %%gs:0x30, %[ptr]": [ptr]"=r"(teb)::); + return teb; +#endif +} + +#elif (_MSC_VER && _M_IX86) || defined(__i386__) || defined(__X86__) + +static inline void* zig_x86_windows_teb(void) { +#if _MSC_VER + return (void*)__readfsdword(0x18); +#else + void* teb; + __asm volatile(" movl %%fs:0x18, %[ptr]": [ptr]"=r"(teb)::); + return teb; +#endif +} + +#endif + +#if (_MSC_VER && (_M_IX86 || _M_X64)) || defined(__i386__) || defined(__x86_64__) + +static inline 
void zig_x86_cpuid(zig_u32 leaf_id, zig_u32 subid, zig_u32* eax, zig_u32* ebx, zig_u32* ecx, zig_u32* edx) { + zig_u32 cpu_info[4]; +#if _MSC_VER + __cpuidex(cpu_info, leaf_id, subid); +#else + __cpuid_count(leaf_id, subid, cpu_info[0], cpu_info[1], cpu_info[2], cpu_info[3]); +#endif + *eax = cpu_info[0]; + *ebx = cpu_info[1]; + *ecx = cpu_info[2]; + *edx = cpu_info[3]; +} + +static inline zig_u32 zig_x86_get_xcr0(void) { +#if _MSC_VER + return (zig_u32)_xgetbv(0); +#else + zig_u32 eax; + zig_u32 edx; + __asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0)); + return eax; +#endif +} + +#endif diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 162c798758..901ea3697a 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -551,7 +551,11 @@ test "align(N) on functions" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO this is not supported on MSVC + + // This is not supported on MSVC + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) { + return error.SkipZigTest; + } // function alignment is a compile error on wasm32/wasm64 if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; diff --git a/test/behavior/asm.zig b/test/behavior/asm.zig index f041963494..b242374ef8 100644 --- a/test/behavior/asm.zig +++ b/test/behavior/asm.zig @@ -7,6 +7,7 @@ const is_x86_64_linux = builtin.cpu.arch == .x86_64 and builtin.os.tag == .linux comptime { if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_aarch64 and + !(builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) and // MSVC doesn't support inline assembly is_x86_64_linux) { asm ( @@ -23,7 +24,8 @@ test "module level assembly" { if (builtin.zig_backend == .stage2_arm) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly if (is_x86_64_linux) { try expect(this_is_my_alias() == 1234); @@ -36,7 +38,8 @@ test "output constraint modifiers" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly // This is only testing compilation. var a: u32 = 3; @@ -58,7 +61,8 @@ test "alternative constraints" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly // Make sure we allow commas as a separator for alternative constraints. 
var a: u32 = 3; @@ -75,7 +79,8 @@ test "sized integer/float in asm input" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly asm volatile ("" : @@ -125,7 +130,8 @@ test "struct/array/union types as input values" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly asm volatile ("" : @@ -151,6 +157,8 @@ test "asm modifiers (AArch64)" { if (builtin.target.cpu.arch != .aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly + var x: u32 = 15; const double = asm ("add %[ret:w], %[in:w], %[in:w]" : [ret] "=r" (-> u32), diff --git a/test/behavior/int_comparison_elision.zig b/test/behavior/int_comparison_elision.zig index 5e13e00e83..ea26f02b7e 100644 --- a/test/behavior/int_comparison_elision.zig +++ b/test/behavior/int_comparison_elision.zig @@ -13,7 +13,6 @@ test "int comparison elision" { // TODO: support int types > 128 bits wide in other backends if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/lower_strlit_to_vector.zig b/test/behavior/lower_strlit_to_vector.zig index adbca8f0df..427379636e 100644 --- a/test/behavior/lower_strlit_to_vector.zig +++ b/test/behavior/lower_strlit_to_vector.zig @@ -7,7 +7,6 @@ test "strlit to vector" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO const strlit = "0123456789abcdef0123456789ABCDEF"; const vec_from_strlit: @Vector(32, u8) = strlit.*; diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 8ab8614605..54263e1daf 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -1463,7 +1463,6 @@ test "vector integer addition" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 8a01d68587..348e269682 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1330,7 +1330,6 @@ test "struct field init value is size of the struct" { } test "under-aligned struct field" { - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index e983e0cfb0..191c7bf7eb 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -75,7 +75,6 @@ test "vector int operators" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const S = struct { @@ -178,7 +177,6 @@ test "tuple to vector" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) { @@ -943,7 +941,6 @@ test "multiplication-assignment operator with an array operand" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const S = struct { @@ -1247,7 +1244,6 @@ test "array operands to shuffle are coerced to vectors" { test "load packed vector element" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == 
.stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO @@ -1260,7 +1256,6 @@ test "load packed vector element" { test "store packed vector element" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig index 6c0c5e03cf..e9750853a6 100644 --- a/test/stage2/cbe.zig +++ b/test/stage2/cbe.zig @@ -959,7 +959,7 @@ pub fn addCases(ctx: *TestContext) !void { \\ _ = a; \\} , - \\zig_extern void start(zig_u8 const a0); + \\zig_extern void start(uint8_t const a0); \\ ); ctx.h("header with multiple param function", linux_x64, @@ -967,19 +967,19 @@ pub fn addCases(ctx: *TestContext) !void { \\ _ = a; _ = b; _ = c; \\} , - \\zig_extern void start(zig_u8 const a0, zig_u8 const a1, zig_u8 const a2); + \\zig_extern void start(uint8_t const a0, uint8_t const a1, uint8_t const a2); \\ ); ctx.h("header with u32 param function", linux_x64, \\export fn start(a: u32) void{ _ = a; } , - \\zig_extern void start(zig_u32 const a0); + \\zig_extern void start(uint32_t const a0); \\ ); ctx.h("header with usize param function", linux_x64, \\export fn start(a: usize) void{ _ = a; } , - \\zig_extern void start(zig_usize const a0); + \\zig_extern void start(uintptr_t const a0); \\ ); ctx.h("header with bool param function", linux_x64, @@ -993,7 +993,7 @@ pub fn addCases(ctx: *TestContext) !void { \\ unreachable; \\} , - \\zig_extern 
zig_noreturn start(void); + \\zig_extern zig_noreturn void start(void); \\ ); ctx.h("header with multiple functions", linux_x64, @@ -1009,7 +1009,7 @@ pub fn addCases(ctx: *TestContext) !void { ctx.h("header with multiple includes", linux_x64, \\export fn start(a: u32, b: usize) void{ _ = a; _ = b; } , - \\zig_extern void start(zig_u32 const a0, zig_usize const a1); + \\zig_extern void start(uint32_t const a0, uintptr_t const a1); \\ ); }