text
stringlengths 32
314k
| url
stringlengths 93
243
|
---|---|
const std = @import("std");
/// Build script for the raylib "2D camera" example: compiles src/main.zig
/// into an executable linked against libc and the system raylib, and wires
/// up a `zig build run` step.
pub fn build(b: *std.build.Builder) void {
    // Let `zig build` users choose a target triple; the default is the
    // native (host) target and no target is disallowed.
    const build_target = b.standardTargetOptions(.{});
    // Let users choose Debug, ReleaseSafe, ReleaseFast, or ReleaseSmall.
    const build_mode = b.standardReleaseOptions();

    const exe = b.addExecutable("08_core_2d_camera", "src/main.zig");
    exe.setTarget(build_target);
    exe.setBuildMode(build_mode);
    exe.linkLibC();
    exe.linkSystemLibrary("raylib");
    exe.install();

    // `zig build run` builds and then launches the example.
    const run_cmd = exe.run();
    const run_step = b.step("run", "Runs the executable");
    run_step.dependOn(&run_cmd.step);
}
| https://raw.githubusercontent.com/thechampagne/raylib-zig/540b817e8b92e6a34c58664616ac0dcf445c42af/08_core_2d_camera/build.zig |
const std = @import("std");
const testing = std.testing;
const command = @import("./util/command.zig");
const Graph = @import("./island.zig").Graph;
/// Entry point: builds the island graph from stdin, solves the chosen part,
/// checks the answer against the known-good value, and prints the result.
pub fn main() anyerror!u8 {
    // Arena-allocate everything; it is all freed at once on exit.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();

    const part = command.choosePart();

    var graph = Graph.init(allocator);
    defer graph.deinit();

    // Feed every line of stdin into the graph.
    const stdin = std.io.getStdIn().reader();
    var line_buf: [1024]u8 = undefined;
    while (try stdin.readUntilDelimiterOrEof(&line_buf, '\n')) |line| {
        try graph.addLine(line);
    }
    // graph.show();

    const answer: usize = switch (part) {
        .part1 => blk: {
            const product = try graph.getSubgraphSizeProduct();
            try testing.expectEqual(@as(usize, 562772), product);
            break :blk product;
        },
        .part2 => blk: {
            // Day 25 has no real part 2; the placeholder answer is 42.
            const placeholder: usize = 42;
            try testing.expectEqual(@as(usize, 42), placeholder);
            break :blk placeholder;
        },
    };

    const out = std.io.getStdOut().writer();
    try out.print("=== {s} ===\n", .{@tagName(part)});
    try out.print("Answer: {}\n", .{answer});
    try out.print("Elapsed: {}ms\n", .{command.getElapsedMs()});
    return 0;
}
| https://raw.githubusercontent.com/gonzus/AdventOfCode/7e972b92a23db29461b2869713a3251998af5822/2023/p25/p25.zig |
/// An x86-64 ABI parameter/return classification, covering both the SysV
/// classes (INTEGER, SSE, SSEUP, X87, …) and a few backend-specific ones
/// (win_i128, float_combine, integer_per_element).
pub const Class = enum {
    integer,
    sse,
    sseup,
    x87,
    x87up,
    complex_x87,
    memory,
    none,
    win_i128,
    float,
    float_combine,
    integer_per_element,

    /// True for the three x87-family classes.
    fn isX87(class: Class) bool {
        return class == .x87 or class == .x87up or class == .complex_x87;
    }

    /// Combine a field class with the prev one, following the SysV ABI
    /// merge rules (quoted below).
    fn combineSystemV(prev_class: Class, next_class: Class) Class {
        // "If both classes are equal, this is the resulting class."
        // (Two floats merge to the backend-specific float_combine.)
        if (prev_class == next_class) {
            if (prev_class == .float) return .float_combine;
            return prev_class;
        }
        // "If one of the classes is NO_CLASS, the resulting class
        // is the other class."
        if (prev_class == .none) return next_class;
        // "If one of the classes is MEMORY, the result is the MEMORY class."
        if (prev_class == .memory) return .memory;
        if (next_class == .memory) return .memory;
        // "If one of the classes is INTEGER, the result is the INTEGER."
        if (prev_class == .integer) return .integer;
        if (next_class == .integer) return .integer;
        // "If one of the classes is X87, X87UP, COMPLEX_X87 class,
        // MEMORY is used as class."
        if (prev_class.isX87() or next_class.isX87()) return .memory;
        // "Otherwise class SSE is used."
        return .sse;
    }
};
/// Classify `ty` for parameter/return passing under the Windows x64
/// calling convention. Returns a single Class (Windows never splits a
/// value across registers).
pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
    // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
    // "There's a strict one-to-one correspondence between a function call's arguments
    // and the registers used for those arguments. Any argument that doesn't fit in 8
    // bytes, or isn't 1, 2, 4, or 8 bytes, must be passed by reference. A single argument
    // is never spread across multiple registers."
    // "All floating point operations are done using the 16 XMM registers."
    // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
    // as if they were integers of the same size."
    const tag = ty.zigTypeTag(zcu);
    switch (tag) {
        // Integer-like types fall through to the size-based rules below.
        .Pointer,
        .Int,
        .Bool,
        .Enum,
        .Void,
        .NoReturn,
        .ErrorSet,
        .Struct,
        .Union,
        .Optional,
        .Array,
        .ErrorUnion,
        .AnyFrame,
        .Frame,
        => {},

        .Float, .Vector => return .sse,

        // Comptime-only types have no runtime representation and can never
        // reach ABI classification.
        .Type,
        .ComptimeFloat,
        .ComptimeInt,
        .Undefined,
        .Null,
        .Fn,
        .Opaque,
        .EnumLiteral,
        => unreachable,
    }
    // Register-sized values travel as integers; oversized ints and packed
    // containers are treated as i128, everything else is passed in memory.
    return switch (ty.abiSize(zcu)) {
        0 => unreachable,
        1, 2, 4, 8 => .integer,
        else => switch (tag) {
            .Int => .win_i128,
            .Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") .win_i128 else .memory,
            else => .memory,
        },
    };
}
pub const Context = enum { ret, arg, field, other };
/// Classify `ty` for parameter/return passing under the System V x86-64 ABI.
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
/// `ctx` matters because f16 and large vectors classify differently as
/// return values, arguments, and aggregate fields.
pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
    // Canonical "pass/return via memory" answer.
    const memory_class = [_]Class{
        .memory, .none, .none, .none,
        .none, .none, .none, .none,
    };
    var result = [1]Class{.none} ** 8;
    switch (ty.zigTypeTag(zcu)) {
        .Pointer => switch (ty.ptrSize(zcu)) {
            // A slice is a (pointer, length) pair: two INTEGER eightbytes.
            .Slice => {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            },
            else => {
                result[0] = .integer;
                return result;
            },
        },
        // Integers: one INTEGER eightbyte per started 64 bits, up to 256
        // bits; anything wider goes to memory.
        .Int, .Enum, .ErrorSet => {
            const bits = ty.intInfo(zcu).bits;
            if (bits <= 64) {
                result[0] = .integer;
                return result;
            }
            if (bits <= 128) {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            }
            if (bits <= 192) {
                result[0] = .integer;
                result[1] = .integer;
                result[2] = .integer;
                return result;
            }
            if (bits <= 256) {
                result[0] = .integer;
                result[1] = .integer;
                result[2] = .integer;
                result[3] = .integer;
                return result;
            }
            return memory_class;
        },
        .Bool, .Void, .NoReturn => {
            result[0] = .integer;
            return result;
        },
        .Float => switch (ty.floatBits(target)) {
            16 => {
                if (ctx == .field) {
                    result[0] = .memory;
                } else {
                    // TODO clang doesn't allow __fp16 as .ret or .arg
                    result[0] = .sse;
                }
                return result;
            },
            32 => {
                result[0] = .float;
                return result;
            },
            64 => {
                result[0] = .sse;
                return result;
            },
            128 => {
                // "Arguments of types __float128, _Decimal128 and __m128 are
                // split into two halves. The least significant ones belong
                // to class SSE, the most significant one to class SSEUP."
                result[0] = .sse;
                result[1] = .sseup;
                return result;
            },
            80 => {
                // "The 64-bit mantissa of arguments of type long double
                // belongs to classX87, the 16-bit exponent plus 6 bytes
                // of padding belongs to class X87UP."
                result[0] = .x87;
                result[1] = .x87up;
                return result;
            },
            else => unreachable,
        },
        .Vector => {
            const elem_ty = ty.childType(zcu);
            const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
            // Vectors of bool (predicate vectors) have their own rules.
            if (elem_ty.toIntern() == .bool_type) {
                if (bits <= 32) return .{
                    .integer, .none, .none, .none,
                    .none, .none, .none, .none,
                };
                if (bits <= 64) return .{
                    .sse, .none, .none, .none,
                    .none, .none, .none, .none,
                };
                if (ctx == .arg) {
                    if (bits <= 128) return .{
                        .integer_per_element, .none, .none, .none,
                        .none, .none, .none, .none,
                    };
                    if (bits <= 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{
                        .integer_per_element, .none, .none, .none,
                        .none, .none, .none, .none,
                    };
                    if (bits <= 512 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{
                        .integer_per_element, .none, .none, .none,
                        .none, .none, .none, .none,
                    };
                }
                return memory_class;
            }
            if (bits <= 64) return .{
                .sse, .none, .none, .none,
                .none, .none, .none, .none,
            };
            if (bits <= 128) return .{
                .sse, .sseup, .none, .none,
                .none, .none, .none, .none,
            };
            // Without AVX, vector arguments wider than 128 bits go in memory.
            if (ctx == .arg and !std.Target.x86.featureSetHas(target.cpu.features, .avx)) return memory_class;
            if (bits <= 192) return .{
                .sse, .sseup, .sseup, .none,
                .none, .none, .none, .none,
            };
            if (bits <= 256) return .{
                .sse, .sseup, .sseup, .sseup,
                .none, .none, .none, .none,
            };
            // Without AVX-512, vector arguments wider than 256 bits go in memory.
            if (ctx == .arg and !std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return memory_class;
            if (bits <= 320) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .none, .none, .none,
            };
            if (bits <= 384) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .none, .none,
            };
            if (bits <= 448) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .sseup, .none,
            };
            // LLVM always returns vectors byval
            if (bits <= 512 or ctx == .ret) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .sseup, .sseup,
            };
            return memory_class;
        },
        .Optional => {
            // Only pointer-like optionals (?*T and friends) fit in a
            // register; other optionals carry a separate tag and go to memory.
            if (ty.isPtrLikeOptional(zcu)) {
                result[0] = .integer;
                return result;
            }
            return memory_class;
        },
        .Struct, .Union => {
            // "If the size of an object is larger than eight eightbytes, or
            // it contains unaligned fields, it has class MEMORY"
            // "If the size of the aggregate exceeds a single eightbyte, each is classified
            // separately.".
            const ty_size = ty.abiSize(zcu);
            switch (ty.containerLayout(zcu)) {
                .auto, .@"extern" => {},
                // Packed containers behave like integers of the same size.
                .@"packed" => {
                    assert(ty_size <= 16);
                    result[0] = .integer;
                    if (ty_size > 8) result[1] = .integer;
                    return result;
                },
            }
            if (ty_size > 64)
                return memory_class;
            // Classify field-by-field into `result` (offset 0: `ty` is the
            // outermost aggregate here).
            _ = if (zcu.typeToStruct(ty)) |loaded_struct|
                classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
            else if (zcu.typeToUnion(ty)) |loaded_union|
                classifySystemVUnion(&result, 0, loaded_union, zcu, target)
            else
                unreachable;
            // Post-merger cleanup
            // "If one of the classes is MEMORY, the whole argument is passed in memory"
            // "If X87UP is not preceded by X87, the whole argument is passed in memory."
            for (result, 0..) |class, i| switch (class) {
                .memory => return memory_class,
                .x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
                else => continue,
            };
            // "If the size of the aggregate exceeds two eightbytes and the first eight-
            // byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole argument
            // is passed in memory."
            if (ty_size > 16 and (result[0] != .sse or
                std.mem.indexOfNone(Class, result[1..], &.{ .sseup, .none }) != null)) return memory_class;
            // "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
            // NOTE(review): this relies on result[0] never being .sseup at
            // this point (otherwise i - 1 would underflow) — appears
            // guaranteed by how field classes are produced, but worth confirming.
            for (&result, 0..) |*item, i| {
                if (item.* == .sseup) switch (result[i - 1]) {
                    .sse, .sseup => continue,
                    else => item.* = .sse,
                };
            }
            return result;
        },
        // Bare arrays: register-sized ones act like integers; larger go to memory.
        .Array => {
            const ty_size = ty.abiSize(zcu);
            if (ty_size <= 8) {
                result[0] = .integer;
                return result;
            }
            if (ty_size <= 16) {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            }
            return memory_class;
        },
        else => unreachable,
    }
}
/// Classify all runtime fields of `loaded_struct` into `result`, where the
/// struct itself begins `starting_byte_offset` bytes into the outermost
/// aggregate. Returns the byte offset just past the struct (its end,
/// rounded up to its alignment).
fn classifySystemVStruct(
    result: *[8]Class,
    starting_byte_offset: u64,
    loaded_struct: InternPool.LoadedStructType,
    zcu: *Zcu,
    target: std.Target,
) u64 {
    const ip = &zcu.intern_pool;
    var byte_offset = starting_byte_offset;
    var field_it = loaded_struct.iterateRuntimeOrder(ip);
    while (field_it.next()) |field_index| {
        const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
        const field_align = loaded_struct.fieldAlign(ip, field_index);
        // Advance to the field's start: explicit alignment if set, otherwise
        // the field type's natural ABI alignment.
        byte_offset = std.mem.alignForward(
            u64,
            byte_offset,
            field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
        );
        // Non-packed nested aggregates are classified by recursing field-by-
        // field; packed ones fall through and are classified as a unit.
        if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
            switch (field_loaded_struct.layout) {
                .auto, .@"extern" => {
                    byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
                    continue;
                },
                .@"packed" => {},
            }
        } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
            switch (field_loaded_union.getLayout(ip)) {
                .auto, .@"extern" => {
                    byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
                    continue;
                },
                .@"packed" => {},
            }
        }
        // Merge the field's own classes into the eightbytes it spans.
        const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
        for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
            result_class.* = result_class.combineSystemV(field_class);
        byte_offset += field_ty.abiSize(zcu);
    }
    const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
    // Sanity check: the recorded struct size must equal the offset we
    // reached, padded to the struct's alignment.
    std.debug.assert(final_byte_offset == std.mem.alignForward(
        u64,
        byte_offset,
        loaded_struct.flagsPtr(ip).alignment.toByteUnits().?,
    ));
    return final_byte_offset;
}
/// Classify every field of `loaded_union` into `result`. All union fields
/// begin at the same `starting_byte_offset`, so their classes are merged
/// over the same eightbytes. Returns the offset just past the union.
fn classifySystemVUnion(
    result: *[8]Class,
    starting_byte_offset: u64,
    loaded_union: InternPool.LoadedUnionType,
    zcu: *Zcu,
    target: std.Target,
) u64 {
    const ip = &zcu.intern_pool;
    for (0..loaded_union.field_types.len) |field_index| {
        const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
        // Non-packed nested aggregates recurse; packed ones fall through
        // and are classified as a unit.
        if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
            switch (field_loaded_struct.layout) {
                .auto, .@"extern" => {
                    _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
                    continue;
                },
                .@"packed" => {},
            }
        } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
            switch (field_loaded_union.getLayout(ip)) {
                .auto, .@"extern" => {
                    _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
                    continue;
                },
                .@"packed" => {},
            }
        }
        // Merge the field's classes into the union's eightbytes.
        const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
        for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
            result_class.* = result_class.combineSystemV(field_class);
    }
    return starting_byte_offset + loaded_union.size(ip).*;
}
/// Register conventions for the System V x86-64 calling convention.
pub const SysV = struct {
    /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
    /// for anything else but stack offset tracking therefore we exclude them from this set.
    pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 };
    /// These registers need to be preserved (saved on the stack) and restored by the caller before
    /// the caller relinquishes control to a subroutine via call instruction (or similar).
    /// In other words, these registers are free to use by the callee.
    pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 } ++ x87_regs ++ sse_avx_regs;
    /// Integer argument registers, in ABI order (rdi first).
    pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
    /// The eight SSE/AVX argument registers (stored as their ymm aliases).
    pub const c_abi_sse_param_regs = sse_avx_regs[0..8].*;
    /// Integer return registers: rax, then rdx for a second eightbyte.
    pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
    /// The two SSE/AVX return registers (as ymm aliases).
    pub const c_abi_sse_return_regs = sse_avx_regs[0..2].*;
};
/// Register conventions for the Windows x64 calling convention.
pub const Win64 = struct {
    /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
    /// for anything else but stack offset tracking therefore we exclude them from this set.
    pub const callee_preserved_regs = [_]Register{ .rbx, .rsi, .rdi, .r12, .r13, .r14, .r15 };
    /// These registers need to be preserved (saved on the stack) and restored by the caller before
    /// the caller relinquishes control to a subroutine via call instruction (or similar).
    /// In other words, these registers are free to use by the callee.
    pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .r8, .r9, .r10, .r11 } ++ x87_regs ++ sse_avx_regs;
    /// Integer argument registers, in ABI order (rcx first).
    pub const c_abi_int_param_regs = [_]Register{ .rcx, .rdx, .r8, .r9 };
    /// The four SSE/AVX argument registers (stored as their ymm aliases).
    pub const c_abi_sse_param_regs = sse_avx_regs[0..4].*;
    /// Single integer return register.
    pub const c_abi_int_return_regs = [_]Register{.rax};
    /// Single SSE/AVX return register (as its ymm alias).
    pub const c_abi_sse_return_regs = sse_avx_regs[0..1].*;
};
/// Map the generic calling conventions (.Unspecified / .C) onto the concrete
/// ABI for `target`; every other convention is passed through untouched.
pub fn resolveCallingConvention(
    cc: std.builtin.CallingConvention,
    target: std.Target,
) std.builtin.CallingConvention {
    switch (cc) {
        .Unspecified, .C => {},
        else => return cc,
    }
    return if (target.os.tag == .windows) .Win64 else .SysV;
}
/// Registers the callee must save/restore under `cc` (must be resolved).
pub fn getCalleePreservedRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.callee_preserved_regs,
        .SysV => &SysV.callee_preserved_regs,
        else => unreachable,
    };
}
/// Registers the caller must save around a call under `cc` (must be resolved).
pub fn getCallerPreservedRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.caller_preserved_regs,
        .SysV => &SysV.caller_preserved_regs,
        else => unreachable,
    };
}
/// Integer argument registers for C-ABI calls under `cc` (must be resolved).
pub fn getCAbiIntParamRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.c_abi_int_param_regs,
        .SysV => &SysV.c_abi_int_param_regs,
        else => unreachable,
    };
}
/// SSE argument registers for C-ABI calls under `cc` (must be resolved).
pub fn getCAbiSseParamRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.c_abi_sse_param_regs,
        .SysV => &SysV.c_abi_sse_param_regs,
        else => unreachable,
    };
}
/// Integer return registers for C-ABI calls under `cc` (must be resolved).
pub fn getCAbiIntReturnRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.c_abi_int_return_regs,
        .SysV => &SysV.c_abi_int_return_regs,
        else => unreachable,
    };
}
/// SSE return registers for C-ABI calls under `cc` (must be resolved).
pub fn getCAbiSseReturnRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.c_abi_sse_return_regs,
        .SysV => &SysV.c_abi_sse_return_regs,
        else => unreachable,
    };
}
/// General-purpose registers available to the register allocator
/// (rsp/rbp are deliberately absent; they are reserved for stack tracking,
/// see the note on callee_preserved_regs above).
const gp_regs = [_]Register{
    .rax, .rcx, .rdx, .rbx, .rsi, .rdi, .r8, .r9, .r10, .r11, .r12, .r13, .r14, .r15,
};
/// The eight x87 stack registers.
const x87_regs = [_]Register{
    .st0, .st1, .st2, .st3, .st4, .st5, .st6, .st7,
};
/// The 16 SSE/AVX registers, named by their widest (ymm) alias.
const sse_avx_regs = [_]Register{
    .ymm0, .ymm1, .ymm2, .ymm3, .ymm4, .ymm5, .ymm6, .ymm7,
    .ymm8, .ymm9, .ymm10, .ymm11, .ymm12, .ymm13, .ymm14, .ymm15,
};
/// All registers the allocator may hand out. The last x87 register (st7)
/// is excluded — presumably to keep one x87 stack slot free; TODO confirm.
const allocatable_regs = gp_regs ++ x87_regs[0 .. x87_regs.len - 1] ++ sse_avx_regs;
/// Register manager parameterized over this backend's CodeGen and register set.
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, allocatable_regs);
// Register classes
const RegisterBitSet = RegisterManager.RegisterBitSet;
/// Precomputed bit sets selecting each register class out of
/// `allocatable_regs` (indexed by allocation order).
pub const RegisterClass = struct {
    /// Build the bit set of all allocatable registers whose class matches
    /// `wanted` (evaluated at comptime for the constants below).
    fn makeSet(wanted: anytype) RegisterBitSet {
        var set = RegisterBitSet.initEmpty();
        for (allocatable_regs, 0..) |reg, index| {
            if (reg.class() == wanted) set.set(index);
        }
        return set;
    }

    pub const gp: RegisterBitSet = makeSet(.general_purpose);
    pub const x87: RegisterBitSet = makeSet(.x87);
    pub const sse: RegisterBitSet = makeSet(.sse);
};
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;
const InternPool = @import("../../InternPool.zig");
const Register = @import("bits.zig").Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
const Value = @import("../../Value.zig");
const Zcu = @import("../../Module.zig");
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/src/arch/x86_64/abi.zig |
/// An x86-64 ABI parameter/return classification, covering both the SysV
/// classes (INTEGER, SSE, SSEUP, X87, …) and a few backend-specific ones
/// (win_i128, float_combine, integer_per_element).
pub const Class = enum {
    integer,
    sse,
    sseup,
    x87,
    x87up,
    complex_x87,
    memory,
    none,
    win_i128,
    float,
    float_combine,
    integer_per_element,

    /// True for the three x87-family classes.
    fn isX87(class: Class) bool {
        return class == .x87 or class == .x87up or class == .complex_x87;
    }

    /// Combine a field class with the prev one, following the SysV ABI
    /// merge rules (quoted below).
    fn combineSystemV(prev_class: Class, next_class: Class) Class {
        // "If both classes are equal, this is the resulting class."
        // (Two floats merge to the backend-specific float_combine.)
        if (prev_class == next_class) {
            if (prev_class == .float) return .float_combine;
            return prev_class;
        }
        // "If one of the classes is NO_CLASS, the resulting class
        // is the other class."
        if (prev_class == .none) return next_class;
        // "If one of the classes is MEMORY, the result is the MEMORY class."
        if (prev_class == .memory) return .memory;
        if (next_class == .memory) return .memory;
        // "If one of the classes is INTEGER, the result is the INTEGER."
        if (prev_class == .integer) return .integer;
        if (next_class == .integer) return .integer;
        // "If one of the classes is X87, X87UP, COMPLEX_X87 class,
        // MEMORY is used as class."
        if (prev_class.isX87() or next_class.isX87()) return .memory;
        // "Otherwise class SSE is used."
        return .sse;
    }
};
/// Classify `ty` for parameter/return passing under the Windows x64
/// calling convention. Returns a single Class (Windows never splits a
/// value across registers).
pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
    // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
    // "There's a strict one-to-one correspondence between a function call's arguments
    // and the registers used for those arguments. Any argument that doesn't fit in 8
    // bytes, or isn't 1, 2, 4, or 8 bytes, must be passed by reference. A single argument
    // is never spread across multiple registers."
    // "All floating point operations are done using the 16 XMM registers."
    // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
    // as if they were integers of the same size."
    const tag = ty.zigTypeTag(zcu);
    switch (tag) {
        // Integer-like types fall through to the size-based rules below.
        .Pointer,
        .Int,
        .Bool,
        .Enum,
        .Void,
        .NoReturn,
        .ErrorSet,
        .Struct,
        .Union,
        .Optional,
        .Array,
        .ErrorUnion,
        .AnyFrame,
        .Frame,
        => {},

        .Float, .Vector => return .sse,

        // Comptime-only types have no runtime representation and can never
        // reach ABI classification.
        .Type,
        .ComptimeFloat,
        .ComptimeInt,
        .Undefined,
        .Null,
        .Fn,
        .Opaque,
        .EnumLiteral,
        => unreachable,
    }
    // Register-sized values travel as integers; oversized ints and packed
    // containers are treated as i128, everything else is passed in memory.
    return switch (ty.abiSize(zcu)) {
        0 => unreachable,
        1, 2, 4, 8 => .integer,
        else => switch (tag) {
            .Int => .win_i128,
            .Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") .win_i128 else .memory,
            else => .memory,
        },
    };
}
pub const Context = enum { ret, arg, field, other };
/// Classify `ty` for parameter/return passing under the System V x86-64 ABI.
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
/// `ctx` matters because f16 and large vectors classify differently as
/// return values, arguments, and aggregate fields.
pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8]Class {
    // Canonical "pass/return via memory" answer.
    const memory_class = [_]Class{
        .memory, .none, .none, .none,
        .none, .none, .none, .none,
    };
    var result = [1]Class{.none} ** 8;
    switch (ty.zigTypeTag(zcu)) {
        .Pointer => switch (ty.ptrSize(zcu)) {
            // A slice is a (pointer, length) pair: two INTEGER eightbytes.
            .Slice => {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            },
            else => {
                result[0] = .integer;
                return result;
            },
        },
        // Integers: one INTEGER eightbyte per started 64 bits, up to 256
        // bits; anything wider goes to memory.
        .Int, .Enum, .ErrorSet => {
            const bits = ty.intInfo(zcu).bits;
            if (bits <= 64) {
                result[0] = .integer;
                return result;
            }
            if (bits <= 128) {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            }
            if (bits <= 192) {
                result[0] = .integer;
                result[1] = .integer;
                result[2] = .integer;
                return result;
            }
            if (bits <= 256) {
                result[0] = .integer;
                result[1] = .integer;
                result[2] = .integer;
                result[3] = .integer;
                return result;
            }
            return memory_class;
        },
        .Bool, .Void, .NoReturn => {
            result[0] = .integer;
            return result;
        },
        .Float => switch (ty.floatBits(target)) {
            16 => {
                if (ctx == .field) {
                    result[0] = .memory;
                } else {
                    // TODO clang doesn't allow __fp16 as .ret or .arg
                    result[0] = .sse;
                }
                return result;
            },
            32 => {
                result[0] = .float;
                return result;
            },
            64 => {
                result[0] = .sse;
                return result;
            },
            128 => {
                // "Arguments of types __float128, _Decimal128 and __m128 are
                // split into two halves. The least significant ones belong
                // to class SSE, the most significant one to class SSEUP."
                result[0] = .sse;
                result[1] = .sseup;
                return result;
            },
            80 => {
                // "The 64-bit mantissa of arguments of type long double
                // belongs to classX87, the 16-bit exponent plus 6 bytes
                // of padding belongs to class X87UP."
                result[0] = .x87;
                result[1] = .x87up;
                return result;
            },
            else => unreachable,
        },
        .Vector => {
            const elem_ty = ty.childType(zcu);
            const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
            // Vectors of bool (predicate vectors) have their own rules.
            if (elem_ty.toIntern() == .bool_type) {
                if (bits <= 32) return .{
                    .integer, .none, .none, .none,
                    .none, .none, .none, .none,
                };
                if (bits <= 64) return .{
                    .sse, .none, .none, .none,
                    .none, .none, .none, .none,
                };
                if (ctx == .arg) {
                    if (bits <= 128) return .{
                        .integer_per_element, .none, .none, .none,
                        .none, .none, .none, .none,
                    };
                    if (bits <= 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{
                        .integer_per_element, .none, .none, .none,
                        .none, .none, .none, .none,
                    };
                    if (bits <= 512 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{
                        .integer_per_element, .none, .none, .none,
                        .none, .none, .none, .none,
                    };
                }
                return memory_class;
            }
            if (bits <= 64) return .{
                .sse, .none, .none, .none,
                .none, .none, .none, .none,
            };
            if (bits <= 128) return .{
                .sse, .sseup, .none, .none,
                .none, .none, .none, .none,
            };
            // Without AVX, vector arguments wider than 128 bits go in memory.
            if (ctx == .arg and !std.Target.x86.featureSetHas(target.cpu.features, .avx)) return memory_class;
            if (bits <= 192) return .{
                .sse, .sseup, .sseup, .none,
                .none, .none, .none, .none,
            };
            if (bits <= 256) return .{
                .sse, .sseup, .sseup, .sseup,
                .none, .none, .none, .none,
            };
            // Without AVX-512, vector arguments wider than 256 bits go in memory.
            if (ctx == .arg and !std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return memory_class;
            if (bits <= 320) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .none, .none, .none,
            };
            if (bits <= 384) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .none, .none,
            };
            if (bits <= 448) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .sseup, .none,
            };
            // LLVM always returns vectors byval
            if (bits <= 512 or ctx == .ret) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .sseup, .sseup,
            };
            return memory_class;
        },
        .Optional => {
            // Only pointer-like optionals (?*T and friends) fit in a
            // register; other optionals carry a separate tag and go to memory.
            if (ty.isPtrLikeOptional(zcu)) {
                result[0] = .integer;
                return result;
            }
            return memory_class;
        },
        .Struct, .Union => {
            // "If the size of an object is larger than eight eightbytes, or
            // it contains unaligned fields, it has class MEMORY"
            // "If the size of the aggregate exceeds a single eightbyte, each is classified
            // separately.".
            const ty_size = ty.abiSize(zcu);
            switch (ty.containerLayout(zcu)) {
                .auto, .@"extern" => {},
                // Packed containers behave like integers of the same size.
                .@"packed" => {
                    assert(ty_size <= 16);
                    result[0] = .integer;
                    if (ty_size > 8) result[1] = .integer;
                    return result;
                },
            }
            if (ty_size > 64)
                return memory_class;
            // Classify field-by-field into `result` (offset 0: `ty` is the
            // outermost aggregate here).
            _ = if (zcu.typeToStruct(ty)) |loaded_struct|
                classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
            else if (zcu.typeToUnion(ty)) |loaded_union|
                classifySystemVUnion(&result, 0, loaded_union, zcu, target)
            else
                unreachable;
            // Post-merger cleanup
            // "If one of the classes is MEMORY, the whole argument is passed in memory"
            // "If X87UP is not preceded by X87, the whole argument is passed in memory."
            for (result, 0..) |class, i| switch (class) {
                .memory => return memory_class,
                .x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
                else => continue,
            };
            // "If the size of the aggregate exceeds two eightbytes and the first eight-
            // byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole argument
            // is passed in memory."
            if (ty_size > 16 and (result[0] != .sse or
                std.mem.indexOfNone(Class, result[1..], &.{ .sseup, .none }) != null)) return memory_class;
            // "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
            // NOTE(review): this relies on result[0] never being .sseup at
            // this point (otherwise i - 1 would underflow) — appears
            // guaranteed by how field classes are produced, but worth confirming.
            for (&result, 0..) |*item, i| {
                if (item.* == .sseup) switch (result[i - 1]) {
                    .sse, .sseup => continue,
                    else => item.* = .sse,
                };
            }
            return result;
        },
        // Bare arrays: register-sized ones act like integers; larger go to memory.
        .Array => {
            const ty_size = ty.abiSize(zcu);
            if (ty_size <= 8) {
                result[0] = .integer;
                return result;
            }
            if (ty_size <= 16) {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            }
            return memory_class;
        },
        else => unreachable,
    }
}
/// Classify all runtime fields of `loaded_struct` into `result`, where the
/// struct itself begins `starting_byte_offset` bytes into the outermost
/// aggregate. Returns the byte offset just past the struct (its end,
/// rounded up to its alignment).
fn classifySystemVStruct(
    result: *[8]Class,
    starting_byte_offset: u64,
    loaded_struct: InternPool.LoadedStructType,
    zcu: *Zcu,
    target: std.Target,
) u64 {
    const ip = &zcu.intern_pool;
    var byte_offset = starting_byte_offset;
    var field_it = loaded_struct.iterateRuntimeOrder(ip);
    while (field_it.next()) |field_index| {
        const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
        const field_align = loaded_struct.fieldAlign(ip, field_index);
        // Advance to the field's start: explicit alignment if set, otherwise
        // the field type's natural ABI alignment.
        byte_offset = std.mem.alignForward(
            u64,
            byte_offset,
            field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
        );
        // Non-packed nested aggregates are classified by recursing field-by-
        // field; packed ones fall through and are classified as a unit.
        if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
            switch (field_loaded_struct.layout) {
                .auto, .@"extern" => {
                    byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
                    continue;
                },
                .@"packed" => {},
            }
        } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
            switch (field_loaded_union.getLayout(ip)) {
                .auto, .@"extern" => {
                    byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
                    continue;
                },
                .@"packed" => {},
            }
        }
        // Merge the field's own classes into the eightbytes it spans.
        const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
        for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
            result_class.* = result_class.combineSystemV(field_class);
        byte_offset += field_ty.abiSize(zcu);
    }
    const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
    // Sanity check: the recorded struct size must equal the offset we
    // reached, padded to the struct's alignment.
    std.debug.assert(final_byte_offset == std.mem.alignForward(
        u64,
        byte_offset,
        loaded_struct.flagsPtr(ip).alignment.toByteUnits().?,
    ));
    return final_byte_offset;
}
/// Classify every field of `loaded_union` into `result`. All union fields
/// begin at the same `starting_byte_offset`, so their classes are merged
/// over the same eightbytes. Returns the offset just past the union.
fn classifySystemVUnion(
    result: *[8]Class,
    starting_byte_offset: u64,
    loaded_union: InternPool.LoadedUnionType,
    zcu: *Zcu,
    target: std.Target,
) u64 {
    const ip = &zcu.intern_pool;
    for (0..loaded_union.field_types.len) |field_index| {
        const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
        // Non-packed nested aggregates recurse; packed ones fall through
        // and are classified as a unit.
        if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
            switch (field_loaded_struct.layout) {
                .auto, .@"extern" => {
                    _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
                    continue;
                },
                .@"packed" => {},
            }
        } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
            switch (field_loaded_union.getLayout(ip)) {
                .auto, .@"extern" => {
                    _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
                    continue;
                },
                .@"packed" => {},
            }
        }
        // Merge the field's classes into the union's eightbytes.
        const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
        for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
            result_class.* = result_class.combineSystemV(field_class);
    }
    return starting_byte_offset + loaded_union.size(ip).*;
}
/// Register conventions for the System V x86-64 calling convention.
pub const SysV = struct {
    /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
    /// for anything else but stack offset tracking therefore we exclude them from this set.
    pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 };
    /// These registers need to be preserved (saved on the stack) and restored by the caller before
    /// the caller relinquishes control to a subroutine via call instruction (or similar).
    /// In other words, these registers are free to use by the callee.
    pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 } ++ x87_regs ++ sse_avx_regs;
    /// Integer argument registers, in ABI order (rdi first).
    pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
    /// The eight SSE/AVX argument registers (stored as their ymm aliases).
    pub const c_abi_sse_param_regs = sse_avx_regs[0..8].*;
    /// Integer return registers: rax, then rdx for a second eightbyte.
    pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
    /// The two SSE/AVX return registers (as ymm aliases).
    pub const c_abi_sse_return_regs = sse_avx_regs[0..2].*;
};
/// Register conventions for the Windows x64 calling convention.
pub const Win64 = struct {
    /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
    /// for anything else but stack offset tracking therefore we exclude them from this set.
    pub const callee_preserved_regs = [_]Register{ .rbx, .rsi, .rdi, .r12, .r13, .r14, .r15 };
    /// These registers need to be preserved (saved on the stack) and restored by the caller before
    /// the caller relinquishes control to a subroutine via call instruction (or similar).
    /// In other words, these registers are free to use by the callee.
    pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .r8, .r9, .r10, .r11 } ++ x87_regs ++ sse_avx_regs;
    /// Integer argument registers, in ABI order (rcx first).
    pub const c_abi_int_param_regs = [_]Register{ .rcx, .rdx, .r8, .r9 };
    /// The four SSE/AVX argument registers (stored as their ymm aliases).
    pub const c_abi_sse_param_regs = sse_avx_regs[0..4].*;
    /// Single integer return register.
    pub const c_abi_int_return_regs = [_]Register{.rax};
    /// Single SSE/AVX return register (as its ymm alias).
    pub const c_abi_sse_return_regs = sse_avx_regs[0..1].*;
};
/// Map the generic calling conventions (.Unspecified / .C) onto the concrete
/// ABI for `target`; every other convention is passed through untouched.
pub fn resolveCallingConvention(
    cc: std.builtin.CallingConvention,
    target: std.Target,
) std.builtin.CallingConvention {
    switch (cc) {
        .Unspecified, .C => {},
        else => return cc,
    }
    return if (target.os.tag == .windows) .Win64 else .SysV;
}
/// Registers the callee must save/restore under `cc` (must be resolved).
pub fn getCalleePreservedRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.callee_preserved_regs,
        .SysV => &SysV.callee_preserved_regs,
        else => unreachable,
    };
}
/// Registers the caller must save around a call under `cc` (must be resolved).
pub fn getCallerPreservedRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.caller_preserved_regs,
        .SysV => &SysV.caller_preserved_regs,
        else => unreachable,
    };
}
/// Integer argument registers for C-ABI calls under `cc` (must be resolved).
pub fn getCAbiIntParamRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.c_abi_int_param_regs,
        .SysV => &SysV.c_abi_int_param_regs,
        else => unreachable,
    };
}
/// SSE argument registers for C-ABI calls under `cc` (must be resolved).
pub fn getCAbiSseParamRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.c_abi_sse_param_regs,
        .SysV => &SysV.c_abi_sse_param_regs,
        else => unreachable,
    };
}
/// Integer return registers for C-ABI calls under `cc` (must be resolved).
pub fn getCAbiIntReturnRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.c_abi_int_return_regs,
        .SysV => &SysV.c_abi_int_return_regs,
        else => unreachable,
    };
}
/// SSE return registers for C-ABI calls under `cc` (must be resolved).
pub fn getCAbiSseReturnRegs(cc: std.builtin.CallingConvention) []const Register {
    return switch (cc) {
        .Win64 => &Win64.c_abi_sse_return_regs,
        .SysV => &SysV.c_abi_sse_return_regs,
        else => unreachable,
    };
}
// General-purpose registers available to the register allocator. The stack
// and frame pointers (rsp, rbp) are not in this list.
const gp_regs = [_]Register{
    .rax, .rcx, .rdx, .rbx, .rsi, .rdi, .r8, .r9, .r10, .r11, .r12, .r13, .r14, .r15,
};
// The full x87 floating-point register stack.
const x87_regs = [_]Register{
    .st0, .st1, .st2, .st3, .st4, .st5, .st6, .st7,
};
// AVX vector registers (ymm0..ymm15).
const sse_avx_regs = [_]Register{
    .ymm0, .ymm1, .ymm2, .ymm3, .ymm4, .ymm5, .ymm6, .ymm7,
    .ymm8, .ymm9, .ymm10, .ymm11, .ymm12, .ymm13, .ymm14, .ymm15,
};
// Everything the allocator may hand out. NOTE(review): the last x87 register
// (st7) is deliberately sliced off here — presumably reserved as scratch;
// confirm against the x87 codegen.
const allocatable_regs = gp_regs ++ x87_regs[0 .. x87_regs.len - 1] ++ sse_avx_regs;
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, allocatable_regs);
// Register classes
const RegisterBitSet = RegisterManager.RegisterBitSet;
/// Comptime-computed bit sets that partition `allocatable_regs` by register
/// class; bit i corresponds to allocatable_regs[i].
pub const RegisterClass = struct {
    pub const gp: RegisterBitSet = blk: {
        var bits = RegisterBitSet.initEmpty();
        for (allocatable_regs, 0..) |reg, i| {
            if (reg.class() == .general_purpose) bits.set(i);
        }
        break :blk bits;
    };
    pub const x87: RegisterBitSet = blk: {
        var bits = RegisterBitSet.initEmpty();
        for (allocatable_regs, 0..) |reg, i| {
            if (reg.class() == .x87) bits.set(i);
        }
        break :blk bits;
    };
    pub const sse: RegisterBitSet = blk: {
        var bits = RegisterBitSet.initEmpty();
        for (allocatable_regs, 0..) |reg, i| {
            if (reg.class() == .sse) bits.set(i);
        }
        break :blk bits;
    };
};
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;
const InternPool = @import("../../InternPool.zig");
const Register = @import("bits.zig").Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
const Value = @import("../../Value.zig");
const Zcu = @import("../../Module.zig");
| https://raw.githubusercontent.com/kassane/zig-mos-bootstrap/19aac4779b9e93b0e833402c26c93cfc13bb94e2/zig/src/arch/x86_64/abi.zig |
// This file is part of river, a dynamic tiling wayland compositor.
//
// Copyright 2020 The River Developers
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
const std = @import("std");
const posix = std.posix;
const c = @import("../c.zig");
const util = @import("../util.zig");
const process = @import("../process.zig");
const Error = @import("../command.zig").Error;
const Seat = @import("../Seat.zig");
/// Spawn a program.
/// args[1] is handed verbatim to "/bin/sh -c". A double fork is used: the
/// intermediate child exits immediately (and is reaped below), so the
/// grandchild running the command is reparented to init and the compositor
/// never has to wait for it.
pub fn spawn(
    _: *Seat,
    args: []const [:0]const u8,
    out: *?[]const u8,
) Error!void {
    // Exactly one argument (the shell command line) must follow "spawn".
    if (args.len < 2) return Error.NotEnoughArguments;
    if (args.len > 2) return Error.TooManyArguments;
    // Null-terminated argv for execve: sh -c '<command>'.
    const child_args = [_:null]?[*:0]const u8{ "/bin/sh", "-c", args[1], null };
    const pid = posix.fork() catch {
        out.* = try std.fmt.allocPrint(util.gpa, "fork/execve failed", .{});
        return Error.Other;
    };
    if (pid == 0) {
        // Intermediate child: drop compositor-inherited state, fork the
        // grandchild that will exec the command, then exit right away.
        process.cleanupChild();
        const pid2 = posix.fork() catch c._exit(1);
        if (pid2 == 0) {
            // Grandchild: become the spawned program. On failure we must
            // _exit rather than return into compositor code.
            posix.execveZ("/bin/sh", &child_args, std.c.environ) catch c._exit(1);
        }
        c._exit(0);
    }
    // Wait for the intermediate child; a non-zero or abnormal exit means one
    // of the forks (or the execve setup) failed.
    const ret = posix.waitpid(pid, 0);
    if (!posix.W.IFEXITED(ret.status) or
        (posix.W.IFEXITED(ret.status) and posix.W.EXITSTATUS(ret.status) != 0))
    {
        out.* = try std.fmt.allocPrint(util.gpa, "fork/execve failed", .{});
        return Error.Other;
    }
}
| https://raw.githubusercontent.com/koonix/river/4a96c2d18dc5e33917f134bc632b74da149e4812/river/command/spawn.zig |
const Vec4 = @import("math3d.zig").Vec4;
/// One tetromino kind: its display name, render color, and the cell
/// occupancy of each of its four rotation states.
pub const Piece = struct {
    // Single-character identifier ('I', 'O', 'T', 'J', 'L', 'S' or 'Z').
    name: u8,
    // RGBA color used when drawing this piece's cells.
    color: Vec4,
    // layout[rotation] is a 4x4 grid of occupied cells, one grid per
    // rotation state.
    layout: [4][4][4]bool,
};
// Shorthands keeping the layout tables below readable.
const F = false;
const T = true;
/// The seven standard tetrominoes with their classic colors. Every piece
/// pre-computes all four rotation states as 4x4 occupancy grids, so rotation
/// at runtime is just an index change.
pub const pieces = [_]Piece{
    // 'I' (cyan): the straight bar. Only two distinct orientations, so
    // rotations 0/2 and 1/3 repeat.
    Piece{
        .name = 'I',
        .color = Vec4{
            .data = [_]f32{ 0.0 / 255.0, 255.0 / 255.0, 255.0 / 255.0, 1.0 },
        },
        .layout = [_][4][4]bool{
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, T, T },
                [_]bool{ F, F, F, F },
            },
            [_][4]bool{
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, T, T },
                [_]bool{ F, F, F, F },
            },
            [_][4]bool{
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
            },
        },
    },
    // 'O' (yellow): the 2x2 square; rotation leaves it unchanged, so all
    // four entries are identical.
    Piece{
        .name = 'O',
        .color = Vec4{
            .data = [_]f32{ 255.0 / 255.0, 255.0 / 255.0, 0.0 / 255.0, 1.0 },
        },
        .layout = [_][4][4]bool{
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ T, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ T, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ T, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ T, T, F, F },
            },
        },
    },
    // 'T' (magenta): four distinct rotations.
    Piece{
        .name = 'T',
        .color = Vec4{
            .data = [_]f32{ 255.0 / 255.0, 0.0 / 255.0, 255.0 / 255.0, 1.0 },
        },
        .layout = [_][4][4]bool{
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, T, F },
                [_]bool{ F, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ F, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ T, T, T, F },
                [_]bool{ F, F, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, T, F },
                [_]bool{ F, T, F, F },
            },
        },
    },
    // 'J' (blue): four distinct rotations.
    Piece{
        .name = 'J',
        .color = Vec4{
            .data = [_]f32{ 0.0 / 255.0, 0.0 / 255.0, 255.0 / 255.0, 1.0 },
        },
        .layout = [_][4][4]bool{
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ T, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ T, F, F, F },
                [_]bool{ T, T, T, F },
                [_]bool{ F, F, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, T, T, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, T, F },
                [_]bool{ F, F, T, F },
            },
        },
    },
    // 'L' (orange): four distinct rotations.
    Piece{
        .name = 'L',
        .color = Vec4{
            .data = [_]f32{ 255.0 / 255.0, 128.0 / 255.0, 0.0 / 255.0, 1.0 },
        },
        .layout = [_][4][4]bool{
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, T, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, T, F },
                [_]bool{ T, F, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ F, T, F, F },
                [_]bool{ F, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, T, F },
                [_]bool{ T, T, T, F },
                [_]bool{ F, F, F, F },
            },
        },
    },
    // 'S' (green): rotations repeat in pairs (0/2 and 1/3).
    Piece{
        .name = 'S',
        .color = Vec4{
            .data = [_]f32{ 0.0 / 255.0, 255.0 / 255.0, 0.0 / 255.0, 1.0 },
        },
        .layout = [_][4][4]bool{
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ F, T, T, F },
                [_]bool{ T, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ T, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ F, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ F, T, T, F },
                [_]bool{ T, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ T, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ F, T, F, F },
            },
        },
    },
    // 'Z' (red): rotations repeat in pairs (0/2 and 1/3).
    Piece{
        .name = 'Z',
        .color = Vec4{
            .data = [_]f32{ 255.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0, 1.0 },
        },
        .layout = [_][4][4]bool{
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ F, T, T, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, T, F },
                [_]bool{ F, T, T, F },
                [_]bool{ F, T, F, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, F, F },
                [_]bool{ T, T, F, F },
                [_]bool{ F, T, T, F },
            },
            [_][4]bool{
                [_]bool{ F, F, F, F },
                [_]bool{ F, F, T, F },
                [_]bool{ F, T, T, F },
                [_]bool{ F, T, F, F },
            },
        },
    },
};
| https://raw.githubusercontent.com/ziglang/gotta-go-fast/c915c45c5afed9a2e2de4f4484acba2df5090c3a/src/tetris/tetris/src/pieces.zig |
const Vec4 = @import("math3d.zig").Vec4;
pub const Piece = struct {
name: u8,
color: Vec4,
layout: [4][4][4]bool,
};
const F = false;
const T = true;
pub const pieces = [_]Piece{
Piece{
.name = 'I',
.color = Vec4{
.data = [_]f32{ 0.0 / 255.0, 255.0 / 255.0, 255.0 / 255.0, 1.0 },
},
.layout = [_][4][4]bool{
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, T, T },
[_]bool{ F, F, F, F },
},
[_][4]bool{
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, T, T },
[_]bool{ F, F, F, F },
},
[_][4]bool{
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
},
},
},
Piece{
.name = 'O',
.color = Vec4{
.data = [_]f32{ 255.0 / 255.0, 255.0 / 255.0, 0.0 / 255.0, 1.0 },
},
.layout = [_][4][4]bool{
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ T, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ T, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ T, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ T, T, F, F },
},
},
},
Piece{
.name = 'T',
.color = Vec4{
.data = [_]f32{ 255.0 / 255.0, 0.0 / 255.0, 255.0 / 255.0, 1.0 },
},
.layout = [_][4][4]bool{
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, T, F },
[_]bool{ F, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, T, F, F },
[_]bool{ T, T, F, F },
[_]bool{ F, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, T, F, F },
[_]bool{ T, T, T, F },
[_]bool{ F, F, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, T, F },
[_]bool{ F, T, F, F },
},
},
},
Piece{
.name = 'J',
.color = Vec4{
.data = [_]f32{ 0.0 / 255.0, 0.0 / 255.0, 255.0 / 255.0, 1.0 },
},
.layout = [_][4][4]bool{
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
[_]bool{ T, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ T, F, F, F },
[_]bool{ T, T, T, F },
[_]bool{ F, F, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, T, T, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, T, F },
[_]bool{ F, F, T, F },
},
},
},
Piece{
.name = 'L',
.color = Vec4{
.data = [_]f32{ 255.0 / 255.0, 128.0 / 255.0, 0.0 / 255.0, 1.0 },
},
.layout = [_][4][4]bool{
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, T, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, T, F },
[_]bool{ T, F, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ F, T, F, F },
[_]bool{ F, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, T, F },
[_]bool{ T, T, T, F },
[_]bool{ F, F, F, F },
},
},
},
Piece{
.name = 'S',
.color = Vec4{
.data = [_]f32{ 0.0 / 255.0, 255.0 / 255.0, 0.0 / 255.0, 1.0 },
},
.layout = [_][4][4]bool{
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ F, T, T, F },
[_]bool{ T, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ T, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ F, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ F, T, T, F },
[_]bool{ T, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ T, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ F, T, F, F },
},
},
},
Piece{
.name = 'Z',
.color = Vec4{
.data = [_]f32{ 255.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0, 1.0 },
},
.layout = [_][4][4]bool{
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ F, T, T, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, T, F },
[_]bool{ F, T, T, F },
[_]bool{ F, T, F, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, F, F },
[_]bool{ T, T, F, F },
[_]bool{ F, T, T, F },
},
[_][4]bool{
[_]bool{ F, F, F, F },
[_]bool{ F, F, T, F },
[_]bool{ F, T, T, F },
[_]bool{ F, T, F, F },
},
},
},
};
| https://raw.githubusercontent.com/Rafaelmdcarneiro/tetris-zig/078142b33feb7363bebbdc4666718635c4c6969b/src/pieces.zig |
const std = @import("std");
const print = @import("std").debug.print;
const r2pipe = @import("src/r2pipe.zig");
/// Demo entry point: talks to radare2 over r2pipe. Only the "inside r2"
/// variant is exercised; swap the comments to try spawn mode instead.
pub fn main() !void {
    try inr2();
    // try inspawn();
}
/// r2pipe with an empty path — presumably attaches to the r2 instance this
/// program was launched from (see inspawn() for the spawn-mode variant).
/// Runs one command and prints its output.
fn inr2() !void {
    const r2 = try r2pipe.open("");
    const res = try r2.cmd("?E Hello World");
    print("Hello, {s}\n{s}\n", .{ "world", res });
    // NOTE(review): unlike inspawn(), no r2.quit() here — confirm whether an
    // attached-mode handle needs explicit cleanup.
}
/// r2pipe in spawn mode: opens a fresh r2 session on /bin/ls, runs one
/// command, prints its output, then shuts the session down with quit().
fn inspawn() !void {
    const r2 = try r2pipe.open("/bin/ls");
    const res = try r2.cmd("?E Hello World");
    print("Hello, {s}\n{s}\n", .{ "world", res });
    r2.quit();
}
| https://raw.githubusercontent.com/2lambda123/radare2-r2pipe/bab6a251f07adcb90ffec715f8f4aeaf9addbf3f/zig/main.zig |
//
// One of the more common uses of 'comptime' function parameters is
// passing a type to a function:
//
// fn foo(comptime MyType: type) void { ... }
//
// In fact, types are ONLY available at compile time, so the
// 'comptime' keyword is required here.
//
// Please take a moment to put on the wizard hat which has been
// provided for you. We're about to use this ability to implement
// a generic function.
//
const print = @import("std").debug.print;
pub fn main() void {
    // Each call instantiates makeSequence() for a distinct (type, size)
    // pair, producing three arrays of different types and lengths — all
    // resolved at compile time.
    const seq_u8 = makeSequence(u8, 3); // a [3]u8
    const seq_u32 = makeSequence(u32, 5); // a [5]u32
    const seq_i64 = makeSequence(i64, 7); // a [7]i64
    print("s1={any}, s2={any}, s3={any}\n", .{ seq_u8, seq_u32, seq_i64 });
}
// This function is pretty wild because it executes at runtime
// and is part of the final compiled program. The function is
// compiled with unchanging data sizes and types.
//
// And yet it ALSO allows for different sizes and types. This
// seems paradoxical. How could both things be true?
//
// To accomplish this, the Zig compiler actually generates a
// separate copy of the function for every size/type combination!
// So in this case, three different functions will be generated
// for you, each with machine code that handles that specific
// data size and type.
//
// Please fix this function so that the 'size' parameter:
//
// 1) Is guaranteed to be known at compile time.
// 2) Sets the size of the array of type T (which is the
// sequence we're creating and returning).
//
// Builds the 1-based sequence {1, 2, ..., size} as an array of T. Both
// parameters are comptime: T because types only exist at compile time, and
// size because it fixes the length of the return type [size]T.
fn makeSequence(comptime T: type, comptime size: usize) [size]T {
    var sequence: [size]T = undefined;
    for (&sequence, 1..) |*slot, value| {
        slot.* = @intCast(value);
    }
    return sequence;
}
| https://raw.githubusercontent.com/scbizu/zerglings/b8f519f0c52cc9b44e262ae242bb923467146d49/exercises/069_comptime4.zig |
//
// One of the more common uses of 'comptime' function parameters is
// passing a type to a function:
//
// fn foo(comptime MyType: type) void { ... }
//
// In fact, types are ONLY available at compile time, so the
// 'comptime' keyword is required here.
//
// Please take a moment to put on the wizard hat which has been
// provided for you. We're about to use this ability to implement
// a generic function.
//
const print = @import("std").debug.print;
pub fn main() void {
    // Each call instantiates makeSequence() for a distinct (type, size)
    // pair, producing three arrays of different types and lengths — all
    // resolved at compile time.
    const seq_u8 = makeSequence(u8, 3); // a [3]u8
    const seq_u32 = makeSequence(u32, 5); // a [5]u32
    const seq_i64 = makeSequence(i64, 7); // a [7]i64
    print("s1={any}, s2={any}, s3={any}\n", .{ seq_u8, seq_u32, seq_i64 });
}
// This function is pretty wild because it executes at runtime
// and is part of the final compiled program. The function is
// compiled with unchanging data sizes and types.
//
// And yet it ALSO allows for different sizes and types. This
// seems paradoxical. How could both things be true?
//
// To accomplish this, the Zig compiler actually generates a
// separate copy of the function for every size/type combination!
// So in this case, three different functions will be generated
// for you, each with machine code that handles that specific
// data size and type.
//
// Please fix this function so that the 'size' parameter:
//
// 1) Is guaranteed to be known at compile time.
// 2) Sets the size of the array of type T (which is the
// sequence we're creating and returning).
//
// Builds the 1-based sequence {1, 2, ..., size} as an array of T. Both
// parameters are comptime: T because types only exist at compile time, and
// size because it fixes the length of the return type [size]T.
fn makeSequence(comptime T: type, comptime size: usize) [size]T {
    var sequence: [size]T = undefined;
    for (&sequence, 1..) |*slot, value| {
        slot.* = @intCast(value);
    }
    return sequence;
}
| https://raw.githubusercontent.com/ErickDevv/ziglings/fd5adcf1282f056ef916321e63c3d1efd60c1d0d/exercises/069_comptime4.zig |
//
// One of the more common uses of 'comptime' function parameters is
// passing a type to a function:
//
// fn foo(comptime MyType: type) void { ... }
//
// In fact, types are ONLY available at compile time, so the
// 'comptime' keyword is required here.
//
// Please take a moment to put on the wizard hat which has been
// provided for you. We're about to use this ability to implement
// a generic function.
//
const print = @import("std").debug.print;
pub fn main() void {
    // Each call instantiates makeSequence() for a distinct (type, size)
    // pair, producing three arrays of different types and lengths — all
    // resolved at compile time.
    const seq_u8 = makeSequence(u8, 3); // a [3]u8
    const seq_u32 = makeSequence(u32, 5); // a [5]u32
    const seq_i64 = makeSequence(i64, 7); // a [7]i64
    print("s1={any}, s2={any}, s3={any}\n", .{ seq_u8, seq_u32, seq_i64 });
}
// This function is pretty wild because it executes at runtime
// and is part of the final compiled program. The function is
// compiled with unchanging data sizes and types.
//
// And yet it ALSO allows for different sizes and types. This
// seems paradoxical. How could both things be true?
//
// To accomplish this, the Zig compiler actually generates a
// separate copy of the function for every size/type combination!
// So in this case, three different functions will be generated
// for you, each with machine code that handles that specific
// data size and type.
//
// Please fix this function so that the 'size' parameter:
//
// 1) Is guaranteed to be known at compile time.
// 2) Sets the size of the array of type T (which is the
// sequence we're creating and returning).
//
// Builds the 1-based sequence {1, 2, ..., size} as an array of T. Both
// parameters are comptime: T because types only exist at compile time, and
// size because it fixes the length of the return type [size]T.
fn makeSequence(comptime T: type, comptime size: usize) [size]T {
    var sequence: [size]T = undefined;
    for (&sequence, 1..) |*slot, value| {
        slot.* = @intCast(value);
    }
    return sequence;
}
| https://raw.githubusercontent.com/AndrewJarrett/ziglings/ccbde8411969bd22059fa63d0371bc34b26343a1/exercises/069_comptime4.zig |
//
// One of the more common uses of 'comptime' function parameters is
// passing a type to a function:
//
// fn foo(comptime MyType: type) void { ... }
//
// In fact, types are ONLY available at compile time, so the
// 'comptime' keyword is required here.
//
// Please take a moment to put on the wizard hat which has been
// provided for you. We're about to use this ability to implement
// a generic function.
//
const print = @import("std").debug.print;
pub fn main() void {
    // Each call instantiates makeSequence() for a distinct (type, size)
    // pair, producing three arrays of different types and lengths — all
    // resolved at compile time.
    const seq_u8 = makeSequence(u8, 3); // a [3]u8
    const seq_u32 = makeSequence(u32, 5); // a [5]u32
    const seq_i64 = makeSequence(i64, 7); // a [7]i64
    print("s1={any}, s2={any}, s3={any}\n", .{ seq_u8, seq_u32, seq_i64 });
}
// This function is pretty wild because it executes at runtime
// and is part of the final compiled program. The function is
// compiled with unchanging data sizes and types.
//
// And yet it ALSO allows for different sizes and types. This
// seems paradoxical. How could both things be true?
//
// To accomplish this, the Zig compiler actually generates a
// separate copy of the function for every size/type combination!
// So in this case, three different functions will be generated
// for you, each with machine code that handles that specific
// data size and type.
//
// Please fix this function so that the 'size' parameter:
//
// 1) Is guaranteed to be known at compile time.
// 2) Sets the size of the array of type T (which is the
// sequence we're creating and returning).
//
// Builds the 1-based sequence {1, 2, ..., size} as an array of T. Both
// parameters are comptime: T because types only exist at compile time, and
// size because it fixes the length of the return type [size]T.
fn makeSequence(comptime T: type, comptime size: usize) [size]T {
    var sequence: [size]T = undefined;
    for (&sequence, 1..) |*slot, value| {
        slot.* = @intCast(value);
    }
    return sequence;
}
| https://raw.githubusercontent.com/thekorn/ziglings/11530b95d49af1a7adee0235dfe9b8efc4f009bc/exercises/069_comptime4.zig |
// Ported from musl, which is licensed under the MIT license:
// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/atan2f.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/atan2.c
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
/// Returns the arc-tangent of y/x. Dispatches at compile time to the
/// width-specific implementation below.
///
/// Special Cases:
///  - atan2(y, nan)     = nan
///  - atan2(nan, x)     = nan
///  - atan2(+0, x>=0)   = +0
///  - atan2(-0, x>=0)   = -0
///  - atan2(+0, x<=-0)  = +pi
///  - atan2(-0, x<=-0)  = -pi
///  - atan2(y>0, 0)     = +pi/2
///  - atan2(y<0, 0)     = -pi/2
///  - atan2(+inf, +inf) = +pi/4
///  - atan2(-inf, +inf) = -pi/4
///  - atan2(+inf, -inf) = 3pi/4
///  - atan2(-inf, -inf) = -3pi/4
///  - atan2(y, +inf)    = 0
///  - atan2(y>0, -inf)  = +pi
///  - atan2(y<0, -inf)  = -pi
///  - atan2(+inf, x)    = +pi/2
///  - atan2(-inf, x)    = -pi/2
pub fn atan2(comptime T: type, y: T, x: T) T {
    switch (T) {
        f32 => return atan2_32(y, x),
        f64 => return atan2_64(y, x),
        else => @compileError("atan2 not implemented for " ++ @typeName(T)),
    }
}
/// f32 atan2, ported from musl. Operands are classified via their raw bit
/// patterns so signed zeros, infinities and nan are handled exactly; the
/// general case reduces to atan(|y/x|) plus a quadrant correction.
fn atan2_32(y: f32, x: f32) f32 {
    const pi: f32 = 3.1415927410e+00;
    // Low-order tail of pi, used to keep the quadrant fixup correctly rounded.
    const pi_lo: f32 = -8.7422776573e-08;
    if (math.isNan(x) or math.isNan(y)) {
        return x + y;
    }
    var ix = @as(u32, @bitCast(x));
    var iy = @as(u32, @bitCast(y));
    // Fast path: x == 1.0 exactly, so atan2(y, 1) == atan(y).
    if (ix == 0x3F800000) {
        return math.atan(y);
    }
    // m = 2 * sign(x) + sign(y) indexes the four sign combinations below.
    const m = ((iy >> 31) & 1) | ((ix >> 30) & 2);
    ix &= 0x7FFFFFFF;
    iy &= 0x7FFFFFFF;
    if (iy == 0) {
        switch (m) {
            0, 1 => return y, // atan(+-0, +...)
            2 => return pi, // atan(+0, -...)
            3 => return -pi, // atan(-0, -...)
            else => unreachable,
        }
    }
    if (ix == 0) {
        if (m & 1 != 0) {
            return -pi / 2;
        } else {
            return pi / 2;
        }
    }
    if (ix == 0x7F800000) {
        if (iy == 0x7F800000) {
            switch (m) {
                0 => return pi / 4, // atan(+inf, +inf)
                1 => return -pi / 4, // atan(-inf, +inf)
                2 => return 3 * pi / 4, // atan(+inf, -inf)
                3 => return -3 * pi / 4, // atan(-inf, -inf)
                else => unreachable,
            }
        } else {
            switch (m) {
                0 => return 0.0, // atan(+..., +inf)
                1 => return -0.0, // atan(-..., +inf)
                2 => return pi, // atan(+..., -inf)
                3 => return -pi, // atan(-...f, -inf)
                else => unreachable,
            }
        }
    }
    // |y / x| > 0x1p26: the quotient is so large that atan saturates to pi/2.
    if (ix + (26 << 23) < iy or iy == 0x7F800000) {
        if (m & 1 != 0) {
            return -pi / 2;
        } else {
            return pi / 2;
        }
    }
    // z = atan(|y / x|) with correct underflow when the quotient is tiny.
    // (const, not var: z is computed once and never mutated — unmutated var
    // locals are a compile error in current Zig.)
    const z = z: {
        if ((m & 2) != 0 and iy + (26 << 23) < ix) {
            break :z 0.0;
        } else {
            break :z math.atan(@fabs(y / x));
        }
    };
    switch (m) {
        0 => return z, // atan(+, +)
        1 => return -z, // atan(-, +)
        2 => return pi - (z - pi_lo), // atan(+, -)
        3 => return (z - pi_lo) - pi, // atan(-, -)
        else => unreachable,
    }
}
/// f64 atan2, ported from musl. Same structure as atan2_32, except the
/// double is split into high (ix/iy) and low (lx/ly) 32-bit words for the
/// bit-pattern classification.
fn atan2_64(y: f64, x: f64) f64 {
    const pi: f64 = 3.1415926535897931160E+00;
    // Low-order tail of pi, used to keep the quadrant fixup correctly rounded.
    const pi_lo: f64 = 1.2246467991473531772E-16;
    if (math.isNan(x) or math.isNan(y)) {
        return x + y;
    }
    // const rather than var for every binding that is never mutated below
    // (unmutated var locals are a compile error in current Zig); only the
    // high words ix/iy are masked in place later.
    const ux = @as(u64, @bitCast(x));
    var ix = @as(u32, @intCast(ux >> 32));
    const lx = @as(u32, @intCast(ux & 0xFFFFFFFF));
    const uy = @as(u64, @bitCast(y));
    var iy = @as(u32, @intCast(uy >> 32));
    const ly = @as(u32, @intCast(uy & 0xFFFFFFFF));
    // Fast path: x == 1.0 exactly (high word matches, low word all zero).
    if ((ix -% 0x3FF00000) | lx == 0) {
        return math.atan(y);
    }
    // m = 2 * sign(x) + sign(y) indexes the four sign combinations below.
    const m = ((iy >> 31) & 1) | ((ix >> 30) & 2);
    ix &= 0x7FFFFFFF;
    iy &= 0x7FFFFFFF;
    if (iy | ly == 0) {
        switch (m) {
            0, 1 => return y, // atan(+-0, +...)
            2 => return pi, // atan(+0, -...)
            3 => return -pi, // atan(-0, -...)
            else => unreachable,
        }
    }
    if (ix | lx == 0) {
        if (m & 1 != 0) {
            return -pi / 2;
        } else {
            return pi / 2;
        }
    }
    if (ix == 0x7FF00000) {
        if (iy == 0x7FF00000) {
            switch (m) {
                0 => return pi / 4, // atan(+inf, +inf)
                1 => return -pi / 4, // atan(-inf, +inf)
                2 => return 3 * pi / 4, // atan(+inf, -inf)
                3 => return -3 * pi / 4, // atan(-inf, -inf)
                else => unreachable,
            }
        } else {
            switch (m) {
                0 => return 0.0, // atan(+..., +inf)
                1 => return -0.0, // atan(-..., +inf)
                2 => return pi, // atan(+..., -inf)
                3 => return -pi, // atan(-...f, -inf)
                else => unreachable,
            }
        }
    }
    // |y / x| > 0x1p64: the quotient is so large that atan saturates to pi/2.
    if (ix +% (64 << 20) < iy or iy == 0x7FF00000) {
        if (m & 1 != 0) {
            return -pi / 2;
        } else {
            return pi / 2;
        }
    }
    // z = atan(|y / x|) with correct underflow when the quotient is tiny.
    const z = z: {
        if ((m & 2) != 0 and iy +% (64 << 20) < ix) {
            break :z 0.0;
        } else {
            break :z math.atan(@fabs(y / x));
        }
    };
    switch (m) {
        0 => return z, // atan(+, +)
        1 => return -z, // atan(-, +)
        2 => return pi - (z - pi_lo), // atan(+, -)
        3 => return (z - pi_lo) - pi, // atan(-, -)
        else => unreachable,
    }
}
// The dispatcher must forward exactly to the width-specific implementations.
test "math.atan2" {
    try expect(atan2(f32, 0.2, 0.21) == atan2_32(0.2, 0.21));
    try expect(atan2(f64, 0.2, 0.21) == atan2_64(0.2, 0.21));
}
// Ordinary finite inputs; tolerance matches the six decimal places of the
// hard-coded expected values.
test "math.atan2_32" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f32, atan2_32(0.0, 0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(0.2, 0.2), 0.785398, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-0.2, 0.2), -0.785398, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(0.2, -0.2), 2.356194, epsilon))
    try expect(math.approxEqAbs(f32, atan2_32(-0.2, -0.2), -2.356194, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(0.34, -0.4), 2.437099, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(0.34, 1.243), 0.267001, epsilon));
}
test "math.atan2_64" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f64, atan2_64(0.0, 0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(0.2, 0.2), 0.785398, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-0.2, 0.2), -0.785398, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(0.2, -0.2), 2.356194, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-0.2, -0.2), -2.356194, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(0.34, -0.4), 2.437099, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(0.34, 1.243), 0.267001, epsilon));
}
// Special cases (zeros, infinities, nan) from the table in atan2's doc comment.
test "math.atan2_32.special" {
    const epsilon = 0.000001;
    try expect(math.isNan(atan2_32(1.0, math.nan(f32))));
    try expect(math.isNan(atan2_32(math.nan(f32), 1.0)))
    try expect(atan2_32(0.0, 5.0) == 0.0);
    try expect(atan2_32(-0.0, 5.0) == -0.0);
    try expect(math.approxEqAbs(f32, atan2_32(0.0, -5.0), math.pi, epsilon));
    //expect(math.approxEqAbs(f32, atan2_32(-0.0, -5.0), -math.pi, .{.rel=0,.abs=epsilon})); TODO support negative zero?
    try expect(math.approxEqAbs(f32, atan2_32(1.0, 0.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(1.0, -0.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-1.0, 0.0), -math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-1.0, -0.0), -math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(math.inf(f32), math.inf(f32)), math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-math.inf(f32), math.inf(f32)), -math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(math.inf(f32), -math.inf(f32)), 3.0 * math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-math.inf(f32), -math.inf(f32)), -3.0 * math.pi / 4.0, epsilon));
    try expect(atan2_32(1.0, math.inf(f32)) == 0.0);
    try expect(math.approxEqAbs(f32, atan2_32(1.0, -math.inf(f32)), math.pi, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-1.0, -math.inf(f32)), -math.pi, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(math.inf(f32), 1.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-math.inf(f32), 1.0), -math.pi / 2.0, epsilon));
}
test "math.atan2_64.special" {
    const epsilon = 0.000001;
    try expect(math.isNan(atan2_64(1.0, math.nan(f64))));
    try expect(math.isNan(atan2_64(math.nan(f64), 1.0)));
    try expect(atan2_64(0.0, 5.0) == 0.0);
    try expect(atan2_64(-0.0, 5.0) == -0.0);
    try expect(math.approxEqAbs(f64, atan2_64(0.0, -5.0), math.pi, epsilon));
    //expect(math.approxEqAbs(f64, atan2_64(-0.0, -5.0), -math.pi, .{.rel=0,.abs=epsilon})); TODO support negative zero?
    try expect(math.approxEqAbs(f64, atan2_64(1.0, 0.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(1.0, -0.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-1.0, 0.0), -math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-1.0, -0.0), -math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(math.inf(f64), math.inf(f64)), math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-math.inf(f64), math.inf(f64)), -math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(math.inf(f64), -math.inf(f64)), 3.0 * math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-math.inf(f64), -math.inf(f64)), -3.0 * math.pi / 4.0, epsilon));
    try expect(atan2_64(1.0, math.inf(f64)) == 0.0);
    try expect(math.approxEqAbs(f64, atan2_64(1.0, -math.inf(f64)), math.pi, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-1.0, -math.inf(f64)), -math.pi, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(math.inf(f64), 1.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-math.inf(f64), 1.0), -math.pi / 2.0, epsilon));
}
| https://raw.githubusercontent.com/mundusnine/FoundryTools_linux_x64/98e738bf92a416b255c9d11b78e8033071b52672/lib/std/math/atan2.zig |
// Ported from musl, which is licensed under the MIT license:
// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/atan2f.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/atan2.c
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
/// Returns the arc-tangent of y/x.
///
/// Special Cases:
/// - atan2(y, nan) = nan
/// - atan2(nan, x) = nan
/// - atan2(+0, x>=0) = +0
/// - atan2(-0, x>=0) = -0
/// - atan2(+0, x<=-0) = +pi
/// - atan2(-0, x<=-0) = -pi
/// - atan2(y>0, 0) = +pi/2
/// - atan2(y<0, 0) = -pi/2
/// - atan2(+inf, +inf) = +pi/4
/// - atan2(-inf, +inf) = -pi/4
/// - atan2(+inf, -inf) = 3pi/4
/// - atan2(-inf, -inf) = -3pi/4
/// - atan2(y, +inf) = 0
/// - atan2(y>0, -inf) = +pi
/// - atan2(y<0, -inf) = -pi
/// - atan2(+inf, x) = +pi/2
/// - atan2(-inf, x) = -pi/2
pub fn atan2(comptime T: type, y: T, x: T) T {
return switch (T) {
f32 => atan2_32(y, x),
f64 => atan2_64(y, x),
else => @compileError("atan2 not implemented for " ++ @typeName(T)),
};
}
fn atan2_32(y: f32, x: f32) f32 {
const pi: f32 = 3.1415927410e+00;
const pi_lo: f32 = -8.7422776573e-08;
if (math.isNan(x) or math.isNan(y)) {
return x + y;
}
var ix = @as(u32, @bitCast(x));
var iy = @as(u32, @bitCast(y));
// x = 1.0
if (ix == 0x3F800000) {
return math.atan(y);
}
// 2 * sign(x) + sign(y)
const m = ((iy >> 31) & 1) | ((ix >> 30) & 2);
ix &= 0x7FFFFFFF;
iy &= 0x7FFFFFFF;
if (iy == 0) {
switch (m) {
0, 1 => return y, // atan(+-0, +...)
2 => return pi, // atan(+0, -...)
3 => return -pi, // atan(-0, -...)
else => unreachable,
}
}
if (ix == 0) {
if (m & 1 != 0) {
return -pi / 2;
} else {
return pi / 2;
}
}
if (ix == 0x7F800000) {
if (iy == 0x7F800000) {
switch (m) {
0 => return pi / 4, // atan(+inf, +inf)
1 => return -pi / 4, // atan(-inf, +inf)
2 => return 3 * pi / 4, // atan(+inf, -inf)
3 => return -3 * pi / 4, // atan(-inf, -inf)
else => unreachable,
}
} else {
switch (m) {
0 => return 0.0, // atan(+..., +inf)
1 => return -0.0, // atan(-..., +inf)
2 => return pi, // atan(+..., -inf)
3 => return -pi, // atan(-...f, -inf)
else => unreachable,
}
}
}
// |y / x| > 0x1p26
if (ix + (26 << 23) < iy or iy == 0x7F800000) {
if (m & 1 != 0) {
return -pi / 2;
} else {
return pi / 2;
}
}
// z = atan(|y / x|) with correct underflow
var z = z: {
if ((m & 2) != 0 and iy + (26 << 23) < ix) {
break :z 0.0;
} else {
break :z math.atan(@fabs(y / x));
}
};
switch (m) {
0 => return z, // atan(+, +)
1 => return -z, // atan(-, +)
2 => return pi - (z - pi_lo), // atan(+, -)
3 => return (z - pi_lo) - pi, // atan(-, -)
else => unreachable,
}
}
// Computes atan(y/x) for f64. Same structure as atan2_32, but the 64-bit
// payload is split into a high word (ix/iy) and a low word (lx/ly) so the
// zero checks can cover the full mantissa.
fn atan2_64(y: f64, x: f64) f64 {
    // pi and a low-order correction term used when folding into quadrants
    // II/III.
    const pi: f64 = 3.1415926535897931160E+00;
    const pi_lo: f64 = 1.2246467991473531772E-16;
    if (math.isNan(x) or math.isNan(y)) {
        // NaN in either argument propagates.
        return x + y;
    }
    var ux = @as(u64, @bitCast(x));
    var ix = @as(u32, @intCast(ux >> 32));
    var lx = @as(u32, @intCast(ux & 0xFFFFFFFF));
    var uy = @as(u64, @bitCast(y));
    var iy = @as(u32, @intCast(uy >> 32));
    var ly = @as(u32, @intCast(uy & 0xFFFFFFFF));
    // x = 1.0
    if ((ix -% 0x3FF00000) | lx == 0) {
        return math.atan(y);
    }
    // 2 * sign(x) + sign(y)
    const m = ((iy >> 31) & 1) | ((ix >> 30) & 2);
    // Clear the sign bits: the classification below works on magnitudes.
    ix &= 0x7FFFFFFF;
    iy &= 0x7FFFFFFF;
    // y is +-0 (both words zero)
    if (iy | ly == 0) {
        switch (m) {
            0, 1 => return y, // atan(+-0, +...)
            2 => return pi, // atan(+0, -...)
            3 => return -pi, // atan(-0, -...)
            else => unreachable,
        }
    }
    // x is +-0 (y nonzero): result is +-pi/2 by the sign of y.
    if (ix | lx == 0) {
        if (m & 1 != 0) {
            return -pi / 2;
        } else {
            return pi / 2;
        }
    }
    // x is +-inf
    if (ix == 0x7FF00000) {
        if (iy == 0x7FF00000) {
            switch (m) {
                0 => return pi / 4, // atan(+inf, +inf)
                1 => return -pi / 4, // atan(-inf, +inf)
                2 => return 3 * pi / 4, // atan(+inf, -inf)
                3 => return -3 * pi / 4, // atan(-inf, -inf)
                else => unreachable,
            }
        } else {
            switch (m) {
                0 => return 0.0, // atan(+..., +inf)
                1 => return -0.0, // atan(-..., +inf)
                2 => return pi, // atan(+..., -inf)
                3 => return -pi, // atan(-..., -inf)
                else => unreachable,
            }
        }
    }
    // |y / x| > 0x1p64
    if (ix +% (64 << 20) < iy or iy == 0x7FF00000) {
        if (m & 1 != 0) {
            return -pi / 2;
        } else {
            return pi / 2;
        }
    }
    // z = atan(|y / x|) with correct underflow
    var z = z: {
        if ((m & 2) != 0 and iy +% (64 << 20) < ix) {
            // x < 0 and |y/x| underflows; treat the magnitude angle as 0.
            break :z 0.0;
        } else {
            break :z math.atan(@fabs(y / x));
        }
    };
    // Fold the first-quadrant angle into the quadrant selected by m.
    switch (m) {
        0 => return z, // atan(+, +)
        1 => return -z, // atan(-, +)
        2 => return pi - (z - pi_lo), // atan(+, -)
        3 => return (z - pi_lo) - pi, // atan(-, -)
        else => unreachable,
    }
}
// The generic dispatcher must agree with the width-specific implementations.
test "math.atan2" {
    try expect(atan2(f32, 0.2, 0.21) == atan2_32(0.2, 0.21));
    try expect(atan2(f64, 0.2, 0.21) == atan2_64(0.2, 0.21));
}
// Quadrant coverage for finite f32 inputs.
test "math.atan2_32" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f32, atan2_32(0.0, 0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(0.2, 0.2), 0.785398, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-0.2, 0.2), -0.785398, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(0.2, -0.2), 2.356194, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-0.2, -0.2), -2.356194, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(0.34, -0.4), 2.437099, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(0.34, 1.243), 0.267001, epsilon));
}
// Quadrant coverage for finite f64 inputs.
test "math.atan2_64" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f64, atan2_64(0.0, 0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(0.2, 0.2), 0.785398, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-0.2, 0.2), -0.785398, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(0.2, -0.2), 2.356194, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-0.2, -0.2), -2.356194, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(0.34, -0.4), 2.437099, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(0.34, 1.243), 0.267001, epsilon));
}
// Special cases (NaN, signed zero, infinities) documented on `atan2`.
test "math.atan2_32.special" {
    const epsilon = 0.000001;
    try expect(math.isNan(atan2_32(1.0, math.nan(f32))));
    try expect(math.isNan(atan2_32(math.nan(f32), 1.0)));
    try expect(atan2_32(0.0, 5.0) == 0.0);
    try expect(atan2_32(-0.0, 5.0) == -0.0);
    try expect(math.approxEqAbs(f32, atan2_32(0.0, -5.0), math.pi, epsilon));
    //expect(math.approxEqAbs(f32, atan2_32(-0.0, -5.0), -math.pi, .{.rel=0,.abs=epsilon})); TODO support negative zero?
    try expect(math.approxEqAbs(f32, atan2_32(1.0, 0.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(1.0, -0.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-1.0, 0.0), -math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-1.0, -0.0), -math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(math.inf(f32), math.inf(f32)), math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-math.inf(f32), math.inf(f32)), -math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(math.inf(f32), -math.inf(f32)), 3.0 * math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-math.inf(f32), -math.inf(f32)), -3.0 * math.pi / 4.0, epsilon));
    try expect(atan2_32(1.0, math.inf(f32)) == 0.0);
    try expect(math.approxEqAbs(f32, atan2_32(1.0, -math.inf(f32)), math.pi, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-1.0, -math.inf(f32)), -math.pi, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(math.inf(f32), 1.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f32, atan2_32(-math.inf(f32), 1.0), -math.pi / 2.0, epsilon));
}
// Same special cases for the f64 implementation.
test "math.atan2_64.special" {
    const epsilon = 0.000001;
    try expect(math.isNan(atan2_64(1.0, math.nan(f64))));
    try expect(math.isNan(atan2_64(math.nan(f64), 1.0)));
    try expect(atan2_64(0.0, 5.0) == 0.0);
    try expect(atan2_64(-0.0, 5.0) == -0.0);
    try expect(math.approxEqAbs(f64, atan2_64(0.0, -5.0), math.pi, epsilon));
    //expect(math.approxEqAbs(f64, atan2_64(-0.0, -5.0), -math.pi, .{.rel=0,.abs=epsilon})); TODO support negative zero?
    try expect(math.approxEqAbs(f64, atan2_64(1.0, 0.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(1.0, -0.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-1.0, 0.0), -math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-1.0, -0.0), -math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(math.inf(f64), math.inf(f64)), math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-math.inf(f64), math.inf(f64)), -math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(math.inf(f64), -math.inf(f64)), 3.0 * math.pi / 4.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-math.inf(f64), -math.inf(f64)), -3.0 * math.pi / 4.0, epsilon));
    try expect(atan2_64(1.0, math.inf(f64)) == 0.0);
    try expect(math.approxEqAbs(f64, atan2_64(1.0, -math.inf(f64)), math.pi, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-1.0, -math.inf(f64)), -math.pi, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(math.inf(f64), 1.0), math.pi / 2.0, epsilon));
    try expect(math.approxEqAbs(f64, atan2_64(-math.inf(f64), 1.0), -math.pi / 2.0, epsilon));
}
| https://raw.githubusercontent.com/matpx/daydream/018ad0c7caaf796d8a04b882fcbed39ccb7c9cd8/toolchain/zig/lib/std/math/atan2.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const compile = @import("compile.zig");
const Program = compile.Program;
const VmBacktrack = @import("vm_backtrack.zig").VmBacktrack;
const VmPike = @import("vm_pike.zig").VmPike;
const Input = @import("input.zig").Input;
/// Executes `prog` against `input`, selecting the backtracking VM when the
/// program qualifies for it and the Pike VM otherwise. Capture positions
/// are written into `slots`.
pub fn exec(allocator: Allocator, prog: Program, prog_start: usize, input: *Input, slots: *ArrayList(?usize)) !bool {
    const backtrack_ok = VmBacktrack.shouldExec(prog, input);
    if (backtrack_ok) {
        var vm = VmBacktrack.init(allocator);
        return vm.exec(prog, prog_start, input, slots);
    }
    var vm = VmPike.init(allocator);
    return vm.exec(prog, prog_start, input, slots);
}
| https://raw.githubusercontent.com/TemariVirus/AOC-2020/a32dcfb85655e733247c5c53b70d2f8837a6f468/lib/zig-regex/src/exec.zig |
const std = @import("std");
const builtin = @import("builtin");
pub const tb_packet_t = @import("tb_client/packet.zig").Packet;
pub const tb_packet_status_t = tb_packet_t.Status;
pub const tb_packet_acquire_status_t = @import("tb_client/context.zig").PacketAcquireStatus;
pub const tb_client_t = *anyopaque;
/// Status codes returned across the C ABI by the client init functions.
/// `success` must stay 0; other values map from `InitError` via
/// `init_error_to_status`.
pub const tb_status_t = enum(c_int) {
    success = 0,
    unexpected,
    out_of_memory,
    address_invalid,
    address_limit_exceeded,
    concurrency_max_invalid,
    system_resources,
    network_subsystem,
};
pub const tb_operation_t = StateMachine.Operation;
pub const tb_completion_t = *const fn (
context: usize,
client: tb_client_t,
packet: *tb_packet_t,
result_ptr: ?[*]const u8,
result_len: u32,
) callconv(.C) void;
const constants = @import("../../constants.zig");
const Storage = @import("../../storage.zig").Storage;
const MessageBus = @import("../../message_bus.zig").MessageBusClient;
const StateMachine = @import("../../state_machine.zig").StateMachineType(Storage, constants.state_machine_config);
const ContextType = @import("tb_client/context.zig").ContextType;
const ContextImplementation = @import("tb_client/context.zig").ContextImplementation;
pub const InitError = @import("tb_client/context.zig").Error;
const DefaultContext = blk: {
const Client = @import("../../vsr/client.zig").Client(StateMachine, MessageBus);
break :blk ContextType(Client);
};
const TestingContext = blk: {
const EchoClient = @import("tb_client/echo_client.zig").EchoClient(StateMachine, MessageBus);
break :blk ContextType(EchoClient);
};
/// Wraps a context implementation pointer in the opaque C client handle.
pub fn context_to_client(implementation: *ContextImplementation) tb_client_t {
    const handle: tb_client_t = @ptrCast(implementation);
    return handle;
}
/// Recovers the context implementation from the opaque C client handle.
/// Inverse of `context_to_client`.
fn client_to_context(tb_client: tb_client_t) *ContextImplementation {
    const context: *ContextImplementation = @ptrCast(@alignCast(tb_client));
    return context;
}
/// Maps an `InitError` to the C ABI status code reported to callers.
pub fn init_error_to_status(err: InitError) tb_status_t {
    switch (err) {
        error.OutOfMemory => return .out_of_memory,
        error.AddressInvalid => return .address_invalid,
        error.AddressLimitExceeded => return .address_limit_exceeded,
        error.ConcurrencyMaxInvalid => return .concurrency_max_invalid,
        error.SystemResources => return .system_resources,
        error.NetworkSubsystemFailed => return .network_subsystem,
        error.Unexpected => return .unexpected,
    }
}
/// Creates a default client context for the cluster reachable at
/// `addresses`, returning the opaque handle used by the rest of the C API.
/// Release the handle with `deinit`. `on_completion_fn` is stored along
/// with `on_completion_ctx` in the context for packet completion callbacks.
pub fn init(
    allocator: std.mem.Allocator,
    cluster_id: u128,
    addresses: []const u8,
    packets_count: u32,
    on_completion_ctx: usize,
    on_completion_fn: tb_completion_t,
) InitError!tb_client_t {
    const context = try DefaultContext.init(
        allocator,
        cluster_id,
        addresses,
        packets_count,
        on_completion_ctx,
        on_completion_fn,
    );
    return context_to_client(&context.implementation);
}
/// Same as `init`, but backed by the echo (testing) client instead of the
/// real VSR client. Used for test builds of the language clients.
pub fn init_echo(
    allocator: std.mem.Allocator,
    cluster_id: u128,
    addresses: []const u8,
    packets_count: u32,
    on_completion_ctx: usize,
    on_completion_fn: tb_completion_t,
) InitError!tb_client_t {
    const context = try TestingContext.init(
        allocator,
        cluster_id,
        addresses,
        packets_count,
        on_completion_ctx,
        on_completion_fn,
    );
    return context_to_client(&context.implementation);
}
/// Returns the user completion context stored at `init` time.
pub fn completion_context(client: tb_client_t) callconv(.C) usize {
    return client_to_context(client).completion_ctx;
}
/// Acquires a free packet from the context's pool, storing it in
/// `out_packet`; the returned status reports pool exhaustion.
pub fn acquire_packet(
    client: tb_client_t,
    out_packet: *?*tb_packet_t,
) callconv(.C) tb_packet_acquire_status_t {
    const ctx = client_to_context(client);
    return ctx.acquire_packet_fn(ctx, out_packet);
}
/// Returns a packet previously obtained via `acquire_packet` to the pool.
pub fn release_packet(
    client: tb_client_t,
    packet: *tb_packet_t,
) callconv(.C) void {
    const ctx = client_to_context(client);
    ctx.release_packet_fn(ctx, packet);
}
/// Submits a packet for processing; completion is reported through the
/// callback registered at `init` time.
pub fn submit(
    client: tb_client_t,
    packet: *tb_packet_t,
) callconv(.C) void {
    const ctx = client_to_context(client);
    ctx.submit_fn(ctx, packet);
}
/// Tears down a client created by `init`/`init_echo` and frees its context.
pub fn deinit(
    client: tb_client_t,
) callconv(.C) void {
    const ctx = client_to_context(client);
    ctx.deinit_fn(ctx);
}
| https://raw.githubusercontent.com/matklad/tigerbeetle-filter-repo/d699bf1bbc60a034efb0c99d7f993498e3277960/src/clients/c/tb_client.zig |
const std = @import("std");
const mmio = @import("../../../mmio.zig");
address: u64,
const FLAG_OFFSET = 0x18;
const INTEGER_BAUD_DIVISOR_OFFSET: u64 = 0x24;
const FRACTIONAL_BAUD_DIVISOR_OFFSET: u64 = 0x28;
const LINE_CONTROL_OFFSET: u64 = 0x2c;
const CONTROL_OFFSET: u64 = 0x30;
const INTERRUPT_OFFSET: u64 = 0x44;
const Self = @This();
pub const Error = error{};
/// Initialize the serial port.
/// Sequence: disable the UART, clear pending interrupts, program the baud
/// divisors, configure the line, then re-enable interrupts.
pub fn init(self: Self) void {
    // Turn off the UART temporarily
    mmio.write(u32, self.address + CONTROL_OFFSET, 0);
    // Clear all interrupts.
    mmio.write(u32, self.address + INTERRUPT_OFFSET, 0x7ff);
    // Set maximum speed to 115200 baud.
    // NOTE(review): divisor values 0x02/0x0b assume a particular UART
    // reference clock - confirm against the board's clock configuration.
    mmio.write(u32, self.address + INTEGER_BAUD_DIVISOR_OFFSET, 0x02);
    mmio.write(u32, self.address + FRACTIONAL_BAUD_DIVISOR_OFFSET, 0x0b);
    // Enable 8N1 and FIFO.
    mmio.write(u32, self.address + LINE_CONTROL_OFFSET, 0x07 << 0x04);
    // Enable interrupts.
    mmio.write(u32, self.address + INTERRUPT_OFFSET, 0x301);
}
/// Sends a byte on the serial port, translating DEL (0x7f) into a
/// rubout sequence and LF into CRLF for terminal output.
pub fn writeByte(self: Self, byte: u8) void {
    if (byte == 0x7f) {
        // Erase the previous character: DEL, space, DEL.
        mmio.write(u32, self.address, 0x7f);
        mmio.write(u32, self.address, ' ');
        mmio.write(u32, self.address, 0x7f);
    } else if (byte == 0x0a) {
        // Emit a carriage return before the line feed.
        mmio.write(u32, self.address, 0x0d);
        mmio.write(u32, self.address, 0x0a);
    } else {
        mmio.write(u32, self.address, byte);
    }
}
/// Sends bytes to the serial port; always reports the full length written.
pub fn write(self: Self, bytes: []const u8) Error!usize {
    for (bytes) |b| {
        self.writeByte(b);
    }
    return bytes.len;
}
/// Receives a byte on the serial port (low byte of the data register).
pub fn read(self: Self) u8 {
    const data = mmio.read(u32, self.address);
    return @truncate(data);
}
| https://raw.githubusercontent.com/kora-org/ydin/3a9ea572489c98719de1a87fd18ebfec6d100e8f/src/arch/aarch64/serial/pl011.zig |
//
// Zig has builtins for mathematical operations such as...
//
// @sqrt @sin @cos
// @exp @log @floor
//
// ...and lots of type casting operations such as...
//
// @as @errorFromInt @floatFromInt
// @ptrFromInt @intFromPtr @intFromEnum
//
// Spending part of a rainy day skimming through the complete
// list of builtins in the official Zig documentation wouldn't be
// a bad use of your time. There are some seriously cool features
// in there. Check out @call, @compileLog, @embedFile, and @src!
//
// ...
//
// For now, we're going to complete our examination of builtins
// by exploring just THREE of Zig's MANY introspection abilities:
//
// 1. @This() type
//
// Returns the innermost struct, enum, or union that a function
// call is inside.
//
// 2. @typeInfo(comptime T: type) @import("std").builtin.TypeInfo
//
// Returns information about any type in a TypeInfo union which
// will contain different information depending on which type
// you're examining.
//
// 3. @TypeOf(...) type
//
// Returns the type common to all input parameters (each of which
// may be any expression). The type is resolved using the same
// "peer type resolution" process the compiler itself uses when
// inferring types.
//
// (Notice how the two functions which return types start with
// uppercase letters? This is a standard naming practice in Zig.)
//
const print = @import("std").debug.print;
const std = @import("std");
// A self-referential struct used to demonstrate @This(), @TypeOf, and
// @typeInfo. The 'echo' field is a zero-bit void field and so is skipped
// when main() lists the fields Narcissus has room in his heart for.
const Narcissus = struct {
    me: *Narcissus = undefined,
    myself: *Narcissus = undefined,
    echo: void = undefined, // Alas, poor Echo!
    // No self parameter, so this must be called on the type itself
    // (Narcissus.fetchTheMostBeautifulType()), not on an instance.
    pub fn fetchTheMostBeautifulType() type {
        return @This();
    }
};
pub fn main() void {
    var narcissus: Narcissus = Narcissus{};

    // The 'me' and 'myself' fields must not stay undefined; Narcissus,
    // naturally, points at himself.
    narcissus.me = &narcissus;
    narcissus.myself = &narcissus;

    // This determines a "peer type" from three separate
    // references (they just happen to all be the same object).
    const Type1 = @TypeOf(narcissus, narcissus.me.*, narcissus.myself.*);

    // fetchTheMostBeautifulType() takes no self parameter, so it is
    // called on the type itself rather than on an instance.
    const Type2 = Narcissus.fetchTheMostBeautifulType();

    // Now we print a pithy statement about Narcissus.
    print("A {s} loves all {s}es. ", .{
        maximumNarcissism(Type1),
        maximumNarcissism(Type2),
    });

    print("He has room in his heart for:", .{});

    // A comptime-known slice of StructField descriptions of Narcissus.
    const fields = @typeInfo(Narcissus).Struct.fields;

    // 'fields' only exists at compile time, so a regular runtime 'for'
    // cannot iterate it. An 'inline for' is unrolled at compile time,
    // which removes the need to repeat the body once per field by hand.
    // Zero-bit 'void' fields (poor Echo) are skipped.
    inline for (fields) |field| {
        if (field.type != void) {
            print(" {s}", .{field.name});
        }
    }

    print(".\n", .{});
}
// NOTE: A change after Zig 0.10.0 added the source file name to type
// names: "Narcissus" became "065_builtins2.Narcissus". This helper strips
// the namespace qualifier so only the bare type name is printed.
//
// Rather than slicing at a hard-coded index (which silently breaks if the
// file - and therefore the qualifier - is ever renamed), take everything
// after the last '.' in the fully qualified name. A name with no '.' is
// returned unchanged.
fn maximumNarcissism(myType: anytype) []const u8 {
    const full_name = @typeName(myType);
    const last_dot = std.mem.lastIndexOfScalar(u8, full_name, '.') orelse return full_name;
    return full_name[last_dot + 1 ..];
}
| https://raw.githubusercontent.com/41Leahcim/my_ziglings/583649387228f46b7f7569424cc66d16e0baaa30/exercises/065_builtins2.zig |
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
/// Compiler-rt entry point: converts `a` from f64 to u128, delegating to
/// the shared `fixuint` implementation. Runtime safety is enabled only
/// under test so release builds match C ABI expectations.
pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
    @setRuntimeSafety(builtin.is_test);
    return fixuint(f64, u128, a);
}
test "import fixunsdfti" {
    _ = @import("fixunsdfti_test.zig");
}
| https://raw.githubusercontent.com/collinalexbell/all-the-compilers/7f834984f71054806bfec8604e02e86b99c0f831/zig/lib/std/special/compiler_rt/fixunsdfti.zig |
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
/// Compiler-rt entry point: converts `a` from f64 to u128, delegating to
/// the shared `fixuint` implementation. Runtime safety is enabled only
/// under test so release builds match C ABI expectations.
pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
    @setRuntimeSafety(builtin.is_test);
    return fixuint(f64, u128, a);
}
test "import fixunsdfti" {
    _ = @import("fixunsdfti_test.zig");
}
| https://raw.githubusercontent.com/creationix/zig-toolset/9ad208cd93d1f05eb772deff4af24f58eb42386f/zig-linux-x86_64-0.8.0-dev.1860+1fada3746/lib/std/special/compiler_rt/fixunsdfti.zig |
objects: std.ArrayListUnmanaged(Object) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
/// Returns true when the file at `path` begins with the ar(1) magic
/// bytes. Read failures (e.g. a file shorter than the magic) report
/// false; only the open itself can return an error.
pub fn isArchive(path: []const u8) !bool {
    const file = try std.fs.cwd().openFile(path, .{});
    defer file.close();
    var magic: [elf.ARMAG.len]u8 = undefined;
    file.reader().readNoEof(&magic) catch return false;
    return mem.eql(u8, &magic, elf.ARMAG);
}
/// Frees the object list and the long-name string table. The duped path
/// strings inside each Object are owned by the objects themselves.
pub fn deinit(self: *Archive, allocator: Allocator) void {
    self.strtab.deinit(allocator);
    self.objects.deinit(allocator);
}
/// Parses the archive at `path` (already opened as `handle_index`),
/// appending one `Object` per regular member. Symbol-table and SYMDEF
/// members are skipped; the long-name string table is loaded into
/// `self.strtab`. Members are registered with `alive = false`.
pub fn parse(self: *Archive, elf_file: *Elf, path: []const u8, handle_index: File.HandleIndex) !void {
    const comp = elf_file.base.comp;
    const gpa = comp.gpa;
    const handle = elf_file.fileHandle(handle_index);
    const size = (try handle.stat()).size;
    var pos: usize = elf.ARMAG.len;
    while (true) {
        if (pos >= size) break;
        // Member headers are 2-byte aligned; skip the padding byte.
        if (!mem.isAligned(pos, 2)) pos += 1;
        var hdr_buffer: [@sizeOf(elf.ar_hdr)]u8 = undefined;
        {
            const amt = try handle.preadAll(&hdr_buffer, pos);
            if (amt != @sizeOf(elf.ar_hdr)) return error.InputOutput;
        }
        const hdr = @as(*align(1) const elf.ar_hdr, @ptrCast(&hdr_buffer)).*;
        pos += @sizeOf(elf.ar_hdr);
        if (!mem.eql(u8, &hdr.ar_fmag, elf.ARFMAG)) {
            try elf_file.reportParseError(path, "invalid archive header delimiter: {s}", .{
                std.fmt.fmtSliceEscapeLower(&hdr.ar_fmag),
            });
            return error.MalformedArchive;
        }
        const obj_size = try hdr.size();
        // Advance past this member's data regardless of how the body below
        // handles it.
        defer pos += obj_size;
        if (hdr.isSymtab() or hdr.isSymtab64()) continue;
        if (hdr.isStrtab()) {
            // Long-name string table: keep it so later members can resolve
            // their "/<offset>" names via getString.
            try self.strtab.resize(gpa, obj_size);
            const amt = try handle.preadAll(self.strtab.items, pos);
            if (amt != obj_size) return error.InputOutput;
            continue;
        }
        if (hdr.isSymdef() or hdr.isSymdefSorted()) continue;
        // Short names are stored inline in the header; long names are an
        // offset into the string table parsed above.
        const name = if (hdr.name()) |name|
            name
        else if (try hdr.nameOffset()) |off|
            self.getString(off)
        else
            unreachable;
        const object = Object{
            .archive = .{
                .path = try gpa.dupe(u8, path),
                .offset = pos,
                .size = obj_size,
            },
            .path = try gpa.dupe(u8, name),
            .file_handle = handle_index,
            .index = undefined,
            .alive = false,
        };
        log.debug("extracting object '{s}' from archive '{s}'", .{ object.path, path });
        try self.objects.append(gpa, object);
    }
}
/// Returns the long member name stored at `off` in the archive string
/// table. Entries have the form "name/\n": the slice stops at the '\n'
/// sentinel (no 0 byte occurs first) and the trailing '/' is dropped.
fn getString(self: Archive, off: u32) []const u8 {
    assert(off < self.strtab.items.len);
    const name = mem.sliceTo(@as([*:'\n']const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
    return name[0 .. name.len - 1];
}
/// Builds an `ar_hdr` for a member with the given name variant and size.
/// Unset fields (date, uid, gid, mode) are left as ASCII spaces, the
/// fixed-width convention of the ar(1) format.
pub fn setArHdr(opts: struct {
    name: union(enum) {
        symtab: void,
        strtab: void,
        name: []const u8,
        name_off: u32,
    },
    size: usize,
}) elf.ar_hdr {
    var hdr: elf.ar_hdr = .{
        .ar_name = undefined,
        .ar_date = undefined,
        .ar_uid = undefined,
        .ar_gid = undefined,
        .ar_mode = undefined,
        .ar_size = undefined,
        .ar_fmag = undefined,
    };
    // Space-fill the whole header first; the writes below overwrite only
    // the prefix of each field they populate.
    @memset(mem.asBytes(&hdr), 0x20);
    @memcpy(&hdr.ar_fmag, elf.ARFMAG);
    {
        var stream = std.io.fixedBufferStream(&hdr.ar_name);
        const writer = stream.writer();
        switch (opts.name) {
            // Special member name for the 64-bit symbol table.
            .symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable,
            // "//" designates the long-name string table member.
            .strtab => writer.print("//", .{}) catch unreachable,
            // Short names are stored inline, terminated by '/'.
            .name => |x| writer.print("{s}/", .{x}) catch unreachable,
            // Long names are "/<offset>" into the string table.
            .name_off => |x| writer.print("/{d}", .{x}) catch unreachable,
        }
    }
    {
        var stream = std.io.fixedBufferStream(&hdr.ar_size);
        stream.writer().print("{d}", .{opts.size}) catch unreachable;
    }
    return hdr;
}
const strtab_delimiter = '\n';
pub const max_member_name_len = 15;
/// In-memory form of the archive symbol table member: one entry per
/// global symbol, mapping its name to the archive member that defines it.
pub const ArSymtab = struct {
    symtab: std.ArrayListUnmanaged(Entry) = .{},
    strtab: StringTable = .{},
    pub fn deinit(ar: *ArSymtab, allocator: Allocator) void {
        ar.symtab.deinit(allocator);
        ar.strtab.deinit(allocator);
    }
    /// Sorts entries by string-table offset, ties broken by file index.
    pub fn sort(ar: *ArSymtab) void {
        mem.sort(Entry, ar.symtab.items, {}, Entry.lessThan);
    }
    /// Serialized size in bytes (excluding the ar_hdr): symbol count,
    /// one member offset per symbol, then NUL-terminated names.
    pub fn size(ar: ArSymtab, kind: enum { p32, p64 }) usize {
        const ptr_size: usize = switch (kind) {
            .p32 => 4,
            .p64 => 8,
        };
        var ss: usize = ptr_size + ar.symtab.items.len * ptr_size;
        for (ar.symtab.items) |entry| {
            ss += ar.strtab.getAssumeExists(entry.off).len + 1;
        }
        return ss;
    }
    /// Serializes the symbol table member: header, big-endian symbol
    /// count, big-endian member offsets, then the name strings.
    pub fn write(ar: ArSymtab, kind: enum { p32, p64 }, elf_file: *Elf, writer: anytype) !void {
        assert(kind == .p64); // TODO p32
        const hdr = setArHdr(.{ .name = .symtab, .size = @intCast(ar.size(.p64)) });
        try writer.writeAll(mem.asBytes(&hdr));
        const comp = elf_file.base.comp;
        const gpa = comp.gpa;
        // Map each contributing file to the file offset of its member
        // header, precomputed in each file's output_ar_state.
        var offsets = std.AutoHashMap(File.Index, u64).init(gpa);
        defer offsets.deinit();
        try offsets.ensureUnusedCapacity(@intCast(elf_file.objects.items.len + 1));
        if (elf_file.zigObjectPtr()) |zig_object| {
            offsets.putAssumeCapacityNoClobber(zig_object.index, zig_object.output_ar_state.file_off);
        }
        for (elf_file.objects.items) |index| {
            offsets.putAssumeCapacityNoClobber(index, elf_file.file(index).?.object.output_ar_state.file_off);
        }
        // Number of symbols
        try writer.writeInt(u64, @as(u64, @intCast(ar.symtab.items.len)), .big);
        // Offsets to files
        for (ar.symtab.items) |entry| {
            const off = offsets.get(entry.file_index).?;
            try writer.writeInt(u64, off, .big);
        }
        // Strings
        for (ar.symtab.items) |entry| {
            try writer.print("{s}\x00", .{ar.strtab.getAssumeExists(entry.off)});
        }
    }
    // Formatting requires an Elf reference to resolve file indices, so
    // direct formatting is forbidden in favor of fmt() below.
    pub fn format(
        ar: ArSymtab,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = ar;
        _ = unused_fmt_string;
        _ = options;
        _ = writer;
        @compileError("do not format ar symtab directly; use fmt instead");
    }
    const FormatContext = struct {
        ar: ArSymtab,
        elf_file: *Elf,
    };
    /// Returns a formatter that can resolve file indices via `elf_file`.
    pub fn fmt(ar: ArSymtab, elf_file: *Elf) std.fmt.Formatter(format2) {
        return .{ .data = .{
            .ar = ar,
            .elf_file = elf_file,
        } };
    }
    fn format2(
        ctx: FormatContext,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = unused_fmt_string;
        _ = options;
        const ar = ctx.ar;
        const elf_file = ctx.elf_file;
        for (ar.symtab.items, 0..) |entry, i| {
            const name = ar.strtab.getAssumeExists(entry.off);
            const file = elf_file.file(entry.file_index).?;
            try writer.print(" {d}: {s} in file({d})({})\n", .{ i, name, entry.file_index, file.fmtPath() });
        }
    }
    const Entry = struct {
        /// Offset into the string table.
        off: u32,
        /// Index of the file defining the global.
        file_index: File.Index,
        pub fn lessThan(ctx: void, lhs: Entry, rhs: Entry) bool {
            _ = ctx;
            if (lhs.off == rhs.off) return lhs.file_index < rhs.file_index;
            return lhs.off < rhs.off;
        }
    };
};
/// The archive long-name string table ("//" member). Each entry is
/// "name/\n"; members reference entries by byte offset.
pub const ArStrtab = struct {
    buffer: std.ArrayListUnmanaged(u8) = .{},

    pub fn deinit(ar: *ArStrtab, allocator: Allocator) void {
        ar.buffer.deinit(allocator);
    }

    /// Appends `name` (as "name/\n") and returns the offset of the entry,
    /// suitable for the "/<offset>" form in an ar_hdr name field.
    pub fn insert(ar: *ArStrtab, allocator: Allocator, name: []const u8) error{OutOfMemory}!u32 {
        const offset: u32 = @intCast(ar.buffer.items.len);
        try ar.buffer.appendSlice(allocator, name);
        try ar.buffer.append(allocator, '/');
        try ar.buffer.append(allocator, strtab_delimiter);
        return offset;
    }

    /// Serialized size in bytes (excluding the ar_hdr).
    pub fn size(ar: ArStrtab) usize {
        return ar.buffer.items.len;
    }

    /// Serializes the string table member: header then raw bytes.
    pub fn write(ar: ArStrtab, writer: anytype) !void {
        const member_hdr = setArHdr(.{ .name = .strtab, .size = @intCast(ar.size()) });
        try writer.writeAll(mem.asBytes(&member_hdr));
        try writer.writeAll(ar.buffer.items);
    }

    pub fn format(
        ar: ArStrtab,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = unused_fmt_string;
        _ = options;
        try writer.print("{s}", .{std.fmt.fmtSliceEscapeLower(ar.buffer.items)});
    }
};
/// Per-file bookkeeping used while emitting this file as a member of an
/// output archive (see `output_ar_state` usage in ArSymtab.write).
pub const ArState = struct {
    /// Name offset in the string table.
    name_off: u32 = 0,
    /// File offset of the ar_hdr describing the contributing
    /// object in the archive.
    file_off: u64 = 0,
    /// Total size of the contributing object (excludes ar_hdr).
    size: u64 = 0,
};
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const fs = std.fs;
const log = std.log.scoped(.link);
const mem = std.mem;
const Allocator = mem.Allocator;
const Archive = @This();
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const Object = @import("Object.zig");
const StringTable = @import("../StringTable.zig");
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/src/link/Elf/Archive.zig |
objects: std.ArrayListUnmanaged(Object) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
/// Returns true when the file at `path` begins with the ar(1) magic
/// bytes. Read failures (e.g. a file shorter than the magic) report
/// false; only the open itself can return an error.
pub fn isArchive(path: []const u8) !bool {
    const file = try std.fs.cwd().openFile(path, .{});
    defer file.close();
    var magic: [elf.ARMAG.len]u8 = undefined;
    file.reader().readNoEof(&magic) catch return false;
    return mem.eql(u8, &magic, elf.ARMAG);
}
/// Frees the object list and the long-name string table. The duped path
/// strings inside each Object are owned by the objects themselves.
pub fn deinit(self: *Archive, allocator: Allocator) void {
    self.strtab.deinit(allocator);
    self.objects.deinit(allocator);
}
/// Parses the archive at `path` (already opened as `handle_index`),
/// appending one `Object` per regular member. Symbol-table and SYMDEF
/// members are skipped; the long-name string table is loaded into
/// `self.strtab`. Members are registered with `alive = false`.
pub fn parse(self: *Archive, elf_file: *Elf, path: []const u8, handle_index: File.HandleIndex) !void {
    const comp = elf_file.base.comp;
    const gpa = comp.gpa;
    const handle = elf_file.fileHandle(handle_index);
    const size = (try handle.stat()).size;
    var pos: usize = elf.ARMAG.len;
    while (true) {
        if (pos >= size) break;
        // Member headers are 2-byte aligned; skip the padding byte.
        if (!mem.isAligned(pos, 2)) pos += 1;
        var hdr_buffer: [@sizeOf(elf.ar_hdr)]u8 = undefined;
        {
            const amt = try handle.preadAll(&hdr_buffer, pos);
            if (amt != @sizeOf(elf.ar_hdr)) return error.InputOutput;
        }
        const hdr = @as(*align(1) const elf.ar_hdr, @ptrCast(&hdr_buffer)).*;
        pos += @sizeOf(elf.ar_hdr);
        if (!mem.eql(u8, &hdr.ar_fmag, elf.ARFMAG)) {
            try elf_file.reportParseError(path, "invalid archive header delimiter: {s}", .{
                std.fmt.fmtSliceEscapeLower(&hdr.ar_fmag),
            });
            return error.MalformedArchive;
        }
        const obj_size = try hdr.size();
        // Advance past this member's data regardless of how the body below
        // handles it.
        defer pos += obj_size;
        if (hdr.isSymtab() or hdr.isSymtab64()) continue;
        if (hdr.isStrtab()) {
            // Long-name string table: keep it so later members can resolve
            // their "/<offset>" names via getString.
            try self.strtab.resize(gpa, obj_size);
            const amt = try handle.preadAll(self.strtab.items, pos);
            if (amt != obj_size) return error.InputOutput;
            continue;
        }
        if (hdr.isSymdef() or hdr.isSymdefSorted()) continue;
        // Short names are stored inline in the header; long names are an
        // offset into the string table parsed above.
        const name = if (hdr.name()) |name|
            name
        else if (try hdr.nameOffset()) |off|
            self.getString(off)
        else
            unreachable;
        const object = Object{
            .archive = .{
                .path = try gpa.dupe(u8, path),
                .offset = pos,
                .size = obj_size,
            },
            .path = try gpa.dupe(u8, name),
            .file_handle = handle_index,
            .index = undefined,
            .alive = false,
        };
        log.debug("extracting object '{s}' from archive '{s}'", .{ object.path, path });
        try self.objects.append(gpa, object);
    }
}
/// Returns the long member name stored at `off` in the archive string
/// table. Entries have the form "name/\n": the slice stops at the '\n'
/// sentinel (no 0 byte occurs first) and the trailing '/' is dropped.
fn getString(self: Archive, off: u32) []const u8 {
    assert(off < self.strtab.items.len);
    const name = mem.sliceTo(@as([*:'\n']const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
    return name[0 .. name.len - 1];
}
/// Builds an `ar_hdr` for a member with the given name variant and size.
/// Unset fields (date, uid, gid, mode) are left as ASCII spaces, the
/// fixed-width convention of the ar(1) format.
pub fn setArHdr(opts: struct {
    name: union(enum) {
        symtab: void,
        strtab: void,
        name: []const u8,
        name_off: u32,
    },
    size: usize,
}) elf.ar_hdr {
    var hdr: elf.ar_hdr = .{
        .ar_name = undefined,
        .ar_date = undefined,
        .ar_uid = undefined,
        .ar_gid = undefined,
        .ar_mode = undefined,
        .ar_size = undefined,
        .ar_fmag = undefined,
    };
    // Space-fill the whole header first; the writes below overwrite only
    // the prefix of each field they populate.
    @memset(mem.asBytes(&hdr), 0x20);
    @memcpy(&hdr.ar_fmag, elf.ARFMAG);
    {
        var stream = std.io.fixedBufferStream(&hdr.ar_name);
        const writer = stream.writer();
        switch (opts.name) {
            // Special member name for the 64-bit symbol table.
            .symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable,
            // "//" designates the long-name string table member.
            .strtab => writer.print("//", .{}) catch unreachable,
            // Short names are stored inline, terminated by '/'.
            .name => |x| writer.print("{s}/", .{x}) catch unreachable,
            // Long names are "/<offset>" into the string table.
            .name_off => |x| writer.print("/{d}", .{x}) catch unreachable,
        }
    }
    {
        var stream = std.io.fixedBufferStream(&hdr.ar_size);
        stream.writer().print("{d}", .{opts.size}) catch unreachable;
    }
    return hdr;
}
const strtab_delimiter = '\n';
pub const max_member_name_len = 15;
/// In-memory form of the archive symbol table member: one entry per
/// global symbol, mapping its name to the archive member that defines it.
pub const ArSymtab = struct {
    symtab: std.ArrayListUnmanaged(Entry) = .{},
    strtab: StringTable = .{},
    pub fn deinit(ar: *ArSymtab, allocator: Allocator) void {
        ar.symtab.deinit(allocator);
        ar.strtab.deinit(allocator);
    }
    /// Sorts entries by string-table offset, ties broken by file index.
    pub fn sort(ar: *ArSymtab) void {
        mem.sort(Entry, ar.symtab.items, {}, Entry.lessThan);
    }
    /// Serialized size in bytes (excluding the ar_hdr): symbol count,
    /// one member offset per symbol, then NUL-terminated names.
    pub fn size(ar: ArSymtab, kind: enum { p32, p64 }) usize {
        const ptr_size: usize = switch (kind) {
            .p32 => 4,
            .p64 => 8,
        };
        var ss: usize = ptr_size + ar.symtab.items.len * ptr_size;
        for (ar.symtab.items) |entry| {
            ss += ar.strtab.getAssumeExists(entry.off).len + 1;
        }
        return ss;
    }
    /// Serializes the symbol table member: header, big-endian symbol
    /// count, big-endian member offsets, then the name strings.
    pub fn write(ar: ArSymtab, kind: enum { p32, p64 }, elf_file: *Elf, writer: anytype) !void {
        assert(kind == .p64); // TODO p32
        const hdr = setArHdr(.{ .name = .symtab, .size = @intCast(ar.size(.p64)) });
        try writer.writeAll(mem.asBytes(&hdr));
        const comp = elf_file.base.comp;
        const gpa = comp.gpa;
        // Map each contributing file to the file offset of its member
        // header, precomputed in each file's output_ar_state.
        var offsets = std.AutoHashMap(File.Index, u64).init(gpa);
        defer offsets.deinit();
        try offsets.ensureUnusedCapacity(@intCast(elf_file.objects.items.len + 1));
        if (elf_file.zigObjectPtr()) |zig_object| {
            offsets.putAssumeCapacityNoClobber(zig_object.index, zig_object.output_ar_state.file_off);
        }
        for (elf_file.objects.items) |index| {
            offsets.putAssumeCapacityNoClobber(index, elf_file.file(index).?.object.output_ar_state.file_off);
        }
        // Number of symbols
        try writer.writeInt(u64, @as(u64, @intCast(ar.symtab.items.len)), .big);
        // Offsets to files
        for (ar.symtab.items) |entry| {
            const off = offsets.get(entry.file_index).?;
            try writer.writeInt(u64, off, .big);
        }
        // Strings
        for (ar.symtab.items) |entry| {
            try writer.print("{s}\x00", .{ar.strtab.getAssumeExists(entry.off)});
        }
    }
    // Formatting requires an Elf reference to resolve file indices, so
    // direct formatting is forbidden in favor of fmt() below.
    pub fn format(
        ar: ArSymtab,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = ar;
        _ = unused_fmt_string;
        _ = options;
        _ = writer;
        @compileError("do not format ar symtab directly; use fmt instead");
    }
    const FormatContext = struct {
        ar: ArSymtab,
        elf_file: *Elf,
    };
    /// Returns a formatter that can resolve file indices via `elf_file`.
    pub fn fmt(ar: ArSymtab, elf_file: *Elf) std.fmt.Formatter(format2) {
        return .{ .data = .{
            .ar = ar,
            .elf_file = elf_file,
        } };
    }
    fn format2(
        ctx: FormatContext,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = unused_fmt_string;
        _ = options;
        const ar = ctx.ar;
        const elf_file = ctx.elf_file;
        for (ar.symtab.items, 0..) |entry, i| {
            const name = ar.strtab.getAssumeExists(entry.off);
            const file = elf_file.file(entry.file_index).?;
            try writer.print(" {d}: {s} in file({d})({})\n", .{ i, name, entry.file_index, file.fmtPath() });
        }
    }
    const Entry = struct {
        /// Offset into the string table.
        off: u32,
        /// Index of the file defining the global.
        file_index: File.Index,
        pub fn lessThan(ctx: void, lhs: Entry, rhs: Entry) bool {
            _ = ctx;
            if (lhs.off == rhs.off) return lhs.file_index < rhs.file_index;
            return lhs.off < rhs.off;
        }
    };
};
/// Archive string table (the "//" member) holding member names. Each name
/// is stored followed by "/" and `strtab_delimiter`.
pub const ArStrtab = struct {
    buffer: std.ArrayListUnmanaged(u8) = .{},

    pub fn deinit(ar: *ArStrtab, allocator: Allocator) void {
        ar.buffer.deinit(allocator);
    }

    /// Append `name` (terminated by "/" and the delimiter byte) and return
    /// the offset at which it was stored.
    pub fn insert(ar: *ArStrtab, allocator: Allocator, name: []const u8) error{OutOfMemory}!u32 {
        const name_off: u32 = @intCast(ar.buffer.items.len);
        try ar.buffer.appendSlice(allocator, name);
        try ar.buffer.append(allocator, '/');
        try ar.buffer.append(allocator, strtab_delimiter);
        return name_off;
    }

    /// Number of bytes the string table occupies on disk.
    pub fn size(ar: ArStrtab) usize {
        return ar.buffer.items.len;
    }

    /// Emit the ar header for the string table, then its raw bytes.
    pub fn write(ar: ArStrtab, writer: anytype) !void {
        const hdr = setArHdr(.{ .name = .strtab, .size = @intCast(ar.size()) });
        try writer.writeAll(mem.asBytes(&hdr));
        try writer.writeAll(ar.buffer.items);
    }

    /// Debug formatter: prints the buffer with non-printable bytes escaped.
    pub fn format(
        ar: ArStrtab,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = unused_fmt_string;
        _ = options;
        try writer.print("{s}", .{std.fmt.fmtSliceEscapeLower(ar.buffer.items)});
    }
};
/// Per-file bookkeeping recorded while emitting this file as an archive
/// member (read back by ArSymtab.write to compute symbol file offsets).
pub const ArState = struct {
    /// Name offset in the string table.
    name_off: u32 = 0,

    /// File offset of the ar_hdr describing the contributing
    /// object in the archive.
    file_off: u64 = 0,

    /// Total size of the contributing object (excludes ar_hdr).
    size: u64 = 0,
};
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const fs = std.fs;
const log = std.log.scoped(.link);
const mem = std.mem;
const Allocator = mem.Allocator;
const Archive = @This();
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const Object = @import("Object.zig");
const StringTable = @import("../StringTable.zig");
| https://raw.githubusercontent.com/kassane/zig-mos-bootstrap/19aac4779b9e93b0e833402c26c93cfc13bb94e2/zig/src/link/Elf/Archive.zig |
const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
/// Compiler-rt entry point: convert an f128 to u128. The conversion logic
/// (and its edge-case behavior) lives in the shared fixuint helper.
pub fn __fixunstfti(a: f128) callconv(.C) u128 {
    // Runtime safety checks are only enabled when running the test suite.
    @setRuntimeSafety(builtin.is_test);
    return fixuint(f128, u128, a);
}
test {
_ = @import("fixunstfti_test.zig");
}
| https://raw.githubusercontent.com/natanalt/zig-x86_16/1b38fc3ef5e539047c76604ffe71b81e246f1a1e/lib/std/special/compiler_rt/fixunstfti.zig |
const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
/// Compiler-rt entry point: convert an f128 to u128. The conversion logic
/// (and its edge-case behavior) lives in the shared fixuint helper.
pub fn __fixunstfti(a: f128) callconv(.C) u128 {
    // Runtime safety checks are only enabled when running the test suite.
    @setRuntimeSafety(builtin.is_test);
    return fixuint(f128, u128, a);
}
test {
_ = @import("fixunstfti_test.zig");
}
| https://raw.githubusercontent.com/kraxli/dev_tools_mswindows/1d1a8f61299e4b7ba356fae3a37af0ddc8daf356/zig-windows-x86_64-0.9.1/lib/std/special/compiler_rt/fixunstfti.zig |
const fixuint = @import("fixuint.zig").fixuint;
const builtin = @import("builtin");
/// Compiler-rt entry point: convert an f128 to u128. The conversion logic
/// (and its edge-case behavior) lives in the shared fixuint helper.
pub fn __fixunstfti(a: f128) callconv(.C) u128 {
    // Runtime safety checks are only enabled when running the test suite.
    @setRuntimeSafety(builtin.is_test);
    return fixuint(f128, u128, a);
}
test {
_ = @import("fixunstfti_test.zig");
}
| https://raw.githubusercontent.com/jamesmintram/jimzos/8eb52e7efffb1a97eca4899ff72549f96ed3460b/tools/ext2fs/data/test3/special/compiler_rt/fixunstfti.zig |
//! generated by flatc-zig from Schema.fbs
const flatbuffers = @import("flatbuffers");
/// A Map is a logical nested type that is represented as
///
/// List<entries: Struct<key: K, value: V>>
///
/// In this layout, the keys and values are each respectively contiguous. We do
/// not constrain the key and value types, so the application is responsible
/// for ensuring that the keys are hashable and unique. Whether the keys are sorted
/// may be set in the metadata for this field.
///
/// In a field with Map type, the field has a child Struct field, which then
/// has two children: key type and the second the value type. The names of the
/// child fields may be respectively "entries", "key", and "value", but this is
/// not enforced.
///
/// Map
/// ```text
/// - child[0] entries: Struct
/// - child[0] key: K
/// - child[1] value: V
/// ```
/// Neither the "entries" field nor the "key" field may be nullable.
///
/// The metadata is structured so that Arrow systems without special handling
/// for Map can make Map an alias for List. The "layout" attribute for the Map
/// field must have the same contents as a List.
// NOTE(review): generated by flatc-zig (see file header) — hand edits here
// will be lost on regeneration.
pub const Map = struct {
    /// Set to true if the keys within each value are sorted
    keys_sorted: bool = false,

    const Self = @This();

    /// Unpack from the packed (flatbuffer table) representation.
    pub fn init(packed_: PackedMap) flatbuffers.Error!Self {
        return .{
            .keys_sorted = try packed_.keysSorted(),
        };
    }

    /// Serialize as a flatbuffer table; returns the table offset within
    /// the builder's buffer. The field is omitted when equal to its
    /// default (false).
    pub fn pack(self: Self, builder: *flatbuffers.Builder) flatbuffers.Error!u32 {
        try builder.startTable();
        try builder.appendTableFieldWithDefault(bool, self.keys_sorted, false);
        return builder.endTable();
    }
};
/// A Map is a logical nested type that is represented as
///
/// List<entries: Struct<key: K, value: V>>
///
/// In this layout, the keys and values are each respectively contiguous. We do
/// not constrain the key and value types, so the application is responsible
/// for ensuring that the keys are hashable and unique. Whether the keys are sorted
/// may be set in the metadata for this field.
///
/// In a field with Map type, the field has a child Struct field, which then
/// has two children: key type and the second the value type. The names of the
/// child fields may be respectively "entries", "key", and "value", but this is
/// not enforced.
///
/// Map
/// ```text
/// - child[0] entries: Struct
/// - child[0] key: K
/// - child[1] value: V
/// ```
/// Neither the "entries" field nor the "key" field may be nullable.
///
/// The metadata is structured so that Arrow systems without special handling
/// for Map can make Map an alias for List. The "layout" attribute for the Map
/// field must have the same contents as a List.
// NOTE(review): generated by flatc-zig (see file header) — hand edits here
// will be lost on regeneration.
pub const PackedMap = struct {
    table: flatbuffers.Table,

    const Self = @This();

    /// Wrap a size-prefixed flatbuffer byte slice for lazy field access.
    pub fn init(size_prefixed_bytes: []u8) flatbuffers.Error!Self {
        return .{ .table = try flatbuffers.Table.init(size_prefixed_bytes) };
    }

    /// Set to true if the keys within each value are sorted
    pub fn keysSorted(self: Self) flatbuffers.Error!bool {
        return self.table.readFieldWithDefault(bool, 0, false);
    }
};
| https://raw.githubusercontent.com/clickingbuttons/flatbuffers-zig/47a7c4b913e01c9701e1a69ce49c450cfb9a880f/codegen/examples/arrow/gen/Map.zig |
//
// We've absorbed a lot of information about the variations of types
// we can use in Zig. Roughly, in order we have:
//
// u8 single item
// *u8 single-item pointer
// []u8 slice (size known at runtime)
// [5]u8 array of 5 u8s
// [*]u8 many-item pointer (zero or more)
// enum {a, b} set of unique values a and b
// error {e, f} set of unique error values e and f
// struct {y: u8, z: i32} group of values y and z
// union(enum) {a: u8, b: i32} single value either u8 or i32
//
// Values of any of the above types can be assigned as "var" or "const"
// to allow or disallow changes (mutability) via the assigned name:
//
// const a: u8 = 5; // immutable
// var b: u8 = 5; // mutable
//
// We can also make error unions or optional types from any of
// the above:
//
// var a: E!u8 = 5; // can be u8 or error from set E
// var b: ?u8 = 5; // can be u8 or null
//
// Knowing all of this, maybe we can help out a local hermit. He made
// a little Zig program to help him plan his trips through the woods,
// but it has some mistakes.
//
// *************************************************************
// * A NOTE ABOUT THIS EXERCISE *
// * *
// * You do NOT have to read and understand every bit of this *
// * program. This is a very big example. Feel free to skim *
// * through it and then just focus on the few parts that are *
// * actually broken! *
// * *
// *************************************************************
//
const print = @import("std").debug.print;
// Errors the trip planner can produce. (The grue is a nod to Zork.)
const TripError = error{ Unreachable, EatenByAGrue };
// Let's start with the Places on the map. Each has a name and a
// distance or difficulty of travel (as judged by the hermit).
//
// Note that we declare the places as mutable (var) because we need to
// assign the paths later. And why is that? Because paths contain
// pointers to places and assigning them now would create a dependency
// loop!
// A map location. `paths` starts undefined and is wired up at runtime in
// main(), because each Path points back at Places (a dependency loop if
// assigned at declaration time).
const Place = struct {
    name: []const u8,
    paths: []const Path = undefined,
};
// The six map locations (see the hermit's ASCII map below). Declared `var`
// so their `paths` slices can be assigned in main().
var a = Place{ .name = "Archer's Point" };
var b = Place{ .name = "Bridge" };
var c = Place{ .name = "Cottage" };
var d = Place{ .name = "Dogwood Grove" };
var e = Place{ .name = "East Pond" };
var f = Place{ .name = "Fox Pond" };
// The hermit's hand-drawn ASCII map
// +---------------------------------------------------+
// | * Archer's Point ~~~~ |
// | ~~~ ~~~~~~~~ |
// | ~~~| |~~~~~~~~~~~~ ~~~~~~~ |
// | Bridge ~~~~~~~~ |
// | ^ ^ ^ |
// | ^ ^ / \ |
// | ^ ^ ^ ^ |_| Cottage |
// | Dogwood Grove |
// | ^ <boat> |
// | ^ ^ ^ ^ ~~~~~~~~~~~~~ ^ ^ |
// | ^ ~~ East Pond ~~~ |
// | ^ ^ ^ ~~~~~~~~~~~~~~ |
// | ~~ ^ |
// | ^ ~~~ <-- short waterfall |
// | ^ ~~~~~ |
// | ~~~~~~~~~~~~~~~~~ |
// | ~~~~ Fox Pond ~~~~~~~ ^ ^ |
// | ^ ~~~~~~~~~~~~~~~ ^ ^ |
// | ~~~~~ |
// +---------------------------------------------------+
//
// We'll be reserving memory in our program based on the number of
// places on the map. Note that we do not have to specify the type of
// this value because we don't actually use it in our program once
// it's compiled! (Don't worry if this doesn't make sense yet.)
const place_count = 6; // comptime-known; type inferred (comptime_int)
// Now let's create all of the paths between sites. A path goes from
// one place to another and has a distance.
// A directed edge: travel is only from -> to, so two-way routes must be
// listed in both directions (the East Pond waterfall below is one-way).
const Path = struct {
    from: *const Place,
    to: *const Place,
    dist: u8,
};
// By the way, if the following code seems like a lot of tedious
// manual labor, you're right! One of Zig's killer features is letting
// us write code that runs at compile time to "automate" repetitive
// code (much like macros in other languages), but we haven't learned
// how to do that yet!
// Adjacency data: every Path leaving each Place. Mirrored pairs share a
// distance; East Pond -> Fox Pond is deliberately one-way.
const a_paths = [_]Path{
    Path{
        .from = &a, // from: Archer's Point
        .to = &b, // to: Bridge
        .dist = 2,
    },
};

const b_paths = [_]Path{
    Path{
        .from = &b, // from: Bridge
        .to = &a, // to: Archer's Point
        .dist = 2,
    },
    Path{
        .from = &b, // from: Bridge
        .to = &d, // to: Dogwood Grove
        .dist = 1,
    },
};

const c_paths = [_]Path{
    Path{
        .from = &c, // from: Cottage
        .to = &d, // to: Dogwood Grove
        .dist = 3,
    },
    Path{
        .from = &c, // from: Cottage
        .to = &e, // to: East Pond
        .dist = 2,
    },
};

const d_paths = [_]Path{
    Path{
        .from = &d, // from: Dogwood Grove
        .to = &b, // to: Bridge
        .dist = 1,
    },
    Path{
        .from = &d, // from: Dogwood Grove
        .to = &c, // to: Cottage
        .dist = 3,
    },
    Path{
        .from = &d, // from: Dogwood Grove
        .to = &f, // to: Fox Pond
        .dist = 7,
    },
};

const e_paths = [_]Path{
    Path{
        .from = &e, // from: East Pond
        .to = &c, // to: Cottage
        .dist = 2,
    },
    Path{
        .from = &e, // from: East Pond
        .to = &f, // to: Fox Pond
        .dist = 1, // (one-way down a short waterfall!)
    },
};

const f_paths = [_]Path{
    Path{
        .from = &f, // from: Fox Pond
        .to = &d, // to: Dogwood Grove
        .dist = 7,
    },
};
// Once we've plotted the best course through the woods, we'll make a
// "trip" out of it. A trip is a series of Places connected by Paths.
// We use a TripItem union to allow both Places and Paths to be in the
// same array.
const TripItem = union(enum) {
    place: *const Place,
    path: *const Path,

    // Print a single trip item: a place prints its name, a path prints its
    // distance as an arrow, e.g. "--2->". The switch captures the active
    // payload as 'p'.
    fn printMe(self: TripItem) void {
        switch (self) {
            .place => |p| print("{s}", .{p.name}),
            .path => |p| print("--{}->", .{p.dist}),
        }
    }
};
// The Hermit's Notebook is where all the magic happens. A notebook
// entry is a Place discovered on the map along with the Path taken to
// get there and the distance to reach it from the start point. If we
// find a better Path to reach a Place (shorter distance), we update the
// entry. Entries also serve as a "todo" list which is how we keep
// track of which paths to explore next.
const NotebookEntry = struct {
    // The Place this entry describes.
    place: *const Place,
    // Previous Place on the best-known route (null only for the start).
    coming_from: ?*const Place,
    // Path taken from coming_from (null only for the start).
    via_path: ?*const Path,
    // Total distance from the start along the best-known route.
    dist_to_reach: u16,
};
// +------------------------------------------------+
// | ~ Hermit's Notebook ~ |
// +---+----------------+----------------+----------+
// | | Place | From | Distance |
// +---+----------------+----------------+----------+
// | 0 | Archer's Point | null | 0 |
// | 1 | Bridge | Archer's Point | 2 | < next_entry
// | 2 | Dogwood Grove | Bridge | 1 |
// | 3 | | | | < end_of_entries
// | ... |
// +---+----------------+----------------+----------+
//
// Combined "best distance so far" table and FIFO work queue for the
// shortest-path search (see the closing notes: essentially SPFA).
const HermitsNotebook = struct {
    // Remember the array repetition operator `**`? It is no mere
    // novelty, it's also a great way to assign multiple items in an
    // array without having to list them one by one. Here we use it to
    // initialize an array with null values.
    entries: [place_count]?NotebookEntry = .{null} ** place_count,

    // The next entry keeps track of where we are in our "todo" list.
    next_entry: u8 = 0,

    // Mark the start of empty space in the notebook.
    end_of_entries: u8 = 0,

    // We'll often want to find an entry by Place. If one is not
    // found, we return null.
    fn getEntry(self: *HermitsNotebook, place: *const Place) ?*NotebookEntry {
        for (self.entries) |*entry, i| {
            if (i >= self.end_of_entries) break;

            // "entry" is a pointer to an optional NotebookEntry, but we
            // need an optional pointer to the NotebookEntry itself: so we
            // dereference the pointer (.*), unwrap the optional (.?), and
            // return the address of the result (&).
            if (place == entry.*.?.place) return &entry.*.?;
        }
        return null;
    }

    // The checkNote() method is the beating heart of the magical
    // notebook. Given a new note in the form of a NotebookEntry
    // struct, we check to see if we already have an entry for the
    // note's Place.
    //
    // If we DON'T, we'll add the entry to the end of the notebook
    // along with the Path taken and distance.
    //
    // If we DO, we check to see if the path is "better" (shorter
    // distance) than the one we'd noted before. If it is, we
    // overwrite the old entry with the new one.
    fn checkNote(self: *HermitsNotebook, note: NotebookEntry) void {
        var existing_entry = self.getEntry(note.place);

        if (existing_entry == null) {
            self.entries[self.end_of_entries] = note;
            self.end_of_entries += 1;
        } else if (note.dist_to_reach < existing_entry.?.dist_to_reach) {
            existing_entry.?.* = note;
        }
    }

    // The next two methods allow us to use the notebook as a "todo"
    // list.
    fn hasNextEntry(self: *HermitsNotebook) bool {
        return self.next_entry < self.end_of_entries;
    }

    fn getNextEntry(self: *HermitsNotebook) *const NotebookEntry {
        defer self.next_entry += 1; // Increment after getting entry
        return &self.entries[self.next_entry].?;
    }

    // After we've completed our search of the map, we'll have
    // computed the shortest Path to every Place. To collect the
    // complete trip from the start to the destination, we need to
    // walk backwards from the destination's notebook entry, following
    // the coming_from pointers back to the start. What we end up with
    // is an array of TripItems with our trip in reverse order.
    //
    // The caller (main) owns the trip array's memory; allocating it in
    // this function's stack frame and returning a slice to it would leave
    // the slice dangling once the frame is gone.
    fn getTripTo(self: *HermitsNotebook, trip: []?TripItem, dest: *Place) TripError!void {
        // We start at the destination entry.
        const destination_entry = self.getEntry(dest);

        // This function needs to return an error if the requested
        // destination was never reached. (This can't actually happen
        // in our map since every Place is reachable by every other
        // Place.)
        if (destination_entry == null) {
            return TripError.Unreachable;
        }

        // Variables hold the entry we're currently examining and an
        // index to keep track of where we're appending trip items.
        var current_entry = destination_entry.?;
        var i: u8 = 0;

        // At the end of each looping, a continue expression increments
        // our index. We increment by two because each iteration appends
        // both a Place and (except at the very start) a Path.
        while (true) : (i += 2) {
            trip[i] = TripItem{ .place = current_entry.place };

            // An entry "coming from" nowhere means we've reached the
            // start, so we're done.
            if (current_entry.coming_from == null) break;

            // Otherwise, entries have a path.
            trip[i + 1] = TripItem{ .path = current_entry.via_path.? };

            // Now we follow the entry we're "coming from". If we
            // aren't able to find the entry we're "coming from" by
            // Place, something has gone horribly wrong with our
            // program! (This really shouldn't ever happen. Have you
            // checked for grues?)
            const previous_entry = self.getEntry(current_entry.coming_from.?);
            if (previous_entry == null) return TripError.EatenByAGrue;
            current_entry = previous_entry.?;
        }
    }
};
// Wire up the map, run the shortest-path search from `start`, then collect
// and print the route to `destination`.
pub fn main() void {
    // Here's where the hermit decides where he would like to go. Once
    // you get the program working, try some different Places on the
    // map!
    const start = &a; // Archer's Point
    const destination = &f; // Fox Pond

    // Store each Path array as a slice in each Place. As mentioned
    // above, we needed to delay making these references to avoid
    // creating a dependency loop when the compiler is trying to
    // figure out how to allocate space for each item.
    a.paths = a_paths[0..];
    b.paths = b_paths[0..];
    c.paths = c_paths[0..];
    d.paths = d_paths[0..];
    e.paths = e_paths[0..];
    f.paths = f_paths[0..];

    // Now we create an instance of the notebook and add the first
    // "start" entry. Note the null values. Read the comments for the
    // checkNote() method above to see how this entry gets added to
    // the notebook.
    var notebook = HermitsNotebook{};
    var working_note = NotebookEntry{
        .place = start,
        .coming_from = null,
        .via_path = null,
        .dist_to_reach = 0,
    };
    notebook.checkNote(working_note);

    // Get the next entry from the notebook (the first being the
    // "start" entry we just added) until we run out, at which point
    // we'll have checked every reachable Place.
    while (notebook.hasNextEntry()) {
        var place_entry = notebook.getNextEntry();

        // For every Path that leads FROM the current Place, create a
        // new note (in the form of a NotebookEntry) with the
        // destination Place and the total distance from the start to
        // reach that place. Again, read the comments for the
        // checkNote() method to see how this works.
        for (place_entry.place.paths) |*path| {
            working_note = NotebookEntry{
                .place = path.to,
                .coming_from = place_entry.place,
                .via_path = path,
                .dist_to_reach = place_entry.dist_to_reach + path.dist,
            };
            notebook.checkNote(working_note);
        }
    }

    // Once the loop above is complete, we've calculated the shortest
    // path to every reachable Place! What we need to do now is set
    // aside memory for the trip and have the hermit's notebook fill
    // in the trip from the destination back to the path. Note that
    // this is the first time we've actually used the destination!
    // (Worst case: every Place plus a Path between each, hence * 2.)
    var trip = [_]?TripItem{null} ** (place_count * 2);

    notebook.getTripTo(trip[0..], destination) catch |err| {
        print("Oh no! {}\n", .{err});
        return;
    };

    // Print the trip with a little helper function below.
    printTrip(trip[0..]);
}
// Remember that trips will be a series of alternating TripItems
// containing a Place or Path from the destination back to the start.
// The remaining space in the trip array will contain null values, so
// we need to loop through the items in reverse, skipping nulls, until
// we reach the destination at the front of the array.
// Trips arrive destination-first with unused slots left null, so walk the
// array from back to front, printing every non-null item, then a newline.
fn printTrip(trip: []?TripItem) void {
    // Narrow the usize length to our u8 index with the @intCast()
    // builtin (covered properly in a later exercise).
    var idx: u8 = @intCast(u8, trip.len);
    while (idx > 0) : (idx -= 1) {
        if (trip[idx - 1]) |item| item.printMe();
    }
    print("\n", .{});
}
// Going deeper:
//
// In computer science terms, our map places are "nodes" or "vertices" and
// the paths are "edges". Together, they form a "weighted, directed
// graph". It is "weighted" because each path has a distance (also
// known as a "cost"). It is "directed" because each path goes FROM
// one place TO another place (undirected graphs allow you to travel
// on an edge in either direction).
//
// Since we append new notebook entries at the end of the list and
// then explore each sequentially from the beginning (like a "todo"
// list), we are treating the notebook as a "First In, First Out"
// (FIFO) queue.
//
// Since we examine all closest paths first before trying further ones
// (thanks to the "todo" queue), we are performing a "Breadth-First
// Search" (BFS).
//
// By tracking "lowest cost" paths, we can also say that we're
// performing a "least-cost search".
//
// Even more specifically, the Hermit's Notebook most closely
// resembles the Shortest Path Faster Algorithm (SPFA), attributed to
// Edward F. Moore. By replacing our simple FIFO queue with a
// "priority queue", we would basically have Dijkstra's algorithm. A
// priority queue retrieves items sorted by "weight" (in our case, it
// would keep the paths with the shortest distance at the front of the
// queue). Dijkstra's algorithm is more efficient because longer paths
// can be eliminated more quickly. (Work it out on paper to see why!)
| https://raw.githubusercontent.com/ruslandoga/ziglings/1cae040494f5bb64cd92b0847d839ab26b7bec46/exercises/058_quiz7.zig |
//! A set of array and slice types that bit-pack integer elements. A normal [12]u3
//! takes up 12 bytes of memory since u3's alignment is 1. PackedIntArray(u3, 12)
//! only takes up 5 bytes of memory (12 * 3 = 36 bits, rounded up to whole bytes).
const std = @import("std");
const builtin = @import("builtin");
const debug = std.debug;
const testing = std.testing;
const native_endian = builtin.target.cpu.arch.endian();
const Endian = std.builtin.Endian;
/// Provides a set of functions for reading and writing packed integers from a
/// slice of bytes.
/// Provides a set of functions for reading and writing packed integers from a
/// slice of bytes.
pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
    // The general technique employed here is to cast bytes in the array to a container
    // integer (having bits % 8 == 0) large enough to contain the number of bits we want,
    // then we can retrieve or store the new value with a relative minimum of masking
    // and shifting. In this worst case, this means that we'll need an integer that's
    // actually 1 byte larger than the minimum required to store the bits, because it
    // is possible that the bits start at the end of the first byte, continue through
    // zero or more, then end in the beginning of the last. But, if we try to access
    // a value in the very last byte of memory with that integer size, that extra byte
    // will be out of bounds. Depending on the circumstances of the memory, that might
    // mean the OS fatally kills the program. Thus, we use a larger container (MaxIo)
    // most of the time, but a smaller container (MinIo) when touching the last byte
    // of the memory.
    const int_bits = @bitSizeOf(Int);

    // In the best case, this is the number of bytes we need to touch
    // to read or write a value, as bits.
    const min_io_bits = ((int_bits + 7) / 8) * 8;

    // In the worst case, this is the number of bytes we need to touch
    // to read or write a value, as bits. To calculate for int_bits > 1,
    // set aside 2 bits to touch the first and last bytes, then divide
    // by 8 to see how many bytes can be filled up in between.
    const max_io_bits = switch (int_bits) {
        0 => 0,
        1 => 8,
        else => ((int_bits - 2) / 8 + 2) * 8,
    };

    // We bitcast the desired Int type to an unsigned version of itself
    // to avoid issues with shifting signed ints.
    const UnInt = std.meta.Int(.unsigned, int_bits);

    // The minimum container int type
    // (NOTE(review): the original comments on MinIo/MaxIo were swapped —
    // MinIo is built from min_io_bits, MaxIo from max_io_bits.)
    const MinIo = std.meta.Int(.unsigned, min_io_bits);

    // The maximum container int type
    const MaxIo = std.meta.Int(.unsigned, max_io_bits);

    return struct {
        /// Retrieves the integer at `index` from the packed data beginning at `bit_offset`
        /// within `bytes`.
        /// NOTE(review): takes `bit_offset: u7` while set() takes u3 — an
        /// upstream asymmetry; confirm intended before changing either.
        pub fn get(bytes: []const u8, index: usize, bit_offset: u7) Int {
            if (int_bits == 0) return 0;

            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;

            //using the larger container size will potentially read out of bounds
            if (max_end_byte > bytes.len) return getBits(bytes, MinIo, bit_index);
            return getBits(bytes, MaxIo, bit_index);
        }

        /// Load a Container's worth of bytes at the bit position, normalize
        /// to native endianness, then shift away the bits above and below
        /// the target value.
        fn getBits(bytes: []const u8, comptime Container: type, bit_index: usize) Int {
            const container_bits = @bitSizeOf(Container);

            const start_byte = bit_index / 8;
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);

            //read bytes as container
            const value_ptr: *align(1) const Container = @ptrCast(&bytes[start_byte]);
            var value = value_ptr.*;

            if (endian != native_endian) value = @byteSwap(value);

            switch (endian) {
                .big => {
                    value <<= @intCast(head_keep_bits);
                    value >>= @intCast(head_keep_bits);
                    value >>= @intCast(tail_keep_bits);
                },
                .little => {
                    value <<= @intCast(tail_keep_bits);
                    value >>= @intCast(tail_keep_bits);
                    value >>= @intCast(head_keep_bits);
                },
            }

            return @bitCast(@as(UnInt, @truncate(value)));
        }

        /// Sets the integer at `index` to `val` within the packed data beginning
        /// at `bit_offset` into `bytes`.
        pub fn set(bytes: []u8, index: usize, bit_offset: u3, int: Int) void {
            if (int_bits == 0) return;

            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;

            //using the larger container size will potentially write out of bounds
            if (max_end_byte > bytes.len) return setBits(bytes, MinIo, bit_index, int);
            setBits(bytes, MaxIo, bit_index, int);
        }

        /// Read-modify-write: load a Container at the bit position, mask out
        /// the target bit range, merge in `int`, and store it back (swapping
        /// byte order on load/store when endianness differs from native).
        fn setBits(bytes: []u8, comptime Container: type, bit_index: usize, int: Int) void {
            const container_bits = @bitSizeOf(Container);
            const Shift = std.math.Log2Int(Container);

            const start_byte = bit_index / 8;
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
            const keep_shift: Shift = switch (endian) {
                .big => @intCast(tail_keep_bits),
                .little => @intCast(head_keep_bits),
            };

            //position the bits where they need to be in the container
            const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift;

            //read existing bytes
            const target_ptr: *align(1) Container = @ptrCast(&bytes[start_byte]);
            var target = target_ptr.*;

            if (endian != native_endian) target = @byteSwap(target);

            //zero the bits we want to replace in the existing bytes
            const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift;
            const mask = ~inv_mask;
            target &= mask;

            //merge the new value
            target |= value;

            if (endian != native_endian) target = @byteSwap(target);

            //save it back
            target_ptr.* = target;
        }

        /// Provides a PackedIntSlice of the packed integers in `bytes` (which begins at `bit_offset`)
        /// from the element specified by `start` to the element specified by `end`.
        pub fn slice(bytes: []u8, bit_offset: u3, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(end >= start);

            const length = end - start;
            const bit_index = (start * int_bits) + bit_offset;
            const start_byte = bit_index / 8;
            const end_byte = (bit_index + (length * int_bits) + 7) / 8;
            const new_bytes = bytes[start_byte..end_byte];

            if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0);

            var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length);
            new_slice.bit_offset = @intCast((bit_index - (start_byte * 8)));
            return new_slice;
        }

        /// Recasts a packed slice to a version with elements of type `NewInt` and endianness `new_endian`.
        /// Slice will begin at `bit_offset` within `bytes` and the new length will be automatically
        /// calculated from `old_len` using the sizes of the current integer type and `NewInt`.
        /// The total bit count must divide evenly into `NewInt`-sized elements
        /// (asserted below).
        pub fn sliceCast(bytes: []u8, comptime NewInt: type, comptime new_endian: Endian, bit_offset: u3, old_len: usize) PackedIntSliceEndian(NewInt, new_endian) {
            const new_int_bits = @bitSizeOf(NewInt);
            const New = PackedIntSliceEndian(NewInt, new_endian);

            const total_bits = (old_len * int_bits);
            const new_int_count = total_bits / new_int_bits;

            debug.assert(total_bits == new_int_count * new_int_bits);

            var new = New.init(bytes, new_int_count);
            new.bit_offset = bit_offset;

            return new;
        }
    };
}
/// Creates a bit-packed array of `Int`. Non-byte-multiple integers
/// will take up less memory in PackedIntArray than in a normal array.
/// Elements are packed using native endianness and without storing any
/// meta data. PackedIntArray(i3, 8) will occupy exactly 3 bytes
/// of memory (8 * 3 = 24 bits).
pub fn PackedIntArray(comptime Int: type, comptime int_count: usize) type {
    return PackedIntArrayEndian(Int, native_endian, int_count);
}
/// Creates a bit-packed array of `Int` with bit order specified by `endian`.
/// Non-byte-multiple integers will take up less memory in PackedIntArrayEndian
/// than in a normal array. Elements are packed without storing any meta data.
/// PackedIntArrayEndian(i3, 8) will occupy exactly 3 bytes of memory.
pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptime int_count: usize) type {
    const int_bits = @bitSizeOf(Int);
    const total_bits = int_bits * int_count;
    // Round up to a whole number of bytes.
    const total_bytes = (total_bits + 7) / 8;

    const Io = PackedIntIo(Int, endian);

    return struct {
        const Self = @This();

        /// The byte buffer containing the packed data.
        bytes: [total_bytes]u8,
        /// The number of elements in the packed array.
        comptime len: usize = int_count,

        /// The integer type of the packed array.
        pub const Child = Int;

        /// Initialize a packed array using an unpacked array
        /// or, more likely, an array literal.
        pub fn init(ints: [int_count]Int) Self {
            var self: Self = undefined;
            for (ints, 0..) |int, i| self.set(i, int);
            return self;
        }

        /// Initialize all entries of a packed array to the same value.
        pub fn initAllTo(int: Int) Self {
            var self: Self = undefined;
            self.setAll(int);
            return self;
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < int_count);
            return Io.get(&self.bytes, index, 0);
        }

        ///Copy the value of `int` into the array at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < int_count);
            return Io.set(&self.bytes, index, 0, int);
        }

        /// Set all entries of a packed array to the value of `int`.
        pub fn setAll(self: *Self, int: Int) void {
            var i: usize = 0;
            while (i < int_count) : (i += 1) {
                self.set(i, int);
            }
        }

        /// Create a PackedIntSlice of the array from `start` to `end`.
        pub fn slice(self: *Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < int_count);
            debug.assert(end <= int_count);
            return Io.slice(&self.bytes, 0, start, end);
        }

        /// Create a PackedIntSlice of the array using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the array's `Int`'s total bits.
        pub fn sliceCast(self: *Self, comptime NewInt: type) PackedIntSlice(NewInt) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the array using `NewInt` as the integer type
        /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
        /// within the array's `Int`'s total bits.
        pub fn sliceCastEndian(self: *Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(&self.bytes, NewInt, new_endian, 0, int_count);
        }
    };
}
/// A type representing a sub range of a PackedIntArray.
pub fn PackedIntSlice(comptime Int: type) type {
    // Convenience wrapper: identical to PackedIntSliceEndian pinned to the
    // host's native byte order.
    return PackedIntSliceEndian(Int, native_endian);
}
/// A type representing a sub range of a PackedIntArrayEndian.
pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: Endian) type {
    const int_bits = @bitSizeOf(Int);
    const Io = PackedIntIo(Int, endian);

    return struct {
        const Self = @This();

        /// Backing bytes; the slice does not own them (it typically views into
        /// a PackedIntArray or another packed slice).
        bytes: []u8,
        /// Bit position within `bytes[0]` where element 0 begins (0-7).
        bit_offset: u3,
        /// Number of packed elements viewed by this slice.
        len: usize,

        /// The integer type of the packed slice.
        pub const Child = Int;

        /// Calculates the number of bytes required to store a desired count
        /// of `Int`s.
        pub fn bytesRequired(int_count: usize) usize {
            const total_bits = int_bits * int_count;
            const total_bytes = (total_bits + 7) / 8;
            return total_bytes;
        }

        /// Initialize a packed slice using the memory at `bytes`, with `int_count`
        /// elements. `bytes` must be large enough to accommodate the requested
        /// count.
        pub fn init(bytes: []u8, int_count: usize) Self {
            debug.assert(bytes.len >= bytesRequired(int_count));

            return Self{
                .bytes = bytes,
                .len = int_count,
                .bit_offset = 0,
            };
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < self.len);
            return Io.get(self.bytes, index, self.bit_offset);
        }

        /// Copy `int` into the slice at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < self.len);
            return Io.set(self.bytes, index, self.bit_offset, int);
        }

        /// Create a PackedIntSlice of this slice from `start` to `end`.
        pub fn slice(self: Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < self.len);
            debug.assert(end <= self.len);
            return Io.slice(self.bytes, self.bit_offset, start, end);
        }

        /// Create a PackedIntSlice of the slice using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the slice's `Int`'s total bits.
        pub fn sliceCast(self: Self, comptime NewInt: type) PackedIntSliceEndian(NewInt, endian) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the slice using `NewInt` as the integer type
        /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
        /// within the slice's `Int`'s total bits.
        pub fn sliceCastEndian(self: Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(self.bytes, NewInt, new_endian, self.bit_offset, self.len);
        }
    };
}
test "PackedIntArray" {
    // TODO @setEvalBranchQuota generates panics in wasm32. Investigate.
    if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
    // TODO: enable this test
    if (true) return error.SkipZigTest;
    @setEvalBranchQuota(10000);

    const max_bits = 256;
    const int_count = 19;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned;
        const I = std.meta.Int(sign, bits);

        const PackedArray = PackedIntArray(I, int_count);
        // The array must occupy exactly the rounded-up byte count —
        // no per-element padding.
        const expected_bytes = ((bits * int_count) + 7) / 8;
        try testing.expect(@sizeOf(PackedArray) == expected_bytes);

        var data: PackedArray = undefined;

        //write values, counting up
        var i: usize = 0;
        var count: I = 0;
        while (i < data.len) : (i += 1) {
            data.set(i, count);
            // +%= wraps on purpose: the counter overflows narrow int types.
            if (bits > 0) count +%= 1;
        }

        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len) : (i += 1) {
            const val = data.get(i);
            try testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}
test "PackedIntIo" {
    // Three bytes holding a value starting at bit offset 3; reading with
    // progressively wider types exposes progressively more of the high bits.
    const bytes = [_]u8{ 0b01101_000, 0b01011_110, 0b00011_101 };
    try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u17, 0x1abcd), PackedIntIo(u17, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u18, 0x3abcd), PackedIntIo(u18, .little).get(&bytes, 0, 3));
}
test "PackedIntArray init" {
    // Run the same checks at runtime and at comptime.
    const Harness = struct {
        fn run() !void {
            const Array8 = PackedIntArray(u3, 8);
            const arr = Array8.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 });
            // Each element should read back as its own index.
            for (0..arr.len) |idx| {
                try testing.expectEqual(@as(u3, @intCast(idx)), arr.get(idx));
            }
        }
    };
    try Harness.run();
    try comptime Harness.run();
}
test "PackedIntArray initAllTo" {
    // Run the same checks at runtime and at comptime.
    const Harness = struct {
        fn run() !void {
            const Array8 = PackedIntArray(u3, 8);
            const arr = Array8.initAllTo(5);
            // Every slot must hold the broadcast value.
            for (0..arr.len) |idx| {
                try testing.expectEqual(@as(u3, 5), arr.get(idx));
            }
        }
    };
    try Harness.run();
    try comptime Harness.run();
}
test "PackedIntSlice" {
    // TODO @setEvalBranchQuota generates panics in wasm32. Investigate.
    if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
    // TODO enable this test
    if (true) return error.SkipZigTest;
    @setEvalBranchQuota(10000);

    const max_bits = 256;
    const int_count = 19;

    // One shared buffer sized for the widest element type under test.
    const total_bits = max_bits * int_count;
    const total_bytes = (total_bits + 7) / 8;

    var buffer: [total_bytes]u8 = undefined;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned;
        const I = std.meta.Int(sign, bits);

        const P = PackedIntSlice(I);
        var data = P.init(&buffer, int_count);

        //write values, counting up
        var i: usize = 0;
        var count: I = 0;
        while (i < data.len) : (i += 1) {
            data.set(i, count);
            // Wrapping add: the counter intentionally overflows narrow types.
            if (bits > 0) count +%= 1;
        }

        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len) : (i += 1) {
            const val = data.get(i);
            try testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}
test "PackedIntSlice of PackedInt(Array/Slice)" {
    // TODO enable this test
    if (true) return error.SkipZigTest;
    const max_bits = 16;
    const int_count = 19;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        const Int = std.meta.Int(.unsigned, bits);

        const PackedArray = PackedIntArray(Int, int_count);
        var packed_array: PackedArray = undefined;

        // Values are reduced mod 2^bits so they always fit the element type.
        const limit = (1 << bits);

        var i: usize = 0;
        while (i < packed_array.len) : (i += 1) {
            packed_array.set(i, @intCast(i % limit));
        }

        //slice of array
        var packed_slice = packed_array.slice(2, 5);
        try testing.expect(packed_slice.len == 3);
        // A slice should view exactly the bytes its bits occupy, including
        // the leading bit offset within the first byte.
        const ps_bit_count = (bits * packed_slice.len) + packed_slice.bit_offset;
        const ps_expected_bytes = (ps_bit_count + 7) / 8;
        try testing.expect(packed_slice.bytes.len == ps_expected_bytes);
        try testing.expect(packed_slice.get(0) == 2 % limit);
        try testing.expect(packed_slice.get(1) == 3 % limit);
        try testing.expect(packed_slice.get(2) == 4 % limit);
        packed_slice.set(1, 7 % limit);
        try testing.expect(packed_slice.get(1) == 7 % limit);

        //write through slice
        try testing.expect(packed_array.get(3) == 7 % limit);

        //slice of a slice
        const packed_slice_two = packed_slice.slice(0, 3);
        try testing.expect(packed_slice_two.len == 3);
        const ps2_bit_count = (bits * packed_slice_two.len) + packed_slice_two.bit_offset;
        const ps2_expected_bytes = (ps2_bit_count + 7) / 8;
        try testing.expect(packed_slice_two.bytes.len == ps2_expected_bytes);
        try testing.expect(packed_slice_two.get(1) == 7 % limit);
        try testing.expect(packed_slice_two.get(2) == 4 % limit);

        //size one case
        const packed_slice_three = packed_slice_two.slice(1, 2);
        try testing.expect(packed_slice_three.len == 1);
        const ps3_bit_count = (bits * packed_slice_three.len) + packed_slice_three.bit_offset;
        const ps3_expected_bytes = (ps3_bit_count + 7) / 8;
        try testing.expect(packed_slice_three.bytes.len == ps3_expected_bytes);
        try testing.expect(packed_slice_three.get(0) == 7 % limit);

        //empty slice case
        const packed_slice_empty = packed_slice.slice(0, 0);
        try testing.expect(packed_slice_empty.len == 0);
        try testing.expect(packed_slice_empty.bytes.len == 0);

        //slicing at byte boundaries
        const packed_slice_edge = packed_array.slice(8, 16);
        try testing.expect(packed_slice_edge.len == 8);
        const pse_bit_count = (bits * packed_slice_edge.len) + packed_slice_edge.bit_offset;
        const pse_expected_bytes = (pse_bit_count + 7) / 8;
        try testing.expect(packed_slice_edge.bytes.len == pse_expected_bytes);
        try testing.expect(packed_slice_edge.bit_offset == 0);
    }
}
test "PackedIntSlice accumulating bit offsets" {
    //bit_offset is u3, so standard debugging asserts should catch
    // anything
    {
        const PackedArray = PackedIntArray(u3, 16);
        var packed_array: PackedArray = undefined;
        var packed_slice = packed_array.slice(0, packed_array.len);
        // Repeatedly re-slicing off one element accumulates bit_offset; the
        // u3 field would trip a safety check if it ever exceeded 7.
        var i: usize = 0;
        while (i < packed_array.len - 1) : (i += 1) {
            packed_slice = packed_slice.slice(1, packed_slice.len);
        }
    }

    {
        // Repeat with an element width that is not byte-aligned.
        const PackedArray = PackedIntArray(u11, 88);
        var packed_array: PackedArray = undefined;
        var packed_slice = packed_array.slice(0, packed_array.len);
        var i: usize = 0;
        while (i < packed_array.len - 1) : (i += 1) {
            packed_slice = packed_slice.slice(1, packed_slice.len);
        }
    }
}
test "PackedInt(Array/Slice) sliceCast" {
    const PackedArray = PackedIntArray(u1, 16);
    var packed_array = PackedArray.init([_]u1{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 });
    const packed_slice_cast_2 = packed_array.sliceCast(u2);
    const packed_slice_cast_4 = packed_slice_cast_2.sliceCast(u4);
    var packed_slice_cast_9 = packed_array.slice(0, (packed_array.len / 9) * 9).sliceCast(u9);
    const packed_slice_cast_3 = packed_slice_cast_9.sliceCast(u3);

    var i: usize = 0;
    while (i < packed_slice_cast_2.len) : (i += 1) {
        // Bit order within a widened element depends on native endianness.
        const val = switch (native_endian) {
            .big => 0b01,
            .little => 0b10,
        };
        try testing.expect(packed_slice_cast_2.get(i) == val);
    }
    i = 0;
    while (i < packed_slice_cast_4.len) : (i += 1) {
        const val = switch (native_endian) {
            .big => 0b0101,
            .little => 0b1010,
        };
        try testing.expect(packed_slice_cast_4.get(i) == val);
    }
    i = 0;
    while (i < packed_slice_cast_9.len) : (i += 1) {
        const val = 0b010101010;
        try testing.expect(packed_slice_cast_9.get(i) == val);
        packed_slice_cast_9.set(i, 0b111000111);
    }
    i = 0;
    while (i < packed_slice_cast_3.len) : (i += 1) {
        // After the 0b111000111 writes above, u3 elements alternate between
        // all-ones and all-zeros regardless of endianness; the original
        // switch here had byte-identical arms, so it is collapsed.
        const val: u3 = if (i % 2 == 0) 0b111 else 0b000;
        try testing.expect(packed_slice_cast_3.get(i) == val);
    }
}
test "PackedInt(Array/Slice)Endian" {
    {
        const PackedArrayBe = PackedIntArrayEndian(u4, .big, 8);
        var packed_array_be = PackedArrayBe.init([_]u4{ 0, 1, 2, 3, 4, 5, 6, 7 });
        // Big-endian nibble packing: element 0 occupies the high half of byte 0.
        try testing.expect(packed_array_be.bytes[0] == 0b00000001);
        try testing.expect(packed_array_be.bytes[1] == 0b00100011);

        var i: usize = 0;
        while (i < packed_array_be.len) : (i += 1) {
            try testing.expect(packed_array_be.get(i) == i);
        }

        // Reinterpreting the same bytes as little-endian swaps nibble pairs.
        var packed_slice_le = packed_array_be.sliceCastEndian(u4, .little);
        i = 0;
        while (i < packed_slice_le.len) : (i += 1) {
            const val = if (i % 2 == 0) i + 1 else i - 1;
            try testing.expect(packed_slice_le.get(i) == val);
        }

        var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u4, .little);
        i = 0;
        while (i < packed_slice_le_shift.len) : (i += 1) {
            const val = if (i % 2 == 0) i else i + 2;
            try testing.expect(packed_slice_le_shift.get(i) == val);
        }
    }

    {
        // Repeat with a non-byte-aligned element width (u11).
        const PackedArrayBe = PackedIntArrayEndian(u11, .big, 8);
        var packed_array_be = PackedArrayBe.init([_]u11{ 0, 1, 2, 3, 4, 5, 6, 7 });
        // Expected raw layout of the leading 11-bit big-endian elements.
        try testing.expect(packed_array_be.bytes[0] == 0b00000000);
        try testing.expect(packed_array_be.bytes[1] == 0b00000000);
        try testing.expect(packed_array_be.bytes[2] == 0b00000100);
        try testing.expect(packed_array_be.bytes[3] == 0b00000001);
        try testing.expect(packed_array_be.bytes[4] == 0b00000000);

        var i: usize = 0;
        while (i < packed_array_be.len) : (i += 1) {
            try testing.expect(packed_array_be.get(i) == i);
        }

        var packed_slice_le = packed_array_be.sliceCastEndian(u11, .little);
        try testing.expect(packed_slice_le.get(0) == 0b00000000000);
        try testing.expect(packed_slice_le.get(1) == 0b00010000000);
        try testing.expect(packed_slice_le.get(2) == 0b00000000100);
        try testing.expect(packed_slice_le.get(3) == 0b00000000000);
        try testing.expect(packed_slice_le.get(4) == 0b00010000011);
        try testing.expect(packed_slice_le.get(5) == 0b00000000010);
        try testing.expect(packed_slice_le.get(6) == 0b10000010000);
        try testing.expect(packed_slice_le.get(7) == 0b00000111001);

        var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u11, .little);
        try testing.expect(packed_slice_le_shift.get(0) == 0b00010000000);
        try testing.expect(packed_slice_le_shift.get(1) == 0b00000000100);
        try testing.expect(packed_slice_le_shift.get(2) == 0b00000000000);
        try testing.expect(packed_slice_le_shift.get(3) == 0b00010000011);
    }
}
//@NOTE: Need to manually update this list as more POSIX OSes gain
// page-allocation support in std.heap (formerly DirectAllocator).
// These tests prove we aren't accidentally accessing memory past
// the end of the array/slice by placing it at the end of a page
// and reading the last element. The assumption is that the page
// after this one is not mapped and will cause a segfault if we
// don't account for the bounds.
test "PackedIntArray at end of available memory" {
    switch (builtin.target.os.tag) {
        .linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {},
        else => return,
    }
    const PackedArray = PackedIntArray(u3, 8);

    // Pad so the packed array sits flush against the end of a page: an
    // out-of-bounds access past the buffer would then fault.
    const Padded = struct {
        _: [std.mem.page_size - @sizeOf(PackedArray)]u8,
        p: PackedArray,
    };

    const allocator = std.testing.allocator;

    var pad = try allocator.create(Padded);
    defer allocator.destroy(pad);
    // Touch the very last element.
    pad.p.set(7, std.math.maxInt(u3));
}
test "PackedIntSlice at end of available memory" {
    switch (builtin.target.os.tag) {
        .linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {},
        else => return,
    }
    const PackedSlice = PackedIntSlice(u11);

    const allocator = std.testing.allocator;

    var page = try allocator.alloc(u8, std.mem.page_size);
    defer allocator.free(page);

    // View only the final two bytes of the page; writing the single 11-bit
    // element must not touch memory past the allocation.
    var p = PackedSlice.init(page[std.mem.page_size - 2 ..], 1);
    p.set(0, std.math.maxInt(u11));
}
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/lib/std/packed_int_array.zig |
//! A set of array and slice types that bit-pack integer elements. A normal [12]u3
//! takes up 12 bytes of memory since u3's alignment is 1. PackedArray(u3, 12) only
//! takes up 4 bytes of memory.
const std = @import("std");
const builtin = @import("builtin");
const debug = std.debug;
const testing = std.testing;
const native_endian = builtin.target.cpu.arch.endian();
const Endian = std.builtin.Endian;
/// Provides a set of functions for reading and writing packed integers from a
/// slice of bytes.
pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
    // The general technique employed here is to cast bytes in the array to a container
    // integer (having bits % 8 == 0) large enough to contain the number of bits we want,
    // then we can retrieve or store the new value with a relative minimum of masking
    // and shifting. In this worst case, this means that we'll need an integer that's
    // actually 1 byte larger than the minimum required to store the bits, because it
    // is possible that the bits start at the end of the first byte, continue through
    // zero or more, then end in the beginning of the last. But, if we try to access
    // a value in the very last byte of memory with that integer size, that extra byte
    // will be out of bounds. Depending on the circumstances of the memory, that might
    // mean the OS fatally kills the program. Thus, we use a larger container (MaxIo)
    // most of the time, but a smaller container (MinIo) when touching the last byte
    // of the memory.
    const int_bits = @bitSizeOf(Int);

    // In the best case, this is the number of bytes we need to touch
    // to read or write a value, as bits.
    const min_io_bits = ((int_bits + 7) / 8) * 8;

    // In the worst case, this is the number of bytes we need to touch
    // to read or write a value, as bits. To calculate for int_bits > 1,
    // set aside 2 bits to touch the first and last bytes, then divide
    // by 8 to see how many bytes can be filled up in between.
    const max_io_bits = switch (int_bits) {
        0 => 0,
        1 => 8,
        else => ((int_bits - 2) / 8 + 2) * 8,
    };

    // We bitcast the desired Int type to an unsigned version of itself
    // to avoid issues with shifting signed ints.
    const UnInt = std.meta.Int(.unsigned, int_bits);

    // The minimum container int type
    const MinIo = std.meta.Int(.unsigned, min_io_bits);

    // The maximum container int type
    const MaxIo = std.meta.Int(.unsigned, max_io_bits);

    return struct {
        /// Retrieves the integer at `index` from the packed data beginning at `bit_offset`
        /// within `bytes`.
        /// NOTE(review): `bit_offset` is `u7` here but `u3` in `set` —
        /// presumably historical; confirm before relying on offsets > 7.
        pub fn get(bytes: []const u8, index: usize, bit_offset: u7) Int {
            if (int_bits == 0) return 0;

            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;

            //using the larger container size will potentially read out of bounds
            if (max_end_byte > bytes.len) return getBits(bytes, MinIo, bit_index);
            return getBits(bytes, MaxIo, bit_index);
        }

        /// Reads the `int_bits`-wide value starting at absolute `bit_index`,
        /// loading `Container` bytes at a time. The load must stay within
        /// `bytes` — the caller (`get`) picks MinIo/MaxIo accordingly.
        fn getBits(bytes: []const u8, comptime Container: type, bit_index: usize) Int {
            const container_bits = @bitSizeOf(Container);

            const start_byte = bit_index / 8;

            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);

            //read bytes as container
            const value_ptr: *align(1) const Container = @ptrCast(&bytes[start_byte]);
            var value = value_ptr.*;

            if (endian != native_endian) value = @byteSwap(value);

            // Shift out the bits that belong to neighboring elements.
            switch (endian) {
                .big => {
                    value <<= @intCast(head_keep_bits);
                    value >>= @intCast(head_keep_bits);
                    value >>= @intCast(tail_keep_bits);
                },
                .little => {
                    value <<= @intCast(tail_keep_bits);
                    value >>= @intCast(tail_keep_bits);
                    value >>= @intCast(head_keep_bits);
                },
            }

            return @bitCast(@as(UnInt, @truncate(value)));
        }

        /// Sets the integer at `index` to `val` within the packed data beginning
        /// at `bit_offset` into `bytes`.
        pub fn set(bytes: []u8, index: usize, bit_offset: u3, int: Int) void {
            if (int_bits == 0) return;

            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;

            //using the larger container size will potentially write out of bounds
            if (max_end_byte > bytes.len) return setBits(bytes, MinIo, bit_index, int);
            setBits(bytes, MaxIo, bit_index, int);
        }

        /// Writes `int` into the `int_bits` bits starting at absolute
        /// `bit_index`, leaving all surrounding bits untouched.
        fn setBits(bytes: []u8, comptime Container: type, bit_index: usize, int: Int) void {
            const container_bits = @bitSizeOf(Container);
            const Shift = std.math.Log2Int(Container);

            const start_byte = bit_index / 8;
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
            const keep_shift: Shift = switch (endian) {
                .big => @intCast(tail_keep_bits),
                .little => @intCast(head_keep_bits),
            };

            //position the bits where they need to be in the container
            const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift;

            //read existing bytes
            const target_ptr: *align(1) Container = @ptrCast(&bytes[start_byte]);
            var target = target_ptr.*;

            if (endian != native_endian) target = @byteSwap(target);

            //zero the bits we want to replace in the existing bytes
            const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift;
            const mask = ~inv_mask;
            target &= mask;

            //merge the new value
            target |= value;

            if (endian != native_endian) target = @byteSwap(target);

            //save it back
            target_ptr.* = target;
        }

        /// Provides a PackedIntSlice of the packed integers in `bytes` (which begins at `bit_offset`)
        /// from the element specified by `start` to the element specified by `end`.
        pub fn slice(bytes: []u8, bit_offset: u3, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(end >= start);

            const length = end - start;
            const bit_index = (start * int_bits) + bit_offset;
            const start_byte = bit_index / 8;
            const end_byte = (bit_index + (length * int_bits) + 7) / 8;
            const new_bytes = bytes[start_byte..end_byte];

            if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0);

            var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length);
            // Remaining sub-byte offset of element `start` within the first byte.
            new_slice.bit_offset = @intCast((bit_index - (start_byte * 8)));
            return new_slice;
        }

        /// Recasts a packed slice to a version with elements of type `NewInt` and endianness `new_endian`.
        /// Slice will begin at `bit_offset` within `bytes` and the new length will be automatically
        /// calculated from `old_len` using the sizes of the current integer type and `NewInt`.
        pub fn sliceCast(bytes: []u8, comptime NewInt: type, comptime new_endian: Endian, bit_offset: u3, old_len: usize) PackedIntSliceEndian(NewInt, new_endian) {
            const new_int_bits = @bitSizeOf(NewInt);
            const New = PackedIntSliceEndian(NewInt, new_endian);

            const total_bits = (old_len * int_bits);
            const new_int_count = total_bits / new_int_bits;
            // The old bit span must divide evenly into the new element width.
            debug.assert(total_bits == new_int_count * new_int_bits);

            var new = New.init(bytes, new_int_count);
            new.bit_offset = bit_offset;

            return new;
        }
    };
}
/// Creates a bit-packed array of `Int`. Non-byte-multiple integers
/// will take up less memory in PackedIntArray than in a normal array.
/// Elements are packed using native endianness and without storing any
/// meta data. PackedArray(i3, 8) will occupy exactly 3 bytes
/// of memory.
pub fn PackedIntArray(comptime Int: type, comptime int_count: usize) type {
    // Convenience wrapper over the endian-aware variant, pinned to host order.
    return PackedIntArrayEndian(Int, native_endian, int_count);
}
/// Creates a bit-packed array of `Int` with bit order specified by `endian`.
/// Non-byte-multiple integers will take up less memory in PackedIntArrayEndian
/// than in a normal array. Elements are packed without storing any meta data.
/// PackedIntArrayEndian(i3, 8) will occupy exactly 3 bytes of memory.
pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptime int_count: usize) type {
    const int_bits = @bitSizeOf(Int);
    // Total storage rounded up to whole bytes.
    const total_bits = int_bits * int_count;
    const total_bytes = (total_bits + 7) / 8;

    const Io = PackedIntIo(Int, endian);

    return struct {
        const Self = @This();

        /// The byte buffer containing the packed data.
        bytes: [total_bytes]u8,
        /// The number of elements in the packed array.
        comptime len: usize = int_count,

        /// The integer type of the packed array.
        pub const Child = Int;

        /// Initialize a packed array using an unpacked array
        /// or, more likely, an array literal.
        pub fn init(ints: [int_count]Int) Self {
            var self: Self = undefined;
            for (ints, 0..) |int, i| self.set(i, int);
            return self;
        }

        /// Initialize all entries of a packed array to the same value.
        pub fn initAllTo(int: Int) Self {
            var self: Self = undefined;
            self.setAll(int);
            return self;
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < int_count);
            return Io.get(&self.bytes, index, 0);
        }

        /// Copy the value of `int` into the array at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < int_count);
            return Io.set(&self.bytes, index, 0, int);
        }

        /// Set all entries of a packed array to the value of `int`.
        pub fn setAll(self: *Self, int: Int) void {
            var i: usize = 0;
            while (i < int_count) : (i += 1) {
                self.set(i, int);
            }
        }

        /// Create a PackedIntSlice of the array from `start` to `end`.
        pub fn slice(self: *Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < int_count);
            debug.assert(end <= int_count);
            return Io.slice(&self.bytes, 0, start, end);
        }

        /// Create a PackedIntSlice of the array using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the array's `Int`'s total bits.
        pub fn sliceCast(self: *Self, comptime NewInt: type) PackedIntSlice(NewInt) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the array using `NewInt` as the integer type
        /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
        /// within the array's `Int`'s total bits.
        pub fn sliceCastEndian(self: *Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(&self.bytes, NewInt, new_endian, 0, int_count);
        }
    };
}
/// A type representing a sub range of a PackedIntArray.
pub fn PackedIntSlice(comptime Int: type) type {
    // Convenience wrapper: identical to PackedIntSliceEndian pinned to the
    // host's native byte order.
    return PackedIntSliceEndian(Int, native_endian);
}
/// A type representing a sub range of a PackedIntArrayEndian.
pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: Endian) type {
    const int_bits = @bitSizeOf(Int);
    const Io = PackedIntIo(Int, endian);

    return struct {
        const Self = @This();

        /// Backing bytes; the slice does not own them (it typically views into
        /// a PackedIntArray or another packed slice).
        bytes: []u8,
        /// Bit position within `bytes[0]` where element 0 begins (0-7).
        bit_offset: u3,
        /// Number of packed elements viewed by this slice.
        len: usize,

        /// The integer type of the packed slice.
        pub const Child = Int;

        /// Calculates the number of bytes required to store a desired count
        /// of `Int`s.
        pub fn bytesRequired(int_count: usize) usize {
            const total_bits = int_bits * int_count;
            const total_bytes = (total_bits + 7) / 8;
            return total_bytes;
        }

        /// Initialize a packed slice using the memory at `bytes`, with `int_count`
        /// elements. `bytes` must be large enough to accommodate the requested
        /// count.
        pub fn init(bytes: []u8, int_count: usize) Self {
            debug.assert(bytes.len >= bytesRequired(int_count));

            return Self{
                .bytes = bytes,
                .len = int_count,
                .bit_offset = 0,
            };
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < self.len);
            return Io.get(self.bytes, index, self.bit_offset);
        }

        /// Copy `int` into the slice at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < self.len);
            return Io.set(self.bytes, index, self.bit_offset, int);
        }

        /// Create a PackedIntSlice of this slice from `start` to `end`.
        pub fn slice(self: Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < self.len);
            debug.assert(end <= self.len);
            return Io.slice(self.bytes, self.bit_offset, start, end);
        }

        /// Create a PackedIntSlice of the slice using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the slice's `Int`'s total bits.
        pub fn sliceCast(self: Self, comptime NewInt: type) PackedIntSliceEndian(NewInt, endian) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the slice using `NewInt` as the integer type
        /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
        /// within the slice's `Int`'s total bits.
        pub fn sliceCastEndian(self: Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(self.bytes, NewInt, new_endian, self.bit_offset, self.len);
        }
    };
}
test "PackedIntArray" {
    // TODO @setEvalBranchQuota generates panics in wasm32. Investigate.
    if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
    // TODO: enable this test
    if (true) return error.SkipZigTest;
    @setEvalBranchQuota(10000);

    const max_bits = 256;
    const int_count = 19;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned;
        const I = std.meta.Int(sign, bits);

        const PackedArray = PackedIntArray(I, int_count);
        // The array must occupy exactly the rounded-up byte count —
        // no per-element padding.
        const expected_bytes = ((bits * int_count) + 7) / 8;
        try testing.expect(@sizeOf(PackedArray) == expected_bytes);

        var data: PackedArray = undefined;

        //write values, counting up
        var i: usize = 0;
        var count: I = 0;
        while (i < data.len) : (i += 1) {
            data.set(i, count);
            // +%= wraps on purpose: the counter overflows narrow int types.
            if (bits > 0) count +%= 1;
        }

        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len) : (i += 1) {
            const val = data.get(i);
            try testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}
test "PackedIntIo" {
    // Three bytes holding a value starting at bit offset 3; reading with
    // progressively wider types exposes progressively more of the high bits.
    const bytes = [_]u8{ 0b01101_000, 0b01011_110, 0b00011_101 };
    try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u17, 0x1abcd), PackedIntIo(u17, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u18, 0x3abcd), PackedIntIo(u18, .little).get(&bytes, 0, 3));
}
test "PackedIntArray init" {
    const S = struct {
        fn doTheTest() !void {
            const PackedArray = PackedIntArray(u3, 8);
            var packed_array = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 });
            // Each element should read back as its own index.
            var i: usize = 0;
            while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, @intCast(i)), packed_array.get(i));
        }
    };
    // Exercise both runtime and comptime evaluation paths.
    try S.doTheTest();
    try comptime S.doTheTest();
}
test "PackedIntArray initAllTo" {
    const S = struct {
        fn doTheTest() !void {
            const PackedArray = PackedIntArray(u3, 8);
            var packed_array = PackedArray.initAllTo(5);
            // Every slot must hold the broadcast value.
            var i: usize = 0;
            while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, 5), packed_array.get(i));
        }
    };
    // Exercise both runtime and comptime evaluation paths.
    try S.doTheTest();
    try comptime S.doTheTest();
}
test "PackedIntSlice" {
    // TODO @setEvalBranchQuota generates panics in wasm32. Investigate.
    if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
    // TODO enable this test
    if (true) return error.SkipZigTest;
    @setEvalBranchQuota(10000);

    const max_bits = 256;
    const int_count = 19;

    // One shared buffer sized for the widest element type under test.
    const total_bits = max_bits * int_count;
    const total_bytes = (total_bits + 7) / 8;

    var buffer: [total_bytes]u8 = undefined;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned;
        const I = std.meta.Int(sign, bits);

        const P = PackedIntSlice(I);
        var data = P.init(&buffer, int_count);

        //write values, counting up
        var i: usize = 0;
        var count: I = 0;
        while (i < data.len) : (i += 1) {
            data.set(i, count);
            // Wrapping add: the counter intentionally overflows narrow types.
            if (bits > 0) count +%= 1;
        }

        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len) : (i += 1) {
            const val = data.get(i);
            try testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}
test "PackedIntSlice of PackedInt(Array/Slice)" {
    // TODO enable this test
    if (true) return error.SkipZigTest;
    const max_bits = 16;
    const int_count = 19;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        const Int = std.meta.Int(.unsigned, bits);

        const PackedArray = PackedIntArray(Int, int_count);
        var packed_array: PackedArray = undefined;

        // Values are reduced mod 2^bits so they always fit the element type.
        const limit = (1 << bits);

        var i: usize = 0;
        while (i < packed_array.len) : (i += 1) {
            packed_array.set(i, @intCast(i % limit));
        }

        //slice of array
        var packed_slice = packed_array.slice(2, 5);
        try testing.expect(packed_slice.len == 3);
        // A slice should view exactly the bytes its bits occupy, including
        // the leading bit offset within the first byte.
        const ps_bit_count = (bits * packed_slice.len) + packed_slice.bit_offset;
        const ps_expected_bytes = (ps_bit_count + 7) / 8;
        try testing.expect(packed_slice.bytes.len == ps_expected_bytes);
        try testing.expect(packed_slice.get(0) == 2 % limit);
        try testing.expect(packed_slice.get(1) == 3 % limit);
        try testing.expect(packed_slice.get(2) == 4 % limit);
        packed_slice.set(1, 7 % limit);
        try testing.expect(packed_slice.get(1) == 7 % limit);

        //write through slice
        try testing.expect(packed_array.get(3) == 7 % limit);

        //slice of a slice
        const packed_slice_two = packed_slice.slice(0, 3);
        try testing.expect(packed_slice_two.len == 3);
        const ps2_bit_count = (bits * packed_slice_two.len) + packed_slice_two.bit_offset;
        const ps2_expected_bytes = (ps2_bit_count + 7) / 8;
        try testing.expect(packed_slice_two.bytes.len == ps2_expected_bytes);
        try testing.expect(packed_slice_two.get(1) == 7 % limit);
        try testing.expect(packed_slice_two.get(2) == 4 % limit);

        //size one case
        const packed_slice_three = packed_slice_two.slice(1, 2);
        try testing.expect(packed_slice_three.len == 1);
        const ps3_bit_count = (bits * packed_slice_three.len) + packed_slice_three.bit_offset;
        const ps3_expected_bytes = (ps3_bit_count + 7) / 8;
        try testing.expect(packed_slice_three.bytes.len == ps3_expected_bytes);
        try testing.expect(packed_slice_three.get(0) == 7 % limit);

        //empty slice case
        const packed_slice_empty = packed_slice.slice(0, 0);
        try testing.expect(packed_slice_empty.len == 0);
        try testing.expect(packed_slice_empty.bytes.len == 0);

        //slicing at byte boundaries
        const packed_slice_edge = packed_array.slice(8, 16);
        try testing.expect(packed_slice_edge.len == 8);
        const pse_bit_count = (bits * packed_slice_edge.len) + packed_slice_edge.bit_offset;
        const pse_expected_bytes = (pse_bit_count + 7) / 8;
        try testing.expect(packed_slice_edge.bytes.len == pse_expected_bytes);
        try testing.expect(packed_slice_edge.bit_offset == 0);
    }
}
test "PackedIntSlice accumulating bit offsets" {
//bit_offset is u3, so standard debugging asserts should catch
// anything
{
const PackedArray = PackedIntArray(u3, 16);
var packed_array: PackedArray = undefined;
var packed_slice = packed_array.slice(0, packed_array.len);
var i: usize = 0;
while (i < packed_array.len - 1) : (i += 1) {
packed_slice = packed_slice.slice(1, packed_slice.len);
}
}
{
const PackedArray = PackedIntArray(u11, 88);
var packed_array: PackedArray = undefined;
var packed_slice = packed_array.slice(0, packed_array.len);
var i: usize = 0;
while (i < packed_array.len - 1) : (i += 1) {
packed_slice = packed_slice.slice(1, packed_slice.len);
}
}
}
test "PackedInt(Array/Slice) sliceCast" {
const PackedArray = PackedIntArray(u1, 16);
var packed_array = PackedArray.init([_]u1{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 });
const packed_slice_cast_2 = packed_array.sliceCast(u2);
const packed_slice_cast_4 = packed_slice_cast_2.sliceCast(u4);
var packed_slice_cast_9 = packed_array.slice(0, (packed_array.len / 9) * 9).sliceCast(u9);
const packed_slice_cast_3 = packed_slice_cast_9.sliceCast(u3);
var i: usize = 0;
while (i < packed_slice_cast_2.len) : (i += 1) {
const val = switch (native_endian) {
.big => 0b01,
.little => 0b10,
};
try testing.expect(packed_slice_cast_2.get(i) == val);
}
i = 0;
while (i < packed_slice_cast_4.len) : (i += 1) {
const val = switch (native_endian) {
.big => 0b0101,
.little => 0b1010,
};
try testing.expect(packed_slice_cast_4.get(i) == val);
}
i = 0;
while (i < packed_slice_cast_9.len) : (i += 1) {
const val = 0b010101010;
try testing.expect(packed_slice_cast_9.get(i) == val);
packed_slice_cast_9.set(i, 0b111000111);
}
i = 0;
while (i < packed_slice_cast_3.len) : (i += 1) {
const val: u3 = switch (native_endian) {
.big => if (i % 2 == 0) 0b111 else 0b000,
.little => if (i % 2 == 0) 0b111 else 0b000,
};
try testing.expect(packed_slice_cast_3.get(i) == val);
}
}
test "PackedInt(Array/Slice)Endian" {
{
const PackedArrayBe = PackedIntArrayEndian(u4, .big, 8);
var packed_array_be = PackedArrayBe.init([_]u4{ 0, 1, 2, 3, 4, 5, 6, 7 });
try testing.expect(packed_array_be.bytes[0] == 0b00000001);
try testing.expect(packed_array_be.bytes[1] == 0b00100011);
var i: usize = 0;
while (i < packed_array_be.len) : (i += 1) {
try testing.expect(packed_array_be.get(i) == i);
}
var packed_slice_le = packed_array_be.sliceCastEndian(u4, .little);
i = 0;
while (i < packed_slice_le.len) : (i += 1) {
const val = if (i % 2 == 0) i + 1 else i - 1;
try testing.expect(packed_slice_le.get(i) == val);
}
var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u4, .little);
i = 0;
while (i < packed_slice_le_shift.len) : (i += 1) {
const val = if (i % 2 == 0) i else i + 2;
try testing.expect(packed_slice_le_shift.get(i) == val);
}
}
{
const PackedArrayBe = PackedIntArrayEndian(u11, .big, 8);
var packed_array_be = PackedArrayBe.init([_]u11{ 0, 1, 2, 3, 4, 5, 6, 7 });
try testing.expect(packed_array_be.bytes[0] == 0b00000000);
try testing.expect(packed_array_be.bytes[1] == 0b00000000);
try testing.expect(packed_array_be.bytes[2] == 0b00000100);
try testing.expect(packed_array_be.bytes[3] == 0b00000001);
try testing.expect(packed_array_be.bytes[4] == 0b00000000);
var i: usize = 0;
while (i < packed_array_be.len) : (i += 1) {
try testing.expect(packed_array_be.get(i) == i);
}
var packed_slice_le = packed_array_be.sliceCastEndian(u11, .little);
try testing.expect(packed_slice_le.get(0) == 0b00000000000);
try testing.expect(packed_slice_le.get(1) == 0b00010000000);
try testing.expect(packed_slice_le.get(2) == 0b00000000100);
try testing.expect(packed_slice_le.get(3) == 0b00000000000);
try testing.expect(packed_slice_le.get(4) == 0b00010000011);
try testing.expect(packed_slice_le.get(5) == 0b00000000010);
try testing.expect(packed_slice_le.get(6) == 0b10000010000);
try testing.expect(packed_slice_le.get(7) == 0b00000111001);
var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u11, .little);
try testing.expect(packed_slice_le_shift.get(0) == 0b00010000000);
try testing.expect(packed_slice_le_shift.get(1) == 0b00000000100);
try testing.expect(packed_slice_le_shift.get(2) == 0b00000000000);
try testing.expect(packed_slice_le_shift.get(3) == 0b00010000011);
}
}
//@NOTE: Need to manually update this list as more posix os's get
// added to DirectAllocator.
// These tests prove we aren't accidentally accessing memory past
// the end of the array/slice by placing it at the end of a page
// and reading the last element. The assumption is that the page
// after this one is not mapped and will cause a segfault if we
// don't account for the bounds.
test "PackedIntArray at end of available memory" {
switch (builtin.target.os.tag) {
.linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {},
else => return,
}
const PackedArray = PackedIntArray(u3, 8);
const Padded = struct {
_: [std.mem.page_size - @sizeOf(PackedArray)]u8,
p: PackedArray,
};
const allocator = std.testing.allocator;
var pad = try allocator.create(Padded);
defer allocator.destroy(pad);
pad.p.set(7, std.math.maxInt(u3));
}
test "PackedIntSlice at end of available memory" {
switch (builtin.target.os.tag) {
.linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {},
else => return,
}
const PackedSlice = PackedIntSlice(u11);
const allocator = std.testing.allocator;
var page = try allocator.alloc(u8, std.mem.page_size);
defer allocator.free(page);
var p = PackedSlice.init(page[std.mem.page_size - 2 ..], 1);
p.set(0, std.math.maxInt(u11));
}
| https://raw.githubusercontent.com/cyberegoorg/cetech1-zig/7438a7b157a4047261d161c06248b54fe9d822eb/lib/std/packed_int_array.zig |
//! A set of array and slice types that bit-pack integer elements. A normal [12]u3
//! takes up 12 bytes of memory since u3's alignment is 1. PackedIntArray(u3, 12)
//! only takes up 5 bytes of memory.
const std = @import("std");
const builtin = @import("builtin");
const debug = std.debug;
const testing = std.testing;
const native_endian = builtin.target.cpu.arch.endian();
const Endian = std.builtin.Endian;
/// Provides a set of functions for reading and writing packed integers from a
/// slice of bytes.
pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
    // The general technique employed here is to cast bytes in the array to a container
    // integer (having bits % 8 == 0) large enough to contain the number of bits we want,
    // then we can retrieve or store the new value with a relative minimum of masking
    // and shifting. In this worst case, this means that we'll need an integer that's
    // actually 1 byte larger than the minimum required to store the bits, because it
    // is possible that the bits start at the end of the first byte, continue through
    // zero or more, then end in the beginning of the last. But, if we try to access
    // a value in the very last byte of memory with that integer size, that extra byte
    // will be out of bounds. Depending on the circumstances of the memory, that might
    // mean the OS fatally kills the program. Thus, we use a larger container (MaxIo)
    // most of the time, but a smaller container (MinIo) when touching the last byte
    // of the memory.
    const int_bits = @bitSizeOf(Int);
    // In the best case, this is the number of bytes we need to touch
    // to read or write a value, as bits.
    const min_io_bits = ((int_bits + 7) / 8) * 8;
    // In the worst case, this is the number of bytes we need to touch
    // to read or write a value, as bits. To calculate for int_bits > 1,
    // set aside 2 bits to touch the first and last bytes, then divide
    // by 8 to see how many bytes can be filled up in between.
    const max_io_bits = switch (int_bits) {
        0 => 0,
        1 => 8,
        else => ((int_bits - 2) / 8 + 2) * 8,
    };
    // We bitcast the desired Int type to an unsigned version of itself
    // to avoid issues with shifting signed ints.
    const UnInt = std.meta.Int(.unsigned, int_bits);
    // The minimum container int type
    const MinIo = std.meta.Int(.unsigned, min_io_bits);
    // The maximum container int type
    const MaxIo = std.meta.Int(.unsigned, max_io_bits);
    return struct {
        /// Retrieves the integer at `index` from the packed data beginning at `bit_offset`
        /// within `bytes`.
        // NOTE(review): `bit_offset` is a u7 here but a u3 in `set` — the
        // asymmetry looks deliberate (reads at larger sub-container offsets)
        // but is undocumented; confirm before unifying the types.
        pub fn get(bytes: []const u8, index: usize, bit_offset: u7) Int {
            if (int_bits == 0) return 0;
            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;
            //using the larger container size will potentially read out of bounds
            if (max_end_byte > bytes.len) return getBits(bytes, MinIo, bit_index);
            return getBits(bytes, MaxIo, bit_index);
        }
        /// Loads a `Container`-sized window starting at the byte holding
        /// `bit_index`, then shifts away the bits before (head) and after
        /// (tail) the value. `Container` must be wide enough that this load
        /// stays inside `bytes`.
        fn getBits(bytes: []const u8, comptime Container: type, bit_index: usize) Int {
            const container_bits = @bitSizeOf(Container);
            const start_byte = bit_index / 8;
            // Bits below/above the value inside the container that must be discarded.
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
            //read bytes as container
            const value_ptr: *align(1) const Container = @ptrCast(&bytes[start_byte]);
            var value = value_ptr.*;
            // Normalize the loaded container to the requested endianness
            // before any shifting.
            if (endian != native_endian) value = @byteSwap(value);
            switch (endian) {
                .big => {
                    // Shift-left/shift-right clears the head bits, then the
                    // final right shift drops the tail bits.
                    value <<= @intCast(head_keep_bits);
                    value >>= @intCast(head_keep_bits);
                    value >>= @intCast(tail_keep_bits);
                },
                .little => {
                    value <<= @intCast(tail_keep_bits);
                    value >>= @intCast(tail_keep_bits);
                    value >>= @intCast(head_keep_bits);
                },
            }
            // Truncate to the unsigned representation, then bitcast back to
            // (possibly signed) Int.
            return @bitCast(@as(UnInt, @truncate(value)));
        }
        /// Sets the integer at `index` to `val` within the packed data beginning
        /// at `bit_offset` into `bytes`.
        pub fn set(bytes: []u8, index: usize, bit_offset: u3, int: Int) void {
            if (int_bits == 0) return;
            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;
            //using the larger container size will potentially write out of bounds
            if (max_end_byte > bytes.len) return setBits(bytes, MinIo, bit_index, int);
            setBits(bytes, MaxIo, bit_index, int);
        }
        /// Read-modify-write of a `Container`-sized window: clears the bits
        /// occupied by the value at `bit_index` and merges `int` in. The same
        /// in-bounds requirement as `getBits` applies.
        fn setBits(bytes: []u8, comptime Container: type, bit_index: usize, int: Int) void {
            const container_bits = @bitSizeOf(Container);
            const Shift = std.math.Log2Int(Container);
            const start_byte = bit_index / 8;
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
            const keep_shift: Shift = switch (endian) {
                .big => @intCast(tail_keep_bits),
                .little => @intCast(head_keep_bits),
            };
            //position the bits where they need to be in the container
            const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift;
            //read existing bytes
            const target_ptr: *align(1) Container = @ptrCast(&bytes[start_byte]);
            var target = target_ptr.*;
            if (endian != native_endian) target = @byteSwap(target);
            //zero the bits we want to replace in the existing bytes
            const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift;
            const mask = ~inv_mask;
            target &= mask;
            //merge the new value
            target |= value;
            // Swap back to memory order before storing.
            if (endian != native_endian) target = @byteSwap(target);
            //save it back
            target_ptr.* = target;
        }
        /// Provides a PackedIntSlice of the packed integers in `bytes` (which begins at `bit_offset`)
        /// from the element specified by `start` to the element specified by `end`.
        pub fn slice(bytes: []u8, bit_offset: u3, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(end >= start);
            const length = end - start;
            const bit_index = (start * int_bits) + bit_offset;
            const start_byte = bit_index / 8;
            // Round the end of the bit range up to a whole byte.
            const end_byte = (bit_index + (length * int_bits) + 7) / 8;
            const new_bytes = bytes[start_byte..end_byte];
            if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0);
            var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length);
            // The slice may start mid-byte; record the residual bit offset.
            new_slice.bit_offset = @intCast((bit_index - (start_byte * 8)));
            return new_slice;
        }
        /// Recasts a packed slice to a version with elements of type `NewInt` and endianness `new_endian`.
        /// Slice will begin at `bit_offset` within `bytes` and the new length will be automatically
        /// calculated from `old_len` using the sizes of the current integer type and `NewInt`.
        /// Asserts that the total bit count divides evenly into `NewInt`-sized elements.
        pub fn sliceCast(bytes: []u8, comptime NewInt: type, comptime new_endian: Endian, bit_offset: u3, old_len: usize) PackedIntSliceEndian(NewInt, new_endian) {
            const new_int_bits = @bitSizeOf(NewInt);
            const New = PackedIntSliceEndian(NewInt, new_endian);
            const total_bits = (old_len * int_bits);
            const new_int_count = total_bits / new_int_bits;
            debug.assert(total_bits == new_int_count * new_int_bits);
            var new = New.init(bytes, new_int_count);
            new.bit_offset = bit_offset;
            return new;
        }
    };
}
/// Creates a bit-packed array of `Int`. Non-byte-multiple integers
/// will take up less memory in PackedIntArray than in a normal array.
/// Elements are packed using native endianness and without storing any
/// meta data. PackedIntArray(i3, 8) will occupy exactly 3 bytes
/// of memory.
/// Shorthand for `PackedIntArrayEndian` with the host's native endianness.
pub fn PackedIntArray(comptime Int: type, comptime int_count: usize) type {
    return PackedIntArrayEndian(Int, native_endian, int_count);
}
/// Creates a bit-packed array of `Int` with bit order specified by `endian`.
/// Non-byte-multiple integers will take up less memory in PackedIntArrayEndian
/// than in a normal array. Elements are packed without storing any meta data.
/// PackedIntArrayEndian(i3, 8) will occupy exactly 3 bytes of memory.
pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptime int_count: usize) type {
    const Io = PackedIntIo(Int, endian);
    // Storage for all elements, rounded up to a whole number of bytes.
    const bits_per_elem = @bitSizeOf(Int);
    const byte_count = (bits_per_elem * int_count + 7) / 8;
    return struct {
        const Self = @This();

        /// The byte buffer containing the packed data.
        bytes: [byte_count]u8,
        /// The number of elements in the packed array.
        comptime len: usize = int_count,

        /// The integer type of the packed array.
        pub const Child = Int;

        /// Initialize a packed array from an unpacked array
        /// (most often an array literal).
        pub fn init(ints: [int_count]Int) Self {
            var self: Self = undefined;
            for (ints, 0..) |int, i| self.set(i, int);
            return self;
        }

        /// Initialize every entry of a packed array to the same value.
        pub fn initAllTo(int: Int) Self {
            var self: Self = undefined;
            self.setAll(int);
            return self;
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < int_count);
            return Io.get(&self.bytes, index, 0);
        }

        /// Copy the value of `int` into the array at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < int_count);
            return Io.set(&self.bytes, index, 0, int);
        }

        /// Set all entries of the packed array to the value of `int`.
        pub fn setAll(self: *Self, int: Int) void {
            for (0..int_count) |i| self.set(i, int);
        }

        /// Create a PackedIntSlice of the array from `start` to `end`.
        pub fn slice(self: *Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < int_count);
            debug.assert(end <= int_count);
            return Io.slice(&self.bytes, 0, start, end);
        }

        /// Create a PackedIntSlice of the array using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the array's `Int`'s total bits.
        pub fn sliceCast(self: *Self, comptime NewInt: type) PackedIntSlice(NewInt) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the array using `NewInt` as the integer
        /// type and `new_endian` as the new endianness. `NewInt`'s bit width must
        /// fit evenly within the array's `Int`'s total bits.
        pub fn sliceCastEndian(self: *Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(&self.bytes, NewInt, new_endian, 0, int_count);
        }
    };
}
/// A type representing a sub range of a PackedIntArray.
/// Shorthand for `PackedIntSliceEndian` with the host's native endianness.
pub fn PackedIntSlice(comptime Int: type) type {
    return PackedIntSliceEndian(Int, native_endian);
}
/// A type representing a sub range of a PackedIntArrayEndian.
/// Does not own its memory; it is a view (`bytes` plus a sub-byte
/// `bit_offset`) over packed storage provided by the caller.
pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: Endian) type {
    const Io = PackedIntIo(Int, endian);
    const bits_per_elem = @bitSizeOf(Int);
    return struct {
        const Self = @This();

        /// Backing storage; not owned by the slice.
        bytes: []u8,
        /// Bit position of element 0 within `bytes[0]`.
        bit_offset: u3,
        /// Number of packed elements viewed by this slice.
        len: usize,

        /// The integer type of the packed slice.
        pub const Child = Int;

        /// Calculates the number of bytes required to store a desired count
        /// of `Int`s.
        pub fn bytesRequired(int_count: usize) usize {
            return (bits_per_elem * int_count + 7) / 8;
        }

        /// Initialize a packed slice using the memory at `bytes`, with `int_count`
        /// elements. `bytes` must be large enough to accommodate the requested
        /// count.
        pub fn init(bytes: []u8, int_count: usize) Self {
            debug.assert(bytes.len >= bytesRequired(int_count));
            return .{
                .bytes = bytes,
                .bit_offset = 0,
                .len = int_count,
            };
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < self.len);
            return Io.get(self.bytes, index, self.bit_offset);
        }

        /// Copy `int` into the slice at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < self.len);
            return Io.set(self.bytes, index, self.bit_offset, int);
        }

        /// Create a PackedIntSlice of this slice from `start` to `end`.
        pub fn slice(self: Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < self.len);
            debug.assert(end <= self.len);
            return Io.slice(self.bytes, self.bit_offset, start, end);
        }

        /// Create a PackedIntSlice of the slice using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the slice's `Int`'s total bits.
        pub fn sliceCast(self: Self, comptime NewInt: type) PackedIntSliceEndian(NewInt, endian) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the slice using `NewInt` as the integer
        /// type and `new_endian` as the new endianness. `NewInt`'s bit width must
        /// fit evenly within the slice's `Int`'s total bits.
        pub fn sliceCastEndian(self: Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(self.bytes, NewInt, new_endian, self.bit_offset, self.len);
        }
    };
}
// Round-trips a counting pattern through PackedIntArray for every bit width
// from 0 to 256, alternating signedness, and checks that the array occupies
// exactly ceil(bits * count / 8) bytes.
test "PackedIntArray" {
    // TODO @setEvalBranchQuota generates panics in wasm32. Investigate.
    if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
    // TODO: enable this test
    if (true) return error.SkipZigTest;
    @setEvalBranchQuota(10000);
    const max_bits = 256;
    const int_count = 19;
    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned;
        const I = std.meta.Int(sign, bits);
        const PackedArray = PackedIntArray(I, int_count);
        // No wasted space: exactly the rounded-up bit total.
        const expected_bytes = ((bits * int_count) + 7) / 8;
        try testing.expect(@sizeOf(PackedArray) == expected_bytes);
        var data: PackedArray = undefined;
        //write values, counting up
        var i: usize = 0;
        var count: I = 0;
        while (i < data.len) : (i += 1) {
            data.set(i, count);
            // +% wraps; a zero-bit integer can only ever hold 0.
            if (bits > 0) count +%= 1;
        }
        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len) : (i += 1) {
            const val = data.get(i);
            try testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}
test "PackedIntIo" {
    const bytes = [_]u8{ 0b01101_000, 0b01011_110, 0b00011_101 };
    // Decode the same three bytes at bit offset 3 as progressively wider
    // little-endian integers; each extra bit widens the expected value.
    inline for (.{
        .{ u15, 0x2bcd },
        .{ u16, 0xabcd },
        .{ u17, 0x1abcd },
        .{ u18, 0x3abcd },
    }) |case| {
        const T = case[0];
        try testing.expectEqual(@as(T, case[1]), PackedIntIo(T, .little).get(&bytes, 0, 3));
    }
}
test "PackedIntArray init" {
    const S = struct {
        // Shared body so the identical checks run at runtime and comptime.
        fn doTheTest() !void {
            const PackedArray = PackedIntArray(u3, 8);
            const packed_array = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 });
            for (0..packed_array.len) |i| {
                try testing.expectEqual(@as(u3, @intCast(i)), packed_array.get(i));
            }
        }
    };
    try S.doTheTest();
    try comptime S.doTheTest();
}
test "PackedIntArray initAllTo" {
    const S = struct {
        // Shared body so the identical checks run at runtime and comptime.
        fn doTheTest() !void {
            const PackedArray = PackedIntArray(u3, 8);
            const packed_array = PackedArray.initAllTo(5);
            for (0..packed_array.len) |i| {
                try testing.expectEqual(@as(u3, 5), packed_array.get(i));
            }
        }
    };
    try S.doTheTest();
    try comptime S.doTheTest();
}
// Same round-trip coverage as the PackedIntArray test above, but through a
// PackedIntSlice view over one shared byte buffer.
test "PackedIntSlice" {
    // TODO @setEvalBranchQuota generates panics in wasm32. Investigate.
    if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
    // TODO enable this test
    if (true) return error.SkipZigTest;
    @setEvalBranchQuota(10000);
    const max_bits = 256;
    const int_count = 19;
    // One buffer sized for the widest element type serves every iteration.
    const total_bits = max_bits * int_count;
    const total_bytes = (total_bits + 7) / 8;
    var buffer: [total_bytes]u8 = undefined;
    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned;
        const I = std.meta.Int(sign, bits);
        const P = PackedIntSlice(I);
        var data = P.init(&buffer, int_count);
        //write values, counting up
        var i: usize = 0;
        var count: I = 0;
        while (i < data.len) : (i += 1) {
            data.set(i, count);
            // +% wraps; a zero-bit integer can only ever hold 0.
            if (bits > 0) count +%= 1;
        }
        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len) : (i += 1) {
            const val = data.get(i);
            try testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}
// Exercises slicing: slice of array, slice of slice, single-element slice,
// empty slice, and a slice starting exactly on a byte boundary — checking
// lengths, backing byte counts, element values, and write-through.
test "PackedIntSlice of PackedInt(Array/Slice)" {
    // TODO enable this test
    if (true) return error.SkipZigTest;
    const max_bits = 16;
    const int_count = 19;
    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        const Int = std.meta.Int(.unsigned, bits);
        const PackedArray = PackedIntArray(Int, int_count);
        var packed_array: PackedArray = undefined;
        // All stored/expected values are reduced mod 2^bits.
        const limit = (1 << bits);
        var i: usize = 0;
        while (i < packed_array.len) : (i += 1) {
            packed_array.set(i, @intCast(i % limit));
        }
        //slice of array
        var packed_slice = packed_array.slice(2, 5);
        try testing.expect(packed_slice.len == 3);
        // The slice's backing bytes are the minimal rounded-up span,
        // including its sub-byte bit_offset.
        const ps_bit_count = (bits * packed_slice.len) + packed_slice.bit_offset;
        const ps_expected_bytes = (ps_bit_count + 7) / 8;
        try testing.expect(packed_slice.bytes.len == ps_expected_bytes);
        try testing.expect(packed_slice.get(0) == 2 % limit);
        try testing.expect(packed_slice.get(1) == 3 % limit);
        try testing.expect(packed_slice.get(2) == 4 % limit);
        packed_slice.set(1, 7 % limit);
        try testing.expect(packed_slice.get(1) == 7 % limit);
        //write through slice
        try testing.expect(packed_array.get(3) == 7 % limit);
        //slice of a slice
        const packed_slice_two = packed_slice.slice(0, 3);
        try testing.expect(packed_slice_two.len == 3);
        const ps2_bit_count = (bits * packed_slice_two.len) + packed_slice_two.bit_offset;
        const ps2_expected_bytes = (ps2_bit_count + 7) / 8;
        try testing.expect(packed_slice_two.bytes.len == ps2_expected_bytes);
        try testing.expect(packed_slice_two.get(1) == 7 % limit);
        try testing.expect(packed_slice_two.get(2) == 4 % limit);
        //size one case
        const packed_slice_three = packed_slice_two.slice(1, 2);
        try testing.expect(packed_slice_three.len == 1);
        const ps3_bit_count = (bits * packed_slice_three.len) + packed_slice_three.bit_offset;
        const ps3_expected_bytes = (ps3_bit_count + 7) / 8;
        try testing.expect(packed_slice_three.bytes.len == ps3_expected_bytes);
        try testing.expect(packed_slice_three.get(0) == 7 % limit);
        //empty slice case
        const packed_slice_empty = packed_slice.slice(0, 0);
        try testing.expect(packed_slice_empty.len == 0);
        try testing.expect(packed_slice_empty.bytes.len == 0);
        //slicing at byte boundaries
        const packed_slice_edge = packed_array.slice(8, 16);
        try testing.expect(packed_slice_edge.len == 8);
        const pse_bit_count = (bits * packed_slice_edge.len) + packed_slice_edge.bit_offset;
        const pse_expected_bytes = (pse_bit_count + 7) / 8;
        try testing.expect(packed_slice_edge.bytes.len == pse_expected_bytes);
        try testing.expect(packed_slice_edge.bit_offset == 0);
    }
}
test "PackedIntSlice accumulating bit offsets" {
    // bit_offset is a u3; if repeated re-slicing ever produced an offset of
    // 8 or more bits, the standard debug-mode safety asserts would trip.
    inline for (.{ PackedIntArray(u3, 16), PackedIntArray(u11, 88) }) |PackedArray| {
        var packed_array: PackedArray = undefined;
        var packed_slice = packed_array.slice(0, packed_array.len);
        for (0..packed_array.len - 1) |_| {
            packed_slice = packed_slice.slice(1, packed_slice.len);
        }
    }
}
test "PackedInt(Array/Slice) sliceCast" {
    const PackedArray = PackedIntArray(u1, 16);
    var packed_array = PackedArray.init([_]u1{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 });
    const packed_slice_cast_2 = packed_array.sliceCast(u2);
    const packed_slice_cast_4 = packed_slice_cast_2.sliceCast(u4);
    var packed_slice_cast_9 = packed_array.slice(0, (packed_array.len / 9) * 9).sliceCast(u9);
    const packed_slice_cast_3 = packed_slice_cast_9.sliceCast(u3);

    // The 0,1,0,1,... bit pattern groups differently per endianness.
    const expected_2: u2 = switch (native_endian) {
        .big => 0b01,
        .little => 0b10,
    };
    for (0..packed_slice_cast_2.len) |i| {
        try testing.expectEqual(expected_2, packed_slice_cast_2.get(i));
    }

    const expected_4: u4 = switch (native_endian) {
        .big => 0b0101,
        .little => 0b1010,
    };
    for (0..packed_slice_cast_4.len) |i| {
        try testing.expectEqual(expected_4, packed_slice_cast_4.get(i));
    }

    // Verify the u9 view, then overwrite it through the cast slice.
    for (0..packed_slice_cast_9.len) |i| {
        try testing.expectEqual(@as(u9, 0b010101010), packed_slice_cast_9.get(i));
        packed_slice_cast_9.set(i, 0b111000111);
    }

    // The u3 view shares bytes with the u9 view and sees the rewrite;
    // 0b111000111 reads the same under either endianness.
    for (0..packed_slice_cast_3.len) |i| {
        const expected_3: u3 = if (i % 2 == 0) 0b111 else 0b000;
        try testing.expectEqual(expected_3, packed_slice_cast_3.get(i));
    }
}
test "PackedInt(Array/Slice)Endian" {
    {
        const PackedArrayBe = PackedIntArrayEndian(u4, .big, 8);
        var packed_array_be = PackedArrayBe.init([_]u4{ 0, 1, 2, 3, 4, 5, 6, 7 });
        // Big-endian packing: the first element occupies the high nibble.
        try testing.expectEqual(@as(u8, 0b00000001), packed_array_be.bytes[0]);
        try testing.expectEqual(@as(u8, 0b00100011), packed_array_be.bytes[1]);
        for (0..packed_array_be.len) |i| {
            try testing.expectEqual(@as(u4, @intCast(i)), packed_array_be.get(i));
        }
        // Re-reading the same bytes little-endian swaps each nibble pair.
        const packed_slice_le = packed_array_be.sliceCastEndian(u4, .little);
        for (0..packed_slice_le.len) |i| {
            const expected = if (i % 2 == 0) i + 1 else i - 1;
            try testing.expectEqual(expected, packed_slice_le.get(i));
        }
        // Same recast, but starting mid-byte (elements 1..4).
        const packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u4, .little);
        for (0..packed_slice_le_shift.len) |i| {
            const expected = if (i % 2 == 0) i else i + 2;
            try testing.expectEqual(expected, packed_slice_le_shift.get(i));
        }
    }
    {
        const PackedArrayBe = PackedIntArrayEndian(u11, .big, 8);
        var packed_array_be = PackedArrayBe.init([_]u11{ 0, 1, 2, 3, 4, 5, 6, 7 });
        // Spot-check the leading bytes of the big-endian bit layout.
        const head_bytes = [_]u8{
            0b00000000,
            0b00000000,
            0b00000100,
            0b00000001,
            0b00000000,
        };
        for (head_bytes, 0..) |expected, i| {
            try testing.expectEqual(expected, packed_array_be.bytes[i]);
        }
        for (0..packed_array_be.len) |i| {
            try testing.expectEqual(@as(u11, @intCast(i)), packed_array_be.get(i));
        }
        const packed_slice_le = packed_array_be.sliceCastEndian(u11, .little);
        const le_values = [_]u11{
            0b00000000000,
            0b00010000000,
            0b00000000100,
            0b00000000000,
            0b00010000011,
            0b00000000010,
            0b10000010000,
            0b00000111001,
        };
        for (le_values, 0..) |expected, i| {
            try testing.expectEqual(expected, packed_slice_le.get(i));
        }
        const packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u11, .little);
        const le_shift_values = [_]u11{
            0b00010000000,
            0b00000000100,
            0b00000000000,
            0b00010000011,
        };
        for (le_shift_values, 0..) |expected, i| {
            try testing.expectEqual(expected, packed_slice_le_shift.get(i));
        }
    }
}
//@NOTE: Need to manually update this list as more posix OSes gain
// page-allocator support.
// These tests prove we aren't accidentally accessing memory past
// the end of the array/slice by placing it at the end of a page
// and reading the last element. The assumption is that the page
// after this one is not mapped and will cause a segfault if we
// don't account for the bounds.
test "PackedIntArray at end of available memory" {
    switch (builtin.target.os.tag) {
        .linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {},
        else => return,
    }
    const P = PackedIntArray(u3, 8);
    // Pad a page-sized struct so the packed array sits flush against the
    // end of the page; an access even one byte past it could then fault.
    const PageTail = struct {
        _: [std.mem.page_size - @sizeOf(P)]u8,
        p: P,
    };
    const allocator = std.testing.allocator;
    const tail = try allocator.create(PageTail);
    defer allocator.destroy(tail);
    // Write the very last element through the packed accessor.
    tail.p.set(7, std.math.maxInt(u3));
}
test "PackedIntSlice at end of available memory" {
    switch (builtin.target.os.tag) {
        .linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {},
        else => return,
    }
    const S = PackedIntSlice(u11);
    const allocator = std.testing.allocator;
    const page = try allocator.alloc(u8, std.mem.page_size);
    defer allocator.free(page);
    // A single u11 needs two bytes; view exactly the last two bytes of the
    // page and write through them.
    var tail = S.init(page[std.mem.page_size - 2 ..], 1);
    tail.set(0, std.math.maxInt(u11));
}
| https://raw.githubusercontent.com/kassane/zig-mos-bootstrap/19aac4779b9e93b0e833402c26c93cfc13bb94e2/zig/lib/std/packed_int_array.zig |
const std = @import("std");
/// Truncates `a` from the wider float type `src_t` to the narrower `dst_t`,
/// operating purely on the integer bit representations.
/// Rounding is to-nearest with ties-to-even; overflow yields infinity,
/// too-small magnitudes produce a correctly rounded subnormal or zero, and
/// NaNs keep a truncated payload with the quiet bit forced on.
pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
    const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const srcSigBits = std.math.floatMantissaBits(src_t);
    const dstSigBits = std.math.floatMantissaBits(dst_t);
    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const srcBits = @typeInfo(src_t).Float.bits;
    const srcExpBits = srcBits - srcSigBits - 1;
    const srcInfExp = (1 << srcExpBits) - 1;
    const srcExpBias = srcInfExp >> 1;
    const srcMinNormal = 1 << srcSigBits;
    const srcSignificandMask = srcMinNormal - 1;
    const srcInfinity = srcInfExp << srcSigBits;
    const srcSignMask = 1 << (srcSigBits + srcExpBits);
    const srcAbsMask = srcSignMask - 1;
    // Bits that will be discarded by the narrowing, and the half-ULP point.
    const roundMask = (1 << (srcSigBits - dstSigBits)) - 1;
    const halfway = 1 << (srcSigBits - dstSigBits - 1);
    const srcQNaN = 1 << (srcSigBits - 1);
    const srcNaNCode = srcQNaN - 1;
    const dstBits = @typeInfo(dst_t).Float.bits;
    const dstExpBits = dstBits - dstSigBits - 1;
    const dstInfExp = (1 << dstExpBits) - 1;
    const dstExpBias = dstInfExp >> 1;
    // Source-exponent thresholds below/at-or-above which the result leaves
    // the destination's normal range.
    const underflowExponent = srcExpBias + 1 - dstExpBias;
    const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
    const underflow = underflowExponent << srcSigBits;
    const overflow = overflowExponent << srcSigBits;
    const dstQNaN = 1 << (dstSigBits - 1);
    const dstNaNCode = dstQNaN - 1;
    // Break a into a sign and representation of the absolute value
    const aRep: src_rep_t = @bitCast(a);
    const aAbs: src_rep_t = aRep & srcAbsMask;
    const sign: src_rep_t = aRep & srcSignMask;
    var absResult: dst_rep_t = undefined;
    // The wrapping subtractions collapse "underflow <= aAbs < overflow"
    // into a single unsigned comparison (out-of-range values wrap around).
    if (aAbs -% underflow < aAbs -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        absResult = @truncate(aAbs >> (srcSigBits - dstSigBits));
        absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits;
        const roundBits: src_rep_t = aAbs & roundMask;
        if (roundBits > halfway) {
            // Round to nearest
            absResult += 1;
        } else if (roundBits == halfway) {
            // Ties to even
            absResult += absResult & 1;
        }
    } else if (aAbs > srcInfinity) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
        absResult |= dstQNaN;
        absResult |= @intCast(((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
    } else if (aAbs >= overflow) {
        // a overflows to infinity.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const aExp: u32 = @intCast(aAbs >> srcSigBits);
        const shift: u32 = @intCast(srcExpBias - dstExpBias - aExp + 1);
        const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
        // Right shift by the denormalization amount with sticky.
        if (shift > srcSigBits) {
            absResult = 0;
        } else {
            // The sticky bit records any bits shifted all the way out so
            // rounding still sees them.
            const sticky: src_rep_t = @intFromBool(significand << @intCast(srcBits - shift) != 0);
            const denormalizedSignificand: src_rep_t = significand >> @intCast(shift) | sticky;
            absResult = @intCast(denormalizedSignificand >> (srcSigBits - dstSigBits));
            const roundBits: src_rep_t = denormalizedSignificand & roundMask;
            if (roundBits > halfway) {
                // Round to nearest
                absResult += 1;
            } else if (roundBits == halfway) {
                // Ties to even
                absResult += absResult & 1;
            }
        }
    }
    // Reattach the (shifted-down) sign bit and reinterpret as the float.
    const result: dst_rep_t align(@alignOf(dst_t)) = absResult |
        @as(dst_rep_t, @truncate(sign >> @intCast(srcBits - dstBits)));
    return @bitCast(result);
}
/// Truncates the x87 80-bit extended-precision value `a` to the narrower
/// IEEE-754 format `dst_t`, rounding to nearest with ties to even.
/// f80 needs its own implementation (separate from `truncf`) because its
/// significand stores the integer bit explicitly and its sign lives in the
/// top bit of the 16-bit exponent field.
pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
    // Unsigned integer type with the same bit width as dst_t, used for all
    // the bit-level manipulation below.
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
    const dst_sig_bits = std.math.floatMantissaBits(dst_t);
    const src_exp_bias = 16383;
    // Bits of the source fraction discarded by the narrowing, and the value
    // representing exactly half of that discarded range.
    const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
    const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
    const dst_bits = @typeInfo(dst_t).Float.bits;
    const dst_exp_bits = dst_bits - dst_sig_bits - 1;
    const dst_inf_exp = (1 << dst_exp_bits) - 1;
    const dst_exp_bias = dst_inf_exp >> 1;
    // Unlike in `truncf`, these bounds are expressed directly as biased
    // exponent values, not as full bit representations.
    const underflow = src_exp_bias + 1 - dst_exp_bias;
    const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
    const dst_qnan = 1 << (dst_sig_bits - 1);
    const dst_nan_mask = dst_qnan - 1;
    // Break a into a sign and representation of the absolute value
    var a_rep = std.math.break_f80(a);
    const sign = a_rep.exp & 0x8000; // the sign sits in the top exponent bit
    a_rep.exp &= 0x7FFF;
    a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
    var abs_result: dst_rep_t = undefined;

    // Wrapping subtraction folds the range test into one unsigned compare:
    // true exactly when underflow <= exp < overflow.
    if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
        abs_result |= @truncate(a_rep.fraction >> (src_sig_bits - dst_sig_bits));
        // Rebias the exponent for the destination format (wrapping subtract
        // on the raw bits).
        abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
        const round_bits = a_rep.fraction & round_mask;
        if (round_bits > halfway) {
            // Round to nearest
            abs_result += 1;
        } else if (round_bits == halfway) {
            // Ties to even
            abs_result += abs_result & 1;
        }
        // A carry out of the significand here bumps the exponent, which is
        // exactly the correct round-up to the next binade.
    } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
        abs_result |= dst_qnan;
        abs_result |= @intCast((a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
    } else if (a_rep.exp >= overflow) {
        // a overflows to infinity.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
        // Right shift by the denormalization amount with sticky.
        if (shift > src_sig_bits) {
            abs_result = 0;
        } else {
            // The fraction already carries the explicit integer bit, so no
            // implicit leading 1 needs to be restored (compare `truncf`).
            // NOTE(review): the sticky test shifts left by `shift`, unlike
            // `truncf` which shifts by (srcBits - shift); as written it also
            // observes bits that survive the right shift — verify against
            // compiler-rt's reference implementation.
            const sticky = @intFromBool(a_rep.fraction << @intCast(shift) != 0);
            const denormalized_significand = a_rep.fraction >> @intCast(shift) | sticky;
            abs_result = @intCast(denormalized_significand >> (src_sig_bits - dst_sig_bits));
            const round_bits = denormalized_significand & round_mask;
            if (round_bits > halfway) {
                // Round to nearest
                abs_result += 1;
            } else if (round_bits == halfway) {
                // Ties to even
                abs_result += abs_result & 1;
            }
        }
    }

    // Reattach the sign in the destination's most significant bit.
    const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
    return @bitCast(result);
}
// The behavior of this file is exercised by the shared test suite in
// truncf_test.zig rather than by inline tests.
test {
    _ = @import("truncf_test.zig");
}
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/lib/compiler_rt/truncf.zig |
const std = @import("std");
/// Truncates `a` from the wider IEEE-754 interchange format `src_t` to the
/// narrower format `dst_t`, rounding to nearest with ties to even.
/// NaN payloads are truncated with the quiet bit forced on, overflowing
/// values become infinity, and tiny values are denormalized or flushed to
/// (signed) zero. The x87 `f80` source format is handled separately by
/// `trunc_f80` because of its explicit integer bit.
pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
    // Unsigned integer types with the same bit widths as the float types;
    // all the work below is done on these bit representations.
    const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const srcSigBits = std.math.floatMantissaBits(src_t);
    const dstSigBits = std.math.floatMantissaBits(dst_t);

    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const srcBits = @typeInfo(src_t).Float.bits;
    const srcExpBits = srcBits - srcSigBits - 1;
    const srcInfExp = (1 << srcExpBits) - 1;
    const srcExpBias = srcInfExp >> 1;
    const srcMinNormal = 1 << srcSigBits;
    const srcSignificandMask = srcMinNormal - 1;
    const srcInfinity = srcInfExp << srcSigBits;
    const srcSignMask = 1 << (srcSigBits + srcExpBits);
    const srcAbsMask = srcSignMask - 1;
    // Bits of the source significand discarded by the narrowing, and the
    // value representing exactly half of that discarded range.
    const roundMask = (1 << (srcSigBits - dstSigBits)) - 1;
    const halfway = 1 << (srcSigBits - dstSigBits - 1);
    const srcQNaN = 1 << (srcSigBits - 1);
    const srcNaNCode = srcQNaN - 1;
    const dstBits = @typeInfo(dst_t).Float.bits;
    const dstExpBits = dstBits - dstSigBits - 1;
    const dstInfExp = (1 << dstExpBits) - 1;
    const dstExpBias = dstInfExp >> 1;
    // Source-representation thresholds delimiting the biased exponents that
    // still map onto normal numbers in the destination format.
    const underflowExponent = srcExpBias + 1 - dstExpBias;
    const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
    const underflow = underflowExponent << srcSigBits;
    const overflow = overflowExponent << srcSigBits;
    const dstQNaN = 1 << (dstSigBits - 1);
    const dstNaNCode = dstQNaN - 1;

    // Break a into a sign and representation of the absolute value
    const aRep: src_rep_t = @bitCast(a);
    const aAbs: src_rep_t = aRep & srcAbsMask;
    const sign: src_rep_t = aRep & srcSignMask;
    var absResult: dst_rep_t = undefined;

    // Wrapping subtraction folds the range test into one unsigned compare:
    // true exactly when underflow <= aAbs < overflow.
    if (aAbs -% underflow < aAbs -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        absResult = @truncate(aAbs >> (srcSigBits - dstSigBits));
        // Rebias the exponent for the destination format (wrapping subtract
        // on the raw bits).
        absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits;
        const roundBits: src_rep_t = aAbs & roundMask;
        if (roundBits > halfway) {
            // Round to nearest
            absResult += 1;
        } else if (roundBits == halfway) {
            // Ties to even
            absResult += absResult & 1;
        }
        // A carry out of the significand here bumps the exponent, which is
        // exactly the correct round-up to the next binade.
    } else if (aAbs > srcInfinity) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
        absResult |= dstQNaN;
        absResult |= @intCast(((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
    } else if (aAbs >= overflow) {
        // a overflows to infinity.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const aExp: u32 = @intCast(aAbs >> srcSigBits);
        const shift: u32 = @intCast(srcExpBias - dstExpBias - aExp + 1);
        // Restore the implicit leading 1 of the (normal) source significand.
        const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
        // Right shift by the denormalization amount with sticky.
        if (shift > srcSigBits) {
            absResult = 0;
        } else {
            // The sticky bit records whether any ones would be shifted out,
            // so the rounding below still sees them.
            const sticky: src_rep_t = @intFromBool(significand << @intCast(srcBits - shift) != 0);
            const denormalizedSignificand: src_rep_t = significand >> @intCast(shift) | sticky;
            absResult = @intCast(denormalizedSignificand >> (srcSigBits - dstSigBits));
            const roundBits: src_rep_t = denormalizedSignificand & roundMask;
            if (roundBits > halfway) {
                // Round to nearest
                absResult += 1;
            } else if (roundBits == halfway) {
                // Ties to even
                absResult += absResult & 1;
            }
        }
    }

    // Reattach the sign, moved into the destination's top bit.
    const result: dst_rep_t align(@alignOf(dst_t)) = absResult |
        @as(dst_rep_t, @truncate(sign >> @intCast(srcBits - dstBits)));
    return @bitCast(result);
}
/// Truncates the x87 80-bit extended-precision value `a` to the narrower
/// IEEE-754 format `dst_t`, rounding to nearest with ties to even.
/// f80 needs its own implementation (separate from `truncf`) because its
/// significand stores the integer bit explicitly and its sign lives in the
/// top bit of the 16-bit exponent field.
pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
    // Unsigned integer type with the same bit width as dst_t, used for all
    // the bit-level manipulation below.
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
    const dst_sig_bits = std.math.floatMantissaBits(dst_t);
    const src_exp_bias = 16383;
    // Bits of the source fraction discarded by the narrowing, and the value
    // representing exactly half of that discarded range.
    const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
    const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
    const dst_bits = @typeInfo(dst_t).Float.bits;
    const dst_exp_bits = dst_bits - dst_sig_bits - 1;
    const dst_inf_exp = (1 << dst_exp_bits) - 1;
    const dst_exp_bias = dst_inf_exp >> 1;
    // Unlike in `truncf`, these bounds are expressed directly as biased
    // exponent values, not as full bit representations.
    const underflow = src_exp_bias + 1 - dst_exp_bias;
    const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
    const dst_qnan = 1 << (dst_sig_bits - 1);
    const dst_nan_mask = dst_qnan - 1;
    // Break a into a sign and representation of the absolute value
    var a_rep = std.math.break_f80(a);
    const sign = a_rep.exp & 0x8000; // the sign sits in the top exponent bit
    a_rep.exp &= 0x7FFF;
    a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
    var abs_result: dst_rep_t = undefined;

    // Wrapping subtraction folds the range test into one unsigned compare:
    // true exactly when underflow <= exp < overflow.
    if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
        abs_result |= @truncate(a_rep.fraction >> (src_sig_bits - dst_sig_bits));
        // Rebias the exponent for the destination format (wrapping subtract
        // on the raw bits).
        abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
        const round_bits = a_rep.fraction & round_mask;
        if (round_bits > halfway) {
            // Round to nearest
            abs_result += 1;
        } else if (round_bits == halfway) {
            // Ties to even
            abs_result += abs_result & 1;
        }
        // A carry out of the significand here bumps the exponent, which is
        // exactly the correct round-up to the next binade.
    } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
        abs_result |= dst_qnan;
        abs_result |= @intCast((a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
    } else if (a_rep.exp >= overflow) {
        // a overflows to infinity.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
        // Right shift by the denormalization amount with sticky.
        if (shift > src_sig_bits) {
            abs_result = 0;
        } else {
            // The fraction already carries the explicit integer bit, so no
            // implicit leading 1 needs to be restored (compare `truncf`).
            // NOTE(review): the sticky test shifts left by `shift`, unlike
            // `truncf` which shifts by (srcBits - shift); as written it also
            // observes bits that survive the right shift — verify against
            // compiler-rt's reference implementation.
            const sticky = @intFromBool(a_rep.fraction << @intCast(shift) != 0);
            const denormalized_significand = a_rep.fraction >> @intCast(shift) | sticky;
            abs_result = @intCast(denormalized_significand >> (src_sig_bits - dst_sig_bits));
            const round_bits = denormalized_significand & round_mask;
            if (round_bits > halfway) {
                // Round to nearest
                abs_result += 1;
            } else if (round_bits == halfway) {
                // Ties to even
                abs_result += abs_result & 1;
            }
        }
    }

    // Reattach the sign in the destination's most significant bit.
    const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
    return @bitCast(result);
}
// The behavior of this file is exercised by the shared test suite in
// truncf_test.zig rather than by inline tests.
test {
    _ = @import("truncf_test.zig");
}
| https://raw.githubusercontent.com/kassane/zig-mos-bootstrap/19aac4779b9e93b0e833402c26c93cfc13bb94e2/zig/lib/compiler_rt/truncf.zig |
const std = @import("std");
/// Truncates `a` from the wider IEEE-754 interchange format `src_t` to the
/// narrower format `dst_t`, rounding to nearest with ties to even.
/// NaN payloads are truncated with the quiet bit forced on, overflowing
/// values become infinity, and tiny values are denormalized or flushed to
/// (signed) zero. The x87 `f80` source format is handled separately by
/// `trunc_f80` because of its explicit integer bit.
pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
    // Unsigned integer types with the same bit widths as the float types;
    // all the work below is done on these bit representations.
    const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const srcSigBits = std.math.floatMantissaBits(src_t);
    const dstSigBits = std.math.floatMantissaBits(dst_t);

    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const srcBits = @typeInfo(src_t).Float.bits;
    const srcExpBits = srcBits - srcSigBits - 1;
    const srcInfExp = (1 << srcExpBits) - 1;
    const srcExpBias = srcInfExp >> 1;
    const srcMinNormal = 1 << srcSigBits;
    const srcSignificandMask = srcMinNormal - 1;
    const srcInfinity = srcInfExp << srcSigBits;
    const srcSignMask = 1 << (srcSigBits + srcExpBits);
    const srcAbsMask = srcSignMask - 1;
    // Bits of the source significand discarded by the narrowing, and the
    // value representing exactly half of that discarded range.
    const roundMask = (1 << (srcSigBits - dstSigBits)) - 1;
    const halfway = 1 << (srcSigBits - dstSigBits - 1);
    const srcQNaN = 1 << (srcSigBits - 1);
    const srcNaNCode = srcQNaN - 1;
    const dstBits = @typeInfo(dst_t).Float.bits;
    const dstExpBits = dstBits - dstSigBits - 1;
    const dstInfExp = (1 << dstExpBits) - 1;
    const dstExpBias = dstInfExp >> 1;
    // Source-representation thresholds delimiting the biased exponents that
    // still map onto normal numbers in the destination format.
    const underflowExponent = srcExpBias + 1 - dstExpBias;
    const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
    const underflow = underflowExponent << srcSigBits;
    const overflow = overflowExponent << srcSigBits;
    const dstQNaN = 1 << (dstSigBits - 1);
    const dstNaNCode = dstQNaN - 1;

    // Break a into a sign and representation of the absolute value
    const aRep: src_rep_t = @bitCast(a);
    const aAbs: src_rep_t = aRep & srcAbsMask;
    const sign: src_rep_t = aRep & srcSignMask;
    var absResult: dst_rep_t = undefined;

    // Wrapping subtraction folds the range test into one unsigned compare:
    // true exactly when underflow <= aAbs < overflow.
    if (aAbs -% underflow < aAbs -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        absResult = @truncate(aAbs >> (srcSigBits - dstSigBits));
        // Rebias the exponent for the destination format (wrapping subtract
        // on the raw bits).
        absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits;
        const roundBits: src_rep_t = aAbs & roundMask;
        if (roundBits > halfway) {
            // Round to nearest
            absResult += 1;
        } else if (roundBits == halfway) {
            // Ties to even
            absResult += absResult & 1;
        }
        // A carry out of the significand here bumps the exponent, which is
        // exactly the correct round-up to the next binade.
    } else if (aAbs > srcInfinity) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
        absResult |= dstQNaN;
        absResult |= @intCast(((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
    } else if (aAbs >= overflow) {
        // a overflows to infinity.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const aExp: u32 = @intCast(aAbs >> srcSigBits);
        const shift: u32 = @intCast(srcExpBias - dstExpBias - aExp + 1);
        // Restore the implicit leading 1 of the (normal) source significand.
        const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
        // Right shift by the denormalization amount with sticky.
        if (shift > srcSigBits) {
            absResult = 0;
        } else {
            // The sticky bit records whether any ones would be shifted out,
            // so the rounding below still sees them.
            const sticky: src_rep_t = @intFromBool(significand << @intCast(srcBits - shift) != 0);
            const denormalizedSignificand: src_rep_t = significand >> @intCast(shift) | sticky;
            absResult = @intCast(denormalizedSignificand >> (srcSigBits - dstSigBits));
            const roundBits: src_rep_t = denormalizedSignificand & roundMask;
            if (roundBits > halfway) {
                // Round to nearest
                absResult += 1;
            } else if (roundBits == halfway) {
                // Ties to even
                absResult += absResult & 1;
            }
        }
    }

    // Reattach the sign, moved into the destination's top bit.
    const result: dst_rep_t align(@alignOf(dst_t)) = absResult |
        @as(dst_rep_t, @truncate(sign >> @intCast(srcBits - dstBits)));
    return @bitCast(result);
}
/// Truncates the x87 80-bit extended-precision value `a` to the narrower
/// IEEE-754 format `dst_t`, rounding to nearest with ties to even.
/// f80 needs its own implementation (separate from `truncf`) because its
/// significand stores the integer bit explicitly and its sign lives in the
/// top bit of the 16-bit exponent field.
pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
    // Unsigned integer type with the same bit width as dst_t, used for all
    // the bit-level manipulation below.
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
    const dst_sig_bits = std.math.floatMantissaBits(dst_t);
    const src_exp_bias = 16383;
    // Bits of the source fraction discarded by the narrowing, and the value
    // representing exactly half of that discarded range.
    const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
    const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
    const dst_bits = @typeInfo(dst_t).Float.bits;
    const dst_exp_bits = dst_bits - dst_sig_bits - 1;
    const dst_inf_exp = (1 << dst_exp_bits) - 1;
    const dst_exp_bias = dst_inf_exp >> 1;
    // Unlike in `truncf`, these bounds are expressed directly as biased
    // exponent values, not as full bit representations.
    const underflow = src_exp_bias + 1 - dst_exp_bias;
    const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
    const dst_qnan = 1 << (dst_sig_bits - 1);
    const dst_nan_mask = dst_qnan - 1;
    // Break a into a sign and representation of the absolute value
    var a_rep = std.math.break_f80(a);
    const sign = a_rep.exp & 0x8000; // the sign sits in the top exponent bit
    a_rep.exp &= 0x7FFF;
    a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
    var abs_result: dst_rep_t = undefined;

    // Wrapping subtraction folds the range test into one unsigned compare:
    // true exactly when underflow <= exp < overflow.
    if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
        abs_result |= @truncate(a_rep.fraction >> (src_sig_bits - dst_sig_bits));
        // Rebias the exponent for the destination format (wrapping subtract
        // on the raw bits).
        abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
        const round_bits = a_rep.fraction & round_mask;
        if (round_bits > halfway) {
            // Round to nearest
            abs_result += 1;
        } else if (round_bits == halfway) {
            // Ties to even
            abs_result += abs_result & 1;
        }
        // A carry out of the significand here bumps the exponent, which is
        // exactly the correct round-up to the next binade.
    } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
        abs_result |= dst_qnan;
        abs_result |= @intCast((a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
    } else if (a_rep.exp >= overflow) {
        // a overflows to infinity.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
        // Right shift by the denormalization amount with sticky.
        if (shift > src_sig_bits) {
            abs_result = 0;
        } else {
            // The fraction already carries the explicit integer bit, so no
            // implicit leading 1 needs to be restored (compare `truncf`).
            // NOTE(review): the sticky test shifts left by `shift`, unlike
            // `truncf` which shifts by (srcBits - shift); as written it also
            // observes bits that survive the right shift — verify against
            // compiler-rt's reference implementation.
            const sticky = @intFromBool(a_rep.fraction << @intCast(shift) != 0);
            const denormalized_significand = a_rep.fraction >> @intCast(shift) | sticky;
            abs_result = @intCast(denormalized_significand >> (src_sig_bits - dst_sig_bits));
            const round_bits = denormalized_significand & round_mask;
            if (round_bits > halfway) {
                // Round to nearest
                abs_result += 1;
            } else if (round_bits == halfway) {
                // Ties to even
                abs_result += abs_result & 1;
            }
        }
    }

    // Reattach the sign in the destination's most significant bit.
    const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
    return @bitCast(result);
}
// The behavior of this file is exercised by the shared test suite in
// truncf_test.zig rather than by inline tests.
test {
    _ = @import("truncf_test.zig");
}
| https://raw.githubusercontent.com/beingofexistence13/multiversal-lang/dd769e3fc6182c23ef43ed4479614f43f29738c9/zig/lib/compiler_rt/truncf.zig |
const std = @import("std");
/// Truncates `a` from the wider IEEE-754 interchange format `src_t` to the
/// narrower format `dst_t`, rounding to nearest with ties to even.
/// NaN payloads are truncated with the quiet bit forced on, overflowing
/// values become infinity, and tiny values are denormalized or flushed to
/// (signed) zero. The x87 `f80` source format is handled separately by
/// `trunc_f80` because of its explicit integer bit.
pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
    // Unsigned integer types with the same bit widths as the float types;
    // all the work below is done on these bit representations.
    const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const srcSigBits = std.math.floatMantissaBits(src_t);
    const dstSigBits = std.math.floatMantissaBits(dst_t);

    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const srcBits = @typeInfo(src_t).Float.bits;
    const srcExpBits = srcBits - srcSigBits - 1;
    const srcInfExp = (1 << srcExpBits) - 1;
    const srcExpBias = srcInfExp >> 1;
    const srcMinNormal = 1 << srcSigBits;
    const srcSignificandMask = srcMinNormal - 1;
    const srcInfinity = srcInfExp << srcSigBits;
    const srcSignMask = 1 << (srcSigBits + srcExpBits);
    const srcAbsMask = srcSignMask - 1;
    // Bits of the source significand discarded by the narrowing, and the
    // value representing exactly half of that discarded range.
    const roundMask = (1 << (srcSigBits - dstSigBits)) - 1;
    const halfway = 1 << (srcSigBits - dstSigBits - 1);
    const srcQNaN = 1 << (srcSigBits - 1);
    const srcNaNCode = srcQNaN - 1;
    const dstBits = @typeInfo(dst_t).Float.bits;
    const dstExpBits = dstBits - dstSigBits - 1;
    const dstInfExp = (1 << dstExpBits) - 1;
    const dstExpBias = dstInfExp >> 1;
    // Source-representation thresholds delimiting the biased exponents that
    // still map onto normal numbers in the destination format.
    const underflowExponent = srcExpBias + 1 - dstExpBias;
    const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
    const underflow = underflowExponent << srcSigBits;
    const overflow = overflowExponent << srcSigBits;
    const dstQNaN = 1 << (dstSigBits - 1);
    const dstNaNCode = dstQNaN - 1;

    // Break a into a sign and representation of the absolute value
    const aRep: src_rep_t = @bitCast(a);
    const aAbs: src_rep_t = aRep & srcAbsMask;
    const sign: src_rep_t = aRep & srcSignMask;
    var absResult: dst_rep_t = undefined;

    // Wrapping subtraction folds the range test into one unsigned compare:
    // true exactly when underflow <= aAbs < overflow.
    if (aAbs -% underflow < aAbs -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        absResult = @truncate(aAbs >> (srcSigBits - dstSigBits));
        // Rebias the exponent for the destination format (wrapping subtract
        // on the raw bits).
        absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits;
        const roundBits: src_rep_t = aAbs & roundMask;
        if (roundBits > halfway) {
            // Round to nearest
            absResult += 1;
        } else if (roundBits == halfway) {
            // Ties to even
            absResult += absResult & 1;
        }
        // A carry out of the significand here bumps the exponent, which is
        // exactly the correct round-up to the next binade.
    } else if (aAbs > srcInfinity) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
        absResult |= dstQNaN;
        absResult |= @intCast(((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
    } else if (aAbs >= overflow) {
        // a overflows to infinity.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const aExp: u32 = @intCast(aAbs >> srcSigBits);
        const shift: u32 = @intCast(srcExpBias - dstExpBias - aExp + 1);
        // Restore the implicit leading 1 of the (normal) source significand.
        const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
        // Right shift by the denormalization amount with sticky.
        if (shift > srcSigBits) {
            absResult = 0;
        } else {
            // The sticky bit records whether any ones would be shifted out,
            // so the rounding below still sees them.
            const sticky: src_rep_t = @intFromBool(significand << @intCast(srcBits - shift) != 0);
            const denormalizedSignificand: src_rep_t = significand >> @intCast(shift) | sticky;
            absResult = @intCast(denormalizedSignificand >> (srcSigBits - dstSigBits));
            const roundBits: src_rep_t = denormalizedSignificand & roundMask;
            if (roundBits > halfway) {
                // Round to nearest
                absResult += 1;
            } else if (roundBits == halfway) {
                // Ties to even
                absResult += absResult & 1;
            }
        }
    }

    // Reattach the sign, moved into the destination's top bit.
    const result: dst_rep_t align(@alignOf(dst_t)) = absResult |
        @as(dst_rep_t, @truncate(sign >> @intCast(srcBits - dstBits)));
    return @bitCast(result);
}
/// Truncates the x87 80-bit extended-precision value `a` to the narrower
/// IEEE-754 format `dst_t`, rounding to nearest with ties to even.
/// f80 needs its own implementation (separate from `truncf`) because its
/// significand stores the integer bit explicitly and its sign lives in the
/// top bit of the 16-bit exponent field.
pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
    // Unsigned integer type with the same bit width as dst_t, used for all
    // the bit-level manipulation below.
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
    const dst_sig_bits = std.math.floatMantissaBits(dst_t);
    const src_exp_bias = 16383;
    // Bits of the source fraction discarded by the narrowing, and the value
    // representing exactly half of that discarded range.
    const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
    const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
    const dst_bits = @typeInfo(dst_t).Float.bits;
    const dst_exp_bits = dst_bits - dst_sig_bits - 1;
    const dst_inf_exp = (1 << dst_exp_bits) - 1;
    const dst_exp_bias = dst_inf_exp >> 1;
    // Unlike in `truncf`, these bounds are expressed directly as biased
    // exponent values, not as full bit representations.
    const underflow = src_exp_bias + 1 - dst_exp_bias;
    const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
    const dst_qnan = 1 << (dst_sig_bits - 1);
    const dst_nan_mask = dst_qnan - 1;
    // Break a into a sign and representation of the absolute value
    var a_rep = std.math.break_f80(a);
    const sign = a_rep.exp & 0x8000; // the sign sits in the top exponent bit
    a_rep.exp &= 0x7FFF;
    a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
    var abs_result: dst_rep_t = undefined;

    // Wrapping subtraction folds the range test into one unsigned compare:
    // true exactly when underflow <= exp < overflow.
    if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
        abs_result |= @truncate(a_rep.fraction >> (src_sig_bits - dst_sig_bits));
        // Rebias the exponent for the destination format (wrapping subtract
        // on the raw bits).
        abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
        const round_bits = a_rep.fraction & round_mask;
        if (round_bits > halfway) {
            // Round to nearest
            abs_result += 1;
        } else if (round_bits == halfway) {
            // Ties to even
            abs_result += abs_result & 1;
        }
        // A carry out of the significand here bumps the exponent, which is
        // exactly the correct round-up to the next binade.
    } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
        abs_result |= dst_qnan;
        abs_result |= @intCast((a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
    } else if (a_rep.exp >= overflow) {
        // a overflows to infinity.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
        // Right shift by the denormalization amount with sticky.
        if (shift > src_sig_bits) {
            abs_result = 0;
        } else {
            // The fraction already carries the explicit integer bit, so no
            // implicit leading 1 needs to be restored (compare `truncf`).
            // NOTE(review): the sticky test shifts left by `shift`, unlike
            // `truncf` which shifts by (srcBits - shift); as written it also
            // observes bits that survive the right shift — verify against
            // compiler-rt's reference implementation.
            const sticky = @intFromBool(a_rep.fraction << @intCast(shift) != 0);
            const denormalized_significand = a_rep.fraction >> @intCast(shift) | sticky;
            abs_result = @intCast(denormalized_significand >> (src_sig_bits - dst_sig_bits));
            const round_bits = denormalized_significand & round_mask;
            if (round_bits > halfway) {
                // Round to nearest
                abs_result += 1;
            } else if (round_bits == halfway) {
                // Ties to even
                abs_result += abs_result & 1;
            }
        }
    }

    // Reattach the sign in the destination's most significant bit.
    const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
    return @bitCast(result);
}
// The behavior of this file is exercised by the shared test suite in
// truncf_test.zig rather than by inline tests.
test {
    _ = @import("truncf_test.zig");
}
| https://raw.githubusercontent.com/2lambda123/ziglang-zig-bootstrap/f56dc0fd298f41c8cc2a4f76a9648111e6c75503/zig/lib/compiler_rt/truncf.zig |
//! __emutls_get_address specific builtin
//!
//! derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
//! https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c
const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");
const abort = std.os.abort;
const assert = std.debug.assert;
const expect = std.testing.expect;
/// defined in C as:
/// typedef unsigned int gcc_word __attribute__((mode(word)));
const gcc_word = usize;
pub const panic = common.panic;
// Export __emutls_get_address only for the targets whose libc relies on
// emulated TLS (Android's Bionic, and OpenBSD).
comptime {
    if (builtin.link_libc and (builtin.abi == .android or builtin.os.tag == .openbsd)) {
        @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage, .visibility = common.visibility });
    }
}
/// Public entrypoint for code generated with the EmulatedTLS lowering:
/// returns the address of the calling thread's copy of the variable
/// described by `control`, allocating and initializing it on first use.
pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *anyopaque {
    return control.getPointer();
}
/// Simple allocator interface, to avoid pulling in the whole
/// std allocator implementation.
/// All failures abort the process: emulated-TLS lookups have no way to
/// report an error back to the generated code that calls them.
const simple_allocator = struct {
    /// Allocate a memory chunk for requested type. Return a pointer on the data.
    pub fn alloc(comptime T: type) *T {
        return @ptrCast(@alignCast(advancedAlloc(@alignOf(T), @sizeOf(T))));
    }

    /// Allocate a slice of T, with len elements.
    pub fn allocSlice(comptime T: type, len: usize) []T {
        // The allocation below is sized for `len` elements, so the returned
        // slice must cover all of them. (The previous `0 .. len - 1` bound
        // silently dropped the last element.)
        return @as([*]T, @ptrCast(@alignCast(
            advancedAlloc(@alignOf(T), @sizeOf(T) * len),
        )))[0..len];
    }

    /// Allocate a memory chunk with the requested alignment (raised to at
    /// least pointer alignment, as posix_memalign requires). Aborts on
    /// failure.
    pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 {
        const minimal_alignment = @max(@alignOf(usize), alignment);
        var aligned_ptr: ?*anyopaque = undefined;
        if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) {
            abort();
        }
        return @as([*]u8, @ptrCast(aligned_ptr));
    }

    /// Resize a slice. Existing contents are preserved; memory gained by
    /// growing is uninitialized. Aborts on failure.
    pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T {
        const c_ptr: *anyopaque = @as(*anyopaque, @ptrCast(slice.ptr));
        const new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort()));
        return new_array[0..len];
    }

    /// Free a memory chunk allocated with simple_allocator.
    pub fn free(ptr: anytype) void {
        std.c.free(@as(*anyopaque, @ptrCast(ptr)));
    }
};
/// Simple array of ?ObjectPointer with automatic resizing and
/// automatic storage allocation. Slot i holds the address of this thread's
/// copy of emutls variable i, or null while it has not been allocated yet.
const ObjectArray = struct {
    const ObjectPointer = *anyopaque;

    // content of the array (null = uninitialized slot)
    slots: []?ObjectPointer,

    /// create a new ObjectArray with n slots, all empty.
    /// must call deinit() to deallocate.
    pub fn init(n: usize) *ObjectArray {
        const array = simple_allocator.alloc(ObjectArray);
        array.* = ObjectArray{
            .slots = simple_allocator.allocSlice(?ObjectPointer, n),
        };
        for (array.slots) |*object| {
            object.* = null;
        }
        return array;
    }

    /// deallocate the ObjectArray and every object it owns.
    pub fn deinit(self: *ObjectArray) void {
        // Free only the slots that were actually allocated: `free` casts
        // its argument to a non-optional pointer, which must not be done
        // with a null slot.
        for (self.slots) |object| {
            if (object) |ptr| {
                simple_allocator.free(ptr);
            }
        }
        simple_allocator.free(self.slots);
        simple_allocator.free(self);
    }

    /// Grow the slot array if needed so that `slots.len >= new_len`.
    pub fn ensureLength(self: *ObjectArray, new_len: usize) *ObjectArray {
        const old_len = self.slots.len;
        // Already big enough: nothing to do (including the equal case,
        // where reallocating to the same size would be pointless work).
        if (old_len >= new_len) {
            return self;
        }
        // reallocate
        self.slots = simple_allocator.reallocSlice(?ObjectPointer, self.slots, new_len);
        // init newly added slots
        for (self.slots[old_len..]) |*object| {
            object.* = null;
        }
        return self;
    }

    /// Retrieve the pointer at requested index, allocating and initializing
    /// the object on first use (copied from control.default_value, or
    /// zero-filled when there is no default).
    pub fn getPointer(self: *ObjectArray, index: usize, control: *emutls_control) ObjectPointer {
        if (self.slots[index] == null) {
            // initialize the slot (nothing below can fail: allocation
            // failures abort, so no error cleanup is needed)
            const size = control.size;
            const alignment: u29 = @truncate(control.alignment);
            const data = simple_allocator.advancedAlloc(alignment, size);
            if (control.default_value) |value| {
                // default value: copy the content to newly allocated object.
                @memcpy(data[0..size], @as([*]const u8, @ptrCast(value)));
            } else {
                // no default: return zeroed memory.
                @memset(data[0..size], 0);
            }
            self.slots[index] = @as(*anyopaque, @ptrCast(data));
        }
        return self.slots[index].?;
    }
};
// Global structure for thread storage.
// Provides thread-safe, on-demand storage of each thread's ObjectArray
// behind a lazily created pthread key.
const current_thread_storage = struct {
    var key: std.c.pthread_key_t = undefined;
    var init_once = std.once(current_thread_storage.init);

    /// Return the calling thread's ObjectArray, creating it on first use
    /// and grown so it covers at least the expected index.
    pub fn getArray(index: usize) *ObjectArray {
        const existing = current_thread_storage.getspecific() orelse {
            // First request on this thread: create a fresh array with room
            // for at least 16 objects, so early requests don't each trigger
            // a reallocation.
            const initial_len = @max(16, index);
            const created = ObjectArray.init(initial_len);
            current_thread_storage.setspecific(created);
            return created;
        };
        // An array already exists for this thread: just make sure it is
        // big enough for the wanted index.
        return existing.ensureLength(index);
    }

    /// Read this thread's ObjectArray from pthread specific data, or null
    /// if none has been stored yet.
    fn getspecific() ?*ObjectArray {
        const raw = std.c.pthread_getspecific(current_thread_storage.key);
        return @ptrCast(@alignCast(raw));
    }

    /// Store `new` as this thread's ObjectArray. Aborts on pthread failure.
    fn setspecific(new: ?*ObjectArray) void {
        const rc = std.c.pthread_setspecific(current_thread_storage.key, @as(*anyopaque, @ptrCast(new)));
        if (rc != 0) {
            abort();
        }
    }

    /// One-time creation of the pthread key (run through init_once).
    fn init() void {
        const rc = std.c.pthread_key_create(&current_thread_storage.key, current_thread_storage.deinit);
        if (rc != .SUCCESS) {
            abort();
        }
    }

    /// pthread destructor invoked at thread exit; the passed argument is
    /// the thread's ObjectArray pointer.
    fn deinit(arrayPtr: *anyopaque) callconv(.C) void {
        const array: *ObjectArray = @ptrCast(@alignCast(arrayPtr));
        array.deinit();
    }
};
const emutls_control = extern struct {
    // An emutls_control value is a single global shared by all threads for
    // one TLS variable. Threads share the variable's index; the data array
    // (containing the addresses of the allocated per-thread objects) is
    // thread specific and stored using pthread_setspecific().

    // size of the object in bytes
    size: gcc_word,
    // alignment of the object in bytes
    alignment: gcc_word,
    object: extern union {
        // data[index-1] is the object address / 0 = uninit
        index: usize,
        // object address, when in single thread env (not used)
        address: *anyopaque,
    },
    // null or non-zero initial value for the object
    default_value: ?*const anyopaque,

    // global Mutex used to serialize control.index initialization.
    var mutex: std.c.pthread_mutex_t = std.c.PTHREAD_MUTEX_INITIALIZER;
    // global counter for keeping track of requested indexes.
    // access should be done with mutex held.
    var next_index: usize = 1;

    /// Simple wrapper for the global lock; aborts on failure.
    fn lock() void {
        if (std.c.pthread_mutex_lock(&emutls_control.mutex) != .SUCCESS) {
            abort();
        }
    }

    /// Simple wrapper for the global unlock; aborts on failure.
    fn unlock() void {
        if (std.c.pthread_mutex_unlock(&emutls_control.mutex) != .SUCCESS) {
            abort();
        }
    }

    /// Helper to retrieve and initialize the global unique index per emutls
    /// variable. Uses double-checked locking: a lockless atomic fast path,
    /// then the global mutex for first-time initialization.
    pub fn getIndex(self: *emutls_control) usize {
        // Two threads could race against the same emutls_control.
        // Use an atomic load for reading a coherent value locklessly.
        const index_lockless = @atomicLoad(usize, &self.object.index, .Acquire);
        if (index_lockless != 0) {
            // index is already initialized, return it.
            return index_lockless;
        }
        // index is uninitialized: take global lock to avoid possible race.
        emutls_control.lock();
        defer emutls_control.unlock();
        const index_locked = self.object.index;
        if (index_locked != 0) {
            // we lost a race, but index is already initialized: nothing particular to do.
            return index_locked;
        }
        // Store a new index atomically (so lockless readers observe a coherent value).
        @atomicStore(usize, &self.object.index, emutls_control.next_index, .Release);
        // Increment the next available index (mutex still held).
        emutls_control.next_index += 1;
        return self.object.index;
    }

    /// Simple helper for testing purposes.
    pub fn init(comptime T: type, default_value: ?*const T) emutls_control {
        return emutls_control{
            .size = @sizeOf(T),
            .alignment = @alignOf(T),
            .object = .{ .index = 0 },
            .default_value = @as(?*const anyopaque, @ptrCast(default_value)),
        };
    }

    /// Get the pointer to the calling thread's storage for this emutls
    /// variable, allocating and initializing it on first access.
    pub fn getPointer(self: *emutls_control) *anyopaque {
        // ensure current_thread_storage initialization is done
        current_thread_storage.init_once.call();
        const index = self.getIndex();
        var array = current_thread_storage.getArray(index);
        // indexes start at 1; the per-thread slots are 0-based.
        return array.getPointer(index - 1, self);
    }

    /// Testing helper for retrieving a typed pointer.
    pub fn get_typed_pointer(self: *emutls_control, comptime T: type) *T {
        assert(self.size == @sizeOf(T));
        assert(self.alignment == @alignOf(T));
        return @ptrCast(@alignCast(self.getPointer()));
    }
};
test "simple_allocator" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    // Typed allocation: fill the whole array through the array pointer.
    var typed: *[64]u8 = simple_allocator.alloc([64]u8);
    defer simple_allocator.free(typed);
    @memset(typed, 0xff);

    // Raw allocation: same fill, through a many-item pointer slice.
    var raw: [*]u8 = simple_allocator.advancedAlloc(@alignOf(u8), 64);
    defer simple_allocator.free(raw);
    @memset(raw[0..63], 0xff);
}
test "__emutls_get_address zeroed" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
    var ctl = emutls_control.init(usize, null);
    try expect(ctl.object.index == 0);

    // First access allocates an index and zero-initialized storage.
    var first: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(ctl.object.index != 0); // index has been allocated for this ctl
    try expect(first.* == 0); // storage has been zeroed

    // Mutate the storage, then look it up again through the same control.
    first.* = 1234;
    var second: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(second.* == 1234); // same content observed
    try expect(first == second); // same pointer
}
test "__emutls_get_address with default_value" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
    const value: usize = 5678; // default value
    var ctl = emutls_control.init(usize, &value);
    try expect(ctl.object.index == 0);

    // First access copies the default value into fresh storage.
    var first: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(ctl.object.index != 0);
    try expect(first.* == 5678); // storage initialized with default value

    // Mutating the storage must not touch the default, and must persist.
    first.* = 9012;
    try expect(value == 5678); // the default value didn't change
    var second: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(second.* == 9012); // the modified storage persists
}
test "test default_value with differents sizes" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
    // Round-trip a default value of type T through emutls storage.
    const check = struct {
        fn run(comptime T: type, value: T) !void {
            var ctl = emutls_control.init(T, &value);
            var stored = ctl.get_typed_pointer(T);
            try expect(stored.* == value);
        }
    }.run;
    try check(usize, 1234);
    try check(u32, 1234);
    try check(i16, -12);
    try check(f64, -12.0);
    try check(
        @TypeOf("012345678901234567890123456789"),
        "012345678901234567890123456789",
    );
}
| https://raw.githubusercontent.com/mundusnine/FoundryTools_linux_x64/98e738bf92a416b255c9d11b78e8033071b52672/lib/compiler_rt/emutls.zig |
//! __emutls_get_address specific builtin
//!
//! derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
//! https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c
const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");
const abort = std.os.abort;
const assert = std.debug.assert;
const expect = std.testing.expect;
/// Counterpart of the GCC word type used by the emutls ABI, defined in C as:
///   typedef unsigned int gcc_word __attribute__((mode(word)));
/// i.e. an unsigned integer of machine-word width.
const gcc_word = usize;

/// Use the shared compiler_rt panic handler.
pub const panic = common.panic;
comptime {
    // Emulated TLS is only used on Android and OpenBSD with libc linked,
    // so only export the symbol for those environments.
    if (builtin.link_libc and (builtin.abi == .android or builtin.os.tag == .openbsd)) {
        @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage, .visibility = common.visibility });
    }
}
/// Public entry point for code generated with emulated TLS: returns the
/// calling thread's storage for the variable described by `control`,
/// allocating and initializing it on first access.
pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *anyopaque {
    return control.getPointer();
}
/// Simple allocator interface built directly on libc, to avoid pulling in
/// the whole std allocator implementation. All allocation failures abort().
const simple_allocator = struct {
    /// Allocate a memory chunk for the requested type. Returns a typed pointer.
    pub fn alloc(comptime T: type) *T {
        return @ptrCast(@alignCast(advancedAlloc(@alignOf(T), @sizeOf(T))));
    }

    /// Allocate a slice of T with `len` elements. Caller frees with `free`.
    pub fn allocSlice(comptime T: type, len: usize) []T {
        // FIX: previously sliced as [0 .. len - 1], which allocated `len`
        // elements but reported only `len - 1`, silently wasting one slot
        // and forcing callers (e.g. ObjectArray.init) into an immediate
        // realloc. Slice the full allocation, consistent with reallocSlice.
        return @as([*]T, @ptrCast(@alignCast(
            advancedAlloc(@alignOf(T), @sizeOf(T) * len),
        )))[0..len];
    }

    /// Allocate a memory chunk with the given alignment; aborts on failure.
    pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 {
        // posix_memalign requires at least pointer alignment.
        const minimal_alignment = @max(@alignOf(usize), alignment);
        var aligned_ptr: ?*anyopaque = undefined;
        if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) {
            abort();
        }
        return @as([*]u8, @ptrCast(aligned_ptr));
    }

    /// Resize a slice previously obtained from this allocator; aborts on failure.
    pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T {
        var c_ptr: *anyopaque = @as(*anyopaque, @ptrCast(slice.ptr));
        var new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort()));
        return new_array[0..len];
    }

    /// Free a memory chunk allocated with simple_allocator.
    pub fn free(ptr: anytype) void {
        std.c.free(@as(*anyopaque, @ptrCast(ptr)));
    }
};
/// Simple array of ?ObjectPointer with automatic resizing and
/// automatic storage allocation. One instance exists per thread.
const ObjectArray = struct {
    const ObjectPointer = *anyopaque;

    // content of the array; null means "object not yet allocated".
    slots: []?ObjectPointer,

    /// Create a new ObjectArray with n slots. Must call deinit() to deallocate.
    pub fn init(n: usize) *ObjectArray {
        var array = simple_allocator.alloc(ObjectArray);
        array.* = ObjectArray{
            .slots = simple_allocator.allocSlice(?ObjectPointer, n),
        };
        // Objects are allocated lazily in getPointer(); start empty.
        for (array.slots) |*object| {
            object.* = null;
        }
        return array;
    }

    /// Deallocate the ObjectArray and every object it owns.
    pub fn deinit(self: *ObjectArray) void {
        // FIX: only free slots that were actually allocated. The previous
        // code passed the raw optional (possibly null) into free(), whose
        // @ptrCast to a non-optional pointer trips safety checks for any
        // never-accessed slot. It also freed the slice value instead of
        // its backing pointer; pass .ptr explicitly.
        for (self.slots) |object| {
            if (object) |ptr| {
                simple_allocator.free(ptr);
            }
        }
        simple_allocator.free(self.slots.ptr);
        simple_allocator.free(self);
    }

    /// Grow the array if needed so it holds at least new_len slots.
    pub fn ensureLength(self: *ObjectArray, new_len: usize) *ObjectArray {
        const old_len = self.slots.len;
        // FIX: was `old_len > new_len`, which performed a pointless
        // same-size realloc whenever old_len == new_len.
        if (old_len >= new_len) {
            return self;
        }
        // reallocate
        self.slots = simple_allocator.reallocSlice(?ObjectPointer, self.slots, new_len);
        // init newly added slots
        for (self.slots[old_len..]) |*object| {
            object.* = null;
        }
        return self;
    }

    /// Retrieve the pointer at the requested index, allocating and
    /// initializing the object on first access using `control`'s size,
    /// alignment and default value.
    pub fn getPointer(self: *ObjectArray, index: usize, control: *emutls_control) ObjectPointer {
        if (self.slots[index] == null) {
            // initialize the slot
            const size = control.size;
            const alignment: u29 = @truncate(control.alignment);
            var data = simple_allocator.advancedAlloc(alignment, size);
            errdefer simple_allocator.free(data);
            if (control.default_value) |value| {
                // default value: copy the content to the newly allocated object.
                @memcpy(data[0..size], @as([*]const u8, @ptrCast(value)));
            } else {
                // no default: return zeroed memory.
                @memset(data[0..size], 0);
            }
            self.slots[index] = @as(*anyopaque, @ptrCast(data));
        }
        return self.slots[index].?;
    }
};
// Global structure for per-thread storage.
// Provides thread-safe, on-demand allocation of each thread's ObjectArray,
// keyed by a single pthread TLS key shared by all emutls variables.
const current_thread_storage = struct {
    var key: std.c.pthread_key_t = undefined;
    var init_once = std.once(current_thread_storage.init);

    /// Return the calling thread's ObjectArray, grown (or created) so that
    /// it can hold at least `index` slots.
    pub fn getArray(index: usize) *ObjectArray {
        if (current_thread_storage.getspecific()) |array| {
            // We already have a thread-specific array: just ensure it is
            // big enough for the wanted index.
            return array.ensureLength(index);
        }
        // No thread-specific array yet: create one containing at least 16
        // slots, to avoid repeated reallocation at startup.
        const size = @max(16, index);
        // Create a new array and register it with pthread.
        var array: *ObjectArray = ObjectArray.init(size);
        current_thread_storage.setspecific(array);
        return array;
    }

    /// Return the thread-specific value cast to its real type (null if unset).
    fn getspecific() ?*ObjectArray {
        return @ptrCast(@alignCast(std.c.pthread_getspecific(current_thread_storage.key)));
    }

    /// Store the thread-specific value; aborts on failure (bookkeeping in
    /// compiler_rt must not fail).
    fn setspecific(new: ?*ObjectArray) void {
        if (std.c.pthread_setspecific(current_thread_storage.key, @as(*anyopaque, @ptrCast(new))) != 0) {
            abort();
        }
    }

    /// Initialize the pthread key (run exactly once via `init_once`).
    /// NOTE(review): the original text here was mojibake ("¤t_...");
    /// restored to `&current_thread_storage.key`, the only spelling that
    /// type-checks against pthread_key_create.
    fn init() void {
        if (std.c.pthread_key_create(&current_thread_storage.key, current_thread_storage.deinit) != .SUCCESS) {
            abort();
        }
    }

    /// Destructor invoked by pthread at thread exit; the argument is the
    /// exiting thread's ObjectArray pointer.
    fn deinit(arrayPtr: *anyopaque) callconv(.C) void {
        var array: *ObjectArray = @ptrCast(@alignCast(arrayPtr));
        array.deinit();
    }
};
const emutls_control = extern struct {
    // An emutls_control value is a single global shared by all threads for
    // one TLS variable. Threads share the variable's index; the data array
    // (containing the addresses of the allocated per-thread objects) is
    // thread specific and stored using pthread_setspecific().

    // size of the object in bytes
    size: gcc_word,
    // alignment of the object in bytes
    alignment: gcc_word,
    object: extern union {
        // data[index-1] is the object address / 0 = uninit
        index: usize,
        // object address, when in single thread env (not used)
        address: *anyopaque,
    },
    // null or non-zero initial value for the object
    default_value: ?*const anyopaque,

    // global Mutex used to serialize control.index initialization.
    var mutex: std.c.pthread_mutex_t = std.c.PTHREAD_MUTEX_INITIALIZER;
    // global counter for keeping track of requested indexes.
    // access should be done with mutex held.
    var next_index: usize = 1;

    /// Simple wrapper for the global lock; aborts on failure.
    fn lock() void {
        if (std.c.pthread_mutex_lock(&emutls_control.mutex) != .SUCCESS) {
            abort();
        }
    }

    /// Simple wrapper for the global unlock; aborts on failure.
    fn unlock() void {
        if (std.c.pthread_mutex_unlock(&emutls_control.mutex) != .SUCCESS) {
            abort();
        }
    }

    /// Helper to retrieve and initialize the global unique index per emutls
    /// variable. Uses double-checked locking: a lockless atomic fast path,
    /// then the global mutex for first-time initialization.
    pub fn getIndex(self: *emutls_control) usize {
        // Two threads could race against the same emutls_control.
        // Use an atomic load for reading a coherent value locklessly.
        const index_lockless = @atomicLoad(usize, &self.object.index, .Acquire);
        if (index_lockless != 0) {
            // index is already initialized, return it.
            return index_lockless;
        }
        // index is uninitialized: take global lock to avoid possible race.
        emutls_control.lock();
        defer emutls_control.unlock();
        const index_locked = self.object.index;
        if (index_locked != 0) {
            // we lost a race, but index is already initialized: nothing particular to do.
            return index_locked;
        }
        // Store a new index atomically (so lockless readers observe a coherent value).
        @atomicStore(usize, &self.object.index, emutls_control.next_index, .Release);
        // Increment the next available index (mutex still held).
        emutls_control.next_index += 1;
        return self.object.index;
    }

    /// Simple helper for testing purposes.
    pub fn init(comptime T: type, default_value: ?*const T) emutls_control {
        return emutls_control{
            .size = @sizeOf(T),
            .alignment = @alignOf(T),
            .object = .{ .index = 0 },
            .default_value = @as(?*const anyopaque, @ptrCast(default_value)),
        };
    }

    /// Get the pointer to the calling thread's storage for this emutls
    /// variable, allocating and initializing it on first access.
    pub fn getPointer(self: *emutls_control) *anyopaque {
        // ensure current_thread_storage initialization is done
        current_thread_storage.init_once.call();
        const index = self.getIndex();
        var array = current_thread_storage.getArray(index);
        // indexes start at 1; the per-thread slots are 0-based.
        return array.getPointer(index - 1, self);
    }

    /// Testing helper for retrieving a typed pointer.
    pub fn get_typed_pointer(self: *emutls_control, comptime T: type) *T {
        assert(self.size == @sizeOf(T));
        assert(self.alignment == @alignOf(T));
        return @ptrCast(@alignCast(self.getPointer()));
    }
};
test "simple_allocator" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    // Typed allocation: fill the whole array through the array pointer.
    var typed: *[64]u8 = simple_allocator.alloc([64]u8);
    defer simple_allocator.free(typed);
    @memset(typed, 0xff);

    // Raw allocation: same fill, through a many-item pointer slice.
    var raw: [*]u8 = simple_allocator.advancedAlloc(@alignOf(u8), 64);
    defer simple_allocator.free(raw);
    @memset(raw[0..63], 0xff);
}
test "__emutls_get_address zeroed" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
    var ctl = emutls_control.init(usize, null);
    try expect(ctl.object.index == 0);

    // First access allocates an index and zero-initialized storage.
    var first: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(ctl.object.index != 0); // index has been allocated for this ctl
    try expect(first.* == 0); // storage has been zeroed

    // Mutate the storage, then look it up again through the same control.
    first.* = 1234;
    var second: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(second.* == 1234); // same content observed
    try expect(first == second); // same pointer
}
test "__emutls_get_address with default_value" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
    const value: usize = 5678; // default value
    var ctl = emutls_control.init(usize, &value);
    try expect(ctl.object.index == 0);

    // First access copies the default value into fresh storage.
    var first: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(ctl.object.index != 0);
    try expect(first.* == 5678); // storage initialized with default value

    // Mutating the storage must not touch the default, and must persist.
    first.* = 9012;
    try expect(value == 5678); // the default value didn't change
    var second: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(second.* == 9012); // the modified storage persists
}
test "test default_value with differents sizes" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
    // Round-trip a default value of type T through emutls storage.
    const check = struct {
        fn run(comptime T: type, value: T) !void {
            var ctl = emutls_control.init(T, &value);
            var stored = ctl.get_typed_pointer(T);
            try expect(stored.* == value);
        }
    }.run;
    try check(usize, 1234);
    try check(u32, 1234);
    try check(i16, -12);
    try check(f64, -12.0);
    try check(
        @TypeOf("012345678901234567890123456789"),
        "012345678901234567890123456789",
    );
}
| https://raw.githubusercontent.com/matpx/daydream/018ad0c7caaf796d8a04b882fcbed39ccb7c9cd8/toolchain/zig/lib/compiler_rt/emutls.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Hash = std.hash.Wyhash;
const Limb = std.math.big.Limb;
const Interner = @This();
// Deduplication map: keys/values live in the side arrays below; a value's
// map index doubles as its Ref.
map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
// One entry per interned item: a tag plus a 32-bit payload (either the
// value itself or an index into `extra`).
items: std.MultiArrayList(struct {
    tag: Tag,
    data: u32,
}) = .{},
// Variable-length payloads referenced by `items` entries.
extra: std.ArrayListUnmanaged(u32) = .{},
// Big-integer limb storage referenced by Tag.Int payloads.
limbs: std.ArrayListUnmanaged(Limb) = .{},
// Byte storage for interned `bytes` values.
strings: std.ArrayListUnmanaged(u8) = .{},
/// Hash-map adapter so Key values can be looked up in the void-keyed map:
/// the map index itself is the Ref, and equality rehydrates the stored
/// item back into a Key via get().
const KeyAdapter = struct {
    interner: *const Interner,

    pub fn eql(adapter: KeyAdapter, a: Key, b_void: void, b_map_index: usize) bool {
        _ = b_void;
        // The candidate's map index is its Ref; compare Key forms.
        return adapter.interner.get(@as(Ref, @enumFromInt(b_map_index))).eql(a);
    }

    pub fn hash(adapter: KeyAdapter, a: Key) u32 {
        _ = adapter;
        return a.hash();
    }
};
/// A value (type or constant) to be interned. Hand a Key to `put` to obtain
/// a stable Ref; `get` reconstructs the Key from a Ref.
pub const Key = union(enum) {
    int_ty: u16,
    float_ty: u16,
    ptr_ty,
    noreturn_ty,
    void_ty,
    func_ty,
    array_ty: struct {
        len: u64,
        child: Ref,
    },
    vector_ty: struct {
        len: u32,
        child: Ref,
    },
    record_ty: []const Ref,
    /// May not be zero
    null,
    int: union(enum) {
        u64: u64,
        i64: i64,
        big_int: BigIntConst,

        /// View any representation as a BigIntConst, using `space` as
        /// scratch storage for the small (u64/i64) cases.
        pub fn toBigInt(repr: @This(), space: *Tag.Int.BigIntSpace) BigIntConst {
            return switch (repr) {
                .big_int => |x| x,
                inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
            };
        }
    },
    float: Float,
    bytes: []const u8,

    pub const Float = union(enum) {
        f16: f16,
        f32: f32,
        f64: f64,
        f80: f80,
        f128: f128,
    };

    /// Content hash consistent with `eql`: equal ints hash alike regardless
    /// of representation (u64/i64/big_int); floats hash by bit pattern.
    pub fn hash(key: Key) u32 {
        var hasher = Hash.init(0);
        const tag = std.meta.activeTag(key);
        std.hash.autoHash(&hasher, tag);
        switch (key) {
            .bytes => |bytes| {
                hasher.update(bytes);
            },
            .record_ty => |elems| for (elems) |elem| {
                std.hash.autoHash(&hasher, elem);
            },
            .float => |repr| switch (repr) {
                // Hash the raw bits: autoHash cannot hash floats directly.
                inline else => |data| std.hash.autoHash(
                    &hasher,
                    @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)),
                ),
            },
            .int => |repr| {
                // Normalize to big-int form so equal values hash equally.
                var space: Tag.Int.BigIntSpace = undefined;
                const big = repr.toBigInt(&space);
                std.hash.autoHash(&hasher, big.positive);
                for (big.limbs) |limb| std.hash.autoHash(&hasher, limb);
            },
            inline else => |info| {
                std.hash.autoHash(&hasher, info);
            },
        }
        return @truncate(hasher.final());
    }

    /// Structural equality; `int` payloads compare by numeric value.
    pub fn eql(a: Key, b: Key) bool {
        const KeyTag = std.meta.Tag(Key);
        const a_tag: KeyTag = a;
        const b_tag: KeyTag = b;
        if (a_tag != b_tag) return false;
        switch (a) {
            .record_ty => |a_elems| {
                const b_elems = b.record_ty;
                if (a_elems.len != b_elems.len) return false;
                for (a_elems, b_elems) |a_elem, b_elem| {
                    if (a_elem != b_elem) return false;
                }
                return true;
            },
            .bytes => |a_bytes| {
                const b_bytes = b.bytes;
                return std.mem.eql(u8, a_bytes, b_bytes);
            },
            .int => |a_repr| {
                // Compare numeric values, independent of representation.
                var a_space: Tag.Int.BigIntSpace = undefined;
                const a_big = a_repr.toBigInt(&a_space);
                var b_space: Tag.Int.BigIntSpace = undefined;
                const b_big = b.int.toBigInt(&b_space);
                return a_big.eql(b_big);
            },
            inline else => |a_info, tag| {
                const b_info = @field(b, @tagName(tag));
                return std.meta.eql(a_info, b_info);
            },
        }
    }

    /// Map keys that have a dedicated static Ref (common types, zero, one,
    /// null) directly, avoiding an items-array entry; returns null when the
    /// key must actually be interned.
    fn toRef(key: Key) ?Ref {
        switch (key) {
            .int_ty => |bits| switch (bits) {
                1 => return .i1,
                8 => return .i8,
                16 => return .i16,
                32 => return .i32,
                64 => return .i64,
                128 => return .i128,
                else => {},
            },
            .float_ty => |bits| switch (bits) {
                16 => return .f16,
                32 => return .f32,
                64 => return .f64,
                80 => return .f80,
                128 => return .f128,
                // Only the five float widths above exist.
                else => unreachable,
            },
            .ptr_ty => return .ptr,
            .func_ty => return .func,
            .noreturn_ty => return .noreturn,
            .void_ty => return .void,
            .int => |repr| {
                var space: Tag.Int.BigIntSpace = undefined;
                const big = repr.toBigInt(&space);
                if (big.eqlZero()) return .zero;
                const big_one = BigIntConst{ .limbs = &.{1}, .positive = true };
                if (big.eql(big_one)) return .one;
            },
            .float => |repr| switch (repr) {
                inline else => |data| {
                    if (std.math.isPositiveZero(data)) return .zero;
                    if (data == 1) return .one;
                },
            },
            .null => return .null,
            else => {},
        }
        return null;
    }
};
/// Handle to an interned item. Values at the top of the u32 range are
/// reserved for statically-known types and constants (see Key.toRef);
/// any other value is an index into `items`/`map`.
pub const Ref = enum(u32) {
    const max = std.math.maxInt(u32);
    ptr = max - 1,
    noreturn = max - 2,
    void = max - 3,
    i1 = max - 4,
    i8 = max - 5,
    i16 = max - 6,
    i32 = max - 7,
    i64 = max - 8,
    i128 = max - 9,
    f16 = max - 10,
    f32 = max - 11,
    f64 = max - 12,
    f80 = max - 13,
    f128 = max - 14,
    func = max - 15,
    zero = max - 16,
    one = max - 17,
    null = max - 18,
    _,
};
/// Optional variant of Ref: identical layout plus a `none` sentinel at
/// maxInt(u32), so the reserved values line up with Ref's.
pub const OptRef = enum(u32) {
    const max = std.math.maxInt(u32);
    none = max - 0,
    ptr = max - 1,
    noreturn = max - 2,
    void = max - 3,
    i1 = max - 4,
    i8 = max - 5,
    i16 = max - 6,
    i32 = max - 7,
    i64 = max - 8,
    i128 = max - 9,
    f16 = max - 10,
    f32 = max - 11,
    f64 = max - 12,
    f80 = max - 13,
    f128 = max - 14,
    func = max - 15,
    zero = max - 16,
    one = max - 17,
    null = max - 18,
    _,
};
/// Storage tag for `items`; each variant documents how that entry's 32-bit
/// `data` word is interpreted.
pub const Tag = enum(u8) {
    /// `data` is `u16`
    int_ty,
    /// `data` is `u16`
    float_ty,
    /// `data` is index to `Array`
    array_ty,
    /// `data` is index to `Vector`
    vector_ty,
    /// `data` is `u32`
    u32,
    /// `data` is `i32`
    i32,
    /// `data` is `Int`
    int_positive,
    /// `data` is `Int`
    int_negative,
    /// `data` is `f16`
    f16,
    /// `data` is `f32`
    f32,
    /// `data` is `F64`
    f64,
    /// `data` is `F80`
    f80,
    /// `data` is `F128`
    f128,
    /// `data` is `Bytes`
    bytes,
    /// `data` is `Record`
    record_ty,

    /// Array type payload; the 64-bit length is split into two u32 pieces.
    pub const Array = struct {
        len0: u32,
        len1: u32,
        child: Ref,

        /// Recombine the split length into the original u64.
        pub fn getLen(a: Array) u64 {
            return (PackedU64{
                .a = a.len0,
                .b = a.len1,
            }).get();
        }
    };

    /// Vector type payload.
    pub const Vector = struct {
        len: u32,
        child: Ref,
    };

    /// Big-integer payload: a window into the interner's shared `limbs` array.
    pub const Int = struct {
        limbs_index: u32,
        limbs_len: u32,

        /// Big enough to fit any non-BigInt value
        pub const BigIntSpace = struct {
            /// The +1 is headroom so that operations such as incrementing once
            /// or decrementing once are possible without using an allocator.
            limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
        };
    };

    /// f64 constant split into two u32 pieces (piece0 = low bits).
    pub const F64 = struct {
        piece0: u32,
        piece1: u32,

        /// Reassemble the f64 from its pieces via the integer bit pattern.
        pub fn get(self: F64) f64 {
            const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32);
            return @bitCast(int_bits);
        }

        fn pack(val: f64) F64 {
            const bits = @as(u64, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
            };
        }
    };

    /// f80 constant split into u32 pieces; piece2 carries the top 16 bits.
    pub const F80 = struct {
        piece0: u32,
        piece1: u32,
        piece2: u32, // u16 part, top bits

        /// Reassemble the f80 from its pieces via the integer bit pattern.
        pub fn get(self: F80) f80 {
            const int_bits = @as(u80, self.piece0) |
                (@as(u80, self.piece1) << 32) |
                (@as(u80, self.piece2) << 64);
            return @bitCast(int_bits);
        }

        fn pack(val: f80) F80 {
            const bits = @as(u80, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
                .piece2 = @as(u16, @truncate(bits >> 64)),
            };
        }
    };

    /// f128 constant split into four u32 pieces (piece0 = low bits).
    pub const F128 = struct {
        piece0: u32,
        piece1: u32,
        piece2: u32,
        piece3: u32,

        /// Reassemble the f128 from its pieces via the integer bit pattern.
        pub fn get(self: F128) f128 {
            const int_bits = @as(u128, self.piece0) |
                (@as(u128, self.piece1) << 32) |
                (@as(u128, self.piece2) << 64) |
                (@as(u128, self.piece3) << 96);
            return @bitCast(int_bits);
        }

        fn pack(val: f128) F128 {
            const bits = @as(u128, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
                .piece2 = @as(u32, @truncate(bits >> 64)),
                .piece3 = @as(u32, @truncate(bits >> 96)),
            };
        }
    };

    /// Interned byte string: a window into the shared `strings` array.
    pub const Bytes = struct {
        strings_index: u32,
        len: u32,
    };

    /// Record type header; `elements_len` Refs follow it in `extra`.
    pub const Record = struct {
        elements_len: u32,
        // trailing
        // [elements_len]Ref
    };
};
/// A u64 split into two u32 halves so it fits the 32-bit `extra` slots.
/// In a `packed struct(u64)` the first field occupies the least significant
/// bits, so `a` is always the low half.
pub const PackedU64 = packed struct(u64) {
    a: u32,
    b: u32,

    /// Reassemble the original u64 from the two halves.
    pub fn get(x: PackedU64) u64 {
        return @as(u64, x.a) | (@as(u64, x.b) << 32);
    }

    /// Split a u64 into its low (`a`) and high (`b`) halves.
    pub fn init(x: u64) PackedU64 {
        return .{
            .a = @as(u32, @truncate(x)),
            .b = @as(u32, @truncate(x >> 32)),
        };
    }
};
/// Release all memory owned by the interner. Any Refs and slices previously
/// handed out become invalid; the interner must not be used afterwards.
pub fn deinit(i: *Interner, gpa: Allocator) void {
    i.map.deinit(gpa);
    i.items.deinit(gpa);
    i.extra.deinit(gpa);
    i.limbs.deinit(gpa);
    i.strings.deinit(gpa);
}
/// Intern `key`, returning its stable Ref. Keys with a reserved static Ref
/// (common types, zero/one/null) return immediately without touching the
/// tables; everything else is stored on first sight and deduplicated via
/// the map thereafter.
pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
    if (key.toRef()) |some| return some;
    const adapter: KeyAdapter = .{ .interner = i };
    const gop = try i.map.getOrPutAdapted(gpa, key, adapter);
    // The map index of an entry is its Ref.
    if (gop.found_existing) return @enumFromInt(gop.index);
    try i.items.ensureUnusedCapacity(gpa, 1);
    switch (key) {
        .int_ty => |bits| {
            i.items.appendAssumeCapacity(.{
                .tag = .int_ty,
                .data = bits,
            });
        },
        .float_ty => |bits| {
            i.items.appendAssumeCapacity(.{
                .tag = .float_ty,
                .data = bits,
            });
        },
        .array_ty => |info| {
            // The 64-bit length is split across two u32 extra fields.
            const split_len = PackedU64.init(info.len);
            i.items.appendAssumeCapacity(.{
                .tag = .array_ty,
                .data = try i.addExtra(gpa, Tag.Array{
                    .len0 = split_len.a,
                    .len1 = split_len.b,
                    .child = info.child,
                }),
            });
        },
        .vector_ty => |info| {
            i.items.appendAssumeCapacity(.{
                .tag = .vector_ty,
                .data = try i.addExtra(gpa, Tag.Vector{
                    .len = info.len,
                    .child = info.child,
                }),
            });
        },
        .int => |repr| int: {
            var space: Tag.Int.BigIntSpace = undefined;
            const big = repr.toBigInt(&space);
            // Fast path: values fitting in 32 bits are stored inline in
            // `data`; only larger values spill limbs into `limbs`.
            switch (repr) {
                .u64 => |data| if (std.math.cast(u32, data)) |small| {
                    i.items.appendAssumeCapacity(.{
                        .tag = .u32,
                        .data = small,
                    });
                    break :int;
                },
                .i64 => |data| if (std.math.cast(i32, data)) |small| {
                    i.items.appendAssumeCapacity(.{
                        .tag = .i32,
                        .data = @bitCast(small),
                    });
                    break :int;
                },
                .big_int => |data| {
                    if (data.fitsInTwosComp(.unsigned, 32)) {
                        i.items.appendAssumeCapacity(.{
                            .tag = .u32,
                            .data = data.to(u32) catch unreachable,
                        });
                        break :int;
                    } else if (data.fitsInTwosComp(.signed, 32)) {
                        i.items.appendAssumeCapacity(.{
                            .tag = .i32,
                            .data = @bitCast(data.to(i32) catch unreachable),
                        });
                        break :int;
                    }
                },
            }
            // Big path: copy the limbs and record their window in `extra`.
            const limbs_index: u32 = @intCast(i.limbs.items.len);
            try i.limbs.appendSlice(gpa, big.limbs);
            i.items.appendAssumeCapacity(.{
                .tag = if (big.positive) .int_positive else .int_negative,
                .data = try i.addExtra(gpa, Tag.Int{
                    .limbs_index = limbs_index,
                    .limbs_len = @intCast(big.limbs.len),
                }),
            });
        },
        .float => |repr| switch (repr) {
            // f16/f32 fit directly in `data`; wider floats go through extra.
            .f16 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f16,
                .data = @as(u16, @bitCast(data)),
            }),
            .f32 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f32,
                .data = @as(u32, @bitCast(data)),
            }),
            .f64 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f64,
                .data = try i.addExtra(gpa, Tag.F64.pack(data)),
            }),
            .f80 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f80,
                .data = try i.addExtra(gpa, Tag.F80.pack(data)),
            }),
            .f128 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f128,
                .data = try i.addExtra(gpa, Tag.F128.pack(data)),
            }),
        },
        .bytes => |bytes| {
            // Copy the bytes into the shared string buffer.
            const strings_index: u32 = @intCast(i.strings.items.len);
            try i.strings.appendSlice(gpa, bytes);
            i.items.appendAssumeCapacity(.{
                .tag = .bytes,
                .data = try i.addExtra(gpa, Tag.Bytes{
                    .strings_index = strings_index,
                    .len = @intCast(bytes.len),
                }),
            });
        },
        .record_ty => |elems| {
            // Header plus the element Refs trailing it in `extra`.
            try i.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.Record).Struct.fields.len +
                elems.len);
            i.items.appendAssumeCapacity(.{
                .tag = .record_ty,
                .data = i.addExtraAssumeCapacity(Tag.Record{
                    .elements_len = @intCast(elems.len),
                }),
            });
            i.extra.appendSliceAssumeCapacity(@ptrCast(elems));
        },
        // These keys always map to a static Ref in toRef() above.
        .ptr_ty,
        .noreturn_ty,
        .void_ty,
        .func_ty,
        .null,
        => unreachable,
    }
    return @enumFromInt(gop.index);
}
/// Append `extra`'s fields to the extra array, growing it first.
/// Returns the starting index of the stored data.
fn addExtra(i: *Interner, gpa: Allocator, extra: anytype) Allocator.Error!u32 {
    const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
    try i.extra.ensureUnusedCapacity(gpa, fields.len);
    return i.addExtraAssumeCapacity(extra);
}
/// Serialize `extra` field-by-field into the extra array (capacity must
/// already be reserved). Only u32 and Ref fields are supported; anything
/// else is rejected at compile time.
fn addExtraAssumeCapacity(i: *Interner, extra: anytype) u32 {
    const result = @as(u32, @intCast(i.extra.items.len));
    inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
        i.extra.appendAssumeCapacity(switch (field.type) {
            Ref => @intFromEnum(@field(extra, field.name)),
            u32 => @field(extra, field.name),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        });
    }
    return result;
}
/// Reconstruct the Key for `ref`. Reserved refs (common types, zero/one/
/// null) decode without any table lookup. Slices in the result (big-int
/// limbs, bytes, record elements) point into the interner's own storage
/// and are invalidated by further `put` calls or by `deinit`.
pub fn get(i: *const Interner, ref: Ref) Key {
    switch (ref) {
        .ptr => return .ptr_ty,
        .func => return .func_ty,
        .noreturn => return .noreturn_ty,
        .void => return .void_ty,
        .i1 => return .{ .int_ty = 1 },
        .i8 => return .{ .int_ty = 8 },
        .i16 => return .{ .int_ty = 16 },
        .i32 => return .{ .int_ty = 32 },
        .i64 => return .{ .int_ty = 64 },
        .i128 => return .{ .int_ty = 128 },
        .f16 => return .{ .float_ty = 16 },
        .f32 => return .{ .float_ty = 32 },
        .f64 => return .{ .float_ty = 64 },
        .f80 => return .{ .float_ty = 80 },
        .f128 => return .{ .float_ty = 128 },
        .zero => return .{ .int = .{ .u64 = 0 } },
        .one => return .{ .int = .{ .u64 = 1 } },
        .null => return .null,
        else => {},
    }
    // Non-reserved refs index directly into `items`.
    const item = i.items.get(@intFromEnum(ref));
    const data = item.data;
    return switch (item.tag) {
        .int_ty => .{ .int_ty = @intCast(data) },
        .float_ty => .{ .float_ty = @intCast(data) },
        .array_ty => {
            const array_ty = i.extraData(Tag.Array, data);
            return .{ .array_ty = .{
                .len = array_ty.getLen(),
                .child = array_ty.child,
            } };
        },
        .vector_ty => {
            const vector_ty = i.extraData(Tag.Vector, data);
            return .{ .vector_ty = .{
                .len = vector_ty.len,
                .child = vector_ty.child,
            } };
        },
        // Small ints are stored inline in `data`.
        .u32 => .{ .int = .{ .u64 = data } },
        .i32 => .{ .int = .{ .i64 = @as(i32, @bitCast(data)) } },
        .int_positive, .int_negative => {
            // Rebuild the big-int view over the shared limbs buffer; the
            // sign lives in the tag.
            const int_info = i.extraData(Tag.Int, data);
            const limbs = i.limbs.items[int_info.limbs_index..][0..int_info.limbs_len];
            return .{ .int = .{
                .big_int = .{
                    .positive = item.tag == .int_positive,
                    .limbs = limbs,
                },
            } };
        },
        .f16 => .{ .float = .{ .f16 = @bitCast(@as(u16, @intCast(data))) } },
        .f32 => .{ .float = .{ .f32 = @bitCast(data) } },
        .f64 => {
            const float = i.extraData(Tag.F64, data);
            return .{ .float = .{ .f64 = float.get() } };
        },
        .f80 => {
            const float = i.extraData(Tag.F80, data);
            return .{ .float = .{ .f80 = float.get() } };
        },
        .f128 => {
            const float = i.extraData(Tag.F128, data);
            return .{ .float = .{ .f128 = float.get() } };
        },
        .bytes => {
            const bytes = i.extraData(Tag.Bytes, data);
            return .{ .bytes = i.strings.items[bytes.strings_index..][0..bytes.len] };
        },
        .record_ty => {
            // The element Refs trail the Record header in `extra`.
            const extra = i.extraDataTrail(Tag.Record, data);
            return .{
                .record_ty = @ptrCast(i.extra.items[extra.end..][0..extra.data.elements_len]),
            };
        },
    };
}
/// Decode a `T` stored field-by-field in `extra` starting at `index`.
fn extraData(i: *const Interner, comptime T: type, index: usize) T {
    return i.extraDataTrail(T, index).data;
}
/// Decode a `T` stored field-by-field in `extra` starting at `index`, and
/// also return the index one past the decoded fields, where any trailing
/// data (e.g. Record elements) begins. Inverse of addExtraAssumeCapacity.
fn extraDataTrail(i: *const Interner, comptime T: type, index: usize) struct { data: T, end: u32 } {
    var result: T = undefined;
    const fields = @typeInfo(T).Struct.fields;
    inline for (fields, 0..) |field, field_i| {
        const int32 = i.extra.items[field_i + index];
        @field(result, field.name) = switch (field.type) {
            Ref => @enumFromInt(int32),
            u32 => int32,
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        };
    }
    return .{
        .data = result,
        .end = @intCast(index + fields.len),
    };
}
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/lib/compiler/aro/backend/Interner.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Hash = std.hash.Wyhash;
const Limb = std.math.big.Limb;
const Interner = @This();
// Deduplication map: keys/values live in the side arrays below; a value's
// map index doubles as its Ref.
map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
// One entry per interned item: a tag plus a 32-bit payload (either the
// value itself or an index into `extra`).
items: std.MultiArrayList(struct {
    tag: Tag,
    data: u32,
}) = .{},
// Variable-length payloads referenced by `items` entries.
extra: std.ArrayListUnmanaged(u32) = .{},
// Big-integer limb storage referenced by Tag.Int payloads.
limbs: std.ArrayListUnmanaged(Limb) = .{},
// Byte storage for interned `bytes` values.
strings: std.ArrayListUnmanaged(u8) = .{},
/// Hash-map adapter so Key values can be looked up in the void-keyed map:
/// the map index itself is the Ref, and equality rehydrates the stored
/// item back into a Key via get().
const KeyAdapter = struct {
    interner: *const Interner,

    pub fn eql(adapter: KeyAdapter, a: Key, b_void: void, b_map_index: usize) bool {
        _ = b_void;
        // The candidate's map index is its Ref; compare Key forms.
        return adapter.interner.get(@as(Ref, @enumFromInt(b_map_index))).eql(a);
    }

    pub fn hash(adapter: KeyAdapter, a: Key) u32 {
        _ = adapter;
        return a.hash();
    }
};
/// Fully materialized view of an interned value; used both as the lookup
/// key for `put` and as the result of `get`.
pub const Key = union(enum) {
    /// Integer type with this bit width.
    int_ty: u16,
    /// Float type with this bit width (16, 32, 64, 80 or 128).
    float_ty: u16,
    ptr_ty,
    noreturn_ty,
    void_ty,
    func_ty,
    array_ty: struct {
        len: u64,
        child: Ref,
    },
    vector_ty: struct {
        len: u32,
        child: Ref,
    },
    record_ty: []const Ref,
    /// May not be zero
    null,
    int: union(enum) {
        u64: u64,
        i64: i64,
        big_int: BigIntConst,

        /// View any representation as a big integer; small values are
        /// materialized into the caller-provided `space`.
        pub fn toBigInt(repr: @This(), space: *Tag.Int.BigIntSpace) BigIntConst {
            return switch (repr) {
                .big_int => |x| x,
                inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
            };
        }
    },
    float: Float,
    bytes: []const u8,

    pub const Float = union(enum) {
        f16: f16,
        f32: f32,
        f64: f64,
        f80: f80,
        f128: f128,
    };

    /// Hash consistent with `eql`: integers hash by sign and limbs so all
    /// three `int` representations of the same value collide, and floats
    /// hash their raw bit pattern.
    pub fn hash(key: Key) u32 {
        var hasher = Hash.init(0);
        const tag = std.meta.activeTag(key);
        std.hash.autoHash(&hasher, tag);
        switch (key) {
            .bytes => |bytes| {
                hasher.update(bytes);
            },
            .record_ty => |elems| for (elems) |elem| {
                std.hash.autoHash(&hasher, elem);
            },
            .float => |repr| switch (repr) {
                // Hashing the bits (not the value) keeps distinct bit
                // patterns such as +0.0 and -0.0 apart.
                inline else => |data| std.hash.autoHash(
                    &hasher,
                    @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)),
                ),
            },
            .int => |repr| {
                var space: Tag.Int.BigIntSpace = undefined;
                const big = repr.toBigInt(&space);
                std.hash.autoHash(&hasher, big.positive);
                for (big.limbs) |limb| std.hash.autoHash(&hasher, limb);
            },
            inline else => |info| {
                std.hash.autoHash(&hasher, info);
            },
        }
        return @truncate(hasher.final());
    }

    /// Structural equality, matching `hash` above.
    pub fn eql(a: Key, b: Key) bool {
        const KeyTag = std.meta.Tag(Key);
        const a_tag: KeyTag = a;
        const b_tag: KeyTag = b;
        if (a_tag != b_tag) return false;
        switch (a) {
            .record_ty => |a_elems| {
                const b_elems = b.record_ty;
                if (a_elems.len != b_elems.len) return false;
                for (a_elems, b_elems) |a_elem, b_elem| {
                    if (a_elem != b_elem) return false;
                }
                return true;
            },
            .bytes => |a_bytes| {
                const b_bytes = b.bytes;
                return std.mem.eql(u8, a_bytes, b_bytes);
            },
            .int => |a_repr| {
                // Compare by numeric value, independent of representation.
                var a_space: Tag.Int.BigIntSpace = undefined;
                const a_big = a_repr.toBigInt(&a_space);
                var b_space: Tag.Int.BigIntSpace = undefined;
                const b_big = b.int.toBigInt(&b_space);
                return a_big.eql(b_big);
            },
            inline else => |a_info, tag| {
                const b_info = @field(b, @tagName(tag));
                return std.meta.eql(a_info, b_info);
            },
        }
    }

    /// Map statically-known values (common types, 0, 1, null) to their
    /// pre-interned sentinel `Ref`s; returns null if the key must be
    /// stored in the tables.
    fn toRef(key: Key) ?Ref {
        switch (key) {
            .int_ty => |bits| switch (bits) {
                1 => return .i1,
                8 => return .i8,
                16 => return .i16,
                32 => return .i32,
                64 => return .i64,
                128 => return .i128,
                else => {},
            },
            .float_ty => |bits| switch (bits) {
                16 => return .f16,
                32 => return .f32,
                64 => return .f64,
                80 => return .f80,
                128 => return .f128,
                else => unreachable,
            },
            .ptr_ty => return .ptr,
            .func_ty => return .func,
            .noreturn_ty => return .noreturn,
            .void_ty => return .void,
            .int => |repr| {
                var space: Tag.Int.BigIntSpace = undefined;
                const big = repr.toBigInt(&space);
                if (big.eqlZero()) return .zero;
                const big_one = BigIntConst{ .limbs = &.{1}, .positive = true };
                if (big.eql(big_one)) return .one;
            },
            .float => |repr| switch (repr) {
                inline else => |data| {
                    // Only +0.0 folds to `.zero`; -0.0 is interned normally.
                    if (std.math.isPositiveZero(data)) return .zero;
                    if (data == 1) return .one;
                },
            },
            .null => return .null,
            else => {},
        }
        return null;
    }
};
/// Index of an interned value. Commonly used values are pre-interned as
/// named sentinels packed at the top of the `u32` range so they never
/// collide with real `items` indexes. The values must stay in sync with
/// `OptRef` below (which additionally reserves `max` for `none`).
pub const Ref = enum(u32) {
    const max = std.math.maxInt(u32);
    ptr = max - 1,
    noreturn = max - 2,
    void = max - 3,
    i1 = max - 4,
    i8 = max - 5,
    i16 = max - 6,
    i32 = max - 7,
    i64 = max - 8,
    i128 = max - 9,
    f16 = max - 10,
    f32 = max - 11,
    f64 = max - 12,
    f80 = max - 13,
    f128 = max - 14,
    func = max - 15,
    zero = max - 16,
    one = max - 17,
    null = max - 18,
    _,
};
/// Optional `Ref`: identical layout to `Ref` plus a `none` sentinel at
/// `maxInt(u32)`. Keep the shared tag values in sync with `Ref` above.
pub const OptRef = enum(u32) {
    const max = std.math.maxInt(u32);
    none = max - 0,
    ptr = max - 1,
    noreturn = max - 2,
    void = max - 3,
    i1 = max - 4,
    i8 = max - 5,
    i16 = max - 6,
    i32 = max - 7,
    i64 = max - 8,
    i128 = max - 9,
    f16 = max - 10,
    f32 = max - 11,
    f64 = max - 12,
    f80 = max - 13,
    f128 = max - 14,
    func = max - 15,
    zero = max - 16,
    one = max - 17,
    null = max - 18,
    _,
};
/// Item storage tag. The doc comment on each variant describes what the
/// item's 32-bit `data` field holds (an immediate value or an index into
/// `extra` at which the named payload struct is serialized).
pub const Tag = enum(u8) {
    /// `data` is `u16`
    int_ty,
    /// `data` is `u16`
    float_ty,
    /// `data` is index to `Array`
    array_ty,
    /// `data` is index to `Vector`
    vector_ty,
    /// `data` is `u32`
    u32,
    /// `data` is `i32`
    i32,
    /// `data` is `Int`
    int_positive,
    /// `data` is `Int`
    int_negative,
    /// `data` is `f16`
    f16,
    /// `data` is `f32`
    f32,
    /// `data` is `F64`
    f64,
    /// `data` is `F80`
    f80,
    /// `data` is `F128`
    f128,
    /// `data` is `Bytes`
    bytes,
    /// `data` is `Record`
    record_ty,

    /// Array type payload: 64-bit length split into two u32 words
    /// (low word first, see `PackedU64`), plus the element type.
    pub const Array = struct {
        len0: u32,
        len1: u32,
        child: Ref,

        pub fn getLen(a: Array) u64 {
            return (PackedU64{
                .a = a.len0,
                .b = a.len1,
            }).get();
        }
    };

    /// Vector type payload.
    pub const Vector = struct {
        len: u32,
        child: Ref,
    };

    /// Big-integer payload: a slice into `Interner.limbs`.
    pub const Int = struct {
        limbs_index: u32,
        limbs_len: u32,

        /// Big enough to fit any non-BigInt value
        pub const BigIntSpace = struct {
            /// The +1 is headroom so that operations such as incrementing once
            /// or decrementing once are possible without using an allocator.
            limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
        };
    };

    /// `f64` bit pattern split into two u32 words (low word first).
    pub const F64 = struct {
        piece0: u32,
        piece1: u32,

        pub fn get(self: F64) f64 {
            const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32);
            return @bitCast(int_bits);
        }

        fn pack(val: f64) F64 {
            const bits = @as(u64, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
            };
        }
    };

    /// `f80` bit pattern split into three words; only the low 16 bits of
    /// `piece2` are meaningful.
    pub const F80 = struct {
        piece0: u32,
        piece1: u32,
        piece2: u32, // u16 part, top bits

        pub fn get(self: F80) f80 {
            const int_bits = @as(u80, self.piece0) |
                (@as(u80, self.piece1) << 32) |
                (@as(u80, self.piece2) << 64);
            return @bitCast(int_bits);
        }

        fn pack(val: f80) F80 {
            const bits = @as(u80, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
                .piece2 = @as(u16, @truncate(bits >> 64)),
            };
        }
    };

    /// `f128` bit pattern split into four u32 words (low word first).
    pub const F128 = struct {
        piece0: u32,
        piece1: u32,
        piece2: u32,
        piece3: u32,

        pub fn get(self: F128) f128 {
            const int_bits = @as(u128, self.piece0) |
                (@as(u128, self.piece1) << 32) |
                (@as(u128, self.piece2) << 64) |
                (@as(u128, self.piece3) << 96);
            return @bitCast(int_bits);
        }

        fn pack(val: f128) F128 {
            const bits = @as(u128, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
                .piece2 = @as(u32, @truncate(bits >> 64)),
                .piece3 = @as(u32, @truncate(bits >> 96)),
            };
        }
    };

    /// Byte-string payload: a slice into `Interner.strings`.
    pub const Bytes = struct {
        strings_index: u32,
        len: u32,
    };

    /// Record type payload; the element `Ref`s follow directly in `extra`.
    pub const Record = struct {
        elements_len: u32,
        // trailing
        // [elements_len]Ref
    };
};
/// A `u64` split into two `u32` halves so it can be stored in 32-bit
/// `extra` slots. Because packed-struct fields start at the least
/// significant bit, `a` holds the low half and `b` the high half.
pub const PackedU64 = packed struct(u64) {
    a: u32,
    b: u32,

    /// Reassemble the original 64-bit value from the two halves.
    pub fn get(self: PackedU64) u64 {
        return @as(u64, self.a) | (@as(u64, self.b) << 32);
    }

    /// Split `value` into its low (`a`) and high (`b`) 32-bit halves.
    pub fn init(value: u64) PackedU64 {
        return .{
            .a = @truncate(value),
            .b = @truncate(value >> 32),
        };
    }
};
/// Free all interner storage; the interner is unusable afterwards.
pub fn deinit(i: *Interner, gpa: Allocator) void {
    i.map.deinit(gpa);
    i.items.deinit(gpa);
    i.extra.deinit(gpa);
    i.limbs.deinit(gpa);
    i.strings.deinit(gpa);
}
/// Intern `key` and return its `Ref`. Well-known values (common types,
/// zero, one, null) are answered with sentinel refs without touching the
/// tables; anything else is deduplicated through `map` and appended to
/// `items` on first use. `bytes` contents are copied into interner-owned
/// storage.
pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
    if (key.toRef()) |some| return some;
    const adapter: KeyAdapter = .{ .interner = i };
    const gop = try i.map.getOrPutAdapted(gpa, key, adapter);
    if (gop.found_existing) return @enumFromInt(gop.index);
    // Reserve the item slot up front so appendAssumeCapacity below cannot
    // fail. NOTE(review): a failing allocation in addExtra/limbs/strings
    // after this point leaves the new map entry without a matching item —
    // presumably callers treat that error as fatal; confirm.
    try i.items.ensureUnusedCapacity(gpa, 1);
    switch (key) {
        .int_ty => |bits| {
            i.items.appendAssumeCapacity(.{
                .tag = .int_ty,
                .data = bits,
            });
        },
        .float_ty => |bits| {
            i.items.appendAssumeCapacity(.{
                .tag = .float_ty,
                .data = bits,
            });
        },
        .array_ty => |info| {
            const split_len = PackedU64.init(info.len);
            i.items.appendAssumeCapacity(.{
                .tag = .array_ty,
                .data = try i.addExtra(gpa, Tag.Array{
                    .len0 = split_len.a,
                    .len1 = split_len.b,
                    .child = info.child,
                }),
            });
        },
        .vector_ty => |info| {
            i.items.appendAssumeCapacity(.{
                .tag = .vector_ty,
                .data = try i.addExtra(gpa, Tag.Vector{
                    .len = info.len,
                    .child = info.child,
                }),
            });
        },
        .int => |repr| int: {
            var space: Tag.Int.BigIntSpace = undefined;
            const big = repr.toBigInt(&space);
            // Prefer the compact u32/i32 immediate encodings when the
            // value fits; only spill limbs for genuinely large values.
            switch (repr) {
                .u64 => |data| if (std.math.cast(u32, data)) |small| {
                    i.items.appendAssumeCapacity(.{
                        .tag = .u32,
                        .data = small,
                    });
                    break :int;
                },
                .i64 => |data| if (std.math.cast(i32, data)) |small| {
                    i.items.appendAssumeCapacity(.{
                        .tag = .i32,
                        .data = @bitCast(small),
                    });
                    break :int;
                },
                .big_int => |data| {
                    if (data.fitsInTwosComp(.unsigned, 32)) {
                        i.items.appendAssumeCapacity(.{
                            .tag = .u32,
                            .data = data.to(u32) catch unreachable,
                        });
                        break :int;
                    } else if (data.fitsInTwosComp(.signed, 32)) {
                        i.items.appendAssumeCapacity(.{
                            .tag = .i32,
                            .data = @bitCast(data.to(i32) catch unreachable),
                        });
                        break :int;
                    }
                },
            }
            // Fallback: sign lives in the tag, limbs are stored out of line.
            const limbs_index: u32 = @intCast(i.limbs.items.len);
            try i.limbs.appendSlice(gpa, big.limbs);
            i.items.appendAssumeCapacity(.{
                .tag = if (big.positive) .int_positive else .int_negative,
                .data = try i.addExtra(gpa, Tag.Int{
                    .limbs_index = limbs_index,
                    .limbs_len = @intCast(big.limbs.len),
                }),
            });
        },
        .float => |repr| switch (repr) {
            // f16/f32 bit patterns fit directly in `data`; wider floats go
            // through `extra`.
            .f16 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f16,
                .data = @as(u16, @bitCast(data)),
            }),
            .f32 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f32,
                .data = @as(u32, @bitCast(data)),
            }),
            .f64 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f64,
                .data = try i.addExtra(gpa, Tag.F64.pack(data)),
            }),
            .f80 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f80,
                .data = try i.addExtra(gpa, Tag.F80.pack(data)),
            }),
            .f128 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f128,
                .data = try i.addExtra(gpa, Tag.F128.pack(data)),
            }),
        },
        .bytes => |bytes| {
            // Copy the bytes into interner-owned storage.
            const strings_index: u32 = @intCast(i.strings.items.len);
            try i.strings.appendSlice(gpa, bytes);
            i.items.appendAssumeCapacity(.{
                .tag = .bytes,
                .data = try i.addExtra(gpa, Tag.Bytes{
                    .strings_index = strings_index,
                    .len = @intCast(bytes.len),
                }),
            });
        },
        .record_ty => |elems| {
            // Header followed by the element refs, written under a single
            // capacity reservation.
            try i.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.Record).Struct.fields.len +
                elems.len);
            i.items.appendAssumeCapacity(.{
                .tag = .record_ty,
                .data = i.addExtraAssumeCapacity(Tag.Record{
                    .elements_len = @intCast(elems.len),
                }),
            });
            i.extra.appendSliceAssumeCapacity(@ptrCast(elems));
        },
        // These keys always map to sentinel refs via `toRef` above.
        .ptr_ty,
        .noreturn_ty,
        .void_ty,
        .func_ty,
        .null,
        => unreachable,
    }
    return @enumFromInt(gop.index);
}
/// Reserve space, then serialize `extra` (a struct of `u32`/`Ref` fields)
/// into the `extra` array; returns the index of its first word.
fn addExtra(i: *Interner, gpa: Allocator, extra: anytype) Allocator.Error!u32 {
    const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
    try i.extra.ensureUnusedCapacity(gpa, fields.len);
    return i.addExtraAssumeCapacity(extra);
}
/// Serialize `extra`'s fields into the `extra` array, one `u32` per field,
/// in declaration order; capacity must already be reserved. Returns the
/// starting index (the value stored in an item's `data`).
fn addExtraAssumeCapacity(i: *Interner, extra: anytype) u32 {
    const result = @as(u32, @intCast(i.extra.items.len));
    inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
        i.extra.appendAssumeCapacity(switch (field.type) {
            Ref => @intFromEnum(@field(extra, field.name)),
            u32 => @field(extra, field.name),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        });
    }
    return result;
}
/// Reconstruct the `Key` for `ref`. Sentinel refs are answered directly;
/// otherwise the item's tag/data pair is decoded. Returned slices
/// (`bytes`, `record_ty`, big-int limbs) alias interner storage and are
/// invalidated by subsequent `put` calls.
pub fn get(i: *const Interner, ref: Ref) Key {
    switch (ref) {
        .ptr => return .ptr_ty,
        .func => return .func_ty,
        .noreturn => return .noreturn_ty,
        .void => return .void_ty,
        .i1 => return .{ .int_ty = 1 },
        .i8 => return .{ .int_ty = 8 },
        .i16 => return .{ .int_ty = 16 },
        .i32 => return .{ .int_ty = 32 },
        .i64 => return .{ .int_ty = 64 },
        .i128 => return .{ .int_ty = 128 },
        .f16 => return .{ .float_ty = 16 },
        .f32 => return .{ .float_ty = 32 },
        .f64 => return .{ .float_ty = 64 },
        .f80 => return .{ .float_ty = 80 },
        .f128 => return .{ .float_ty = 128 },
        .zero => return .{ .int = .{ .u64 = 0 } },
        .one => return .{ .int = .{ .u64 = 1 } },
        .null => return .null,
        else => {},
    }
    // Non-sentinel refs index straight into `items`.
    const item = i.items.get(@intFromEnum(ref));
    const data = item.data;
    return switch (item.tag) {
        .int_ty => .{ .int_ty = @intCast(data) },
        .float_ty => .{ .float_ty = @intCast(data) },
        .array_ty => {
            const array_ty = i.extraData(Tag.Array, data);
            return .{ .array_ty = .{
                .len = array_ty.getLen(),
                .child = array_ty.child,
            } };
        },
        .vector_ty => {
            const vector_ty = i.extraData(Tag.Vector, data);
            return .{ .vector_ty = .{
                .len = vector_ty.len,
                .child = vector_ty.child,
            } };
        },
        .u32 => .{ .int = .{ .u64 = data } },
        .i32 => .{ .int = .{ .i64 = @as(i32, @bitCast(data)) } },
        .int_positive, .int_negative => {
            // Sign is encoded in the tag; limbs live in `i.limbs`.
            const int_info = i.extraData(Tag.Int, data);
            const limbs = i.limbs.items[int_info.limbs_index..][0..int_info.limbs_len];
            return .{ .int = .{
                .big_int = .{
                    .positive = item.tag == .int_positive,
                    .limbs = limbs,
                },
            } };
        },
        .f16 => .{ .float = .{ .f16 = @bitCast(@as(u16, @intCast(data))) } },
        .f32 => .{ .float = .{ .f32 = @bitCast(data) } },
        .f64 => {
            const float = i.extraData(Tag.F64, data);
            return .{ .float = .{ .f64 = float.get() } };
        },
        .f80 => {
            const float = i.extraData(Tag.F80, data);
            return .{ .float = .{ .f80 = float.get() } };
        },
        .f128 => {
            const float = i.extraData(Tag.F128, data);
            return .{ .float = .{ .f128 = float.get() } };
        },
        .bytes => {
            const bytes = i.extraData(Tag.Bytes, data);
            return .{ .bytes = i.strings.items[bytes.strings_index..][0..bytes.len] };
        },
        .record_ty => {
            // Element refs trail the Record header in `extra`.
            const extra = i.extraDataTrail(Tag.Record, data);
            return .{
                .record_ty = @ptrCast(i.extra.items[extra.end..][0..extra.data.elements_len]),
            };
        },
    };
}
/// Deserialize a `T` from `extra` at `index`, discarding the trailing
/// position.
fn extraData(i: *const Interner, comptime T: type, index: usize) T {
    return i.extraDataTrail(T, index).data;
}
/// Deserialize a `T` (all `u32`/`Ref` fields) that was written by
/// `addExtraAssumeCapacity` starting at `index`, also returning the index
/// one past the last word read — used to locate trailing data such as
/// `Record` element refs.
fn extraDataTrail(i: *const Interner, comptime T: type, index: usize) struct { data: T, end: u32 } {
    var result: T = undefined;
    const fields = @typeInfo(T).Struct.fields;
    inline for (fields, 0..) |field, field_i| {
        const int32 = i.extra.items[field_i + index];
        @field(result, field.name) = switch (field.type) {
            Ref => @enumFromInt(int32),
            u32 => int32,
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        };
    }
    return .{
        .data = result,
        .end = @intCast(index + fields.len),
    };
}
| https://raw.githubusercontent.com/cyberegoorg/cetech1-zig/7438a7b157a4047261d161c06248b54fe9d822eb/lib/compiler/aro/backend/Interner.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Hash = std.hash.Wyhash;
const Limb = std.math.big.Limb;
const Interner = @This();
/// Deduplication table. Keys and values carry no data; lookups go through
/// `KeyAdapter`, which reconstructs a `Key` from `items` for comparison.
map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
/// One entry per interned value; a `Ref` is an index into this list.
/// `data` holds either an immediate payload or an index into `extra`,
/// depending on `tag`.
items: std.MultiArrayList(struct {
    tag: Tag,
    data: u32,
}) = .{},
/// Overflow storage for payloads wider than 32 bits.
extra: std.ArrayListUnmanaged(u32) = .{},
/// Limb storage for big integers (referenced by `Tag.Int`).
limbs: std.ArrayListUnmanaged(Limb) = .{},
/// Byte storage for interned strings (referenced by `Tag.Bytes`).
strings: std.ArrayListUnmanaged(u8) = .{},
/// Adapter for `map.getOrPutAdapted`: hashes a `Key` directly and compares
/// it against already-interned entries by reconstructing their `Key` via
/// `Interner.get`.
const KeyAdapter = struct {
    interner: *const Interner,

    pub fn eql(adapter: KeyAdapter, a: Key, b_void: void, b_map_index: usize) bool {
        _ = b_void;
        // The map index doubles as the item index / `Ref` of the stored entry.
        return adapter.interner.get(@as(Ref, @enumFromInt(b_map_index))).eql(a);
    }

    pub fn hash(adapter: KeyAdapter, a: Key) u32 {
        _ = adapter;
        return a.hash();
    }
};
/// Fully materialized view of an interned value; used both as the lookup
/// key for `put` and as the result of `get`.
pub const Key = union(enum) {
    /// Integer type with this bit width.
    int_ty: u16,
    /// Float type with this bit width (16, 32, 64, 80 or 128).
    float_ty: u16,
    ptr_ty,
    noreturn_ty,
    void_ty,
    func_ty,
    array_ty: struct {
        len: u64,
        child: Ref,
    },
    vector_ty: struct {
        len: u32,
        child: Ref,
    },
    record_ty: []const Ref,
    /// May not be zero
    null,
    int: union(enum) {
        u64: u64,
        i64: i64,
        big_int: BigIntConst,

        /// View any representation as a big integer; small values are
        /// materialized into the caller-provided `space`.
        pub fn toBigInt(repr: @This(), space: *Tag.Int.BigIntSpace) BigIntConst {
            return switch (repr) {
                .big_int => |x| x,
                inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
            };
        }
    },
    float: Float,
    bytes: []const u8,

    pub const Float = union(enum) {
        f16: f16,
        f32: f32,
        f64: f64,
        f80: f80,
        f128: f128,
    };

    /// Hash consistent with `eql`: integers hash by sign and limbs so all
    /// three `int` representations of the same value collide, and floats
    /// hash their raw bit pattern.
    pub fn hash(key: Key) u32 {
        var hasher = Hash.init(0);
        const tag = std.meta.activeTag(key);
        std.hash.autoHash(&hasher, tag);
        switch (key) {
            .bytes => |bytes| {
                hasher.update(bytes);
            },
            .record_ty => |elems| for (elems) |elem| {
                std.hash.autoHash(&hasher, elem);
            },
            .float => |repr| switch (repr) {
                // Hashing the bits (not the value) keeps distinct bit
                // patterns such as +0.0 and -0.0 apart.
                inline else => |data| std.hash.autoHash(
                    &hasher,
                    @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)),
                ),
            },
            .int => |repr| {
                var space: Tag.Int.BigIntSpace = undefined;
                const big = repr.toBigInt(&space);
                std.hash.autoHash(&hasher, big.positive);
                for (big.limbs) |limb| std.hash.autoHash(&hasher, limb);
            },
            inline else => |info| {
                std.hash.autoHash(&hasher, info);
            },
        }
        return @truncate(hasher.final());
    }

    /// Structural equality, matching `hash` above.
    pub fn eql(a: Key, b: Key) bool {
        const KeyTag = std.meta.Tag(Key);
        const a_tag: KeyTag = a;
        const b_tag: KeyTag = b;
        if (a_tag != b_tag) return false;
        switch (a) {
            .record_ty => |a_elems| {
                const b_elems = b.record_ty;
                if (a_elems.len != b_elems.len) return false;
                for (a_elems, b_elems) |a_elem, b_elem| {
                    if (a_elem != b_elem) return false;
                }
                return true;
            },
            .bytes => |a_bytes| {
                const b_bytes = b.bytes;
                return std.mem.eql(u8, a_bytes, b_bytes);
            },
            .int => |a_repr| {
                // Compare by numeric value, independent of representation.
                var a_space: Tag.Int.BigIntSpace = undefined;
                const a_big = a_repr.toBigInt(&a_space);
                var b_space: Tag.Int.BigIntSpace = undefined;
                const b_big = b.int.toBigInt(&b_space);
                return a_big.eql(b_big);
            },
            inline else => |a_info, tag| {
                const b_info = @field(b, @tagName(tag));
                return std.meta.eql(a_info, b_info);
            },
        }
    }

    /// Map statically-known values (common types, 0, 1, null) to their
    /// pre-interned sentinel `Ref`s; returns null if the key must be
    /// stored in the tables.
    fn toRef(key: Key) ?Ref {
        switch (key) {
            .int_ty => |bits| switch (bits) {
                1 => return .i1,
                8 => return .i8,
                16 => return .i16,
                32 => return .i32,
                64 => return .i64,
                128 => return .i128,
                else => {},
            },
            .float_ty => |bits| switch (bits) {
                16 => return .f16,
                32 => return .f32,
                64 => return .f64,
                80 => return .f80,
                128 => return .f128,
                else => unreachable,
            },
            .ptr_ty => return .ptr,
            .func_ty => return .func,
            .noreturn_ty => return .noreturn,
            .void_ty => return .void,
            .int => |repr| {
                var space: Tag.Int.BigIntSpace = undefined;
                const big = repr.toBigInt(&space);
                if (big.eqlZero()) return .zero;
                const big_one = BigIntConst{ .limbs = &.{1}, .positive = true };
                if (big.eql(big_one)) return .one;
            },
            .float => |repr| switch (repr) {
                inline else => |data| {
                    // Only +0.0 folds to `.zero`; -0.0 is interned normally.
                    if (std.math.isPositiveZero(data)) return .zero;
                    if (data == 1) return .one;
                },
            },
            .null => return .null,
            else => {},
        }
        return null;
    }
};
/// Index of an interned value. Commonly used values are pre-interned as
/// named sentinels packed at the top of the `u32` range so they never
/// collide with real `items` indexes. The values must stay in sync with
/// `OptRef` below (which additionally reserves `max` for `none`).
pub const Ref = enum(u32) {
    const max = std.math.maxInt(u32);
    ptr = max - 1,
    noreturn = max - 2,
    void = max - 3,
    i1 = max - 4,
    i8 = max - 5,
    i16 = max - 6,
    i32 = max - 7,
    i64 = max - 8,
    i128 = max - 9,
    f16 = max - 10,
    f32 = max - 11,
    f64 = max - 12,
    f80 = max - 13,
    f128 = max - 14,
    func = max - 15,
    zero = max - 16,
    one = max - 17,
    null = max - 18,
    _,
};
/// Optional `Ref`: identical layout to `Ref` plus a `none` sentinel at
/// `maxInt(u32)`. Keep the shared tag values in sync with `Ref` above.
pub const OptRef = enum(u32) {
    const max = std.math.maxInt(u32);
    none = max - 0,
    ptr = max - 1,
    noreturn = max - 2,
    void = max - 3,
    i1 = max - 4,
    i8 = max - 5,
    i16 = max - 6,
    i32 = max - 7,
    i64 = max - 8,
    i128 = max - 9,
    f16 = max - 10,
    f32 = max - 11,
    f64 = max - 12,
    f80 = max - 13,
    f128 = max - 14,
    func = max - 15,
    zero = max - 16,
    one = max - 17,
    null = max - 18,
    _,
};
/// Item storage tag. The doc comment on each variant describes what the
/// item's 32-bit `data` field holds (an immediate value or an index into
/// `extra` at which the named payload struct is serialized).
pub const Tag = enum(u8) {
    /// `data` is `u16`
    int_ty,
    /// `data` is `u16`
    float_ty,
    /// `data` is index to `Array`
    array_ty,
    /// `data` is index to `Vector`
    vector_ty,
    /// `data` is `u32`
    u32,
    /// `data` is `i32`
    i32,
    /// `data` is `Int`
    int_positive,
    /// `data` is `Int`
    int_negative,
    /// `data` is `f16`
    f16,
    /// `data` is `f32`
    f32,
    /// `data` is `F64`
    f64,
    /// `data` is `F80`
    f80,
    /// `data` is `F128`
    f128,
    /// `data` is `Bytes`
    bytes,
    /// `data` is `Record`
    record_ty,

    /// Array type payload: 64-bit length split into two u32 words
    /// (low word first, see `PackedU64`), plus the element type.
    pub const Array = struct {
        len0: u32,
        len1: u32,
        child: Ref,

        pub fn getLen(a: Array) u64 {
            return (PackedU64{
                .a = a.len0,
                .b = a.len1,
            }).get();
        }
    };

    /// Vector type payload.
    pub const Vector = struct {
        len: u32,
        child: Ref,
    };

    /// Big-integer payload: a slice into `Interner.limbs`.
    pub const Int = struct {
        limbs_index: u32,
        limbs_len: u32,

        /// Big enough to fit any non-BigInt value
        pub const BigIntSpace = struct {
            /// The +1 is headroom so that operations such as incrementing once
            /// or decrementing once are possible without using an allocator.
            limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
        };
    };

    /// `f64` bit pattern split into two u32 words (low word first).
    pub const F64 = struct {
        piece0: u32,
        piece1: u32,

        pub fn get(self: F64) f64 {
            const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32);
            return @bitCast(int_bits);
        }

        fn pack(val: f64) F64 {
            const bits = @as(u64, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
            };
        }
    };

    /// `f80` bit pattern split into three words; only the low 16 bits of
    /// `piece2` are meaningful.
    pub const F80 = struct {
        piece0: u32,
        piece1: u32,
        piece2: u32, // u16 part, top bits

        pub fn get(self: F80) f80 {
            const int_bits = @as(u80, self.piece0) |
                (@as(u80, self.piece1) << 32) |
                (@as(u80, self.piece2) << 64);
            return @bitCast(int_bits);
        }

        fn pack(val: f80) F80 {
            const bits = @as(u80, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
                .piece2 = @as(u16, @truncate(bits >> 64)),
            };
        }
    };

    /// `f128` bit pattern split into four u32 words (low word first).
    pub const F128 = struct {
        piece0: u32,
        piece1: u32,
        piece2: u32,
        piece3: u32,

        pub fn get(self: F128) f128 {
            const int_bits = @as(u128, self.piece0) |
                (@as(u128, self.piece1) << 32) |
                (@as(u128, self.piece2) << 64) |
                (@as(u128, self.piece3) << 96);
            return @bitCast(int_bits);
        }

        fn pack(val: f128) F128 {
            const bits = @as(u128, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
                .piece2 = @as(u32, @truncate(bits >> 64)),
                .piece3 = @as(u32, @truncate(bits >> 96)),
            };
        }
    };

    /// Byte-string payload: a slice into `Interner.strings`.
    pub const Bytes = struct {
        strings_index: u32,
        len: u32,
    };

    /// Record type payload; the element `Ref`s follow directly in `extra`.
    pub const Record = struct {
        elements_len: u32,
        // trailing
        // [elements_len]Ref
    };
};
/// A `u64` split into two `u32` halves so it can be stored in 32-bit
/// `extra` slots. Because packed-struct fields start at the least
/// significant bit, `a` holds the low half and `b` the high half.
pub const PackedU64 = packed struct(u64) {
    a: u32,
    b: u32,

    /// Reassemble the original 64-bit value from the two halves.
    pub fn get(self: PackedU64) u64 {
        return @as(u64, self.a) | (@as(u64, self.b) << 32);
    }

    /// Split `value` into its low (`a`) and high (`b`) 32-bit halves.
    pub fn init(value: u64) PackedU64 {
        return .{
            .a = @truncate(value),
            .b = @truncate(value >> 32),
        };
    }
};
/// Free all interner storage; the interner is unusable afterwards.
pub fn deinit(i: *Interner, gpa: Allocator) void {
    i.map.deinit(gpa);
    i.items.deinit(gpa);
    i.extra.deinit(gpa);
    i.limbs.deinit(gpa);
    i.strings.deinit(gpa);
}
/// Intern `key` and return its `Ref`. Well-known values (common types,
/// zero, one, null) are answered with sentinel refs without touching the
/// tables; anything else is deduplicated through `map` and appended to
/// `items` on first use. `bytes` contents are copied into interner-owned
/// storage.
pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
    if (key.toRef()) |some| return some;
    const adapter: KeyAdapter = .{ .interner = i };
    const gop = try i.map.getOrPutAdapted(gpa, key, adapter);
    if (gop.found_existing) return @enumFromInt(gop.index);
    // Reserve the item slot up front so appendAssumeCapacity below cannot
    // fail. NOTE(review): a failing allocation in addExtra/limbs/strings
    // after this point leaves the new map entry without a matching item —
    // presumably callers treat that error as fatal; confirm.
    try i.items.ensureUnusedCapacity(gpa, 1);
    switch (key) {
        .int_ty => |bits| {
            i.items.appendAssumeCapacity(.{
                .tag = .int_ty,
                .data = bits,
            });
        },
        .float_ty => |bits| {
            i.items.appendAssumeCapacity(.{
                .tag = .float_ty,
                .data = bits,
            });
        },
        .array_ty => |info| {
            const split_len = PackedU64.init(info.len);
            i.items.appendAssumeCapacity(.{
                .tag = .array_ty,
                .data = try i.addExtra(gpa, Tag.Array{
                    .len0 = split_len.a,
                    .len1 = split_len.b,
                    .child = info.child,
                }),
            });
        },
        .vector_ty => |info| {
            i.items.appendAssumeCapacity(.{
                .tag = .vector_ty,
                .data = try i.addExtra(gpa, Tag.Vector{
                    .len = info.len,
                    .child = info.child,
                }),
            });
        },
        .int => |repr| int: {
            var space: Tag.Int.BigIntSpace = undefined;
            const big = repr.toBigInt(&space);
            // Prefer the compact u32/i32 immediate encodings when the
            // value fits; only spill limbs for genuinely large values.
            switch (repr) {
                .u64 => |data| if (std.math.cast(u32, data)) |small| {
                    i.items.appendAssumeCapacity(.{
                        .tag = .u32,
                        .data = small,
                    });
                    break :int;
                },
                .i64 => |data| if (std.math.cast(i32, data)) |small| {
                    i.items.appendAssumeCapacity(.{
                        .tag = .i32,
                        .data = @bitCast(small),
                    });
                    break :int;
                },
                .big_int => |data| {
                    if (data.fitsInTwosComp(.unsigned, 32)) {
                        i.items.appendAssumeCapacity(.{
                            .tag = .u32,
                            .data = data.to(u32) catch unreachable,
                        });
                        break :int;
                    } else if (data.fitsInTwosComp(.signed, 32)) {
                        i.items.appendAssumeCapacity(.{
                            .tag = .i32,
                            .data = @bitCast(data.to(i32) catch unreachable),
                        });
                        break :int;
                    }
                },
            }
            // Fallback: sign lives in the tag, limbs are stored out of line.
            const limbs_index: u32 = @intCast(i.limbs.items.len);
            try i.limbs.appendSlice(gpa, big.limbs);
            i.items.appendAssumeCapacity(.{
                .tag = if (big.positive) .int_positive else .int_negative,
                .data = try i.addExtra(gpa, Tag.Int{
                    .limbs_index = limbs_index,
                    .limbs_len = @intCast(big.limbs.len),
                }),
            });
        },
        .float => |repr| switch (repr) {
            // f16/f32 bit patterns fit directly in `data`; wider floats go
            // through `extra`.
            .f16 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f16,
                .data = @as(u16, @bitCast(data)),
            }),
            .f32 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f32,
                .data = @as(u32, @bitCast(data)),
            }),
            .f64 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f64,
                .data = try i.addExtra(gpa, Tag.F64.pack(data)),
            }),
            .f80 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f80,
                .data = try i.addExtra(gpa, Tag.F80.pack(data)),
            }),
            .f128 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f128,
                .data = try i.addExtra(gpa, Tag.F128.pack(data)),
            }),
        },
        .bytes => |bytes| {
            // Copy the bytes into interner-owned storage.
            const strings_index: u32 = @intCast(i.strings.items.len);
            try i.strings.appendSlice(gpa, bytes);
            i.items.appendAssumeCapacity(.{
                .tag = .bytes,
                .data = try i.addExtra(gpa, Tag.Bytes{
                    .strings_index = strings_index,
                    .len = @intCast(bytes.len),
                }),
            });
        },
        .record_ty => |elems| {
            // Header followed by the element refs, written under a single
            // capacity reservation.
            try i.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.Record).Struct.fields.len +
                elems.len);
            i.items.appendAssumeCapacity(.{
                .tag = .record_ty,
                .data = i.addExtraAssumeCapacity(Tag.Record{
                    .elements_len = @intCast(elems.len),
                }),
            });
            i.extra.appendSliceAssumeCapacity(@ptrCast(elems));
        },
        // These keys always map to sentinel refs via `toRef` above.
        .ptr_ty,
        .noreturn_ty,
        .void_ty,
        .func_ty,
        .null,
        => unreachable,
    }
    return @enumFromInt(gop.index);
}
/// Reserve space, then serialize `extra` (a struct of `u32`/`Ref` fields)
/// into the `extra` array; returns the index of its first word.
fn addExtra(i: *Interner, gpa: Allocator, extra: anytype) Allocator.Error!u32 {
    const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
    try i.extra.ensureUnusedCapacity(gpa, fields.len);
    return i.addExtraAssumeCapacity(extra);
}
/// Serialize `extra`'s fields into the `extra` array, one `u32` per field,
/// in declaration order; capacity must already be reserved. Returns the
/// starting index (the value stored in an item's `data`).
fn addExtraAssumeCapacity(i: *Interner, extra: anytype) u32 {
    const result = @as(u32, @intCast(i.extra.items.len));
    inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
        i.extra.appendAssumeCapacity(switch (field.type) {
            Ref => @intFromEnum(@field(extra, field.name)),
            u32 => @field(extra, field.name),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        });
    }
    return result;
}
/// Reconstruct the `Key` for `ref`. Sentinel refs are answered directly;
/// otherwise the item's tag/data pair is decoded. Returned slices
/// (`bytes`, `record_ty`, big-int limbs) alias interner storage and are
/// invalidated by subsequent `put` calls.
pub fn get(i: *const Interner, ref: Ref) Key {
    switch (ref) {
        .ptr => return .ptr_ty,
        .func => return .func_ty,
        .noreturn => return .noreturn_ty,
        .void => return .void_ty,
        .i1 => return .{ .int_ty = 1 },
        .i8 => return .{ .int_ty = 8 },
        .i16 => return .{ .int_ty = 16 },
        .i32 => return .{ .int_ty = 32 },
        .i64 => return .{ .int_ty = 64 },
        .i128 => return .{ .int_ty = 128 },
        .f16 => return .{ .float_ty = 16 },
        .f32 => return .{ .float_ty = 32 },
        .f64 => return .{ .float_ty = 64 },
        .f80 => return .{ .float_ty = 80 },
        .f128 => return .{ .float_ty = 128 },
        .zero => return .{ .int = .{ .u64 = 0 } },
        .one => return .{ .int = .{ .u64 = 1 } },
        .null => return .null,
        else => {},
    }
    // Non-sentinel refs index straight into `items`.
    const item = i.items.get(@intFromEnum(ref));
    const data = item.data;
    return switch (item.tag) {
        .int_ty => .{ .int_ty = @intCast(data) },
        .float_ty => .{ .float_ty = @intCast(data) },
        .array_ty => {
            const array_ty = i.extraData(Tag.Array, data);
            return .{ .array_ty = .{
                .len = array_ty.getLen(),
                .child = array_ty.child,
            } };
        },
        .vector_ty => {
            const vector_ty = i.extraData(Tag.Vector, data);
            return .{ .vector_ty = .{
                .len = vector_ty.len,
                .child = vector_ty.child,
            } };
        },
        .u32 => .{ .int = .{ .u64 = data } },
        .i32 => .{ .int = .{ .i64 = @as(i32, @bitCast(data)) } },
        .int_positive, .int_negative => {
            // Sign is encoded in the tag; limbs live in `i.limbs`.
            const int_info = i.extraData(Tag.Int, data);
            const limbs = i.limbs.items[int_info.limbs_index..][0..int_info.limbs_len];
            return .{ .int = .{
                .big_int = .{
                    .positive = item.tag == .int_positive,
                    .limbs = limbs,
                },
            } };
        },
        .f16 => .{ .float = .{ .f16 = @bitCast(@as(u16, @intCast(data))) } },
        .f32 => .{ .float = .{ .f32 = @bitCast(data) } },
        .f64 => {
            const float = i.extraData(Tag.F64, data);
            return .{ .float = .{ .f64 = float.get() } };
        },
        .f80 => {
            const float = i.extraData(Tag.F80, data);
            return .{ .float = .{ .f80 = float.get() } };
        },
        .f128 => {
            const float = i.extraData(Tag.F128, data);
            return .{ .float = .{ .f128 = float.get() } };
        },
        .bytes => {
            const bytes = i.extraData(Tag.Bytes, data);
            return .{ .bytes = i.strings.items[bytes.strings_index..][0..bytes.len] };
        },
        .record_ty => {
            // Element refs trail the Record header in `extra`.
            const extra = i.extraDataTrail(Tag.Record, data);
            return .{
                .record_ty = @ptrCast(i.extra.items[extra.end..][0..extra.data.elements_len]),
            };
        },
    };
}
/// Deserialize a `T` from `extra` at `index`, discarding the trailing
/// position.
fn extraData(i: *const Interner, comptime T: type, index: usize) T {
    return i.extraDataTrail(T, index).data;
}
/// Deserialize a `T` (all `u32`/`Ref` fields) that was written by
/// `addExtraAssumeCapacity` starting at `index`, also returning the index
/// one past the last word read — used to locate trailing data such as
/// `Record` element refs.
fn extraDataTrail(i: *const Interner, comptime T: type, index: usize) struct { data: T, end: u32 } {
    var result: T = undefined;
    const fields = @typeInfo(T).Struct.fields;
    inline for (fields, 0..) |field, field_i| {
        const int32 = i.extra.items[field_i + index];
        @field(result, field.name) = switch (field.type) {
            Ref => @enumFromInt(int32),
            u32 => int32,
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        };
    }
    return .{
        .data = result,
        .end = @intCast(index + fields.len),
    };
}
| https://raw.githubusercontent.com/kassane/zig-mos-bootstrap/19aac4779b9e93b0e833402c26c93cfc13bb94e2/zig/lib/compiler/aro/backend/Interner.zig |
//! NOTE: this file is autogenerated, DO NOT MODIFY
//--------------------------------------------------------------------------------
// Section: Constants (92)
//--------------------------------------------------------------------------------
// NOTE(review): this file is autogenerated (see header) — persist any doc
// changes in the generator.

// ASN.1 tag class masks (0x00 / 0x40 / 0x80 / 0xC0) and the
// constructed-encoding bit (0x20).
pub const ASN_UNIVERSAL = @as(u32, 0);
pub const ASN_APPLICATION = @as(u32, 64);
pub const ASN_CONTEXT = @as(u32, 128);
pub const ASN_PRIVATE = @as(u32, 192);
pub const ASN_PRIMITIVE = @as(u32, 0);
pub const ASN_CONSTRUCTOR = @as(u32, 32);

// MIB object access levels.
pub const SNMP_ACCESS_NONE = @as(u32, 0);
pub const SNMP_ACCESS_NOTIFY = @as(u32, 1);
pub const SNMP_ACCESS_READ_ONLY = @as(u32, 2);
pub const SNMP_ACCESS_READ_WRITE = @as(u32, 3);
pub const SNMP_ACCESS_READ_CREATE = @as(u32, 4);

// Generic success/failure codes.
pub const SNMPAPI_NOERROR = @as(u32, 1);
pub const SNMPAPI_ERROR = @as(u32, 0);
pub const SNMP_OUTPUT_TO_EVENTLOG = @as(u32, 4);

// Default transport endpoints (UDP port numbers / IPX socket numbers).
pub const DEFAULT_SNMP_PORT_UDP = @as(u32, 161);
pub const DEFAULT_SNMP_PORT_IPX = @as(u32, 36879);
pub const DEFAULT_SNMPTRAP_PORT_UDP = @as(u32, 162);
pub const DEFAULT_SNMPTRAP_PORT_IPX = @as(u32, 36880);

pub const SNMP_MAX_OID_LEN = @as(u32, 128);

// Error codes, grouped by subsystem: memory (1), BER encode/decode (10..),
// PDU handling (20..), authentication (30..).
pub const SNMP_MEM_ALLOC_ERROR = @as(u32, 1);
pub const SNMP_BERAPI_INVALID_LENGTH = @as(u32, 10);
pub const SNMP_BERAPI_INVALID_TAG = @as(u32, 11);
pub const SNMP_BERAPI_OVERFLOW = @as(u32, 12);
pub const SNMP_BERAPI_SHORT_BUFFER = @as(u32, 13);
pub const SNMP_BERAPI_INVALID_OBJELEM = @as(u32, 14);
pub const SNMP_PDUAPI_UNRECOGNIZED_PDU = @as(u32, 20);
pub const SNMP_PDUAPI_INVALID_ES = @as(u32, 21);
pub const SNMP_PDUAPI_INVALID_GT = @as(u32, 22);
pub const SNMP_AUTHAPI_INVALID_VERSION = @as(u32, 30);
pub const SNMP_AUTHAPI_INVALID_MSG_TYPE = @as(u32, 31);
pub const SNMP_AUTHAPI_TRIV_AUTH_FAILED = @as(u32, 32);

// Legacy spellings kept for source compatibility ("PRIMATIVE" is the
// original C header's typo).
pub const ASN_CONTEXTSPECIFIC = @as(u32, 128);
pub const ASN_PRIMATIVE = @as(u32, 0);

// Management API error codes (40..48) and control codes.
pub const SNMP_MGMTAPI_TIMEOUT = @as(u32, 40);
pub const SNMP_MGMTAPI_SELECT_FDERRORS = @as(u32, 41);
pub const SNMP_MGMTAPI_TRAP_ERRORS = @as(u32, 42);
pub const SNMP_MGMTAPI_TRAP_DUPINIT = @as(u32, 43);
pub const SNMP_MGMTAPI_NOTRAPS = @as(u32, 44);
pub const SNMP_MGMTAPI_AGAIN = @as(u32, 45);
pub const SNMP_MGMTAPI_INVALID_CTL = @as(u32, 46);
pub const SNMP_MGMTAPI_INVALID_SESSION = @as(u32, 47);
pub const SNMP_MGMTAPI_INVALID_BUFFER = @as(u32, 48);
pub const MGMCTL_SETAGENTPORT = @as(u32, 1);

// Object identifier size limits.
pub const MAXOBJIDSIZE = @as(u32, 128);
pub const MAXOBJIDSTRSIZE = @as(u32, 1408);

// Listen-address modes.
pub const SNMPLISTEN_USEENTITY_ADDR = @as(u32, 0);
pub const SNMPLISTEN_ALL_ADDR = @as(u32, 1);

// Generic trap numbers (duplicated below by the SNMP_GENERICTRAP enum).
pub const SNMP_TRAP_COLDSTART = @as(u32, 0);
pub const SNMP_TRAP_WARMSTART = @as(u32, 1);
pub const SNMP_TRAP_LINKDOWN = @as(u32, 2);
pub const SNMP_TRAP_LINKUP = @as(u32, 3);
pub const SNMP_TRAP_AUTHFAIL = @as(u32, 4);
pub const SNMP_TRAP_EGPNEIGHBORLOSS = @as(u32, 5);
pub const SNMP_TRAP_ENTERPRISESPECIFIC = @as(u32, 6);

// SNMPAPI_* support levels.
pub const SNMPAPI_NO_SUPPORT = @as(u32, 0);
pub const SNMPAPI_V1_SUPPORT = @as(u32, 1);
pub const SNMPAPI_V2_SUPPORT = @as(u32, 2);
pub const SNMPAPI_M2M_SUPPORT = @as(u32, 3);

// SNMPAPI_* status and error codes (0..99).
pub const SNMPAPI_FAILURE = @as(u32, 0);
pub const SNMPAPI_SUCCESS = @as(u32, 1);
pub const SNMPAPI_ALLOC_ERROR = @as(u32, 2);
pub const SNMPAPI_CONTEXT_INVALID = @as(u32, 3);
pub const SNMPAPI_CONTEXT_UNKNOWN = @as(u32, 4);
pub const SNMPAPI_ENTITY_INVALID = @as(u32, 5);
pub const SNMPAPI_ENTITY_UNKNOWN = @as(u32, 6);
pub const SNMPAPI_INDEX_INVALID = @as(u32, 7);
pub const SNMPAPI_NOOP = @as(u32, 8);
pub const SNMPAPI_OID_INVALID = @as(u32, 9);
pub const SNMPAPI_OPERATION_INVALID = @as(u32, 10);
pub const SNMPAPI_OUTPUT_TRUNCATED = @as(u32, 11);
pub const SNMPAPI_PDU_INVALID = @as(u32, 12);
pub const SNMPAPI_SESSION_INVALID = @as(u32, 13);
pub const SNMPAPI_SYNTAX_INVALID = @as(u32, 14);
pub const SNMPAPI_VBL_INVALID = @as(u32, 15);
pub const SNMPAPI_MODE_INVALID = @as(u32, 16);
pub const SNMPAPI_SIZE_INVALID = @as(u32, 17);
pub const SNMPAPI_NOT_INITIALIZED = @as(u32, 18);
pub const SNMPAPI_MESSAGE_INVALID = @as(u32, 19);
pub const SNMPAPI_HWND_INVALID = @as(u32, 20);
pub const SNMPAPI_OTHER_ERROR = @as(u32, 99);

// SNMPAPI transport-layer error codes (100..199).
pub const SNMPAPI_TL_NOT_INITIALIZED = @as(u32, 100);
pub const SNMPAPI_TL_NOT_SUPPORTED = @as(u32, 101);
pub const SNMPAPI_TL_NOT_AVAILABLE = @as(u32, 102);
pub const SNMPAPI_TL_RESOURCE_ERROR = @as(u32, 103);
pub const SNMPAPI_TL_UNDELIVERABLE = @as(u32, 104);
pub const SNMPAPI_TL_SRC_INVALID = @as(u32, 105);
pub const SNMPAPI_TL_INVALID_PARAM = @as(u32, 106);
pub const SNMPAPI_TL_IN_USE = @as(u32, 107);
pub const SNMPAPI_TL_TIMEOUT = @as(u32, 108);
pub const SNMPAPI_TL_PDU_TOO_BIG = @as(u32, 109);
pub const SNMPAPI_TL_OTHER = @as(u32, 199);

pub const MAXVENDORINFO = @as(u32, 32);
//--------------------------------------------------------------------------------
// Section: Types (29)
//--------------------------------------------------------------------------------
/// SNMP PDU types. The numeric values are the ASN.1 context-specific
/// constructed tags used on the wire (RFC 1157 / RFC 1905):
/// 0xA0 = GetRequest ... 0xA7 = SNMPv2-Trap.
pub const SNMP_PDU_TYPE = enum(u32) {
    GET = 160, // 0xA0 GetRequest
    GETNEXT = 161, // 0xA1 GetNextRequest
    RESPONSE = 162, // 0xA2 Response
    SET = 163, // 0xA3 SetRequest
    GETBULK = 165, // 0xA5 GetBulkRequest (SNMPv2)
    TRAP = 167, // 0xA7 SNMPv2-Trap
};
// Flat aliases matching the original C header names.
pub const SNMP_PDU_GET = SNMP_PDU_TYPE.GET;
pub const SNMP_PDU_GETNEXT = SNMP_PDU_TYPE.GETNEXT;
pub const SNMP_PDU_RESPONSE = SNMP_PDU_TYPE.RESPONSE;
pub const SNMP_PDU_SET = SNMP_PDU_TYPE.SET;
pub const SNMP_PDU_GETBULK = SNMP_PDU_TYPE.GETBULK;
pub const SNMP_PDU_TRAP = SNMP_PDU_TYPE.TRAP;
pub const SNMP_EXTENSION_REQUEST_TYPE = enum(u32) {
GET = 160,
GET_NEXT = 161,
SET_TEST = 224,
SET_COMMIT = 163,
SET_UNDO = 225,
SET_CLEANUP = 226,
};
pub const SNMP_EXTENSION_GET = SNMP_EXTENSION_REQUEST_TYPE.GET;
pub const SNMP_EXTENSION_GET_NEXT = SNMP_EXTENSION_REQUEST_TYPE.GET_NEXT;
pub const SNMP_EXTENSION_SET_TEST = SNMP_EXTENSION_REQUEST_TYPE.SET_TEST;
pub const SNMP_EXTENSION_SET_COMMIT = SNMP_EXTENSION_REQUEST_TYPE.SET_COMMIT;
pub const SNMP_EXTENSION_SET_UNDO = SNMP_EXTENSION_REQUEST_TYPE.SET_UNDO;
pub const SNMP_EXTENSION_SET_CLEANUP = SNMP_EXTENSION_REQUEST_TYPE.SET_CLEANUP;
pub const SNMP_API_TRANSLATE_MODE = enum(u32) {
TRANSLATED = 0,
UNTRANSLATED_V1 = 1,
UNTRANSLATED_V2 = 2,
};
pub const SNMPAPI_TRANSLATED = SNMP_API_TRANSLATE_MODE.TRANSLATED;
pub const SNMPAPI_UNTRANSLATED_V1 = SNMP_API_TRANSLATE_MODE.UNTRANSLATED_V1;
pub const SNMPAPI_UNTRANSLATED_V2 = SNMP_API_TRANSLATE_MODE.UNTRANSLATED_V2;
pub const SNMP_GENERICTRAP = enum(u32) {
COLDSTART = 0,
WARMSTART = 1,
LINKDOWN = 2,
LINKUP = 3,
AUTHFAILURE = 4,
EGPNEIGHLOSS = 5,
ENTERSPECIFIC = 6,
};
pub const SNMP_GENERICTRAP_COLDSTART = SNMP_GENERICTRAP.COLDSTART;
pub const SNMP_GENERICTRAP_WARMSTART = SNMP_GENERICTRAP.WARMSTART;
pub const SNMP_GENERICTRAP_LINKDOWN = SNMP_GENERICTRAP.LINKDOWN;
pub const SNMP_GENERICTRAP_LINKUP = SNMP_GENERICTRAP.LINKUP;
pub const SNMP_GENERICTRAP_AUTHFAILURE = SNMP_GENERICTRAP.AUTHFAILURE;
pub const SNMP_GENERICTRAP_EGPNEIGHLOSS = SNMP_GENERICTRAP.EGPNEIGHLOSS;
pub const SNMP_GENERICTRAP_ENTERSPECIFIC = SNMP_GENERICTRAP.ENTERSPECIFIC;
pub const SNMP_ERROR_STATUS = enum(u32) {
NOERROR = 0,
TOOBIG = 1,
NOSUCHNAME = 2,
BADVALUE = 3,
READONLY = 4,
GENERR = 5,
NOACCESS = 6,
WRONGTYPE = 7,
WRONGLENGTH = 8,
WRONGENCODING = 9,
WRONGVALUE = 10,
NOCREATION = 11,
INCONSISTENTVALUE = 12,
RESOURCEUNAVAILABLE = 13,
COMMITFAILED = 14,
UNDOFAILED = 15,
AUTHORIZATIONERROR = 16,
NOTWRITABLE = 17,
INCONSISTENTNAME = 18,
};
pub const SNMP_ERRORSTATUS_NOERROR = SNMP_ERROR_STATUS.NOERROR;
pub const SNMP_ERRORSTATUS_TOOBIG = SNMP_ERROR_STATUS.TOOBIG;
pub const SNMP_ERRORSTATUS_NOSUCHNAME = SNMP_ERROR_STATUS.NOSUCHNAME;
pub const SNMP_ERRORSTATUS_BADVALUE = SNMP_ERROR_STATUS.BADVALUE;
pub const SNMP_ERRORSTATUS_READONLY = SNMP_ERROR_STATUS.READONLY;
pub const SNMP_ERRORSTATUS_GENERR = SNMP_ERROR_STATUS.GENERR;
pub const SNMP_ERRORSTATUS_NOACCESS = SNMP_ERROR_STATUS.NOACCESS;
pub const SNMP_ERRORSTATUS_WRONGTYPE = SNMP_ERROR_STATUS.WRONGTYPE;
pub const SNMP_ERRORSTATUS_WRONGLENGTH = SNMP_ERROR_STATUS.WRONGLENGTH;
pub const SNMP_ERRORSTATUS_WRONGENCODING = SNMP_ERROR_STATUS.WRONGENCODING;
pub const SNMP_ERRORSTATUS_WRONGVALUE = SNMP_ERROR_STATUS.WRONGVALUE;
pub const SNMP_ERRORSTATUS_NOCREATION = SNMP_ERROR_STATUS.NOCREATION;
pub const SNMP_ERRORSTATUS_INCONSISTENTVALUE = SNMP_ERROR_STATUS.INCONSISTENTVALUE;
pub const SNMP_ERRORSTATUS_RESOURCEUNAVAILABLE = SNMP_ERROR_STATUS.RESOURCEUNAVAILABLE;
pub const SNMP_ERRORSTATUS_COMMITFAILED = SNMP_ERROR_STATUS.COMMITFAILED;
pub const SNMP_ERRORSTATUS_UNDOFAILED = SNMP_ERROR_STATUS.UNDOFAILED;
pub const SNMP_ERRORSTATUS_AUTHORIZATIONERROR = SNMP_ERROR_STATUS.AUTHORIZATIONERROR;
pub const SNMP_ERRORSTATUS_NOTWRITABLE = SNMP_ERROR_STATUS.NOTWRITABLE;
pub const SNMP_ERRORSTATUS_INCONSISTENTNAME = SNMP_ERROR_STATUS.INCONSISTENTNAME;
/// On/off toggle used by WinSNMP (SNMPAPI_ON / SNMPAPI_OFF).
/// NOTE: the member names `N` and `FF` look like an artifact of the binding
/// generator stripping the common "SNMPAPI_O" prefix from ON/OFF; use the
/// aliases below for readability.
pub const SNMP_STATUS = enum(u32) {
    N = 1, // ON
    FF = 0, // OFF
};
pub const SNMPAPI_ON = SNMP_STATUS.N;
pub const SNMPAPI_OFF = SNMP_STATUS.FF;
pub const SNMP_OUTPUT_LOG_TYPE = enum(u32) {
CONSOLE = 1,
LOGFILE = 2,
DEBUGGER = 8,
};
pub const SNMP_OUTPUT_TO_CONSOLE = SNMP_OUTPUT_LOG_TYPE.CONSOLE;
pub const SNMP_OUTPUT_TO_LOGFILE = SNMP_OUTPUT_LOG_TYPE.LOGFILE;
pub const SNMP_OUTPUT_TO_DEBUGGER = SNMP_OUTPUT_LOG_TYPE.DEBUGGER;
pub const SNMP_LOG = enum(u32) {
SILENT = 0,
FATAL = 1,
ERROR = 2,
WARNING = 3,
TRACE = 4,
VERBOSE = 5,
};
pub const SNMP_LOG_SILENT = SNMP_LOG.SILENT;
pub const SNMP_LOG_FATAL = SNMP_LOG.FATAL;
pub const SNMP_LOG_ERROR = SNMP_LOG.ERROR;
pub const SNMP_LOG_WARNING = SNMP_LOG.WARNING;
pub const SNMP_LOG_TRACE = SNMP_LOG.TRACE;
pub const SNMP_LOG_VERBOSE = SNMP_LOG.VERBOSE;
pub const SNMP_ERROR = enum(u32) {
NOERROR = 0,
TOOBIG = 1,
NOSUCHNAME = 2,
BADVALUE = 3,
READONLY = 4,
GENERR = 5,
NOACCESS = 6,
WRONGTYPE = 7,
WRONGLENGTH = 8,
WRONGENCODING = 9,
WRONGVALUE = 10,
NOCREATION = 11,
INCONSISTENTVALUE = 12,
RESOURCEUNAVAILABLE = 13,
COMMITFAILED = 14,
UNDOFAILED = 15,
AUTHORIZATIONERROR = 16,
NOTWRITABLE = 17,
INCONSISTENTNAME = 18,
};
pub const SNMP_ERROR_NOERROR = SNMP_ERROR.NOERROR;
pub const SNMP_ERROR_TOOBIG = SNMP_ERROR.TOOBIG;
pub const SNMP_ERROR_NOSUCHNAME = SNMP_ERROR.NOSUCHNAME;
pub const SNMP_ERROR_BADVALUE = SNMP_ERROR.BADVALUE;
pub const SNMP_ERROR_READONLY = SNMP_ERROR.READONLY;
pub const SNMP_ERROR_GENERR = SNMP_ERROR.GENERR;
pub const SNMP_ERROR_NOACCESS = SNMP_ERROR.NOACCESS;
pub const SNMP_ERROR_WRONGTYPE = SNMP_ERROR.WRONGTYPE;
pub const SNMP_ERROR_WRONGLENGTH = SNMP_ERROR.WRONGLENGTH;
pub const SNMP_ERROR_WRONGENCODING = SNMP_ERROR.WRONGENCODING;
pub const SNMP_ERROR_WRONGVALUE = SNMP_ERROR.WRONGVALUE;
pub const SNMP_ERROR_NOCREATION = SNMP_ERROR.NOCREATION;
pub const SNMP_ERROR_INCONSISTENTVALUE = SNMP_ERROR.INCONSISTENTVALUE;
pub const SNMP_ERROR_RESOURCEUNAVAILABLE = SNMP_ERROR.RESOURCEUNAVAILABLE;
pub const SNMP_ERROR_COMMITFAILED = SNMP_ERROR.COMMITFAILED;
pub const SNMP_ERROR_UNDOFAILED = SNMP_ERROR.UNDOFAILED;
pub const SNMP_ERROR_AUTHORIZATIONERROR = SNMP_ERROR.AUTHORIZATIONERROR;
pub const SNMP_ERROR_NOTWRITABLE = SNMP_ERROR.NOTWRITABLE;
pub const SNMP_ERROR_INCONSISTENTNAME = SNMP_ERROR.INCONSISTENTNAME;
/// ASN.1 OCTET STRING as defined by the Windows SNMP API (snmp.h).
/// Layout must match the C struct exactly (extern, 4-byte field alignment).
pub const AsnOctetString = extern struct {
    /// First byte of the octet stream; not NUL-terminated.
    stream: ?*u8 align(4),
    /// Number of valid bytes pointed to by `stream`.
    length: u32 align(4),
    /// Nonzero when the buffer was dynamically allocated — presumably via
    /// SnmpUtilMemAlloc and freed by SnmpUtilOctetsFree; TODO(review):
    /// confirm ownership semantics against the Windows SDK documentation.
    dynamic: BOOL align(4),
};
/// ASN.1 OBJECT IDENTIFIER: a counted array of sub-identifiers.
/// Layout must match the C struct in snmp.h.
pub const AsnObjectIdentifier = extern struct {
    /// Number of sub-identifiers in `ids`.
    idLength: u32 align(4),
    /// Pointer to the first of `idLength` sub-identifiers.
    ids: ?*u32 align(4),
};
/// Tagged ASN.1 value: `asnType` selects which `asnValue` union member is
/// active. The tag constants (ASN_INTEGER, ASN_OCTETSTRING, ...) are defined
/// elsewhere in this file; mirror of the C AsnAny in snmp.h.
pub const AsnAny = extern struct {
    /// ASN.1 type tag selecting the active union member below.
    asnType: u8,
    asnValue: extern union {
        number: i32 align(4), // ASN_INTEGER
        unsigned32: u32 align(4), // ASN_UNSIGNED32
        counter64: ULARGE_INTEGER align(4), // ASN_COUNTER64
        string: AsnOctetString align(4), // ASN_OCTETSTRING
        bits: AsnOctetString align(4), // ASN_BITS
        object: AsnObjectIdentifier align(4), // ASN_OBJECTIDENTIFIER
        sequence: AsnOctetString align(4), // ASN_SEQUENCE (raw encoded bytes)
        address: AsnOctetString align(4), // ASN_IPADDRESS
        counter: u32 align(4), // ASN_COUNTER32
        gauge: u32 align(4), // ASN_GAUGE32
        ticks: u32 align(4), // ASN_TIMETICKS
        arbitrary: AsnOctetString align(4), // ASN_OPAQUE
    },
};
/// A single variable binding: an OID paired with its value.
pub const SnmpVarBind = extern struct {
    name: AsnObjectIdentifier,
    value: AsnAny,
};
/// Counted array of variable bindings, as carried in a PDU.
pub const SnmpVarBindList = extern struct {
    /// Pointer to the first of `len` contiguous SnmpVarBind entries.
    list: ?*SnmpVarBind align(4),
    len: u32 align(4),
};
pub const PFNSNMPEXTENSIONINIT = switch (@import("builtin").zig_backend) {
.stage1 => fn(
dwUpTimeReference: u32,
phSubagentTrapEvent: ?*?HANDLE,
pFirstSupportedRegion: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) BOOL,
else => *const fn(
dwUpTimeReference: u32,
phSubagentTrapEvent: ?*?HANDLE,
pFirstSupportedRegion: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) BOOL,
} ;
pub const PFNSNMPEXTENSIONINITEX = switch (@import("builtin").zig_backend) {
.stage1 => fn(
pNextSupportedRegion: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) BOOL,
else => *const fn(
pNextSupportedRegion: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) BOOL,
} ;
pub const PFNSNMPEXTENSIONMONITOR = switch (@import("builtin").zig_backend) {
.stage1 => fn(
pAgentMgmtData: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) BOOL,
else => *const fn(
pAgentMgmtData: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) BOOL,
} ;
pub const PFNSNMPEXTENSIONQUERY = switch (@import("builtin").zig_backend) {
.stage1 => fn(
bPduType: u8,
pVarBindList: ?*SnmpVarBindList,
pErrorStatus: ?*i32,
pErrorIndex: ?*i32,
) callconv(@import("std").os.windows.WINAPI) BOOL,
else => *const fn(
bPduType: u8,
pVarBindList: ?*SnmpVarBindList,
pErrorStatus: ?*i32,
pErrorIndex: ?*i32,
) callconv(@import("std").os.windows.WINAPI) BOOL,
} ;
pub const PFNSNMPEXTENSIONQUERYEX = switch (@import("builtin").zig_backend) {
.stage1 => fn(
nRequestType: u32,
nTransactionId: u32,
pVarBindList: ?*SnmpVarBindList,
pContextInfo: ?*AsnOctetString,
pErrorStatus: ?*i32,
pErrorIndex: ?*i32,
) callconv(@import("std").os.windows.WINAPI) BOOL,
else => *const fn(
nRequestType: u32,
nTransactionId: u32,
pVarBindList: ?*SnmpVarBindList,
pContextInfo: ?*AsnOctetString,
pErrorStatus: ?*i32,
pErrorIndex: ?*i32,
) callconv(@import("std").os.windows.WINAPI) BOOL,
} ;
pub const PFNSNMPEXTENSIONTRAP = switch (@import("builtin").zig_backend) {
.stage1 => fn(
pEnterpriseOid: ?*AsnObjectIdentifier,
pGenericTrapId: ?*i32,
pSpecificTrapId: ?*i32,
pTimeStamp: ?*u32,
pVarBindList: ?*SnmpVarBindList,
) callconv(@import("std").os.windows.WINAPI) BOOL,
else => *const fn(
pEnterpriseOid: ?*AsnObjectIdentifier,
pGenericTrapId: ?*i32,
pSpecificTrapId: ?*i32,
pTimeStamp: ?*u32,
pVarBindList: ?*SnmpVarBindList,
) callconv(@import("std").os.windows.WINAPI) BOOL,
} ;
pub const PFNSNMPEXTENSIONCLOSE = switch (@import("builtin").zig_backend) {
.stage1 => fn(
) callconv(@import("std").os.windows.WINAPI) void,
else => *const fn(
) callconv(@import("std").os.windows.WINAPI) void,
} ;
/// WinSNMP (wsnmp32) counted byte string: `len` bytes at `ptr`.
pub const smiOCTETS = extern struct {
    len: u32,
    ptr: ?*u8,
};
/// WinSNMP object identifier: `len` sub-identifiers at `ptr`.
pub const smiOID = extern struct {
    len: u32,
    ptr: ?*u32,
};
/// 64-bit counter split into high and low 32-bit halves (C layout).
pub const smiCNTR64 = extern struct {
    hipart: u32,
    lopart: u32,
};
/// WinSNMP tagged value: `syntax` selects the active `value` union member
/// (SNMP_SYNTAX_* constants defined elsewhere in this file).
pub const smiVALUE = extern struct {
    syntax: u32,
    value: extern union {
        sNumber: i32, // signed integer syntaxes
        uNumber: u32, // unsigned integer / counter / gauge / timeticks
        hNumber: smiCNTR64, // 64-bit counter
        string: smiOCTETS, // octet-string-like syntaxes
        oid: smiOID, // object identifier
        empty: u8, // NULL / no-value syntaxes
    },
};
/// Implementation vendor information returned by SnmpGetVendorInfo.
/// The fixed array sizes match the C header; strings are presumably
/// NUL-terminated within their buffers — TODO(review): confirm against
/// the WinSNMP documentation.
pub const smiVENDORINFO = extern struct {
    vendorName: [64]CHAR,
    vendorContact: [64]CHAR,
    vendorVersionId: [32]CHAR,
    vendorVersionDate: [32]CHAR,
    vendorEnterprise: u32,
};
pub const SNMPAPI_CALLBACK = switch (@import("builtin").zig_backend) {
.stage1 => fn(
hSession: isize,
hWnd: ?HWND,
wMsg: u32,
wParam: WPARAM,
lParam: LPARAM,
lpClientData: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32,
else => *const fn(
hSession: isize,
hWnd: ?HWND,
wMsg: u32,
wParam: WPARAM,
lParam: LPARAM,
lpClientData: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32,
} ;
pub const PFNSNMPSTARTUPEX = switch (@import("builtin").zig_backend) {
.stage1 => fn(
param0: ?*u32,
param1: ?*u32,
param2: ?*u32,
param3: ?*u32,
param4: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32,
else => *const fn(
param0: ?*u32,
param1: ?*u32,
param2: ?*u32,
param3: ?*u32,
param4: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32,
} ;
pub const PFNSNMPCLEANUPEX = switch (@import("builtin").zig_backend) {
.stage1 => fn(
) callconv(@import("std").os.windows.WINAPI) u32,
else => *const fn(
) callconv(@import("std").os.windows.WINAPI) u32,
} ;
//--------------------------------------------------------------------------------
// Section: Functions (84)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOidCpy(
pOidDst: ?*AsnObjectIdentifier,
pOidSrc: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOidAppend(
pOidDst: ?*AsnObjectIdentifier,
pOidSrc: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOidNCmp(
pOid1: ?*AsnObjectIdentifier,
pOid2: ?*AsnObjectIdentifier,
nSubIds: u32,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOidCmp(
pOid1: ?*AsnObjectIdentifier,
pOid2: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOidFree(
pOid: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOctetsCmp(
pOctets1: ?*AsnOctetString,
pOctets2: ?*AsnOctetString,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOctetsNCmp(
pOctets1: ?*AsnOctetString,
pOctets2: ?*AsnOctetString,
nChars: u32,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOctetsCpy(
pOctetsDst: ?*AsnOctetString,
pOctetsSrc: ?*AsnOctetString,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOctetsFree(
pOctets: ?*AsnOctetString,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilAsnAnyCpy(
pAnyDst: ?*AsnAny,
pAnySrc: ?*AsnAny,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilAsnAnyFree(
pAny: ?*AsnAny,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilVarBindCpy(
pVbDst: ?*SnmpVarBind,
pVbSrc: ?*SnmpVarBind,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilVarBindFree(
pVb: ?*SnmpVarBind,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilVarBindListCpy(
pVblDst: ?*SnmpVarBindList,
pVblSrc: ?*SnmpVarBindList,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilVarBindListFree(
pVbl: ?*SnmpVarBindList,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilMemFree(
pMem: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilMemAlloc(
nBytes: u32,
) callconv(@import("std").os.windows.WINAPI) ?*anyopaque;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilMemReAlloc(
pMem: ?*anyopaque,
nBytes: u32,
) callconv(@import("std").os.windows.WINAPI) ?*anyopaque;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilOidToA(
Oid: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) ?PSTR;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilIdsToA(
Ids: ?*u32,
IdLength: u32,
) callconv(@import("std").os.windows.WINAPI) ?PSTR;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilPrintOid(
Oid: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilPrintAsnAny(
pAny: ?*AsnAny,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpSvcGetUptime(
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpSvcSetLogLevel(
nLogLevel: SNMP_LOG,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpSvcSetLogType(
nLogType: SNMP_OUTPUT_LOG_TYPE,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "snmpapi" fn SnmpUtilDbgPrint(
nLogLevel: SNMP_LOG,
szFormat: ?PSTR,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrOpen(
lpAgentAddress: ?PSTR,
lpAgentCommunity: ?PSTR,
nTimeOut: i32,
nRetries: i32,
) callconv(@import("std").os.windows.WINAPI) ?*anyopaque;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrCtl(
session: ?*anyopaque,
dwCtlCode: u32,
lpvInBuffer: ?*anyopaque,
cbInBuffer: u32,
lpvOUTBuffer: ?*anyopaque,
cbOUTBuffer: u32,
lpcbBytesReturned: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrClose(
session: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrRequest(
session: ?*anyopaque,
requestType: u8,
variableBindings: ?*SnmpVarBindList,
errorStatus: ?*SNMP_ERROR_STATUS,
errorIndex: ?*i32,
) callconv(@import("std").os.windows.WINAPI) i32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrStrToOid(
string: ?PSTR,
oid: ?*AsnObjectIdentifier,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrOidToStr(
oid: ?*AsnObjectIdentifier,
string: ?*?PSTR,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrTrapListen(
phTrapAvailable: ?*?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrGetTrap(
enterprise: ?*AsnObjectIdentifier,
IPAddress: ?*AsnOctetString,
genericTrap: ?*SNMP_GENERICTRAP,
specificTrap: ?*i32,
timeStamp: ?*u32,
variableBindings: ?*SnmpVarBindList,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "mgmtapi" fn SnmpMgrGetTrapEx(
enterprise: ?*AsnObjectIdentifier,
agentAddress: ?*AsnOctetString,
sourceAddress: ?*AsnOctetString,
genericTrap: ?*SNMP_GENERICTRAP,
specificTrap: ?*i32,
community: ?*AsnOctetString,
timeStamp: ?*u32,
variableBindings: ?*SnmpVarBindList,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpGetTranslateMode(
nTranslateMode: ?*SNMP_API_TRANSLATE_MODE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpSetTranslateMode(
nTranslateMode: SNMP_API_TRANSLATE_MODE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpGetRetransmitMode(
nRetransmitMode: ?*SNMP_STATUS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpSetRetransmitMode(
nRetransmitMode: SNMP_STATUS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpGetTimeout(
hEntity: isize,
nPolicyTimeout: ?*u32,
nActualTimeout: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpSetTimeout(
hEntity: isize,
nPolicyTimeout: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpGetRetry(
hEntity: isize,
nPolicyRetry: ?*u32,
nActualRetry: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpSetRetry(
hEntity: isize,
nPolicyRetry: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpGetVendorInfo(
vendorInfo: ?*smiVENDORINFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpStartup(
nMajorVersion: ?*u32,
nMinorVersion: ?*u32,
nLevel: ?*u32,
nTranslateMode: ?*SNMP_API_TRANSLATE_MODE,
nRetransmitMode: ?*SNMP_STATUS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpCleanup(
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpOpen(
hWnd: ?HWND,
wMsg: u32,
) callconv(@import("std").os.windows.WINAPI) isize;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpClose(
session: isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpSendMsg(
session: isize,
srcEntity: isize,
dstEntity: isize,
context: isize,
PDU: isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpRecvMsg(
session: isize,
srcEntity: ?*isize,
dstEntity: ?*isize,
context: ?*isize,
PDU: ?*isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpRegister(
session: isize,
srcEntity: isize,
dstEntity: isize,
context: isize,
notification: ?*smiOID,
state: SNMP_STATUS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpCreateSession(
hWnd: ?HWND,
wMsg: u32,
fCallBack: ?SNMPAPI_CALLBACK,
lpClientData: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) isize;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpListen(
hEntity: isize,
lStatus: SNMP_STATUS,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "wsnmp32" fn SnmpListenEx(
hEntity: isize,
lStatus: u32,
nUseEntityAddr: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpCancelMsg(
session: isize,
reqId: i32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpStartupEx(
nMajorVersion: ?*u32,
nMinorVersion: ?*u32,
nLevel: ?*u32,
nTranslateMode: ?*SNMP_API_TRANSLATE_MODE,
nRetransmitMode: ?*SNMP_STATUS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpCleanupEx(
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpStrToEntity(
session: isize,
string: ?[*:0]const u8,
) callconv(@import("std").os.windows.WINAPI) isize;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpEntityToStr(
entity: isize,
size: u32,
string: [*:0]u8,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpFreeEntity(
entity: isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpStrToContext(
session: isize,
string: ?*smiOCTETS,
) callconv(@import("std").os.windows.WINAPI) isize;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpContextToStr(
context: isize,
string: ?*smiOCTETS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpFreeContext(
context: isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpSetPort(
hEntity: isize,
nPort: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpCreatePdu(
session: isize,
PDU_type: SNMP_PDU_TYPE,
request_id: i32,
error_status: i32,
error_index: i32,
varbindlist: isize,
) callconv(@import("std").os.windows.WINAPI) isize;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpGetPduData(
PDU: isize,
PDU_type: ?*SNMP_PDU_TYPE,
request_id: ?*i32,
error_status: ?*SNMP_ERROR,
error_index: ?*i32,
varbindlist: ?*isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpSetPduData(
PDU: isize,
PDU_type: ?*const i32,
request_id: ?*const i32,
non_repeaters: ?*const i32,
max_repetitions: ?*const i32,
varbindlist: ?*const isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpDuplicatePdu(
session: isize,
PDU: isize,
) callconv(@import("std").os.windows.WINAPI) isize;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpFreePdu(
PDU: isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpCreateVbl(
session: isize,
name: ?*smiOID,
value: ?*smiVALUE,
) callconv(@import("std").os.windows.WINAPI) isize;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpDuplicateVbl(
session: isize,
vbl: isize,
) callconv(@import("std").os.windows.WINAPI) isize;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpFreeVbl(
vbl: isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpCountVbl(
vbl: isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpGetVb(
vbl: isize,
index: u32,
name: ?*smiOID,
value: ?*smiVALUE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpSetVb(
vbl: isize,
index: u32,
name: ?*smiOID,
value: ?*smiVALUE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpDeleteVb(
vbl: isize,
index: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpGetLastError(
session: isize,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpStrToOid(
string: ?[*:0]const u8,
dstOID: ?*smiOID,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpOidToStr(
srcOID: ?*smiOID,
size: u32,
string: [*:0]u8,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpOidCopy(
srcOID: ?*smiOID,
dstOID: ?*smiOID,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpOidCompare(
xOID: ?*smiOID,
yOID: ?*smiOID,
maxlen: u32,
result: ?*i32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpEncodeMsg(
session: isize,
srcEntity: isize,
dstEntity: isize,
context: isize,
pdu: isize,
msgBufDesc: ?*smiOCTETS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpDecodeMsg(
session: isize,
srcEntity: ?*isize,
dstEntity: ?*isize,
context: ?*isize,
pdu: ?*isize,
msgBufDesc: ?*smiOCTETS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
pub extern "wsnmp32" fn SnmpFreeDescriptor(
syntax: u32,
descriptor: ?*smiOCTETS,
) callconv(@import("std").os.windows.WINAPI) u32;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (8)
//--------------------------------------------------------------------------------
const BOOL = @import("../foundation.zig").BOOL;
const CHAR = @import("../foundation.zig").CHAR;
const HANDLE = @import("../foundation.zig").HANDLE;
const HWND = @import("../foundation.zig").HWND;
const LPARAM = @import("../foundation.zig").LPARAM;
const PSTR = @import("../foundation.zig").PSTR;
const ULARGE_INTEGER = @import("../foundation.zig").ULARGE_INTEGER;
const WPARAM = @import("../foundation.zig").WPARAM;
// Smoke test: force semantic analysis of every public declaration in this
// module so invalid generated bindings fail `zig test` instead of lurking.
test {
    // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476
    if (@hasDecl(@This(), "PFNSNMPEXTENSIONINIT")) { _ = PFNSNMPEXTENSIONINIT; }
    if (@hasDecl(@This(), "PFNSNMPEXTENSIONINITEX")) { _ = PFNSNMPEXTENSIONINITEX; }
    if (@hasDecl(@This(), "PFNSNMPEXTENSIONMONITOR")) { _ = PFNSNMPEXTENSIONMONITOR; }
    if (@hasDecl(@This(), "PFNSNMPEXTENSIONQUERY")) { _ = PFNSNMPEXTENSIONQUERY; }
    if (@hasDecl(@This(), "PFNSNMPEXTENSIONQUERYEX")) { _ = PFNSNMPEXTENSIONQUERYEX; }
    if (@hasDecl(@This(), "PFNSNMPEXTENSIONTRAP")) { _ = PFNSNMPEXTENSIONTRAP; }
    if (@hasDecl(@This(), "PFNSNMPEXTENSIONCLOSE")) { _ = PFNSNMPEXTENSIONCLOSE; }
    if (@hasDecl(@This(), "SNMPAPI_CALLBACK")) { _ = SNMPAPI_CALLBACK; }
    if (@hasDecl(@This(), "PFNSNMPSTARTUPEX")) { _ = PFNSNMPSTARTUPEX; }
    if (@hasDecl(@This(), "PFNSNMPCLEANUPEX")) { _ = PFNSNMPCLEANUPEX; }
    // Referencing every declaration below can exceed the default comptime
    // branch quota, so scale the quota with the number of declarations.
    @setEvalBranchQuota(
        comptime @import("std").meta.declarations(@This()).len * 3
    );
    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (comptime @import("std").meta.declarations(@This())) |decl| {
        _ = @field(@This(), decl.name);
    }
}
| https://raw.githubusercontent.com/OAguinagalde/tinyrenderer/20e140ad9a9483d6976f91c074a2e8a96e2038fb/dep/zigwin32/win32/network_management/snmp.zig |
const std = @import("../../../std.zig");
const kern = @import("kern.zig");
const PtRegs = @compileError("TODO missing os bits: PtRegs");
const TcpHdr = @compileError("TODO missing os bits: TcpHdr");
const SkFullSock = @compileError("TODO missing os bits: SkFullSock");
// in BPF, all the helper calls
// TODO: when https://github.com/ziglang/zig/issues/1717 is here, make a nice
// function that uses the Helper enum
//
// Each BPF helper is exposed as a typed function pointer whose address is the
// helper's kernel-assigned ID. Signatures follow the documentation in
// '/usr/include/linux/bpf.h'.
pub const map_lookup_elem: *const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque = @ptrFromInt(1);
pub const map_update_elem: *const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long = @ptrFromInt(2);
pub const map_delete_elem: *const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long = @ptrFromInt(3);
pub const probe_read: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(4);
pub const ktime_get_ns: *const fn () u64 = @ptrFromInt(5);
pub const trace_printk: *const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long = @ptrFromInt(6);
pub const get_prandom_u32: *const fn () u32 = @ptrFromInt(7);
pub const get_smp_processor_id: *const fn () u32 = @ptrFromInt(8);
pub const skb_store_bytes: *const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long = @ptrFromInt(9);
pub const l3_csum_replace: *const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long = @ptrFromInt(10);
pub const l4_csum_replace: *const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long = @ptrFromInt(11);
pub const tail_call: *const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long = @ptrFromInt(12);
pub const clone_redirect: *const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long = @ptrFromInt(13);
pub const get_current_pid_tgid: *const fn () u64 = @ptrFromInt(14);
pub const get_current_uid_gid: *const fn () u64 = @ptrFromInt(15);
pub const get_current_comm: *const fn (buf: ?*anyopaque, size_of_buf: u32) c_long = @ptrFromInt(16);
pub const get_cgroup_classid: *const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(17);
// Note vlan_proto is big endian
pub const skb_vlan_push: *const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long = @ptrFromInt(18);
pub const skb_vlan_pop: *const fn (skb: *kern.SkBuff) c_long = @ptrFromInt(19);
pub const skb_get_tunnel_key: *const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long = @ptrFromInt(20);
pub const skb_set_tunnel_key: *const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long = @ptrFromInt(21);
pub const perf_event_read: *const fn (map: *const kern.MapDef, flags: u64) u64 = @ptrFromInt(22);
pub const redirect: *const fn (ifindex: u32, flags: u64) c_long = @ptrFromInt(23);
pub const get_route_realm: *const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(24);
pub const perf_event_output: *const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(25);
pub const skb_load_bytes: *const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long = @ptrFromInt(26);
pub const get_stackid: *const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long = @ptrFromInt(27);
// from and to point to __be32
pub const csum_diff: *const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64 = @ptrFromInt(28);
pub const skb_get_tunnel_opt: *const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long = @ptrFromInt(29);
pub const skb_set_tunnel_opt: *const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long = @ptrFromInt(30);
// proto is __be16
pub const skb_change_proto: *const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long = @ptrFromInt(31);
pub const skb_change_type: *const fn (skb: *kern.SkBuff, skb_type: u32) c_long = @ptrFromInt(32);
pub const skb_under_cgroup: *const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long = @ptrFromInt(33);
pub const get_hash_recalc: *const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(34);
pub const get_current_task: *const fn () u64 = @ptrFromInt(35);
pub const probe_write_user: *const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long = @ptrFromInt(36);
pub const current_task_under_cgroup: *const fn (map: *const kern.MapDef, index: u32) c_long = @ptrFromInt(37);
pub const skb_change_tail: *const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long = @ptrFromInt(38);
pub const skb_pull_data: *const fn (skb: *kern.SkBuff, len: u32) c_long = @ptrFromInt(39);
pub const csum_update: *const fn (skb: *kern.SkBuff, csum: u32) i64 = @ptrFromInt(40);
pub const set_hash_invalid: *const fn (skb: *kern.SkBuff) void = @ptrFromInt(41);
pub const get_numa_node_id: *const fn () c_long = @ptrFromInt(42);
pub const skb_change_head: *const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long = @ptrFromInt(43);
pub const xdp_adjust_head: *const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(44);
pub const probe_read_str: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(45);
pub const get_socket_cookie: *const fn (ctx: ?*anyopaque) u64 = @ptrFromInt(46);
pub const get_socket_uid: *const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(47);
pub const set_hash: *const fn (skb: *kern.SkBuff, hash: u32) c_long = @ptrFromInt(48);
pub const setsockopt: *const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long = @ptrFromInt(49);
pub const skb_adjust_room: *const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long = @ptrFromInt(50);
pub const redirect_map: *const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(51);
pub const sk_redirect_map: *const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(52);
pub const sock_map_update: *const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(53);
pub const xdp_adjust_meta: *const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(54);
pub const perf_event_read_value: *const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long = @ptrFromInt(55);
pub const perf_prog_read_value: *const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long = @ptrFromInt(56);
pub const getsockopt: *const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long = @ptrFromInt(57);
pub const override_return: *const fn (regs: *PtRegs, rc: u64) c_long = @ptrFromInt(58);
pub const sock_ops_cb_flags_set: *const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long = @ptrFromInt(59);
pub const msg_redirect_map: *const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(60);
pub const msg_apply_bytes: *const fn (msg: *kern.SkMsgMd, bytes: u32) c_long = @ptrFromInt(61);
pub const msg_cork_bytes: *const fn (msg: *kern.SkMsgMd, bytes: u32) c_long = @ptrFromInt(62);
pub const msg_pull_data: *const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long = @ptrFromInt(63);
pub const bind: *const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long = @ptrFromInt(64);
pub const xdp_adjust_tail: *const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(65);
pub const skb_get_xfrm_state: *const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long = @ptrFromInt(66);
pub const get_stack: *const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(67);
pub const skb_load_bytes_relative: *const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long = @ptrFromInt(68);
pub const fib_lookup: *const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long = @ptrFromInt(69);
pub const sock_hash_update: *const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(70);
pub const msg_redirect_hash: *const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(71);
pub const sk_redirect_hash: *const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(72);
pub const lwt_push_encap: *const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long = @ptrFromInt(73);
pub const lwt_seg6_store_bytes: *const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long = @ptrFromInt(74);
pub const lwt_seg6_adjust_srh: *const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long = @ptrFromInt(75);
pub const lwt_seg6_action: *const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long = @ptrFromInt(76);
pub const rc_repeat: *const fn (ctx: ?*anyopaque) c_long = @ptrFromInt(77);
pub const rc_keydown: *const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long = @ptrFromInt(78);
pub const skb_cgroup_id: *const fn (skb: *kern.SkBuff) u64 = @ptrFromInt(79);
pub const get_current_cgroup_id: *const fn () u64 = @ptrFromInt(80);
pub const get_local_storage: *const fn (map: ?*anyopaque, flags: u64) ?*anyopaque = @ptrFromInt(81);
pub const sk_select_reuseport: *const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(82);
pub const skb_ancestor_cgroup_id: *const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64 = @ptrFromInt(83);
pub const sk_lookup_tcp: *const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(84);
pub const sk_lookup_udp: *const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(85);
pub const sk_release: *const fn (sock: *kern.Sock) c_long = @ptrFromInt(86);
pub const map_push_elem: *const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long = @ptrFromInt(87);
pub const map_pop_elem: *const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long = @ptrFromInt(88);
pub const map_peek_elem: *const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long = @ptrFromInt(89);
pub const msg_push_data: *const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long = @ptrFromInt(90);
pub const msg_pop_data: *const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long = @ptrFromInt(91);
pub const rc_pointer_rel: *const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long = @ptrFromInt(92);
pub const spin_lock: *const fn (lock: *kern.SpinLock) c_long = @ptrFromInt(93);
pub const spin_unlock: *const fn (lock: *kern.SpinLock) c_long = @ptrFromInt(94);
pub const sk_fullsock: *const fn (sk: *kern.Sock) ?*SkFullSock = @ptrFromInt(95);
pub const tcp_sock: *const fn (sk: *kern.Sock) ?*kern.TcpSock = @ptrFromInt(96);
pub const skb_ecn_set_ce: *const fn (skb: *kern.SkBuff) c_long = @ptrFromInt(97);
pub const get_listener_sock: *const fn (sk: *kern.Sock) ?*kern.Sock = @ptrFromInt(98);
pub const skc_lookup_tcp: *const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(99);
pub const tcp_check_syncookie: *const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long = @ptrFromInt(100);
pub const sysctl_get_name: *const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long = @ptrFromInt(101);
pub const sysctl_get_current_value: *const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long = @ptrFromInt(102);
pub const sysctl_get_new_value: *const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long = @ptrFromInt(103);
pub const sysctl_set_new_value: *const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long = @ptrFromInt(104);
pub const strtol: *const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long = @ptrFromInt(105);
pub const strtoul: *const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long = @ptrFromInt(106);
pub const sk_storage_get: *const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque = @ptrFromInt(107);
pub const sk_storage_delete: *const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long = @ptrFromInt(108);
pub const send_signal: *const fn (sig: u32) c_long = @ptrFromInt(109);
pub const tcp_gen_syncookie: *const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64 = @ptrFromInt(110);
pub const skb_output: *const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(111);
pub const probe_read_user: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(112);
pub const probe_read_kernel: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(113);
pub const probe_read_user_str: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(114);
pub const probe_read_kernel_str: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(115);
pub const tcp_send_ack: *const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long = @ptrFromInt(116);
pub const send_signal_thread: *const fn (sig: u32) c_long = @ptrFromInt(117);
pub const jiffies64: *const fn () u64 = @ptrFromInt(118);
pub const read_branch_records: *const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(119);
pub const get_ns_current_pid_tgid: *const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long = @ptrFromInt(120);
pub const xdp_output: *const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(121);
pub const get_netns_cookie: *const fn (ctx: ?*anyopaque) u64 = @ptrFromInt(122);
pub const get_current_ancestor_cgroup_id: *const fn (ancestor_level: c_int) u64 = @ptrFromInt(123);
pub const sk_assign: *const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long = @ptrFromInt(124);
pub const ktime_get_boot_ns: *const fn () u64 = @ptrFromInt(125);
pub const seq_printf: *const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long = @ptrFromInt(126);
pub const seq_write: *const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long = @ptrFromInt(127);
pub const sk_cgroup_id: *const fn (sk: *kern.BpfSock) u64 = @ptrFromInt(128);
pub const sk_ancestor_cgroup_id: *const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64 = @ptrFromInt(129);
pub const ringbuf_output: *const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long = @ptrFromInt(130);
pub const ringbuf_reserve: *const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque = @ptrFromInt(131);
pub const ringbuf_submit: *const fn (data: ?*anyopaque, flags: u64) void = @ptrFromInt(132);
pub const ringbuf_discard: *const fn (data: ?*anyopaque, flags: u64) void = @ptrFromInt(133);
pub const ringbuf_query: *const fn (ringbuf: ?*anyopaque, flags: u64) u64 = @ptrFromInt(134);
pub const csum_level: *const fn (skb: *kern.SkBuff, level: u64) c_long = @ptrFromInt(135);
pub const skc_to_tcp6_sock: *const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock = @ptrFromInt(136);
pub const skc_to_tcp_sock: *const fn (sk: ?*anyopaque) ?*kern.TcpSock = @ptrFromInt(137);
pub const skc_to_tcp_timewait_sock: *const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock = @ptrFromInt(138);
pub const skc_to_tcp_request_sock: *const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock = @ptrFromInt(139);
pub const skc_to_udp6_sock: *const fn (sk: ?*anyopaque) ?*kern.Udp6Sock = @ptrFromInt(140);
pub const get_task_stack: *const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(141);
| https://raw.githubusercontent.com/mundusnine/FoundryTools_windows_x64/b64cdb7e56db28eb710a05a089aed0daff8bc8be/lib/std/os/linux/bpf/helpers.zig |
const std = @import("../../../std.zig");
const kern = @import("kern.zig");

// Placeholders for OS types that have not been ported yet; referencing any of
// them is a compile error, so only programs that actually use the affected
// helpers are blocked.
const PtRegs = @compileError("TODO missing os bits: PtRegs");
const TcpHdr = @compileError("TODO missing os bits: TcpHdr");
const SkFullSock = @compileError("TODO missing os bits: SkFullSock");

// in BPF, all the helper calls
// TODO: when https://github.com/ziglang/zig/issues/1717 is here, make a nice
// function that uses the Helper enum
//
// Each helper is exposed as a typed function pointer whose address is the
// helper's kernel-assigned ID. Signatures follow the documentation in
// '/usr/include/linux/bpf.h'.
pub const map_lookup_elem: *const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque = @ptrFromInt(1);
pub const map_update_elem: *const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long = @ptrFromInt(2);
pub const map_delete_elem: *const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long = @ptrFromInt(3);
pub const probe_read: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(4);
pub const ktime_get_ns: *const fn () u64 = @ptrFromInt(5);
pub const trace_printk: *const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long = @ptrFromInt(6);
pub const get_prandom_u32: *const fn () u32 = @ptrFromInt(7);
pub const get_smp_processor_id: *const fn () u32 = @ptrFromInt(8);
pub const skb_store_bytes: *const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long = @ptrFromInt(9);
pub const l3_csum_replace: *const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long = @ptrFromInt(10);
pub const l4_csum_replace: *const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long = @ptrFromInt(11);
pub const tail_call: *const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long = @ptrFromInt(12);
pub const clone_redirect: *const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long = @ptrFromInt(13);
pub const get_current_pid_tgid: *const fn () u64 = @ptrFromInt(14);
pub const get_current_uid_gid: *const fn () u64 = @ptrFromInt(15);
pub const get_current_comm: *const fn (buf: ?*anyopaque, size_of_buf: u32) c_long = @ptrFromInt(16);
pub const get_cgroup_classid: *const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(17);
// Note vlan_proto is big endian
pub const skb_vlan_push: *const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long = @ptrFromInt(18);
pub const skb_vlan_pop: *const fn (skb: *kern.SkBuff) c_long = @ptrFromInt(19);
pub const skb_get_tunnel_key: *const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long = @ptrFromInt(20);
pub const skb_set_tunnel_key: *const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long = @ptrFromInt(21);
pub const perf_event_read: *const fn (map: *const kern.MapDef, flags: u64) u64 = @ptrFromInt(22);
pub const redirect: *const fn (ifindex: u32, flags: u64) c_long = @ptrFromInt(23);
pub const get_route_realm: *const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(24);
pub const perf_event_output: *const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(25);
pub const skb_load_bytes: *const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long = @ptrFromInt(26);
pub const get_stackid: *const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long = @ptrFromInt(27);
// from and to point to __be32
pub const csum_diff: *const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64 = @ptrFromInt(28);
pub const skb_get_tunnel_opt: *const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long = @ptrFromInt(29);
pub const skb_set_tunnel_opt: *const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long = @ptrFromInt(30);
// proto is __be16
pub const skb_change_proto: *const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long = @ptrFromInt(31);
pub const skb_change_type: *const fn (skb: *kern.SkBuff, skb_type: u32) c_long = @ptrFromInt(32);
pub const skb_under_cgroup: *const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long = @ptrFromInt(33);
pub const get_hash_recalc: *const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(34);
pub const get_current_task: *const fn () u64 = @ptrFromInt(35);
pub const probe_write_user: *const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long = @ptrFromInt(36);
pub const current_task_under_cgroup: *const fn (map: *const kern.MapDef, index: u32) c_long = @ptrFromInt(37);
pub const skb_change_tail: *const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long = @ptrFromInt(38);
pub const skb_pull_data: *const fn (skb: *kern.SkBuff, len: u32) c_long = @ptrFromInt(39);
pub const csum_update: *const fn (skb: *kern.SkBuff, csum: u32) i64 = @ptrFromInt(40);
pub const set_hash_invalid: *const fn (skb: *kern.SkBuff) void = @ptrFromInt(41);
pub const get_numa_node_id: *const fn () c_long = @ptrFromInt(42);
pub const skb_change_head: *const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long = @ptrFromInt(43);
pub const xdp_adjust_head: *const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(44);
pub const probe_read_str: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(45);
pub const get_socket_cookie: *const fn (ctx: ?*anyopaque) u64 = @ptrFromInt(46);
pub const get_socket_uid: *const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(47);
pub const set_hash: *const fn (skb: *kern.SkBuff, hash: u32) c_long = @ptrFromInt(48);
pub const setsockopt: *const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long = @ptrFromInt(49);
pub const skb_adjust_room: *const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long = @ptrFromInt(50);
pub const redirect_map: *const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(51);
pub const sk_redirect_map: *const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(52);
pub const sock_map_update: *const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(53);
pub const xdp_adjust_meta: *const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(54);
pub const perf_event_read_value: *const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long = @ptrFromInt(55);
pub const perf_prog_read_value: *const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long = @ptrFromInt(56);
pub const getsockopt: *const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long = @ptrFromInt(57);
pub const override_return: *const fn (regs: *PtRegs, rc: u64) c_long = @ptrFromInt(58);
pub const sock_ops_cb_flags_set: *const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long = @ptrFromInt(59);
pub const msg_redirect_map: *const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(60);
pub const msg_apply_bytes: *const fn (msg: *kern.SkMsgMd, bytes: u32) c_long = @ptrFromInt(61);
pub const msg_cork_bytes: *const fn (msg: *kern.SkMsgMd, bytes: u32) c_long = @ptrFromInt(62);
pub const msg_pull_data: *const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long = @ptrFromInt(63);
pub const bind: *const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long = @ptrFromInt(64);
pub const xdp_adjust_tail: *const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(65);
pub const skb_get_xfrm_state: *const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long = @ptrFromInt(66);
pub const get_stack: *const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(67);
pub const skb_load_bytes_relative: *const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long = @ptrFromInt(68);
pub const fib_lookup: *const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long = @ptrFromInt(69);
pub const sock_hash_update: *const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(70);
pub const msg_redirect_hash: *const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(71);
pub const sk_redirect_hash: *const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(72);
pub const lwt_push_encap: *const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long = @ptrFromInt(73);
pub const lwt_seg6_store_bytes: *const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long = @ptrFromInt(74);
pub const lwt_seg6_adjust_srh: *const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long = @ptrFromInt(75);
pub const lwt_seg6_action: *const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long = @ptrFromInt(76);
pub const rc_repeat: *const fn (ctx: ?*anyopaque) c_long = @ptrFromInt(77);
pub const rc_keydown: *const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long = @ptrFromInt(78);
pub const skb_cgroup_id: *const fn (skb: *kern.SkBuff) u64 = @ptrFromInt(79);
pub const get_current_cgroup_id: *const fn () u64 = @ptrFromInt(80);
pub const get_local_storage: *const fn (map: ?*anyopaque, flags: u64) ?*anyopaque = @ptrFromInt(81);
pub const sk_select_reuseport: *const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(82);
pub const skb_ancestor_cgroup_id: *const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64 = @ptrFromInt(83);
pub const sk_lookup_tcp: *const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(84);
pub const sk_lookup_udp: *const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(85);
pub const sk_release: *const fn (sock: *kern.Sock) c_long = @ptrFromInt(86);
pub const map_push_elem: *const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long = @ptrFromInt(87);
pub const map_pop_elem: *const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long = @ptrFromInt(88);
pub const map_peek_elem: *const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long = @ptrFromInt(89);
pub const msg_push_data: *const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long = @ptrFromInt(90);
pub const msg_pop_data: *const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long = @ptrFromInt(91);
pub const rc_pointer_rel: *const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long = @ptrFromInt(92);
pub const spin_lock: *const fn (lock: *kern.SpinLock) c_long = @ptrFromInt(93);
pub const spin_unlock: *const fn (lock: *kern.SpinLock) c_long = @ptrFromInt(94);
pub const sk_fullsock: *const fn (sk: *kern.Sock) ?*SkFullSock = @ptrFromInt(95);
pub const tcp_sock: *const fn (sk: *kern.Sock) ?*kern.TcpSock = @ptrFromInt(96);
pub const skb_ecn_set_ce: *const fn (skb: *kern.SkBuff) c_long = @ptrFromInt(97);
pub const get_listener_sock: *const fn (sk: *kern.Sock) ?*kern.Sock = @ptrFromInt(98);
pub const skc_lookup_tcp: *const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(99);
pub const tcp_check_syncookie: *const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long = @ptrFromInt(100);
pub const sysctl_get_name: *const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long = @ptrFromInt(101);
pub const sysctl_get_current_value: *const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long = @ptrFromInt(102);
pub const sysctl_get_new_value: *const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long = @ptrFromInt(103);
pub const sysctl_set_new_value: *const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long = @ptrFromInt(104);
pub const strtol: *const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long = @ptrFromInt(105);
pub const strtoul: *const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long = @ptrFromInt(106);
pub const sk_storage_get: *const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque = @ptrFromInt(107);
pub const sk_storage_delete: *const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long = @ptrFromInt(108);
pub const send_signal: *const fn (sig: u32) c_long = @ptrFromInt(109);
pub const tcp_gen_syncookie: *const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64 = @ptrFromInt(110);
pub const skb_output: *const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(111);
pub const probe_read_user: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(112);
pub const probe_read_kernel: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(113);
pub const probe_read_user_str: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(114);
pub const probe_read_kernel_str: *const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(115);
pub const tcp_send_ack: *const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long = @ptrFromInt(116);
pub const send_signal_thread: *const fn (sig: u32) c_long = @ptrFromInt(117);
pub const jiffies64: *const fn () u64 = @ptrFromInt(118);
pub const read_branch_records: *const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(119);
pub const get_ns_current_pid_tgid: *const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long = @ptrFromInt(120);
pub const xdp_output: *const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(121);
pub const get_netns_cookie: *const fn (ctx: ?*anyopaque) u64 = @ptrFromInt(122);
pub const get_current_ancestor_cgroup_id: *const fn (ancestor_level: c_int) u64 = @ptrFromInt(123);
pub const sk_assign: *const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long = @ptrFromInt(124);
pub const ktime_get_boot_ns: *const fn () u64 = @ptrFromInt(125);
pub const seq_printf: *const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long = @ptrFromInt(126);
pub const seq_write: *const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long = @ptrFromInt(127);
pub const sk_cgroup_id: *const fn (sk: *kern.BpfSock) u64 = @ptrFromInt(128);
pub const sk_ancestor_cgroup_id: *const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64 = @ptrFromInt(129);
pub const ringbuf_output: *const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long = @ptrFromInt(130);
pub const ringbuf_reserve: *const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque = @ptrFromInt(131);
pub const ringbuf_submit: *const fn (data: ?*anyopaque, flags: u64) void = @ptrFromInt(132);
pub const ringbuf_discard: *const fn (data: ?*anyopaque, flags: u64) void = @ptrFromInt(133);
pub const ringbuf_query: *const fn (ringbuf: ?*anyopaque, flags: u64) u64 = @ptrFromInt(134);
pub const csum_level: *const fn (skb: *kern.SkBuff, level: u64) c_long = @ptrFromInt(135);
pub const skc_to_tcp6_sock: *const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock = @ptrFromInt(136);
pub const skc_to_tcp_sock: *const fn (sk: ?*anyopaque) ?*kern.TcpSock = @ptrFromInt(137);
pub const skc_to_tcp_timewait_sock: *const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock = @ptrFromInt(138);
pub const skc_to_tcp_request_sock: *const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock = @ptrFromInt(139);
pub const skc_to_udp6_sock: *const fn (sk: ?*anyopaque) ?*kern.Udp6Sock = @ptrFromInt(140);
pub const get_task_stack: *const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(141);
| https://raw.githubusercontent.com/mundusnine/FoundryTools_linux_x64/98e738bf92a416b255c9d11b78e8033071b52672/lib/std/os/linux/bpf/helpers.zig |
const std = @import("../../../std.zig");
const kern = @import("kern.zig");

// Referenced by some helper signatures below but not yet defined for Zig;
// any use of these names is a compile error until the os bits are added.
const PtRegs = @compileError("TODO missing os bits: PtRegs");
const TcpHdr = @compileError("TODO missing os bits: TcpHdr");
const SkFullSock = @compileError("TODO missing os bits: SkFullSock");

// in BPF, all the helper calls
// TODO: when https://github.com/ziglang/zig/issues/1717 is here, make a nice
// function that uses the Helper enum
//
// Note, these function signatures were created from documentation found in
// '/usr/include/linux/bpf.h'
// Each helper below is a typed function pointer whose "address" is the
// helper's numeric ID from bpf.h; the BPF call instruction dispatches on it.
// Map helpers (IDs 1-3).
pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1));
pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2));
pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3));
pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4));
pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5));
pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6));
pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7));
pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8));
// Socket-buffer (skb) manipulation helpers.
pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9));
pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10));
pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11));
pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12));
pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, @ptrFromInt(13));
// Current-task identity helpers.
pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14));
pub const get_current_uid_gid = @as(*const fn () u64, @ptrFromInt(15));
pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16));
pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17));
// Note vlan_proto is big endian
pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18));
pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19));
pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20));
pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21));
pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22));
pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23));
pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24));
pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25));
pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26));
pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27));
// from and to point to __be32
pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28));
pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29));
pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30));
// proto is __be16
pub const skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31));
pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32));
pub const skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33));
pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34));
pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35));
pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36));
pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37));
pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38));
pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39));
pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40));
pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41));
pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42));
pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43));
// XDP (express data path) helpers.
pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44));
pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45));
pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46));
pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47));
pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48));
pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49));
pub const skb_adjust_room = @as(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50));
pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51));
pub const sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52));
pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53));
pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54));
pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55));
pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56));
pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57));
pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58));
pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59));
// sk_msg helpers (socket message programs).
pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60));
pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61));
pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62));
pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63));
pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64));
pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65));
pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66));
pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(67));
pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68));
pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69));
pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70));
pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71));
pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72));
// Lightweight tunnel / SRv6 helpers.
pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73));
pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74));
pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75));
pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76));
// Infrared remote-control (lirc) helpers.
pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77));
pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, @ptrFromInt(78));
pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79));
pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80));
pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81));
pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82));
pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, @ptrFromInt(83));
// Socket lookup helpers: return null when no matching socket is found.
pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84));
pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85));
pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86));
// Queue/stack map helpers.
pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87));
pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88));
pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89));
pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90));
pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91));
pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92));
pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93));
pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94));
pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95));
pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96));
pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97));
pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98));
pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99));
pub const tcp_check_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100));
// sysctl program helpers.
pub const sysctl_get_name = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101));
pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102));
pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103));
pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104));
pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105));
pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106));
pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107));
pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108));
pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109));
pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110));
pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111));
// Explicit user-space / kernel-space probe reads.
pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112));
pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113));
pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114));
pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(115));
pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116));
pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117));
pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118));
pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119));
pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120));
pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121));
pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122));
pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123));
pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124));
pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125));
// BPF iterator seq_file helpers.
pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126));
pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127));
pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128));
pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129));
// BPF ring buffer helpers (IDs 130-134).
pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130));
pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131));
pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132));
pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133));
pub const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134));
pub const csum_level = @as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135));
// Socket down-casts: return null if the socket is not of the requested kind.
pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136));
pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137));
pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138));
pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139));
pub const skc_to_udp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140));
pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141));
| https://raw.githubusercontent.com/matpx/daydream/018ad0c7caaf796d8a04b882fcbed39ccb7c9cd8/toolchain/zig/lib/std/os/linux/bpf/helpers.zig |
/// Re-export of the display backend implementations for this package.
pub const backends = @import("display/backends.zig");
| https://raw.githubusercontent.com/PhantomUIx/display-ramfb/3b58c87f5484cb082f2008fd21ef2433d4de723f/src/phantom/display.zig |
/// Re-export of the display backend implementations for this package.
pub const backends = @import("display/backends.zig");
| https://raw.githubusercontent.com/PhantomUIx/display-xcb/303dad53b21186a71912f49b9690f0cbf7741bb0/src/phantom/display.zig |
/// Re-export of the display backend implementations for this package.
pub const backends = @import("display/backends.zig");
| https://raw.githubusercontent.com/PhantomUIx/display-drm/f0c62d360938219e57a31934fde7a0c9b21bd93a/src/phantom/display.zig |
/// Re-export of the display backend implementations for this package.
pub const backends = @import("display/backends.zig");
| https://raw.githubusercontent.com/PhantomUIx/display-fbdev/ce43e92e6530a10c7ea964fe5b281f8a9b402b89/src/phantom/display.zig |
const print = @import("std").debug.print;

// Implemented in a separately compiled unit (presumably built with a
// different @setFloatMode — TODO confirm against the companion source).
extern fn foo_strict(x: f64) f64;
extern fn foo_optimized(x: f64) f64;

// Prints the result of the same computation under both float modes so the
// outputs can be compared.
pub fn main() void {
    const x = 0.001;
    print("optimized = {}\n", .{foo_optimized(x)});
    print("strict = {}\n", .{foo_strict(x)});
}

// syntax
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/doc/langref/float_mode_exe.zig |
const print = @import("std").debug.print;

extern fn foo_strict(x: f64) f64;
extern fn foo_optimized(x: f64) f64;

/// Evaluate the same input under both externally-defined float routines and
/// print each result so their outputs can be compared.
pub fn main() void {
    const input: f64 = 0.001;

    const optimized_result = foo_optimized(input);
    print("optimized = {}\n", .{optimized_result});

    const strict_result = foo_strict(input);
    print("strict = {}\n", .{strict_result});
}

// syntax
| https://raw.githubusercontent.com/kassane/zig-mos-bootstrap/19aac4779b9e93b0e833402c26c93cfc13bb94e2/zig/doc/langref/float_mode_exe.zig |
// wasi_snapshot_preview1 spec available (in witx format) here:
// * typenames -- https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/typenames.witx
// * module -- https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/wasi_snapshot_preview1.witx
const std = @import("std");
const assert = std.debug.assert;
// Sanity-check the primitive alignments the extern structs below rely on.
comptime {
    assert(@alignOf(i8) == 1);
    assert(@alignOf(u8) == 1);
    assert(@alignOf(i16) == 2);
    assert(@alignOf(u16) == 2);
    assert(@alignOf(i32) == 4);
    assert(@alignOf(u32) == 4);
    // assert(@alignOf(i64) == 8);
    // assert(@alignOf(u64) == 8);
}

// I/O vector types shared with the rest of std.os.
pub const iovec_t = std.os.iovec;
pub const ciovec_t = std.os.iovec_const;
// Raw imports from the "wasi_snapshot_preview1" module; signatures follow the
// witx module spec linked at the top of this file.
// Command-line arguments and environment.
pub extern "wasi_snapshot_preview1" fn args_get(argv: [*][*:0]u8, argv_buf: [*]u8) errno_t;
pub extern "wasi_snapshot_preview1" fn args_sizes_get(argc: *usize, argv_buf_size: *usize) errno_t;
// Clocks.
pub extern "wasi_snapshot_preview1" fn clock_res_get(clock_id: clockid_t, resolution: *timestamp_t) errno_t;
pub extern "wasi_snapshot_preview1" fn clock_time_get(clock_id: clockid_t, precision: timestamp_t, timestamp: *timestamp_t) errno_t;
pub extern "wasi_snapshot_preview1" fn environ_get(environ: [*][*:0]u8, environ_buf: [*]u8) errno_t;
pub extern "wasi_snapshot_preview1" fn environ_sizes_get(environ_count: *usize, environ_buf_size: *usize) errno_t;
// File-descriptor operations.
pub extern "wasi_snapshot_preview1" fn fd_advise(fd: fd_t, offset: filesize_t, len: filesize_t, advice: advice_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_allocate(fd: fd_t, offset: filesize_t, len: filesize_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_close(fd: fd_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_datasync(fd: fd_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_pread(fd: fd_t, iovs: [*]const iovec_t, iovs_len: usize, offset: filesize_t, nread: *usize) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_pwrite(fd: fd_t, iovs: [*]const ciovec_t, iovs_len: usize, offset: filesize_t, nwritten: *usize) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_read(fd: fd_t, iovs: [*]const iovec_t, iovs_len: usize, nread: *usize) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_readdir(fd: fd_t, buf: [*]u8, buf_len: usize, cookie: dircookie_t, bufused: *usize) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_renumber(from: fd_t, to: fd_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_seek(fd: fd_t, offset: filedelta_t, whence: whence_t, newoffset: *filesize_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_sync(fd: fd_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_tell(fd: fd_t, newoffset: *filesize_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_write(fd: fd_t, iovs: [*]const ciovec_t, iovs_len: usize, nwritten: *usize) errno_t;
// File-descriptor metadata.
pub extern "wasi_snapshot_preview1" fn fd_fdstat_get(fd: fd_t, buf: *fdstat_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_fdstat_set_flags(fd: fd_t, flags: fdflags_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_fdstat_set_rights(fd: fd_t, fs_rights_base: rights_t, fs_rights_inheriting: rights_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_filestat_get(fd: fd_t, buf: *filestat_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_filestat_set_size(fd: fd_t, st_size: filesize_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_filestat_set_times(fd: fd_t, st_atim: timestamp_t, st_mtim: timestamp_t, fstflags: fstflags_t) errno_t;
// Preopened directories.
pub extern "wasi_snapshot_preview1" fn fd_prestat_get(fd: fd_t, buf: *prestat_t) errno_t;
pub extern "wasi_snapshot_preview1" fn fd_prestat_dir_name(fd: fd_t, path: [*]u8, path_len: usize) errno_t;
// Path operations, all relative to a directory fd.
pub extern "wasi_snapshot_preview1" fn path_create_directory(fd: fd_t, path: [*]const u8, path_len: usize) errno_t;
pub extern "wasi_snapshot_preview1" fn path_filestat_get(fd: fd_t, flags: lookupflags_t, path: [*]const u8, path_len: usize, buf: *filestat_t) errno_t;
pub extern "wasi_snapshot_preview1" fn path_filestat_set_times(fd: fd_t, flags: lookupflags_t, path: [*]const u8, path_len: usize, st_atim: timestamp_t, st_mtim: timestamp_t, fstflags: fstflags_t) errno_t;
pub extern "wasi_snapshot_preview1" fn path_link(old_fd: fd_t, old_flags: lookupflags_t, old_path: [*]const u8, old_path_len: usize, new_fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t;
pub extern "wasi_snapshot_preview1" fn path_open(dirfd: fd_t, dirflags: lookupflags_t, path: [*]const u8, path_len: usize, oflags: oflags_t, fs_rights_base: rights_t, fs_rights_inheriting: rights_t, fs_flags: fdflags_t, fd: *fd_t) errno_t;
pub extern "wasi_snapshot_preview1" fn path_readlink(fd: fd_t, path: [*]const u8, path_len: usize, buf: [*]u8, buf_len: usize, bufused: *usize) errno_t;
pub extern "wasi_snapshot_preview1" fn path_remove_directory(fd: fd_t, path: [*]const u8, path_len: usize) errno_t;
pub extern "wasi_snapshot_preview1" fn path_rename(old_fd: fd_t, old_path: [*]const u8, old_path_len: usize, new_fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t;
pub extern "wasi_snapshot_preview1" fn path_symlink(old_path: [*]const u8, old_path_len: usize, fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t;
pub extern "wasi_snapshot_preview1" fn path_unlink_file(fd: fd_t, path: [*]const u8, path_len: usize) errno_t;
// Polling, process control, randomness, sockets.
pub extern "wasi_snapshot_preview1" fn poll_oneoff(in: *const subscription_t, out: *event_t, nsubscriptions: usize, nevents: *usize) errno_t;
pub extern "wasi_snapshot_preview1" fn proc_exit(rval: exitcode_t) noreturn;
pub extern "wasi_snapshot_preview1" fn random_get(buf: [*]u8, buf_len: usize) errno_t;
pub extern "wasi_snapshot_preview1" fn sched_yield() errno_t;
pub extern "wasi_snapshot_preview1" fn sock_recv(sock: fd_t, ri_data: *const iovec_t, ri_data_len: usize, ri_flags: riflags_t, ro_datalen: *usize, ro_flags: *roflags_t) errno_t;
pub extern "wasi_snapshot_preview1" fn sock_send(sock: fd_t, si_data: *const ciovec_t, si_data_len: usize, si_flags: siflags_t, so_datalen: *usize) errno_t;
pub extern "wasi_snapshot_preview1" fn sock_shutdown(sock: fd_t, how: sdflags_t) errno_t;
/// Get the errno from a syscall return value, or 0 for no error.
/// On WASI the syscalls above already return an errno_t directly, so this is
/// the identity function (kept for API parity with other OS backends).
pub fn getErrno(r: errno_t) errno_t {
    return r;
}
// Well-known standard file descriptors.
pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;

// WASI itself has no mode bits; this type exists for POSIX-shaped APIs.
pub const mode_t = u32;

pub const time_t = i64; // match https://github.com/CraneStation/wasi-libc
/// POSIX-style seconds + nanoseconds pair, convertible to and from WASI's
/// nanosecond-resolution timestamp_t.
pub const timespec = struct {
    tv_sec: time_t,
    tv_nsec: isize,

    /// Split a nanosecond timestamp into whole seconds and the remainder.
    pub fn fromTimestamp(tm: timestamp_t) timespec {
        const tv_sec: timestamp_t = tm / 1_000_000_000;
        const tv_nsec = tm - tv_sec * 1_000_000_000;
        return timespec{
            .tv_sec = @intCast(time_t, tv_sec),
            .tv_nsec = @intCast(isize, tv_nsec),
        };
    }

    /// Recombine seconds and nanoseconds into a single nanosecond timestamp.
    pub fn toTimestamp(ts: timespec) timestamp_t {
        const tm = @intCast(timestamp_t, ts.tv_sec * 1_000_000_000) + @intCast(timestamp_t, ts.tv_nsec);
        return tm;
    }
};
/// POSIX-flavored stat record built from WASI's filestat_t.
pub const Stat = struct {
    dev: device_t,
    ino: inode_t,
    mode: mode_t, // always 0: WASI filestat_t carries no mode bits
    filetype: filetype_t,
    nlink: linkcount_t,
    size: filesize_t,
    atim: timespec,
    mtim: timespec,
    ctim: timespec,

    const Self = @This();

    /// Convert a raw filestat_t, expanding its nanosecond timestamps into
    /// timespec values. mode is set to 0 (no mode bits exist in WASI).
    pub fn fromFilestat(stat: filestat_t) Self {
        return Self{
            .dev = stat.dev,
            .ino = stat.ino,
            .mode = 0,
            .filetype = stat.filetype,
            .nlink = stat.nlink,
            .size = stat.size,
            .atim = stat.atime(),
            .mtim = stat.mtime(),
            .ctim = stat.ctime(),
        };
    }

    /// Last access time.
    pub fn atime(self: Self) timespec {
        return self.atim;
    }

    /// Last modification time.
    pub fn mtime(self: Self) timespec {
        return self.mtim;
    }

    /// Last status-change time.
    pub fn ctime(self: Self) timespec {
        return self.ctim;
    }
};
// Maximum number of iovecs accepted per scatter/gather call.
pub const IOV_MAX = 1024;

pub const AT = struct {
    pub const REMOVEDIR: u32 = 0x4;
    // Pseudo-fd meaning "current working directory" (wasi-libc convention).
    pub const FDCWD: fd_t = -2;
};

// As defined in the wasi_snapshot_preview1 spec file:
// https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/typenames.witx
pub const advice_t = u8;
pub const ADVICE_NORMAL: advice_t = 0;
pub const ADVICE_SEQUENTIAL: advice_t = 1;
pub const ADVICE_RANDOM: advice_t = 2;
pub const ADVICE_WILLNEED: advice_t = 3;
pub const ADVICE_DONTNEED: advice_t = 4;
pub const ADVICE_NOREUSE: advice_t = 5;

pub const clockid_t = u32;
pub const CLOCK = struct {
    pub const REALTIME: clockid_t = 0;
    pub const MONOTONIC: clockid_t = 1;
    pub const PROCESS_CPUTIME_ID: clockid_t = 2;
    pub const THREAD_CPUTIME_ID: clockid_t = 3;
};

pub const device_t = u64;

// Opaque cursor for fd_readdir; DIRCOOKIE_START begins a fresh iteration.
pub const dircookie_t = u64;
pub const DIRCOOKIE_START: dircookie_t = 0;

pub const dirnamlen_t = u32;

// Directory entry header as written by fd_readdir (name bytes follow it).
pub const dirent_t = extern struct {
    d_next: dircookie_t,
    d_ino: inode_t,
    d_namlen: dirnamlen_t,
    d_type: filetype_t,
};
/// WASI error numbers; values are fixed by the snapshot_preview1 spec and
/// must not be reordered. Non-exhaustive so unknown codes round-trip safely.
pub const errno_t = enum(u16) {
    SUCCESS = 0,
    @"2BIG" = 1,
    ACCES = 2,
    ADDRINUSE = 3,
    ADDRNOTAVAIL = 4,
    AFNOSUPPORT = 5,
    /// This is also the error code used for `WOULDBLOCK`.
    AGAIN = 6,
    ALREADY = 7,
    BADF = 8,
    BADMSG = 9,
    BUSY = 10,
    CANCELED = 11,
    CHILD = 12,
    CONNABORTED = 13,
    CONNREFUSED = 14,
    CONNRESET = 15,
    DEADLK = 16,
    DESTADDRREQ = 17,
    DOM = 18,
    DQUOT = 19,
    EXIST = 20,
    FAULT = 21,
    FBIG = 22,
    HOSTUNREACH = 23,
    IDRM = 24,
    ILSEQ = 25,
    INPROGRESS = 26,
    INTR = 27,
    INVAL = 28,
    IO = 29,
    ISCONN = 30,
    ISDIR = 31,
    LOOP = 32,
    MFILE = 33,
    MLINK = 34,
    MSGSIZE = 35,
    MULTIHOP = 36,
    NAMETOOLONG = 37,
    NETDOWN = 38,
    NETRESET = 39,
    NETUNREACH = 40,
    NFILE = 41,
    NOBUFS = 42,
    NODEV = 43,
    NOENT = 44,
    NOEXEC = 45,
    NOLCK = 46,
    NOLINK = 47,
    NOMEM = 48,
    NOMSG = 49,
    NOPROTOOPT = 50,
    NOSPC = 51,
    NOSYS = 52,
    NOTCONN = 53,
    NOTDIR = 54,
    NOTEMPTY = 55,
    NOTRECOVERABLE = 56,
    NOTSOCK = 57,
    /// This is also the code used for `NOTSUP`.
    OPNOTSUPP = 58,
    NOTTY = 59,
    NXIO = 60,
    OVERFLOW = 61,
    OWNERDEAD = 62,
    PERM = 63,
    PIPE = 64,
    PROTO = 65,
    PROTONOSUPPORT = 66,
    PROTOTYPE = 67,
    RANGE = 68,
    ROFS = 69,
    SPIPE = 70,
    SRCH = 71,
    STALE = 72,
    TIMEDOUT = 73,
    TXTBSY = 74,
    XDEV = 75,
    NOTCAPABLE = 76,
    _,
};

// Short alias used throughout std.os.
pub const E = errno_t;
// Event record produced by poll_oneoff for a fired subscription.
pub const event_t = extern struct {
    userdata: userdata_t,
    @"error": errno_t,
    @"type": eventtype_t,
    fd_readwrite: eventfdreadwrite_t,
};

// Payload for FD_READ / FD_WRITE events.
pub const eventfdreadwrite_t = extern struct {
    nbytes: filesize_t,
    flags: eventrwflags_t,
};

pub const eventrwflags_t = u16;
pub const EVENT_FD_READWRITE_HANGUP: eventrwflags_t = 0x0001;

pub const eventtype_t = u8;
pub const EVENTTYPE_CLOCK: eventtype_t = 0;
pub const EVENTTYPE_FD_READ: eventtype_t = 1;
pub const EVENTTYPE_FD_WRITE: eventtype_t = 2;

pub const exitcode_t = u32;
pub const fd_t = i32;

// Per-descriptor status flags (fdstat_t.fs_flags, fd_fdstat_set_flags).
pub const fdflags_t = u16;
pub const FDFLAG = struct {
    pub const APPEND: fdflags_t = 0x0001;
    pub const DSYNC: fdflags_t = 0x0002;
    pub const NONBLOCK: fdflags_t = 0x0004;
    pub const RSYNC: fdflags_t = 0x0008;
    pub const SYNC: fdflags_t = 0x0010;
};

// Result of fd_fdstat_get.
pub const fdstat_t = extern struct {
    fs_filetype: filetype_t,
    fs_flags: fdflags_t,
    fs_rights_base: rights_t,
    fs_rights_inheriting: rights_t,
};

// Signed seek offset; unsigned file size in bytes.
pub const filedelta_t = i64;
pub const filesize_t = u64;
/// Raw file metadata as returned by fd_filestat_get / path_filestat_get;
/// timestamps are nanoseconds (timestamp_t).
pub const filestat_t = extern struct {
    dev: device_t,
    ino: inode_t,
    filetype: filetype_t,
    nlink: linkcount_t,
    size: filesize_t,
    atim: timestamp_t,
    mtim: timestamp_t,
    ctim: timestamp_t,

    /// Last access time as a timespec.
    pub fn atime(self: filestat_t) timespec {
        return timespec.fromTimestamp(self.atim);
    }

    /// Last modification time as a timespec.
    pub fn mtime(self: filestat_t) timespec {
        return timespec.fromTimestamp(self.mtim);
    }

    /// Last status-change time as a timespec.
    pub fn ctime(self: filestat_t) timespec {
        return timespec.fromTimestamp(self.ctim);
    }
};
/// Also known as `FILETYPE`.
pub const filetype_t = enum(u8) {
UNKNOWN,
BLOCK_DEVICE,
CHARACTER_DEVICE,
DIRECTORY,
REGULAR_FILE,
SOCKET_DGRAM,
SOCKET_STREAM,
SYMBOLIC_LINK,
_,
};
pub const fstflags_t = u16;
pub const FILESTAT_SET_ATIM: fstflags_t = 0x0001;
pub const FILESTAT_SET_ATIM_NOW: fstflags_t = 0x0002;
pub const FILESTAT_SET_MTIM: fstflags_t = 0x0004;
pub const FILESTAT_SET_MTIM_NOW: fstflags_t = 0x0008;
pub const inode_t = u64;
pub const ino_t = inode_t;
pub const linkcount_t = u64;
pub const lookupflags_t = u32;
pub const LOOKUP_SYMLINK_FOLLOW: lookupflags_t = 0x00000001;
/// Open flag bitset; see `O` below.
pub const oflags_t = u16;
pub const O = struct {
    pub const CREAT: oflags_t = 0x0001;
    pub const DIRECTORY: oflags_t = 0x0002;
    pub const EXCL: oflags_t = 0x0004;
    pub const TRUNC: oflags_t = 0x0008;
};
pub const preopentype_t = u8;
// Directory preopen — the only preopen kind defined here.
pub const PREOPENTYPE_DIR: preopentype_t = 0;
/// Description of a preopened capability, tagged by `pr_type`.
pub const prestat_t = extern struct {
    pr_type: preopentype_t,
    u: prestat_u_t,
};
pub const prestat_dir_t = extern struct {
    // Byte length of the preopened directory's name.
    pr_name_len: usize,
};
pub const prestat_u_t = extern union {
    dir: prestat_dir_t,
};
// Socket receive-input (ri) and receive-output (ro) flag bitsets.
pub const riflags_t = u16;
pub const roflags_t = u16;
pub const SOCK = struct {
    pub const RECV_PEEK: riflags_t = 0x0001;
    pub const RECV_WAITALL: riflags_t = 0x0002;
    pub const RECV_DATA_TRUNCATED: roflags_t = 0x0001;
};
/// Capability-rights bitset; see `RIGHT` below.
pub const rights_t = u64;
pub const RIGHT = struct {
    pub const FD_DATASYNC: rights_t = 0x0000000000000001;
    pub const FD_READ: rights_t = 0x0000000000000002;
    pub const FD_SEEK: rights_t = 0x0000000000000004;
    pub const FD_FDSTAT_SET_FLAGS: rights_t = 0x0000000000000008;
    pub const FD_SYNC: rights_t = 0x0000000000000010;
    pub const FD_TELL: rights_t = 0x0000000000000020;
    pub const FD_WRITE: rights_t = 0x0000000000000040;
    pub const FD_ADVISE: rights_t = 0x0000000000000080;
    pub const FD_ALLOCATE: rights_t = 0x0000000000000100;
    pub const PATH_CREATE_DIRECTORY: rights_t = 0x0000000000000200;
    pub const PATH_CREATE_FILE: rights_t = 0x0000000000000400;
    pub const PATH_LINK_SOURCE: rights_t = 0x0000000000000800;
    pub const PATH_LINK_TARGET: rights_t = 0x0000000000001000;
    pub const PATH_OPEN: rights_t = 0x0000000000002000;
    pub const FD_READDIR: rights_t = 0x0000000000004000;
    pub const PATH_READLINK: rights_t = 0x0000000000008000;
    pub const PATH_RENAME_SOURCE: rights_t = 0x0000000000010000;
    pub const PATH_RENAME_TARGET: rights_t = 0x0000000000020000;
    pub const PATH_FILESTAT_GET: rights_t = 0x0000000000040000;
    pub const PATH_FILESTAT_SET_SIZE: rights_t = 0x0000000000080000;
    pub const PATH_FILESTAT_SET_TIMES: rights_t = 0x0000000000100000;
    pub const FD_FILESTAT_GET: rights_t = 0x0000000000200000;
    pub const FD_FILESTAT_SET_SIZE: rights_t = 0x0000000000400000;
    pub const FD_FILESTAT_SET_TIMES: rights_t = 0x0000000000800000;
    pub const PATH_SYMLINK: rights_t = 0x0000000001000000;
    pub const PATH_REMOVE_DIRECTORY: rights_t = 0x0000000002000000;
    pub const PATH_UNLINK_FILE: rights_t = 0x0000000004000000;
    pub const POLL_FD_READWRITE: rights_t = 0x0000000008000000;
    pub const SOCK_SHUTDOWN: rights_t = 0x0000000010000000;
    /// Union of every right defined above.
    pub const ALL: rights_t = FD_DATASYNC |
        FD_READ |
        FD_SEEK |
        FD_FDSTAT_SET_FLAGS |
        FD_SYNC |
        FD_TELL |
        FD_WRITE |
        FD_ADVISE |
        FD_ALLOCATE |
        PATH_CREATE_DIRECTORY |
        PATH_CREATE_FILE |
        PATH_LINK_SOURCE |
        PATH_LINK_TARGET |
        PATH_OPEN |
        FD_READDIR |
        PATH_READLINK |
        PATH_RENAME_SOURCE |
        PATH_RENAME_TARGET |
        PATH_FILESTAT_GET |
        PATH_FILESTAT_SET_SIZE |
        PATH_FILESTAT_SET_TIMES |
        FD_FILESTAT_GET |
        FD_FILESTAT_SET_SIZE |
        FD_FILESTAT_SET_TIMES |
        PATH_SYMLINK |
        PATH_REMOVE_DIRECTORY |
        PATH_UNLINK_FILE |
        POLL_FD_READWRITE |
        SOCK_SHUTDOWN;
};
/// Socket-shutdown flag bitset; see `SHUT`.
pub const sdflags_t = u8;
pub const SHUT = struct {
    pub const RD: sdflags_t = 0x01;
    pub const WR: sdflags_t = 0x02;
};
pub const siflags_t = u16;
/// Signal numbers, POSIX-style numbering.
pub const signal_t = u8;
pub const SIGNONE: signal_t = 0;
pub const SIGHUP: signal_t = 1;
pub const SIGINT: signal_t = 2;
pub const SIGQUIT: signal_t = 3;
pub const SIGILL: signal_t = 4;
pub const SIGTRAP: signal_t = 5;
pub const SIGABRT: signal_t = 6;
pub const SIGBUS: signal_t = 7;
pub const SIGFPE: signal_t = 8;
pub const SIGKILL: signal_t = 9;
pub const SIGUSR1: signal_t = 10;
pub const SIGSEGV: signal_t = 11;
pub const SIGUSR2: signal_t = 12;
pub const SIGPIPE: signal_t = 13;
pub const SIGALRM: signal_t = 14;
pub const SIGTERM: signal_t = 15;
pub const SIGCHLD: signal_t = 16;
pub const SIGCONT: signal_t = 17;
pub const SIGSTOP: signal_t = 18;
pub const SIGTSTP: signal_t = 19;
pub const SIGTTIN: signal_t = 20;
pub const SIGTTOU: signal_t = 21;
pub const SIGURG: signal_t = 22;
pub const SIGXCPU: signal_t = 23;
pub const SIGXFSZ: signal_t = 24;
pub const SIGVTALRM: signal_t = 25;
pub const SIGPROF: signal_t = 26;
pub const SIGWINCH: signal_t = 27;
pub const SIGPOLL: signal_t = 28;
pub const SIGPWR: signal_t = 29;
pub const SIGSYS: signal_t = 30;
/// Clock-subscription flag bitset.
pub const subclockflags_t = u16;
// Interpret `timeout` as an absolute time rather than a relative delay.
pub const SUBSCRIPTION_CLOCK_ABSTIME: subclockflags_t = 0x0001;
/// A request to be notified about an event (input for polling).
pub const subscription_t = extern struct {
    // Opaque value echoed back in the resulting event_t.
    userdata: userdata_t,
    u: subscription_u_t,
};
pub const subscription_clock_t = extern struct {
    id: clockid_t,
    timeout: timestamp_t,
    precision: timestamp_t,
    flags: subclockflags_t,
};
pub const subscription_fd_readwrite_t = extern struct {
    fd: fd_t,
};
/// Tagged payload of a subscription; `tag` holds an `eventtype_t` value.
pub const subscription_u_t = extern struct {
    tag: eventtype_t,
    u: subscription_u_u_t,
};
pub const subscription_u_u_t = extern union {
    clock: subscription_clock_t,
    fd_read: subscription_fd_readwrite_t,
    fd_write: subscription_fd_readwrite_t,
};
pub const timestamp_t = u64;
pub const userdata_t = u64;
/// Also known as `WHENCE`.
pub const whence_t = enum(u8) { SET, CUR, END };
/// POSIX-style `S_IF*` file-mode bits.
pub const S = struct {
    pub const IEXEC = @compileError("TODO audit this");
    pub const IFBLK = 0x6000;
    pub const IFCHR = 0x2000;
    pub const IFDIR = 0x4000;
    pub const IFIFO = 0xc000;
    pub const IFLNK = 0xa000;
    // NOTE: includes the nonstandard IFSOCK bit below, so IFMT == 0xe001.
    pub const IFMT = IFBLK | IFCHR | IFDIR | IFIFO | IFLNK | IFREG | IFSOCK;
    pub const IFREG = 0x8000;
    // There's no concept of UNIX domain socket but we define this value here in order to line with other OSes.
    pub const IFSOCK = 0x1;
};
/// `flock`-style lock operation bits.
pub const LOCK = struct {
    pub const SH = 0x1;
    pub const EX = 0x2;
    pub const NB = 0x4;
    pub const UN = 0x8;
};
| https://raw.githubusercontent.com/kraxli/dev_tools_mswindows/1d1a8f61299e4b7ba356fae3a37af0ddc8daf356/zig-windows-x86_64-0.9.1/lib/std/os/wasi.zig |
const Fs = @import("fs.zig");
const std = @import("std");
const bun = @import("root").bun;
const string = bun.string;
const Output = bun.Output;
const Global = bun.Global;
const Environment = bun.Environment;
const strings = bun.strings;
const MutableString = bun.MutableString;
const CodePoint = bun.CodePoint;
const StoredFileDescriptorType = bun.StoredFileDescriptorType;
const FeatureFlags = bun.FeatureFlags;
const stringZ = bun.stringZ;
const default_allocator = bun.default_allocator;
const C = bun.C;
const options = @import("./options.zig");
const import_record = @import("import_record.zig");
const logger = bun.logger;
const Options = options;
const resolver = @import("./resolver/resolver.zig");
const _linker = @import("./linker.zig");
const URL = @import("./url.zig").URL;
// U+FFFD REPLACEMENT CHARACTER — substituted for invalid escape values
// (see Scanner.consumeEscape).
const replacementCharacter: CodePoint = 0xFFFD;
/// A contiguous piece of the stylesheet emitted by the Scanner:
/// either verbatim text, a `url()` token, or an `@import` rule.
pub const Chunk = struct {
    // Entire chunk
    range: logger.Range,
    content: Content,
    pub const Content = union(Tag) {
        t_url: TextContent,
        t_verbatim: Verbatim,
        t_import: Import,
    };
    /// Raw source bytes covered by this chunk's range.
    pub fn raw(chunk: *const Chunk, source: *const logger.Source) string {
        return source.contents[@as(usize, @intCast(chunk.range.loc.start))..][0..@as(usize, @intCast(chunk.range.len))];
    }
    // pub fn string(chunk: *const Chunk, source: *const logger.Source) string {
    //     switch (chunk.content) {
    //         .t_url => |url| {
    //             var str = url.utf8;
    //             var start: i32 = 4;
    //             var end: i32 = chunk.range.len - 1;
    //             while (start < end and isWhitespace(str[start])) {
    //                 start += 1;
    //             }
    //             while (start < end and isWhitespace(str[end - 1])) {
    //                 end -= 1;
    //             }
    //             return str;
    //         },
    //         .t_import => |import| {
    //             if (import.url) {}
    //         },
    //         else => {
    //             return chunk.raw(source);
    //         },
    //     }
    // }
    /// Text of a url() token or import target.
    pub const TextContent = struct {
        // Quote style of the original token, preserved so output round-trips.
        quote: Quote = .none,
        utf8: string,
        // False when the scanner hit a malformed url() and recovered.
        valid: bool = true,
        // True when the text contains backslash escapes that were not decoded.
        needs_decode_escape: bool = false,
        pub const Quote = enum {
            none,
            double,
            single,
        };
    };
    pub const Import = struct {
        // True when the import target was written as url(...) rather than a bare string.
        url: bool = false,
        text: TextContent,
        supports: string = "",
        // @import can contain media queries and other stuff
        media_queries_str: string = "",
        // Everything between the import target and the terminating ";".
        suffix: string = "",
    };
    pub const Verbatim = struct {};
    pub const Tag = enum {
        t_url,
        t_verbatim,
        t_import,
    };
};
/// Token kinds for the CSS scanner.
/// NOTE(review): not referenced by the visible code in this file — possibly
/// reserved for a future, fuller tokenizer.
pub const Token = enum {
    t_end_of_file,
    t_semicolon,
    t_whitespace,
    t_at_import,
    t_url,
    t_verbatim,
    t_string,
    t_bad_string,
};
// U+000C FORM FEED. CSS tokenization treats form feed as a newline; the name
// says "LineFeed" but the value is intentionally 0x0C, not 0x0A.
const escLineFeed = 0x0C;
// This is not a CSS parser.
// All this does is scan for URLs and @import statements
// Once found, it resolves & rewrites them
// Eventually, there will be a real CSS parser in here.
// But, no time yet.
/// Streaming scanner for `url()` tokens and `@import` rules.
/// This is not a full CSS parser: strings and comments are tokenized only far
/// enough to avoid false matches; everything else passes through as verbatim
/// chunks (see `next`).
pub const Scanner = struct {
    /// Byte offset of the next codepoint to decode.
    current: usize = 0,
    /// Byte offset where the chunk currently being built starts.
    start: usize = 0,
    /// Byte offset of the current codepoint.
    end: usize = 0,
    log: *logger.Log,
    has_newline_before: bool = false,
    /// True after `:` or `,` — positions where a `url()` token may follow.
    has_delimiter_before: bool = false,
    allocator: std.mem.Allocator,
    source: *const logger.Source,
    /// Current decoded codepoint; -1 means end of input.
    codepoint: CodePoint = -1,
    approximate_newline_count: usize = 0,
    pub fn init(log: *logger.Log, allocator: std.mem.Allocator, source: *const logger.Source) Scanner {
        return Scanner{ .log = log, .source = source, .allocator = allocator };
    }
    /// Range covering `start..end`, for diagnostics.
    pub fn range(scanner: *Scanner) logger.Range {
        return logger.Range{
            .loc = .{ .start = @as(i32, @intCast(scanner.start)) },
            .len = @as(i32, @intCast(scanner.end - scanner.start)),
        };
    }
    /// Advance one codepoint, keeping the newline count roughly up to date.
    pub fn step(scanner: *Scanner) void {
        scanner.codepoint = scanner.nextCodepoint();
        scanner.approximate_newline_count += @intFromBool(scanner.codepoint == '\n');
    }
    // NOTE(review): declared to return `string` but has an empty body; this only
    // compiles because it is never referenced (Zig analyzes functions lazily).
    pub fn raw(_: *Scanner) string {}
    /// True when the scanner sits on a backslash that begins a valid escape,
    /// i.e. the following bytes decode as UTF-8. Pure lookahead; does not advance.
    pub fn isValidEscape(scanner: *Scanner) bool {
        if (scanner.codepoint != '\\') return false;
        const slice = scanner.nextCodepointSlice(false);
        return switch (slice.len) {
            0 => false,
            1 => true,
            2 => (std.unicode.utf8Decode2(slice) catch 0) > 0,
            3 => (std.unicode.utf8Decode3(slice) catch 0) > 0,
            4 => (std.unicode.utf8Decode4(slice) catch 0) > 0,
            else => false,
        };
    }
    /// Consume a quoted string; call with the scanner on the opening quote.
    /// Returns the contents without quotes (escapes are skipped, not decoded),
    /// or null (after logging an error) when the string is unterminated.
    pub fn consumeString(
        scanner: *Scanner,
        comptime quote: CodePoint,
    ) ?string {
        const start = scanner.current;
        scanner.step();
        while (true) {
            switch (scanner.codepoint) {
                '\\' => {
                    scanner.step();
                    // Handle Windows CRLF
                    if (scanner.codepoint == '\r') {
                        scanner.step();
                        if (scanner.codepoint == '\n') {
                            scanner.step();
                        }
                        continue;
                    }
                    // Otherwise, fall through to ignore the character after the backslash
                },
                -1 => {
                    scanner.end = scanner.current;
                    scanner.log.addRangeError(
                        scanner.source,
                        scanner.range(),
                        "Unterminated string token",
                    ) catch unreachable;
                    return null;
                },
                '\n', '\r', escLineFeed => {
                    scanner.end = scanner.current;
                    scanner.log.addRangeError(
                        scanner.source,
                        scanner.range(),
                        "Unterminated string token",
                    ) catch unreachable;
                    return null;
                },
                quote => {
                    const result = scanner.source.contents[start..scanner.end];
                    scanner.step();
                    return result;
                },
                else => {},
            }
            scanner.step();
        }
        unreachable;
    }
    /// Skip the body of a `/* ... */` comment; logs if EOF is hit first.
    pub fn consumeToEndOfMultiLineComment(scanner: *Scanner, start_range: logger.Range) void {
        while (true) {
            switch (scanner.codepoint) {
                '*' => {
                    scanner.step();
                    if (scanner.codepoint == '/') {
                        scanner.step();
                        return;
                    }
                },
                -1 => {
                    scanner.log.addRangeError(scanner.source, start_range, "Expected \"*/\" to terminate multi-line comment") catch {};
                    return;
                },
                else => {
                    scanner.step();
                },
            }
        }
    }
    /// Skip to the end of a `//` comment (tolerated even though CSS has none).
    pub fn consumeToEndOfSingleLineComment(scanner: *Scanner) void {
        while (!isNewline(scanner.codepoint) and scanner.codepoint != -1) {
            scanner.step();
        }
        // scanner.log.addRangeWarning(
        //     scanner.source,
        //     scanner.range(),
        //     "Comments in CSS use \"/* ... */\" instead of \"//\"",
        // ) catch {};
    }
    /// Consume an unquoted url() body up to the closing ")".
    /// On malformed input, logs, sets `.valid = false`, and skips the rest of
    /// the bad token so scanning can continue.
    pub fn consumeURL(scanner: *Scanner) Chunk.TextContent {
        var text = Chunk.TextContent{ .utf8 = "" };
        const start = scanner.end;
        validURL: while (true) {
            switch (scanner.codepoint) {
                ')' => {
                    text.utf8 = scanner.source.contents[start..scanner.end];
                    scanner.step();
                    return text;
                },
                -1 => {
                    const loc = logger.Loc{ .start = @as(i32, @intCast(scanner.end)) };
                    scanner.log.addError(scanner.source, loc, "Expected \")\" to end URL token") catch {};
                    return text;
                },
                '\t', '\n', '\r', escLineFeed => {
                    scanner.step();
                    while (isWhitespace(scanner.codepoint)) {
                        scanner.step();
                    }
                    text.utf8 = scanner.source.contents[start..scanner.end];
                    if (scanner.codepoint != ')') {
                        const loc = logger.Loc{ .start = @as(i32, @intCast(scanner.end)) };
                        scanner.log.addError(scanner.source, loc, "Expected \")\" to end URL token") catch {};
                        break :validURL;
                    }
                    scanner.step();
                    return text;
                },
                '"', '\'', '(' => {
                    const r = logger.Range{ .loc = logger.Loc{ .start = @as(i32, @intCast(start)) }, .len = @as(i32, @intCast(scanner.end - start)) };
                    scanner.log.addRangeError(scanner.source, r, "Expected \")\" to end URL token") catch {};
                    break :validURL;
                },
                '\\' => {
                    text.needs_decode_escape = true;
                    if (!scanner.isValidEscape()) {
                        const loc = logger.Loc{
                            .start = @as(i32, @intCast(scanner.end)),
                        };
                        scanner.log.addError(scanner.source, loc, "Expected \")\" to end URL token") catch {};
                        break :validURL;
                    }
                    _ = scanner.consumeEscape();
                },
                else => {
                    if (isNonPrintable(scanner.codepoint)) {
                        const r = logger.Range{
                            .loc = logger.Loc{
                                .start = @as(i32, @intCast(start)),
                            },
                            .len = 1,
                        };
                        scanner.log.addRangeError(scanner.source, r, "Invalid escape") catch {};
                        break :validURL;
                    }
                    scanner.step();
                },
            }
        }
        text.valid = false;
        // Consume the remnants of a bad url
        while (true) {
            switch (scanner.codepoint) {
                ')', -1 => {
                    scanner.step();
                    text.utf8 = scanner.source.contents[start..scanner.end];
                    return text;
                },
                '\\' => {
                    text.needs_decode_escape = true;
                    if (scanner.isValidEscape()) {
                        _ = scanner.consumeEscape();
                    }
                },
                else => {},
            }
            scanner.step();
        }
        return text;
    }
    // Shared across all Scanner instances (container-level var), so the
    // Tailwind warning is emitted at most once per process.
    var did_warn_tailwind = false;
    pub fn warnTailwind(scanner: *Scanner, start: usize) void {
        if (did_warn_tailwind) return;
        did_warn_tailwind = true;
        scanner.log.addWarningFmt(
            scanner.source,
            logger.usize2Loc(start),
            scanner.allocator,
            "To use Tailwind with bun, use the Tailwind CLI and import the processed .css file.\nLearn more: https://tailwindcss.com/docs/installation#watching-for-changes",
            .{},
        ) catch {};
    }
    /// Drive the scan, emitting chunks through `writeChunk(writer, chunk)`.
    /// Pending verbatim text is flushed whenever a url()/@import chunk is
    /// produced. `import_behavior` controls @import handling: `.scan` stops at
    /// the first `{`/`}`, `.keep` emits @import chunks, `.omit` drops them.
    pub fn next(
        scanner: *Scanner,
        comptime import_behavior: ImportBehavior,
        comptime WriterType: type,
        writer: WriterType,
        writeChunk: (fn (ctx: WriterType, Chunk) anyerror!void),
    ) anyerror!void {
        scanner.has_newline_before = scanner.end == 0;
        scanner.has_delimiter_before = false;
        scanner.step();
        restart: while (true) {
            var chunk = Chunk{
                .range = logger.Range{
                    .loc = .{ .start = @as(i32, @intCast(scanner.end)) },
                    .len = 0,
                },
                .content = .{
                    .t_verbatim = .{},
                },
            };
            scanner.start = scanner.end;
            toplevel: while (true) {
                // We only care about two things.
                // 1. url()
                // 2. @import
                // To correctly parse, url(), we need to verify that the character preceding it is either whitespace, a colon, or a comma
                // We also need to parse strings and comments, or else we risk resolving comments like this /* url(hi.jpg) */
                switch (scanner.codepoint) {
                    -1 => {
                        chunk.range.len = @as(i32, @intCast(scanner.end)) - chunk.range.loc.start;
                        chunk.content.t_verbatim = .{};
                        try writeChunk(writer, chunk);
                        return;
                    },
                    '\t', '\n', '\r', escLineFeed => {
                        scanner.has_newline_before = true;
                        scanner.step();
                        continue;
                    },
                    // Ensure whitespace doesn't affect scanner.has_delimiter_before
                    ' ' => {},
                    ':', ',' => {
                        scanner.has_delimiter_before = true;
                    },
                    '{', '}' => {
                        scanner.has_delimiter_before = false;
                        // Heuristic:
                        // If we're only scanning the imports, as soon as there's a curly brace somewhere we can assume that @import is done.
                        // @import only appears at the top of the file. Only @charset is allowed to be above it.
                        if (import_behavior == .scan) {
                            return;
                        }
                    },
                    // this is a little hacky, but it should work since we're not parsing scopes
                    ';' => {
                        scanner.has_delimiter_before = false;
                    },
                    'u', 'U' => {
                        // url() always appears on the property value side
                        // so we should ignore it if it's part of a different token
                        if (!scanner.has_delimiter_before) {
                            scanner.step();
                            continue :toplevel;
                        }
                        const url_start = scanner.end;
                        scanner.step();
                        switch (scanner.codepoint) {
                            'r', 'R' => {},
                            else => {
                                continue;
                            },
                        }
                        scanner.step();
                        switch (scanner.codepoint) {
                            'l', 'L' => {},
                            else => {
                                continue;
                            },
                        }
                        scanner.step();
                        if (scanner.codepoint != '(') {
                            continue;
                        }
                        scanner.step();
                        var url_text: Chunk.TextContent = undefined;
                        switch (scanner.codepoint) {
                            '\'' => {
                                const str = scanner.consumeString('\'') orelse return error.SyntaxError;
                                if (scanner.codepoint != ')') {
                                    continue;
                                }
                                scanner.step();
                                // FIX: was `.quote = .double`; a single-quoted url() must
                                // round-trip with single quotes (matches the @import path below).
                                url_text = .{ .utf8 = str, .quote = .single };
                            },
                            '"' => {
                                const str = scanner.consumeString('"') orelse return error.SyntaxError;
                                if (scanner.codepoint != ')') {
                                    continue;
                                }
                                scanner.step();
                                // FIX: was `.quote = .single`; see above.
                                url_text = .{ .utf8 = str, .quote = .double };
                            },
                            else => {
                                url_text = scanner.consumeURL();
                            },
                        }
                        chunk.range.len = @as(i32, @intCast(url_start)) - chunk.range.loc.start;
                        chunk.content = .{ .t_verbatim = .{} };
                        // flush the pending chunk
                        try writeChunk(writer, chunk);
                        chunk.range.loc.start = @as(i32, @intCast(url_start));
                        chunk.range.len = @as(i32, @intCast(scanner.end)) - chunk.range.loc.start;
                        chunk.content = .{ .t_url = url_text };
                        try writeChunk(writer, chunk);
                        scanner.has_delimiter_before = false;
                        continue :restart;
                    },
                    '@' => {
                        const start = scanner.end;
                        scanner.step();
                        switch (scanner.codepoint) {
                            'i' => {},
                            't' => {
                                // Possibly a "@tailwind <word>;" directive — warn once.
                                scanner.step();
                                if (scanner.codepoint != 'a') continue :toplevel;
                                scanner.step();
                                if (scanner.codepoint != 'i') continue :toplevel;
                                scanner.step();
                                if (scanner.codepoint != 'l') continue :toplevel;
                                scanner.step();
                                if (scanner.codepoint != 'w') continue :toplevel;
                                scanner.step();
                                if (scanner.codepoint != 'i') continue :toplevel;
                                scanner.step();
                                if (scanner.codepoint != 'n') continue :toplevel;
                                scanner.step();
                                if (scanner.codepoint != 'd') continue :toplevel;
                                scanner.step();
                                if (scanner.codepoint != ' ') continue :toplevel;
                                scanner.step();
                                const word_start = scanner.end;
                                while (switch (scanner.codepoint) {
                                    'a'...'z', 'A'...'Z' => true,
                                    else => false,
                                }) {
                                    scanner.step();
                                }
                                const word = scanner.source.contents[word_start..scanner.end];
                                while (switch (scanner.codepoint) {
                                    ' ', '\n', '\r' => true,
                                    else => false,
                                }) {
                                    scanner.step();
                                }
                                if (scanner.codepoint != ';') continue :toplevel;
                                // FIX: guard against "@tailwind ;" with no word, which
                                // previously indexed word[0] out of bounds.
                                if (word.len == 0) continue :toplevel;
                                switch (word[0]) {
                                    'b' => {
                                        if (strings.eqlComptime(word, "base")) {
                                            scanner.warnTailwind(start);
                                        }
                                    },
                                    'c' => {
                                        if (strings.eqlComptime(word, "components")) {
                                            scanner.warnTailwind(start);
                                        }
                                    },
                                    'u' => {
                                        if (strings.eqlComptime(word, "utilities")) {
                                            scanner.warnTailwind(start);
                                        }
                                    },
                                    's' => {
                                        if (strings.eqlComptime(word, "screens")) {
                                            scanner.warnTailwind(start);
                                        }
                                    },
                                    else => continue :toplevel,
                                }
                                continue :toplevel;
                            },
                            else => continue :toplevel,
                        }
                        scanner.step();
                        if (scanner.codepoint != 'm') continue :toplevel;
                        scanner.step();
                        if (scanner.codepoint != 'p') continue :toplevel;
                        scanner.step();
                        if (scanner.codepoint != 'o') continue :toplevel;
                        scanner.step();
                        if (scanner.codepoint != 'r') continue :toplevel;
                        scanner.step();
                        if (scanner.codepoint != 't') continue :toplevel;
                        scanner.step();
                        if (scanner.codepoint != ' ') continue :toplevel;
                        // Now that we know to expect an import url, we flush the chunk
                        chunk.range.len = @as(i32, @intCast(start)) - chunk.range.loc.start;
                        chunk.content = .{ .t_verbatim = .{} };
                        // flush the pending chunk
                        try writeChunk(writer, chunk);
                        // Don't write the .start until we know it's an @import rule
                        // We want to avoid messing with other rules
                        scanner.start = start;
                        // "Imported rules must precede all other types of rule"
                        // https://developer.mozilla.org/en-US/docs/Web/CSS/@import
                        // @import url;
                        // @import url list-of-media-queries;
                        // @import url supports( supports-query );
                        // @import url supports( supports-query ) list-of-media-queries;
                        while (isWhitespace(scanner.codepoint)) {
                            scanner.step();
                        }
                        var import = Chunk.Import{
                            .text = .{
                                .utf8 = "",
                            },
                        };
                        switch (scanner.codepoint) {
                            // spongebob-case url() are supported, I guess.
                            // uRL()
                            // uRL()
                            // URl()
                            'u', 'U' => {
                                scanner.step();
                                switch (scanner.codepoint) {
                                    'r', 'R' => {},
                                    else => {
                                        scanner.log.addError(
                                            scanner.source,
                                            logger.Loc{ .start = @as(i32, @intCast(scanner.end)) },
                                            "Expected @import to start with a string or url()",
                                        ) catch {};
                                        return error.SyntaxError;
                                    },
                                }
                                scanner.step();
                                switch (scanner.codepoint) {
                                    'l', 'L' => {},
                                    else => {
                                        scanner.log.addError(
                                            scanner.source,
                                            logger.Loc{ .start = @as(i32, @intCast(scanner.end)) },
                                            "Expected @import to start with a \", ' or url()",
                                        ) catch {};
                                        return error.SyntaxError;
                                    },
                                }
                                scanner.step();
                                if (scanner.codepoint != '(') {
                                    scanner.log.addError(
                                        scanner.source,
                                        logger.Loc{ .start = @as(i32, @intCast(scanner.end)) },
                                        "Expected \"(\" in @import url",
                                    ) catch {};
                                    return error.SyntaxError;
                                }
                                scanner.step();
                                var url_text: Chunk.TextContent = undefined;
                                switch (scanner.codepoint) {
                                    '\'' => {
                                        const str = scanner.consumeString('\'') orelse return error.SyntaxError;
                                        if (scanner.codepoint != ')') {
                                            continue;
                                        }
                                        scanner.step();
                                        url_text = .{ .utf8 = str, .quote = .single };
                                    },
                                    '"' => {
                                        const str = scanner.consumeString('"') orelse return error.SyntaxError;
                                        if (scanner.codepoint != ')') {
                                            continue;
                                        }
                                        scanner.step();
                                        url_text = .{ .utf8 = str, .quote = .double };
                                    },
                                    else => {
                                        url_text = scanner.consumeURL();
                                    },
                                }
                                import.text = url_text;
                            },
                            '"' => {
                                import.text.quote = .double;
                                if (scanner.consumeString('"')) |str| {
                                    import.text.utf8 = str;
                                } else {
                                    return error.SyntaxError;
                                }
                            },
                            '\'' => {
                                import.text.quote = .single;
                                if (scanner.consumeString('\'')) |str| {
                                    import.text.utf8 = str;
                                } else {
                                    return error.SyntaxError;
                                }
                            },
                            else => {
                                return error.SyntaxError;
                            },
                        }
                        // Capture everything up to ";" (media queries, supports(...))
                        // verbatim as the import's suffix.
                        const suffix_start = scanner.end;
                        get_suffix: while (true) {
                            switch (scanner.codepoint) {
                                ';' => {
                                    scanner.step();
                                    import.suffix = scanner.source.contents[suffix_start..scanner.end];
                                    scanner.has_delimiter_before = false;
                                    break :get_suffix;
                                },
                                -1 => {
                                    scanner.log.addError(
                                        scanner.source,
                                        logger.Loc{ .start = @as(i32, @intCast(scanner.end)) },
                                        "Expected \";\" at end of @import",
                                    ) catch {};
                                    return;
                                },
                                else => {},
                            }
                            scanner.step();
                        }
                        if (import_behavior == .scan or import_behavior == .keep) {
                            chunk.range.len = @as(i32, @intCast(scanner.end)) - @max(chunk.range.loc.start, 0);
                            chunk.content = .{ .t_import = import };
                            try writeChunk(writer, chunk);
                        }
                        scanner.step();
                        continue :restart;
                    },
                    // We don't actually care what the values are here, we just want to avoid confusing strings for URLs.
                    '\'' => {
                        scanner.has_delimiter_before = false;
                        if (scanner.consumeString('\'') == null) {
                            return error.SyntaxError;
                        }
                    },
                    '"' => {
                        scanner.has_delimiter_before = false;
                        if (scanner.consumeString('"') == null) {
                            return error.SyntaxError;
                        }
                    },
                    // Skip comments
                    '/' => {
                        scanner.step();
                        switch (scanner.codepoint) {
                            '*' => {
                                scanner.step();
                                chunk.range.len = @as(i32, @intCast(scanner.end));
                                scanner.consumeToEndOfMultiLineComment(chunk.range);
                            },
                            '/' => {
                                scanner.step();
                                scanner.consumeToEndOfSingleLineComment();
                                continue;
                            },
                            else => {
                                continue;
                            },
                        }
                    },
                    else => {
                        scanner.has_delimiter_before = false;
                    },
                }
                scanner.step();
            }
        }
    }
    /// Consume a CSS escape after the backslash: up to six hex digits (plus an
    /// optional trailing whitespace terminator), or a literal character.
    /// Returns U+FFFD for NUL, surrogates, and out-of-range values.
    pub fn consumeEscape(scanner: *Scanner) CodePoint {
        scanner.step();
        const c = scanner.codepoint;
        if (isHex(c)) |__hex| {
            var hex = __hex;
            scanner.step();
            value: {
                if (isHex(scanner.codepoint)) |_hex| {
                    scanner.step();
                    hex = hex * 16 + _hex;
                } else {
                    break :value;
                }
                if (isHex(scanner.codepoint)) |_hex| {
                    scanner.step();
                    hex = hex * 16 + _hex;
                } else {
                    break :value;
                }
                if (isHex(scanner.codepoint)) |_hex| {
                    scanner.step();
                    hex = hex * 16 + _hex;
                } else {
                    break :value;
                }
                if (isHex(scanner.codepoint)) |_hex| {
                    scanner.step();
                    hex = hex * 16 + _hex;
                } else {
                    break :value;
                }
                break :value;
            }
            if (isWhitespace(scanner.codepoint)) {
                scanner.step();
            }
            return switch (hex) {
                0, 0xD800...0xDFFF, 0x10FFFF...std.math.maxInt(CodePoint) => replacementCharacter,
                else => hex,
            };
        }
        if (c == -1) return replacementCharacter;
        scanner.step();
        return c;
    }
    // Decode the UTF-8 sequence at `current`; when `advance`, also move
    // `end`/`current` forward. Returns "" at end of input.
    inline fn nextCodepointSlice(it: *Scanner, comptime advance: bool) []const u8 {
        @setRuntimeSafety(false);
        if (comptime Environment.allow_assert) {
            bun.assert(it.source.contents.len > 0);
        }
        const cp_len = strings.utf8ByteSequenceLength(it.source.contents[it.current]);
        if (advance) {
            it.end = it.current;
            it.current += cp_len;
        }
        return if (!(it.current > it.source.contents.len)) it.source.contents[it.current - cp_len .. it.current] else "";
    }
    pub inline fn nextCodepoint(it: *Scanner) CodePoint {
        const slice = it.nextCodepointSlice(true);
        @setRuntimeSafety(false);
        return switch (slice.len) {
            0 => -1,
            1 => @as(CodePoint, @intCast(slice[0])),
            2 => @as(CodePoint, @intCast(std.unicode.utf8Decode2(slice) catch unreachable)),
            3 => @as(CodePoint, @intCast(std.unicode.utf8Decode3(slice) catch unreachable)),
            4 => @as(CodePoint, @intCast(std.unicode.utf8Decode4(slice) catch unreachable)),
            else => unreachable,
        };
    }
};
/// True for the codepoints this scanner treats as whitespace:
/// space, tab, LF, CR, and form feed.
fn isWhitespace(c: CodePoint) bool {
    return c == ' ' or c == '\t' or c == '\n' or c == '\r' or c == escLineFeed;
}
/// True for codepoints the scanner treats as line terminators.
/// NOTE(review): '\t' is included here, matching the original set — confirm
/// whether tab was intended to count as a newline.
fn isNewline(c: CodePoint) bool {
    return c == '\t' or c == '\n' or c == '\r' or c == escLineFeed;
}
/// True for non-printable codepoints that are illegal in an unquoted url():
/// C0 controls except tab/newlines, plus DEL (0x7F).
fn isNonPrintable(c: CodePoint) bool {
    if (c == 0x0B or c == 0x7F) return true;
    return (c >= 0 and c <= 0x08) or (c >= 0x0E and c <= 0x1F);
}
/// Decode a hexadecimal digit: returns its value (0-15), or null when `c`
/// is not a hex digit.
pub fn isHex(c: CodePoint) ?CodePoint {
    if (c >= '0' and c <= '9') return c - '0';
    if (c >= 'a' and c <= 'f') return c - 'a' + 10;
    if (c >= 'A' and c <= 'F') return c - 'A' + 10;
    return null;
}
/// How Scanner.next treats @import rules: keep them in the output, omit them
/// (already bundled), or scan-only (record targets, stop at the first brace).
pub const ImportBehavior = enum { keep, omit, scan };
/// Builds a chunk-writer type that consumes Scanner chunks and emits rewritten
/// CSS. `WriterType` is the output sink (needs `writeAll`, optionally
/// `copyFileRange`), `LinkerType` resolves url()/@import paths via
/// `resolveCSS`, `import_path_format` controls how resolved paths are printed,
/// and `BuildContextType` supplies bundler hooks (`origin`, `addCSSImport`).
pub fn NewWriter(
    comptime WriterType: type,
    comptime LinkerType: type,
    comptime import_path_format: Options.BundleOptions.ImportPathFormat,
    comptime BuildContextType: type,
) type {
    return struct {
        const Writer = @This();
        ctx: WriterType,
        linker: LinkerType,
        source: *const logger.Source,
        // Assigned by the caller after init() when scanning/bundling.
        buildCtx: BuildContextType = undefined,
        log: *logger.Log,
        pub fn init(
            source: *const logger.Source,
            ctx: WriterType,
            linker: LinkerType,
            log: *logger.Log,
        ) Writer {
            return Writer{
                .ctx = ctx,
                .linker = linker,
                .source = source,
                .log = log,
            };
        }
        /// The Source must not be empty.
        /// Scan-only pass: records @import targets through buildCtx without
        /// writing any output.
        pub fn scan(
            writer: *Writer,
            log: *logger.Log,
            allocator: std.mem.Allocator,
        ) anyerror!void {
            bun.assert(writer.source.contents.len > 0);
            var scanner = Scanner.init(
                log,
                allocator,
                writer.source,
            );
            try scanner.next(.scan, @TypeOf(writer), writer, scanChunk);
        }
        /// The Source must not be empty.
        /// Writes the source with @import statements omitted (already bundled);
        /// returns the approximate number of newlines scanned.
        pub fn append(
            writer: *Writer,
            log: *logger.Log,
            allocator: std.mem.Allocator,
        ) !usize {
            bun.assert(writer.source.contents.len > 0);
            var scanner = Scanner.init(
                log,
                allocator,
                writer.source,
            );
            try scanner.next(.omit, @TypeOf(writer), writer, writeBundledChunk);
            return scanner.approximate_newline_count;
        }
        /// The Source must not be empty.
        /// Writes the source with @import statements kept (paths rewritten).
        pub fn run(
            writer: *Writer,
            log: *logger.Log,
            allocator: std.mem.Allocator,
        ) anyerror!void {
            bun.assert(writer.source.contents.len > 0);
            var scanner = Scanner.init(
                log,
                allocator,
                writer.source,
            );
            try scanner.next(.keep, @TypeOf(writer), writer, commitChunk);
        }
        // Write `str`, wrapped in the requested quote character (bare for .none).
        fn writeString(writer: *Writer, str: string, quote: Chunk.TextContent.Quote) anyerror!void {
            switch (quote) {
                .none => {
                    try writer.ctx.writeAll(str);
                    return;
                },
                .single => {
                    try writer.ctx.writeAll("'");
                    try writer.ctx.writeAll(str);
                    try writer.ctx.writeAll("'");
                },
                .double => {
                    try writer.ctx.writeAll("\"");
                    try writer.ctx.writeAll(str);
                    try writer.ctx.writeAll("\"");
                },
            }
        }
        // Write `url_str` as a url(...) token, preserving the original quote style.
        fn writeURL(writer: *Writer, url_str: string, text: Chunk.TextContent) anyerror!void {
            switch (text.quote) {
                .none => {
                    try writer.ctx.writeAll("url(");
                },
                .single => {
                    try writer.ctx.writeAll("url('");
                },
                .double => {
                    try writer.ctx.writeAll("url(\"");
                },
            }
            try writer.ctx.writeAll(url_str);
            switch (text.quote) {
                .none => {
                    try writer.ctx.writeAll(")");
                },
                .single => {
                    try writer.ctx.writeAll("')");
                },
                .double => {
                    try writer.ctx.writeAll("\")");
                },
            }
        }
        /// Chunk callback for `scan`: resolves @import targets and registers
        /// non-http(s) ones with the build context. url()/verbatim chunks are
        /// ignored.
        pub fn scanChunk(writer: *Writer, chunk: Chunk) anyerror!void {
            switch (chunk.content) {
                .t_url => {},
                .t_import => |import| {
                    const resolved = writer.linker.resolveCSS(
                        writer.source.path,
                        import.text.utf8,
                        chunk.range,
                        import_record.ImportKind.at,
                        writer.buildCtx.origin,
                        Options.BundleOptions.ImportPathFormat.absolute_path,
                        true,
                    ) catch |err| {
                        switch (err) {
                            error.ModuleNotFound, error.FileNotFound => {
                                writer.log.addResolveError(
                                    writer.source,
                                    chunk.range,
                                    writer.buildCtx.allocator,
                                    "Not Found - \"{s}\"",
                                    .{import.text.utf8},
                                    import_record.ImportKind.at,
                                    err,
                                ) catch {};
                            },
                            else => {},
                        }
                        return err;
                    };
                    // TODO: just check is_external instead
                    if (strings.startsWith(import.text.utf8, "https://") or strings.startsWith(import.text.utf8, "http://")) {
                        return;
                    }
                    try writer.buildCtx.addCSSImport(resolved);
                },
                .t_verbatim => {},
            }
        }
        // Chunk callback for `run`: keep imports.
        pub fn commitChunk(writer: *Writer, chunk: Chunk) anyerror!void {
            return try writeChunk(writer, chunk, false);
        }
        // Chunk callback for `append`: omit imports.
        pub fn writeBundledChunk(writer: *Writer, chunk: Chunk) anyerror!void {
            return try writeChunk(writer, chunk, true);
        }
        /// Emit one chunk: url() and @import paths are rewritten through the
        /// linker; verbatim chunks are copied through unchanged.
        pub fn writeChunk(writer: *Writer, chunk: Chunk, comptime omit_imports: bool) anyerror!void {
            switch (chunk.content) {
                .t_url => |url| {
                    const url_str = try writer.linker.resolveCSS(
                        writer.source.path,
                        url.utf8,
                        chunk.range,
                        import_record.ImportKind.url,
                        writer.buildCtx.origin,
                        import_path_format,
                        false,
                    );
                    try writer.writeURL(url_str, url);
                },
                .t_import => |import| {
                    if (!omit_imports) {
                        const url_str = try writer.linker.resolveCSS(
                            writer.source.path,
                            import.text.utf8,
                            chunk.range,
                            import_record.ImportKind.at,
                            writer.buildCtx.origin,
                            import_path_format,
                            false,
                        );
                        try writer.ctx.writeAll("@import ");
                        if (import.url) {
                            try writer.writeURL(url_str, import.text);
                        } else {
                            try writer.writeString(url_str, import.text.quote);
                        }
                        try writer.ctx.writeAll(import.suffix);
                        try writer.ctx.writeAll("\n");
                    }
                },
                .t_verbatim => {
                    // Prefer the sink's copyFileRange when it provides one.
                    if (comptime std.meta.hasFn(WriterType, "copyFileRange")) {
                        try writer.ctx.copyFileRange(
                            @as(usize, @intCast(chunk.range.loc.start)),
                            @as(
                                usize,
                                @intCast(@as(
                                    usize,
                                    @intCast(chunk.range.len),
                                )),
                            ),
                        );
                    } else {
                        try writer.ctx.writeAll(
                            writer.source.contents[@as(usize, @intCast(chunk.range.loc.start))..][0..@as(
                                usize,
                                @intCast(chunk.range.len),
                            )],
                        );
                    }
                },
            }
        }
    };
}
/// Output statistics produced by the CSS bundler.
pub const CodeCount = struct {
    // Approximate number of newlines in the emitted CSS.
    approximate_newline_count: usize = 0,
    // Total bytes written to the sink.
    written: usize = 0,
};
const ImportQueueFifo = std.fifo.LinearFifo(u32, .Dynamic);
const QueuedList = std.ArrayList(u32);
// Per-thread scratch state reused across bundle() calls; initialized lazily
// on first use (see has_set_global_queue).
// NOTE(review): "queud" is a typo for "queue", kept because other code
// references these names.
threadlocal var global_queued: QueuedList = undefined;
threadlocal var global_import_queud: ImportQueueFifo = undefined;
threadlocal var global_bundle_queud: QueuedList = undefined;
threadlocal var has_set_global_queue = false;
/// Returns a CSS bundler type specialized over the writer, linker, file
/// reader, watcher and filesystem implementations supplied at comptime.
/// The generated type concatenates a CSS entry point and everything it
/// `@import`s into a single output stream.
pub fn NewBundler(
    comptime Writer: type,
    comptime Linker: type,
    comptime FileReader: type,
    comptime Watcher: type,
    comptime FSType: type,
    comptime hot_module_reloading: bool,
    comptime import_path_format: options.BundleOptions.ImportPathFormat,
) type {
    return struct {
        const CSSBundler = @This();
        queued: *QueuedList,
        import_queue: *ImportQueueFifo,
        bundle_queue: *QueuedList,
        writer: Writer,
        watcher: *Watcher,
        fs_reader: FileReader,
        fs: FSType,
        allocator: std.mem.Allocator,
        origin: URL = URL{},

        /// Bundles the CSS file at `absolute_path` together with every file it
        /// transitively imports, writing the concatenated result through
        /// `writer`. Returns the byte count written and an approximate count
        /// of emitted lines.
        pub fn bundle(
            absolute_path: string,
            fs: FSType,
            writer: Writer,
            watcher: *Watcher,
            fs_reader: FileReader,
            hash: u32,
            _: ?StoredFileDescriptorType,
            allocator: std.mem.Allocator,
            log: *logger.Log,
            linker: Linker,
            origin: URL,
        ) !CodeCount {
            // Scratch buffer for formatting the HMR id integers below.
            var int_buf_print: [256]u8 = undefined;
            const start_count = writer.written;
            // Lazily initialize, or reset for reuse, the per-thread queues.
            if (!has_set_global_queue) {
                global_queued = QueuedList.init(default_allocator);
                global_import_queud = ImportQueueFifo.init(default_allocator);
                global_bundle_queud = QueuedList.init(default_allocator);
                has_set_global_queue = true;
            } else {
                global_queued.clearRetainingCapacity();
                // Resetting head/count empties the FIFO without freeing its buffer.
                global_import_queud.head = 0;
                global_import_queud.count = 0;
                global_bundle_queud.clearRetainingCapacity();
            }
            var this = CSSBundler{
                .queued = &global_queued,
                .import_queue = &global_import_queud,
                .bundle_queue = &global_bundle_queud,
                .writer = writer,
                .fs_reader = fs_reader,
                .fs = fs,
                .origin = origin,
                .allocator = allocator,
                .watcher = watcher,
            };
            const CSSWriter = NewWriter(*CSSBundler, Linker, import_path_format, *CSSBundler);
            var css = CSSWriter.init(
                undefined,
                &this,
                linker,
                log,
            );
            css.buildCtx = &this;
            // Phase 1: breadth-first scan. Each scanned file may push more
            // imports onto import_queue via addCSSImport.
            try this.addCSSImport(absolute_path);
            while (this.import_queue.readItem()) |item| {
                // Every queued hash was registered with the watcher in
                // addCSSImport, so a miss here is a programmer error.
                const watcher_id = this.watcher.indexOf(item) orelse unreachable;
                const watch_item = this.watcher.watchlist.get(watcher_id);
                const source = try this.getSource(watch_item.file_path, if (watch_item.fd > 0) watch_item.fd else null);
                css.source = &source;
                if (source.contents.len > 0)
                    try css.scan(log, allocator);
            }
            // This exists to identify the entry point
            // When we do HMR, ask the entire bundle to be regenerated
            // But, we receive a file change event for a file within the bundle
            // So the inner ID is used to say "does this bundle need to be reloaded?"
            // The outer ID is used to say "go ahead and reload this"
            if (hot_module_reloading and FeatureFlags.css_supports_fence and this.bundle_queue.items.len > 0) {
                try this.writeAll("\n@supports (hmr-bid:");
                const int_buf_size = std.fmt.formatIntBuf(&int_buf_print, hash, 10, .upper, .{});
                try this.writeAll(int_buf_print[0..int_buf_size]);
                try this.writeAll(") {}\n");
            }
            var lines_of_code: usize = 0;
            // We LIFO
            // Phase 2: emit in reverse queue order so dependencies precede
            // their importers.
            // NOTE(review): assumes bundle_queue is non-empty (the
            // addCSSImport call above always queues the entry file on a fresh
            // `queued` list); `items.len - 1` would underflow otherwise — confirm.
            var i: i32 = @as(i32, @intCast(this.bundle_queue.items.len - 1));
            while (i >= 0) : (i -= 1) {
                const item = this.bundle_queue.items[@as(usize, @intCast(i))];
                const watcher_id = this.watcher.indexOf(item) orelse unreachable;
                const watch_item = this.watcher.watchlist.get(watcher_id);
                const source = try this.getSource(watch_item.file_path, if (watch_item.fd > 0) watch_item.fd else null);
                css.source = &source;
                const file_path = fs.relativeTo(watch_item.file_path);
                // Per-file HMR fence: ties this chunk of output back to the
                // watched file it came from.
                if (hot_module_reloading and FeatureFlags.css_supports_fence) {
                    try this.writeAll("\n@supports (hmr-wid:");
                    const int_buf_size = std.fmt.formatIntBuf(&int_buf_print, item, 10, .upper, .{});
                    try this.writeAll(int_buf_print[0..int_buf_size]);
                    try this.writeAll(") and (hmr-file:\"");
                    try this.writeAll(file_path);
                    try this.writeAll("\") {}\n");
                }
                // Human-readable provenance comment before each file's output.
                try this.writeAll("/* ");
                try this.writeAll(file_path);
                try this.writeAll("*/\n");
                if (source.contents.len > 0)
                    lines_of_code += try css.append(
                        log,
                        allocator,
                    );
            }
            try this.writer.done();
            return CodeCount{
                .written = @as(usize, @intCast(@max(this.writer.written - start_count, 0))),
                .approximate_newline_count = lines_of_code,
            };
        }

        /// Reads `url` through the pooled file reader and wraps the contents
        /// in a logger.Source for scanning/appending.
        pub fn getSource(this: *CSSBundler, url: string, input_fd: ?StoredFileDescriptorType) !logger.Source {
            const entry = try this.fs_reader.readFile(this.fs, url, 0, true, input_fd);
            return logger.Source.initFile(
                .{
                    .path = Fs.Path.init(url),
                    .contents = entry.contents,
                },
                this.allocator,
            );
        }

        /// Queues a CSS file for scanning and bundling, registering it with
        /// the watcher on first sight. Deduplicates by path hash.
        pub fn addCSSImport(this: *CSSBundler, absolute_path: string) anyerror!void {
            const hash = Watcher.getHash(absolute_path);
            // Already queued this bundle run — nothing to do.
            if (this.queued.items.len > 0 and std.mem.indexOfScalar(u32, this.queued.items, hash) != null) {
                return;
            }
            const watcher_index = this.watcher.indexOf(hash);
            if (watcher_index == null) {
                const file = try std.fs.openFileAbsolute(absolute_path, .{ .mode = .read_only });
                // NOTE(review): `file` is never closed here — presumably the
                // watcher takes ownership of the descriptor; confirm.
                try this.watcher.appendFile(file.handle, absolute_path, hash, .css, 0, null, true);
                if (this.watcher.watchloop_handle == null) {
                    try this.watcher.start();
                }
            }
            try this.import_queue.writeItem(hash);
            try this.queued.append(hash);
            try this.bundle_queue.append(hash);
        }

        /// Forwards raw bytes to the underlying writer.
        pub fn writeAll(this: *CSSBundler, buf: anytype) anyerror!void {
            _ = try this.writer.writeAll(buf);
        }

        // pub fn copyFileRange(this: *CSSBundler, buf: anytype) !void {}
    };
}
| https://raw.githubusercontent.com/oven-sh/bun/fab96a74ea13da04459ea7f62663c4d2fd421778/src/css_scanner.zig |
const common = @import("./common.zig");
const mulf3 = @import("./mulf3.zig").mulf3;
// Re-export the shared compiler-rt panic handler for this compilation unit.
pub const panic = common.panic;

comptime {
    // Expose the multiplication routine under the C ABI symbol name that
    // compiler-generated code expects.
    @export(__mulxf3, .{ .name = "__mulxf3", .linkage = common.linkage, .visibility = common.visibility });
}
/// `f80` soft-float multiplication, delegating to the generic mulf3
/// implementation shared by all float widths.
pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
    const product = mulf3(f80, a, b);
    return product;
}
| https://raw.githubusercontent.com/ziglang/zig/d9bd34fd0533295044ffb4160da41f7873aff905/lib/compiler_rt/mulxf3.zig |
const ChainedStruct = @import("main.zig").ChainedStruct;
const Impl = @import("interface.zig").Impl;
/// Opaque handle to a recorded GPU command buffer. All methods forward to the
/// backend selected via `Impl`.
pub const CommandBuffer = opaque {
    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        // Optional debug label for the command buffer.
        label: ?[*:0]const u8 = null,
    };

    /// Attaches a debug label to the command buffer.
    pub inline fn setLabel(command_buffer: *CommandBuffer, label: [*:0]const u8) void {
        Impl.commandBufferSetLabel(command_buffer, label);
    }

    /// Increments the command buffer's reference count.
    pub inline fn reference(command_buffer: *CommandBuffer) void {
        Impl.commandBufferReference(command_buffer);
    }

    /// Decrements the reference count; the backend frees the buffer when it
    /// reaches zero.
    pub inline fn release(command_buffer: *CommandBuffer) void {
        Impl.commandBufferRelease(command_buffer);
    }
};
| https://raw.githubusercontent.com/hexops/mach/b72f0e11b6d292c2b60789359a61f7ee6d7dc371/src/gpu/command_buffer.zig |
const std = @import("std");
const builtin = @import("builtin");
const str = @import("glue").str;
const RocStr = str.RocStr;
const testing = std.testing;
const expectEqual = testing.expectEqual;
const expect = testing.expect;
const maxInt = std.math.maxInt;
const mem = std.mem;
const Allocator = mem.Allocator;
// Entry points generated by the Roc compiler for this platform.
extern fn roc__mainForHost_1_exposed_generic([*]u8) void;
extern fn roc__mainForHost_1_exposed_size() i64;
extern fn roc__mainForHost_0_caller(*const u8, [*]u8, [*]u8) void;
extern fn roc__mainForHost_0_size() i64;
extern fn roc__mainForHost_0_result_size() i64;

// Alignment used for every host allocation handed to Roc.
const Align = 2 * @alignOf(usize);

// libc functions this host forwards to.
extern fn malloc(size: usize) callconv(.C) ?*align(Align) anyopaque;
extern fn realloc(c_ptr: [*]align(Align) u8, size: usize) callconv(.C) ?*anyopaque;
extern fn free(c_ptr: [*]align(Align) u8) callconv(.C) void;
extern fn memcpy(dst: [*]u8, src: [*]u8, size: usize) callconv(.C) void;
extern fn memset(dst: [*]u8, value: i32, size: usize) void;
extern fn kill(pid: c_int, sig: c_int) c_int;
extern fn shm_open(name: *const i8, oflag: c_int, mode: c_uint) c_int;
extern fn mmap(addr: ?*anyopaque, length: c_uint, prot: c_int, flags: c_int, fd: c_int, offset: c_uint) *anyopaque;
extern fn getppid() c_int;

// When true, every allocator call is traced to stdout.
const DEBUG: bool = false;
/// Allocation hook called by the Roc runtime. Returns null on failure,
/// matching malloc semantics. Optionally traces the call when DEBUG is set.
export fn roc_alloc(size: usize, alignment: u32) callconv(.C) ?*anyopaque {
    const ptr = malloc(size);
    if (DEBUG) {
        const stdout = std.io.getStdOut().writer();
        stdout.print("alloc: {d} (alignment {d}, size {d})\n", .{ ptr, alignment, size }) catch unreachable;
    }
    return ptr;
}
/// Reallocation hook called by the Roc runtime; forwards to libc realloc.
export fn roc_realloc(c_ptr: *anyopaque, new_size: usize, old_size: usize, alignment: u32) callconv(.C) ?*anyopaque {
    if (DEBUG) {
        std.io.getStdOut().writer().print("realloc: {d} (alignment {d}, old_size {d})\n", .{ c_ptr, alignment, old_size }) catch unreachable;
    }
    const aligned_ptr: [*]align(Align) u8 = @alignCast(@ptrCast(c_ptr));
    return realloc(aligned_ptr, new_size);
}
/// Deallocation hook called by the Roc runtime; forwards to libc free.
export fn roc_dealloc(c_ptr: *anyopaque, alignment: u32) callconv(.C) void {
    if (DEBUG) {
        std.io.getStdOut().writer().print("dealloc: {d} (alignment {d})\n", .{ c_ptr, alignment }) catch unreachable;
    }
    const aligned_ptr: [*]align(Align) u8 = @alignCast(@ptrCast(c_ptr));
    free(aligned_ptr);
}
/// Crash hook called by the Roc runtime. tag_id 0 means the Roc standard
/// library crashed; tag_id 1 means the application crashed. Prints the
/// message to stderr and exits with status 1; never returns.
export fn roc_panic(msg: *RocStr, tag_id: u32) callconv(.C) void {
    const stderr = std.io.getStdErr().writer();
    switch (tag_id) {
        0 => {
            stderr.print("Roc standard library crashed with message\n\n {s}\n\nShutting down\n", .{msg.asSlice()}) catch unreachable;
        },
        1 => {
            stderr.print("Application crashed with message\n\n {s}\n\nShutting down\n", .{msg.asSlice()}) catch unreachable;
        },
        // The runtime only ever emits tag ids 0 and 1.
        else => unreachable,
    }
    std.process.exit(1);
}
/// `dbg` hook: prints "[location] source = message" to stderr.
export fn roc_dbg(loc: *RocStr, msg: *RocStr, src: *RocStr) callconv(.C) void {
    const stderr = std.io.getStdErr().writer();
    stderr.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
/// memset hook for the Roc runtime; forwards to libc.
export fn roc_memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void {
    return memset(dst, value, size);
}

// Platform shims exported (under "roc_" symbol names) by the comptime block
// below. NOTE(review): getppid/shm_open/mmap are presumably used by Roc's
// inline-expect IPC machinery — confirm against the Roc platform docs.
fn roc_getppid() callconv(.C) c_int {
    return getppid();
}

/// Windows has no getppid; a stub returning 0 is exported instead.
fn roc_getppid_windows_stub() callconv(.C) c_int {
    return 0;
}

fn roc_shm_open(name: *const i8, oflag: c_int, mode: c_uint) callconv(.C) c_int {
    return shm_open(name, oflag, mode);
}

fn roc_mmap(addr: ?*anyopaque, length: c_uint, prot: c_int, flags: c_int, fd: c_int, offset: c_uint) callconv(.C) *anyopaque {
    return mmap(addr, length, prot, flags, fd, offset);
}
comptime {
    // POSIX platforms export the real process/shared-memory shims; Windows
    // only gets a getppid stub (it lacks the other primitives).
    if (builtin.os.tag == .macos or builtin.os.tag == .linux) {
        @export(roc_getppid, .{ .name = "roc_getppid", .linkage = .Strong });
        @export(roc_mmap, .{ .name = "roc_mmap", .linkage = .Strong });
        @export(roc_shm_open, .{ .name = "roc_shm_open", .linkage = .Strong });
    }
    if (builtin.os.tag == .windows) {
        @export(roc_getppid_windows_stub, .{ .name = "roc_getppid", .linkage = .Strong });
    }
}
// Empty record type matching Roc's unit value.
const Unit = extern struct {};

/// Host entry point: allocates space for the Roc main closure, runs it, and
/// reports the elapsed wall-clock time on stderr.
pub export fn main() u8 {
    const allocator = std.heap.page_allocator;
    const stderr = std.io.getStdErr().writer();

    // NOTE the return size can be zero, which will segfault. Always allocate
    // at least 8 bytes.
    const capacity = @max(8, @as(usize, @intCast(roc__mainForHost_1_exposed_size())));
    const backing = allocator.alignedAlloc(u8, @alignOf(u64), capacity) catch unreachable;
    defer allocator.free(backing);
    var output = @as([*]u8, @ptrCast(backing));

    var timer = std.time.Timer.start() catch unreachable;
    roc__mainForHost_1_exposed_generic(output);
    call_the_closure(output);
    const elapsed_ns = timer.read();
    const elapsed_s = @as(f64, @floatFromInt(elapsed_ns)) / 1_000_000_000.0;
    stderr.print("runtime: {d:.3}ms\n", .{elapsed_s * 1000}) catch unreachable;

    return 0;
}
/// Converts a timespec into fractional seconds.
fn to_seconds(tms: std.os.timespec) f64 {
    const whole = @as(f64, @floatFromInt(tms.tv_sec));
    const fraction = @as(f64, @floatFromInt(tms.tv_nsec)) / 1_000_000_000.0;
    return whole + fraction;
}
/// Invokes the Roc closure produced by mainForHost, allocating space for its
/// result. The result itself is discarded; only the side effects matter.
fn call_the_closure(closure_data_pointer: [*]u8) void {
    const allocator = std.heap.page_allocator;
    const size = roc__mainForHost_0_result_size();
    if (size == 0) {
        // the function call returns an empty record
        // allocating 0 bytes causes issues because the allocator will return a NULL pointer
        // So it's special-cased
        const flags: u8 = 0;
        var result: [1]u8 = .{0};
        roc__mainForHost_0_caller(&flags, closure_data_pointer, &result);
        return;
    }
    const raw_output = allocator.alignedAlloc(u8, @alignOf(u64), @as(usize, @intCast(size))) catch unreachable;
    var output = @as([*]u8, @ptrCast(raw_output));
    defer {
        allocator.free(raw_output);
    }
    // NOTE(review): `flags` appears to be an unused "which closure" selector —
    // always 0 here; confirm against the generated Roc glue.
    const flags: u8 = 0;
    roc__mainForHost_0_caller(&flags, closure_data_pointer, output);
    return;
}
/// Effect: reads one line from stdin. Any read error yields the empty string.
pub export fn roc_fx_getLine() str.RocStr {
    return roc_fx_getLine_help() catch return str.RocStr.empty();
}
/// Reads up to 400 bytes from stdin until '\n' or EOF; EOF yields "".
fn roc_fx_getLine_help() !RocStr {
    const stdin = std.io.getStdIn().reader();
    var buf: [400]u8 = undefined;
    const line: []u8 = (try stdin.readUntilDelimiterOrEof(&buf, '\n')) orelse "";
    // NOTE(review): RocStr.init presumably copies the bytes out of `buf` —
    // confirm, since `buf` is stack memory that dies when this returns.
    return str.RocStr.init(@as([*]const u8, @ptrCast(line)), line.len);
}
/// Effect: writes the string to stdout followed by a newline.
/// Always returns 0.
pub export fn roc_fx_putLine(rocPath: *str.RocStr) i64 {
    const stdout = std.io.getStdOut().writer();
    // Write the whole slice at once instead of the previous byte-at-a-time
    // print loop; the emitted bytes are identical.
    stdout.writeAll(rocPath.asSlice()) catch unreachable;
    stdout.print("\n", .{}) catch unreachable;
    return 0;
}
// Result record for roc_fx_getInt, mirrored by the Roc side of the platform.
// When is_error is true, error_code distinguishes the failure (see
// roc_fx_getInt: 0 = invalid digit, 1 = any other read/parse failure).
const GetInt = extern struct {
    value: i64,
    error_code: u8,
    is_error: bool,
};
/// Effect: reads an i64 from stdin.
/// Success: .{ .is_error = false, .value = n, .error_code = 0 }.
/// Failure: is_error = true with error_code 0 for an invalid digit and 1 for
/// any other read/parse error.
pub export fn roc_fx_getInt() GetInt {
    if (roc_fx_getInt_help()) |value| {
        return GetInt{ .is_error = false, .value = value, .error_code = 0 };
    } else |err| switch (err) {
        error.InvalidCharacter => {
            return GetInt{ .is_error = true, .value = 0, .error_code = 0 };
        },
        else => {
            return GetInt{ .is_error = true, .value = 0, .error_code = 1 };
        },
    }
    // A trailing `return 0;` used to follow here; it was unreachable (every
    // branch above returns) and ill-typed (comptime_int vs GetInt), so it has
    // been removed.
}
/// Reads one line from stdin (up to 40 bytes), trims trailing whitespace —
/// including the '\r' Windows leaves before '\n' — and parses it as a
/// base-10 i64.
fn roc_fx_getInt_help() !i64 {
    const stdin = std.io.getStdIn().reader();
    var buf: [40]u8 = undefined;
    // make sure to strip `\r` on windows
    const raw_line: []u8 = (try stdin.readUntilDelimiterOrEof(&buf, '\n')) orelse "";
    const line = std.mem.trimRight(u8, raw_line, &std.ascii.whitespace);
    return std.fmt.parseInt(i64, line, 10);
}
| https://raw.githubusercontent.com/roc-lang/roc/a6f1408b5c97e2f5eb3b62b0b481b6a29c958e83/examples/cli/effects-platform/host.zig |
// posix-node binaries should work across a very wide range of nodejs versions
// for their given architecture.
//
// "For an addon to remain ABI-compatible across Node.js major versions, it must use
// Node-API exclusively by restricting itself to using #include <node_api.h>."
//
// -- https://nodejs.org/api/n-api.html
//
// TODO: In https://nodejs.org/api/n-api.html#usage they explain that we can do something
// like "#define NAPI_VERSION 3" to ensure our code all works with a specific range
// of nodejs versions.
// Pull the Node-API and libc declarations directly into this namespace so
// callers can use them as `c.napi_*` / `c.malloc` etc.
pub usingnamespace @cImport({
    @cInclude("node_api.h");
    @cInclude("stdlib.h");
});
| https://raw.githubusercontent.com/sagemathinc/cowasm/e8bba5584f03937611619934f0d5858e4d03853d/core/posix-node/src/c.zig |
// Autogenerated from https://www.unicode.org/Public/14.0.0/ucd/. Edits will be lost on update.
/// Returns true if the codepoint has Unicode derived numeric type "Numeric"
/// (fractions, CJK numerals, Roman numerals, etc.). Table is generated from
/// the UCD; do not hand-edit the ranges.
pub fn isNumeric(cp: u21) bool {
    // Fast reject outside the overall table bounds.
    if (cp < 0xbc or cp > 0x2f890) return false;
    return switch (cp) {
        0xbc...0xbe => true,
        0x9f4...0x9f9 => true,
        0xb72...0xb77 => true,
        0xbf0...0xbf2 => true,
        0xc78...0xc7e => true,
        0xd58...0xd5e => true,
        0xd70...0xd78 => true,
        0xf2a...0xf33 => true,
        0x1372...0x137c => true,
        0x16ee...0x16f0 => true,
        0x17f0...0x17f9 => true,
        0x2150...0x215f => true,
        0x2160...0x2182 => true,
        0x2185...0x2188 => true,
        0x2189 => true,
        0x2469...0x2473 => true,
        0x247d...0x2487 => true,
        0x2491...0x249b => true,
        0x24eb...0x24f4 => true,
        0x24fe => true,
        0x277f => true,
        0x2789 => true,
        0x2793 => true,
        0x2cfd => true,
        0x3007 => true,
        0x3021...0x3029 => true,
        0x3038...0x303a => true,
        0x3192...0x3195 => true,
        0x3220...0x3229 => true,
        0x3248...0x324f => true,
        0x3251...0x325f => true,
        0x3280...0x3289 => true,
        0x32b1...0x32bf => true,
        0x3405 => true,
        0x3483 => true,
        0x382a => true,
        0x3b4d => true,
        0x4e00 => true,
        0x4e03 => true,
        0x4e07 => true,
        0x4e09 => true,
        0x4e5d => true,
        0x4e8c => true,
        0x4e94 => true,
        0x4e96 => true,
        0x4ebf...0x4ec0 => true,
        0x4edf => true,
        0x4ee8 => true,
        0x4f0d => true,
        0x4f70 => true,
        0x5104 => true,
        0x5146 => true,
        0x5169 => true,
        0x516b => true,
        0x516d => true,
        0x5341 => true,
        0x5343...0x5345 => true,
        0x534c => true,
        0x53c1...0x53c4 => true,
        0x56db => true,
        0x58f1 => true,
        0x58f9 => true,
        0x5e7a => true,
        0x5efe...0x5eff => true,
        0x5f0c...0x5f0e => true,
        0x5f10 => true,
        0x62fe => true,
        0x634c => true,
        0x67d2 => true,
        0x6f06 => true,
        0x7396 => true,
        0x767e => true,
        0x8086 => true,
        0x842c => true,
        0x8cae => true,
        0x8cb3 => true,
        0x8d30 => true,
        0x9621 => true,
        0x9646 => true,
        0x964c => true,
        0x9678 => true,
        0x96f6 => true,
        0xa6e6...0xa6ef => true,
        0xa830...0xa835 => true,
        0xf96b => true,
        0xf973 => true,
        0xf978 => true,
        0xf9b2 => true,
        0xf9d1 => true,
        0xf9d3 => true,
        0xf9fd => true,
        0x10107...0x10133 => true,
        0x10140...0x10174 => true,
        0x10175...0x10178 => true,
        0x1018a...0x1018b => true,
        0x102e1...0x102fb => true,
        0x10320...0x10323 => true,
        0x10341 => true,
        0x1034a => true,
        0x103d1...0x103d5 => true,
        0x10858...0x1085f => true,
        0x10879...0x1087f => true,
        0x108a7...0x108af => true,
        0x108fb...0x108ff => true,
        0x10916...0x1091b => true,
        0x109bc...0x109bd => true,
        0x109c0...0x109cf => true,
        0x109d2...0x109ff => true,
        0x10a44...0x10a48 => true,
        0x10a7d...0x10a7e => true,
        0x10a9d...0x10a9f => true,
        0x10aeb...0x10aef => true,
        0x10b58...0x10b5f => true,
        0x10b78...0x10b7f => true,
        0x10ba9...0x10baf => true,
        0x10cfa...0x10cff => true,
        0x10e69...0x10e7e => true,
        0x10f1d...0x10f26 => true,
        0x10f51...0x10f54 => true,
        0x10fc5...0x10fcb => true,
        0x1105b...0x11065 => true,
        0x111e1...0x111f4 => true,
        0x1173a...0x1173b => true,
        0x118ea...0x118f2 => true,
        0x11c5a...0x11c6c => true,
        0x11fc0...0x11fd4 => true,
        0x12400...0x1246e => true,
        0x16b5b...0x16b61 => true,
        0x16e80...0x16e96 => true,
        0x1d2e0...0x1d2f3 => true,
        0x1d360...0x1d378 => true,
        0x1e8c7...0x1e8cf => true,
        0x1ec71...0x1ecab => true,
        0x1ecad...0x1ecaf => true,
        0x1ecb1...0x1ecb4 => true,
        0x1ed01...0x1ed2d => true,
        0x1ed2f...0x1ed3d => true,
        0x1f10b...0x1f10c => true,
        0x20001 => true,
        0x20064 => true,
        0x200e2 => true,
        0x20121 => true,
        0x2092a => true,
        0x20983 => true,
        0x2098c => true,
        0x2099c => true,
        0x20aea => true,
        0x20afd => true,
        0x20b19 => true,
        0x22390 => true,
        0x22998 => true,
        0x23b1b => true,
        0x2626d => true,
        0x2f890 => true,
        else => false,
    };
}
/// Returns true if the codepoint has Unicode derived numeric type "Digit"
/// (digit-like forms outside decimal positional systems, e.g. superscripts
/// and circled digits). Table is generated from the UCD; do not hand-edit.
pub fn isDigit(cp: u21) bool {
    // Fast reject outside the overall table bounds.
    if (cp < 0xb2 or cp > 0x1f10a) return false;
    return switch (cp) {
        0xb2...0xb3 => true,
        0xb9 => true,
        0x1369...0x1371 => true,
        0x19da => true,
        0x2070 => true,
        0x2074...0x2079 => true,
        0x2080...0x2089 => true,
        0x2460...0x2468 => true,
        0x2474...0x247c => true,
        0x2488...0x2490 => true,
        0x24ea => true,
        0x24f5...0x24fd => true,
        0x24ff => true,
        0x2776...0x277e => true,
        0x2780...0x2788 => true,
        0x278a...0x2792 => true,
        0x10a40...0x10a43 => true,
        0x10e60...0x10e68 => true,
        0x11052...0x1105a => true,
        0x1f100...0x1f10a => true,
        else => false,
    };
}
/// Returns true if the codepoint has Unicode derived numeric type "Decimal"
/// (digits of decimal positional number systems, e.g. ASCII 0-9, Arabic-Indic
/// digits). Table is generated from the UCD; do not hand-edit the ranges.
pub fn isDecimal(cp: u21) bool {
    // Fast reject outside the overall table bounds.
    if (cp < 0x30 or cp > 0x1fbf9) return false;
    return switch (cp) {
        0x30...0x39 => true,
        0x660...0x669 => true,
        0x6f0...0x6f9 => true,
        0x7c0...0x7c9 => true,
        0x966...0x96f => true,
        0x9e6...0x9ef => true,
        0xa66...0xa6f => true,
        0xae6...0xaef => true,
        0xb66...0xb6f => true,
        0xbe6...0xbef => true,
        0xc66...0xc6f => true,
        0xce6...0xcef => true,
        0xd66...0xd6f => true,
        0xde6...0xdef => true,
        0xe50...0xe59 => true,
        0xed0...0xed9 => true,
        0xf20...0xf29 => true,
        0x1040...0x1049 => true,
        0x1090...0x1099 => true,
        0x17e0...0x17e9 => true,
        0x1810...0x1819 => true,
        0x1946...0x194f => true,
        0x19d0...0x19d9 => true,
        0x1a80...0x1a89 => true,
        0x1a90...0x1a99 => true,
        0x1b50...0x1b59 => true,
        0x1bb0...0x1bb9 => true,
        0x1c40...0x1c49 => true,
        0x1c50...0x1c59 => true,
        0xa620...0xa629 => true,
        0xa8d0...0xa8d9 => true,
        0xa900...0xa909 => true,
        0xa9d0...0xa9d9 => true,
        0xa9f0...0xa9f9 => true,
        0xaa50...0xaa59 => true,
        0xabf0...0xabf9 => true,
        0xff10...0xff19 => true,
        0x104a0...0x104a9 => true,
        0x10d30...0x10d39 => true,
        0x11066...0x1106f => true,
        0x110f0...0x110f9 => true,
        0x11136...0x1113f => true,
        0x111d0...0x111d9 => true,
        0x112f0...0x112f9 => true,
        0x11450...0x11459 => true,
        0x114d0...0x114d9 => true,
        0x11650...0x11659 => true,
        0x116c0...0x116c9 => true,
        0x11730...0x11739 => true,
        0x118e0...0x118e9 => true,
        0x11950...0x11959 => true,
        0x11c50...0x11c59 => true,
        0x11d50...0x11d59 => true,
        0x11da0...0x11da9 => true,
        0x16a60...0x16a69 => true,
        0x16ac0...0x16ac9 => true,
        0x16b50...0x16b59 => true,
        0x1d7ce...0x1d7ff => true,
        0x1e140...0x1e149 => true,
        0x1e2f0...0x1e2f9 => true,
        0x1e950...0x1e959 => true,
        0x1fbf0...0x1fbf9 => true,
        else => false,
    };
}
| https://raw.githubusercontent.com/mbrock/wisp/b12b8afa6b680cc173fd5e5a09d6a40a528ff2c1/core/lib/ziglyph/src/autogen/derived_numeric_type.zig |
use @import("SDL_stdinc.zig");
// SDL_BlendMode: how drawing operations combine source and destination
// pixels. The bare constants mirror the enum tags for C-style callers.
pub const SDL_BLENDMODE_NONE = 0;
pub const SDL_BLENDMODE_BLEND = 1;
pub const SDL_BLENDMODE_ADD = 2;
pub const SDL_BLENDMODE_MOD = 4;
pub const SDL_BLENDMODE_INVALID = 2147483647;
pub const SDL_BlendMode = extern enum {
    SDL_BLENDMODE_NONE = 0,
    SDL_BLENDMODE_BLEND = 1,
    SDL_BLENDMODE_ADD = 2,
    SDL_BLENDMODE_MOD = 4,
    SDL_BLENDMODE_INVALID = 2147483647,
};
// SDL_GLattr: OpenGL attributes settable before window/context creation via
// SDL_GL_SetAttribute. Constants mirror the enum tags for C-style callers.
pub const SDL_GL_RED_SIZE = 0;
pub const SDL_GL_GREEN_SIZE = 1;
pub const SDL_GL_BLUE_SIZE = 2;
pub const SDL_GL_ALPHA_SIZE = 3;
pub const SDL_GL_BUFFER_SIZE = 4;
pub const SDL_GL_DOUBLEBUFFER = 5;
pub const SDL_GL_DEPTH_SIZE = 6;
pub const SDL_GL_STENCIL_SIZE = 7;
pub const SDL_GL_ACCUM_RED_SIZE = 8;
pub const SDL_GL_ACCUM_GREEN_SIZE = 9;
pub const SDL_GL_ACCUM_BLUE_SIZE = 10;
pub const SDL_GL_ACCUM_ALPHA_SIZE = 11;
pub const SDL_GL_STEREO = 12;
pub const SDL_GL_MULTISAMPLEBUFFERS = 13;
pub const SDL_GL_MULTISAMPLESAMPLES = 14;
pub const SDL_GL_ACCELERATED_VISUAL = 15;
pub const SDL_GL_RETAINED_BACKING = 16;
pub const SDL_GL_CONTEXT_MAJOR_VERSION = 17;
pub const SDL_GL_CONTEXT_MINOR_VERSION = 18;
pub const SDL_GL_CONTEXT_EGL = 19;
pub const SDL_GL_CONTEXT_FLAGS = 20;
pub const SDL_GL_CONTEXT_PROFILE_MASK = 21;
pub const SDL_GL_SHARE_WITH_CURRENT_CONTEXT = 22;
pub const SDL_GL_FRAMEBUFFER_SRGB_CAPABLE = 23;
pub const SDL_GL_CONTEXT_RELEASE_BEHAVIOR = 24;
pub const SDL_GL_CONTEXT_RESET_NOTIFICATION = 25;
pub const SDL_GL_CONTEXT_NO_ERROR = 26;
pub const SDL_GLattr = extern enum {
    SDL_GL_RED_SIZE = 0,
    SDL_GL_GREEN_SIZE = 1,
    SDL_GL_BLUE_SIZE = 2,
    SDL_GL_ALPHA_SIZE = 3,
    SDL_GL_BUFFER_SIZE = 4,
    SDL_GL_DOUBLEBUFFER = 5,
    SDL_GL_DEPTH_SIZE = 6,
    SDL_GL_STENCIL_SIZE = 7,
    SDL_GL_ACCUM_RED_SIZE = 8,
    SDL_GL_ACCUM_GREEN_SIZE = 9,
    SDL_GL_ACCUM_BLUE_SIZE = 10,
    SDL_GL_ACCUM_ALPHA_SIZE = 11,
    SDL_GL_STEREO = 12,
    SDL_GL_MULTISAMPLEBUFFERS = 13,
    SDL_GL_MULTISAMPLESAMPLES = 14,
    SDL_GL_ACCELERATED_VISUAL = 15,
    SDL_GL_RETAINED_BACKING = 16,
    SDL_GL_CONTEXT_MAJOR_VERSION = 17,
    SDL_GL_CONTEXT_MINOR_VERSION = 18,
    SDL_GL_CONTEXT_EGL = 19,
    SDL_GL_CONTEXT_FLAGS = 20,
    SDL_GL_CONTEXT_PROFILE_MASK = 21,
    SDL_GL_SHARE_WITH_CURRENT_CONTEXT = 22,
    SDL_GL_FRAMEBUFFER_SRGB_CAPABLE = 23,
    SDL_GL_CONTEXT_RELEASE_BEHAVIOR = 24,
    SDL_GL_CONTEXT_RESET_NOTIFICATION = 25,
    SDL_GL_CONTEXT_NO_ERROR = 26,
};
// OpenGL context creation flags (bitmask values for SDL_GL_CONTEXT_FLAGS).
pub const SDL_GL_CONTEXT_DEBUG_FLAG = 1;
pub const SDL_GL_CONTEXT_FORWARD_COMPATIBLE_FLAG = 2;
pub const SDL_GL_CONTEXT_ROBUST_ACCESS_FLAG = 4;
pub const SDL_GL_CONTEXT_RESET_ISOLATION_FLAG = 8;
pub const SDL_GLcontextFlag = extern enum {
    SDL_GL_CONTEXT_DEBUG_FLAG = 1,
    SDL_GL_CONTEXT_FORWARD_COMPATIBLE_FLAG = 2,
    SDL_GL_CONTEXT_ROBUST_ACCESS_FLAG = 4,
    SDL_GL_CONTEXT_RESET_ISOLATION_FLAG = 8,
};
// OpenGL profile selection (values for SDL_GL_CONTEXT_PROFILE_MASK).
pub const SDL_GL_CONTEXT_PROFILE_CORE = 1;
pub const SDL_GL_CONTEXT_PROFILE_COMPATIBILITY = 2;
pub const SDL_GL_CONTEXT_PROFILE_ES = 4;
pub const SDL_GLprofile = extern enum {
    SDL_GL_CONTEXT_PROFILE_CORE = 1,
    SDL_GL_CONTEXT_PROFILE_COMPATIBILITY = 2,
    SDL_GL_CONTEXT_PROFILE_ES = 4,
};
// TODO: Draft?
// SDL_GLcontextReleaseFlag
// SDL_GLcontextResetNotification
pub const SDL_GL_CONTEXT_RELEASE_BEHAVIOR_NONE = 0;
pub const SDL_GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH = 1;
pub const SDL_GLcontextReleaseFlag = extern enum {
    SDL_GL_CONTEXT_RELEASE_BEHAVIOR_NONE = 0,
    SDL_GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH = 1,
};
pub const SDL_GL_CONTEXT_RESET_NO_NOTIFICATION = 0;
pub const SDL_GL_CONTEXT_RESET_LOSE_CONTEXT = 1;
pub const SDL_GLContextResetNotification = extern enum {
    SDL_GL_CONTEXT_RESET_NO_NOTIFICATION = 0,
    SDL_GL_CONTEXT_RESET_LOSE_CONTEXT = 1,
};
// SDL_HitTestResult: values a window hit-test callback can return to make a
// region behave as a drag handle or resize edge.
pub const SDL_HITTEST_NORMAL = 0;
pub const SDL_HITTEST_DRAGGABLE = 1;
pub const SDL_HITTEST_RESIZE_TOPLEFT = 2;
pub const SDL_HITTEST_RESIZE_TOP = 3;
pub const SDL_HITTEST_RESIZE_TOPRIGHT = 4;
pub const SDL_HITTEST_RESIZE_RIGHT = 5;
pub const SDL_HITTEST_RESIZE_BOTTOMRIGHT = 6;
pub const SDL_HITTEST_RESIZE_BOTTOM = 7;
pub const SDL_HITTEST_RESIZE_BOTTOMLEFT = 8;
pub const SDL_HITTEST_RESIZE_LEFT = 9;
pub const SDL_HitTestResult = extern enum {
    SDL_HITTEST_NORMAL = 0,
    SDL_HITTEST_DRAGGABLE = 1,
    SDL_HITTEST_RESIZE_TOPLEFT = 2,
    SDL_HITTEST_RESIZE_TOP = 3,
    SDL_HITTEST_RESIZE_TOPRIGHT = 4,
    SDL_HITTEST_RESIZE_RIGHT = 5,
    SDL_HITTEST_RESIZE_BOTTOMRIGHT = 6,
    SDL_HITTEST_RESIZE_BOTTOM = 7,
    SDL_HITTEST_RESIZE_BOTTOMLEFT = 8,
    SDL_HITTEST_RESIZE_LEFT = 9,
};
// Message-box button, color-scheme slot, and severity flags.
pub const SDL_MESSAGEBOX_BUTTON_RETURNKEY_DEFAULT = 1;
pub const SDL_MESSAGEBOX_BUTTON_ESCAPEKEY_DEFAULT = 2;
pub const SDL_MessageBoxButtonFlags = extern enum {
    SDL_MESSAGEBOX_BUTTON_RETURNKEY_DEFAULT = 1,
    SDL_MESSAGEBOX_BUTTON_ESCAPEKEY_DEFAULT = 2,
};
pub const SDL_MESSAGEBOX_COLOR_BACKGROUND = 0;
pub const SDL_MESSAGEBOX_COLOR_TEXT = 1;
pub const SDL_MESSAGEBOX_COLOR_BUTTON_BORDER = 2;
pub const SDL_MESSAGEBOX_COLOR_BUTTON_BACKGROUND = 3;
pub const SDL_MESSAGEBOX_COLOR_BUTTON_SELECTED = 4;
pub const SDL_MESSAGEBOX_COLOR_MAX = 5;
pub const SDL_MessageBoxColorType = extern enum {
    SDL_MESSAGEBOX_COLOR_BACKGROUND = 0,
    SDL_MESSAGEBOX_COLOR_TEXT = 1,
    SDL_MESSAGEBOX_COLOR_BUTTON_BORDER = 2,
    SDL_MESSAGEBOX_COLOR_BUTTON_BACKGROUND = 3,
    SDL_MESSAGEBOX_COLOR_BUTTON_SELECTED = 4,
    SDL_MESSAGEBOX_COLOR_MAX = 5,
};
pub const SDL_MESSAGEBOX_ERROR = 16;
pub const SDL_MESSAGEBOX_WARNING = 32;
pub const SDL_MESSAGEBOX_INFORMATION = 64;
pub const SDL_MessageBoxFlags = extern enum {
    SDL_MESSAGEBOX_ERROR = 16,
    SDL_MESSAGEBOX_WARNING = 32,
    SDL_MESSAGEBOX_INFORMATION = 64,
};
// SDL_WindowFlags: bitmask flags for SDL_CreateWindow / SDL_GetWindowFlags.
pub const SDL_WINDOW_FULLSCREEN = 1;
pub const SDL_WINDOW_OPENGL = 2;
pub const SDL_WINDOW_SHOWN = 4;
pub const SDL_WINDOW_HIDDEN = 8;
pub const SDL_WINDOW_BORDERLESS = 16;
pub const SDL_WINDOW_RESIZABLE = 32;
pub const SDL_WINDOW_MINIMIZED = 64;
pub const SDL_WINDOW_MAXIMIZED = 128;
pub const SDL_WINDOW_INPUT_GRABBED = 256;
pub const SDL_WINDOW_INPUT_FOCUS = 512;
pub const SDL_WINDOW_MOUSE_FOCUS = 1024;
pub const SDL_WINDOW_FULLSCREEN_DESKTOP = 4097;
pub const SDL_WINDOW_FOREIGN = 2048;
pub const SDL_WINDOW_ALLOW_HIGHDPI = 8192;
pub const SDL_WINDOW_MOUSE_CAPTURE = 16384;
pub const SDL_WINDOW_ALWAYS_ON_TOP = 32768;
pub const SDL_WINDOW_SKIP_TASKBAR = 65536;
pub const SDL_WINDOW_UTILITY = 131072;
pub const SDL_WINDOW_TOOLTIP = 262144;
pub const SDL_WINDOW_POPUP_MENU = 524288;
pub const SDL_WINDOW_VULKAN = 268435456;
pub const SDL_WindowFlags = extern enum {
    SDL_WINDOW_FULLSCREEN = 1,
    SDL_WINDOW_OPENGL = 2,
    SDL_WINDOW_SHOWN = 4,
    SDL_WINDOW_HIDDEN = 8,
    SDL_WINDOW_BORDERLESS = 16,
    SDL_WINDOW_RESIZABLE = 32,
    SDL_WINDOW_MINIMIZED = 64,
    SDL_WINDOW_MAXIMIZED = 128,
    SDL_WINDOW_INPUT_GRABBED = 256,
    SDL_WINDOW_INPUT_FOCUS = 512,
    SDL_WINDOW_MOUSE_FOCUS = 1024,
    SDL_WINDOW_FULLSCREEN_DESKTOP = 4097,
    SDL_WINDOW_FOREIGN = 2048,
    SDL_WINDOW_ALLOW_HIGHDPI = 8192,
    SDL_WINDOW_MOUSE_CAPTURE = 16384,
    SDL_WINDOW_ALWAYS_ON_TOP = 32768,
    SDL_WINDOW_SKIP_TASKBAR = 65536,
    SDL_WINDOW_UTILITY = 131072,
    SDL_WINDOW_TOOLTIP = 262144,
    SDL_WINDOW_POPUP_MENU = 524288,
    SDL_WINDOW_VULKAN = 268435456,
};
// Window-position sentinels: positions with these masks set mean "undefined"
// or "centered", optionally encoding a display index in the low bits.
pub const SDL_WINDOWPOS_UNDEFINED_MASK = 0x1FFF0000;
pub const SDL_WINDOWPOS_UNDEFINED = SDL_WINDOWPOS_UNDEFINED_DISPLAY(0);
pub fn SDL_WINDOWPOS_UNDEFINED_DISPLAY(x: var) @typeOf(x) {
    return SDL_WINDOWPOS_UNDEFINED_MASK | x;
}
pub fn SDL_WINDOWPOS_ISUNDEFINED(pos: var) bool {
    return (pos & SDL_WINDOWPOS_UNDEFINED_MASK) == SDL_WINDOWPOS_UNDEFINED_MASK;
}
pub const SDL_WINDOWPOS_CENTERED_MASK = 0x2FFF0000;
pub const SDL_WINDOWPOS_CENTERED = SDL_WINDOWPOS_CENTERED_DISPLAY(0);
pub fn SDL_WINDOWPOS_CENTERED_DISPLAY(x: var) @typeOf(x) {
    return SDL_WINDOWPOS_CENTERED_MASK | x;
}
pub fn SDL_WINDOWPOS_ISCENTERED(pos: var) bool {
    return (pos & SDL_WINDOWPOS_CENTERED_MASK) == SDL_WINDOWPOS_CENTERED_MASK;
}
// SDL_WindowEventID: subtype carried in SDL_WindowEvent.event.
pub const SDL_WINDOWEVENT_NONE = 0;
pub const SDL_WINDOWEVENT_SHOWN = 1;
pub const SDL_WINDOWEVENT_HIDDEN = 2;
pub const SDL_WINDOWEVENT_EXPOSED = 3;
pub const SDL_WINDOWEVENT_MOVED = 4;
pub const SDL_WINDOWEVENT_RESIZED = 5;
pub const SDL_WINDOWEVENT_SIZE_CHANGED = 6;
pub const SDL_WINDOWEVENT_MINIMIZED = 7;
pub const SDL_WINDOWEVENT_MAXIMIZED = 8;
pub const SDL_WINDOWEVENT_RESTORED = 9;
pub const SDL_WINDOWEVENT_ENTER = 10;
pub const SDL_WINDOWEVENT_LEAVE = 11;
pub const SDL_WINDOWEVENT_FOCUS_GAINED = 12;
pub const SDL_WINDOWEVENT_FOCUS_LOST = 13;
pub const SDL_WINDOWEVENT_CLOSE = 14;
pub const SDL_WINDOWEVENT_TAKE_FOCUS = 15;
pub const SDL_WINDOWEVENT_HIT_TEST = 16;
pub const SDL_WindowEventID = extern enum {
    SDL_WINDOWEVENT_NONE = 0,
    SDL_WINDOWEVENT_SHOWN = 1,
    SDL_WINDOWEVENT_HIDDEN = 2,
    SDL_WINDOWEVENT_EXPOSED = 3,
    SDL_WINDOWEVENT_MOVED = 4,
    SDL_WINDOWEVENT_RESIZED = 5,
    SDL_WINDOWEVENT_SIZE_CHANGED = 6,
    SDL_WINDOWEVENT_MINIMIZED = 7,
    SDL_WINDOWEVENT_MAXIMIZED = 8,
    SDL_WINDOWEVENT_RESTORED = 9,
    SDL_WINDOWEVENT_ENTER = 10,
    SDL_WINDOWEVENT_LEAVE = 11,
    SDL_WINDOWEVENT_FOCUS_GAINED = 12,
    SDL_WINDOWEVENT_FOCUS_LOST = 13,
    SDL_WINDOWEVENT_CLOSE = 14,
    SDL_WINDOWEVENT_TAKE_FOCUS = 15,
    SDL_WINDOWEVENT_HIT_TEST = 16,
};
// Display mode description: pixel format, size in pixels, refresh rate in Hz
// (0 = unspecified), and a driver-private pointer.
pub const SDL_DisplayMode = extern struct {
    format: u32,
    w: c_int,
    h: c_int,
    refresh_rate: c_int,
    driverdata: ?*c_void,
};
// One button in a message box (flags are SDL_MessageBoxButtonFlags values).
pub const SDL_MessageBoxButtonData = extern struct {
    flags: u32,
    buttonid: c_int,
    text: [*]const u8,
};
pub const SDL_MessageBoxColor = extern struct {
    r: u8,
    g: u8,
    b: u8,
};
// NOTE: manual
// One color per SDL_MessageBoxColorType slot (SDL_MESSAGEBOX_COLOR_MAX = 5).
pub const SDL_MessageBoxColorScheme = extern struct {
    colors: [5]SDL_MessageBoxColor,
};
// NOTE: manual
pub const SDL_MessageBoxData = extern struct {
    flags: u32,
    window: ?*SDL_Window,
    title: [*]const u8,
    message: [*]const u8,
    numbuttons: c_int,
    buttons: [*]const SDL_MessageBoxButtonData,
    colorScheme: ?[*]const SDL_MessageBoxColorScheme,
};
const SDL_Renderer = @import("SDL_render.zig").SDL_Renderer;
const SDL_WindowEvent = @import("SDL_events.zig").SDL_WindowEvent;
// SDL_Window is opaque; only SDL itself knows its layout.
pub const struct_SDL_Window = @OpaqueType();
pub const SDL_Window = struct_SDL_Window;
// Callback type for SDL_SetWindowHitTest.
pub const SDL_HitTest = extern fn (*SDL_Window, *const SDL_Point, ?*c_void) SDL_HitTestResult;
// Raw bindings to the SDL2 video/window/OpenGL C API. Functions returning
// c_int report failure with a negative value (query SDL_GetError); optional
// pointers are null on failure.
pub extern fn SDL_CreateWindow(title: ?[*]const u8, x: c_int, y: c_int, w: c_int, h: c_int, flags: u32) ?*SDL_Window;
pub extern fn SDL_CreateWindowAndRenderer(width: c_int, height: c_int, window_flags: u32, window: **SDL_Window, renderer: **SDL_Renderer) c_int;
pub extern fn SDL_CreateWindowFrom(data: ?*const c_void) ?*SDL_Window;
pub extern fn SDL_DestroyWindow(window: *SDL_Window) void;
pub extern fn SDL_DisableScreenSaver() void;
pub extern fn SDL_EnableScreenSaver() void;
pub extern fn SDL_GL_CreateContext(window: *SDL_Window) SDL_GLContext;
pub extern fn SDL_GL_DeleteContext(context: SDL_GLContext) void;
pub extern fn SDL_GL_ExtensionSupported(extension: [*]const u8) SDL_bool;
pub extern fn SDL_GL_GetAttribute(attr: SDL_GLattr, value: *c_int) c_int;
pub extern fn SDL_GL_GetCurrentContext() SDL_GLContext;
pub extern fn SDL_GL_GetCurrentWindow() ?*SDL_Window;
pub extern fn SDL_GL_GetDrawableSize(window: *SDL_Window, w: ?*c_int, h: ?*c_int) void;
pub extern fn SDL_GL_GetProcAddress(proc: [*]const u8) ?*c_void;
pub extern fn SDL_GL_GetSwapInterval() c_int;
pub extern fn SDL_GL_LoadLibrary(path: [*]const u8) c_int;
pub extern fn SDL_GL_MakeCurrent(window: *SDL_Window, context: SDL_GLContext) c_int;
pub extern fn SDL_GL_ResetAttributes() void;
pub extern fn SDL_GL_SetAttribute(attr: SDL_GLattr, value: c_int) c_int;
pub extern fn SDL_GL_SetSwapInterval(interval: c_int) c_int;
pub extern fn SDL_GL_SwapWindow(window: *SDL_Window) void;
pub extern fn SDL_GL_UnloadLibrary() void;
pub extern fn SDL_GetClosestDisplayMode(displayIndex: c_int, mode: *const SDL_DisplayMode, closest: *SDL_DisplayMode) ?*SDL_DisplayMode;
pub extern fn SDL_GetCurrentDisplayMode(displayIndex: c_int, mode: *SDL_DisplayMode) c_int;
pub extern fn SDL_GetCurrentVideoDriver() ?[*]const u8;
pub extern fn SDL_GetDesktopDisplayMode(displayIndex: c_int, mode: *SDL_DisplayMode) c_int;
pub extern fn SDL_GetDisplayBounds(displayIndex: c_int, rect: *SDL_Rect) c_int;
pub extern fn SDL_GetDisplayDPI(displayIndex: c_int, ddpi: ?*f32, hdpi: ?*f32, vdpi: ?*f32) c_int;
pub extern fn SDL_GetDisplayMode(displayIndex: c_int, modeIndex: c_int, mode: *SDL_DisplayMode) c_int;
pub extern fn SDL_GetDisplayName(displayIndex: c_int) ?[*]const u8;
pub extern fn SDL_GetDisplayUsableBounds(displayIndex: c_int, rect: *SDL_Rect) c_int;
pub extern fn SDL_GetGrabbedWindow() ?*SDL_Window;
pub extern fn SDL_GetNumDisplayModes(displayIndex: c_int) c_int;
pub extern fn SDL_GetNumVideoDisplays() c_int;
pub extern fn SDL_GetNumVideoDrivers() c_int;
pub extern fn SDL_GetVideoDriver(index_0: c_int) ?[*]const u8;
pub extern fn SDL_GetWindowBordersSize(window: *SDL_Window, top: ?*c_int, left: ?*c_int, bottom: ?*c_int, right: ?*c_int) c_int;
pub extern fn SDL_GetWindowBrightness(window: *SDL_Window) f32;
pub extern fn SDL_GetWindowData(window: *SDL_Window, name: [*]const u8) ?*c_void;
pub extern fn SDL_GetWindowDisplayIndex(window: *SDL_Window) c_int;
pub extern fn SDL_GetWindowDisplayMode(window: *SDL_Window, mode: *SDL_DisplayMode) c_int;
pub extern fn SDL_GetWindowFlags(window: *SDL_Window) u32;
pub extern fn SDL_GetWindowFromID(id: u32) *SDL_Window;
pub extern fn SDL_GetWindowGammaRamp(window: *SDL_Window, red: ?*u16, green: ?*u16, blue: ?*u16) c_int;
pub extern fn SDL_GetWindowGrab(window: *SDL_Window) SDL_bool;
pub extern fn SDL_GetWindowID(window: *SDL_Window) u32;
pub extern fn SDL_GetWindowMaximumSize(window: *SDL_Window, w: ?*c_int, h: ?*c_int) void;
pub extern fn SDL_GetWindowMinimumSize(window: *SDL_Window, w: ?*c_int, h: ?*c_int) void;
pub extern fn SDL_GetWindowOpacity(window: *SDL_Window, out_opacity: *f32) c_int;
pub extern fn SDL_GetWindowPixelFormat(window: *SDL_Window) u32;
pub extern fn SDL_GetWindowPosition(window: *SDL_Window, x: ?*c_int, y: ?*c_int) void;
pub extern fn SDL_GetWindowSize(window: *SDL_Window, w: ?*c_int, h: ?*c_int) void;
pub extern fn SDL_GetWindowSurface(window: *SDL_Window) ?*SDL_Surface;
pub extern fn SDL_GetWindowTitle(window: *SDL_Window) ?[*]const u8;
pub extern fn SDL_HideWindow(window: *SDL_Window) void;
pub extern fn SDL_IsScreenSaverEnabled() SDL_bool;
pub extern fn SDL_MaximizeWindow(window: *SDL_Window) void;
pub extern fn SDL_MinimizeWindow(window: *SDL_Window) void;
pub extern fn SDL_RaiseWindow(window: *SDL_Window) void;
pub extern fn SDL_RestoreWindow(window: *SDL_Window) void;
pub extern fn SDL_SetWindowBordered(window: *SDL_Window, bordered: SDL_bool) void;
pub extern fn SDL_SetWindowBrightness(window: *SDL_Window, brightness: f32) c_int;
pub extern fn SDL_SetWindowData(window: *SDL_Window, name: [*]const u8, userdata: ?*c_void) ?*c_void;
pub extern fn SDL_SetWindowDisplayMode(window: *SDL_Window, mode: *const SDL_DisplayMode) c_int;
pub extern fn SDL_SetWindowFullscreen(window: *SDL_Window, flags: u32) c_int;
pub extern fn SDL_SetWindowGammaRamp(window: *SDL_Window, red: ?*const u16, green: ?*const u16, blue: ?*const u16) c_int;
pub extern fn SDL_SetWindowGrab(window: *SDL_Window, grabbed: SDL_bool) void;
pub extern fn SDL_SetWindowHitTest(window: *SDL_Window, callback: SDL_HitTest, callback_data: ?*c_void) c_int;
pub extern fn SDL_SetWindowIcon(window: *SDL_Window, icon: *SDL_Surface) void;
pub extern fn SDL_SetWindowInputFocus(window: *SDL_Window) c_int;
pub extern fn SDL_SetWindowMaximumSize(window: *SDL_Window, max_w: c_int, max_h: c_int) void;
pub extern fn SDL_SetWindowMinimumSize(window: *SDL_Window, min_w: c_int, min_h: c_int) void;
pub extern fn SDL_SetWindowModalFor(modal_window: *SDL_Window, parent_window: *SDL_Window) c_int;
pub extern fn SDL_SetWindowOpacity(window: *SDL_Window, opacity: f32) c_int;
pub extern fn SDL_SetWindowPosition(window: *SDL_Window, x: c_int, y: c_int) void;
pub extern fn SDL_SetWindowResizable(window: *SDL_Window, resizable: SDL_bool) void;
pub extern fn SDL_SetWindowSize(window: *SDL_Window, w: c_int, h: c_int) void;
pub extern fn SDL_SetWindowTitle(window: *SDL_Window, title: [*]const u8) void;
pub extern fn SDL_ShowWindow(window: *SDL_Window) void;
pub extern fn SDL_UpdateWindowSurfaceRects(window: *SDL_Window, rects: [*]const SDL_Rect, numrects: c_int) c_int;
pub extern fn SDL_UpdateWindowSurface(window: *SDL_Window) c_int;
pub extern fn SDL_VideoInit(driver_name: [*]const u8) c_int;
pub extern fn SDL_VideoQuit() void;
| https://raw.githubusercontent.com/tiehuis/zig-sdl2/43e0f5fa4a2b7f8a156c13cbd8540ef18373561e/src/SDL_video.zig |
// Library root: re-exports the combinator and DSL modules.
pub const combn = @import("combn/combn.zig");
pub const dsl = @import("dsl/dsl.zig");

test "include" {
    // Referencing a decl pulls the module into analysis so its tests run.
    _ = dsl.Program;
}
| https://raw.githubusercontent.com/hexops/zorex/44732bc9c05dcde40f26f8b5e058c438c011a17e/src/zorex.zig |
// glinka
// Copyright (C) 2021-2022 Ollie Etherington
// <www.etherington.io>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
const std = @import("std");
const expectEqual = std.testing.expectEqual;
const Cursor = @import("../../common/cursor.zig").Cursor;
const TokenType = @import("../../common/token.zig").Token.Type;
const Type = @import("../../common/types/type.zig").Type;
/// Error raised when an operator is applied to a type that does not
/// support it. Carries the source position, the offending operator token,
/// and the operand type so it can be reported to the user.
pub const OpError = struct {
    csr: Cursor,
    op: TokenType,
    ty: Type.Ptr,

    /// Construct an OpError for operator `op` applied to `ty` at `csr`.
    pub fn new(csr: Cursor, op: TokenType, ty: Type.Ptr) OpError {
        return .{
            .csr = csr,
            .op = op,
            .ty = ty,
        };
    }

    /// Write a human-readable description of this error to `writer`.
    pub fn report(self: OpError, writer: anytype) !void {
        const args = .{
            self.csr.ln,
            self.csr.ch,
            @tagName(self.op),
        };
        try writer.print(
            "Error: {d}:{d}: Operator '{s}' is not defined for type '",
            args,
        );
        try self.ty.write(writer);
        try writer.print("'\n", .{});
    }
};
// Sanity check: OpError.new stores its three arguments verbatim.
test "can initialize an OpError" {
    const csr = Cursor.new(2, 5);
    const op = TokenType.Sub;
    const ty = Type.newString();
    const err = OpError.new(csr, op, &ty);
    try expectEqual(csr, err.csr);
    try expectEqual(op, err.op);
    try expectEqual(&ty, err.ty);
}
| https://raw.githubusercontent.com/oetherington/glinka/57cac7047f22926d162f82378b6ea965596d9708/src/compiler/errors/op_error.zig |
// Shared file paths for the billion-row-challenge implementations.
// Presumably OUTPUT_PATH receives results and MEASUREMENTS_PATH is the
// input data set — confirm against the consuming code.
pub const OUTPUT_PATH: []const u8 = "outputs.txt";
pub const MEASUREMENTS_PATH: []const u8 = "measurements.txt";
| https://raw.githubusercontent.com/myyrakle/billion_row_challenge/8cd13292804208eaa5247f624232ddfda2fc2b0b/zig/common.zig |
const c = @import("cimport.zig");
const common = @import("common.zig");
const bool_to_c_int = common.bool_to_c_int;
const Container = @import("container.zig").Container;
const Dialog = @import("dialog.zig").Dialog;
const WindowType = @import("enums.zig").WindowType;
const Widget = @import("widget.zig").Widget;
/// Wrapper over GtkApplicationWindow (a GtkWindow owned by a GtkApplication).
pub const ApplicationWindow = struct {
    ptr: *c.GtkApplicationWindow,

    /// Create a new application window attached to `app`.
    pub fn new(app: *c.GtkApplication) ApplicationWindow {
        return ApplicationWindow{
            .ptr = @ptrCast(*c.GtkApplicationWindow, c.gtk_application_window_new(app)),
        };
    }

    /// Upcast to the generic Window wrapper (shares the same pointer).
    pub fn as_window(self: ApplicationWindow) Window {
        return Window{
            .ptr = @ptrCast(*c.GtkWindow, self.ptr),
        };
    }

    /// Upcast to Container.
    pub fn as_container(self: ApplicationWindow) Container {
        return Container{
            .ptr = @ptrCast(*c.GtkContainer, self.ptr),
        };
    }

    /// Upcast to Widget.
    pub fn as_widget(self: ApplicationWindow) Widget {
        return Widget{
            .ptr = @ptrCast(*c.GtkWidget, self.ptr),
        };
    }

    /// True when `gtype` is exactly the GtkApplicationWindow GType.
    pub fn is_instance(gtype: u64) bool {
        return (gtype == c.gtk_application_window_get_type());
    }
};
/// Wrapper over GtkWindow exposing a subset of the window API.
pub const Window = struct {
    ptr: *c.GtkWindow,

    const Self = @This();

    /// Create a new top-level or popup window.
    pub fn new(window_type: WindowType) Self {
        return Self{
            .ptr = @ptrCast(*c.GtkWindow, c.gtk_window_new(@enumToInt(window_type))),
        };
    }

    /// Set the title shown in the window's title bar.
    pub fn set_title(self: Self, title: [:0]const u8) void {
        c.gtk_window_set_title(self.ptr, title.ptr);
    }

    /// Default size in pixels used when the window is first shown.
    pub fn set_default_size(self: Self, hsize: c_int, vsize: c_int) void {
        c.gtk_window_set_default_size(self.ptr, hsize, vsize);
    }

    /// Toggle window-manager decorations (title bar, borders).
    pub fn set_decorated(self: Self, decorated: bool) void {
        const val = bool_to_c_int(decorated);
        c.gtk_window_set_decorated(self.ptr, val);
    }

    /// Request that the window be closed, as if the user clicked close.
    pub fn close(self: Self) void {
        c.gtk_window_close(self.ptr);
    }

    /// Mark this window as transient for `parent` (kept above it).
    pub fn set_transient_for(self: Self, parent: Self) void {
        c.gtk_window_set_transient_for(self.ptr, parent.ptr);
    }

    /// Upcast to Container.
    pub fn as_container(self: Self) Container {
        return Container{
            .ptr = @ptrCast(*c.GtkContainer, self.ptr),
        };
    }

    /// Upcast to Widget.
    pub fn as_widget(self: Self) Widget {
        return Widget{
            .ptr = @ptrCast(*c.GtkWidget, self.ptr),
        };
    }

    /// True for GtkWindow itself or any wrapped subtype we know about.
    pub fn is_instance(gtype: u64) bool {
        return (gtype == c.gtk_window_get_type() or ApplicationWindow.is_instance(gtype) or Dialog.is_instance(gtype));
    }
};
| https://raw.githubusercontent.com/nfisher1226/zig-gtk3/da5042dd2b0b4e19d25c6143c369be4a0a3eda23/src/window.zig |
// https://github.com/nektro/zig-asn1
const std = @import("std");
const string = []const u8;
const assert = std.debug.assert;
/// ASN.1 identifier octet: the universal tag number combined with the
/// primitive/constructed bit (X.690 identifier octets). Non-exhaustive so
/// tags with other classes (built via `extra`) remain representable.
pub const Tag = enum(u8) {
    // zig fmt: off
    end_of_content      = @as(u8, 0) | @intFromEnum(PC.primitive),
    boolean             = @as(u8, 1) | @intFromEnum(PC.primitive),
    integer             = @as(u8, 2) | @intFromEnum(PC.primitive),
    bit_string          = @as(u8, 3) | @intFromEnum(PC.primitive),
    octet_string        = @as(u8, 4) | @intFromEnum(PC.primitive),
    null                = @as(u8, 5) | @intFromEnum(PC.primitive),
    object_identifier   = @as(u8, 6) | @intFromEnum(PC.primitive),
    object_descriptor   = @as(u8, 7) | @intFromEnum(PC.primitive),
    external_type       = @as(u8, 8) | @intFromEnum(PC.primitive),
    real_type           = @as(u8, 9) | @intFromEnum(PC.primitive),
    enumerated_type     = @as(u8,10) | @intFromEnum(PC.primitive),
    embedded_pdv        = @as(u8,11) | @intFromEnum(PC.primitive),
    utf8_string         = @as(u8,12) | @intFromEnum(PC.primitive),
    relative_oid        = @as(u8,13) | @intFromEnum(PC.primitive),
    time                = @as(u8,14) | @intFromEnum(PC.primitive),
    _reserved2          = @as(u8,15) | @intFromEnum(PC.primitive),
    // SEQUENCE and SET are always constructed forms.
    sequence            = @as(u8,16) | @intFromEnum(PC.constructed),
    set                 = @as(u8,17) | @intFromEnum(PC.constructed),
    numeric_string      = @as(u8,18) | @intFromEnum(PC.primitive),
    printable_string    = @as(u8,19) | @intFromEnum(PC.primitive),
    teletex_string      = @as(u8,20) | @intFromEnum(PC.primitive),
    videotex_string     = @as(u8,21) | @intFromEnum(PC.primitive),
    ia5_string          = @as(u8,22) | @intFromEnum(PC.primitive),
    utc_time            = @as(u8,23) | @intFromEnum(PC.primitive),
    generalized_time    = @as(u8,24) | @intFromEnum(PC.primitive),
    graphic_string      = @as(u8,25) | @intFromEnum(PC.primitive),
    visible_string      = @as(u8,26) | @intFromEnum(PC.primitive),
    general_string      = @as(u8,27) | @intFromEnum(PC.primitive),
    universal_string    = @as(u8,28) | @intFromEnum(PC.primitive),
    unrestricted_string = @as(u8,29) | @intFromEnum(PC.primitive),
    bmp_string          = @as(u8,30) | @intFromEnum(PC.primitive),
    date                = @as(u8,31) | @intFromEnum(PC.primitive),
    _,

    // Bit 6 of the identifier octet: primitive vs constructed encoding.
    const PC = enum(u8) {
        primitive   = 0b00000000,
        constructed = 0b00100000,
    };

    // Bits 7-8 of the identifier octet: the tag class.
    const Class = enum(u8) {
        universal   = 0b00000000,
        application = 0b01000000,
        context     = 0b10000000,
        private     = 0b11000000,
    };
    // zig fmt: on

    /// Raw identifier-octet value of this tag.
    pub fn int(tag: Tag) u8 {
        return @intFromEnum(tag);
    }

    /// Build a non-universal tag from its class, P/C bit, and tag number.
    pub fn extra(pc: PC, class: Class, ty: u5) Tag {
        var res: u8 = ty;
        res |= @intFromEnum(pc);
        res |= @intFromEnum(class);
        return @enumFromInt(res);
    }

    /// Read one identifier octet from `reader`.
    pub fn read(reader: anytype) !Tag {
        return @enumFromInt(try reader.readByte());
    }
};
/// DER length octet: short form stores the content length directly in the
/// low 7 bits; long form says how many subsequent octets hold the
/// big-endian length value.
pub const Length = packed struct(u8) {
    len: u7,
    form: enum(u1) { short, long },

    /// Read a complete length field and return the decoded content length.
    pub fn read(reader: anytype) !u64 {
        const octet: Length = @bitCast(try reader.readByte());
        switch (octet.form) {
            .short => return octet.len,
            .long => {
                var res: u64 = 0;
                assert(octet.len <= 8); // long form length exceeds bounds of u64
                assert(octet.len > 0); // TODO indefinite form
                // Fold in length octets most-significant first (big-endian).
                for (0..octet.len) |i| {
                    res |= (@as(u64, try reader.readByte()) << @as(u6, @intCast(8 * (octet.len - 1 - @as(u6, @intCast(i))))));
                }
                return res;
            },
        }
    }
};
/// Read one identifier octet and fail unless it matches `tag`.
fn expectTag(reader: anytype, tag: Tag) !void {
    if ((try Tag.read(reader)) != tag) return error.UnexpectedTag;
}
/// Read one length field and fail unless it equals `len`.
fn expectLength(reader: anytype, len: u64) !void {
    if ((try Length.read(reader)) != len) return error.UnexpectedLength;
}
/// Decode a BOOLEAN: tag 0x01, length 1, any nonzero content octet is true.
pub fn readBoolean(reader: anytype) !bool {
    try expectTag(reader, .boolean);
    try expectLength(reader, 1);
    const content = try reader.readByte();
    return content != 0;
}
/// Decode an INTEGER into `Int` (must be a whole-byte-sized integer type).
/// Content octets are big-endian, at most 8 and at most @sizeOf(Int).
/// NOTE(review): octets are assembled as raw big-endian bits with no sign
/// extension, so negative two's-complement encodings shorter than Int are
/// decoded incorrectly — confirm callers only read non-negative values.
pub fn readInt(reader: anytype, comptime Int: type) !Int {
    comptime assert(@bitSizeOf(Int) % 8 == 0);
    const L2Int = std.math.Log2Int(Int);
    try expectTag(reader, .integer);
    const len = try Length.read(reader);
    assert(len <= 8); // TODO implement readIntBig
    assert(len > 0);
    assert(len <= @sizeOf(Int));
    var res: Int = 0;
    // Fold in content octets, most significant first.
    for (0..len) |i| {
        res |= (@as(Int, try reader.readByte()) << @as(L2Int, @intCast(8 * (len - 1 - @as(L2Int, @intCast(i))))));
    }
    return res;
}
/// Decode a NULL value: tag 0x05 with zero-length contents.
pub fn readNull(reader: anytype) !void {
    try expectTag(reader, .null);
    try expectLength(reader, 0);
}
| https://raw.githubusercontent.com/The-Z-Labs/bof-launcher/3b951c355952ef5c2f6b37d703f0072849151208/include/asn1.zig |
const std = @import("std");
const assert = std.debug.assert;
/// Returns true iff the most-significant bit of `v` is set
/// (i.e. v >= 2^63), logging the input and result to stderr.
fn abc(v: u64) bool {
    // Never reassigned, so declare it const (an unmutated `var` is a
    // compile error in modern Zig and poor idiom in older versions).
    const ret_val: bool = v >= 0x8000000000000000;
    std.debug.warn("v={x} ret_val={}\n", v, ret_val);
    return ret_val;
}
// Feeds abc() a deterministic PRNG stream. Which asserts hold is tied to
// the exact values DefaultPrng emits for seed 12345678 in this std version.
pub fn main() void {
    var prng = std.rand.DefaultPrng.init(12345678);
    assert(!abc(prng.random.scalar(u64)));
    assert(!abc(prng.random.scalar(u64)));
    assert(abc(prng.random.scalar(u64)));
    assert(abc(prng.random.scalar(u64)));
    assert(!abc(prng.random.scalar(u64)));
    assert(!abc(prng.random.scalar(u64)));
    // This should fail in debug and --release-safe builds
    // and succeed in --release-fast and --release-small builds
    assert(!abc(prng.random.scalar(u64)));
}
| https://raw.githubusercontent.com/winksaville/zig-explore/34a19e1540a3b1a3e72c4d0ed339cd0384c0760e/assert/main.zig |
const Allocator = @import("std").mem.Allocator;
const heap = @import("heap.zig");
const log = @import("log.zig");
const stream = @import("stream.zig");
const sync = @import("sync.zig");
const lwip = @import("lwip.zig");
const net = @import("drivers/virtio/net.zig");
const util = @import("util.zig");
const virito_net = @import("drivers/virtio/net.zig");
const wasi = @import("wasi.zig");
const types = @import("wasi/types.zig");
const Stream = stream.Stream;
/// IPv4 address as a single 32-bit word; extern layout for C interop with
/// lwIP. NOTE(review): byte order of `addr` is assumed to match lwIP's
/// ip_addr representation — confirm at call sites.
pub const IpAddr = extern struct {
    addr: u32,
};
/// A TCP socket backed by an lwIP protocol control block (PCB).
///
/// Received data is pushed by C-side callbacks (see socketPush/notify*
/// below) into a spin-locked ring buffer; blocking operations park on
/// `waiter` until a callback clears it.
pub const Socket = struct {
    /// Address of the lwIP tcp_pcb owned by this socket, stored as usize.
    pcb_addr: usize,
    /// Receive buffer. For listening sockets it holds accepted fds
    /// (4 bytes per connection) instead of payload bytes.
    buffer: sync.SpinLock(util.RingBuffer),
    /// Parks blocking callers until an lwIP callback signals progress.
    waiter: sync.Waiter,
    /// File descriptor in the process-wide stream table; -1 until setFd().
    fd: i32 = -1,
    /// WASI fd flags; only NonBlock is consulted (see isNonBlocking()).
    flags: u16 = 0,
    is_connected: bool = false,
    is_read_shutdown: bool = false,
    is_write_shutdown: bool = false,
    is_listening: bool = false,

    const Self = @This();

    pub const Error = error{ Failed, Again };

    /// Capacity of the receive ring buffer in bytes.
    const BUFFER_SIZE: usize = 16384;

    /// Allocate a fresh socket and a new lwIP PCB for address family `af`.
    /// NOTE(review): the RingBuffer struct comes from heap.runtime_allocator
    /// while its storage uses the caller's `allocator` — confirm intended.
    pub fn new(af: wasi.AddressFamily, allocator: Allocator) Allocator.Error!Self {
        const buffer = try heap.runtime_allocator.create(util.RingBuffer);
        buffer.* = try util.RingBuffer.new(BUFFER_SIZE, allocator);
        var ret = Self{
            .pcb_addr = undefined,
            .buffer = sync.SpinLock(util.RingBuffer).new(buffer),
            .waiter = sync.Waiter.new(),
        };
        ret.pcb_addr = lwip.acquire().lwip_new_tcp_pcb(wasiToLwipAddressType(af));
        lwip.release();
        return ret;
    }

    /// Wrap an already-existing lwIP PCB (used for accepted connections).
    pub fn newFromPcb(pcb: *anyopaque, allocator: Allocator) Allocator.Error!Self {
        const buffer = try heap.runtime_allocator.create(util.RingBuffer);
        buffer.* = try util.RingBuffer.new(BUFFER_SIZE, allocator);
        const ret = Self{
            .pcb_addr = @intFromPtr(pcb),
            .buffer = sync.SpinLock(util.RingBuffer).new(buffer),
            .waiter = sync.Waiter.new(),
        };
        return ret;
    }

    /// Bind the PCB to `ip_addr`:`port`.
    pub fn bind(self: *Self, ip_addr: *anyopaque, port: i32) Error!void {
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        const err = lwip.acquire().lwip_tcp_bind(pcb, ip_addr, port);
        lwip.release();
        if (err != 0) {
            return Error.Failed;
        }
    }

    /// Switch to listening state. lwIP returns a *new* PCB for a listener,
    /// so pcb_addr is replaced here.
    pub fn listen(self: *Self, backlog: i32) Error!void {
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        const new_pcb_ptr = lwip.acquire().tcp_listen_with_backlog(pcb, @as(u8, @intCast(backlog)));
        lwip.release();
        if (new_pcb_ptr == null) {
            return Error.Failed;
        }
        self.pcb_addr = @intFromPtr(new_pcb_ptr);
        lwip.acquire().lwip_accept(new_pcb_ptr.?);
        lwip.release();
        self.is_listening = true;
        return;
    }

    /// Pop one accepted connection's fd from the buffer. Blocks until a
    /// connection arrives unless non-blocking (then Error.Again).
    pub fn accept(self: *Self) Error!i32 {
        var sock_buf = self.buffer.acquire();
        if (sock_buf.availableToRead() > 0) {
            var buf = [4]u8{ 0, 0, 0, 0 };
            if (sock_buf.read(buf[0..]) != 4) {
                @panic("accept: new file descriptor not found");
            }
            self.buffer.release();
            // Reinterpret the 4 buffered bytes as the accepted fd.
            // NOTE(review): @alignCast from a 1-aligned [4]u8 to *i32 relies
            // on the stack array being 4-aligned at runtime — verify.
            const new_fd = @as(*i32, @alignCast(@ptrCast(buf[0..].ptr)));
            return new_fd.*;
        }
        if (self.isNonBlocking()) {
            self.buffer.release();
            return Error.Again;
        }
        self.waiter.setWait();
        self.buffer.release();
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        lwip.acquire().lwip_accept(pcb);
        lwip.release();
        self.waiter.wait();
        // Retry: the accept callback has pushed a new fd into the buffer.
        return self.accept();
    }

    /// Read buffered payload bytes. Blocks while connected and empty,
    /// unless non-blocking (then Stream.Error.Again).
    pub fn read(self: *Self, buffer: []u8) Stream.Error!usize {
        // if not connected, simply return read buffer even if it is empty
        if (!self.is_connected) {
            const size = self.buffer.acquire().read(buffer);
            self.buffer.release();
            return size;
        }
        const size = self.buffer.acquire().read(buffer);
        if (size > 0) {
            self.buffer.release();
            return size;
        }
        if (self.isNonBlocking()) {
            self.buffer.release();
            return Stream.Error.Again;
        }
        self.waiter.setWait();
        self.buffer.release();
        self.waiter.wait();
        return self.read(buffer);
    }

    /// Queue up to lwIP's current send-buffer capacity; returns bytes
    /// queued. When lwIP has no room, flushes the NIC and returns Again.
    pub fn send(self: *Self, buffer: []u8) Error!usize {
        const locked_lwip = lwip.acquire();
        defer lwip.release();
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        const len = @min(buffer.len, locked_lwip.lwip_tcp_sndbuf(pcb));
        if (len == 0) {
            net.flush();
            return Error.Again;
        }
        const err = locked_lwip.lwip_send(pcb, buffer.ptr, len);
        if (err < 0) {
            log.warn.printf("lwip_send failed: {d}\n", .{err});
            return Error.Failed;
        }
        return len;
    }

    /// Start a TCP connect and block until notifyConnected wakes us.
    pub fn connect(self: *Self, ip_addr: *anyopaque, port: i32) Error!void {
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        self.waiter.setWait();
        const err = lwip.acquire().lwip_connect(pcb, ip_addr, port);
        lwip.release();
        if (err != 0) {
            return Error.Failed;
        }
        self.waiter.wait();
        return;
    }

    /// Half- or full-close. Once both directions are shut down, the fd is
    /// detached from the PCB so callbacks stop routing to this socket.
    pub fn shutdown(self: *Self, read_close: bool, write_close: bool) Error!void {
        if (self.alreadyClosed()) {
            return;
        }
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        const read_flag: i32 = if (read_close) 1 else 0;
        const write_flag: i32 = if (write_close) 1 else 0;
        // ensure releasing pcb and unsetting fd are done atomically
        const locked_lwip = lwip.acquire();
        defer lwip.release();
        const err = locked_lwip.tcp_shutdown(pcb, read_flag, write_flag);
        if (err != 0) {
            return Error.Failed;
        }
        if (read_close) {
            self.is_read_shutdown = true;
        }
        if (write_close) {
            self.is_write_shutdown = true;
        }
        if (self.alreadyClosed()) {
            locked_lwip.lwip_unset_fd(@as(*anyopaque, @ptrFromInt(self.pcb_addr)));
        }
        return;
    }

    /// Close the PCB (if still open) and release the receive buffer.
    /// NOTE(review): deinit/destroy use heap.runtime_allocator while new()
    /// built the buffer storage with the caller's allocator — confirm the
    /// two always match in practice.
    pub fn close(self: *Self) Error!void {
        if (!self.alreadyClosed()) {
            // ensure releasing pcb and unsetting fd are done atomically
            const locked_lwip = lwip.acquire();
            defer lwip.release();
            const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
            const err = locked_lwip.lwip_tcp_close(pcb);
            if (err != 0) {
                return Error.Failed;
            }
            locked_lwip.lwip_unset_fd(@as(*anyopaque, @ptrFromInt(self.pcb_addr)));
        }
        self.waiter.waiting = false;
        self.is_connected = false;
        self.buffer.acquire().deinit(heap.runtime_allocator);
        self.buffer.release();
        heap.runtime_allocator.destroy(@as(*util.RingBuffer, @alignCast(@ptrCast(self.buffer.ptr))));
    }

    /// Remote peer address, as reported by lwIP.
    pub fn getRemoteAddr(self: *Self) *IpAddr {
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        const addr = lwip.acquire().lwip_get_remote_ip(pcb);
        lwip.release();
        return addr;
    }

    /// Locally bound address, as reported by lwIP.
    pub fn getLocalAddr(self: *Self) *IpAddr {
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        const addr = lwip.acquire().lwip_get_local_ip(pcb);
        lwip.release();
        return addr;
    }

    pub fn getRemotePort(self: *Self) u16 {
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        const port = lwip.acquire().lwip_get_remote_port(pcb);
        lwip.release();
        return port;
    }

    pub fn getLocalPort(self: *Self) u16 {
        const pcb = @as(*anyopaque, @ptrFromInt(self.pcb_addr));
        const port = lwip.acquire().lwip_get_local_port(pcb);
        lwip.release();
        return port;
    }

    /// Record this socket's fd and hand lwIP a pointer to it so C-side
    /// callbacks can route events back through the stream table.
    pub fn setFd(self: *Self, fd: i32) void {
        self.fd = fd;
        lwip.acquire().lwip_set_fd(@as(*anyopaque, @ptrFromInt(self.pcb_addr)), &self.fd);
        lwip.release();
    }

    /// Bytes (or, for listeners, pending connections) ready to read;
    /// null means "would block".
    pub fn bytesCanRead(self: *Self) ?usize {
        const buf = self.buffer.acquire();
        defer self.buffer.release();
        const nbytes = buf.availableToRead();
        if (nbytes == 0) {
            if (!self.is_listening and !self.is_connected) {
                return 0;
            }
            return null;
        }
        // if the socket is listening, return the number of connections available
        if (self.is_listening) {
            return nbytes / 4;
        }
        return nbytes;
    }

    /// Free buffer space for writing; null means "would block".
    pub fn bytesCanWrite(self: *Self) ?usize {
        if (!self.is_listening and !self.is_connected) {
            return 0;
        }
        const buf = self.buffer.acquire();
        defer self.buffer.release();
        const nbytes = buf.availableToWrite();
        if (nbytes == 0) {
            return null;
        }
        return nbytes;
    }

    /// Both directions shut down.
    fn alreadyClosed(self: *Self) bool {
        return self.is_read_shutdown and self.is_write_shutdown;
    }

    /// Whether the WASI NonBlock fd flag is set.
    fn isNonBlocking(self: *Self) bool {
        return self.flags & types.FdFlag.NonBlock.toInt() != 0;
    }
};
/// Map a WASI address family to the numeric address-type selector passed
/// to lwip_new_tcp_pcb (values presumably mirror lwIP's IPADDR_TYPE_*
/// constants — confirm against the C glue).
fn wasiToLwipAddressType(t: wasi.AddressFamily) u8 {
    return switch (t) {
        .INET4 => 0,
        .INET6 => 6,
        .Unspec => 46,
    };
}
// Implemented on the C side: brings up the network stack with the given
// IPv4 configuration and MAC address.
pub extern fn init(ip: u32, netmask: u32, gateway: u32, macaddr: *[6]u8) void;

// Exported with C ABI; presumably invoked by the C-side stack to put an
// outgoing frame on the wire via the virtio-net driver.
export fn transmit(addr: [*c]u8, size: u32) callconv(.C) void {
    const data = addr[0..size];
    virito_net.virtio_net.transmit(data);
}
// C-ABI callback: append `len` bytes at `ptr` to the receive ring buffer
// of the socket behind `fd`. Returns 0 on success, -1 if the buffer write
// fails (e.g. full). Panics if `fd` is not a socket stream.
export fn socketPush(fd: i32, ptr: [*]u8, len: usize) i32 {
    const s = stream.fd_table.get(fd) orelse @panic("socketPush: invalid fd");
    var socket = switch (s.*) {
        stream.Stream.socket => &s.socket,
        else => @panic("socketPush: invalid fd"),
    };
    const buffer = ptr[0..len];
    const sock_buf = socket.buffer.acquire();
    defer socket.buffer.release();
    sock_buf.write(buffer) catch return -1;
    return 0;
}
// C-ABI callback: a listener (stream `fd`) accepted connection `pcb`.
// Wakes the listener, registers a new connected Socket in the fd table,
// and returns a pointer to that socket's fd field so the C side can route
// future events; null on allocation failure.
export fn notifyAccepted(pcb: *anyopaque, fd: i32) callconv(.C) ?*i32 {
    // unset waiter
    const s = stream.fd_table.get(fd) orelse @panic("notifyAccepted: invalid fd");
    var socket = switch (s.*) {
        stream.Stream.socket => &s.socket,
        else => @panic("notifyAccepted: invalid fd"),
    };
    socket.waiter.waiting = false;
    // create new socket
    var new_socket = Socket.newFromPcb(pcb, heap.runtime_allocator) catch return null;
    new_socket.is_connected = true;
    const new_fd = stream.fd_table.set(Stream{ .socket = new_socket }) catch return null;
    var set_stream = stream.fd_table.get(new_fd) orelse @panic("notifyConnected: new_socket is not set");
    const set_socket = &set_stream.socket;
    return &set_socket.*.fd;
}
// This function is called when in the lwIP receive callback.
// It notifies the socket that data is available by setting the waiter.
export fn notifyReceived(fd: i32) callconv(.C) void {
    const s = stream.fd_table.get(fd) orelse @panic("notifyConnected: invalid fd");
    var socket = switch (s.*) {
        stream.Stream.socket => &s.socket,
        else => @panic("notifyReceived: invalid fd"),
    };
    // This function is called from the interrupt handler,
    // so we don't need to make it atomic.
    socket.waiter.waiting = false;
}
// C-ABI callback: an outbound connect on stream `fd` completed. Marks the
// socket connected and wakes any blocked connect() caller.
export fn notifyConnected(fd: i32) callconv(.C) void {
    const s = stream.fd_table.get(fd) orelse @panic("notifyConnected: invalid fd");
    var socket = switch (s.*) {
        stream.Stream.socket => &s.socket,
        else => @panic("notifyConnected: invalid fd"),
    };
    socket.is_connected = true;
    socket.waiter.waiting = false;
}
// C-ABI callback: the peer closed the connection on stream `fd`. Clears
// the connected flag and wakes any blocked caller.
export fn notifyClosed(fd: i32) callconv(.C) void {
    // if the socket is already closed, just return
    const s = stream.fd_table.get(fd) orelse return;
    var socket = switch (s.*) {
        stream.Stream.socket => &s.socket,
        else => @panic("notifyClosed: invalid fd"),
    };
    socket.is_connected = false;
    socket.waiter.waiting = false;
}
// C-ABI callback: a fatal lwIP error occurred on stream `fd`. The error
// code is currently discarded; the socket is marked fully shut down and
// any blocked caller is woken.
export fn notifyError(fd: i32, err: i32) callconv(.C) void {
    _ = err;
    // if the socket is already closed, just return
    const s = stream.fd_table.get(fd) orelse return;
    var socket = switch (s.*) {
        stream.Stream.socket => &s.socket,
        else => @panic("notifyError: invalid fd"),
    };
    socket.waiter.waiting = false;
    socket.is_connected = false;
    socket.is_read_shutdown = true;
    socket.is_write_shutdown = true;
}
| https://raw.githubusercontent.com/mewz-project/mewz/0c6724f3f37552c3a36728fef9d7682ff3589586/src/tcpip.zig |
const std = @import("std");
const raylib = @import("src/build.zig");

/// Thin build script: delegates all target/step setup to raylib's own
/// build helper in src/build.zig.
pub fn build(b: *std.build.Builder) void {
    raylib.build(b);
}
| https://raw.githubusercontent.com/ishaan-bose/Cpp-Raytracing-Project/941f3f4260eddcb3d7458cb4ddbf4e0505be780e/raylib-master/build.zig |
//! Communication of Client and Server, also communication of operating system and this application.
const std = @import("std");
const os = std.os;
const net = std.net;
const testing = std.testing;
const assert = std.debug.assert;
const known_folders = @import("known-folders");
// Configuration consumed by the known-folders package at comptime.
pub const known_folders_config = .{
    .xdg_on_mac = true,
};

/// Available client/server transports; currently only Unix domain sockets.
pub const TransportKind = enum {
    un_socket,
};

/// Upper bound for a single wire packet in bytes (also the buffered-reader
/// capacity in CommunicationResources).
pub const max_packet_size: usize = 1024 * 8;
pub const max_method_size: usize = 1024;
pub const max_message_size: usize = max_packet_size;

// Client connect retry policy and server listen backlog.
const max_connect_retries = 50;
const connect_retry_delay = std.time.ns_per_ms * 5;
const listen_socket_backlog = 10;
/// Create, bind, and start listening on a Unix-domain stream socket at
/// `address`. Caller owns the returned socket and must close it.
pub fn bindUnixSocket(address: *net.Address) !os.socket_t {
    const socket = try os.socket(
        os.AF.UNIX,
        os.SOCK.STREAM | os.SOCK.CLOEXEC,
        // Should be PF.UNIX but it is only available as os.linux.PF.UNIX which is not
        // cross-compatible across OS. But the implementation says that PF and AF values
        // are same in this case since PF is redundant now and was a design precaution/mistake
        // made in the past.
        os.AF.UNIX,
    );
    errdefer os.closeSocket(socket);
    try os.bind(socket, &address.any, address.getOsSockLen());
    try os.listen(socket, listen_socket_backlog);
    return socket;
}
/// Connect to the Unix-domain socket at `address`, retrying for a bounded
/// time while the server is not yet listening (covers the race where the
/// client forks/starts before the server binds). Caller owns the socket.
pub fn connectToUnixSocket(address: *net.Address) !os.socket_t {
    const socket = try os.socket(
        os.AF.UNIX,
        os.SOCK.STREAM | os.SOCK.CLOEXEC,
        os.AF.UNIX,
    );
    errdefer os.closeSocket(socket);
    var connect_retries: u8 = max_connect_retries;
    while (true) {
        os.connect(socket, &address.any, address.getOsSockLen()) catch |err| switch (err) {
            error.ConnectionRefused, error.FileNotFound => {
                // If the server is not yet listening, wait a bit.
                if (connect_retries == 0) return err;
                std.time.sleep(connect_retry_delay);
                connect_retries -= 1;
                continue;
            },
            else => return err,
        };
        break;
    }
    return socket;
}
/// Builds the per-process socket path `<runtime_dir>/kisa/<pid>`, creating
/// the directory and removing any stale socket file from a previous run.
/// Caller owns the memory.
/// NOTE(review): uses os.linux.getpid(), so this path helper is
/// Linux-only despite the cross-platform intent elsewhere — confirm.
pub fn pathForUnixSocket(ally: std.mem.Allocator) ![]u8 {
    const runtime_dir = (try known_folders.getPath(
        ally,
        .runtime,
    )) orelse (try known_folders.getPath(
        ally,
        .app_menu,
    )) orelse return error.NoRuntimeDirectory;
    defer ally.free(runtime_dir);
    const subpath = "kisa";
    var path_builder = std.ArrayList(u8).init(ally);
    errdefer path_builder.deinit();
    try path_builder.appendSlice(runtime_dir);
    try path_builder.append(std.fs.path.sep);
    try path_builder.appendSlice(subpath);
    // The directory may already exist from an earlier run; that's fine.
    std.fs.makeDirAbsolute(path_builder.items) catch |err| switch (err) {
        error.PathAlreadyExists => {},
        else => return err,
    };
    const filename = try std.fmt.allocPrint(ally, "{d}", .{os.linux.getpid()});
    defer ally.free(filename);
    try path_builder.append(std.fs.path.sep);
    try path_builder.appendSlice(filename);
    // Remove a stale socket file left over from a crashed process.
    std.fs.deleteFileAbsolute(path_builder.items) catch |err| switch (err) {
        error.FileNotFound => {},
        else => return err,
    };
    return path_builder.toOwnedSlice();
}
/// Heap-allocates a net.Address for the Unix socket at `path`.
/// Caller owns the memory.
pub fn addressForUnixSocket(ally: std.mem.Allocator, path: []const u8) !*net.Address {
    const address = try ally.create(net.Address);
    errdefer ally.destroy(address);
    address.* = try net.Address.initUnix(path);
    return address;
}
/// Per-connection transport state; a tagged union keyed by TransportKind.
pub const CommunicationResources = union(TransportKind) {
    un_socket: struct {
        socket: os.socket_t,
        buffered_reader: SocketBufferedReader,
    },

    const Self = @This();

    // std.io.Reader adapter over a raw socket fd using recv().
    const SocketReader = std.io.Reader(os.socket_t, os.RecvFromError, socketRead);
    fn socketRead(socket: os.socket_t, buffer: []u8) os.RecvFromError!usize {
        return try os.recv(socket, buffer, 0);
    }
    const SocketBufferedReader = std.io.BufferedReader(max_packet_size, SocketReader);

    /// Wrap an already-connected Unix socket for buffered reading.
    pub fn initWithUnixSocket(socket: os.socket_t) Self {
        var socket_stream = SocketReader{ .context = socket };
        var buffered_reader = SocketBufferedReader{ .unbuffered_reader = socket_stream };
        return Self{ .un_socket = .{
            .socket = socket,
            .buffered_reader = buffered_reader,
        } };
    }
};
/// `CommunicationContainer` must be a container which has `comms` field
/// of type `CommunicationResources`.
/// Mixin providing packet framing: messages are serialized, delimited by a
/// single `packet_delimiter` byte, and read back with a buffered reader.
pub fn CommunicationMixin(comptime CommunicationContainer: type) type {
    return struct {
        const Self = CommunicationContainer;
        /// ASCII: end of transmission block. (isn't this the perfect character to send?)
        const packet_delimiter = 0x17;

        pub fn initWithUnixSocket(socket: os.socket_t) Self {
            return Self{ .comms = CommunicationResources.initWithUnixSocket(socket) };
        }

        /// Close the underlying transport.
        pub fn deinitComms(self: Self) void {
            switch (self.comms) {
                .un_socket => |s| {
                    os.closeSocket(s.socket);
                },
            }
        }

        // TODO: allow the caller to pass `packet_buf`.
        /// Sends a message, `message` must implement `generate` receiving a buffer and putting
        /// []u8 contents into it.
        pub fn send(self: Self, message: anytype) !void {
            var packet_buf: [max_packet_size]u8 = undefined;
            const packet = try message.generate(&packet_buf);
            // Append the delimiter so the receiver can frame the packet.
            packet_buf[packet.len] = packet_delimiter;
            try self.sendPacket(packet_buf[0 .. packet.len + 1]);
        }

        /// Send raw, already-delimited bytes over the transport.
        pub fn sendPacket(self: Self, packet: []const u8) !void {
            // std.debug.print("sending: {s}\n", .{packet});
            switch (self.comms) {
                .un_socket => |s| {
                    const bytes_sent = try os.send(s.socket, packet, 0);
                    assert(packet.len == bytes_sent);
                },
            }
        }

        /// Reads a message of type `Message` with memory stored inside `out_buf`, `Message` must
        /// implement `parse` taking a buffer and a string, returning `Message` object.
        pub fn recv(self: *Self, comptime Message: type, out_buf: []u8) !?Message {
            var packet_buf: [max_packet_size]u8 = undefined;
            if (try self.readPacket(&packet_buf)) |packet| {
                return try Message.parse(out_buf, packet);
            } else {
                return null;
            }
        }

        /// Returns the slice with the length of a received packet.
        /// Null on clean end-of-stream or an empty packet.
        pub fn readPacket(self: *Self, buf: []u8) !?[]u8 {
            switch (self.comms) {
                .un_socket => |*s| {
                    var stream = s.buffered_reader.reader();
                    var read_buf = stream.readUntilDelimiter(buf, packet_delimiter) catch |e| switch (e) {
                        error.EndOfStream => return null,
                        else => return e,
                    };
                    if (read_buf.len == 0) return null;
                    // A full buffer means the delimiter never fit: oversized packet.
                    if (buf.len == read_buf.len) return error.MessageTooBig;
                    return read_buf;
                },
            }
        }
    };
}
pub const FdType = enum {
    /// Listens for incoming client connections to the server.
    listen_socket,
    /// Used for communication between client and server.
    connection_socket,
};

/// One entry in the Watcher's descriptor table.
pub const WatcherFd = struct {
    /// Native to the OS structure which is used for polling.
    pollfd: os.pollfd,
    /// Metadata for identifying the type of a file descriptor.
    ty: FdType,
    /// External identifier which is interpreted by the user of this API.
    id: u32,
};
/// Poll(2)-based readiness multiplexer over listen and connection sockets.
/// Owns the registered file descriptors: they are closed on removal and
/// on deinit.
pub const Watcher = struct {
    ally: std.mem.Allocator,
    /// Array of file descriptor data. Must not be modified directly, only with provided API.
    fds: std.MultiArrayList(WatcherFd) = std.MultiArrayList(WatcherFd){},
    /// `poll` call can return several events at a time, this is their total count per call.
    pending_events_count: usize = 0,
    /// When `poll` returns several events, this cursor is used for subsequent searches
    /// inside `pollfd` array.
    pending_events_cursor: usize = 0,

    const Self = @This();

    /// Successful poll outcome: the readable fd plus its metadata.
    const Result = struct { fd: os.fd_t, id: u32, ty: FdType, fd_index: usize };
    const PollResult = union(enum) {
        success: Result,
        err: struct { id: u32 },
    };

    pub fn init(ally: std.mem.Allocator) Self {
        return Self{ .ally = ally };
    }

    /// Closes every registered fd and frees the backing storage.
    pub fn deinit(self: *Self) void {
        for (self.fds.items(.pollfd)) |pollfd| os.close(pollfd.fd);
        self.fds.deinit(self.ally);
    }

    /// Adds listen socket which is used for listening for other sockets' connections.
    pub fn addListenSocket(self: *Self, fd: os.fd_t, id: u32) !void {
        try self.addFd(fd, os.POLL.IN, .listen_socket, id);
    }

    /// Adds connection socket which is used for communication between server and client.
    pub fn addConnectionSocket(self: *Self, fd: os.fd_t, id: u32) !void {
        try self.addFd(fd, os.POLL.IN, .connection_socket, id);
    }

    /// Linear search by external id; null when not registered.
    pub fn findFileDescriptor(self: Self, id: u32) ?Result {
        for (self.fds.items(.id)) |fds_id, idx| {
            if (fds_id == id) {
                const pollfds = self.fds.items(.pollfd);
                const tys = self.fds.items(.ty);
                return Result{
                    .fd = pollfds[idx].fd,
                    .id = id,
                    .ty = tys[idx],
                    .fd_index = idx,
                };
            }
        }
        return null;
    }

    /// Removes any file descriptor with `id`, `id` must exist in the current
    /// array of ids.
    pub fn removeFileDescriptor(self: *Self, id: u32) void {
        for (self.fds.items(.id)) |fds_id, idx| {
            if (fds_id == id) {
                self.removeFd(idx);
                return;
            }
        }
    }

    fn addFd(self: *Self, fd: os.fd_t, events: i16, ty: FdType, id: u32) !void {
        // Only add ready-for-reading notifications with current assumptions of `pollReadable`.
        assert(events == os.POLL.IN);
        // Ensure the `id` is unique across all elements.
        for (self.fds.items(.id)) |existing_id| assert(id != existing_id);
        try self.fds.append(self.ally, .{
            .pollfd = os.pollfd{
                .fd = fd,
                .events = events,
                .revents = 0,
            },
            .ty = ty,
            .id = id,
        });
    }

    // Closes the fd and removes its entry via swapRemove (order changes).
    fn removeFd(self: *Self, index: usize) void {
        os.close(self.fds.items(.pollfd)[index].fd);
        self.fds.swapRemove(index);
    }

    /// Returns a readable file descriptor or `null` if timeout has expired. If timeout is -1,
    /// always returns non-null result. Assumes that we don't have any other descriptors
    /// other than readable and that this will block if no readable file descriptors are
    /// available.
    pub fn pollReadable(self: *Self, timeout: i32) !?PollResult {
        if ((try self.poll(timeout)) == 0) return null;
        const pollfds = self.fds.items(.pollfd);
        const ids = self.fds.items(.id);
        const tys = self.fds.items(.ty);
        while (self.pending_events_cursor < pollfds.len) : (self.pending_events_cursor += 1) {
            const revents = pollfds[self.pending_events_cursor].revents;
            if (revents != 0) {
                self.pending_events_count -= 1;
                if (revents & (os.POLL.ERR | os.POLL.HUP | os.POLL.NVAL) != 0) {
                    // `pollfd` is removed by swapping current one with the last one, so the cursor
                    // stays the same.
                    const result = PollResult{ .err = .{
                        .id = ids[self.pending_events_cursor],
                    } };
                    self.removeFd(self.pending_events_cursor);
                    return result;
                } else if (revents & os.POLL.IN != 0) {
                    const result = PollResult{ .success = .{
                        .fd = pollfds[self.pending_events_cursor].fd,
                        .id = ids[self.pending_events_cursor],
                        .ty = tys[self.pending_events_cursor],
                        .fd_index = self.pending_events_cursor,
                    } };
                    self.pending_events_cursor += 1;
                    return result;
                }
            }
        }
        // NOTE(review): reachable if pending_events_count > 0 but the cursor
        // walks off the end of pollfds (e.g. after removals); relies on
        // poll() bookkeeping staying consistent — verify.
        unreachable;
    }

    /// Fills current `fds` array with result events which can be inspected.
    fn poll(self: *Self, timeout: i32) !usize {
        if (self.pending_events_count == 0) {
            self.pending_events_count = try os.poll(self.fds.items(.pollfd), timeout);
            self.pending_events_cursor = 0;
        }
        return self.pending_events_count;
    }
};
// Minimal fixtures exercising CommunicationMixin's send/recv contract.
const MyContainer = struct {
    comms: CommunicationResources,
    usingnamespace CommunicationMixin(@This());
};
const MyMessage = struct {
    contents: []u8,
    const Self = @This();
    // Serialize: copy contents into out_buf, return the used slice.
    fn generate(message: Self, out_buf: []u8) ![]u8 {
        const str = message.contents;
        std.mem.copy(u8, out_buf, str);
        return out_buf[0..str.len];
    }
    // Deserialize: copy the packet into out_buf and wrap it.
    fn parse(out_buf: []u8, string: []const u8) !Self {
        const str = string;
        std.mem.copy(u8, out_buf, str);
        return Self{ .contents = out_buf[0..str.len] };
    }
};
// Forks a server (child) and client (parent); the client writes two
// messages in a single send so the server must split them at the
// packet delimiter.
test "transport/fork1: communication via un_socket" {
    const path = try pathForUnixSocket(testing.allocator);
    defer testing.allocator.free(path);
    const address = try addressForUnixSocket(testing.allocator, path);
    defer testing.allocator.destroy(address);
    const str1 = "generated string1";
    const str2 = "gerted stng2";
    const pid = try os.fork();
    if (pid == 0) {
        // Child: server side — expects the two framed messages in order.
        const listen_socket = try bindUnixSocket(address);
        const accepted_socket = try os.accept(listen_socket, null, null, 0);
        var server = MyContainer.initWithUnixSocket(accepted_socket);
        var buf: [256]u8 = undefined;
        {
            const message = try server.recv(MyMessage, &buf);
            std.debug.assert(message != null);
            try testing.expectEqualStrings(str1, message.?.contents);
        }
        {
            const message = try server.recv(MyMessage, &buf);
            std.debug.assert(message != null);
            try testing.expectEqualStrings(str2, message.?.contents);
        }
    } else {
        const client = MyContainer.initWithUnixSocket(try connectToUnixSocket(address));
        var buf: [200]u8 = undefined;
        // Attempt to send 2 packets simultaneously.
        // 0x17 is packet_delimiter, so this is two frames in one send().
        const str = str1 ++ "\x17" ++ str2;
        std.mem.copy(u8, &buf, str);
        const message = MyMessage{ .contents = buf[0..str.len] };
        try client.send(message);
    }
}
// Forks server (child) and client (parent); both drive their sockets
// through a Watcher. Server handles: accept, client payload, then the
// hangup (as a poll error event). Client waits for the server's "1" ack
// before sending its payload.
test "transport/fork2: client and server both poll events with watcher" {
    const path = try pathForUnixSocket(testing.allocator);
    defer testing.allocator.free(path);
    const address = try addressForUnixSocket(testing.allocator, path);
    defer testing.allocator.destroy(address);
    const client_message = "hello from client";
    const pid = try os.fork();
    if (pid == 0) {
        // Server
        const listen_socket = try bindUnixSocket(address);
        var watcher = Watcher.init(testing.allocator);
        defer watcher.deinit();
        try watcher.addListenSocket(listen_socket, 0);
        // Three events expected: accept, client data, client hangup.
        var cnt: u8 = 3;
        while (cnt > 0) : (cnt -= 1) {
            switch ((try watcher.pollReadable(-1)).?) {
                .success => |polled_data| {
                    switch (polled_data.ty) {
                        .listen_socket => {
                            const accepted_socket = try os.accept(polled_data.fd, null, null, 0);
                            try watcher.addConnectionSocket(accepted_socket, 1);
                            // Ack so the client knows it can start sending.
                            const bytes_sent = try os.send(accepted_socket, "1", 0);
                            try testing.expectEqual(@as(usize, 1), bytes_sent);
                        },
                        .connection_socket => {
                            var buf: [256]u8 = undefined;
                            const bytes_read = try os.recv(polled_data.fd, &buf, 0);
                            if (bytes_read != 0) {
                                try testing.expectEqualStrings(client_message, buf[0..bytes_read]);
                            } else {
                                // This should have been handled by POLLHUP event and union is `err`.
                                unreachable;
                            }
                        },
                    }
                },
                .err => {},
            }
        }
    } else {
        // Client
        const message = try std.fmt.allocPrint(testing.allocator, client_message, .{});
        defer testing.allocator.free(message);
        const socket = try connectToUnixSocket(address);
        var watcher = Watcher.init(testing.allocator);
        defer watcher.deinit();
        try watcher.addConnectionSocket(socket, 0);
        // Wait for the server's one-byte ack before sending.
        switch ((try watcher.pollReadable(-1)).?) {
            .success => |polled_data| {
                var buf: [256]u8 = undefined;
                const bytes_read = try os.recv(polled_data.fd, &buf, 0);
                try testing.expectEqualStrings("1", buf[0..bytes_read]);
            },
            .err => unreachable,
        }
        const bytes_sent = try os.send(socket, message, 0);
        try testing.expectEqual(message.len, bytes_sent);
    }
}
| https://raw.githubusercontent.com/greenfork/kisa/68f344bd2fc28ccdd850dec14859a1995f8cb97f/not_good_enough/src/transport.zig |
//
// It has probably not escaped your attention that we are no
// longer capturing a return value from foo() because the 'async'
// keyword returns the frame instead.
//
// One way to solve this is to use a global variable.
//
// See if you can make this program print "1 2 3 4 5".
//
const print = @import("std").debug.print;
var global_counter: i32 = 0;
// NOTE: uses Zig's async frames (`async`/`resume`), available only in
// pre-0.11 compilers.
pub fn main() void {
    // `async foo()` runs foo() up to its first suspend point, so
    // global_counter is already 1 when the loop condition is first checked —
    // the program prints "1 2 3 4 5", not "0 1 ...".
    var foo_frame = async foo();
    while (global_counter <= 5) {
        print("{} ", .{global_counter});
        // Resume foo() so it can bump the counter and suspend again.
        resume foo_frame;
    }
    print("\n", .{});
}
/// Increments the global counter once per resume, suspending in between.
/// The loop never exits; the frame is simply abandoned when main() returns.
fn foo() void {
    while (true) {
        global_counter += 1;
        suspend {}
    }
}
| https://raw.githubusercontent.com/peteraba/ziglings/296745f8e3650ad54900ba62bbae032f169441cb/exercises/087_async4.zig |
const std = @import("std");
const sdl = @cImport({
@cInclude("SDL2/SDL.h");
});
/// CHIP-8 hex keypad driven by SDL keyboard events.
///
/// The 16 CHIP-8 keys (0x0-0xF) are mapped onto the conventional
/// 1234 / qwer / asdf / zxcv block of a QWERTY keyboard:
///
///     1 2 3 C        1 2 3 4
///     4 5 6 D   <-   q w e r
///     7 8 9 E        a s d f
///     A 0 B F        z x c v
pub const Keyboard = struct {
    const Self = @This();

    /// Current pressed-state of each CHIP-8 key, indexed 0x0-0xF.
    keypad: [16]bool,
    /// Snapshot of `keypad` taken on the previous `handleInput` call,
    /// used to detect key releases.
    lastState: [16]bool,

    pub fn init() Self {
        return Keyboard {
            .keypad = [_]bool{false} ** 16,
            .lastState = [_]bool{false} ** 16
        };
    }

    /// Snapshot the current key state so releases can be detected next frame.
    pub fn handleInput(self: *Self) void {
        var index: u16 = 0;
        while (index < self.keypad.len) {
            self.lastState[index] = self.keypad[index];
            index += 1;
        }
    }

    /// Map an SDL keycode to its CHIP-8 keypad index, or null for
    /// unmapped keys. Shared by key-down and key-up handling so the two
    /// tables cannot drift apart.
    fn keyIndex(keycode: sdl.SDL_Keycode) ?u8 {
        return switch (keycode) {
            sdl.SDLK_1 => 0x1,
            sdl.SDLK_2 => 0x2,
            sdl.SDLK_3 => 0x3,
            sdl.SDLK_4 => 0xC,
            sdl.SDLK_q => 0x4,
            sdl.SDLK_w => 0x5,
            sdl.SDLK_e => 0x6,
            sdl.SDLK_r => 0xD,
            // BUGFIX: 'a' previously mapped to 0x8 (duplicating 's'),
            // leaving CHIP-8 key 0x7 unreachable.
            sdl.SDLK_a => 0x7,
            sdl.SDLK_s => 0x8,
            sdl.SDLK_d => 0x9,
            sdl.SDLK_f => 0xE,
            sdl.SDLK_z => 0xA,
            sdl.SDLK_x => 0x0,
            sdl.SDLK_c => 0xB,
            sdl.SDLK_v => 0xF,
            else => null
        };
    }

    pub fn handleKeyDown(self: *Self, keycode: sdl.SDL_Keycode) void {
        if (keyIndex(keycode)) |key| {
            self.keypad[key] = true;
        }
    }

    pub fn handleKeyUp(self: *Self, keycode: sdl.SDL_Keycode) void {
        if (keyIndex(keycode)) |key| {
            self.keypad[key] = false;
        }
    }

    pub fn isKeyPressed(self: *Self, key: u8) bool {
        return self.keypad[key];
    }

    /// True if `key` was pressed at the last `handleInput` snapshot and is
    /// no longer pressed now.
    pub fn hasKeyBeenReleased(self: *Self, key: u8) bool {
        return self.lastState[key] and self.keypad[key] == false;
    }

    pub fn cleanUp(_: *Self) void {
    }
};
/// Returns a `rows` x `columns` character-grid type that renders characters
/// incrementally onto a frame buffer using a sprite-sheet font.
/// Clients write into a pending buffer; `limitedUpdate` draws only the cells
/// that differ from what is already on screen.
pub fn textGridOf(rows: u32, columns: u32) type {
    // Return the type directly instead of binding it to a mutable local;
    // the binding was never mutated and modern Zig rejects such a `var`.
    return struct {
        const Self = @This();
        font: *Spritesheet,
        // Top-left corner of the grid in frame-buffer pixels.
        fb_x: u32,
        fb_y: u32,
        // Characters written by clients but not yet drawn.
        pending_buf: [rows * columns]u8,
        pending_index: u32,
        // Characters currently drawn on the frame buffer.
        rendered_buf: [rows * columns]u8,
        rendered_index: u32,

        /// Append bytes at the cursor, wrapping at the end of the grid.
        fn write(self: *Self, bytes: []const u8) void {
            for (bytes) |c| {
                self.pending_buf[self.pending_index] = c;
                self.pending_index += 1;
                if (self.pending_index == self.pending_buf.len) {
                    self.pending_index = 0;
                }
            }
        }

        /// Write bytes, then pad with spaces up to the next line boundary
        /// (wrapping back to the top after the last line).
        fn line(self: *Self, bytes: []const u8) void {
            self.write(bytes);
            const next_line_index = (self.pending_index + columns) / columns * columns;
            while (self.pending_index < next_line_index) : (self.pending_index += 1) {
                self.pending_buf[self.pending_index] = ' ';
            }
            if (self.pending_index == self.pending_buf.len) {
                self.pending_index = 0;
            }
        }

        /// Move the cursor to the top-left cell.
        fn home(self: *Self) void {
            self.move(0, 0);
        }

        /// Move the cursor to (row, column); panics when out of range.
        fn move(self: *Self, row: u32, column: u32) void {
            self.pending_index = row * columns + column;
            if (self.pending_index >= self.pending_buf.len) {
                panicf("TextGrid move ({}, {}) does not fit in ({}, {})", row, column, rows, columns);
            }
        }

        /// Scan up to `scan_limit` cells and redraw at most `render_limit`
        /// of those whose pending character differs from the rendered one.
        /// The scan position persists across calls, spreading draw work
        /// over multiple frames.
        fn limitedUpdate(self: *Self, render_limit: u32, scan_limit: u32) void {
            var scanned: u32 = 0;
            var rendered: u32 = 0;
            while (rendered < render_limit and scanned < scan_limit) : (scanned += 1) {
                const pending = self.pending_buf[self.rendered_index];
                if (pending != self.rendered_buf[self.rendered_index]) {
                    const row = self.rendered_index / columns;
                    const column = self.rendered_index - row * columns;
                    const fb_x = self.fb_x + column * self.font.sprite_width;
                    const fb_y = self.fb_y + row * self.font.sprite_height;
                    self.font.draw(pending, fb_x, fb_y);
                    self.rendered_buf[self.rendered_index] = pending;
                    rendered += 1;
                }
                self.rendered_index += 1;
                if (self.rendered_index == self.rendered_buf.len) {
                    self.rendered_index = 0;
                }
            }
        }

        /// Initialize the grid at pixel position (fb_x, fb_y), with both
        /// buffers cleared to spaces.
        fn init(self: *Self, font: *Spritesheet, fb_x: u32, fb_y: u32) void {
            self.font = font;
            self.fb_x = fb_x;
            self.fb_y = fb_y;
            self.pending_index = 0;
            self.rendered_index = 0;
            var i: u32 = 0;
            while(i < self.rendered_buf.len) : (i += 1) {
                self.rendered_buf[i] = ' ';
                self.pending_buf[i] = ' ';
            }
        }
    };
}
const arm = @import("arm_assembly_code.zig");
const math = @import("std").math;
const panicf = arm.panicf;
const Spritesheet = @import("video_core_frame_buffer.zig").Spritesheet;
| https://raw.githubusercontent.com/markfirmware/zig-bare-metal-raspberry-pi/0f0275138ee70a901f58f607d6dee175fae9632f/src/text_grid.zig |
const InvocationRequest = @This();
const std = @import("std");
/// The user's payload represented as a UTF-8 string.
payload: ?[:0]const u8 = null,
/// An identifier unique to the current invocation.
request_id: ?[:0]const u8 = null,
/// X-Ray tracing ID of the current invocation.
xray_trace_id: ?[:0]const u8 = null,
/// Information about the client application and device when invoked through the AWS Mobile SDK.
client_context: ?[:0]const u8 = null,
/// Information about the Amazon Cognito identity provider when invoked through the AWS Mobile SDK.
cognito_identity: ?[:0]const u8 = null,
/// The ARN requested. This can be different in each invoke that executes the same version.
function_arn: ?[:0]const u8 = null,
/// Function execution deadline counted in milliseconds since the Unix epoch.
deadline: i64 = 0,
/// Returns the number of milliseconds left before lambda terminates the
/// current execution, measured against the invocation `deadline`.
pub fn getTimeRemaining(self: *InvocationRequest) i64 {
    const now_ms = std.time.milliTimestamp();
    return self.deadline - now_ms;
}
test "InvocationRequest" {
    const expect = std.testing.expect;
    // A deadline 10 seconds in the future must leave positive time remaining.
    var request = InvocationRequest{
        .payload = "BODY",
        .request_id = "request_id",
        .deadline = std.time.milliTimestamp() + 10000,
        .xray_trace_id = "xray_trace_id",
        .client_context = "client_context",
        .cognito_identity = "cognito_identity",
        .function_arn = "function_arn",
    };
    try expect(request.getTimeRemaining() > 0);
}
| https://raw.githubusercontent.com/gitusel/aws-lambda-zig/9526867e32fb08519f26a46f73777cdce3313e8e/src/lambda_runtime/InvocationRequest.zig |
const zignite = @import("../zignite.zig");
const expect = @import("std").testing.expect;
const ConsumerType = @import("consumer_type.zig").ConsumerType;
// Exercises `all` over finite, empty, and infinite producers.
test "all: odd" {
    const odd = struct {
        pub fn odd(x: i32) bool {
            return @mod(x, 2) == 1;
        }
    }.odd;
    try expect(zignite.fromSlice(i32, &[_]i32{ 1, 3, 5 }).all(odd));
    // Vacuous truth: an empty producer satisfies `all`.
    try expect(zignite.empty(i32).all(odd));
    try expect(!zignite.range(i32, 1, 5).all(odd));
    // `all` returns on the first failing value, so even the infinite
    // `repeat` producer terminates here.
    try expect(!zignite.repeat(i32, 2).all(odd));
}
/// Consumer that returns true iff every produced value satisfies `predicate`.
/// Producer exhaustion without a counterexample yields true (vacuous truth);
/// the first failing value returns false without consuming further.
pub fn All(comptime T: type, comptime predicate: fn (value: T) bool) type {
    return struct {
        pub const Type = ConsumerType(T, @This(), bool);
        pub const init = Type.State{};
        pub fn next(event: Type.Event) Type.Action {
            return switch (event.tag) {
                // Producer exhausted: every value (possibly none) passed.
                ._break => Type.Action._return(init, true),
                // No value this step; keep waiting.
                ._continue => Type.Action._await(init),
                ._yield => |v| await_or_return(v),
            };
        }
        pub const deinit = Type.nop;
        inline fn await_or_return(value: T) Type.Action {
            // Force-inline the predicate call for each yielded value.
            const a_i = .{ .modifier = .always_inline };
            if (@call(a_i, predicate, .{value})) {
                return Type.Action._await(init);
            } else {
                // First counterexample: stop consuming, return false.
                return Type.Action._return(init, false);
            }
        }
    };
}
| https://raw.githubusercontent.com/shunkeen/zignite/840dcda178be270a2e7eb273aed98e3740c82f22/src/consumer/all.zig |
pub const LndConf = @import("lightning/LndConf.zig");
pub const lndhttp = @import("lightning/lndhttp.zig");
test {
    // Reference all public declarations so tests nested in the re-exported
    // modules are compiled and run by `zig test`.
    const std = @import("std");
    std.testing.refAllDecls(@This());
}
| https://raw.githubusercontent.com/nakamochi/ndg/538ecd957c3f989485b0f8f6982c9cd1817dc56b/src/lightning.zig |
//! Support: X ∈ (-∞,∞)
//!
//! Parameters:
//! - μ: `location` ∈ (-∞,∞)
//! - σ: `scale` ∈ ( 0,∞)
const std = @import("std");
const assert = std.debug.assert;
const isFinite = std.math.isFinite;
const isNan = std.math.isNan;
const inf = std.math.inf(f64);
pub const discrete = false;
/// Probability density function: f(x) = 1 / (πσ (1 + ((x - μ) / σ)^2)).
pub fn density(x: f64, location: f64, scale: f64) f64 {
    assert(isFinite(location) and isFinite(scale));
    assert(scale > 0);
    assert(!isNan(x));
    const standardized = (x - location) / scale;
    const denominator = std.math.pi * scale * (1 + standardized * standardized);
    return 1 / denominator;
}
/// Cumulative distribution function: F(q) = 0.5 + arctan((q - μ) / σ) / π.
pub fn probability(q: f64, location: f64, scale: f64) f64 {
    assert(isFinite(location) and isFinite(scale));
    assert(scale > 0);
    assert(!isNan(q));
    const standardized = (q - location) / scale;
    return std.math.atan(standardized) / std.math.pi + 0.5;
}
/// Quantile (inverse CDF): Q(p) = μ + σ tan(π (p - 0.5)).
/// The endpoints p = 0 and p = 1 map to -∞ and +∞ respectively.
pub fn quantile(p: f64, location: f64, scale: f64) f64 {
    assert(isFinite(location) and isFinite(scale));
    assert(scale > 0);
    assert(0 <= p and p <= 1);
    if (p == 0) return -inf;
    if (p == 1) return inf;
    const tail = @tan(std.math.pi * (p - 0.5));
    return location + scale * tail;
}
/// Draws one Cauchy(μ, σ) variate by inverse-transform sampling of a
/// uniform deviate.
pub fn random(generator: std.Random, location: f64, scale: f64) f64 {
    assert(isFinite(location) and isFinite(scale));
    assert(scale > 0);
    const standard = @tan(std.math.pi * generator.float(f64));
    return location + scale * standard;
}
/// Fills `buffer` with independent Cauchy(μ, σ) variates (one uniform draw
/// per element, in order) and returns the same slice.
pub fn fill(buffer: []f64, generator: std.Random, location: f64, scale: f64) []f64 {
    assert(isFinite(location) and isFinite(scale));
    assert(scale > 0);
    for (buffer) |*slot| {
        slot.* = location + scale * @tan(std.math.pi * generator.float(f64));
    }
    return buffer;
}
const expectEqual = std.testing.expectEqual;
const expectApproxEqRel = std.testing.expectApproxEqRel;
const eps = 10 * std.math.floatEps(f64); // 2.22 × 10^-15
// zig fmt: off
// PDF spot checks against precomputed standard-Cauchy values.
test density {
    try expectEqual(0, density(-inf, 0, 1));
    try expectEqual(0, density( inf, 0, 1));
    try expectApproxEqRel(0.3183098861837906, density(0, 0, 1), eps);
    try expectApproxEqRel(0.1591549430918953, density(1, 0, 1), eps);
    try expectApproxEqRel(0.0636619772367581, density(2, 0, 1), eps);
}
// CDF spot checks against precomputed standard-Cauchy values.
test probability {
    try expectEqual(0, probability(-inf, 0, 1));
    try expectEqual(1, probability( inf, 0, 1));
    try expectApproxEqRel(0.5 , probability(0, 0, 1), eps);
    try expectApproxEqRel(0.75 , probability(1, 0, 1), eps);
    try expectApproxEqRel(0.8524163823495667, probability(2, 0, 1), eps);
}
// Inverse-CDF spot checks, including the infinite endpoints at p = 0 and 1.
test quantile {
    try expectEqual (-inf , quantile(0 , 0, 1) );
    try expectApproxEqRel(-1.3763819204711736, quantile(0.2, 0, 1), eps);
    try expectApproxEqRel(-0.3249196962329063, quantile(0.4, 0, 1), eps);
    try expectApproxEqRel( 0.3249196962329066, quantile(0.6, 0, 1), eps);
    try expectApproxEqRel( 1.3763819204711740, quantile(0.8, 0, 1), eps);
    try expectEqual ( inf , quantile(1 , 0, 1) );
}
| https://raw.githubusercontent.com/PauloCampana/random_variable/fcac8c4fcc43072380b79829359555956258b37f/src/distribution/cauchy.zig |
/// mesh data imported from zmesh.Shape and model file
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const jok = @import("../jok.zig");
const sdl = jok.sdl;
const internal = @import("internal.zig");
const Vector = @import("Vector.zig");
const TriangleRenderer = @import("TriangleRenderer.zig");
const ShadingMethod = TriangleRenderer.ShadingMethod;
const Camera = @import("Camera.zig");
const lighting = @import("lighting.zig");
const zmath = jok.zmath;
const zmesh = jok.zmesh;
const Self = @This();
/// Errors produced while importing model data.
pub const Error = error{
    InvalidFormat,
    InvalidAnimation,
};
/// Per-draw rendering options.
pub const RenderOption = struct {
    /// Overrides the sub-mesh's own texture when set.
    texture: ?sdl.Texture = null,
    color: sdl.Color = sdl.Color.white,
    shading_method: ShadingMethod = .gouraud,
    cull_faces: bool = true,
    lighting: ?lighting.LightingOption = null,
};
pub const GltfNode = zmesh.io.zcgltf.Node;
/// A node in the model hierarchy: a local TRS transform, a cached world
/// matrix, zero or more sub-meshes, and optional skinning data.
pub const Node = struct {
    /// Geometry and per-vertex attributes of one primitive, stored in the
    /// owning mesh's arena.
    pub const SubMesh = struct {
        mesh: *Self,
        indices: std.ArrayList(u32),
        positions: std.ArrayList([3]f32),
        normals: std.ArrayList([3]f32),
        colors: std.ArrayList(sdl.Color),
        texcoords: std.ArrayList([2]f32),
        joints: std.ArrayList([4]u8),
        weights: std.ArrayList([4]f32),
        // {min_x, min_y, min_z, max_x, max_y, max_z}, set by computeAabb.
        aabb: ?[6]f32,
        // Key into `mesh.textures` (a pointer value); 0 means no texture.
        tex_id: usize,
        pub fn init(allocator: std.mem.Allocator, mesh: *Self) SubMesh {
            return .{
                .mesh = mesh,
                .indices = std.ArrayList(u32).init(allocator),
                .positions = std.ArrayList([3]f32).init(allocator),
                .normals = std.ArrayList([3]f32).init(allocator),
                .colors = std.ArrayList(sdl.Color).init(allocator),
                .texcoords = std.ArrayList([2]f32).init(allocator),
                .joints = std.ArrayList([4]u8).init(allocator),
                .weights = std.ArrayList([4]f32).init(allocator),
                .aabb = null,
                .tex_id = 0,
            };
        }
        /// Push attributes data. Indices are rebased onto the vertices
        /// already stored. Every appended batch must supply the same set of
        /// optional attributes as previous batches, otherwise
        /// `error.InvalidFormat` is returned.
        pub fn appendAttributes(
            self: *SubMesh,
            indices: []u32,
            positions: [][3]f32,
            normals: ?[][3]f32,
            colors: ?[]sdl.Color,
            texcoords: ?[][2]f32,
            joints: ?[][4]u8,
            weights: ?[][4]f32,
        ) !void {
            if (indices.len == 0) return;
            assert(@rem(indices.len, 3) == 0);
            assert(if (normals) |ns| positions.len == ns.len else true);
            assert(if (texcoords) |ts| positions.len == ts.len else true);
            assert(if (joints) |js| positions.len == js.len else true);
            assert(if (weights) |ws| positions.len == ws.len else true);
            // Reject batches whose optional attributes don't match what was
            // appended before (all-or-nothing per attribute).
            if ((self.normals.items.len > 0 and normals == null) or
                (self.indices.items.len > 0 and self.normals.items.len == 0 and normals != null) or
                (self.colors.items.len > 0 and colors == null) or
                (self.indices.items.len > 0 and self.colors.items.len == 0 and colors != null) or
                (self.texcoords.items.len > 0 and texcoords == null) or
                (self.indices.items.len > 0 and self.texcoords.items.len == 0 and texcoords != null) or
                (self.joints.items.len > 0 and joints == null) or
                (self.indices.items.len > 0 and self.joints.items.len == 0 and joints != null) or
                (self.weights.items.len > 0 and weights == null) or
                (self.indices.items.len > 0 and self.weights.items.len == 0 and weights != null))
            {
                return error.InvalidFormat;
            }
            // Rebase incoming indices past the already-stored vertices.
            const index_offset = @as(u32, @intCast(self.positions.items.len));
            try self.indices.ensureTotalCapacity(self.indices.items.len + indices.len);
            for (indices) |idx| self.indices.appendAssumeCapacity(idx + index_offset);
            try self.positions.appendSlice(positions);
            if (normals) |ns| try self.normals.appendSlice(ns);
            if (colors) |cs| try self.colors.appendSlice(cs);
            if (texcoords) |ts| try self.texcoords.appendSlice(ts);
            if (joints) |js| try self.joints.appendSlice(js);
            if (weights) |ws| try self.weights.appendSlice(ws);
        }
        /// Compute AABB of mesh from all stored positions.
        pub fn computeAabb(self: *SubMesh) void {
            var aabb_min = Vector.new(
                self.positions.items[0][0],
                self.positions.items[0][1],
                self.positions.items[0][2],
            );
            var aabb_max = aabb_min;
            for (0..self.positions.items.len) |i| {
                const v = Vector.new(
                    self.positions.items[i][0],
                    self.positions.items[i][1],
                    self.positions.items[i][2],
                );
                aabb_min = aabb_min.min(v);
                aabb_max = aabb_max.max(v);
            }
            self.aabb = [6]f32{
                aabb_min.x(), aabb_min.y(), aabb_min.z(),
                aabb_max.x(), aabb_max.y(), aabb_max.z(),
            };
        }
        /// Remap texture coordinates from [0,1] into the [uv0, uv1] range
        /// (e.g. for texture atlases).
        pub fn remapTexcoords(self: *SubMesh, uv0: sdl.PointF, uv1: sdl.PointF) void {
            for (self.texcoords.items) |*ts| {
                ts[0] = jok.utils.math.linearMap(ts[0], 0, 1, uv0.x, uv1.x);
                ts[1] = jok.utils.math.linearMap(ts[1], 0, 1, uv0.y, uv1.y);
            }
        }
        /// Get texture registered under this sub-mesh's tex_id, if any.
        pub fn getTexture(self: *const SubMesh) ?sdl.Texture {
            return self.mesh.textures.get(self.tex_id);
        }
    };
    mesh: *Self,
    parent: ?*Node,
    children: std.ArrayList(*Node),
    // Local transform, decomposed (scale / rotation quaternion / translation).
    scale: zmath.Vec,
    rotation: zmath.Vec,
    translation: zmath.Vec,
    // Cached world transform (local composed with ancestors).
    matrix: zmath.Mat,
    meshes: []SubMesh,
    skin: ?*Skin = null,
    is_joint: bool = false,
    fn init(
        allocator: std.mem.Allocator,
        mesh: *Self,
        parent: ?*Node,
        mesh_count: usize,
    ) !Node {
        const self = Node{
            .mesh = mesh,
            .parent = parent,
            .children = std.ArrayList(*Node).init(allocator),
            .scale = zmath.f32x4s(1),
            .rotation = zmath.f32x4s(0),
            .translation = zmath.f32x4s(0),
            .matrix = zmath.identity(),
            .meshes = try allocator.alloc(SubMesh, mesh_count),
        };
        for (self.meshes) |*m| m.* = SubMesh.init(allocator, mesh);
        return self;
    }
    // Build a node from a glTF node, taking either its explicit matrix or
    // its TRS components, and composing the world matrix with the parent's.
    fn fromGltfNode(
        allocator: std.mem.Allocator,
        mesh: *Self,
        parent: *Node,
        gltf_node: *const GltfNode,
    ) !Node {
        var self = try Node.init(
            allocator,
            mesh,
            parent,
            if (gltf_node.mesh) |m| m.primitives_count else 0,
        );
        if (gltf_node.has_matrix == 1) {
            // Explicit matrix: decompose it back into TRS for animation.
            const m = zmath.loadMat(&gltf_node.transformLocal());
            self.matrix = zmath.loadMat(&gltf_node.transformWorld());
            self.translation = zmath.util.getTranslationVec(m);
            self.rotation = zmath.util.getRotationQuat(m);
            self.scale = zmath.util.getScaleVec(m);
        } else {
            self.scale = if (gltf_node.has_scale == 1)
                zmath.f32x4(
                    gltf_node.scale[0],
                    gltf_node.scale[1],
                    gltf_node.scale[2],
                    0,
                )
            else
                zmath.f32x4(1.0, 1.0, 1.0, 0.0);
            self.rotation = if (gltf_node.has_rotation == 1)
                zmath.f32x4(
                    gltf_node.rotation[0],
                    gltf_node.rotation[1],
                    gltf_node.rotation[2],
                    gltf_node.rotation[3],
                )
            else
                zmath.f32x4s(0);
            self.translation = if (gltf_node.has_translation == 1)
                zmath.f32x4(
                    gltf_node.translation[0],
                    gltf_node.translation[1],
                    gltf_node.translation[2],
                    0,
                )
            else
                zmath.f32x4s(0);
            self.matrix = zmath.mul(self.calcLocalTransform(), parent.matrix);
        }
        for (self.meshes) |*m| m.* = SubMesh.init(allocator, mesh);
        return self;
    }
    // Local transform = scale, then rotate, then translate.
    fn calcLocalTransform(node: *const Node) zmath.Mat {
        return zmath.mul(
            zmath.mul(
                zmath.scalingV(node.scale),
                zmath.matFromQuat(@as(zmath.Quat, node.rotation)),
            ),
            zmath.translationV(node.translation),
        );
    }
    // Render this node's sub-meshes, then recurse into children.
    fn render(
        node: *Node,
        csz: sdl.PointF,
        target: *internal.RenderTarget,
        model: zmath.Mat,
        camera: Camera,
        tri_rd: *TriangleRenderer,
        opt: RenderOption,
    ) !void {
        for (node.meshes) |sm| {
            try tri_rd.renderMesh(
                csz,
                target,
                zmath.mul(node.matrix, model),
                camera,
                sm.indices.items,
                sm.positions.items,
                if (sm.normals.items.len == 0)
                    null
                else
                    sm.normals.items,
                if (sm.colors.items.len == 0)
                    null
                else
                    sm.colors.items,
                if (sm.texcoords.items.len == 0)
                    null
                else
                    sm.texcoords.items,
                .{
                    .aabb = sm.aabb,
                    .cull_faces = opt.cull_faces,
                    .color = opt.color,
                    .shading_method = opt.shading_method,
                    .texture = opt.texture orelse sm.getTexture(),
                    .lighting = opt.lighting,
                    .animation = null,
                },
            );
        }
        for (node.children.items) |c| {
            try c.render(
                csz,
                target,
                model,
                camera,
                tri_rd,
                opt,
            );
        }
    }
};
pub const GltfAnimation = zmesh.io.zcgltf.Animation;
pub const GltfAnimationPathType = zmesh.io.zcgltf.AnimationPathType;
pub const GltfInterpolationType = zmesh.io.zcgltf.InterpolationType;
/// Keyframe animation imported from glTF: one channel per animated node
/// property (scale, rotation or translation).
pub const Animation = struct {
    const Channel = struct {
        node: *Node,
        path: GltfAnimationPathType,
        interpolation: GltfInterpolationType,
        // Keyframe times (sorted ascending), parallel to `samples`.
        timesteps: []f32,
        samples: []zmath.Vec,
    };
    mesh: *Self,
    name: []const u8,
    channels: std.ArrayList(Channel),
    // Total length in the timesteps' time unit (max over all channels).
    duration: f32,
    // Import a glTF animation; returns null when no supported channel
    // remains (weights paths and cubic-spline interpolation are skipped).
    fn fromGltfAnimation(allocator: std.mem.Allocator, mesh: *Self, gltf_anim: GltfAnimation, name: []const u8) !?Animation {
        var anim = Animation{
            .mesh = mesh,
            .name = name,
            .channels = std.ArrayList(Channel).init(allocator),
            .duration = 0,
        };
        for (gltf_anim.channels[0..gltf_anim.channels_count]) |ch| {
            if (ch.target_path == .weights) continue; // TODO weights path not supported
            if (ch.sampler.interpolation == .cubic_spline) continue; // TODO cubis-spline not supported
            assert(ch.sampler.output.component_type == .r_32f);
            var samples = try allocator.alloc(zmath.Vec, ch.sampler.output.count);
            switch (ch.target_path) {
                .scale => {
                    var xs: [3]f32 = undefined;
                    for (0..ch.sampler.output.count) |i| {
                        const ret = ch.sampler.output.readFloat(i, &xs);
                        assert(ret == true);
                        samples[i] = zmath.f32x4(
                            xs[0],
                            xs[1],
                            xs[2],
                            0,
                        );
                    }
                },
                .rotation => {
                    // Rotations are quaternions: keep all four components.
                    var xs: [4]f32 = undefined;
                    for (0..ch.sampler.output.count) |i| {
                        const ret = ch.sampler.output.readFloat(i, &xs);
                        assert(ret == true);
                        samples[i] = zmath.f32x4(
                            xs[0],
                            xs[1],
                            xs[2],
                            xs[3],
                        );
                    }
                },
                .translation => {
                    var xs: [3]f32 = undefined;
                    for (0..ch.sampler.output.count) |i| {
                        const ret = ch.sampler.output.readFloat(i, &xs);
                        assert(ret == true);
                        samples[i] = zmath.f32x4(
                            xs[0],
                            xs[1],
                            xs[2],
                            0,
                        );
                    }
                },
                else => unreachable,
            }
            const timesteps = try allocator.alloc(f32, ch.sampler.input.unpackFloatsCount());
            _ = ch.sampler.input.unpackFloats(timesteps);
            assert(timesteps.len == samples.len);
            assert(std.sort.isSorted(f32, timesteps, {}, std.sort.asc(f32)));
            anim.duration = @max(anim.duration, timesteps[timesteps.len - 1]);
            try anim.channels.append(Channel{
                .node = mesh.nodes_map.get(ch.target_node.?).?,
                .path = ch.target_path,
                .interpolation = ch.sampler.interpolation,
                .timesteps = timesteps,
                .samples = samples,
            });
        }
        return if (anim.channels.items.len == 0) null else anim;
    }
    /// True when the animation drives skeleton joints (checked via the
    /// first channel's target node).
    pub fn isSkeletonAnimation(anim: Animation) bool {
        return anim.channels.items[0].node.is_joint;
    }
};
pub const GltfSkin = zmesh.io.zcgltf.Skin;
/// Skinning data: per-joint inverse bind matrices and the joint nodes,
/// stored in parallel arrays.
pub const Skin = struct {
    inverse_matrices: []zmath.Mat,
    nodes: []*Node,
    fn fromGltfSkin(allocator: std.mem.Allocator, mesh: *Self, gltf_skin: *const GltfSkin) !Skin {
        assert(gltf_skin.joints_count > 0);
        var matrices = try allocator.alloc(zmath.Mat, gltf_skin.joints_count);
        var xs: [16]f32 = undefined;
        for (0..gltf_skin.joints_count) |i| {
            const ret = gltf_skin.inverse_bind_matrices.?.readFloat(i, &xs);
            assert(ret == true);
            matrices[i] = zmath.loadMat(&xs);
        }
        var nodes = try allocator.alloc(*Node, gltf_skin.joints_count);
        for (0..gltf_skin.joints_count) |i| {
            // Resolve joints through nodes_map and mark them so skeleton
            // animations can be identified.
            nodes[i] = mesh.nodes_map.get(gltf_skin.joints[i]).?;
            nodes[i].is_joint = true;
        }
        return .{ .inverse_matrices = matrices, .nodes = nodes };
    }
};
allocator: std.mem.Allocator,
/// Arena backing all nodes, sub-mesh storage, maps and animation data.
arena: std.heap.ArenaAllocator,
/// Root of the node hierarchy; always present.
root: *Node,
/// Texture lookup keyed by pointer value (texture ptr or glTF image ptr).
textures: std.AutoHashMap(usize, sdl.Texture),
/// Maps original glTF nodes to our nodes (used to wire skins/animations).
nodes_map: std.AutoHashMap(*const GltfNode, *Node),
/// Animations indexed by name.
animations: std.StringHashMap(Animation),
skins_map: std.AutoHashMap(*const GltfSkin, *Skin),
/// Whether the textures were loaded (and must be destroyed) by this mesh.
own_textures: bool,
/// Allocate an empty mesh whose root node holds `mesh_count` sub-meshes.
/// All node/map storage is arena-backed; release everything with `destroy`.
pub fn create(allocator: std.mem.Allocator, mesh_count: usize) !*Self {
    // `self` is a pointer that is never reassigned — bind it as const
    // (modern Zig rejects an unmutated `var`).
    const self = try allocator.create(Self);
    errdefer allocator.destroy(self);
    self.allocator = allocator;
    self.arena = std.heap.ArenaAllocator.init(allocator);
    self.root = try self.createRootNode(mesh_count);
    self.textures = std.AutoHashMap(usize, sdl.Texture).init(self.arena.allocator());
    self.nodes_map = std.AutoHashMap(*const GltfNode, *Node).init(self.arena.allocator());
    self.animations = std.StringHashMap(Animation).init(self.arena.allocator());
    self.skins_map = std.AutoHashMap(*const GltfSkin, *Skin).init(self.arena.allocator());
    self.own_textures = false;
    return self;
}
/// Create mesh with zmesh.Shape
pub const ShapeOption = struct {
    compute_aabb: bool = true,
    /// Optional texture to attach to the single sub-mesh.
    tex: ?sdl.Texture = null,
    /// Optional {top-left, bottom-right} range to remap texcoords into.
    uvs: ?[2]sdl.PointF = null,
};
/// Build a single-node mesh from a zmesh.Shape. When the shape carries
/// texcoords, `opt.tex`/`opt.uvs` optionally attach a texture and remap
/// the coordinates. Caller releases the result with `destroy`.
pub fn fromShape(
    allocator: std.mem.Allocator,
    shape: zmesh.Shape,
    opt: ShapeOption,
) !*Self {
    // Never reassigned — const (modern Zig rejects an unmutated `var`).
    const self = try create(allocator, 1);
    errdefer self.destroy();
    try self.root.meshes[0].appendAttributes(
        shape.indices,
        shape.positions,
        shape.normals,
        null,
        shape.texcoords,
        null,
        null,
    );
    if (shape.texcoords != null) {
        if (opt.tex) |t| {
            const tex_id = @intFromPtr(t.ptr);
            self.root.meshes[0].tex_id = tex_id;
            try self.textures.put(tex_id, t);
            // Optional capture instead of `!= null` + `.?` unwraps.
            if (opt.uvs) |uvs| {
                self.root.meshes[0].remapTexcoords(uvs[0], uvs[1]);
            }
        }
    }
    if (opt.compute_aabb) self.root.meshes[0].computeAabb();
    return self;
}
/// Create mesh with GLTF model file
pub const GltfOption = struct {
    compute_aabb: bool = true,
    /// When set, overrides every material texture in the file.
    tex: ?sdl.Texture = null,
    uvs: ?[2]sdl.PointF = null,
};
/// Load a mesh from a glTF file: node tree, animations and skins.
/// Textures are loaded from the file unless `opt.tex` overrides them, in
/// which case the mesh does not own any texture.
pub fn fromGltf(
    ctx: jok.Context,
    file_path: [:0]const u8,
    opt: GltfOption,
) !*Self {
    const data = try zmesh.io.parseAndLoadFile(file_path);
    defer zmesh.io.freeData(data);
    // Never reassigned — const (modern Zig rejects an unmutated `var`).
    const self = try create(ctx.allocator(), 0);
    errdefer self.destroy();
    if (opt.tex) |t| { // Use external texture
        const tex_id = @intFromPtr(t.ptr);
        try self.textures.put(tex_id, t);
    } else {
        // Textures loaded from the file are destroyed by `destroy`.
        self.own_textures = true;
    }
    // Load the scene/nodes
    const dir = std.fs.path.dirname(file_path);
    var node_index: usize = 0;
    while (node_index < data.scene.?.nodes_count) : (node_index += 1) {
        try self.loadNodeTree(ctx, dir, data.scene.?.nodes.?[node_index], self.root, opt);
    }
    // Load animations
    var index: usize = 0;
    while (index < data.animations_count) : (index += 1) {
        try self.loadAnimation(data.animations.?[index]);
    }
    index = 0;
    while (index < data.skins_count) : (index += 1) {
        try self.loadSkin(&data.skins.?[index]);
    }
    // Connect nodes and skins
    var it = self.nodes_map.iterator();
    while (it.next()) |kv| {
        const gltf_node = kv.key_ptr.*;
        const node = kv.value_ptr.*;
        if (gltf_node.skin) |s| {
            node.skin = self.skins_map.get(s).?;
        }
    }
    return self;
}
/// Release all resources: destroys textures when this mesh owns them,
/// then frees the arena (nodes, maps, animation data) and the mesh itself.
pub fn destroy(self: *Self) void {
    if (self.own_textures) {
        var it = self.textures.iterator();
        while (it.next()) |kv| {
            kv.value_ptr.destroy();
        }
    }
    self.arena.deinit();
    self.allocator.destroy(self);
}
/// Render the whole node hierarchy (root and all descendants) with the
/// given model transform and camera.
pub fn render(
    self: *const Self,
    csz: sdl.PointF,
    target: *internal.RenderTarget,
    model: zmath.Mat,
    camera: Camera,
    tri_rd: *TriangleRenderer,
    opt: RenderOption,
) !void {
    try self.root.render(
        csz,
        target,
        model,
        camera,
        tri_rd,
        opt,
    );
}
/// Look up an animation by name. As a convenience, the name "default"
/// resolves to the only animation when exactly one exists.
pub inline fn getAnimation(self: Self, name: []const u8) ?*Animation {
    var anim = self.animations.getPtr(name);
    if (anim == null) {
        if (self.animations.count() == 1 and
            std.mem.eql(u8, name, "default"))
        {
            var it = self.animations.valueIterator();
            anim = it.next();
            assert(anim != null);
        }
    }
    return anim;
}
/// Allocate the root node (no parent, identity transform) in the arena.
fn createRootNode(self: *Self, mesh_count: usize) !*Node {
    const allocator = self.arena.allocator();
    const node = try allocator.create(Node);
    errdefer allocator.destroy(node);
    node.* = try Node.init(allocator, self, null, mesh_count);
    return node;
}
fn loadNodeTree(
self: *Self,
ctx: jok.Context,
dir: ?[]const u8,
gltf_node: *const GltfNode,
parent: *Node,
opt: GltfOption,
) !void {
const node = try self.arena.allocator().create(Node);
node.* = try Node.fromGltfNode(self.arena.allocator(), self, parent, gltf_node);
try parent.children.append(node);
try self.nodes_map.putNoClobber(gltf_node, node);
if (gltf_node.mesh) |mesh| {
for (0..mesh.primitives_count, node.meshes) |prim_index, *sm| {
const prim = &mesh.primitives[prim_index];
// Load material
var transform: ?zmesh.io.zcgltf.TextureTransform = null;
if (opt.tex) |t| {
sm.tex_id = @intFromPtr(t.ptr);
if (opt.uvs != null) {
sm.remapTexcoords(opt.uvs.?[0], opt.uvs.?[1]);
}
} else if (prim.material) |mat| {
if (mat.has_pbr_metallic_roughness != 0 and
mat.pbr_metallic_roughness.base_color_texture.texture != null)
{
const image = mat.pbr_metallic_roughness.base_color_texture.texture.?.image.?;
sm.tex_id = @intFromPtr(image);
assert(sm.tex_id != 0);
// Lazily load textures
if (self.textures.get(sm.tex_id) == null) {
var tex: sdl.Texture = undefined;
if (image.uri) |p| { // Read external file
const uri_path = std.mem.sliceTo(p, '\x00');
tex = if (dir) |d| BLK: {
const path = try std.fs.path.joinZ(
self.allocator,
&.{ d, uri_path },
);
defer self.allocator.free(path);
break :BLK try jok.utils.gfx.createTextureFromFile(
ctx,
path,
.static,
false,
);
} else try jok.utils.gfx.createTextureFromFile(
ctx,
uri_path,
.static,
false,
);
} else if (image.buffer_view) |v| { // Read embedded file
var file_data: []u8 = undefined;
file_data.ptr = @as([*]u8, @ptrCast(v.buffer.data.?)) + v.offset;
file_data.len = v.size;
tex = try jok.utils.gfx.createTextureFromFileData(
ctx,
file_data,
.static,
false,
);
} else unreachable;
try self.textures.putNoClobber(sm.tex_id, tex);
}
if (mat.pbr_metallic_roughness.base_color_texture.has_transform != 0) {
transform = mat.pbr_metallic_roughness.base_color_texture.transform;
}
}
}
// Indices.
const num_vertices: u32 = @intCast(prim.attributes[0].data.count);
const index_offset = @as(u32, @intCast(sm.positions.items.len));
if (prim.indices) |accessor| {
const num_indices: u32 = @intCast(accessor.count);
try sm.indices.ensureTotalCapacity(sm.indices.items.len + num_indices);
const buffer_view = accessor.buffer_view.?;
assert(accessor.stride == buffer_view.stride or buffer_view.stride == 0);
assert(accessor.stride * accessor.count <= buffer_view.size);
assert(buffer_view.buffer.data != null);
const data_addr = @intFromPtr(buffer_view.buffer.data.?) + accessor.offset + buffer_view.offset;
if (accessor.stride == 1) {
assert(accessor.component_type == .r_8u);
const src = @as([*]const u8, @ptrFromInt(data_addr));
for (0..num_indices) |i| {
sm.indices.appendAssumeCapacity(src[i] + index_offset);
}
} else if (accessor.stride == 2) {
assert(accessor.component_type == .r_16u);
const src = @as([*]const u16, @ptrFromInt(data_addr));
for (0..num_indices) |i| {
sm.indices.appendAssumeCapacity(src[i] + index_offset);
}
} else if (accessor.stride == 4) {
assert(accessor.component_type == .r_32u);
const src = @as([*]const u32, @ptrFromInt(data_addr));
for (0..num_indices) |i| {
sm.indices.appendAssumeCapacity(src[i] + index_offset);
}
} else {
unreachable;
}
} else {
assert(@rem(num_vertices, 3) == 0);
try sm.indices.ensureTotalCapacity(num_vertices);
for (0..num_vertices) |i| {
sm.indices.appendAssumeCapacity(@as(u32, @intCast(i)) + index_offset);
}
}
// Attributes.
{
const attributes = prim.attributes[0..prim.attributes_count];
for (attributes) |attrib| {
const accessor = attrib.data;
const buffer_view = accessor.buffer_view.?;
assert(buffer_view.buffer.data != null);
assert(accessor.stride == buffer_view.stride or buffer_view.stride == 0);
assert(accessor.stride * accessor.count <= buffer_view.size);
const data_addr = @intFromPtr(buffer_view.buffer.data.?) + accessor.offset + buffer_view.offset;
if (attrib.type == .position) {
assert(accessor.type == .vec3);
assert(accessor.component_type == .r_32f);
const slice = @as([*]const [3]f32, @ptrFromInt(data_addr))[0..num_vertices];
try sm.positions.appendSlice(slice);
} else if (attrib.type == .normal) {
assert(accessor.type == .vec3);
assert(accessor.component_type == .r_32f);
const slice = @as([*]const [3]f32, @ptrFromInt(data_addr))[0..num_vertices];
try sm.normals.appendSlice(slice);
} else if (attrib.type == .color) {
assert(accessor.component_type == .r_32f);
if (accessor.type == .vec3) {
const slice = @as([*]const [3]f32, @ptrFromInt(data_addr))[0..num_vertices];
for (slice) |c| try sm.colors.append(sdl.Color.rgb(
@intFromFloat(255 * c[0]),
@intFromFloat(255 * c[1]),
@intFromFloat(255 * c[2]),
));
} else if (accessor.type == .vec4) {
const slice = @as([*]const [4]f32, @ptrFromInt(data_addr))[0..num_vertices];
for (slice) |c| try sm.colors.append(sdl.Color.rgba(
@intFromFloat(255 * c[0]),
@intFromFloat(255 * c[1]),
@intFromFloat(255 * c[2]),
@intFromFloat(255 * c[3]),
));
}
} else if (attrib.type == .texcoord) {
assert(accessor.type == .vec2);
assert(accessor.component_type == .r_32f);
const slice = @as([*]const [2]f32, @ptrFromInt(data_addr))[0..num_vertices];
try sm.texcoords.ensureTotalCapacity(sm.texcoords.items.len + slice.len);
if (transform) |tr| {
for (slice) |ts| {
sm.texcoords.appendAssumeCapacity(.{
zmath.clamp(tr.offset[0] + ts[0] * tr.scale[0], 0.0, 1.0),
zmath.clamp(tr.offset[1] + ts[1] * tr.scale[1], 0.0, 1.0),
});
}
} else {
for (slice) |ts| {
sm.texcoords.appendAssumeCapacity(.{
zmath.clamp(ts[0], 0.0, 1.0),
zmath.clamp(ts[1], 0.0, 1.0),
});
}
}
} else if (attrib.type == .joints) {
assert(accessor.type == .vec4);
try sm.joints.ensureTotalCapacity(sm.joints.items.len + num_vertices);
if (accessor.component_type == .r_8u) {
const slice = @as([*]const [4]u8, @ptrFromInt(data_addr))[0..num_vertices];
try sm.joints.appendSlice(slice);
} else if (accessor.component_type == .r_16u) {
const slice = @as([*]const [4]u16, @ptrFromInt(data_addr))[0..num_vertices];
for (slice) |xs| {
sm.joints.appendAssumeCapacity([4]u8{
@intCast(xs[0]),
@intCast(xs[1]),
@intCast(xs[2]),
@intCast(xs[3]),
});
}
} else unreachable;
} else if (attrib.type == .weights) {
assert(accessor.type == .vec4);
try sm.weights.ensureTotalCapacity(sm.weights.items.len + num_vertices);
if (accessor.component_type == .r_32f) {
const slice = @as([*]const [4]f32, @ptrFromInt(data_addr))[0..num_vertices];
try sm.weights.appendSlice(slice);
} else if (accessor.component_type == .r_8u) {
const slice = @as([*]const [4]u8, @ptrFromInt(data_addr))[0..num_vertices];
for (slice) |xs| {
sm.weights.appendAssumeCapacity([4]f32{
@as(f32, @floatFromInt(xs[0])) / 255.0,
@as(f32, @floatFromInt(xs[1])) / 255.0,
@as(f32, @floatFromInt(xs[2])) / 255.0,
@as(f32, @floatFromInt(xs[3])) / 255.0,
});
}
} else if (accessor.component_type == .r_16u) {
const slice = @as([*]const [4]u16, @ptrFromInt(data_addr))[0..num_vertices];
for (slice) |xs| {
sm.weights.appendAssumeCapacity([4]f32{
@as(f32, @floatFromInt(xs[0])) / 65535.0,
@as(f32, @floatFromInt(xs[1])) / 65535.0,
@as(f32, @floatFromInt(xs[2])) / 65535.0,
@as(f32, @floatFromInt(xs[3])) / 65535.0,
});
}
} else unreachable;
}
}
}
// Compute AABB
if (opt.compute_aabb) sm.computeAabb();
}
}
// Load children
for (0..gltf_node.children_count) |node_index| {
try self.loadNodeTree(ctx, dir, gltf_node.children.?[node_index], node, opt);
}
}
fn loadAnimation(self: *Self, gltf_anim: GltfAnimation) !void {
const name = try self.arena.allocator().dupe(
u8,
std.mem.sliceTo(gltf_anim.name orelse "default", 0),
);
errdefer self.arena.allocator().free(name);
if (try Animation.fromGltfAnimation(self.arena.allocator(), self, gltf_anim, name)) |anim| {
try self.animations.putNoClobber(name, anim);
}
}
fn loadSkin(self: *Self, gltf_skin: *const GltfSkin) !void {
const allocator = self.arena.allocator();
const skin = try allocator.create(Skin);
errdefer allocator.destroy(skin);
skin.* = try Skin.fromGltfSkin(allocator, self, gltf_skin);
try self.skins_map.putNoClobber(gltf_skin, skin);
}
| https://raw.githubusercontent.com/Jack-Ji/jok/8cd4d0e0ad8f073ace74c6b2e3a60ff9f27889a9/src/j3d/Mesh.zig |
const std = @import("std.zig");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const math = std.math;
const Allocator = mem.Allocator;
/// A contiguous, growable list of items in memory.
/// This is a wrapper around an array of T values. Initialize with `init`.
///
/// This struct internally stores a `std.mem.Allocator` for memory management.
/// To manually specify an allocator with each method call see `ArrayListUnmanaged`.
pub fn ArrayList(comptime T: type) type {
    // Natural alignment: passing `null` makes the aligned variant use
    // `@alignOf(T)`.
    return ArrayListAligned(T, null);
}
/// A contiguous, growable list of arbitrarily aligned items in memory.
/// This is a wrapper around an array of T values aligned to `alignment`-byte
/// addresses. If the specified alignment is `null`, then `@alignOf(T)` is used.
/// Initialize with `init`.
///
/// This struct internally stores a `std.mem.Allocator` for memory management.
/// To manually specify an allocator with each method call see `ArrayListAlignedUnmanaged`.
pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (alignment) |a| {
if (a == @alignOf(T)) {
return ArrayListAligned(T, null);
}
}
return struct {
const Self = @This();
/// Contents of the list. Pointers to elements in this slice are
/// **invalid after resizing operations** on the ArrayList, unless the
/// operation explicitly either: (1) states otherwise or (2) lists the
/// invalidated pointers.
///
/// The allocator used determines how element pointers are
/// invalidated, so the behavior may vary between lists. To avoid
/// illegal behavior, take into account the above paragraph plus the
/// explicit statements given in each method.
items: Slice,
/// How many T values this list can hold without allocating
/// additional memory.
capacity: usize,
allocator: Allocator,
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub fn SentinelSlice(comptime s: T) type {
return if (alignment) |a| ([:s]align(a) T) else [:s]T;
}
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.capacity = 0,
.allocator = allocator,
};
}
/// Initialize with capacity to hold at least `num` elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: Allocator, num: usize) Allocator.Error!Self {
var self = Self.init(allocator);
try self.ensureTotalCapacityPrecise(num);
return self;
}
/// Release all allocated memory.
        pub fn deinit(self: Self) void {
            // Zero-sized element types never actually allocate — their
            // capacity is a virtual maxInt (see ensureTotalCapacity) — so
            // there is no backing buffer to free.
            if (@sizeOf(T) > 0) {
                self.allocator.free(self.allocatedSlice());
            }
        }
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSlice(allocator: Allocator, slice: Slice) Self {
return Self{
.items = slice,
.capacity = slice.len,
.allocator = allocator,
};
}
/// Initializes an ArrayListUnmanaged with the `items` and `capacity` fields
/// of this ArrayList. Empties this ArrayList.
pub fn moveToUnmanaged(self: *Self) ArrayListAlignedUnmanaged(T, alignment) {
const allocator = self.allocator;
const result = .{ .items = self.items, .capacity = self.capacity };
self.* = init(allocator);
return result;
}
/// The caller owns the returned memory. Empties this ArrayList,
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
        pub fn toOwnedSlice(self: *Self) Allocator.Error!Slice {
            const allocator = self.allocator;
            // Best case: shrink the existing allocation in place down to the
            // used length and hand that exact buffer to the caller.
            const old_memory = self.allocatedSlice();
            if (allocator.resize(old_memory, self.items.len)) {
                const result = self.items;
                self.* = init(allocator);
                return result;
            }
            // In-place shrink refused: allocate an exactly-sized buffer,
            // copy the items across, then release the old allocation.
            const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
            mem.copy(T, new_memory, self.items);
            // Scrub the relinquished buffer before freeing it (debug-mode
            // poisoning surfaces use-after-toOwnedSlice bugs).
            @memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
            self.clearAndFree();
            return new_memory;
        }
/// The caller owns the returned memory. Empties this ArrayList.
pub fn toOwnedSliceSentinel(self: *Self, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
try self.ensureTotalCapacityPrecise(self.items.len + 1);
self.appendAssumeCapacity(sentinel);
const result = try self.toOwnedSlice();
return result[0 .. result.len - 1 :sentinel];
}
/// Creates a copy of this ArrayList, using the same allocator.
pub fn clone(self: Self) Allocator.Error!Self {
var cloned = try Self.initCapacity(self.allocator, self.capacity);
cloned.appendSliceAssumeCapacity(self.items);
return cloned;
}
/// Insert `item` at index `n` by moving `list[n .. list.len]` to make room.
/// This operation is O(N).
/// Invalidates pointers if additional memory is needed.
pub fn insert(self: *Self, n: usize, item: T) Allocator.Error!void {
try self.ensureUnusedCapacity(1);
self.items.len += 1;
mem.copyBackwards(T, self.items[n + 1 .. self.items.len], self.items[n .. self.items.len - 1]);
self.items[n] = item;
}
/// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room.
/// This operation is O(N).
/// Invalidates pointers if additional memory is needed.
pub fn insertSlice(self: *Self, i: usize, items: []const T) Allocator.Error!void {
try self.ensureUnusedCapacity(items.len);
self.items.len += items.len;
mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]);
mem.copy(T, self.items[i .. i + items.len], items);
}
/// Replace range of elements `list[start..start+len]` with `new_items`.
/// Grows list if `len < new_items.len`.
/// Shrinks list if `len > new_items.len`.
/// Invalidates pointers if this ArrayList is resized.
pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: []const T) Allocator.Error!void {
const after_range = start + len;
const range = self.items[start..after_range];
if (range.len == new_items.len)
mem.copy(T, range, new_items)
else if (range.len < new_items.len) {
const first = new_items[0..range.len];
const rest = new_items[range.len..];
mem.copy(T, range, first);
try self.insertSlice(after_range, rest);
} else {
mem.copy(T, range, new_items);
const after_subrange = start + new_items.len;
for (self.items[after_range..]) |item, i| {
self.items[after_subrange..][i] = item;
}
self.items.len -= len - new_items.len;
}
}
/// Extend the list by 1 element. Allocates more memory as necessary.
/// Invalidates pointers if additional memory is needed.
pub fn append(self: *Self, item: T) Allocator.Error!void {
const new_item_ptr = try self.addOne();
new_item_ptr.* = item;
}
/// Extend the list by 1 element, but assert `self.capacity`
/// is sufficient to hold an additional item. **Does not**
/// invalidate pointers.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
const new_item_ptr = self.addOneAssumeCapacity();
new_item_ptr.* = item;
}
/// Remove the element at index `i`, shift elements after index
/// `i` forward, and return the removed element.
/// Asserts the array has at least one item.
/// Invalidates pointers to end of list.
/// This operation is O(N).
pub fn orderedRemove(self: *Self, i: usize) T {
const newlen = self.items.len - 1;
if (newlen == i) return self.pop();
const old_item = self.items[i];
for (self.items[i..newlen]) |*b, j| b.* = self.items[i + 1 + j];
self.items[newlen] = undefined;
self.items.len = newlen;
return old_item;
}
/// Removes the element at the specified index and returns it.
/// The empty slot is filled from the end of the list.
/// This operation is O(1).
pub fn swapRemove(self: *Self, i: usize) T {
if (self.items.len - 1 == i) return self.pop();
const old_item = self.items[i];
self.items[i] = self.pop();
return old_item;
}
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
/// Invalidates pointers if additional memory is needed.
pub fn appendSlice(self: *Self, items: []const T) Allocator.Error!void {
try self.ensureUnusedCapacity(items.len);
self.appendSliceAssumeCapacity(items);
}
/// Append the slice of items to the list, asserting the capacity is already
/// enough to store the new items. **Does not** invalidate pointers.
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
mem.copy(T, self.items[old_len..], items);
}
/// Append an unaligned slice of items to the list. Allocates more
/// memory as necessary. Only call this function if calling
/// `appendSlice` instead would be a compile error.
/// Invalidates pointers if additional memory is needed.
pub fn appendUnalignedSlice(self: *Self, items: []align(1) const T) Allocator.Error!void {
try self.ensureUnusedCapacity(items.len);
self.appendUnalignedSliceAssumeCapacity(items);
}
/// Append the slice of items to the list, asserting the capacity is already
/// enough to store the new items. **Does not** invalidate pointers.
/// Only call this function if calling `appendSliceAssumeCapacity` instead
/// would be a compile error.
pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(
@ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
@ptrCast([*]const u8, items.ptr),
items.len * @sizeOf(T),
);
}
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
else
std.io.Writer(*Self, error{OutOfMemory}, appendWrite);
/// Initializes a Writer which will append to the list.
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
/// Same as `append` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// Invalidates pointers if additional memory is needed.
fn appendWrite(self: *Self, m: []const u8) Allocator.Error!usize {
try self.appendSlice(m);
return m.len;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates pointers if additional memory is needed.
pub fn appendNTimes(self: *Self, value: T, n: usize) Allocator.Error!void {
const old_len = self.items.len;
try self.resize(self.items.len + n);
mem.set(T, self.items[old_len..self.items.len], value);
}
/// Append a value to the list `n` times.
/// Asserts the capacity is enough. **Does not** invalidate pointers.
pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const new_len = self.items.len + n;
assert(new_len <= self.capacity);
mem.set(T, self.items.ptr[self.items.len..new_len], value);
self.items.len = new_len;
}
/// Adjust the list's length to `new_len`.
/// Does not initialize added items if any.
/// Invalidates pointers if additional memory is needed.
pub fn resize(self: *Self, new_len: usize) Allocator.Error!void {
try self.ensureTotalCapacity(new_len);
self.items.len = new_len;
}
/// Reduce allocated capacity to `new_len`.
/// May invalidate element pointers.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
var unmanaged = self.moveToUnmanaged();
unmanaged.shrinkAndFree(self.allocator, new_len);
self.* = unmanaged.toManaged(self.allocator);
}
/// Reduce length to `new_len`.
/// Invalidates pointers for the elements `items[new_len..]`.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
self.items.len = new_len;
}
/// Invalidates all element pointers.
pub fn clearRetainingCapacity(self: *Self) void {
self.items.len = 0;
}
/// Invalidates all element pointers.
        pub fn clearAndFree(self: *Self) void {
            // Frees the entire backing buffer (full capacity, not just the
            // first `items.len` elements).
            self.allocator.free(self.allocatedSlice());
            self.items.len = 0;
            self.capacity = 0;
        }
/// Modify the array so that it can hold at least `new_capacity` items.
/// Invalidates pointers if additional memory is needed.
pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) Allocator.Error!void {
if (@sizeOf(T) == 0) {
self.capacity = math.maxInt(usize);
return;
}
if (self.capacity >= new_capacity) return;
var better_capacity = self.capacity;
while (true) {
better_capacity +|= better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
return self.ensureTotalCapacityPrecise(better_capacity);
}
/// Modify the array so that it can hold at least `new_capacity` items.
/// Like `ensureTotalCapacity`, but the resulting capacity is much more likely
/// (but not guaranteed) to be equal to `new_capacity`.
/// Invalidates pointers if additional memory is needed.
        pub fn ensureTotalCapacityPrecise(self: *Self, new_capacity: usize) Allocator.Error!void {
            // Zero-sized types need no storage; report unlimited capacity.
            if (@sizeOf(T) == 0) {
                self.capacity = math.maxInt(usize);
                return;
            }
            if (self.capacity >= new_capacity) return;
            // Here we avoid copying allocated but unused bytes by
            // attempting a resize in place, and falling back to allocating
            // a new buffer and doing our own copy. With a realloc() call,
            // the allocator implementation would pointlessly copy our
            // extra capacity.
            const old_memory = self.allocatedSlice();
            if (self.allocator.resize(old_memory, new_capacity)) {
                self.capacity = new_capacity;
            } else {
                const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
                mem.copy(T, new_memory, self.items);
                self.allocator.free(old_memory);
                // `items.len` is unchanged; only the backing buffer moved.
                self.items.ptr = new_memory.ptr;
                self.capacity = new_memory.len;
            }
        }
/// Modify the array so that it can hold at least `additional_count` **more** items.
/// Invalidates pointers if additional memory is needed.
pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) Allocator.Error!void {
return self.ensureTotalCapacity(self.items.len + additional_count);
}
/// Increases the array's length to match the full capacity that is already allocated.
/// The new elements have `undefined` values. **Does not** invalidate pointers.
pub fn expandToCapacity(self: *Self) void {
self.items.len = self.capacity;
}
/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list resized.
pub fn addOne(self: *Self) Allocator.Error!*T {
try self.ensureTotalCapacity(self.items.len + 1);
return self.addOneAssumeCapacity();
}
/// Increase length by 1, returning pointer to the new item.
/// Asserts that there is already space for the new item without allocating more.
/// The returned pointer becomes invalid when the list is resized.
/// **Does not** invalidate element pointers.
pub fn addOneAssumeCapacity(self: *Self) *T {
assert(self.items.len < self.capacity);
self.items.len += 1;
return &self.items[self.items.len - 1];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
/// Resizes list if `self.capacity` is not large enough.
pub fn addManyAsArray(self: *Self, comptime n: usize) Allocator.Error!*[n]T {
const prev_len = self.items.len;
try self.resize(self.items.len + n);
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// Asserts that there is already space for the new item without allocating more.
/// **Does not** invalidate element pointers.
/// The returned pointer becomes invalid when the list is resized.
pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Remove and return the last element from the list.
/// Asserts the list has at least one item.
/// Invalidates pointers to the removed element.
pub fn pop(self: *Self) T {
const val = self.items[self.items.len - 1];
self.items.len -= 1;
return val;
}
/// Remove and return the last element from the list, or
/// return `null` if list is empty.
/// Invalidates pointers to the removed element, if any.
pub fn popOrNull(self: *Self) ?T {
if (self.items.len == 0) return null;
return self.pop();
}
/// Returns a slice of all the items plus the extra capacity, whose memory
/// contents are `undefined`.
pub fn allocatedSlice(self: Self) Slice {
// For a nicer API, `items.len` is the length, not the capacity.
// This requires "unsafe" slicing.
return self.items.ptr[0..self.capacity];
}
/// Returns a slice of only the extra capacity after items.
/// This can be useful for writing directly into an ArrayList.
/// Note that such an operation must be followed up with a direct
/// modification of `self.items.len`.
pub fn unusedCapacitySlice(self: Self) Slice {
return self.allocatedSlice()[self.items.len..];
}
};
}
/// An ArrayList, but the allocator is passed as a parameter to the relevant functions
/// rather than stored in the struct itself. The same allocator **must** be used throughout
/// the entire lifetime of an ArrayListUnmanaged. Initialize directly or with
/// `initCapacity`, and deinitialize with `deinit` or use `toOwnedSlice`.
pub fn ArrayListUnmanaged(comptime T: type) type {
    // Natural alignment: `null` resolves to `@alignOf(T)` in the aligned
    // variant.
    return ArrayListAlignedUnmanaged(T, null);
}
/// An ArrayListAligned, but the allocator is passed as a parameter to the relevant
/// functions rather than stored in the struct itself. The same allocator **must**
/// be used throughout the entire lifetime of an ArrayListAlignedUnmanaged.
/// Initialize directly or with `initCapacity`, and deinitialize with `deinit` or use `toOwnedSlice`.
pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
if (alignment) |a| {
if (a == @alignOf(T)) {
return ArrayListAlignedUnmanaged(T, null);
}
}
return struct {
const Self = @This();
/// Contents of the list. Pointers to elements in this slice are
/// **invalid after resizing operations** on the ArrayList, unless the
/// operation explicitly either: (1) states otherwise or (2) lists the
/// invalidated pointers.
///
/// The allocator used determines how element pointers are
/// invalidated, so the behavior may vary between lists. To avoid
/// illegal behavior, take into account the above paragraph plus the
/// explicit statements given in each method.
items: Slice = &[_]T{},
/// How many T values this list can hold without allocating
/// additional memory.
capacity: usize = 0,
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub fn SentinelSlice(comptime s: T) type {
return if (alignment) |a| ([:s]align(a) T) else [:s]T;
}
/// Initialize with capacity to hold at least num elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: Allocator, num: usize) Allocator.Error!Self {
var self = Self{};
try self.ensureTotalCapacityPrecise(allocator, num);
return self;
}
/// Release all allocated memory.
pub fn deinit(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.* = undefined;
}
/// Convert this list into an analogous memory-managed one.
/// The returned list has ownership of the underlying memory.
pub fn toManaged(self: *Self, allocator: Allocator) ArrayListAligned(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator };
}
/// The caller owns the returned memory. Empties this ArrayList.
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
pub fn toOwnedSlice(self: *Self, allocator: Allocator) Allocator.Error!Slice {
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, self.items.len)) {
const result = self.items;
self.* = .{};
return result;
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
mem.copy(T, new_memory, self.items);
@memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
self.clearAndFree(allocator);
return new_memory;
}
/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
try self.ensureTotalCapacityPrecise(allocator, self.items.len + 1);
self.appendAssumeCapacity(sentinel);
const result = try self.toOwnedSlice(allocator);
return result[0 .. result.len - 1 :sentinel];
}
/// Creates a copy of this ArrayList.
pub fn clone(self: Self, allocator: Allocator) Allocator.Error!Self {
var cloned = try Self.initCapacity(allocator, self.capacity);
cloned.appendSliceAssumeCapacity(self.items);
return cloned;
}
/// Insert `item` at index `n`. Moves `list[n .. list.len]`
/// to higher indices to make room.
/// This operation is O(N).
/// Invalidates pointers if additional memory is needed.
pub fn insert(self: *Self, allocator: Allocator, n: usize, item: T) Allocator.Error!void {
try self.ensureUnusedCapacity(allocator, 1);
self.items.len += 1;
mem.copyBackwards(T, self.items[n + 1 .. self.items.len], self.items[n .. self.items.len - 1]);
self.items[n] = item;
}
/// Insert slice `items` at index `i`. Moves `list[i .. list.len]` to
/// higher indicices make room.
/// This operation is O(N).
/// Invalidates pointers if additional memory is needed.
pub fn insertSlice(self: *Self, allocator: Allocator, i: usize, items: []const T) Allocator.Error!void {
try self.ensureUnusedCapacity(allocator, items.len);
self.items.len += items.len;
mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]);
mem.copy(T, self.items[i .. i + items.len], items);
}
/// Replace range of elements `list[start..start+len]` with `new_items`
/// Grows list if `len < new_items.len`.
/// Shrinks list if `len > new_items.len`
/// Invalidates pointers if this ArrayList is resized.
pub fn replaceRange(self: *Self, allocator: Allocator, start: usize, len: usize, new_items: []const T) Allocator.Error!void {
var managed = self.toManaged(allocator);
try managed.replaceRange(start, len, new_items);
self.* = managed.moveToUnmanaged();
}
/// Extend the list by 1 element. Allocates more memory as necessary.
/// Invalidates pointers if additional memory is needed.
pub fn append(self: *Self, allocator: Allocator, item: T) Allocator.Error!void {
const new_item_ptr = try self.addOne(allocator);
new_item_ptr.* = item;
}
/// Extend the list by 1 element, but asserting `self.capacity`
/// is sufficient to hold an additional item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
const new_item_ptr = self.addOneAssumeCapacity();
new_item_ptr.* = item;
}
/// Remove the element at index `i` from the list and return its value.
/// Asserts the array has at least one item. Invalidates pointers to
/// last element.
/// This operation is O(N).
pub fn orderedRemove(self: *Self, i: usize) T {
const newlen = self.items.len - 1;
if (newlen == i) return self.pop();
const old_item = self.items[i];
for (self.items[i..newlen]) |*b, j| b.* = self.items[i + 1 + j];
self.items[newlen] = undefined;
self.items.len = newlen;
return old_item;
}
/// Removes the element at the specified index and returns it.
/// The empty slot is filled from the end of the list.
/// Invalidates pointers to last element.
/// This operation is O(1).
pub fn swapRemove(self: *Self, i: usize) T {
if (self.items.len - 1 == i) return self.pop();
const old_item = self.items[i];
self.items[i] = self.pop();
return old_item;
}
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
/// Invalidates pointers if additional memory is needed.
pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) Allocator.Error!void {
try self.ensureUnusedCapacity(allocator, items.len);
self.appendSliceAssumeCapacity(items);
}
/// Append the slice of items to the list, asserting the capacity is enough
/// to store the new items.
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
mem.copy(T, self.items[old_len..], items);
}
/// Append the slice of items to the list. Allocates more
/// memory as necessary. Only call this function if a call to `appendSlice` instead would
/// be a compile error.
/// Invalidates pointers if additional memory is needed.
pub fn appendUnalignedSlice(self: *Self, allocator: Allocator, items: []align(1) const T) Allocator.Error!void {
try self.ensureUnusedCapacity(allocator, items.len);
self.appendUnalignedSliceAssumeCapacity(items);
}
/// Append an unaligned slice of items to the list, asserting the capacity is enough
/// to store the new items. Only call this function if a call to `appendSliceAssumeCapacity`
/// instead would be a compile error.
pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(
@ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
@ptrCast([*]const u8, items.ptr),
items.len * @sizeOf(T),
);
}
pub const WriterContext = struct {
self: *Self,
allocator: Allocator,
};
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
else
std.io.Writer(WriterContext, error{OutOfMemory}, appendWrite);
/// Initializes a Writer which will append to the list.
pub fn writer(self: *Self, allocator: Allocator) Writer {
return .{ .context = .{ .self = self, .allocator = allocator } };
}
/// Same as `append` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// Invalidates pointers if additional memory is needed.
fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
try context.self.appendSlice(context.allocator, m);
return m.len;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates pointers if additional memory is needed.
pub fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) Allocator.Error!void {
const old_len = self.items.len;
try self.resize(allocator, self.items.len + n);
mem.set(T, self.items[old_len..self.items.len], value);
}
/// Append a value to the list `n` times.
/// **Does not** invalidate pointers.
/// Asserts the capacity is enough.
pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const new_len = self.items.len + n;
assert(new_len <= self.capacity);
mem.set(T, self.items.ptr[self.items.len..new_len], value);
self.items.len = new_len;
}
/// Adjust the list's length to `new_len`.
/// Does not initialize added items, if any.
/// Invalidates pointers if additional memory is needed.
pub fn resize(self: *Self, allocator: Allocator, new_len: usize) Allocator.Error!void {
try self.ensureTotalCapacity(allocator, new_len);
self.items.len = new_len;
}
/// Reduce allocated capacity to `new_len`.
/// May invalidate element pointers.
        pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
            assert(new_len <= self.items.len);
            // Zero-sized types have no real allocation to shrink.
            if (@sizeOf(T) == 0) {
                self.items.len = new_len;
                return;
            }
            // First try to shrink the existing allocation in place.
            const old_memory = self.allocatedSlice();
            if (allocator.resize(old_memory, new_len)) {
                self.capacity = new_len;
                self.items.len = new_len;
                return;
            }
            // In-place shrink refused: move into an exactly-sized buffer.
            // Deallocation must not fail, so on OOM we keep the oversized
            // buffer and only adjust the length.
            const new_memory = allocator.alignedAlloc(T, alignment, new_len) catch |e| switch (e) {
                error.OutOfMemory => {
                    // No problem, capacity is still correct then.
                    self.items.len = new_len;
                    return;
                },
            };
            mem.copy(T, new_memory, self.items[0..new_len]);
            allocator.free(old_memory);
            self.items = new_memory;
            self.capacity = new_memory.len;
        }
/// Reduce length to `new_len`.
/// Invalidates pointers to elements `items[new_len..]`.
/// Keeps capacity the same.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
self.items.len = new_len;
}
/// Invalidates all element pointers.
pub fn clearRetainingCapacity(self: *Self) void {
self.items.len = 0;
}
/// Invalidates all element pointers.
pub fn clearAndFree(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.items.len = 0;
self.capacity = 0;
}
/// Modify the array so that it can hold at least `new_capacity` items.
/// Grows geometrically (~1.5x plus a small constant) to amortize the cost
/// of repeated appends.
/// Invalidates pointers if additional memory is needed.
pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
    if (self.capacity >= new_capacity) return;
    // Saturating add so an enormous request cannot overflow; saturation at
    // maxInt(usize) always satisfies the loop condition and terminates.
    var target = self.capacity;
    while (target < new_capacity) {
        target +|= target / 2 + 8;
    }
    return self.ensureTotalCapacityPrecise(allocator, target);
}
/// Modify the array so that it can hold at least `new_capacity` items.
/// Like `ensureTotalCapacity`, but the resulting capacity is much more likely
/// (but not guaranteed) to be equal to `new_capacity`.
/// Invalidates pointers if additional memory is needed.
pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
    // Zero-sized elements need no storage; advertise unbounded capacity.
    if (@sizeOf(T) == 0) {
        self.capacity = math.maxInt(usize);
        return;
    }
    if (self.capacity >= new_capacity) return;
    // Here we avoid copying allocated but unused bytes by
    // attempting a resize in place, and falling back to allocating
    // a new buffer and doing our own copy. With a realloc() call,
    // the allocator implementation would pointlessly copy our
    // extra capacity.
    const old_memory = self.allocatedSlice();
    if (allocator.resize(old_memory, new_capacity)) {
        self.capacity = new_capacity;
    } else {
        const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity);
        // Only the in-use prefix (`items`) needs to be carried over.
        mem.copy(T, new_memory, self.items);
        allocator.free(old_memory);
        self.items.ptr = new_memory.ptr;
        self.capacity = new_memory.len;
    }
}
/// Modify the array so that it can hold at least `additional_count` **more** items.
/// Invalidates pointers if additional memory is needed.
pub fn ensureUnusedCapacity(
    self: *Self,
    allocator: Allocator,
    additional_count: usize,
) Allocator.Error!void {
    // Guard the length addition: a pathological `additional_count` could
    // overflow `usize`, which would trap in safe builds (UB in ReleaseFast).
    // Such a capacity can never be allocated anyway, so report OutOfMemory.
    const new_capacity = math.add(usize, self.items.len, additional_count) catch return error.OutOfMemory;
    return self.ensureTotalCapacity(allocator, new_capacity);
}
/// Increases the array's length to match the full capacity that is already allocated.
/// The new elements have `undefined` values.
/// **Does not** invalidate pointers.
pub fn expandToCapacity(self: *Self) void {
    self.items.len = self.capacity;
}
/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list is resized.
pub fn addOne(self: *Self, allocator: Allocator) Allocator.Error!*T {
    // Grow first (may reallocate), then hand off to the capacity-assuming path.
    try self.ensureTotalCapacity(allocator, self.items.len + 1);
    return self.addOneAssumeCapacity();
}
/// Increase length by 1, returning pointer to the new item.
/// Asserts that there is already space for the new item without allocating more.
/// **Does not** invalidate pointers.
/// The returned pointer becomes invalid when the list is resized.
pub fn addOneAssumeCapacity(self: *Self) *T {
    assert(self.items.len < self.capacity);
    const index = self.items.len;
    self.items.len = index + 1;
    return &self.items[index];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
pub fn addManyAsArray(self: *Self, allocator: Allocator, comptime n: usize) Allocator.Error!*[n]T {
    const old_len = self.items.len;
    try self.resize(allocator, old_len + n);
    // The fresh tail starts where the old items ended.
    return self.items[old_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// Asserts that there is already space for the new items without allocating more.
/// **Does not** invalidate pointers.
/// The returned pointer becomes invalid when the list is resized.
pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
    const start = self.items.len;
    assert(start + n <= self.capacity);
    self.items.len = start + n;
    return self.items[start..][0..n];
}
/// Remove and return the last element from the list.
/// Asserts the list has at least one item (out-of-bounds index otherwise).
/// Invalidates pointers to the last element.
pub fn pop(self: *Self) T {
    const last = self.items.len - 1;
    const value = self.items[last];
    self.items.len = last;
    return value;
}
/// Remove and return the last element from the list.
/// If the list is empty, returns `null`.
/// Invalidates pointers to the last element.
pub fn popOrNull(self: *Self) ?T {
    if (self.items.len > 0) return self.pop();
    return null;
}
/// For a nicer API, `items.len` is the length, not the capacity.
/// This requires "unsafe" slicing.
/// Returns the full allocation, including the uninitialized tail past `items.len`.
pub fn allocatedSlice(self: Self) Slice {
    return self.items.ptr[0..self.capacity];
}
/// Returns a slice of only the extra capacity after items.
/// This can be useful for writing directly into an ArrayList.
/// Note that such an operation must be followed up with a direct
/// modification of `self.items.len`.
/// The returned slice's contents are uninitialized.
pub fn unusedCapacitySlice(self: Self) Slice {
    return self.allocatedSlice()[self.items.len..];
}
};
}
// A default-constructed list (managed or unmanaged) is empty with no allocation.
test "std.ArrayList/ArrayListUnmanaged.init" {
    {
        var list = ArrayList(i32).init(testing.allocator);
        defer list.deinit();
        try testing.expect(list.items.len == 0);
        try testing.expect(list.capacity == 0);
    }
    {
        var list = ArrayListUnmanaged(i32){};
        try testing.expect(list.items.len == 0);
        try testing.expect(list.capacity == 0);
    }
}
// initCapacity preallocates storage without changing the visible length.
test "std.ArrayList/ArrayListUnmanaged.initCapacity" {
    const a = testing.allocator;
    {
        var list = try ArrayList(i8).initCapacity(a, 200);
        defer list.deinit();
        try testing.expect(list.items.len == 0);
        try testing.expect(list.capacity >= 200);
    }
    {
        var list = try ArrayListUnmanaged(i8).initCapacity(a, 200);
        defer list.deinit(a);
        try testing.expect(list.items.len == 0);
        try testing.expect(list.capacity >= 200);
    }
}
// clone must deep-copy: the original is deinitialized before the clone's
// contents are read, so shared storage would be a use-after-free.
test "std.ArrayList/ArrayListUnmanaged.clone" {
    const a = testing.allocator;
    {
        var array = ArrayList(i32).init(a);
        try array.append(-1);
        try array.append(3);
        try array.append(5);
        const cloned = try array.clone();
        defer cloned.deinit();
        try testing.expectEqualSlices(i32, array.items, cloned.items);
        try testing.expectEqual(array.allocator, cloned.allocator);
        try testing.expect(cloned.capacity >= array.capacity);
        // Free the original first; the clone must remain readable below.
        array.deinit();
        try testing.expectEqual(@as(i32, -1), cloned.items[0]);
        try testing.expectEqual(@as(i32, 3), cloned.items[1]);
        try testing.expectEqual(@as(i32, 5), cloned.items[2]);
    }
    {
        var array = ArrayListUnmanaged(i32){};
        try array.append(a, -1);
        try array.append(a, 3);
        try array.append(a, 5);
        var cloned = try array.clone(a);
        defer cloned.deinit(a);
        try testing.expectEqualSlices(i32, array.items, cloned.items);
        try testing.expect(cloned.capacity >= array.capacity);
        array.deinit(a);
        try testing.expectEqual(@as(i32, -1), cloned.items[0]);
        try testing.expectEqual(@as(i32, 3), cloned.items[1]);
        try testing.expectEqual(@as(i32, 5), cloned.items[2]);
    }
}
// Exercises append/pop/appendSlice/appendUnalignedSlice round-trips on both
// the managed and unmanaged variants (the two halves are intentionally mirrored).
test "std.ArrayList/ArrayListUnmanaged.basic" {
    const a = testing.allocator;
    {
        var list = ArrayList(i32).init(a);
        defer list.deinit();
        {
            var i: usize = 0;
            while (i < 10) : (i += 1) {
                list.append(@intCast(i32, i + 1)) catch unreachable;
            }
        }
        {
            var i: usize = 0;
            while (i < 10) : (i += 1) {
                try testing.expect(list.items[i] == @intCast(i32, i + 1));
            }
        }
        for (list.items) |v, i| {
            try testing.expect(v == @intCast(i32, i + 1));
        }
        try testing.expect(list.pop() == 10);
        try testing.expect(list.items.len == 9);
        list.appendSlice(&[_]i32{ 1, 2, 3 }) catch unreachable;
        try testing.expect(list.items.len == 12);
        try testing.expect(list.pop() == 3);
        try testing.expect(list.pop() == 2);
        try testing.expect(list.pop() == 1);
        try testing.expect(list.items.len == 9);
        // Deliberately under-aligned source buffer for appendUnalignedSlice.
        var unaligned: [3]i32 align(1) = [_]i32{ 4, 5, 6 };
        list.appendUnalignedSlice(&unaligned) catch unreachable;
        try testing.expect(list.items.len == 12);
        try testing.expect(list.pop() == 6);
        try testing.expect(list.pop() == 5);
        try testing.expect(list.pop() == 4);
        try testing.expect(list.items.len == 9);
        // Appending an empty slice is a no-op.
        list.appendSlice(&[_]i32{}) catch unreachable;
        try testing.expect(list.items.len == 9);
        // can only set on indices < self.items.len
        list.items[7] = 33;
        list.items[8] = 42;
        try testing.expect(list.pop() == 42);
        try testing.expect(list.pop() == 33);
    }
    {
        var list = ArrayListUnmanaged(i32){};
        defer list.deinit(a);
        {
            var i: usize = 0;
            while (i < 10) : (i += 1) {
                list.append(a, @intCast(i32, i + 1)) catch unreachable;
            }
        }
        {
            var i: usize = 0;
            while (i < 10) : (i += 1) {
                try testing.expect(list.items[i] == @intCast(i32, i + 1));
            }
        }
        for (list.items) |v, i| {
            try testing.expect(v == @intCast(i32, i + 1));
        }
        try testing.expect(list.pop() == 10);
        try testing.expect(list.items.len == 9);
        list.appendSlice(a, &[_]i32{ 1, 2, 3 }) catch unreachable;
        try testing.expect(list.items.len == 12);
        try testing.expect(list.pop() == 3);
        try testing.expect(list.pop() == 2);
        try testing.expect(list.pop() == 1);
        try testing.expect(list.items.len == 9);
        var unaligned: [3]i32 align(1) = [_]i32{ 4, 5, 6 };
        list.appendUnalignedSlice(a, &unaligned) catch unreachable;
        try testing.expect(list.items.len == 12);
        try testing.expect(list.pop() == 6);
        try testing.expect(list.pop() == 5);
        try testing.expect(list.pop() == 4);
        try testing.expect(list.items.len == 9);
        list.appendSlice(a, &[_]i32{}) catch unreachable;
        try testing.expect(list.items.len == 9);
        // can only set on indices < self.items.len
        list.items[7] = 33;
        list.items[8] = 42;
        try testing.expect(list.pop() == 42);
        try testing.expect(list.pop() == 33);
    }
}
// appendNTimes fills the list with N copies of a single value.
test "std.ArrayList/ArrayListUnmanaged.appendNTimes" {
    const a = testing.allocator;
    {
        var list = ArrayList(i32).init(a);
        defer list.deinit();
        try list.appendNTimes(2, 10);
        try testing.expectEqual(@as(usize, 10), list.items.len);
        for (list.items) |element| {
            try testing.expectEqual(@as(i32, 2), element);
        }
    }
    {
        var list = ArrayListUnmanaged(i32){};
        defer list.deinit(a);
        try list.appendNTimes(a, 2, 10);
        try testing.expectEqual(@as(usize, 10), list.items.len);
        for (list.items) |element| {
            try testing.expectEqual(@as(i32, 2), element);
        }
    }
}
// Allocation failure must surface as error.OutOfMemory, not a crash.
test "std.ArrayList/ArrayListUnmanaged.appendNTimes with failing allocator" {
    const a = testing.failing_allocator;
    {
        var list = ArrayList(i32).init(a);
        defer list.deinit();
        try testing.expectError(error.OutOfMemory, list.appendNTimes(2, 10));
    }
    {
        var list = ArrayListUnmanaged(i32){};
        defer list.deinit(a);
        try testing.expectError(error.OutOfMemory, list.appendNTimes(a, 2, 10));
    }
}
// orderedRemove shifts the tail left, preserving relative element order.
test "std.ArrayList/ArrayListUnmanaged.orderedRemove" {
    const a = testing.allocator;
    {
        var list = ArrayList(i32).init(a);
        defer list.deinit();
        try list.append(1);
        try list.append(2);
        try list.append(3);
        try list.append(4);
        try list.append(5);
        try list.append(6);
        try list.append(7);
        //remove from middle
        try testing.expectEqual(@as(i32, 4), list.orderedRemove(3));
        try testing.expectEqual(@as(i32, 5), list.items[3]);
        try testing.expectEqual(@as(usize, 6), list.items.len);
        //remove from end
        try testing.expectEqual(@as(i32, 7), list.orderedRemove(5));
        try testing.expectEqual(@as(usize, 5), list.items.len);
        //remove from front
        try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
        try testing.expectEqual(@as(i32, 2), list.items[0]);
        try testing.expectEqual(@as(usize, 4), list.items.len);
    }
    {
        var list = ArrayListUnmanaged(i32){};
        defer list.deinit(a);
        try list.append(a, 1);
        try list.append(a, 2);
        try list.append(a, 3);
        try list.append(a, 4);
        try list.append(a, 5);
        try list.append(a, 6);
        try list.append(a, 7);
        //remove from middle
        try testing.expectEqual(@as(i32, 4), list.orderedRemove(3));
        try testing.expectEqual(@as(i32, 5), list.items[3]);
        try testing.expectEqual(@as(usize, 6), list.items.len);
        //remove from end
        try testing.expectEqual(@as(i32, 7), list.orderedRemove(5));
        try testing.expectEqual(@as(usize, 5), list.items.len);
        //remove from front
        try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
        try testing.expectEqual(@as(i32, 2), list.items[0]);
        try testing.expectEqual(@as(usize, 4), list.items.len);
    }
}
// swapRemove replaces the removed slot with the last element (O(1), order not preserved).
test "std.ArrayList/ArrayListUnmanaged.swapRemove" {
    const a = testing.allocator;
    {
        var list = ArrayList(i32).init(a);
        defer list.deinit();
        try list.append(1);
        try list.append(2);
        try list.append(3);
        try list.append(4);
        try list.append(5);
        try list.append(6);
        try list.append(7);
        //remove from middle
        try testing.expect(list.swapRemove(3) == 4);
        try testing.expect(list.items[3] == 7);
        try testing.expect(list.items.len == 6);
        //remove from end
        try testing.expect(list.swapRemove(5) == 6);
        try testing.expect(list.items.len == 5);
        //remove from front
        try testing.expect(list.swapRemove(0) == 1);
        try testing.expect(list.items[0] == 5);
        try testing.expect(list.items.len == 4);
    }
    {
        var list = ArrayListUnmanaged(i32){};
        defer list.deinit(a);
        try list.append(a, 1);
        try list.append(a, 2);
        try list.append(a, 3);
        try list.append(a, 4);
        try list.append(a, 5);
        try list.append(a, 6);
        try list.append(a, 7);
        //remove from middle
        try testing.expect(list.swapRemove(3) == 4);
        try testing.expect(list.items[3] == 7);
        try testing.expect(list.items.len == 6);
        //remove from end
        try testing.expect(list.swapRemove(5) == 6);
        try testing.expect(list.items.len == 5);
        //remove from front
        try testing.expect(list.swapRemove(0) == 1);
        try testing.expect(list.items[0] == 5);
        try testing.expect(list.items.len == 4);
    }
}
// insert shifts existing elements right to make room at the given index.
test "std.ArrayList/ArrayListUnmanaged.insert" {
    const a = testing.allocator;
    {
        var list = ArrayList(i32).init(a);
        defer list.deinit();
        try list.append(1);
        try list.append(2);
        try list.append(3);
        try list.insert(0, 5);
        try testing.expect(list.items[0] == 5);
        try testing.expect(list.items[1] == 1);
        try testing.expect(list.items[2] == 2);
        try testing.expect(list.items[3] == 3);
    }
    {
        var list = ArrayListUnmanaged(i32){};
        defer list.deinit(a);
        try list.append(a, 1);
        try list.append(a, 2);
        try list.append(a, 3);
        try list.insert(a, 0, 5);
        try testing.expect(list.items[0] == 5);
        try testing.expect(list.items[1] == 1);
        try testing.expect(list.items[2] == 2);
        try testing.expect(list.items[3] == 3);
    }
}
// insertSlice splices a whole slice at an index; empty slices are no-ops.
test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
    const a = testing.allocator;
    {
        var list = ArrayList(i32).init(a);
        defer list.deinit();
        try list.append(1);
        try list.append(2);
        try list.append(3);
        try list.append(4);
        try list.insertSlice(1, &[_]i32{ 9, 8 });
        try testing.expect(list.items[0] == 1);
        try testing.expect(list.items[1] == 9);
        try testing.expect(list.items[2] == 8);
        try testing.expect(list.items[3] == 2);
        try testing.expect(list.items[4] == 3);
        try testing.expect(list.items[5] == 4);
        const items = [_]i32{1};
        try list.insertSlice(0, items[0..0]);
        try testing.expect(list.items.len == 6);
        try testing.expect(list.items[0] == 1);
    }
    {
        var list = ArrayListUnmanaged(i32){};
        defer list.deinit(a);
        try list.append(a, 1);
        try list.append(a, 2);
        try list.append(a, 3);
        try list.append(a, 4);
        try list.insertSlice(a, 1, &[_]i32{ 9, 8 });
        try testing.expect(list.items[0] == 1);
        try testing.expect(list.items[1] == 9);
        try testing.expect(list.items[2] == 8);
        try testing.expect(list.items[3] == 2);
        try testing.expect(list.items[4] == 3);
        try testing.expect(list.items[5] == 4);
        const items = [_]i32{1};
        try list.insertSlice(a, 0, items[0..0]);
        try testing.expect(list.items.len == 6);
        try testing.expect(list.items[0] == 1);
    }
}
// replaceRange covers the replaced-range-length vs. new-slice-length cases:
// zero (pure insert), equal, shorter, and longer than the replacement.
test "std.ArrayList/ArrayListUnmanaged.replaceRange" {
    var arena = std.heap.ArenaAllocator.init(testing.allocator);
    defer arena.deinit();
    const a = arena.allocator();
    const init = [_]i32{ 1, 2, 3, 4, 5 };
    const new = [_]i32{ 0, 0, 0 };
    const result_zero = [_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 };
    const result_eq = [_]i32{ 1, 0, 0, 0, 5 };
    const result_le = [_]i32{ 1, 0, 0, 0, 4, 5 };
    const result_gt = [_]i32{ 1, 0, 0, 0 };
    {
        var list_zero = ArrayList(i32).init(a);
        var list_eq = ArrayList(i32).init(a);
        var list_lt = ArrayList(i32).init(a);
        var list_gt = ArrayList(i32).init(a);
        try list_zero.appendSlice(&init);
        try list_eq.appendSlice(&init);
        try list_lt.appendSlice(&init);
        try list_gt.appendSlice(&init);
        try list_zero.replaceRange(1, 0, &new);
        try list_eq.replaceRange(1, 3, &new);
        try list_lt.replaceRange(1, 2, &new);
        // after_range > new_items.len in function body
        try testing.expect(1 + 4 > new.len);
        try list_gt.replaceRange(1, 4, &new);
        try testing.expectEqualSlices(i32, list_zero.items, &result_zero);
        try testing.expectEqualSlices(i32, list_eq.items, &result_eq);
        try testing.expectEqualSlices(i32, list_lt.items, &result_le);
        try testing.expectEqualSlices(i32, list_gt.items, &result_gt);
    }
    {
        var list_zero = ArrayListUnmanaged(i32){};
        var list_eq = ArrayListUnmanaged(i32){};
        var list_lt = ArrayListUnmanaged(i32){};
        var list_gt = ArrayListUnmanaged(i32){};
        try list_zero.appendSlice(a, &init);
        try list_eq.appendSlice(a, &init);
        try list_lt.appendSlice(a, &init);
        try list_gt.appendSlice(a, &init);
        try list_zero.replaceRange(a, 1, 0, &new);
        try list_eq.replaceRange(a, 1, 3, &new);
        try list_lt.replaceRange(a, 1, 2, &new);
        // after_range > new_items.len in function body
        try testing.expect(1 + 4 > new.len);
        try list_gt.replaceRange(a, 1, 4, &new);
        try testing.expectEqualSlices(i32, list_zero.items, &result_zero);
        try testing.expectEqualSlices(i32, list_eq.items, &result_eq);
        try testing.expectEqualSlices(i32, list_lt.items, &result_le);
        try testing.expectEqualSlices(i32, list_gt.items, &result_gt);
    }
}
// Self-referential element type: an Item contains a list of Items.
const Item = struct {
    integer: i32,
    sub_items: ArrayList(Item),
};
const ItemUnmanaged = struct {
    integer: i32,
    sub_items: ArrayListUnmanaged(ItemUnmanaged),
};
// Lists of (recursively defined) struct element types must work.
test "std.ArrayList/ArrayListUnmanaged: ArrayList(T) of struct T" {
    const a = std.testing.allocator;
    {
        var root = Item{ .integer = 1, .sub_items = ArrayList(Item).init(a) };
        defer root.sub_items.deinit();
        try root.sub_items.append(Item{ .integer = 42, .sub_items = ArrayList(Item).init(a) });
        try testing.expect(root.sub_items.items[0].integer == 42);
    }
    {
        var root = ItemUnmanaged{ .integer = 1, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} };
        defer root.sub_items.deinit(a);
        try root.sub_items.append(a, ItemUnmanaged{ .integer = 42, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} });
        try testing.expect(root.sub_items.items[0].integer == 42);
    }
}
// A u8 list doubles as an output stream via .writer().
test "std.ArrayList(u8)/ArrayListAligned implements writer" {
    const a = testing.allocator;
    {
        var buffer = ArrayList(u8).init(a);
        defer buffer.deinit();
        const x: i32 = 42;
        const y: i32 = 1234;
        try buffer.writer().print("x: {}\ny: {}\n", .{ x, y });
        try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
    }
    {
        var list = ArrayListAligned(u8, 2).init(a);
        defer list.deinit();
        const writer = list.writer();
        try writer.writeAll("a");
        try writer.writeAll("bc");
        try writer.writeAll("d");
        try writer.writeAll("efg");
        try testing.expectEqualSlices(u8, list.items, "abcdefg");
    }
}
// Same as above for the unmanaged variants (allocator passed to .writer()).
test "std.ArrayListUnmanaged(u8) implements writer" {
    const a = testing.allocator;
    {
        var buffer: ArrayListUnmanaged(u8) = .{};
        defer buffer.deinit(a);
        const x: i32 = 42;
        const y: i32 = 1234;
        try buffer.writer(a).print("x: {}\ny: {}\n", .{ x, y });
        try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
    }
    {
        var list: ArrayListAlignedUnmanaged(u8, 2) = .{};
        defer list.deinit(a);
        const writer = list.writer(a);
        try writer.writeAll("a");
        try writer.writeAll("bc");
        try writer.writeAll("d");
        try writer.writeAll("efg");
        try testing.expectEqualSlices(u8, list.items, "abcdefg");
    }
}
// Forces the allocator's resize hook to always refuse, so shrinkAndFree must
// take its fallback paths and still end with the requested length.
test "shrink still sets length when resizing is disabled" {
    // Use the testing allocator but with resize disabled.
    var a = testing.allocator;
    a.vtable = &.{
        .alloc = a.vtable.alloc,
        .resize = Allocator.noResize,
        .free = a.vtable.free,
    };
    {
        var list = ArrayList(i32).init(a);
        defer list.deinit();
        try list.append(1);
        try list.append(2);
        try list.append(3);
        list.shrinkAndFree(1);
        try testing.expect(list.items.len == 1);
    }
    {
        var list = ArrayListUnmanaged(i32){};
        defer list.deinit(a);
        try list.append(a, 1);
        try list.append(a, 2);
        try list.append(a, 3);
        list.shrinkAndFree(a, 1);
        try testing.expect(list.items.len == 1);
    }
}
// With resize disabled, shrinkAndFree must fall back to allocate-and-copy
// while preserving the retained prefix.
test "shrinkAndFree with a copy" {
    // Use the testing allocator but with resize disabled.
    var a = testing.allocator;
    a.vtable = &.{
        .alloc = a.vtable.alloc,
        .resize = Allocator.noResize,
        .free = a.vtable.free,
    };
    var list = ArrayList(i32).init(a);
    defer list.deinit();
    try list.appendNTimes(3, 16);
    list.shrinkAndFree(4);
    try testing.expect(mem.eql(i32, list.items, &.{ 3, 3, 3, 3 }));
}
// addManyAsArray returns a pointer-to-array that can be assigned wholesale.
test "std.ArrayList/ArrayListUnmanaged.addManyAsArray" {
    const a = std.testing.allocator;
    {
        var list = ArrayList(u8).init(a);
        defer list.deinit();
        (try list.addManyAsArray(4)).* = "aoeu".*;
        try list.ensureTotalCapacity(8);
        list.addManyAsArrayAssumeCapacity(4).* = "asdf".*;
        try testing.expectEqualSlices(u8, list.items, "aoeuasdf");
    }
    {
        var list = ArrayListUnmanaged(u8){};
        defer list.deinit(a);
        (try list.addManyAsArray(a, 4)).* = "aoeu".*;
        try list.ensureTotalCapacity(a, 8);
        list.addManyAsArrayAssumeCapacity(4).* = "asdf".*;
        try testing.expectEqualSlices(u8, list.items, "aoeuasdf");
    }
}
// toOwnedSliceSentinel appends the sentinel and transfers ownership to the caller.
test "std.ArrayList/ArrayListUnmanaged.toOwnedSliceSentinel" {
    const a = testing.allocator;
    {
        var list = ArrayList(u8).init(a);
        defer list.deinit();
        try list.appendSlice("foobar");
        const result = try list.toOwnedSliceSentinel(0);
        defer a.free(result);
        try testing.expectEqualStrings(result, mem.sliceTo(result.ptr, 0));
    }
    {
        var list = ArrayListUnmanaged(u8){};
        defer list.deinit(a);
        try list.appendSlice(a, "foobar");
        const result = try list.toOwnedSliceSentinel(a, 0);
        defer a.free(result);
        try testing.expectEqualStrings(result, mem.sliceTo(result.ptr, 0));
    }
}
// Over-aligned lists must still accept source slices of any alignment.
test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" {
    const a = testing.allocator;
    {
        var list = std.ArrayListAligned(u8, 8).init(a);
        defer list.deinit();
        try list.appendSlice(&.{ 0, 1, 2, 3 });
        try list.insertSlice(2, &.{ 4, 5, 6, 7 });
        try list.replaceRange(1, 3, &.{ 8, 9 });
        try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
    }
    {
        var list = std.ArrayListAlignedUnmanaged(u8, 8){};
        defer list.deinit(a);
        try list.appendSlice(a, &.{ 0, 1, 2, 3 });
        try list.insertSlice(a, 2, &.{ 4, 5, 6, 7 });
        try list.replaceRange(a, 1, 3, &.{ 8, 9 });
        try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
    }
}
test "std.ArrayList(u0)" {
    // An ArrayList on zero-sized types should not need to allocate
    var failing_allocator = testing.FailingAllocator.init(testing.allocator, 0);
    const a = failing_allocator.allocator();
    var list = ArrayList(u0).init(a);
    defer list.deinit();
    try list.append(0);
    try list.append(0);
    try list.append(0);
    try testing.expectEqual(list.items.len, 3);
    var count: usize = 0;
    for (list.items) |x| {
        try testing.expectEqual(x, 0);
        count += 1;
    }
    try testing.expectEqual(count, 3);
}
| https://raw.githubusercontent.com/mazino3/ziglang/3db8cffa3b383011471f425983a7e98ad8a46aa5/lib/std/array_list.zig |
// Re-export the cglm C math library's declarations at this module's scope.
pub usingnamespace @cImport({
    // Use a [0, 1] clip-space depth range (Vulkan convention) instead of
    // OpenGL's default [-1, 1].
    @cDefine("CGLM_FORCE_DEPTH_ZERO_TO_ONE", "");
    @cInclude("cglm/cglm.h");
    @cInclude("cglm/call.h");
});
| https://raw.githubusercontent.com/treemcgee42/r4-engine/3f503075e591ea2831a603dbf02a4f83a25f8eed/src/c/cglm.zig |
const std = @import("std");
const testing = std.testing;
const Allocator = std.mem.Allocator;
const mem = @import("std").mem;
/// Append-only byte buffer used for karmem serialization.
/// `memory.len` is the number of bytes written so far; the full allocation is
/// `memory.ptr[0..capacity]`.
pub const Writer = struct {
    memory: []u8,
    capacity: usize,
    allocator: Allocator,
    // When true the buffer is caller-provided and must never grow.
    isFixed: bool,
    /// Reserve `len` bytes at the end of the buffer, growing if necessary,
    /// and return the offset at which they begin.
    /// NOTE(review): the offset is truncated to u32 — presumably buffers are
    /// assumed to stay below 4 GiB; confirm against the karmem format.
    pub fn Alloc(self: *Writer, len: usize) Allocator.Error!u32 {
        var offset: usize = self.memory.len;
        var total: usize = offset + len;
        if (total > self.capacity) {
            // Double the capacity, but never below the required total.
            var capacityTarget: usize = self.capacity * 2;
            if (total > capacityTarget) {
                capacityTarget = total;
            }
            try Writer.Grow(self, capacityTarget);
        }
        self.memory.len = total;
        return @intCast(u32, offset);
    }
    /// Grow the underlying allocation to at least `cap` bytes.
    /// Fixed-buffer writers cannot grow, so they report OutOfMemory.
    pub fn Grow(self: *Writer, cap: usize) Allocator.Error!void {
        if (self.isFixed) {
            return std.mem.Allocator.Error.OutOfMemory;
        }
        const new_memory = try self.allocator.reallocAtLeast(self.memory.ptr[0..self.capacity], cap);
        self.memory.ptr = new_memory.ptr;
        self.capacity = new_memory.len;
    }
    /// Copy `len` bytes from `src` into the buffer at `offset`.
    /// No bounds checking: the range is assumed to have been reserved via `Alloc`.
    pub fn WriteAt(self: *Writer, offset: u32, src: [*]const u8, len: usize) void {
        @memcpy(self.memory.ptr[offset .. offset + len].ptr, src, len);
    }
    /// The bytes written so far.
    pub fn Bytes(self: *Writer) []u8 {
        return self.memory;
    }
    /// Discard written bytes but keep the allocation for reuse.
    pub fn Reset(self: *Writer) void {
        self.memory.len = 0;
    }
    /// Release the underlying allocation.
    /// NOTE(review): on a fixed writer this would free caller-owned memory via
    /// a placeholder allocator — presumably only called on heap-backed writers;
    /// verify at call sites.
    pub fn Free(self: *Writer) void {
        self.allocator.free(self.memory.ptr[0..self.capacity]);
    }
};
/// Create a growable Writer backed by `allocator`, preallocating `cap` bytes.
/// Caller must eventually call `Writer.Free`.
pub fn NewWriter(allocator: Allocator, cap: usize) !Writer {
    var r: Writer = .{
        .memory = &[_]u8{},
        .capacity = 0,
        .allocator = allocator,
        .isFixed = false,
    };
    // Perform the initial allocation up front so the first Alloc is cheap.
    try Writer.Grow(&r, cap);
    return r;
}
/// Create a Writer over caller-owned storage; it never grows and reports
/// OutOfMemory once `slice` is full.
pub fn NewFixedWriter(slice: []u8) Writer {
    var r: Writer = .{
        .memory = slice,
        .capacity = slice.len,
        // Placeholder allocator; a fixed writer never allocates.
        .allocator = std.heap.page_allocator,
        .isFixed = true,
    };
    // Start with zero bytes written (capacity stays slice.len).
    Writer.Reset(&r);
    return r;
}
/// Read-side view over a serialized karmem buffer.
/// `length` is the logical size being read, which may be smaller than
/// `memory.len` (see `SetSize`).
pub const Reader = struct {
    memory: []u8,
    length: u64,
    allocator: Allocator,
    /// Whether `[offset, offset + size)` lies entirely within the logical buffer.
    /// Widens to u64 before adding, so the check itself cannot overflow.
    pub fn IsValidOffset(x: *Reader, offset: u32, size: u32) bool {
        return x.length >= (@intCast(u64, offset) + @intCast(u64, size));
    }
    /// Restrict the logical size to `size` bytes.
    /// Returns false (and leaves the reader unchanged) if `size` exceeds the
    /// backing memory.
    pub fn SetSize(x: *Reader, size: u32) bool {
        if (size > x.memory.len) {
            return false;
        }
        x.length = @intCast(u64, size);
        return true;
    }
};
/// Create a Reader over `memory`; the logical length starts at the full
/// slice length. The reader does not take ownership of `memory`.
pub fn NewReader(allocator: Allocator, memory: []u8) Reader {
    return Reader{
        .memory = memory,
        .length = @intCast(u64, memory.len),
        .allocator = allocator,
    };
}
| https://raw.githubusercontent.com/inkeliz/karmem/8949d1b5e1fd945f69aa2ab860df27277f64e32c/zig/karmem.zig |
const serial = @import("./drivers/serial.zig");
// The five-entry GDT loaded at boot:
//   [0] mandatory null descriptor
//   [1] ring-0 code segment (executable, long-mode L bit)
//   [2] ring-0 data segment (read/write, D/B size bit)
//   [3] ring-3 descriptor with L set but not executable
//   [4] ring-3 executable descriptor with the D/B bit instead of L
// NOTE(review): entries [3]/[4] break the pattern of [1]/[2] — [3] looks like
// user data yet sets `.long`, and [4] looks like user code yet uses
// `.descriptor` (32-bit) instead of `.long`. Verify the flags were not
// swapped between the two user entries.
var GDT: [5]GDTEntry = [_]GDTEntry{
    GDTEntry{},
    GDTEntry{ .access_byte = .{ .present = true, .executable = true, .read_write = true, .typebit = TypeBit.CODE_DATA }, .flags = .{ .granularity = true, .long = true } },
    GDTEntry{ .access_byte = .{ .present = true, .read_write = true, .typebit = TypeBit.CODE_DATA }, .flags = .{ .granularity = true, .descriptor = 1 } },
    GDTEntry{ .access_byte = .{ .present = true, .read_write = true, .typebit = TypeBit.CODE_DATA, .privilege = 3 }, .flags = .{ .granularity = true, .long = true } },
    GDTEntry{ .access_byte = .{ .present = true, .executable = true, .read_write = true, .typebit = TypeBit.CODE_DATA, .privilege = 3 }, .flags = .{ .granularity = true, .descriptor = 1 } },
};
// Operand for the `lgdt` instruction: byte size of the table minus one,
// followed by its linear address. `packed` guarantees the 2+8 byte layout.
const GDTPtr = packed struct {
    size: u16,
    address: u64,
};
// Descriptor-type (S) bit of the access byte: 0 = system segment,
// 1 = code or data segment.
const TypeBit = enum(u1) {
    SYSTEM = 0,
    CODE_DATA = 1,
};
// Access byte of a segment descriptor, laid out LSB-first to match the
// hardware encoding (accessed, RW, DC, executable, S, DPL, present).
const AccessByte = packed struct(u8) {
    accessed_bit: bool = false,
    read_write: bool = false,
    // Direction (data) / conforming (code) bit.
    dc: bool = false,
    executable: bool = false,
    typebit: TypeBit = TypeBit.SYSTEM,
    // Descriptor privilege level (0 = kernel, 3 = user).
    privilege: u2 = 0,
    present: bool = false,
};
// High nibble of the descriptor: reserved/AVL, L (64-bit code),
// D/B (default operand size), and G (4 KiB granularity).
pub const Flags = packed struct(u4) {
    reversed: u1 = 0,
    long: bool = false,
    descriptor: u1 = 0,
    granularity: bool = false,
    // Sanity-check the bit order at compile time: G must be the top bit.
    comptime {
        const std = @import("std");
        std.debug.assert(@as(u4, @bitCast(Flags{ .granularity = true })) == 0b1000);
    }
};
pub const GDTEntry = packed struct(u64) {
limit_low: u16 = 0x00,
base_low: u16 = 0x00,
base_middle: u8 = 0x00,
access_byte: AccessByte = .{},
limit_high: u4 = 0x00,
flags: Flags = .{},
base: u8 = 0x00,
};
// Assembly routine that executes `lgdt` and reloads the segment registers.
extern fn load_gdt(gdt_descriptor: *const GDTPtr) void;
/// Build the GDT pointer and hand the table to the CPU via `load_gdt`.
pub fn init() void {
    serial.print("Start GDT Init\n", .{})
    // `size` is the table's byte length minus one, per the lgdt convention.
    load_gdt(&GDTPtr{ .size = @sizeOf([5]GDTEntry) - 1, .address = @intFromPtr(&GDT) });
    serial.print_ok("GDT", .{});
}
| https://raw.githubusercontent.com/Rheydskey/zros/2dd07b403ed9a09359e679c19bb4504b5fbd2338/src/gdt.zig |
const print = std.debug.print;
const std = @import("std");
/// Scratch experiment exploring how Zig treats `undefined` values; several
/// lines are intentionally on the edge of (or past) compile errors, as the
/// inline XXX comments note.
pub fn main() !void {
    var u: u8 = undefined; //XXX: if this becomes 'var' then no error below:
    var w: u8 = u + 1; // undef-tst.zig:7:17: error: use of undefined value here causes undefined behavior
    //var x: u8 = undefined / 1;
    //var xx:Undefined=undefined;
    print("Hi! {}\n", .{u});
    // Reading/comparing `undefined` is itself undefined behavior; the observed
    // true/false results below are incidental, not guaranteed.
    print("{}\n", .{u == undefined}); // false
    print("{}\n", .{undefined == u}); // true
    // XXX: ^ wtf?
    //print("Hi! {}\n", .{undefined == undefined}); // error: operator == not allowed for type '@TypeOf(undefined)' //This makes some sense.
    u = u + 1;
    print("Hi! {}\n", .{w});
    //print("Hi! {}\n", .{x});
    print("Hi! {}\n", .{@TypeOf(undefined)});
    const r: @TypeOf(undefined) = undefined;
    _ = r;
    // NOTE(review): `{u8}` is not a valid std.fmt specifier, and `r` is used
    // after being discarded — presumably this line is meant to not compile.
    print("{u8}\n", .{r});
}
| https://raw.githubusercontent.com/correabuscar/sandbox/21c51381071ed27b36b15d9e9d701f15cde8f101/zig/zigbyexample/undef/undef-tst.zig |
const std = @import("std");
const t = @import("../../types.zig");
const vk = @import("vulkan");
const zwin = @import("zwin");
const Allocator = @import("../../allocator.zig");
const Ctx = @import("Context.zig");
/// Vulkan swapchain wrapper; owns the swapchain handle, its per-image
/// synchronization objects, and the index of the currently acquired image.
pub const Swapchain = struct {
    /// Result of presenting: whether the swapchain still matches the surface
    /// or should be recreated.
    pub const PresentState = enum {
        optimal,
        suboptimal,
    };
    surface_format: vk.SurfaceFormatKHR,
    present_mode: vk.PresentModeKHR,
    extent: vk.Extent2D,
    handle: vk.SwapchainKHR,
    swap_images: []SwapImage,
    // Index into `swap_images` of the image currently acquired for rendering.
    image_index: u32,
    // Spare semaphore for the next acquire; swapped with the acquired image's
    // `image_acquired` semaphore after every acquireNextImageKHR call.
    next_image_acquired: vk.Semaphore,
/// Create a swapchain sized to `extent`, with no previous swapchain to recycle.
pub fn init(extent: vk.Extent2D) !Swapchain {
    return try init_recycle(extent, .null_handle);
}
/// Create a swapchain, optionally recycling `old_handle` (which is destroyed
/// once the new swapchain exists). Also acquires the first image so that the
/// returned Swapchain is immediately ready for rendering.
pub fn init_recycle(extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain {
    const capabilities = try Ctx.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(Ctx.physical_device, Ctx.surface)
    const actual_extent = find_actual_extent(capabilities, extent);
    // A zero-area surface (e.g. minimized window) cannot back a swapchain.
    if (actual_extent.width == 0 or actual_extent.height == 0) {
        return error.InvalidSurfaceDimensions;
    }
    const surface_format = try find_surface_format();
    const present_mode = try find_present_mode();
    // One image above the minimum reduces driver stalls; clamp to the
    // surface's maximum (0 means "no maximum").
    var image_count = capabilities.min_image_count + 1;
    if (capabilities.max_image_count > 0) {
        image_count = @min(image_count, capabilities.max_image_count);
    }
    // Concurrent sharing only when graphics and present are distinct families.
    const qfi = [_]u32{ Ctx.graphics_queue.family, Ctx.present_queue.family };
    const sharing_mode: vk.SharingMode = if (Ctx.graphics_queue.family != Ctx.present_queue.family) .concurrent else .exclusive;
    const handle = try Ctx.vkd.createSwapchainKHR(Ctx.device, &.{
        .surface = Ctx.surface,
        .min_image_count = image_count,
        .image_format = surface_format.format,
        .image_color_space = surface_format.color_space,
        .image_extent = actual_extent,
        .image_array_layers = 1,
        .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true },
        .image_sharing_mode = sharing_mode,
        .queue_family_index_count = qfi.len,
        .p_queue_family_indices = &qfi,
        .pre_transform = capabilities.current_transform,
        .composite_alpha = .{ .opaque_bit_khr = true },
        .present_mode = present_mode,
        .clipped = vk.TRUE,
        .old_swapchain = old_handle,
    }, null);
    errdefer Ctx.vkd.destroySwapchainKHR(Ctx.device, handle, null);
    // The old swapchain is retired by the create call above; free it now.
    if (old_handle != .null_handle) {
        Ctx.vkd.destroySwapchainKHR(Ctx.device, old_handle, null);
    }
    const swap_images = try init_swapchain_images(handle, surface_format.format);
    const alloc = try Allocator.allocator();
    errdefer {
        for (swap_images) |si| si.deinit();
        alloc.free(swap_images);
    }
    var next_image_acquired = try Ctx.vkd.createSemaphore(Ctx.device, &.{}, null);
    errdefer Ctx.vkd.destroySemaphore(Ctx.device, next_image_acquired, null);
    // Acquire the first image up front; afterwards the acquired image's
    // semaphore and the spare are swapped, keeping one semaphore free.
    const result = try Ctx.vkd.acquireNextImageKHR(Ctx.device, handle, std.math.maxInt(u64), next_image_acquired, .null_handle);
    if (result.result != .success) {
        return error.ImageAcquireFailed;
    }
    std.mem.swap(vk.Semaphore, &swap_images[result.image_index].image_acquired, &next_image_acquired);
    return Swapchain{
        .surface_format = surface_format,
        .present_mode = present_mode,
        .extent = actual_extent,
        .handle = handle,
        .swap_images = swap_images,
        .image_index = result.image_index,
        .next_image_acquired = next_image_acquired,
    };
}
/// Destroy everything owned by this Swapchain except the swapchain handle
/// itself — used by `recreate`, which passes the handle as `old_swapchain`.
fn deinitExceptSwapchain(self: Swapchain) void {
    for (self.swap_images) |si| si.deinit();
    // NOTE(review): assumes the project allocator is already initialized here;
    // `catch unreachable` would be UB in release builds if it is not.
    const allocator = Allocator.allocator() catch unreachable;
    allocator.free(self.swap_images);
    Ctx.vkd.destroySemaphore(Ctx.device, self.next_image_acquired, null);
}
/// Block until every in-flight frame's fence signals; per-image wait errors
/// are deliberately ignored (best effort before teardown/recreation).
pub fn waitForAllFences(self: Swapchain) !void {
    for (self.swap_images) |si| si.waitForFence() catch {};
}
/// Destroy all owned resources, including the swapchain handle.
pub fn deinit(self: Swapchain) void {
    self.deinitExceptSwapchain();
    Ctx.vkd.destroySwapchainKHR(Ctx.device, self.handle, null);
}
/// Rebuild the swapchain for a new surface extent, recycling the old handle
/// (needed after a window resize or a suboptimal/out-of-date present).
pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void {
    const old_handle = self.handle;
    self.deinitExceptSwapchain();
    self.* = try init_recycle(new_extent, old_handle);
}
/// The VkImage currently acquired for rendering.
pub fn currentImage(self: Swapchain) vk.Image {
    return self.swap_images[self.image_index].image;
}
/// The SwapImage (image + sync objects) currently acquired for rendering.
pub fn currentSwapImage(self: Swapchain) *const SwapImage {
    return &self.swap_images[self.image_index];
}
/// Resolve the swapchain extent from the surface capabilities.
/// A current_extent of 0xFFFF_FFFF means the surface lets the application
/// choose; otherwise the surface dictates the extent.
fn find_actual_extent(caps: vk.SurfaceCapabilitiesKHR, extent: vk.Extent2D) vk.Extent2D {
    if (caps.current_extent.width == 0xFFFF_FFFF) {
        const lo = caps.min_image_extent;
        const hi = caps.max_image_extent;
        return .{
            .width = std.math.clamp(extent.width, lo.width, hi.width),
            .height = std.math.clamp(extent.height, lo.height, hi.height),
        };
    }
    return caps.current_extent;
}
/// Pick a present mode: mailbox if available, then immediate, otherwise
/// FIFO (the only mode the spec guarantees).
fn find_present_mode() !vk.PresentModeKHR {
    // Standard two-call pattern: query the count, then fill the buffer.
    var count: u32 = undefined;
    _ = try Ctx.vki.getPhysicalDeviceSurfacePresentModesKHR(Ctx.physical_device, Ctx.surface, &count, null);
    const alloc = try Allocator.allocator();
    const present_modes = try alloc.alloc(vk.PresentModeKHR, count);
    defer alloc.free(present_modes);
    _ = try Ctx.vki.getPhysicalDeviceSurfacePresentModesKHR(Ctx.physical_device, Ctx.surface, &count, present_modes.ptr);
    // In order of preference.
    const preferred = [_]vk.PresentModeKHR{
        .mailbox_khr,
        .immediate_khr,
    };
    for (preferred) |mode| {
        if (std.mem.indexOfScalar(vk.PresentModeKHR, present_modes, mode) != null) {
            return mode;
        }
    }
    return .fifo_khr;
}
/// Waits for the fence guarding the current swap image, then resets it so the
/// upcoming queue submission can signal it again.
pub fn start_frame(self: *Swapchain) !void {
    // Step 1: Wait for frame
    const current = self.currentSwapImage();
    try current.waitForFence();
    // Fixed mojibake: "&current" had been corrupted to the HTML entity "¤t".
    try Ctx.vkd.resetFences(Ctx.device, 1, @ptrCast(&current.frame_fence));
}
/// Submits `cmdbuf` for the current image, presents it, and acquires the next
/// image. Returns whether the swapchain is still optimal for the surface.
/// Fixed mojibake: three "&current" references below had been corrupted to
/// the HTML entity "¤t".
pub fn present_frame(self: *Swapchain, cmdbuf: vk.CommandBuffer) !PresentState {
    // Step 2: Submit the command buffer
    const current = self.currentSwapImage();
    const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }};
    try Ctx.vkd.queueSubmit(Ctx.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{
        .wait_semaphore_count = 1,
        .p_wait_semaphores = @ptrCast(&current.image_acquired),
        .p_wait_dst_stage_mask = &wait_stage,
        .command_buffer_count = 1,
        .p_command_buffers = @ptrCast(&cmdbuf),
        .signal_semaphore_count = 1,
        .p_signal_semaphores = @ptrCast(&current.render_finished),
    }}, current.frame_fence);
    // Step 3: Present the current frame
    _ = try Ctx.vkd.queuePresentKHR(Ctx.present_queue.handle, &.{
        .wait_semaphore_count = 1,
        .p_wait_semaphores = @as([*]const vk.Semaphore, @ptrCast(&current.render_finished)),
        .swapchain_count = 1,
        .p_swapchains = @as([*]const vk.SwapchainKHR, @ptrCast(&self.handle)),
        .p_image_indices = @as([*]const u32, @ptrCast(&self.image_index)),
    });
    // Step 4: Acquire next frame. The spare `next_image_acquired` semaphore is
    // used for the acquire, then swapped into the acquired image's slot.
    const result = try Ctx.vkd.acquireNextImageKHR(
        Ctx.device,
        self.handle,
        std.math.maxInt(u64),
        self.next_image_acquired,
        .null_handle,
    );
    std.mem.swap(vk.Semaphore, &self.swap_images[result.image_index].image_acquired, &self.next_image_acquired);
    self.image_index = result.image_index;
    return switch (result.result) {
        .success => .optimal,
        .suboptimal_khr => .suboptimal,
        else => unreachable,
    };
}
/// Returns the preferred surface format (r8g8b8a8_unorm / sRGB nonlinear)
/// when the surface supports it, otherwise the first format reported.
fn find_surface_format() !vk.SurfaceFormatKHR {
    const wanted = vk.SurfaceFormatKHR{
        .format = .r8g8b8a8_unorm,
        .color_space = .srgb_nonlinear_khr,
    };
    var count: u32 = undefined;
    _ = try Ctx.vki.getPhysicalDeviceSurfaceFormatsKHR(Ctx.physical_device, Ctx.surface, &count, null);
    const alloc = try Allocator.allocator();
    const available = try alloc.alloc(vk.SurfaceFormatKHR, count);
    defer alloc.free(available);
    _ = try Ctx.vki.getPhysicalDeviceSurfaceFormatsKHR(Ctx.physical_device, Ctx.surface, &count, available.ptr);
    for (available) |candidate| {
        if (std.meta.eql(candidate, wanted)) {
            return wanted;
        }
    }
    return available[0];
}
};
/// Per-swapchain-image state: the image view plus the synchronization
/// primitives used to order acquisition, rendering, and presentation.
pub const SwapImage = struct {
    image: vk.Image,
    view: vk.ImageView,
    // Signaled when the presentation engine hands this image to us.
    image_acquired: vk.Semaphore,
    // Signaled when rendering to this image completes.
    render_finished: vk.Semaphore,
    // Signaled when the frame that last used this image finishes on the GPU.
    frame_fence: vk.Fence,
    /// Creates the view and sync objects for `image`. The errdefer chain
    /// unwinds partially created resources in reverse order on failure.
    fn init(image: vk.Image, format: vk.Format) !SwapImage {
        const view = try Ctx.vkd.createImageView(Ctx.device, &.{
            .image = image,
            .view_type = .@"2d",
            .format = format,
            .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity },
            .subresource_range = .{
                .aspect_mask = .{ .color_bit = true },
                .base_mip_level = 0,
                .level_count = 1,
                .base_array_layer = 0,
                .layer_count = 1,
            },
        }, null);
        errdefer Ctx.vkd.destroyImageView(Ctx.device, view, null);
        const image_acquired = try Ctx.vkd.createSemaphore(Ctx.device, &.{}, null);
        errdefer Ctx.vkd.destroySemaphore(Ctx.device, image_acquired, null);
        const render_finished = try Ctx.vkd.createSemaphore(Ctx.device, &.{}, null);
        errdefer Ctx.vkd.destroySemaphore(Ctx.device, render_finished, null);
        // Created signaled so the very first start_frame() wait returns at once.
        const frame_fence = try Ctx.vkd.createFence(Ctx.device, &.{ .flags = .{ .signaled_bit = true } }, null);
        errdefer Ctx.vkd.destroyFence(Ctx.device, frame_fence, null);
        return SwapImage{
            .image = image,
            .view = view,
            .image_acquired = image_acquired,
            .render_finished = render_finished,
            .frame_fence = frame_fence,
        };
    }
    /// Waits for the frame fence, then destroys all owned objects.
    /// Bails out (leaking the objects) if the wait itself fails.
    fn deinit(self: SwapImage) void {
        self.waitForFence() catch return;
        Ctx.vkd.destroyImageView(Ctx.device, self.view, null);
        Ctx.vkd.destroySemaphore(Ctx.device, self.image_acquired, null);
        Ctx.vkd.destroySemaphore(Ctx.device, self.render_finished, null);
        Ctx.vkd.destroyFence(Ctx.device, self.frame_fence, null);
    }
    /// Blocks (no timeout in practice: u64 max) until the frame fence signals.
    pub fn waitForFence(self: SwapImage) !void {
        _ = try Ctx.vkd.waitForFences(Ctx.device, 1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64));
    }
};
/// Queries the swapchain's images and wraps each in a SwapImage.
/// Caller owns the returned slice. On failure, any SwapImages already
/// created are torn down and the slice is freed.
fn init_swapchain_images(swapchain: vk.SwapchainKHR, format: vk.Format) ![]SwapImage {
    var image_count: u32 = undefined;
    _ = try Ctx.vkd.getSwapchainImagesKHR(Ctx.device, swapchain, &image_count, null);
    const alloc = try Allocator.allocator();
    const raw_images = try alloc.alloc(vk.Image, image_count);
    defer alloc.free(raw_images);
    _ = try Ctx.vkd.getSwapchainImagesKHR(Ctx.device, swapchain, &image_count, raw_images.ptr);
    const swap_images = try alloc.alloc(SwapImage, image_count);
    errdefer alloc.free(swap_images);
    var initialized: usize = 0;
    errdefer for (swap_images[0..initialized]) |si| si.deinit();
    for (raw_images, 0..) |raw_image, idx| {
        swap_images[idx] = try SwapImage.init(raw_image, format);
        initialized = idx + 1;
    }
    return swap_images;
}
| https://raw.githubusercontent.com/IridescenceTech/Aether-Platform/f94978e25bc2e532bdf616e06ac621705c96812b/src/graphics/Vulkan/Swapchain.zig |
const std = @import("std");
const Game = @import("../../../../game.zig").Game;
const ReadPacketBuffer = @import("../../../../network/packet/ReadPacketBuffer.zig");
// Network id of the entity whose head angles changed.
network_id: i32,
// New head yaw as a single byte — presumably a packed angle (256 steps per
// revolution, common in this protocol); confirm against the decoder.
head_yaw: u8,
// Dispatch marker: false presumably routes handling to the main thread
// (only handleOnMainThread exists here) — confirm against the dispatcher.
comptime handle_on_network_thread: bool = false,
/// Stub decoder: ignores the buffer and returns an undefined packet.
/// NOTE(review): callers must not read any field of the result while this
/// returns `undefined` — confirm this packet is intentionally unparsed.
pub fn decode(buffer: *ReadPacketBuffer, allocator: std.mem.Allocator) !@This() {
    _ = allocator;
    _ = buffer;
    return undefined;
}
/// No-op handler: this packet is currently ignored by the client.
pub fn handleOnMainThread(self: *@This(), game: *Game, allocator: std.mem.Allocator) !void {
    _ = allocator;
    _ = game;
    _ = self;
}
| https://raw.githubusercontent.com/InspectorBoat/zig-client/7b64ee2143cd171b6e38b8679b886849d42fa953/main/network/packet/s2c/play/EntityHeadAnglesS2CPacket.zig |
//! A file-level comment
const builtin = @import("builtin");
/// Declarations of JS APIs to bind; function-typed fields describe the JS
/// signatures (presumably consumed by the sysjs code generator — confirm).
pub const bindings = struct {
    /// Bindings for the JS `console` namespace.
    pub const console = struct {
        /// console.log invokes the JS console.log API
        log: fn (string: String) void,
        // Two-argument variant taking raw byte slices.
        log2: fn (string: []const u8, []const u8) void,
    };
    /// Bindings for the JS `TextDecoder` class.
    pub const TextDecoder = struct {
        new: fn () TextDecoder,
        decode: fn (td: TextDecoder, str: []const u8) String,
    };
    /// Bindings for the JS `String` class.
    pub const String = struct {
        new: fn (buf: []const u8) String,
        charAt: fn (string: String, index: u32) String,
    };
    /// Bindings for `navigator.gpu` (WebGPU entry point).
    pub const navigator = struct {
        pub const gpu = struct {
            requestAdapter: fn (options: RequestAdapterOptions) void,
        };
    };
};
/// Options struct passed to `navigator.gpu.requestAdapter`.
pub const RequestAdapterOptions = struct {
    powerPreference: String,
};
/// doPrint logs a fixed demo message through the JS console.log binding.
pub fn doPrint() void {
    // use console.log
    // NOTE(review): `console` here presumably resolves via glue generated
    // from `bindings.console` — confirm against the sysjs generator.
    console.log("zig:js.console.log(\"hello from Zig\")");
}
| https://raw.githubusercontent.com/hexops-graveyard/mach-sysjs/c826e0f6c70e3b6db1769c84c1356985bb845ea7/example/sysjs.zig |
const std = @import("std");
const bolt = @import("./bolt.zig");
// Kind of this token (see `Kind` below).
kind: Kind,
// Byte offset of the token's first character in the source.
start: usize,
// Byte offset of the token's end — exclusive, judging by `parseAll`
// resuming the scan at `token.end`.
end: usize,
/// The set of token kinds the lexer can produce.
pub const Kind = enum(u8) {
    eof,
    line_break,
    hard_line_break,
    space,
    nonbreaking_space,
    text,
    digits,
    lower_alpha,
    upper_alpha,
    lower_roman,
    upper_roman,
    escape,
    heading,
    right_angle,
    left_square,
    right_square,
    left_paren,
    right_paren,
    ticks,
    tildes,
    asterisk,
    open_asterisk,
    close_asterisk,
    space_asterisk,
    underscore,
    open_underscore,
    close_underscore,
    space_underscore,
    autolink,
    autolink_email,
    inline_link_url,
    exclaimation,
    hyphen,
    period,
    colon,
    plus,
    pipe,
    percent,
    left_curl,
    right_curl,

    /// True for any asterisk-flavored token: `*`, `{*`, `*}`, or space-`*`.
    pub fn isAsterisk(this: @This()) bool {
        return switch (this) {
            .asterisk, .space_asterisk, .open_asterisk, .close_asterisk => true,
            else => false,
        };
    }

    /// True for any underscore-flavored token: `_`, `{_`, `_}`, or space-`_`.
    pub fn isUnderscore(this: @This()) bool {
        return switch (this) {
            .underscore, .space_underscore, .open_underscore, .close_underscore => true,
            else => false,
        };
    }
};
// Struct-of-arrays storage for the compact token form.
pub const MultiArrayList = std.MultiArrayList(Tok);
// Owned token slice returned by `parseAll`.
pub const Slice = MultiArrayList.Slice;
/// A cut-down version of Token, holding only the kind and the start index.
pub const Tok = struct {
    kind: Kind,
    start: u32,
};
/// Tokenizes the whole of `source`, including the trailing eof token.
/// Caller owns the returned slice (deinit via MultiArrayList).
pub fn parseAll(allocator: std.mem.Allocator, source: []const u8) !Slice {
    var toks = std.MultiArrayList(Tok){};
    defer toks.deinit(allocator);
    var offset: usize = 0;
    while (true) {
        const token = parse(source, offset);
        try toks.append(allocator, .{
            .kind = token.kind,
            .start = @intCast(u32, token.start),
        });
        if (token.kind == .eof) break;
        offset = token.end;
    }
    return toks.toOwnedSlice();
}
/// Scans `source` beginning at byte offset `start` and returns the next
/// token. Implemented as a hand-rolled DFA: each consumed byte either
/// extends the current token (updating `res.end`), transitions to another
/// state, or breaks out with the finished token. Returns an `.eof` token
/// with start == end == `start` when no input remains.
pub fn parse(source: []const u8, start: usize) @This() {
    // Lexer states. `text_*` states track lookahead inside a text run so a
    // trailing '.', '-', space, or newline can end or extend the token.
    const State = enum {
        default,
        text,
        text_period1,
        text_period2,
        text_newline,
        text_space,
        text_hyphen,
        heading,
        escape,
        digits,
        lower_roman,
        upper_roman,
        lower_alpha,
        upper_alpha,
        ticks,
        tildes,
        lcurl,
        asterisk,
        underscore,
        space,
        autolink,
        autolink_email,
        rsquare,
        rsquare_lparen,
        rsquare_lparen_url,
    };
    // Default result is an empty eof token at `start`.
    var res = @This(){
        .kind = .eof,
        .start = start,
        .end = start,
    };
    var state = State.default;
    var index = start;
    // bolt.raw.next appears to yield the byte at `index` and advance it, so
    // `res.end = index` after a match records an exclusive end offset.
    while (bolt.raw.next(u8, usize, source, &index)) |c| {
        switch (state) {
            // First byte of the token decides the initial kind/state.
            .default => switch (c) {
                '#' => {
                    res.kind = .heading;
                    res.end = index;
                    state = .heading;
                },
                ' ',
                '\t',
                => {
                    res.kind = .space;
                    res.end = index;
                    state = .space;
                },
                '\n' => {
                    res.kind = .line_break;
                    res.end = index;
                    break;
                },
                '<' => {
                    res.kind = .text;
                    res.end = index;
                    state = .autolink;
                },
                '>' => {
                    res.kind = .right_angle;
                    res.end = index;
                    break;
                },
                '*' => {
                    res.kind = .asterisk;
                    res.end = index;
                    state = .asterisk;
                },
                '_' => {
                    res.kind = .underscore;
                    res.end = index;
                    state = .underscore;
                },
                '{' => {
                    res.kind = .left_curl;
                    res.end = index;
                    state = .lcurl;
                },
                '}' => {
                    res.kind = .right_curl;
                    res.end = index;
                    break;
                },
                '!' => {
                    res.kind = .exclaimation;
                    res.end = index;
                    break;
                },
                '[' => {
                    res.kind = .left_square;
                    res.end = index;
                    state = .rsquare;
                },
                ']' => {
                    res.kind = .right_square;
                    res.end = index;
                    state = .rsquare;
                },
                '-' => {
                    res.kind = .hyphen;
                    res.end = index;
                    break;
                },
                '`' => {
                    res.kind = .ticks;
                    res.end = index;
                    state = .ticks;
                },
                '~' => {
                    res.kind = .tildes;
                    res.end = index;
                    state = .tildes;
                },
                '\\' => {
                    res.kind = .text;
                    res.end = index;
                    state = .escape;
                },
                '.' => {
                    res.kind = .period;
                    res.end = index;
                    break;
                },
                ':' => {
                    res.kind = .colon;
                    res.end = index;
                    break;
                },
                '(' => {
                    res.kind = .left_paren;
                    res.end = index;
                    break;
                },
                ')' => {
                    res.kind = .right_paren;
                    res.end = index;
                    break;
                },
                '+' => {
                    res.kind = .plus;
                    res.end = index;
                    break;
                },
                '0'...'9' => {
                    res.kind = .digits;
                    res.end = index;
                    state = .digits;
                },
                // Letters valid as Roman numerals get their own states; they
                // may later widen to alpha or plain text.
                'I', 'V', 'X', 'L', 'C', 'D', 'M' => {
                    res.kind = .upper_roman;
                    res.end = index;
                    state = .upper_roman;
                },
                'i', 'v', 'x', 'l', 'c', 'd', 'm' => {
                    res.kind = .lower_roman;
                    res.end = index;
                    state = .lower_roman;
                },
                'A'...'B', 'E'...'H', 'J'...'K', 'N'...'U', 'W', 'Y', 'Z' => {
                    res.kind = .upper_alpha;
                    res.end = index;
                    state = .upper_alpha;
                },
                'a'...'b', 'e'...'h', 'j'...'k', 'n'...'u', 'w', 'y', 'z' => {
                    res.kind = .lower_alpha;
                    res.end = index;
                    state = .lower_alpha;
                },
                '|' => {
                    res.kind = .pipe;
                    res.end = index;
                    break;
                },
                '%' => {
                    res.kind = .percent;
                    res.end = index;
                    break;
                },
                else => {
                    res.kind = .text;
                    res.end = index;
                    state = .text;
                },
            },
            .heading => switch (c) {
                '#' => res.end = index,
                else => break,
            },
            // Inside a text run: markup punctuation ends the token; '.',
            // '-', ' ', '\n' enter lookahead states.
            .text => switch (c) {
                '`',
                '*',
                '_',
                '{',
                '\\',
                '!',
                '[',
                ']',
                ')',
                => break,
                '.' => state = .text_period1,
                '-' => state = .text_hyphen,
                ' ' => state = .text_space,
                '\n' => state = .text_newline,
                else => res.end = index,
            },
            // Saw one '.' inside text: a second '.' may start an ellipsis.
            .text_period1 => switch (c) {
                '.' => state = .text_period2,
                '`',
                '*',
                '_',
                '{',
                '\\',
                '!',
                '[',
                ']',
                '|',
                ')',
                '%',
                => break,
                ' ' => {
                    res.end = index;
                    state = .text_space;
                },
                '\n' => {
                    res.end = index - 1;
                    state = .text_newline;
                },
                else => {
                    res.end = index;
                    state = .text;
                },
            },
            // Saw "..": a third '.' ends the text token before the ellipsis.
            .text_period2 => switch (c) {
                '.' => break,
                '`',
                '*',
                '_',
                '{',
                '\\',
                '!',
                '[',
                ']',
                '|',
                ')',
                '%',
                => break,
                ' ' => state = .text_space,
                '\n' => {
                    res.end = index - 1;
                    state = .text_newline;
                },
                else => {
                    res.end = index;
                    state = .text;
                },
            },
            // Newline inside text: block-level starters end the token.
            .text_newline => switch (c) {
                '\n',
                '0'...'9',
                '-',
                '+',
                '*',
                '<',
                '>',
                '`',
                '_',
                '{',
                '\\',
                '!',
                '[',
                ']',
                ')',
                '|',
                '%',
                => break,
                ' ' => {
                    res.kind = .text;
                    state = .text_space;
                },
                '.' => break,
                else => {
                    res.end = index;
                    state = .text;
                },
            },
            // Trailing space(s) inside text; token only grows if more
            // ordinary text follows.
            .text_space => switch (c) {
                '`',
                '*',
                '_',
                '{',
                '\\',
                '<',
                '!',
                '[',
                ']',
                ')',
                '|',
                '%',
                => break,
                ' ' => {},
                '.' => state = .text_period1,
                '-' => state = .text_hyphen,
                '\n' => state = .text_newline,
                else => {
                    res.kind = .text;
                    res.end = index;
                    state = .text;
                },
            },
            // Saw one '-' inside text: a second '-' ends the token.
            .text_hyphen => switch (c) {
                '-' => break,
                '`', '*', '_', '{', '\\', '!', '[', ']', '|', '%' => break,
                ' ' => state = .text_space,
                '\n' => {
                    res.end = index - 1;
                    state = .text_newline;
                },
                else => {
                    res.end = index;
                    state = .text;
                },
            },
            .ticks => switch (c) {
                '`' => res.end = index,
                else => break,
            },
            .tildes => switch (c) {
                '~' => res.end = index,
                else => break,
            },
            // Backslash: escapes punctuation, space, or newline; otherwise
            // the backslash is treated as ordinary text.
            .escape => switch (c) {
                ' ' => {
                    res.kind = .nonbreaking_space;
                    res.end = index;
                    break;
                },
                '\n' => {
                    res.kind = .hard_line_break;
                    res.end = index;
                    break;
                },
                else => if (std.ascii.isPunct(c)) {
                    res.kind = .escape;
                    res.end = index;
                    break;
                } else {
                    res.end = index;
                    state = .text;
                },
            },
            .digits => switch (c) {
                '0'...'9' => res.end = index,
                ' ' => {
                    res.kind = .text;
                    res.end = index;
                    state = .text_space;
                },
                else => break,
            },
            // Roman-numeral letters stay roman; other letters widen to
            // alpha; mixed case/digits demote to text.
            .lower_roman => switch (c) {
                'i', 'v', 'x', 'l', 'c', 'd', 'm' => res.end = index,
                'a'...'b', 'e'...'h', 'j'...'k', 'n'...'u', 'w', 'y', 'z' => {
                    res.end = index;
                    state = .lower_alpha;
                },
                'A'...'Z', '0'...'9' => {
                    res.kind = .text;
                    res.end = index;
                    state = .text;
                },
                ' ' => state = .text_space,
                else => break,
            },
            // NOTE(review): the mixed-case branch here goes to .text_space,
            // while the mirror branch in .lower_roman goes to .text —
            // asymmetry looks unintentional; confirm against the tests.
            .upper_roman => switch (c) {
                'I', 'V', 'X', 'L', 'C', 'D', 'M' => res.end = index,
                'A'...'B', 'E'...'H', 'J'...'K', 'N'...'U', 'W', 'Y', 'Z' => {
                    res.end = index;
                    state = .upper_alpha;
                },
                'a'...'z', '0'...'9' => {
                    res.kind = .text;
                    res.end = index;
                    state = .text_space;
                },
                ' ' => state = .text_space,
                else => break,
            },
            .lower_alpha => switch (c) {
                'a'...'z' => res.end = index,
                'A'...'Z', '0'...'9' => {
                    res.kind = .text;
                    res.end = index;
                    state = .text_space;
                },
                ' ' => state = .text_space,
                else => break,
            },
            .upper_alpha => switch (c) {
                'A'...'Z' => res.end = index,
                'a'...'z', '0'...'9' => {
                    res.kind = .text;
                    res.end = index;
                    state = .text_space;
                },
                ' ' => state = .text_space,
                else => break,
            },
            // '{' may combine with '*' or '_' into an open-delimiter token.
            .lcurl => switch (c) {
                '*' => {
                    res.kind = .open_asterisk;
                    res.end = index;
                    break;
                },
                '_' => {
                    res.kind = .open_underscore;
                    res.end = index;
                    break;
                },
                else => break,
            },
            // '*' may combine with '}' into a close-delimiter token.
            .asterisk => switch (c) {
                '}' => {
                    res.kind = .close_asterisk;
                    res.end = index;
                    break;
                },
                else => break,
            },
            .underscore => switch (c) {
                '}' => {
                    res.kind = .close_underscore;
                    res.end = index;
                    break;
                },
                else => break,
            },
            // Space may combine with a following '*' or '_'.
            .space => switch (c) {
                '*' => {
                    res.kind = .space_asterisk;
                    res.end = index;
                    break;
                },
                '_' => {
                    res.kind = .space_underscore;
                    res.end = index;
                    break;
                },
                else => break,
            },
            // '<...>' scans as an autolink; '@' switches to the email form;
            // a newline aborts and leaves the token as plain text.
            .autolink => switch (c) {
                '>' => {
                    res.kind = .autolink;
                    res.end = index;
                    break;
                },
                '@' => state = .autolink_email,
                '\n' => break,
                else => res.end = index,
            },
            .autolink_email => switch (c) {
                '>' => {
                    res.kind = .autolink_email;
                    res.end = index;
                    break;
                },
                '\n' => break,
                else => res.end = index,
            },
            // After '[' or ']': "](url)" collapses into an inline-link-url
            // token; anything else keeps the plain square-bracket token.
            .rsquare => switch (c) {
                '(' => state = .rsquare_lparen,
                else => break,
            },
            .rsquare_lparen => switch (c) {
                'A'...'Z', 'a'...'z' => state = .rsquare_lparen_url,
                else => break,
            },
            .rsquare_lparen_url => switch (c) {
                ')' => {
                    res.kind = .inline_link_url;
                    res.end = index;
                    break;
                },
                else => {},
            },
        }
    }
    // At end of input, a pending '.' or '-' lookahead belongs to the token.
    if (index == source.len) {
        switch (state) {
            .text_period1, .text_hyphen => res.end = index,
            else => {},
        }
    }
    return res;
}
| https://raw.githubusercontent.com/leroycep/djot.zig/6757f2893c3d755fc341c8a18ee6dfb6751df6d5/src/Token.zig |
const std = @import("std");
const api = @import("api.zig");
const tile = @import("tiles.zig");
const anime = @import("animation.zig");
const map = @import("map.zig");
const texture = @import("textures.zig");
const key = @import("keybindings.zig");
const options = @import("options.zig");
const ecs = @import("ecs.zig");
const SparseSet = @import("sparse_set.zig").SparseSet;
const Grid = @import("grid.zig").Grid;
const coll = @import("collisions.zig");
const cam = @import("camera.zig");
const Lua = @import("ziglua").Lua;
pub const Component = @import("components.zig");
const intersection = @import("sparse_set.zig").intersection;
const ray = @cImport({
@cInclude("raylib.h");
});
const eql = std.mem.eql;
// =========Component types=========
/// ECS component marking an entity as a collectible item.
pub const ItemComponent = struct {
    // Component name used for ECS registration/lookup.
    pub const name = "item";
    // Item type identifier; equal types (with equal stack sizes) may stack.
    type: [:0]const u8 = "unknown",
    // Maximum items per inventory slot; 1 means unstackable.
    max_stack_size: usize = 99,
    // Whether the item currently sits in an inventory or lies in the world.
    status: enum { in_inventory, in_world } = .in_world,
};
/// ECS component holding an entity's item slots.
pub const InventoryComponent = struct {
    // Component name used for ECS registration/lookup.
    pub const name = "inventory";
    // Hard upper bound on the backing slot buffer.
    const max_cap = 256;
    const Slot = struct {
        // Number of stacked items in this slot.
        item_count: usize = 1,
        // Entity id of the item occupying this slot.
        id: usize = 0,
    };
    buffer: [max_cap]Slot = [_]Slot{.{}} ** max_cap,
    // Number of occupied slots (prefix of `buffer`).
    len: usize = 0,
    // Soft capacity limit; may be smaller than `max_cap`.
    capacity: usize = 64,
    // Index of the currently selected slot (e.g. hotbar selection).
    selected_index: usize = 0,
    // Returns the occupied prefix of the slot buffer.
    pub inline fn slots(self: *@This()) []Slot {
        return self.buffer[0..self.len];
    }
};
////======normal code=========
/// Finds an inventory slot that `item` can stack into, or null when no
/// compatible, non-full slot exists.
fn findExistingSlot(self: *const ecs.ECS, inv: *Component.Inventory, item: Component.Item) ?usize {
    // Unstackable items never merge into an existing slot.
    if (item.max_stack_size == 1) return null;
    for (inv.slots(), 0..) |slot, slot_index| {
        const existing = self.get(Component.Item, slot.id);
        const same_type = std.mem.eql(u8, item.type, existing.type);
        const has_room = slot.item_count < item.max_stack_size;
        const same_limit = item.max_stack_size == existing.max_stack_size;
        if (same_type and has_room and same_limit) {
            return slot_index;
        }
    }
    return null;
}
/// Moves the entity `new_item_id` (which must carry an Item component) into
/// `inv`: either merged into an existing stack (deleting the entity) or
/// appended as a new slot. Errors if the entity has no Item component or the
/// inventory is full.
pub fn addItem(self: *ecs.ECS, a: std.mem.Allocator, inv: *Component.Inventory, new_item_id: usize) !void {
    const new_item = self.getMaybe(Component.Item, new_item_id) orelse return error.id_missing_item_component;
    if (findExistingSlot(self, inv, new_item.*)) |index| {
        // Stackable match found: bump the count and remove the merged entity.
        inv.slots()[index].item_count += 1;
        try self.deleteEntity(a, new_item_id);
    } else {
        // Guard the fixed-size slot buffer; previously a full inventory would
        // write out of bounds of `buffer` once len reached max_cap.
        if (inv.len >= inv.capacity or inv.len >= inv.buffer.len) {
            return error.inventory_full;
        }
        inv.buffer[inv.len] = .{
            .id = new_item_id,
        };
        inv.len += 1;
        self.get(Component.Item, new_item_id).status = .in_inventory;
    }
}
/// Per-tick system: for every entity with an Inventory and a Hitbox that has
/// spare capacity, picks up at most one colliding world item.
pub fn updateInventorySystem(
    self: *ecs.ECS,
    a: std.mem.Allocator,
    opt: options.Update,
) !void {
    _ = opt;
    const systems = [_]type{ Component.Inventory, Component.Hitbox };
    const set = self.getSystemDomain(a, &systems);
    for (set) |member| {
        const colliders = try coll.findCollidingEntities(self, a, member);
        // `get` returns a pointer into component storage, so `const` suffices;
        // the previous `var` + `_ = &inventory;` workaround is unnecessary.
        const inventory = self.get(Component.Inventory, member);
        if (inventory.len >= inventory.capacity) {
            continue;
        }
        for (colliders) |entity| {
            if (self.getMaybe(Component.Item, entity)) |item| {
                if (item.status == .in_world) {
                    // Pick up one item per tick; stop on the first failure too.
                    addItem(self, a, inventory, entity) catch break;
                    break;
                }
            }
        }
    }
}
/// Debug overlay: draws each player's inventory as rows of
/// "<count> <item type>" text via raylib.
pub fn renderPlayerInventory(
    self: *ecs.ECS,
    a: std.mem.Allocator,
    animation_state: *const anime.AnimationState,
) void {
    _ = animation_state;
    const systems = [_]type{ Component.IsPlayer, Component.Inventory };
    const set = self.getSystemDomain(a, &systems);
    for (set) |member| {
        const inventory = self.get(Component.Inventory, member);
        var slot_index: usize = 0;
        while (slot_index < inventory.len) : (slot_index += 1) {
            const row_offset: c_int = @intCast(slot_index * 20);
            const item = self.get(Component.Item, inventory.slots()[slot_index].id);
            ray.DrawText(item.type.ptr, 200, 25 + row_offset, 15, ray.RAYWHITE);
            var count_buf: [1024:0]u8 = undefined;
            _ = std.fmt.bufPrintZ(&count_buf, "{}", .{inventory.slots()[slot_index].item_count}) catch unreachable;
            ray.DrawText(&count_buf, 170, 25 + row_offset, 15, ray.RAYWHITE);
        }
    }
}
//const inventory self.get(Component.Inventory, )
| https://raw.githubusercontent.com/VisenDev/ziggity/7dce0f51e093f2f9f352697b3d659be51672098d/src/inventory.zig |
pub const vulkan = @import("vulkan");
pub const l0vk = @import("./vulkan.zig");
pub const VkImage = vulkan.VkImage;
pub const VkFormat = enum(c_uint) {
undefined = 0,
r4g4_unorm_pack8 = 1,
r4g4b4a4_unorm_pack16 = 2,
b4g4r4a4_unorm_pack16 = 3,
r5g6b5_unorm_pack16 = 4,
b5g6r5_unorm_pack16 = 5,
r5g5b5a1_unorm_pack16 = 6,
b5g5r5a1_unorm_pack16 = 7,
a1r5g5b5_unorm_pack16 = 8,
r8_unorm = 9,
r8_snorm = 10,
r8_uscaled = 11,
r8_sscaled = 12,
r8_uint = 13,
r8_sint = 14,
r8_srgb = 15,
r8g8_unorm = 16,
r8g8_snorm = 17,
r8g8_uscaled = 18,
r8g8_sscaled = 19,
r8g8_uint = 20,
r8g8_sint = 21,
r8g8_srgb = 22,
r8g8b8_unorm = 23,
r8g8b8_snorm = 24,
r8g8b8_uscaled = 25,
r8g8b8_sscaled = 26,
r8g8b8_uint = 27,
r8g8b8_sint = 28,
r8g8b8_srgb = 29,
b8g8r8_unorm = 30,
b8g8r8_snorm = 31,
b8g8r8_uscaled = 32,
b8g8r8_sscaled = 33,
b8g8r8_uint = 34,
b8g8r8_sint = 35,
b8g8r8_srgb = 36,
r8g8b8a8_unorm = 37,
r8g8b8a8_snorm = 38,
r8g8b8a8_uscaled = 39,
r8g8b8a8_sscaled = 40,
r8g8b8a8_uint = 41,
r8g8b8a8_sint = 42,
r8g8b8a8_srgb = 43,
b8g8r8a8_unorm = 44,
b8g8r8a8_snorm = 45,
b8g8r8a8_uscaled = 46,
b8g8r8a8_sscaled = 47,
b8g8r8a8_uint = 48,
b8g8r8a8_sint = 49,
b8g8r8a8_srgb = 50,
a8b8g8r8_unorm_pack32 = 51,
a8b8g8r8_snorm_pack32 = 52,
a8b8g8r8_uscaled_pack32 = 53,
a8b8g8r8_sscaled_pack32 = 54,
a8b8g8r8_uint_pack32 = 55,
a8b8g8r8_sint_pack32 = 56,
a8b8g8r8_srgb_pack32 = 57,
a2r10g10b10_unorm_pack32 = 58,
a2r10g10b10_snorm_pack32 = 59,
a2r10g10b10_uscaled_pack32 = 60,
a2r10g10b10_sscaled_pack32 = 61,
a2r10g10b10_uint_pack32 = 62,
a2r10g10b10_sint_pack32 = 63,
a2b10g10r10_unorm_pack32 = 64,
a2b10g10r10_snorm_pack32 = 65,
a2b10g10r10_uscaled_pack32 = 66,
a2b10g10r10_sscaled_pack32 = 67,
a2b10g10r10_uint_pack32 = 68,
a2b10g10r10_sint_pack32 = 69,
r16_unorm = 70,
r16_snorm = 71,
r16_uscaled = 72,
r16_sscaled = 73,
r16_uint = 74,
r16_sint = 75,
r16_sfloat = 76,
r16g16_unorm = 77,
r16g16_snorm = 78,
r16g16_uscaled = 79,
r16g16_sscaled = 80,
r16g16_uint = 81,
r16g16_sint = 82,
r16g16_sfloat = 83,
r16g16b16_unorm = 84,
r16g16b16_snorm = 85,
r16g16b16_uscaled = 86,
r16g16b16_sscaled = 87,
r16g16b16_uint = 88,
r16g16b16_sint = 89,
r16g16b16_sfloat = 90,
r16g16b16a16_unorm = 91,
r16g16b16a16_snorm = 92,
r16g16b16a16_uscaled = 93,
r16g16b16a16_sscaled = 94,
r16g16b16a16_uint = 95,
r16g16b16a16_sint = 96,
r16g16b16a16_sfloat = 97,
r32_uint = 98,
r32_sint = 99,
r32_sfloat = 100,
r32g32_uint = 101,
r32g32_sint = 102,
r32g32_sfloat = 103,
r32g32b32_uint = 104,
r32g32b32_sint = 105,
r32g32b32_sfloat = 106,
r32g32b32a32_uint = 107,
r32g32b32a32_sint = 108,
r32g32b32a32_sfloat = 109,
r64_uint = 110,
r64_sint = 111,
r64_sfloat = 112,
r64g64_uint = 113,
r64g64_sint = 114,
r64g64_sfloat = 115,
r64g64b64_uint = 116,
r64g64b64_sint = 117,
r64g64b64_sfloat = 118,
r64g64b64a64_uint = 119,
r64g64b64a64_sint = 120,
r64g64b64a64_sfloat = 121,
b10g11r11_ufloat_pack32 = 122,
e5b9g9r9_ufloat_pack32 = 123,
d16_unorm = 124,
x8_d24_unorm_pack32 = 125,
d32_sfloat = 126,
s8_uint = 127,
d16_unorm_s8_uint = 128,
d24_unorm_s8_uint = 129,
d32_sfloat_s8_uint = 130,
bc1_rgb_unorm_block = 131,
bc1_rgb_srgb_block = 132,
bc1_rgba_unorm_block = 133,
bc1_rgba_srgb_block = 134,
bc2_unorm_block = 135,
bc2_srgb_block = 136,
bc3_unorm_block = 137,
bc3_srgb_block = 138,
bc4_unorm_block = 139,
bc4_snorm_block = 140,
bc5_unorm_block = 141,
bc5_snorm_block = 142,
bc6h_ufloat_block = 143,
bc6h_sfloat_block = 144,
bc7_unorm_block = 145,
bc7_srgb_block = 146,
etc2_r8g8b8_unorm_block = 147,
etc2_r8g8b8_srgb_block = 148,
etc2_r8g8b8a1_unorm_block = 149,
etc2_r8g8b8a1_srgb_block = 150,
etc2_r8g8b8a8_unorm_block = 151,
etc2_r8g8b8a8_srgb_block = 152,
eac_r11_unorm_block = 153,
eac_r11_snorm_block = 154,
eac_r11g11_unorm_block = 155,
eac_r11g11_snorm_block = 156,
astc_4x4_unorm_block = 157,
astc_4x4_srgb_block = 158,
astc_5x4_unorm_block = 159,
astc_5x4_srgb_block = 160,
astc_5x5_unorm_block = 161,
astc_5x5_srgb_block = 162,
astc_6x5_unorm_block = 163,
astc_6x5_srgb_block = 164,
astc_6x6_unorm_block = 165,
astc_6x6_srgb_block = 166,
astc_8x5_unorm_block = 167,
astc_8x5_srgb_block = 168,
astc_8x6_unorm_block = 169,
astc_8x6_srgb_block = 170,
astc_8x8_unorm_block = 171,
astc_8x8_srgb_block = 172,
astc_10x5_unorm_block = 173,
astc_10x5_srgb_block = 174,
astc_10x6_unorm_block = 175,
astc_10x6_srgb_block = 176,
astc_10x8_unorm_block = 177,
astc_10x8_srgb_block = 178,
astc_10x10_unorm_block = 179,
astc_10x10_srgb_block = 180,
astc_12x10_unorm_block = 181,
astc_12x10_srgb_block = 182,
astc_12x12_unorm_block = 183,
astc_12x12_srgb_block = 184,
// Provided by VK_VERSION_1_1
g8b8g8r8_422_unorm = 1000156000,
b8g8r8g8_422_unorm = 1000156001,
g8_b8_r8_3plane_420_unorm = 1000156002,
g8_b8r8_2plane_420_unorm = 1000156003,
g8_b8_r8_3plane_422_unorm = 1000156004,
g8_b8r8_2plane_422_unorm = 1000156005,
g8_b8_r8_3plane_444_unorm = 1000156006,
r10x6_unorm_pack16 = 1000156007,
r10x6g10x6_unorm_2pack16 = 1000156008,
r10x6g10x6b10x6a10x6_unorm_4pack16 = 1000156009,
g10x6b10x6g10x6r10x6_422_unorm_4pack16 = 1000156010,
b10x6g10x6r10x6g10x6_422_unorm_4pack16 = 1000156011,
g10x6_b10x6_r10x6_3plane_420_unorm_3pack16 = 1000156012,
g10x6_b10x6r10x6_2plane_420_unorm_3pack16 = 1000156013,
g10x6_b10x6_r10x6_3plane_422_unorm_3pack16 = 1000156014,
g10x6_b10x6r10x6_2plane_422_unorm_3pack16 = 1000156015,
g10x6_b10x6_r10x6_3plane_444_unorm_3pack16 = 1000156016,
r12x4_unorm_pack16 = 1000156017,
r12x4g12x4_unorm_2pack16 = 1000156018,
r12x4g12x4b12x4a12x4_unorm_4pack16 = 1000156019,
g12x4b12x4g12x4r12x4_422_unorm_4pack16 = 1000156020,
b12x4g12x4r12x4g12x4_422_unorm_4pack16 = 1000156021,
g12x4_b12x4_r12x4_3plane_420_unorm_3pack16 = 1000156022,
g12x4_b12x4r12x4_2plane_420_unorm_3pack16 = 1000156023,
g12x4_b12x4_r12x4_3plane_422_unorm_3pack16 = 1000156024,
g12x4_b12x4r12x4_2plane_422_unorm_3pack16 = 1000156025,
g12x4_b12x4_r12x4_3plane_444_unorm_3pack16 = 1000156026,
g16b16g16r16_422_unorm = 1000156027,
b16g16r16g16_422_unorm = 1000156028,
g16_b16_r16_3plane_420_unorm = 1000156029,
g16_b16r16_2plane_420_unorm = 1000156030,
g16_b16_r16_3plane_422_unorm = 1000156031,
g16_b16r16_2plane_422_unorm = 1000156032,
g16_b16_r16_3plane_444_unorm = 1000156033,
// Provided by VK_VERSION_1_3
g8_b8r8_2plane_444_unorm = 1000330000,
g10x6_b10x6r10x6_2plane_444_unorm_3pack16 = 1000330001,
g12x4_b12x4r12x4_2plane_444_unorm_3pack16 = 1000330002,
g16_b16r16_2plane_444_unorm = 1000330003,
a4r4g4b4_unorm_pack16 = 1000340000,
a4b4g4r4_unorm_pack16 = 1000340001,
astc_4x4_sfloat_block = 1000066000,
astc_5x4_sfloat_block = 1000066001,
astc_5x5_sfloat_block = 1000066002,
astc_6x5_sfloat_block = 1000066003,
astc_6x6_sfloat_block = 1000066004,
astc_8x5_sfloat_block = 1000066005,
astc_8x6_sfloat_block = 1000066006,
astc_8x8_sfloat_block = 1000066007,
astc_10x5_sfloat_block = 1000066008,
astc_10x6_sfloat_block = 1000066009,
astc_10x8_sfloat_block = 1000066010,
astc_10x10_sfloat_block = 1000066011,
astc_12x10_sfloat_block = 1000066012,
astc_12x12_sfloat_block = 1000066013,
// Provided by VK_IMG_format_pvrtc
pvrtc1_2bpp_unorm_block_img = 1000054000,
pvrtc1_4bpp_unorm_block_img = 1000054001,
pvrtc2_2bpp_unorm_block_img = 1000054002,
pvrtc2_4bpp_unorm_block_img = 1000054003,
pvrtc1_2bpp_srgb_block_img = 1000054004,
pvrtc1_4bpp_srgb_block_img = 1000054005,
pvrtc2_2bpp_srgb_block_img = 1000054006,
pvrtc2_4bpp_srgb_block_img = 1000054007,
// Provided by VK_NV_optical_flow
r16g16_s10_5_1_NV = 1000464000,
// Provided by VK_KHR_maintenance5
a1b5g5r5_unorm_pack16_khr = 1000470000,
a8_unorm_khr = 1000470001,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK,
// // Provided by VK_EXT_texture_compression_astc_hdr
// VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM,
// // Provided by VK_KHR_sampler_ycbcr_conversion
// VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM,
// // Provided by VK_EXT_ycbcr_2plane_444_formats
// VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT = VK_FORMAT_G8_B8R8_2PLANE_444_UNORM,
// // Provided by VK_EXT_ycbcr_2plane_444_formats
// VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16,
// // Provided by VK_EXT_ycbcr_2plane_444_formats
// VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16,
// // Provided by VK_EXT_ycbcr_2plane_444_formats
// VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT = VK_FORMAT_G16_B16R16_2PLANE_444_UNORM,
// // Provided by VK_EXT_4444_formats
// VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = VK_FORMAT_A4R4G4B4_UNORM_PACK16,
// // Provided by VK_EXT_4444_formats
// VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = VK_FORMAT_A4B4G4R4_UNORM_PACK16,
};
/// Bitmask of VkImageUsageFlagBits, modeled as a packed struct so a value can
/// be `@bitCast` directly to/from the raw 32-bit Vulkan flags.
/// Field order defines bit position, so it must match the values in `Bits`.
pub const VkImageUsageFlags = packed struct(u32) {
    transfer_src: bool = false, // bit 0
    transfer_dst: bool = false, // bit 1
    sampled: bool = false, // bit 2
    storage: bool = false, // bit 3
    color_attachment: bool = false, // bit 4
    depth_stencil_attachment: bool = false, // bit 5
    transient_attachment: bool = false, // bit 6
    input_attachment: bool = false, // bit 7
    fragment_shading_rate_attachment_khr: bool = false, // bit 8
    fragment_density_map_ext: bool = false, // bit 9
    video_decode_dst_khr: bool = false, // bit 10
    video_decode_src_khr: bool = false, // bit 11
    video_decode_dpb_khr: bool = false, // bit 12
    video_encode_dst_khr: bool = false, // bit 13
    video_encode_src_khr: bool = false, // bit 14
    video_encode_dpb_khr: bool = false, // bit 15
    // Bits 16-17 are reserved in Vulkan.
    // BUGFIX: this padding used to sit after attachment_feedback_loop_ext,
    // which shifted invocation_mask_huawei/attachment_feedback_loop_ext down
    // to bits 16-17 and disagreed with the Bits values below
    // (0x00040000 = bit 18, 0x00080000 = bit 19).
    _: u2 = 0,
    invocation_mask_huawei: bool = false, // bit 18 (0x00040000)
    attachment_feedback_loop_ext: bool = false, // bit 19 (0x00080000)
    sample_weight_bit_qcom: bool = false, // bit 20
    sample_block_match_qcom: bool = false, // bit 21
    host_transfer_ext: bool = false, // bit 22
    _a: u1 = 0, // bit 23, reserved
    _b: u8 = 0, // bits 24-31, reserved

    /// Raw Vulkan bit values for each flag, for reference/interop.
    pub const Bits = enum(c_uint) {
        transfer_src = 0x00000001,
        transfer_dst = 0x00000002,
        sampled = 0x00000004,
        storage = 0x00000008,
        color_attachment = 0x00000010,
        depth_stencil_attachment = 0x00000020,
        transient_attachment = 0x00000040,
        input_attachment = 0x00000080,
        fragment_shading_rate_attachment_khr = 0x00000100,
        fragment_density_map_ext = 0x00000200,
        video_decode_dst_khr = 0x00000400,
        video_decode_src_khr = 0x00000800,
        video_decode_dpb_khr = 0x00001000,
        video_encode_dst_khr = 0x00002000,
        video_encode_src_khr = 0x00004000,
        video_encode_dpb_khr = 0x00008000,
        invocation_mask_huawei = 0x00040000,
        attachment_feedback_loop_ext = 0x00080000,
        sample_weight_bit_qcom = 0x00100000,
        sample_block_match_qcom = 0x00200000,
        host_transfer_ext = 0x00400000,
        // Provided by VK_NV_shading_rate_image
        // VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
    };
};
/// Image layouts, mirroring Vulkan's VkImageLayout values.
/// Values above 1000000000 come from extensions (Vulkan's extension
/// enum-offset scheme); the rest are core layouts.
pub const VkImageLayout = enum(c_uint) {
    undefined = 0,
    general = 1,
    color_attachment_optimal = 2,
    depth_stencil_attachment_optimal = 3,
    depth_stencil_read_only_optimal = 4,
    shader_read_only_optimal = 5,
    transfer_src_optimal = 6,
    transfer_dst_optimal = 7,
    preinitialized = 8,
    // Promoted from extensions into core in later Vulkan versions.
    depth_read_only_stencil_attachment_optimal = 1000117000,
    depth_attachment_stencil_read_only_optimal = 1000117001,
    depth_attachment_optimal = 1000241000,
    depth_read_only_optimal = 1000241001,
    stencil_attachment_optimal = 1000241002,
    stencil_read_only_optimal = 1000241003,
    read_only_optimal = 1000314000,
    attachment_optimal = 1000314001,
    // Extension-provided layouts (KHR/EXT).
    present_src_khr = 1000001002,
    video_decode_dst_khr = 1000024000,
    video_decode_src_khr = 1000024001,
    video_decode_dpb_khr = 1000024002,
    shared_present_khr = 1000111000,
    fragment_density_map_optimal_ext = 1000218000,
    fragment_shading_rate_attachment_optimal_khr = 1000164003,
    video_encode_dst_khr = 1000299000,
    video_encode_src_khr = 1000299001,
    video_encode_dpb_khr = 1000299002,
    feedback_loop_optimal_ext = 1000339000,
};
// --- ImageView.

/// Opaque Vulkan image view handle, re-exported from the raw bindings.
pub const VkImageView = vulkan.VkImageView;
/// Bitmask of VkImageViewCreateFlagBits as a packed struct; field order
/// defines bit position (bits 0-2 used, remainder reserved) and must match
/// the values in `Bits`.
pub const VkImageViewCreateFlags = packed struct(u32) {
    fragment_density_map_dynamic_ext: bool = false, // bit 0
    fragment_density_map_deferred_ext: bool = false, // bit 1
    descriptor_buffer_capture_replay_ext: bool = false, // bit 2
    _: u1 = 0, // bit 3, reserved
    _a: u28 = 0, // bits 4-31, reserved
    /// Raw Vulkan bit values for each flag.
    pub const Bits = enum(c_uint) {
        fragment_density_map_dynamic_ext = 0x00000001,
        fragment_density_map_deferred_ext = 0x00000002,
        descriptor_buffer_capture_replay_ext = 0x00000004,
    };
};
/// Image view dimensionality, mirroring Vulkan's VkImageViewType.
/// Names are prefixed with `ty_` where the Vulkan name would start
/// with a digit (1D/2D/3D), which is not a legal Zig identifier.
pub const VkImageViewType = enum(c_uint) {
    ty_1d = 0,
    ty_2d = 1,
    ty_3d = 2,
    cube = 3,
    ty_1d_array = 4,
    ty_2d_array = 5,
    cube_array = 6,
    max_enum = 2147483647,
};
/// How a single color/alpha component is sourced when sampling through an
/// image view, mirroring Vulkan's VkComponentSwizzle.
pub const VkComponentSwizzle = enum(c_uint) {
    identity = 0, // keep the component as-is
    zero = 1, // force the component to 0
    one = 2, // force the component to 1
    r = 3,
    g = 4,
    b = 5,
    a = 6,
    max_enum = 2147483647,
};
/// Per-component swizzle applied when sampling through an image view.
/// Typed wrapper over vulkan.VkComponentMapping.
pub const VkComponentMapping = struct {
    r: VkComponentSwizzle,
    g: VkComponentSwizzle,
    b: VkComponentSwizzle,
    a: VkComponentSwizzle,

    /// Converts to the raw Vulkan struct expected by the C API.
    pub fn to_vulkan_ty(self: *const VkComponentMapping) vulkan.VkComponentMapping {
        var raw: vulkan.VkComponentMapping = undefined;
        raw.r = @intFromEnum(self.r);
        raw.g = @intFromEnum(self.g);
        raw.b = @intFromEnum(self.b);
        raw.a = @intFromEnum(self.a);
        return raw;
    }
};
/// Bitmask of VkImageAspectFlagBits as a packed struct; field order defines
/// bit position (bits 0-10 used, remainder reserved) and must match the
/// values in `Bits`.
pub const VkImageAspectFlags = packed struct(u32) {
    color: bool = false, // bit 0
    depth: bool = false, // bit 1
    stencil: bool = false, // bit 2
    metadata: bool = false, // bit 3
    plane_0: bool = false, // bit 4 (multi-planar formats)
    plane_1: bool = false, // bit 5
    plane_2: bool = false, // bit 6
    memory_plane_0: bool = false, // bit 7 (EXT DRM format modifiers)
    memory_plane_1: bool = false, // bit 8
    memory_plane_2: bool = false, // bit 9
    memory_plane_3: bool = false, // bit 10
    _: u1 = 0, // bit 11, reserved
    _a: u20 = 0, // bits 12-31, reserved
    /// Raw Vulkan bit values for each aspect.
    pub const Bits = enum(c_uint) {
        color = 0x00000001,
        depth = 0x00000002,
        stencil = 0x00000004,
        metadata = 0x00000008,
        plane_0 = 0x00000010,
        plane_1 = 0x00000020,
        plane_2 = 0x00000040,
        memory_plane_0 = 0x00000080,
        memory_plane_1 = 0x00000100,
        memory_plane_2 = 0x00000200,
        memory_plane_3 = 0x00000400,
    };
};
/// A contiguous range of mip levels and array layers within an image.
/// Typed wrapper over vulkan.VkImageSubresourceRange.
pub const VkImageSubresourceRange = struct {
    aspectMask: VkImageAspectFlags = .{},
    baseMipLevel: u32,
    levelCount: u32,
    baseArrayLayer: u32,
    layerCount: u32,

    /// Converts to the raw Vulkan struct expected by the C API.
    pub fn to_vulkan_ty(self: *const VkImageSubresourceRange) vulkan.VkImageSubresourceRange {
        var raw: vulkan.VkImageSubresourceRange = undefined;
        raw.aspectMask = @bitCast(self.aspectMask);
        raw.baseMipLevel = self.baseMipLevel;
        raw.levelCount = self.levelCount;
        raw.baseArrayLayer = self.baseArrayLayer;
        raw.layerCount = self.layerCount;
        return raw;
    }
};
/// Parameters for vkCreateImageView. Typed wrapper over
/// vulkan.VkImageViewCreateInfo; `sType` is filled in automatically.
pub const VkImageViewCreateInfo = struct {
    pNext: ?*const anyopaque = null,
    flags: VkImageViewCreateFlags = .{},
    image: VkImage,
    viewType: VkImageViewType,
    format: VkFormat,
    components: VkComponentMapping,
    subresourceRange: VkImageSubresourceRange,

    /// Converts to the raw Vulkan struct expected by the C API.
    pub fn to_vulkan_ty(self: *const VkImageViewCreateInfo) vulkan.VkImageViewCreateInfo {
        var raw: vulkan.VkImageViewCreateInfo = undefined;
        raw.sType = vulkan.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        raw.pNext = self.pNext;
        raw.flags = @bitCast(self.flags);
        raw.image = self.image;
        raw.viewType = @intFromEnum(self.viewType);
        raw.format = @intFromEnum(self.format);
        raw.components = self.components.to_vulkan_ty();
        raw.subresourceRange = self.subresourceRange.to_vulkan_ty();
        return raw;
    }
};
/// Error codes vkCreateImageView can return, mapped 1:1 from the
/// corresponding Vulkan VkResult values.
pub const vkCreateImageViewError = error{
    VK_ERROR_OUT_OF_HOST_MEMORY,
    VK_ERROR_OUT_OF_DEVICE_MEMORY,
    VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR,
};
/// Creates an image view, translating the typed create info into the raw
/// Vulkan struct and the VkResult into a Zig error.
/// Panics (unreachable) on any VkResult not covered by vkCreateImageViewError.
pub fn vkCreateImageView(
    device: l0vk.VkDevice,
    pCreateInfo: *const VkImageViewCreateInfo,
    pAllocator: [*c]const l0vk.VkAllocationCallbacks,
) vkCreateImageViewError!VkImageView {
    const create_info = pCreateInfo.to_vulkan_ty();
    var image_view: l0vk.VkImageView = undefined;
    switch (vulkan.vkCreateImageView(device, &create_info, pAllocator, &image_view)) {
        vulkan.VK_SUCCESS => return image_view,
        vulkan.VK_ERROR_OUT_OF_HOST_MEMORY => return vkCreateImageViewError.VK_ERROR_OUT_OF_HOST_MEMORY,
        vulkan.VK_ERROR_OUT_OF_DEVICE_MEMORY => return vkCreateImageViewError.VK_ERROR_OUT_OF_DEVICE_MEMORY,
        vulkan.VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR => return vkCreateImageViewError.VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR,
        else => unreachable,
    }
}
/// Destroys an image view. Thin pass-through to the raw Vulkan call;
/// `pAllocator` may be null to use the default allocator.
pub inline fn vkDestroyImageView(
    device: l0vk.VkDevice,
    imageView: VkImageView,
    pAllocator: [*c]const l0vk.VkAllocationCallbacks,
) void {
    vulkan.vkDestroyImageView(device, imageView, pAllocator);
}
| https://raw.githubusercontent.com/treemcgee42/r4-engine/3f503075e591ea2831a603dbf02a4f83a25f8eed/src/core/renderer/layer0/vulkan/image.zig |
const std = @import("std");
const Alloc = std.mem.Allocator;
const ArrayList = std.ArrayList;
const HashMap = std.HashMap;
const ArrayHashMap = std.ArrayHashMap;
const StringHashMap = std.StringHashMap;
const string = @import("string.zig");
const String = string.String;
const Str = string.Str;
/// A type such as `Int` or `Maybe[T]`.
pub const Ty = struct {
    name: Str,
    args: []const Ty,

    const Self = @This();

    /// Creates a type with the given name and no type arguments.
    pub fn named(name: Str) Self {
        return .{ .name = name, .args = &[_]Ty{} };
    }

    pub fn format(
        self: Self,
        comptime fmt: Str,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        try writer.print("{s}", .{self.name});
        // Reference types ("&") print their pointee directly (e.g. `&Int`),
        // everything else prints `Name[Arg, ...]`.
        if (string.eql(self.name, "&"))
            try self.args[0].format(fmt, options, writer)
        else
            try print_args_of_tys(writer, self.args);
    }
    pub fn print_args_of_tys(writer: anytype, args: ?[]const Ty) !void {
        try print_args(Ty, "{any}", writer, args);
    }
    pub fn print_args_of_strs(writer: anytype, args: ?[]const Str) !void {
        try print_args(Str, "{s}", writer, args);
    }
    /// Prints `[a, b, c]`; prints nothing when `args` is null or empty.
    fn print_args(comptime T: type, comptime fmt: []const u8, writer: anytype, args: ?[]const T) !void {
        if (args) |ty_args| if (ty_args.len > 0) {
            try writer.print("[", .{});
            for (ty_args, 0..) |arg, i| {
                if (i > 0) try writer.print(", ", .{});
                try writer.print(fmt, .{arg});
            }
            try writer.print("]", .{});
        };
    }

    /// Substitutes type variables found in `ty_env` (e.g. `T` -> `Int`),
    /// recursing into type arguments. Returns a new type; the argument
    /// slice is allocated with `alloc`.
    pub fn specialize(self: Self, alloc: std.mem.Allocator, ty_env: StringHashMap(Ty)) !Self {
        if (ty_env.get(self.name)) |ty| {
            // TODO: Make sure generic types doesn't have parameters.
            return ty;
        }
        var mapped_args = ArrayList(Ty).init(alloc);
        for (self.args) |arg| try mapped_args.append(try arg.specialize(alloc, ty_env));
        return .{ .name = self.name, .args = mapped_args.items };
    }

    /// FNV-1a hash over the type name and, recursively, all type arguments.
    /// Types that compare equal via `eql` produce equal hashes.
    /// (Previously this always returned 0, which made every entry in the
    /// Ty hash maps below collide and degraded lookups to linear scans
    /// with a full `eql` on every probe.)
    pub fn hash(self: Self) u32 {
        var h: u32 = 2166136261; // FNV-1a 32-bit offset basis
        for (self.name) |byte| {
            h = (h ^ byte) *% 16777619; // FNV-1a 32-bit prime
        }
        for (self.args) |arg| {
            h = (h ^ arg.hash()) *% 16777619;
        }
        return h;
    }

    pub fn eql(self: Self, other: Self) bool {
        if (!string.eql(self.name, other.name)) return false;
        if (self.args.len != other.args.len) return false;
        for (self.args, other.args) |a, b| if (!a.eql(b)) return false;
        return true;
    }
};
/// An ArrayHashMap keyed by `Ty`, using Ty's own hash/eql.
/// `store_hash = false`: hashes are recomputed on demand rather than cached.
pub fn TyArrayHashMap(comptime V: type) type {
    return ArrayHashMap(Ty, V, struct {
        pub fn hash(self: @This(), ty: Ty) u32 {
            _ = self;
            return ty.hash();
        }
        pub fn eql(self: @This(), a: Ty, b: Ty, b_index: usize) bool {
            _ = b_index;
            _ = self;
            return a.eql(b);
        }
    }, false);
}
/// A HashMap keyed by `Ty`, using Ty's own hash/eql.
/// 60 is the max load percentage; Ty's u32 hash widens to the required u64.
pub fn TyHashMap(comptime V: type) type {
    return HashMap(Ty, V, struct {
        pub fn hash(self: @This(), ty: Ty) u64 {
            _ = self;
            return ty.hash();
        }
        pub fn eql(self: @This(), a: Ty, b: Ty) bool {
            _ = self;
            return a.eql(b);
        }
    }, 60);
}
| https://raw.githubusercontent.com/MarcelGarus/martinaise/6394308d7853402e7a568d87fe1829c8a3fb948f/compiler/0/src/ty.zig |
const std = @import("std");
const builtin = @import("builtin");
const IsWasm = builtin.target.isWasm();
const stdx = @import("stdx");
const Mat4 = stdx.math.Mat4;
const Mat3 = stdx.math.Mat3;
const Vec3 = stdx.math.Vec3;
const gl = @import("gl");
const GLtextureId = gl.GLuint;
const graphics = @import("../../graphics.zig");
const gpu = graphics.gpu;
const TexShaderVertex = gpu.TexShaderVertex;
const TextureId = gpu.TextureId;
const Mesh = gpu.Mesh;
const shaders = @import("shaders.zig");
const log = stdx.log.scoped(.gl_renderer);
/// Initial buffer sizes
const MatBufferInitialSize = 5000; // number of model matrices preallocated for batching
const MatBufferInitialSizeBytes = MatBufferInitialSize * @sizeOf(Mat4);
const MaterialBufferInitialSize = 100; // number of materials preallocated
const MaterialBufferInitialSizeBytes = MaterialBufferInitialSize * @sizeOf(graphics.Material);
/// Placeholder renderer for secondary contexts; currently holds no state
/// and its init is a no-op.
pub const SlaveRenderer = struct {
    dummy: bool,
    pub fn init(self: *SlaveRenderer, alloc: std.mem.Allocator) !void {
        _ = self;
        _ = alloc;
    }
};
/// Provides an API to make direct draw calls using OpenGL shaders.
/// Makes no assumptions about how to group draw calls together.
/// Manages common buffers used for shaders.
/// Keeps some OpenGL state to avoid redundant calls.
pub const Renderer = struct {
    /// Buffers.
    vert_buf_id: gl.GLuint, // GL buffer object for vertex data
    index_buf_id: gl.GLuint, // GL buffer object for index data
    mats_buf_id: gl.GLuint, // GL buffer object backing mats_buf
    mats_buf: []stdx.math.Mat4, // CPU-side matrix storage handed to the mesh
    materials_buf_id: gl.GLuint, // GL buffer object backing materials_buf
    materials_buf: []graphics.Material, // CPU-side material storage handed to the mesh
    mesh: Mesh,
    image_store: *graphics.gpu.ImageStore,

    /// Pipelines.
    pipelines: Pipelines,

    /// [State] Cached GL toggles so redundant glEnable/glDisable calls are skipped.
    scissor_test: bool,
    depth_test: bool,
    binded_draw_framebuffer: gl.GLuint,

    /// Allocates CPU-side buffers, creates the GL buffer objects and shader
    /// pipelines, and establishes the default GL state (blending + back-face
    /// culling on; depth test, scissor test and polygon offset off).
    pub fn init(self: *Renderer, alloc: std.mem.Allocator) !void {
        self.* = .{
            .vert_buf_id = undefined,
            .index_buf_id = undefined,
            .mats_buf_id = undefined,
            .materials_buf_id = undefined,
            .materials_buf = undefined,
            .mats_buf = undefined,
            .depth_test = undefined,
            .scissor_test = undefined,
            .mesh = undefined,
            .pipelines = undefined,
            .image_store = undefined,
            .binded_draw_framebuffer = 0,
        };
        const max_total_textures = gl.getMaxTotalTextures();
        const max_fragment_textures = gl.getMaxFragmentTextures();
        log.debug("max frag textures: {}, max total textures: {}", .{ max_fragment_textures, max_total_textures });

        // Generate buffers.
        var buf_ids: [4]gl.GLuint = undefined;
        gl.genBuffers(4, &buf_ids);
        self.vert_buf_id = buf_ids[0];
        self.index_buf_id = buf_ids[1];
        self.mats_buf_id = buf_ids[2];
        self.materials_buf_id = buf_ids[3];

        self.mats_buf = try alloc.alloc(Mat4, MatBufferInitialSize);
        self.materials_buf = try alloc.alloc(graphics.Material, MaterialBufferInitialSize);

        self.mesh = Mesh.init(alloc, self.mats_buf, self.materials_buf);

        // Initialize pipelines.
        self.pipelines = .{
            .tex = try shaders.TexShader.init(self.vert_buf_id),
            .gradient = try shaders.GradientShader.init(self.vert_buf_id),
            .plane = try shaders.PlaneShader.init(self.vert_buf_id),
            .tex_pbr = try shaders.TexPbrShader.init(alloc, self.vert_buf_id),
        };

        // Enable blending by default.
        gl.enable(gl.GL_BLEND);

        // Cull back face.
        gl.enable(gl.GL_CULL_FACE);
        gl.frontFace(gl.GL_CCW);

        // Disable depth test by default.
        gl.disable(gl.GL_DEPTH_TEST);
        self.depth_test = false;

        gl.disable(gl.GL_SCISSOR_TEST);
        self.scissor_test = false;

        gl.disable(gl.GL_POLYGON_OFFSET_FILL);
    }

    /// Releases the GL buffer objects, the CPU-side buffers, the mesh and
    /// all pipelines. `alloc` must be the same allocator passed to init.
    pub fn deinit(self: Renderer, alloc: std.mem.Allocator) void {
        const bufs = [_]gl.GLuint{
            self.vert_buf_id,
            self.index_buf_id,
            self.mats_buf_id,
            self.materials_buf_id,
        };
        gl.deleteBuffers(4, &bufs);

        alloc.free(self.mats_buf);
        alloc.free(self.materials_buf);

        self.mesh.deinit();
        self.pipelines.deinit();
    }

    /// Binds `framebuffer` as the draw target and caches the id.
    pub fn bindDrawFramebuffer(self: *Renderer, framebuffer: gl.GLuint) void {
        gl.bindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, framebuffer);
        self.binded_draw_framebuffer = framebuffer;
    }

    /// Draws the currently buffered mesh data with the textured shader
    /// (depth test enabled). Flushes and resets the mesh.
    pub fn pushTex3D(self: *Renderer, mvp: Mat4, tex_id: TextureId) void {
        const gl_tex_id = self.image_store.getTexture(tex_id).inner.tex_id;
        self.setDepthTest(true);
        self.pipelines.tex.bind(mvp, gl_tex_id);
        gl.bindVertexArray(self.pipelines.tex.shader.vao_id);
        self.pushCurrentElements();
    }

    /// Same as pushTex3D but rasterizes as wireframe.
    /// glPolygonMode is unavailable in WebGL, so this is a no-op on wasm.
    pub fn pushTexWireframe3D(self: *Renderer, mvp: Mat4, tex_id: TextureId) void {
        // Only supported on Desktop atm.
        if (!IsWasm) {
            const gl_tex_id = self.image_store.getTexture(tex_id).inner.tex_id;
            gl.polygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE);
            self.setDepthTest(true);
            self.pipelines.tex.bind(mvp, gl_tex_id);
            gl.bindVertexArray(self.pipelines.tex.shader.vao_id);
            self.pushCurrentElements();
            // Restore fill mode so later draw calls are unaffected.
            gl.polygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL);
        }
    }

    /// Draws the currently buffered mesh data with the PBR shader
    /// (depth test enabled). Flushes and resets the mesh.
    pub fn pushTexPbr3D(self: *Renderer, mvp: Mat4, model: Mat4, normal: Mat3, mat: graphics.Material, light: gpu.ShaderCamera, tex_id: TextureId) void {
        const gl_tex_id = self.image_store.getTexture(tex_id).inner.tex_id;
        self.setDepthTest(true);
        self.pipelines.tex_pbr.bind(mvp, model, normal, gl_tex_id, mat, light);
        gl.bindVertexArray(self.pipelines.tex_pbr.shader.vao_id);
        self.pushCurrentElements();
    }

    /// Appends vertices and indexes to the mesh, growing/asserting capacity first.
    /// Indexes are relative to the pushed vertex slice.
    pub fn ensurePushMeshData(self: *Renderer, verts: []const TexShaderVertex, indexes: []const u16) void {
        self.ensureUnusedBuffer(verts.len, indexes.len);
        const vert_start = self.mesh.pushVertexes(verts);
        self.mesh.pushDeltaIndexes(vert_start, indexes);
    }

    /// Ensures that the buffer has enough space.
    pub fn ensureUnusedBuffer(self: *Renderer, vert_inc: usize, index_inc: usize) void {
        if (!self.mesh.ensureUnusedBuffer(vert_inc, index_inc)) {
            // Currently, draw calls reset the mesh so data that proceeds the current buffer belongs to the same draw call.
            stdx.panic("buffer limit");
        }
    }

    /// Uploads the mesh's current vertex/index data to the GL buffers,
    /// issues the draw call, then resets the mesh for the next batch.
    fn pushCurrentElements(self: *Renderer) void {
        const num_verts = self.mesh.cur_vert_buf_size;
        const num_indexes = self.mesh.cur_index_buf_size;

        // Update vertex buffer.
        gl.bindBuffer(gl.GL_ARRAY_BUFFER, self.vert_buf_id);
        gl.bufferData(gl.GL_ARRAY_BUFFER, @intCast(c_long, num_verts * @sizeOf(TexShaderVertex)), self.mesh.vert_buf.ptr, gl.GL_DYNAMIC_DRAW);

        // Update index buffer. (num_indexes * 2: indexes are 2-byte u16s.)
        gl.bindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.index_buf_id);
        gl.bufferData(gl.GL_ELEMENT_ARRAY_BUFFER, @intCast(c_long, num_indexes * 2), self.mesh.index_buf.ptr, gl.GL_DYNAMIC_DRAW);

        gl.drawElements(gl.GL_TRIANGLES, num_indexes, self.mesh.index_buffer_type, 0);
        self.mesh.reset();
    }

    /// Toggles GL depth testing, skipping the GL call if already in that state.
    pub fn setDepthTest(self: *Renderer, depth_test: bool) void {
        if (self.depth_test == depth_test) {
            return;
        }
        if (depth_test) {
            gl.enable(gl.GL_DEPTH_TEST);
        } else {
            gl.disable(gl.GL_DEPTH_TEST);
        }
        self.depth_test = depth_test;
    }

    /// Toggles GL scissor testing, skipping the GL call if already in that state.
    pub fn setScissorTest(self: *Renderer, scissor_test: bool) void {
        if (self.scissor_test == scissor_test) {
            return;
        }
        if (scissor_test) {
            gl.enable(gl.GL_SCISSOR_TEST);
        } else {
            gl.disable(gl.GL_SCISSOR_TEST);
        }
        self.scissor_test = scissor_test;
    }
};
/// The set of shader pipelines owned by the Renderer.
pub const Pipelines = struct {
    tex: shaders.TexShader,
    gradient: shaders.GradientShader,
    plane: shaders.PlaneShader,
    tex_pbr: shaders.TexPbrShader,

    /// Releases all shader programs.
    pub fn deinit(self: Pipelines) void {
        self.tex.deinit();
        self.tex_pbr.deinit();
        self.gradient.deinit();
        self.plane.deinit();
    }
}; | https://raw.githubusercontent.com/fubark/cosmic/15f85e4bf5c6618c237d6f11f540717f7df16eb6/graphics/src/backend/gl/renderer.zig |
//! Another World compresses hundreds of game resources (audio, bitmaps, bytecode polygon data)
//! into a set of BANK01-BANK0D data files. To keep track of where each resource lives,
//! the game defines _resource descriptors_ in a file named MEMLIST.BIN.
//!
//! This file defines the structure of these resource descriptors, along with methods
//! to parse them from a MEMLIST.BIN file.
const anotherworld = @import("../anotherworld.zig");
const meta = @import("utils").meta;
const ResourceType = @import("resource_type.zig").ResourceType;
const Filename = @import("filename.zig").Filename;
/// Describes an individual resource in Another World's data files:
/// its length, type and the bank file in which it is located.
pub const ResourceDescriptor = struct {
    /// The type of content stored in this resource.
    type: ResourceType,
    /// The bank file to look for the resource in: in the MS-DOS version these are numbered from 01 to 0D.
    bank_id: Filename.BankID,
    /// The byte offset (within the packed data of the bank file) at which the resource is located.
    bank_offset: usize,
    /// The compressed size of the resource in bytes.
    compressed_size: usize,
    /// The uncompressed size of the resource in bytes.
    /// If this differs from compressed_size, it indicates the resource has been compressed
    /// with run-length encoding (RLE).
    uncompressed_size: usize,

    /// Whether this resource is stored RLE-compressed: true when the stored
    /// (compressed) size differs from the final (uncompressed) size.
    pub fn isCompressed(self: ResourceDescriptor) bool {
        return self.compressed_size != self.uncompressed_size;
    }

    /// An iterator that parses resource descriptors from a `Reader` instance until it reaches
    /// an end-of-file marker.
    /// Intended for parsing the MEMLIST.BIN file in an Another World game directory.
    pub fn iterator(reader: anytype) Iterator(@TypeOf(reader)) {
        const ReaderType = @TypeOf(reader);
        return Iterator(ReaderType){ .reader = reader };
    }

    /// The full error set that parsing descriptors from a `Reader` can produce.
    pub fn Error(comptime Reader: type) type {
        return meta.ErrorType(Reader.readNoEof) || ResourceType.Error || error{
            /// A resource defined a compressed size that was larger than its uncompressed size.
            InvalidResourceSize,
        };
    }
};
/// Builds an iterator type over `Reader` that yields ResourceDescriptors.
/// Kept private; construct via ResourceDescriptor.iterator().
fn Iterator(comptime Reader: type) type {
    return struct {
        const Self = @This();

        /// The reader being iterated over.
        reader: Reader,

        /// Returns the next resource descriptor from the reader.
        /// Returns null if it hits an end-of-file marker, or an error if it cannot parse more descriptor data.
        pub fn next(self: *Self) ResourceDescriptor.Error(Reader)!?ResourceDescriptor {
            // The layout of each entry in the MEMLIST.BIN file matches the layout of an in-memory data structure
            // which the original Another World executable used for tracking whether a given resource was currently
            // loaded. The contents of the file were poured directly into a contiguous memory block and used as-is
            // for tracking state.
            //
            // Because of this layout, there are gaps in the stored data that corresponded to fields in that
            // in-memory struct: fields which were used at runtime, but whose values are irrelevant in the file itself.
            // (These fields are expected to be filled with zeroes in MEMLIST.BIN, but we don't actually check.)
            // Our own Instance struct doesn't match this layout, so we just pick out the fields we care about.
            //
            // The layout is as follows (all multibyte fields are big-endian):
            // (Byte offset, size, purpose, description)
            // 0  u8  loading state   In MEMLIST.BIN: 0, or 255 to mark the end of the list of descriptors.
            //                        In original runtime: tracked the loaded state of the resource:
            //                        0: "not needed, can be cleaned up"
            //                        1: "loaded"
            //                        2: "needs to be loaded"
            // 1  u8  resource type   The type of data in this resource: values correspond to `ResourceType`s.
            // 2  u16 buffer pointer  In MEMLIST.BIN: Unused.
            //                        In original runtime: a 16-bit pointer to the location in memory
            //                        at which the resource is loaded.
            // 4  u16 <unknown>       Unknown, apparently unused.
            // 6  u8  load priority   In MEMLIST.BIN: Unused.
            //                        In original runtime: used to load resources in order of priority
            //                        (higher was better).
            // 7  u8  bank ID         Which BANKXX file this resource is located in (from 01-0D).
            // 8  u32 bank offset     The byte offset within the BANK file at which the resource is located.
            // 12 u16 <unknown>       Unknown, apparently unused.
            // 14 u16 packed size     The compressed size of the resource in bytes.
            // 16 u16 <unknown>       Unknown, apparently unused.
            // 18 u16 unpacked size   The final uncompressed size of the resource in bytes.
            //
            // NOTE: the read order below must exactly match this byte layout;
            // unused fields are still consumed (and discarded) to stay aligned.
            const end_of_file_flag = try self.reader.readByte();
            if (end_of_file_flag == end_of_file_marker) {
                return null;
            }

            const raw_type = try self.reader.readByte();
            _ = try self.reader.readInt(u16, .Big);
            _ = try self.reader.readInt(u16, .Big);
            _ = try self.reader.readByte();
            const bank_id = try self.reader.readByte();
            const bank_offset = try self.reader.readInt(u32, .Big);
            _ = try self.reader.readInt(u16, .Big);
            const compressed_size = try self.reader.readInt(u16, .Big);
            _ = try self.reader.readInt(u16, .Big);
            const uncompressed_size = try self.reader.readInt(u16, .Big);

            // A resource can never grow when decompressed.
            if (compressed_size > uncompressed_size) {
                return error.InvalidResourceSize;
            }

            return ResourceDescriptor{
                .type = try ResourceType.parse(raw_type),
                .bank_id = bank_id,
                .bank_offset = bank_offset,
                .compressed_size = compressed_size,
                .uncompressed_size = uncompressed_size,
            };
        }
    };
}
/// A 255 byte at the starting position of a descriptor block marks the end of an
/// Another World MEMLIST.BIN resource list.
/// No more descriptors should be parsed after that marker is reached.
const end_of_file_marker: u8 = 0xFF;
// -- Example data --
// zig fmt: off
/// Test fixtures: raw descriptor bytes and the descriptor they should parse to.
pub const DescriptorExamples = struct {
    pub const valid_data = [_]u8{
        // See documentation in `parse` for the expected byte layout.
        0x00, // loading state/end-of-file marker
        0x04, // resource type (4 == ResourceType.bytecode)
        0x00, 0x00, // buffer pointer: unused
        0x00, 0x00, // unknown: unused
        0x00, // priority: unused
        0x05, // bank ID (5 == BANK05 file)
        0xDE, 0xAD, 0xBE, 0xEF, // bank offset (big-endian 32-bit unsigned integer)
        0x00, 0x00, // unknown: unused
        0x8B, 0xAD, // packed size (big-endian 16-bit unsigned integer)
        0x00, 0x00, // unknown: unused
        0xF0, 0x0D, // unpacked size (big-endian 16-bit unsigned integer)
    };

    const invalid_resource_type = block: {
        var invalid_data = valid_data;
        invalid_data[1] = 0xFF; // Does not map to any ResourceType value
        break :block invalid_data;
    };

    const invalid_resource_size = block: {
        var invalid_data = valid_data;
        // Zero out the unpacked size to ensure that the compressed size is higher
        invalid_data[18] = 0x00;
        invalid_data[19] = 0x00;
        break :block invalid_data;
    };

    const valid_end_of_file = [_]u8{end_of_file_marker};

    // The descriptor that `valid_data` is expected to parse into.
    const valid_descriptor = ResourceDescriptor{
        .type = .bytecode,
        .bank_id = 5,
        .bank_offset = 0xDEADBEEF,
        .compressed_size = 0x8BAD,
        .uncompressed_size = 0xF00D,
    };
};
/// Test fixtures: whole synthetic MEMLIST.BIN files built from the
/// descriptor examples above.
pub const FileExamples = struct {
    // Three valid descriptors followed by the end-of-file marker.
    pub const valid = (DescriptorExamples.valid_data ** 3) ++ DescriptorExamples.valid_end_of_file;
    // Two valid descriptors but no end-of-file marker.
    pub const truncated = DescriptorExamples.valid_data ** 2;
    // A valid descriptor, then one with an unrecognized type byte.
    pub const invalid_resource_type =
        DescriptorExamples.valid_data ++
        DescriptorExamples.invalid_resource_type ++
        DescriptorExamples.valid_end_of_file;
};
// zig fmt: on
// -- Tests --
const testing = @import("utils").testing;
const fixedBufferStream = @import("std").io.fixedBufferStream;
// -- Iterator behavior tests, driven by the fixtures above. --

test "iterator.next() correctly parses file descriptor" {
    const reader = fixedBufferStream(&DescriptorExamples.valid_data).reader();
    var descriptors = ResourceDescriptor.iterator(reader);

    try testing.expectEqual(DescriptorExamples.valid_descriptor, descriptors.next());
}

test "iterator.next() stops parsing at end-of-file marker" {
    const reader = fixedBufferStream(&DescriptorExamples.valid_end_of_file).reader();
    var descriptors = ResourceDescriptor.iterator(reader);

    try testing.expectEqual(null, descriptors.next());
}

test "iterator.next() returns error.InvalidResourceType when resource type byte is not recognized" {
    const reader = fixedBufferStream(&DescriptorExamples.invalid_resource_type).reader();
    var descriptors = ResourceDescriptor.iterator(reader);

    try testing.expectError(error.InvalidResourceType, descriptors.next());
}

test "iterator.next() returns error.InvalidResourceSize when compressed size is larger than uncompressed size" {
    const reader = fixedBufferStream(&DescriptorExamples.invalid_resource_size).reader();
    var descriptors = ResourceDescriptor.iterator(reader);

    try testing.expectError(error.InvalidResourceSize, descriptors.next());
}

test "iterator.next() returns error.EndOfStream on incomplete data" {
    // Only the first 4 bytes of a descriptor: reading must fail mid-record.
    const reader = fixedBufferStream(DescriptorExamples.valid_data[0..4]).reader();
    var descriptors = ResourceDescriptor.iterator(reader);

    try testing.expectError(error.EndOfStream, descriptors.next());
}

test "iterator parses all expected descriptors until it reaches end-of-file marker" {
    const reader = fixedBufferStream(&FileExamples.valid).reader();
    var descriptors = ResourceDescriptor.iterator(reader);

    while (try descriptors.next()) |descriptor| {
        try testing.expectEqual(DescriptorExamples.valid_descriptor, descriptor);
    }

    // Check that it parsed all available bytes from the reader
    try testing.expectError(error.EndOfStream, reader.readByte());
}

test "iterator returns error.EndOfStream when it runs out of data before encountering end-of-file marker" {
    const reader = fixedBufferStream(&FileExamples.truncated).reader();
    var descriptors = ResourceDescriptor.iterator(reader);

    try testing.expectEqual(DescriptorExamples.valid_descriptor, descriptors.next());
    try testing.expectEqual(DescriptorExamples.valid_descriptor, descriptors.next());
    try testing.expectError(error.EndOfStream, descriptors.next());
}

test "iterator returns error when it reaches invalid data in the middle of stream" {
    const reader = fixedBufferStream(&FileExamples.invalid_resource_type).reader();
    var descriptors = ResourceDescriptor.iterator(reader);

    try testing.expectEqual(DescriptorExamples.valid_descriptor, descriptors.next());
    try testing.expectError(error.InvalidResourceType, descriptors.next());
}
| https://raw.githubusercontent.com/alunbestor/AZiggierWorld/05b059051ef8ec16918d25477b1cf85aa04f5685/src/lib/resources/resource_descriptor.zig |
const print = @import("std").debug.print;
const expect = @import("std").testing.expect;
const std = @import("std");
const token = @import("token.zig");
// Types
const Token = token.Token;
const TokenMap = token.TokenMap;
// Half-open byte range [start, end) of a token within the input buffer.
const TokenRange = struct {
    start: u8,
    end: u8
};
pub const Lexer = struct {
// allocator
allocator: *std.mem.Allocator,
// input
input: []const u8,
// current position in input (points to current char)
position: u8,
// current reading position in input (after current char)
readPosition: u8,
// current char under examination
ch: TokenMap,
// current literal
literal: []const u8,
// Checks if we reached end of input
// if so sets to 0 (ASCII code for "NUL" char)
// otherwise, sets `l.ch` to the next char
fn readChar(self: *@This()) Token {
if (self.readPosition >= self.input.len) {
// Update character (set EOF)
self.ch = TokenMap.eof;
// Update literal
self.literal = "";
} else if (isLetter(self.input[self.readPosition])) {
// Find the word range
const wr = self.getRange(self.readPosition, isLetter);
// Update positions
self.updatePosition(wr);
// Update literal
self.literal = self.input[wr.start..wr.end];
// Update character
self.ch = self.literalToChar(self.literal) catch TokenMap.nul;
} else if (isDigit(self.input[self.readPosition])) {
// Find the word range
const wr = self.getRange(self.readPosition, isDigit);
// Update positions
self.updatePosition(wr);
// Update literal
self.literal = self.input[wr.start..wr.end];
// Update character
self.ch = TokenMap.int;
// Update positions
// self.step();
} else if (isWhiteSpace(self.input[self.readPosition])) {
// Update positions
self.step();
// Skip to next
return self.readChar();
} else {
// Update character
self.ch = @intToEnum(TokenMap, self.input[self.readPosition]);
// Update literal
self.literal = self.input[self.readPosition..self.readPosition+1];
// Override peekChar matches
if (self.ch == TokenMap.assign and self.peekChar() == TokenMap.assign) {
self.ch = TokenMap.eq;
self.literal = "==";
// Update positions
self.step();
} else if (self.ch == TokenMap.bang and self.peekChar() == TokenMap.assign) {
self.ch = TokenMap.neq;
self.literal = "!=";
// Update positions
self.step();
}
// Update positions
self.step();
}
const tk = Token {
.type = self.ch,
.literal = self.literal
};
return tk;
}
fn getRange(self: *@This(), startPos: u8, callback: fn(u8) bool) TokenRange {
var i: u8 = startPos;
while (i < self.input.len): (i += 1) {
if (!callback(self.input[i])) {
return TokenRange {
.start = startPos,
.end = i
};
}
}
return TokenRange {
.start = startPos,
.end = @intCast(u8, self.input.len)
};
}
fn isLetter(ch: u8) bool {
return (ch >= 'a' and ch <= 'z') or (ch >= 'A' and ch <= 'B');
}
fn isDigit(ch: u8) bool {
return ch >= '0' and ch <= '9';
}
fn isWhiteSpace(ch: u8) bool {
return ch == ' ' or ch == '\t' or ch == '\n' or ch == '\r';
}
pub fn nextToken(self: *@This()) ?Token {
const tk: Token = self.readChar();
return tk;
}
fn peekChar(self: *@This()) TokenMap {
const nextPosition = self.readPosition + 1;
if (nextPosition >= self.input.len or isLetter(self.input[nextPosition]) or isWhiteSpace(self.input[nextPosition]) or isDigit(self.input[nextPosition])) {
return TokenMap.nul;
} else {
return @intToEnum(TokenMap, self.input[nextPosition]);
}
}
fn literalToChar(self: *@This(), literal: []const u8) !TokenMap {
var ch: TokenMap = undefined;
// Some keywords might clash with Zig syntax
// for that reason we prefixed our keywords with a `_`
const prefixed = try std.mem.concat(self.allocator, u8, &[_][]const u8{ "_", literal });
if (std.meta.stringToEnum(TokenMap, prefixed)) |char| {
ch = char;
} else {
ch = TokenMap.ident;
}
return ch;
}
    /// Advances both cursors by one byte: `position` catches up to
    /// `readPosition`, which moves on to the next byte of the input.
    fn step(self: *@This()) void {
        self.position = self.readPosition;
        self.readPosition += 1;
    }
    /// Moves both cursors to the bounds of a consumed word: `position` at
    /// its first byte, `readPosition` just past its last byte (exclusive).
    fn updatePosition(self: *@This(), wr: TokenRange) void {
        self.position = wr.start;
        self.readPosition = wr.end;
    }
    /// Stores the allocator (used by literalToChar for the keyword-prefix
    /// concatenation) and resets the lexer over `input`.
    fn init(self: *@This(), allocator: *std.mem.Allocator, input: []const u8) !void {
        self.allocator = allocator;
        try self.new(input);
    }
    /// Resets the lexer to tokenise `input` from the beginning.
    /// NOTE(review): `ch` and `literal` are not reset here; they only hold
    /// meaningful values after the first readChar call — confirm callers
    /// do not read them before then.
    pub fn new(self: *@This(), input: []const u8) !void {
        self.input = input;
        self.position = 0;
        self.readPosition = 0;
    }
    /// Heap-allocates a Lexer with `allocator` and initialises it over
    /// `input`. Caller owns the returned pointer.
    pub fn create(allocator: *std.mem.Allocator, input: []const u8) !*Lexer {
        var l = try allocator.create(Lexer);
        // Initialisation
        try l.init(allocator, input);
        return l;
    }
};
test "Gets correct word range\n" {
    // The input contains three letter-runs: "let" at [0,3), "let" at
    // [4,7) and "return" at [9,15) — the last one runs to the end of the
    // input, exercising the end-of-input path of getRange.
    const input: []const u8 = "let{let!{return";
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;
    var l = try Lexer.create(allocator, input);
    const wrFirst = l.getRange(0, Lexer.isLetter);
    const wrSecond = l.getRange(4, Lexer.isLetter);
    const wrThird = l.getRange(9, Lexer.isLetter);
    expect(wrFirst.start == 0 and wrFirst.end == 3);
    expect(wrSecond.start == 4 and wrSecond.end == 7);
    expect(wrThird.start == 9 and wrThird.end == 15);
    expect(std.mem.eql(u8, input[wrFirst.start..wrFirst.end], "let"));
    expect(std.mem.eql(u8, input[wrSecond.start..wrSecond.end], "let"));
    expect(std.mem.eql(u8, input[wrThird.start..wrThird.end], "return"));
}
test "Verifies token types\n" {
    // Tokenises a representative Monkey program and checks every token
    // type in order. Fix: the rbrace entry closing the `return true;`
    // branch listed expectedLiteral "{" (a copy/paste slip); it is "}".
    // NOTE(review): the loop below only asserts token *types*; the
    // expectedLiteral values are currently unchecked.
    const input: []const u8 =
        \\ let five = 5;
        \\ let three = 3;
        \\ let add = function(x, y) {
        \\ return x + y;
        \\ };
        \\
        \\ let result = add(five, ten);
        \\ <>!-/*!
        \\ if (5 < 10) {
        \\ return true;
        \\ } else {
        \\ return false;
        \\ }
        \\ 8 == 8
        \\ 10 != 8
    ;
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;
    var l = try Lexer.create(allocator, input);
    const expectedType = struct {
        expectedType: TokenMap,
        expectedLiteral: []const u8
    };
    const testCases = [_]expectedType {
        .{
            .expectedType = TokenMap._let,
            .expectedLiteral = "let"
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "five"
        },
        .{
            .expectedType = TokenMap.assign,
            .expectedLiteral = "="
        },
        .{
            .expectedType = TokenMap.int,
            .expectedLiteral = "5"
        },
        .{
            .expectedType = TokenMap.semicolon,
            .expectedLiteral = ";"
        },
        .{
            .expectedType = TokenMap._let,
            .expectedLiteral = "let"
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "three"
        },
        .{
            .expectedType = TokenMap.assign,
            .expectedLiteral = "="
        },
        .{
            .expectedType = TokenMap.int,
            .expectedLiteral = "3"
        },
        .{
            .expectedType = TokenMap.semicolon,
            .expectedLiteral = ";"
        },
        .{
            .expectedType = TokenMap._let,
            .expectedLiteral = "let"
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "add"
        },
        .{
            .expectedType = TokenMap.assign,
            .expectedLiteral = "="
        },
        .{
            .expectedType = TokenMap._function,
            .expectedLiteral = "function"
        },
        .{
            .expectedType = TokenMap.lparen,
            .expectedLiteral = "("
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "x"
        },
        .{
            .expectedType = TokenMap.comma,
            .expectedLiteral = ","
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "y"
        },
        .{
            .expectedType = TokenMap.rparen,
            .expectedLiteral = ")"
        },
        .{
            .expectedType = TokenMap.lbrace,
            .expectedLiteral = "{"
        },
        .{
            .expectedType = TokenMap._return,
            .expectedLiteral = "return"
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "x"
        },
        .{
            .expectedType = TokenMap.plus,
            .expectedLiteral = "+"
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "y"
        },
        .{
            .expectedType = TokenMap.semicolon,
            .expectedLiteral = ";"
        },
        .{
            .expectedType = TokenMap.rbrace,
            .expectedLiteral = "}"
        },
        .{
            .expectedType = TokenMap.semicolon,
            .expectedLiteral = ";"
        },
        .{
            .expectedType = TokenMap._let,
            .expectedLiteral = "let"
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "result"
        },
        .{
            .expectedType = TokenMap.assign,
            .expectedLiteral = "="
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "add"
        },
        .{
            .expectedType = TokenMap.lparen,
            .expectedLiteral = "("
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "five"
        },
        .{
            .expectedType = TokenMap.comma,
            .expectedLiteral = ","
        },
        .{
            .expectedType = TokenMap.ident,
            .expectedLiteral = "ten"
        },
        .{
            .expectedType = TokenMap.rparen,
            .expectedLiteral = ")"
        },
        .{
            .expectedType = TokenMap.semicolon,
            .expectedLiteral = ";"
        },
        .{
            .expectedType = TokenMap.lt,
            .expectedLiteral = "<"
        },
        .{
            .expectedType = TokenMap.gt,
            .expectedLiteral = ">"
        },
        .{
            .expectedType = TokenMap.bang,
            .expectedLiteral = "!"
        },
        .{
            .expectedType = TokenMap.minus,
            .expectedLiteral = "-"
        },
        .{
            .expectedType = TokenMap.slash,
            .expectedLiteral = "/"
        },
        .{
            .expectedType = TokenMap.asterisk,
            .expectedLiteral = "*"
        },
        .{
            .expectedType = TokenMap.bang,
            .expectedLiteral = "!"
        },
        .{
            .expectedType = TokenMap._if,
            .expectedLiteral = "if"
        },
        .{
            .expectedType = TokenMap.lparen,
            .expectedLiteral = "("
        },
        .{
            .expectedType = TokenMap.int,
            .expectedLiteral = "5"
        },
        .{
            .expectedType = TokenMap.lt,
            .expectedLiteral = "<"
        },
        .{
            .expectedType = TokenMap.int,
            .expectedLiteral = "10"
        },
        .{
            .expectedType = TokenMap.rparen,
            .expectedLiteral = ")"
        },
        .{
            .expectedType = TokenMap.lbrace,
            .expectedLiteral = "{"
        },
        .{
            .expectedType = TokenMap._return,
            .expectedLiteral = "return"
        },
        .{
            .expectedType = TokenMap._true,
            .expectedLiteral = "true"
        },
        .{
            .expectedType = TokenMap.semicolon,
            .expectedLiteral = ";"
        },
        .{
            .expectedType = TokenMap.rbrace,
            // "}" closes the if-block; this previously read "{".
            .expectedLiteral = "}"
        },
        .{
            .expectedType = TokenMap._else,
            .expectedLiteral = "else"
        },
        .{
            .expectedType = TokenMap.lbrace,
            .expectedLiteral = "{"
        },
        .{
            .expectedType = TokenMap._return,
            .expectedLiteral = "return"
        },
        .{
            .expectedType = TokenMap._false,
            .expectedLiteral = "false"
        },
        .{
            .expectedType = TokenMap.semicolon,
            .expectedLiteral = ";"
        },
        .{
            .expectedType = TokenMap.rbrace,
            .expectedLiteral = "}"
        },
        .{
            .expectedType = TokenMap.int,
            .expectedLiteral = "8"
        },
        .{
            .expectedType = TokenMap.eq,
            .expectedLiteral = "=="
        },
        .{
            .expectedType = TokenMap.int,
            .expectedLiteral = "8"
        },
        .{
            .expectedType = TokenMap.int,
            .expectedLiteral = "10"
        },
        .{
            .expectedType = TokenMap.neq,
            .expectedLiteral = "!="
        },
        .{
            .expectedType = TokenMap.int,
            .expectedLiteral = "8"
        },
        .{
            .expectedType = TokenMap.eof,
            .expectedLiteral = ""
        }
    };
    for (testCases) |field| {
        // Get next token
        if (l.nextToken()) |tok| {
            // Assertion
            expect(tok.type == field.expectedType);
        }
    }
}
} | https://raw.githubusercontent.com/heldrida/interpreter-in-zig/1e5fb2be9135ce2854865ef2896723c90105dd8c/src/lexer.zig |
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std.zig");
const builtin = std.builtin;
const debug = std.debug;
const mem = std.mem;
const math = std.math;
const testing = std.testing;
const root = @import("root");
pub const trait = @import("meta/trait.zig");
pub const TrailerFlags = @import("meta/trailer_flags.zig").TrailerFlags;
const TypeInfo = builtin.TypeInfo;
/// Returns the string name of the passed enum tag, union tag or error value.
pub fn tagName(v: anytype) []const u8 {
    return switch (@typeInfo(@TypeOf(v))) {
        .ErrorSet => @errorName(v),
        else => @tagName(v),
    };
}
// Exercises tagName across plain enums, explicit-tag enums, tagged unions
// and error values.
test "std.meta.tagName" {
    const E1 = enum {
        A,
        B,
    };
    const E2 = enum(u8) {
        C = 33,
        D,
    };
    const U1 = union(enum) {
        G: u8,
        H: u16,
    };
    const U2 = union(E2) {
        C: u8,
        D: u16,
    };
    var u1g = U1{ .G = 0 };
    var u1h = U1{ .H = 0 };
    var u2a = U2{ .C = 0 };
    var u2b = U2{ .D = 0 };
    try testing.expect(mem.eql(u8, tagName(E1.A), "A"));
    try testing.expect(mem.eql(u8, tagName(E1.B), "B"));
    try testing.expect(mem.eql(u8, tagName(E2.C), "C"));
    try testing.expect(mem.eql(u8, tagName(E2.D), "D"));
    try testing.expect(mem.eql(u8, tagName(error.E), "E"));
    try testing.expect(mem.eql(u8, tagName(error.F), "F"));
    try testing.expect(mem.eql(u8, tagName(u1g), "G"));
    try testing.expect(mem.eql(u8, tagName(u1h), "H"));
    try testing.expect(mem.eql(u8, tagName(u2a), "C"));
    try testing.expect(mem.eql(u8, tagName(u2b), "D"));
}
/// Returns the field of enum `T` whose name equals `str`, or `null` when
/// no field matches.
pub fn stringToEnum(comptime T: type, str: []const u8) ?T {
    // Using ComptimeStringMap here is more performant, but it will start to take too
    // long to compile if the enum is large enough, due to the current limits of comptime
    // performance when doing things like constructing lookup maps at comptime.
    // TODO The '100' here is arbitrary and should be increased when possible:
    // - https://github.com/ziglang/zig/issues/4055
    // - https://github.com/ziglang/zig/issues/3863
    if (@typeInfo(T).Enum.fields.len <= 100) {
        const kvs = comptime build_kvs: {
            // In order to generate an array of structs that play nice with anonymous
            // list literals, we need to give them "0" and "1" field names.
            // TODO https://github.com/ziglang/zig/issues/4335
            const EnumKV = struct {
                @"0": []const u8,
                @"1": T,
            };
            var kvs_array: [@typeInfo(T).Enum.fields.len]EnumKV = undefined;
            inline for (@typeInfo(T).Enum.fields) |enumField, i| {
                kvs_array[i] = .{ .@"0" = enumField.name, .@"1" = @field(T, enumField.name) };
            }
            break :build_kvs kvs_array[0..];
        };
        const map = std.ComptimeStringMap(T, kvs);
        return map.get(str);
    } else {
        // Large enums: fall back to a linear scan to keep compile times sane.
        inline for (@typeInfo(T).Enum.fields) |enumField| {
            if (mem.eql(u8, str, enumField.name)) {
                return @field(T, enumField.name);
            }
        }
        return null;
    }
}
test "std.meta.stringToEnum" {
    const E1 = enum {
        A,
        B,
    };
    try testing.expect(E1.A == stringToEnum(E1, "A").?);
    try testing.expect(E1.B == stringToEnum(E1, "B").?);
    try testing.expect(null == stringToEnum(E1, "C"));
}
/// Returns the number of bits in the representation of `T`.
/// Supports bool (1 bit), integer and float types; anything else is a
/// compile error.
pub fn bitCount(comptime T: type) comptime_int {
    const info = @typeInfo(T);
    if (info == .Bool) return 1;
    if (info == .Int) return info.Int.bits;
    if (info == .Float) return info.Float.bits;
    @compileError("Expected bool, int or float type, found '" ++ @typeName(T) ++ "'");
}
// Sanity checks for one integer and one float width.
test "std.meta.bitCount" {
    try testing.expect(bitCount(u8) == 8);
    try testing.expect(bitCount(f32) == 32);
}
/// Returns the alignment of type T.
/// Note that if T is a pointer or function type the result is different than
/// the one returned by @alignOf(T).
/// If T is a pointer type the alignment of the type it points to is returned.
/// If T is a function type the alignment a target-dependent value is returned.
pub fn alignment(comptime T: type) comptime_int {
    return switch (@typeInfo(T)) {
        // Optional pointers/functions carry the alignment of their child.
        .Optional => |info| switch (@typeInfo(info.child)) {
            .Pointer, .Fn => alignment(info.child),
            else => @alignOf(T),
        },
        .Pointer => |info| info.alignment,
        .Fn => |info| info.alignment,
        else => @alignOf(T),
    };
}
// Covers plain types, explicitly-aligned pointers/slices and functions.
test "std.meta.alignment" {
    try testing.expect(alignment(u8) == 1);
    try testing.expect(alignment(*align(1) u8) == 1);
    try testing.expect(alignment(*align(2) u8) == 2);
    try testing.expect(alignment([]align(1) u8) == 1);
    try testing.expect(alignment([]align(2) u8) == 2);
    try testing.expect(alignment(fn () void) > 0);
    try testing.expect(alignment(fn () align(128) void) == 128);
}
/// Given a pointer, optional, array or vector type, returns its child
/// (element) type; any other type is a compile error.
pub fn Child(comptime T: type) type {
    switch (@typeInfo(T)) {
        .Array => |info| return info.child,
        .Vector => |info| return info.child,
        .Pointer => |info| return info.child,
        .Optional => |info| return info.child,
        else => @compileError("Expected pointer, optional, array or vector type, found '" ++ @typeName(T) ++ "'"),
    }
}
// One case per supported type category (array, pointer, slice, optional,
// vector).
test "std.meta.Child" {
    try testing.expect(Child([1]u8) == u8);
    try testing.expect(Child(*u8) == u8);
    try testing.expect(Child([]u8) == u8);
    try testing.expect(Child(?u8) == u8);
    try testing.expect(Child(Vector(2, u8)) == u8);
}
/// Given a "memory span" type, returns the "element type".
/// Unlike Child, a single-item pointer to an array/vector yields the
/// array's element type rather than the array type itself.
pub fn Elem(comptime T: type) type {
    switch (@typeInfo(T)) {
        .Array => |info| return info.child,
        .Vector => |info| return info.child,
        .Pointer => |info| switch (info.size) {
            // *[N]T / *Vector -> element type, not the aggregate.
            .One => switch (@typeInfo(info.child)) {
                .Array => |array_info| return array_info.child,
                .Vector => |vector_info| return vector_info.child,
                else => {},
            },
            .Many, .C, .Slice => return info.child,
        },
        .Optional => |info| return Elem(info.child),
        else => {},
    }
    @compileError("Expected pointer, slice, array or vector type, found '" ++ @typeName(T) ++ "'");
}
test "std.meta.Elem" {
    try testing.expect(Elem([1]u8) == u8);
    try testing.expect(Elem([*]u8) == u8);
    try testing.expect(Elem([]u8) == u8);
    try testing.expect(Elem(*[10]u8) == u8);
    try testing.expect(Elem(Vector(2, u8)) == u8);
    try testing.expect(Elem(*Vector(2, u8)) == u8);
    try testing.expect(Elem(?[*]u8) == u8);
}
/// Given a type which can have a sentinel e.g. `[:0]u8`, returns the sentinel value,
/// or `null` if there is not one.
/// Types which cannot possibly have a sentinel will be a compile error.
pub fn sentinel(comptime T: type) ?Elem(T) {
    switch (@typeInfo(T)) {
        .Array => |info| return info.sentinel,
        .Pointer => |info| {
            switch (info.size) {
                .Many, .Slice => return info.sentinel,
                // Single-item pointer to array: the sentinel lives on the array.
                .One => switch (@typeInfo(info.child)) {
                    .Array => |array_info| return array_info.sentinel,
                    else => {},
                },
                else => {},
            }
        },
        else => {},
    }
    @compileError("type '" ++ @typeName(T) ++ "' cannot possibly have a sentinel");
}
test "std.meta.sentinel" {
    try testSentinel();
    comptime try testSentinel();
}
// Shared body so the cases above run at both runtime and comptime.
fn testSentinel() !void {
    try testing.expectEqual(@as(u8, 0), sentinel([:0]u8).?);
    try testing.expectEqual(@as(u8, 0), sentinel([*:0]u8).?);
    try testing.expectEqual(@as(u8, 0), sentinel([5:0]u8).?);
    try testing.expectEqual(@as(u8, 0), sentinel(*const [5:0]u8).?);
    try testing.expect(sentinel([]u8) == null);
    try testing.expect(sentinel([*]u8) == null);
    try testing.expect(sentinel([5]u8) == null);
    try testing.expect(sentinel(*const [5]u8) == null);
}
/// Given a "memory span" type, returns the same type except with the given sentinel value.
/// Supported shapes: *[N]T (sentinel applied to the array), [*]T / []T,
/// and ?[*]T; anything else is a compile error.
pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
    switch (@typeInfo(T)) {
        .Pointer => |info| switch (info.size) {
            // *[N]T -> *[N:s]T: rebuild the array child with the sentinel,
            // keeping the pointer attributes unchanged.
            .One => switch (@typeInfo(info.child)) {
                .Array => |array_info| return @Type(.{
                    .Pointer = .{
                        .size = info.size,
                        .is_const = info.is_const,
                        .is_volatile = info.is_volatile,
                        .alignment = info.alignment,
                        .child = @Type(.{
                            .Array = .{
                                .len = array_info.len,
                                .child = array_info.child,
                                .sentinel = sentinel_val,
                            },
                        }),
                        .is_allowzero = info.is_allowzero,
                        .sentinel = info.sentinel,
                    },
                }),
                else => {},
            },
            // [*]T / []T -> [*:s]T / [:s]T.
            .Many, .Slice => return @Type(.{
                .Pointer = .{
                    .size = info.size,
                    .is_const = info.is_const,
                    .is_volatile = info.is_volatile,
                    .alignment = info.alignment,
                    .child = info.child,
                    .is_allowzero = info.is_allowzero,
                    .sentinel = sentinel_val,
                },
            }),
            else => {},
        },
        // ?[*]T -> ?[*:s]T.
        .Optional => |info| switch (@typeInfo(info.child)) {
            .Pointer => |ptr_info| switch (ptr_info.size) {
                .Many => return @Type(.{
                    .Optional = .{
                        .child = @Type(.{
                            .Pointer = .{
                                .size = ptr_info.size,
                                .is_const = ptr_info.is_const,
                                .is_volatile = ptr_info.is_volatile,
                                .alignment = ptr_info.alignment,
                                .child = ptr_info.child,
                                .is_allowzero = ptr_info.is_allowzero,
                                .sentinel = sentinel_val,
                            },
                        }),
                    },
                }),
                else => {},
            },
            else => {},
        },
        else => {},
    }
    @compileError("Unable to derive a sentinel pointer type from " ++ @typeName(T));
}
/// Takes a Slice or Many Pointer and returns it with the Type modified to have the given sentinel value.
/// This function assumes the caller has verified the memory contains the sentinel value.
pub fn assumeSentinel(p: anytype, comptime sentinel_val: Elem(@TypeOf(p))) Sentinel(@TypeOf(p), sentinel_val) {
    const T = @TypeOf(p);
    const ReturnType = Sentinel(T, sentinel_val);
    switch (@typeInfo(T)) {
        .Pointer => |info| switch (info.size) {
            // Slices change only their type, not representation: bitcast.
            .Slice => return @bitCast(ReturnType, p),
            .Many, .One => return @ptrCast(ReturnType, p),
            .C => {},
        },
        .Optional => |info| switch (@typeInfo(info.child)) {
            .Pointer => |ptr_info| switch (ptr_info.size) {
                .Many => return @ptrCast(ReturnType, p),
                else => {},
            },
            else => {},
        },
        else => {},
    }
    @compileError("Unable to derive a sentinel pointer type from " ++ @typeName(T))
;
}
test "std.meta.assumeSentinel" {
    try testing.expect([*:0]u8 == @TypeOf(assumeSentinel(@as([*]u8, undefined), 0)));
    try testing.expect([:0]u8 == @TypeOf(assumeSentinel(@as([]u8, undefined), 0)));
    try testing.expect([*:0]const u8 == @TypeOf(assumeSentinel(@as([*]const u8, undefined), 0)));
    try testing.expect([:0]const u8 == @TypeOf(assumeSentinel(@as([]const u8, undefined), 0)));
    try testing.expect([*:0]u16 == @TypeOf(assumeSentinel(@as([*]u16, undefined), 0)));
    try testing.expect([:0]const u16 == @TypeOf(assumeSentinel(@as([]const u16, undefined), 0)));
    try testing.expect([*:3]u8 == @TypeOf(assumeSentinel(@as([*:1]u8, undefined), 3)));
    try testing.expect([:null]?[*]u8 == @TypeOf(assumeSentinel(@as([]?[*]u8, undefined), null)));
    try testing.expect([*:null]?[*]u8 == @TypeOf(assumeSentinel(@as([*]?[*]u8, undefined), null)));
    try testing.expect(*[10:0]u8 == @TypeOf(assumeSentinel(@as(*[10]u8, undefined), 0)));
    try testing.expect(?[*:0]u8 == @TypeOf(assumeSentinel(@as(?[*]u8, undefined), 0)));
}
/// Returns the container layout (.Auto, .Packed or .Extern) of a struct,
/// enum or union type; any other type is a compile error.
pub fn containerLayout(comptime T: type) TypeInfo.ContainerLayout {
    switch (@typeInfo(T)) {
        .Struct => |info| return info.layout,
        .Enum => |info| return info.layout,
        .Union => |info| return info.layout,
        else => @compileError("Expected struct, enum or union type, found '" ++ @typeName(T) ++ "'"),
    }
}
// Covers auto/packed/extern layouts for structs and unions, plus an enum.
test "std.meta.containerLayout" {
    const E1 = enum {
        A,
    };
    const S1 = struct {};
    const S2 = packed struct {};
    const S3 = extern struct {};
    const U1 = union {
        a: u8,
    };
    const U2 = packed union {
        a: u8,
    };
    const U3 = extern union {
        a: u8,
    };
    try testing.expect(containerLayout(E1) == .Auto)
;
    try testing.expect(containerLayout(S1) == .Auto);
    try testing.expect(containerLayout(S2) == .Packed);
    try testing.expect(containerLayout(S3) == .Extern);
    try testing.expect(containerLayout(U1) == .Auto);
    try testing.expect(containerLayout(U2) == .Packed);
    try testing.expect(containerLayout(U3) == .Extern);
}
/// Returns the declarations of a struct, enum, union or opaque type.
pub fn declarations(comptime T: type) []const TypeInfo.Declaration {
    return switch (@typeInfo(T)) {
        .Struct => |info| info.decls,
        .Enum => |info| info.decls,
        .Union => |info| info.decls,
        .Opaque => |info| info.decls,
        else => @compileError("Expected struct, enum, union, or opaque type, found '" ++ @typeName(T) ++ "'"),
    };
}
// Each container below declares exactly one fn `a`.
test "std.meta.declarations" {
    const E1 = enum {
        A,
        fn a() void {}
    };
    const S1 = struct {
        fn a() void {}
    };
    const U1 = union {
        a: u8,
        fn a() void {}
    };
    const O1 = opaque {
        fn a() void {}
    };
    const decls = comptime [_][]const TypeInfo.Declaration{
        declarations(E1),
        declarations(S1),
        declarations(U1),
        declarations(O1),
    };
    inline for (decls) |decl| {
        try testing.expect(decl.len == 1);
        try testing.expect(comptime mem.eql(u8, decl[0].name, "a"));
    }
}
/// Returns the Declaration of `T` named `decl_name`; compile error if the
/// declaration does not exist.
pub fn declarationInfo(comptime T: type, comptime decl_name: []const u8) TypeInfo.Declaration {
    inline for (comptime declarations(T)) |decl| {
        if (comptime mem.eql(u8, decl.name, decl_name))
            return decl;
    }
    @compileError("'" ++ @typeName(T) ++ "' has no declaration '" ++ decl_name ++ "'");
}
test "std.meta.declarationInfo" {
    const E1 = enum {
        A,
        fn a() void {}
    };
    const S1 = struct {
        fn a() void {}
    };
    const U1 = union {
        a: u8,
        fn a() void {}
    };
    const infos = comptime [_]TypeInfo.Declaration{
        declarationInfo(E1, "a"),
        declarationInfo(S1, "a"),
        declarationInfo(U1, "a"),
    };
    inline for (infos) |info| {
        try testing.expect(comptime mem.eql(u8, info.name, "a"));
        // The `a` fns above are not `pub`.
        try testing.expect(!info.is_pub);
    }
}
/// Returns the fields of a struct, union, enum, or (non-global) error set.
/// The return type depends on the category of `T`.
pub fn fields(comptime T: type) switch (@typeInfo(T)) {
    .Struct => []const TypeInfo.StructField,
    .Union => []const TypeInfo.UnionField,
    .ErrorSet => []const TypeInfo.Error,
    .Enum => []const TypeInfo.EnumField,
    else => @compileError("Expected struct, union, error set or enum type, found '" ++ @typeName(T) ++ "'"),
} {
    return switch (@typeInfo(T)) {
        .Struct => |info| info.fields,
        .Union => |info| info.fields,
        .Enum => |info| info.fields,
        .ErrorSet => |errors| errors.?, // must be non global error set
        else => @compileError("Expected struct, union, error set or enum type, found '" ++ @typeName(T) ++ "'"),
    };
}
test "std.meta.fields" {
    const E1 = enum {
        A,
    };
    const E2 = error{A};
    const S1 = struct {
        a: u8,
    };
    const U1 = union {
        a: u8,
    };
    const e1f = comptime fields(E1);
    const e2f = comptime fields(E2);
    const sf = comptime fields(S1);
    const uf = comptime fields(U1);
    try testing.expect(e1f.len == 1);
    try testing.expect(e2f.len == 1);
    try testing.expect(sf.len == 1);
    try testing.expect(uf.len == 1);
    try testing.expect(mem.eql(u8, e1f[0].name, "A"));
    try testing.expect(mem.eql(u8, e2f[0].name, "A"));
    try testing.expect(mem.eql(u8, sf[0].name, "a"));
    try testing.expect(mem.eql(u8, uf[0].name, "a"));
    try testing.expect(comptime sf[0].field_type == u8);
    try testing.expect(comptime uf[0].field_type == u8);
}
/// Returns the field info of `T` for the field selected by the FieldEnum
/// value `field`. The FieldEnum parameter makes misspelled names a
/// compile error.
pub fn fieldInfo(comptime T: type, comptime field: FieldEnum(T)) switch (@typeInfo(T)) {
    .Struct => TypeInfo.StructField,
    .Union => TypeInfo.UnionField,
    .ErrorSet => TypeInfo.Error,
    .Enum => TypeInfo.EnumField,
    else => @compileError("Expected struct, union, error set or enum type, found '" ++ @typeName(T) ++ "'"),
} {
    // FieldEnum values are declared in source order, so the enum integer
    // indexes straight into fields(T).
    return fields(T)[@enumToInt(field)];
}
test "std.meta.fieldInfo" {
    const E1 = enum {
        A,
    };
    const E2 = error{A};
    const S1 = struct {
        a: u8,
    };
    const U1 = union {
        a: u8,
    };
    const e1f = fieldInfo(E1, .A);
    const e2f = fieldInfo(E2, .A);
    const sf = fieldInfo(S1, .a);
    const uf = fieldInfo(U1, .a);
    try testing.expect(mem.eql(u8, e1f.name, "A"));
    try testing.expect(mem.eql(u8, e2f.name, "A"));
    try testing.expect(mem.eql(u8, sf.name, "a"));
    try testing.expect(mem.eql(u8, uf.name, "a"));
    try testing.expect(comptime sf.field_type == u8);
    try testing.expect(comptime uf.field_type == u8);
}
/// Returns a pointer to a comptime-built array of the field names of `T`,
/// in declaration order.
pub fn fieldNames(comptime T: type) *const [fields(T).len][]const u8 {
    comptime {
        const fieldInfos = fields(T);
        var names: [fieldInfos.len][]const u8 = undefined;
        for (fieldInfos) |field, i| {
            names[i] = field.name;
        }
        return &names;
    }
}
test "std.meta.fieldNames" {
    const E1 = enum { A, B };
    const E2 = error{A};
    const S1 = struct {
        a: u8,
    };
    const U1 = union {
        a: u8,
        b: void,
    };
    const e1names = fieldNames(E1);
    const e2names = fieldNames(E2);
    const s1names = fieldNames(S1);
    const u1names = fieldNames(U1);
    try testing.expect(e1names.len == 2);
    try testing.expectEqualSlices(u8, e1names[0], "A");
    try testing.expectEqualSlices(u8, e1names[1], "B");
    try testing.expect(e2names.len == 1);
    try testing.expectEqualSlices(u8, e2names[0], "A");
    try testing.expect(s1names.len == 1);
    try testing.expectEqualSlices(u8, s1names[0], "a");
    try testing.expect(u1names.len == 2);
    try testing.expectEqualSlices(u8, u1names[0], "a");
    try testing.expectEqualSlices(u8, u1names[1], "b");
}
/// Returns an exhaustive enum whose fields mirror the field names of `T`,
/// with values equal to each field's declaration index.
pub fn FieldEnum(comptime T: type) type {
    const fieldInfos = fields(T);
    var enumFields: [fieldInfos.len]std.builtin.TypeInfo.EnumField = undefined;
    var decls = [_]std.builtin.TypeInfo.Declaration{};
    inline for (fieldInfos) |field, i| {
        enumFields[i] = .{
            .name = field.name,
            .value = i,
        };
    }
    return @Type(.{
        .Enum = .{
            .layout = .Auto,
            // Smallest integer type that can index every field.
            .tag_type = std.math.IntFittingRange(0, fieldInfos.len - 1),
            .fields = &enumFields,
            .decls = &decls,
            .is_exhaustive = true,
        },
    });
}
// Field-by-field enum comparison used by the FieldEnum test below.
fn expectEqualEnum(expected: anytype, actual: @TypeOf(expected)) !void {
    // TODO: https://github.com/ziglang/zig/issues/7419
    // testing.expectEqual(@typeInfo(expected).Enum, @typeInfo(actual).Enum);
    try testing.expectEqual(@typeInfo(expected).Enum.layout, @typeInfo(actual).Enum.layout);
    try testing.expectEqual(@typeInfo(expected).Enum.tag_type, @typeInfo(actual).Enum.tag_type);
    comptime try testing.expectEqualSlices(std.builtin.TypeInfo.EnumField, @typeInfo(expected).Enum.fields, @typeInfo(actual).Enum.fields);
    comptime try testing.expectEqualSlices(std.builtin.TypeInfo.Declaration, @typeInfo(expected).Enum.decls, @typeInfo(actual).Enum.decls);
    try testing.expectEqual(@typeInfo(expected).Enum.is_exhaustive, @typeInfo(actual).Enum.is_exhaustive);
}
test "std.meta.FieldEnum" {
    try expectEqualEnum(enum { a }, FieldEnum(struct { a: u8 }));
    try expectEqualEnum(enum { a, b, c }, FieldEnum(struct { a: u8, b: void, c: f32 }));
    try expectEqualEnum(enum { a, b, c }, FieldEnum(union { a: u8, b: void, c: f32 }));
}
// Deprecated: use Tag
pub const TagType = Tag;
/// Returns the tag type of an enum, or of a tagged union; compile error
/// for untagged unions and all other types.
pub fn Tag(comptime T: type) type {
    return switch (@typeInfo(T)) {
        .Enum => |info| info.tag_type,
        .Union => |info| info.tag_type orelse @compileError(@typeName(T) ++ " has no tag type"),
        else => @compileError("expected enum or union type, found '" ++ @typeName(T) ++ "'"),
    };
}
test "std.meta.Tag" {
    const E = enum(u8) {
        C = 33,
        D,
    };
    const U = union(E) {
        C: u8,
        D: u16,
    };
    try testing.expect(Tag(E) == u8);
    try testing.expect(Tag(U) == E);
}
/// Returns the active tag of a tagged union.
pub fn activeTag(u: anytype) Tag(@TypeOf(u)) {
    const T = @TypeOf(u);
    // Coercing a tagged union to its tag type yields the active tag.
    return @as(Tag(T), u);
}
test "std.meta.activeTag" {
    const UE = enum {
        Int,
        Float,
    };
    const U = union(UE) {
        Int: u32,
        Float: f32,
    };
    var u = U{ .Int = 32 };
    try testing.expect(activeTag(u) == UE.Int);
    u = U{ .Float = 112.9876 };
    try testing.expect(activeTag(u) == UE.Float);
}
// Deprecated: use TagPayload (made pub so external users of the old name
// still resolve it).
pub const TagPayloadType = TagPayload;
///Given a tagged union type, and an enum, return the type of the union
/// field corresponding to the enum tag.
pub fn TagPayload(comptime U: type, tag: Tag(U)) type {
    // A type-returning function cannot return an error union, so `try
    // testing.expect(...)` cannot compile here; assert instead that U is
    // a union type.
    debug.assert(trait.is(.Union)(U));
    const info = @typeInfo(U).Union;
    inline for (info.fields) |field_info| {
        if (comptime mem.eql(u8, field_info.name, @tagName(tag)))
            return field_info.field_type;
    }
    unreachable;
}
// The payload type of the `Moved` tag must match the field's actual type.
test "std.meta.TagPayload" {
    const Event = union(enum) {
        Moved: struct {
            from: i32,
            to: i32,
        },
    };
    const MovedEvent = TagPayload(Event, Event.Moved);
    var e: Event = undefined;
    try testing.expect(MovedEvent == @TypeOf(e.Moved));
}
/// Compares two of any type for equality. Containers are compared on a field-by-field basis,
/// where possible. Pointers are not followed.
pub fn eql(a: anytype, b: @TypeOf(a)) bool {
    const T = @TypeOf(a);
    switch (@typeInfo(T)) {
        .Struct => |info| {
            inline for (info.fields) |field_info| {
                if (!eql(@field(a, field_info.name), @field(b, field_info.name))) return false;
            }
            return true;
        },
        .ErrorUnion => {
            // Equal only if both are payloads that compare equal, or both
            // are the same error.
            if (a) |a_p| {
                if (b) |b_p| return eql(a_p, b_p) else |_| return false;
            } else |a_e| {
                if (b) |_| return false else |b_e| return a_e == b_e;
            }
        },
        .Union => |info| {
            if (info.tag_type) |UnionTag| {
                const tag_a = activeTag(a);
                const tag_b = activeTag(b);
                if (tag_a != tag_b) return false;
                // Same tag: compare the active payloads.
                inline for (info.fields) |field_info| {
                    if (@field(UnionTag, field_info.name) == tag_a) {
                        return eql(@field(a, field_info.name), @field(b, field_info.name));
                    }
                }
                return false;
            }
            @compileError("cannot compare untagged union type " ++ @typeName(T));
        },
        .Array => {
            if (a.len != b.len) return false;
            for (a) |e, i|
                if (!eql(e, b[i])) return false;
            return true;
        },
        .Vector => |info| {
            var i: usize = 0;
            while (i < info.len) : (i += 1) {
                if (!eql(a[i], b[i])) return false;
            }
            return true;
        },
        .Pointer => |info| {
            // Pointer identity, not pointee equality; slices compare
            // ptr+len, not contents.
            return switch (info.size) {
                .One, .Many, .C => a == b,
                .Slice => a.ptr == b.ptr and a.len == b.len,
            };
        },
        .Optional => {
            if (a == null and b == null) return true;
            if (a == null or b == null) return false;
            return eql(a.?, b.?);
        },
        else => return a == b,
    }
}
// Covers struct, pointer-identity, tagged-union, array, slice,
// error-union and vector comparison behaviour of eql.
test "std.meta.eql" {
    const S = struct {
        a: u32,
        b: f64,
        c: [5]u8,
    };
    const U = union(enum) {
        s: S,
        f: ?f32,
    };
    const s_1 = S{
        .a = 134,
        .b = 123.3,
        .c = "12345".*,
    };
    var s_3 = S{
        .a = 134,
        .b = 123.3,
        .c = "12345".*,
    };
    const u_1 = U{ .f = 24 };
    const u_2 = U{ .s = s_1 };
    const u_3 = U{ .f = 24 };
    try testing.expect(eql(s_1, s_3));
    // Pointers compare by identity: same address equal, distinct not.
    try testing.expect(eql(&s_1, &s_1));
    try testing.expect(!eql(&s_1, &s_3));
    try testing.expect(eql(u_1, u_3));
    try testing.expect(!eql(u_1, u_2));
    var a1 = "abcdef".*;
    var a2 = "abcdef".*;
    var a3 = "ghijkl".*;
    try testing.expect(eql(a1, a2));
    try testing.expect(!eql(a1, a3));
    // Slices compare ptr+len, so equal contents at different addresses differ.
    try testing.expect(!eql(a1[0..], a2[0..]));
    const EU = struct {
        fn tst(err: bool) !u8 {
            if (err) return error.Error;
            return @as(u8, 5);
        }
    };
    try testing.expect(eql(EU.tst(true), EU.tst(true)));
    try testing.expect(eql(EU.tst(false), EU.tst(false)));
    try testing.expect(!eql(EU.tst(false), EU.tst(true)));
    var v1 = @splat(4, @as(u32, 1));
    var v2 = @splat(4, @as(u32, 1));
    var v3 = @splat(4, @as(u32, 2));
    try testing.expect(eql(v1, v2));
    try testing.expect(!eql(v1, v3));
}
// Test precedes the definition; order is irrelevant at container scope.
test "intToEnum with error return" {
    const E1 = enum {
        A,
    };
    const E2 = enum {
        A,
        B,
    };
    var zero: u8 = 0;
    var one: u16 = 1;
    try testing.expect(intToEnum(E1, zero) catch unreachable == E1.A);
    try testing.expect(intToEnum(E2, one) catch unreachable == E2.B);
    try testing.expectError(error.InvalidEnumTag, intToEnum(E1, one));
}
pub const IntToEnumError = error{InvalidEnumTag};
/// Converts `tag_int` to the value of enum `EnumTag` with that integer
/// value, or returns error.InvalidEnumTag when no field matches.
pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTag {
    inline for (@typeInfo(EnumTag).Enum.fields) |f| {
        const this_tag_value = @field(EnumTag, f.name);
        if (tag_int == @enumToInt(this_tag_value)) {
            return this_tag_value;
        }
    }
    return error.InvalidEnumTag;
}
/// Given a type and a name, return the field index according to source order.
/// Returns `null` if the field is not found.
pub fn fieldIndex(comptime T: type, comptime name: []const u8) ?comptime_int {
    inline for (fields(T)) |field, i| {
        if (mem.eql(u8, field.name, name))
            return i;
    }
    return null;
}
pub const refAllDecls = @compileError("refAllDecls has been moved from std.meta to std.testing");
/// Returns a slice of pointers to public declarations of a namespace.
/// The result is sorted by declaration name.
pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const Decl {
    const S = struct {
        fn declNameLessThan(context: void, lhs: *const Decl, rhs: *const Decl) bool {
            _ = context;
            return mem.lessThan(u8, lhs.name, rhs.name);
        }
    };
    comptime {
        const decls = declarations(Namespace);
        var array: [decls.len]*const Decl = undefined;
        for (decls) |decl, i| {
            array[i] = &@field(Namespace, decl.name);
        }
        std.sort.sort(*const Decl, &array, {}, S.declNameLessThan);
        return &array;
    }
}
pub const IntType = @compileError("replaced by std.meta.Int");
/// Constructs an integer type with the given signedness and bit width.
pub fn Int(comptime signedness: builtin.Signedness, comptime bit_count: u16) type {
    return @Type(TypeInfo{
        .Int = .{
            .signedness = signedness,
            .bits = bit_count,
        },
    });
}
/// Constructs a vector type with `len` elements of type `child`.
pub fn Vector(comptime len: u32, comptime child: type) type {
    return @Type(TypeInfo{
        .Vector = .{
            .len = len,
            .child = child,
        },
    });
}
/// For a given function type, returns a tuple type which fields will
/// correspond to the argument types.
///
/// Examples:
/// - `ArgsTuple(fn() void)` ⇒ `tuple { }`
/// - `ArgsTuple(fn(a: u32) u32)` ⇒ `tuple { u32 }`
/// - `ArgsTuple(fn(a: u32, b: f16) noreturn)` ⇒ `tuple { u32, f16 }`
pub fn ArgsTuple(comptime Function: type) type {
    const info = @typeInfo(Function);
    if (info != .Fn)
        @compileError("ArgsTuple expects a function type");
    const function_info = info.Fn;
    if (function_info.is_generic)
        @compileError("Cannot create ArgsTuple for generic function");
    if (function_info.is_var_args)
        @compileError("Cannot create ArgsTuple for variadic function");
    var argument_field_list: [function_info.args.len]std.builtin.TypeInfo.StructField = undefined;
    inline for (function_info.args) |arg, i| {
        const T = arg.arg_type.?;
        @setEvalBranchQuota(10_000);
        // Tuple fields are named "0", "1", ... — print the index into a
        // comptime buffer.
        var num_buf: [128]u8 = undefined;
        argument_field_list[i] = std.builtin.TypeInfo.StructField{
            .name = std.fmt.bufPrint(&num_buf, "{d}", .{i}) catch unreachable,
            .field_type = T,
            .default_value = @as(?T, null),
            .is_comptime = false,
            .alignment = if (@sizeOf(T) > 0) @alignOf(T) else 0,
        };
    }
    return @Type(std.builtin.TypeInfo{
        .Struct = std.builtin.TypeInfo.Struct{
            .is_tuple = true,
            .layout = .Auto,
            .decls = &[_]std.builtin.TypeInfo.Declaration{},
            .fields = &argument_field_list,
        },
    });
}
/// For a given anonymous list of types, returns a new tuple type
/// with those types as fields.
///
/// Examples:
/// - `Tuple(&[_]type {})` ⇒ `tuple { }`
/// - `Tuple(&[_]type {f32})` ⇒ `tuple { f32 }`
/// - `Tuple(&[_]type {f32,u32})` ⇒ `tuple { f32, u32 }`
pub fn Tuple(comptime types: []const type) type {
    var tuple_fields: [types.len]std.builtin.TypeInfo.StructField = undefined;
    inline for (types) |T, i| {
        @setEvalBranchQuota(10_000);
        // Tuple fields are named "0", "1", ... — print the index into a
        // comptime buffer.
        var num_buf: [128]u8 = undefined;
        tuple_fields[i] = std.builtin.TypeInfo.StructField{
            .name = std.fmt.bufPrint(&num_buf, "{d}", .{i}) catch unreachable,
            .field_type = T,
            .default_value = @as(?T, null),
            .is_comptime = false,
            .alignment = if (@sizeOf(T) > 0) @alignOf(T) else 0,
        };
    }
    return @Type(std.builtin.TypeInfo{
        .Struct = std.builtin.TypeInfo.Struct{
            .is_tuple = true,
            .layout = .Auto,
            .decls = &[_]std.builtin.TypeInfo.Declaration{},
            .fields = &tuple_fields,
        },
    });
}
// Compile-time helpers used by the tests below to verify tuple types:
// any mismatch becomes a compile error rather than a runtime failure.
const TupleTester = struct {
    fn assertTypeEqual(comptime Expected: type, comptime Actual: type) void {
        if (Expected != Actual)
            @compileError("Expected type " ++ @typeName(Expected) ++ ", but got type " ++ @typeName(Actual));
    }

    // Asserts that `Actual` is a tuple whose field types match `expected`
    // (an anonymous list literal of types), position by position.
    fn assertTuple(comptime expected: anytype, comptime Actual: type) void {
        const info = @typeInfo(Actual);
        if (info != .Struct)
            @compileError("Expected struct type");
        if (!info.Struct.is_tuple)
            @compileError("Struct type must be a tuple type");

        const fields_list = std.meta.fields(Actual);
        if (expected.len != fields_list.len)
            @compileError("Argument count mismatch");

        inline for (fields_list) |fld, i| {
            if (expected[i] != fld.field_type) {
                @compileError("Field " ++ fld.name ++ " expected to be type " ++ @typeName(expected[i]) ++ ", but was type " ++ @typeName(fld.field_type));
            }
        }
    }
};
test "ArgsTuple" {
TupleTester.assertTuple(.{}, ArgsTuple(fn () void));
TupleTester.assertTuple(.{u32}, ArgsTuple(fn (a: u32) []const u8));
TupleTester.assertTuple(.{ u32, f16 }, ArgsTuple(fn (a: u32, b: f16) noreturn));
TupleTester.assertTuple(.{ u32, f16, []const u8, void }, ArgsTuple(fn (a: u32, b: f16, c: []const u8, void) noreturn));
}
test "Tuple" {
TupleTester.assertTuple(.{}, Tuple(&[_]type{}));
TupleTester.assertTuple(.{u32}, Tuple(&[_]type{u32}));
TupleTester.assertTuple(.{ u32, f16 }, Tuple(&[_]type{ u32, f16 }));
TupleTester.assertTuple(.{ u32, f16, []const u8, void }, Tuple(&[_]type{ u32, f16, []const u8, void }));
}
/// Reads a declaration named `name` from the root source file, coerced to
/// type `T`, or returns null when the root file does not declare it.
/// TODO: https://github.com/ziglang/zig/issues/425
pub fn globalOption(comptime name: []const u8, comptime T: type) ?T {
    if (!@hasDecl(root, name))
        return null;
    return @as(T, @field(root, name));
}
/// Returns whether `error_union` contains an error.
pub fn isError(error_union: anytype) bool {
    if (error_union) |_| {
        return false;
    } else |_| {
        return true;
    }
}
test "isError" {
try std.testing.expect(isError(math.absInt(@as(i8, -128))));
try std.testing.expect(!isError(math.absInt(@as(i8, -127))));
}
| https://raw.githubusercontent.com/collinalexbell/all-the-compilers/7f834984f71054806bfec8604e02e86b99c0f831/zig/lib/std/meta.zig |
const std = @import("std");
const builtin = @import("builtin");
const ac = @import("lib.zig");
const ascii = std.ascii;
const Allocator = std.mem.Allocator;
const StringHashMap = std.StringHashMap;
// Takes an input and provides an iterator over normalized word. Every
// result includes the word and the word_index.
// Normalizing means ignoring non-alphanumeric (ascii) + lowercasing the input.
pub const Input = struct {
    // The raw input. Not normalized, not trimmed. We don't own this.
    input: []const u8,

    // The normalized input. We own this, but if this is being called when adding
    // an item to the Index, the Index will take over this.
    normalized_buffer: []u8,

    // We normalize one character at a time, and this is where in normalized
    // we're at. The final normalized.len will always be <= input.len.
    normalized_position: u32,

    // The number of words yielded so far (also the 0-based index of the next
    // word). u3 assumes ac.MAX_WORDS <= 7 -- TODO confirm; otherwise the
    // increment in next() could overflow.
    word_count: u3,

    const Self = @This();

    // the word that we're yielding, which includes the word itself and its 0-based index
    const Word = struct {
        value: []const u8,
        index: ac.WordIndex,
    };

    /// Allocates the normalized buffer (input.len is an upper bound on the
    /// normalized length). The caller keeps ownership of `input`.
    pub fn parse(allocator: Allocator, input: []const u8) !Self {
        return Input{
            .input = input,
            .word_count = 0,
            .normalized_position = 0,
            .normalized_buffer = try allocator.alloc(u8, input.len),
        };
    }

    /// Returns the next normalized (lowercased, ascii-alphanumeric-only)
    /// word, or null when the input is exhausted or ac.MAX_WORDS words have
    /// been yielded. Words shorter than 3 characters are skipped (but remain
    /// in the normalized buffer); words longer than ac.MAX_WORD_LENGTH are
    /// truncated.
    pub fn next(self: *Self) ?Word {
        var word_count = self.word_count;
        if (word_count == ac.MAX_WORDS) {
            // we've reached the max number of words we support per entry
            return null;
        }

        var input = std.mem.trimLeft(u8, self.input, &ascii.whitespace);
        if (input.len == 0) {
            // no more input
            return null;
        }

        var norm = self.normalized_buffer;
        var norm_position = self.normalized_position;
        var word_start = norm_position;
        if (word_count > 0) {
            // words in the normalized buffer are separated by single spaces
            norm[norm_position] = ' ';
            norm_position += 1;
            word_start += 1;
        }

        var i: u32 = 0;
        for (input) |b| {
            i += 1;
            if (ascii.isAlphanumeric(b)) {
                norm[norm_position] = ascii.toLower(b);
                norm_position += 1;
                continue;
            }
            if (b == ' ') {
                break;
            }
        }

        // when next() is called again, we'll start scanning input from where we left off
        self.input = input[i..];

        if (norm_position == word_start) {
            // This token contained no alphanumeric characters at all. We have
            // not committed norm_position back to self.normalized_position,
            // so any separating space written above is simply overwritten on
            // the next call. (Bug fix: the previous version decremented the
            // local norm_position here; that decrement was dead code -- the
            // local is discarded -- and underflowed the u32 whenever the very
            // first token was non-alphanumeric, e.g. "& cat".)
            return self.next();
        }

        self.normalized_position = norm_position;

        if (norm_position - word_start < 3) {
            // our "word" is only 1 or 2 characters, skip to the next word
            // TODO: remove this recursion.
            return self.next();
        }

        var word = norm[word_start..norm_position];
        if (word.len > ac.MAX_WORD_LENGTH) {
            word = word[0..ac.MAX_WORD_LENGTH];
        }

        self.word_count = word_count + 1;
        return .{
            .value = word,
            .index = word_count,
        };
    }

    /// The portion of the normalized buffer written so far.
    pub fn normalized(self: Self) []const u8 {
        return self.normalized_buffer[0..self.normalized_position];
    }
};
const t = ac.testing;
test "input: parse single word" {
{
// 2 letter word
var input = try testCollectInput("hi");
defer input.deinit();
try t.expectEqual(0, input.word_count);
try t.expectEqual(0, input.lookup.count());
}
{
// 3 letter word
var input = try testCollectInput("Tea");
defer input.deinit();
try t.expectString("tea", input.normalized);
try t.expectEqual(1,input.word_count);
try t.expectEqual(1,input.lookup.count());
try input.expectWord("tea", 0);
}
const values = [_][]const u8{
"Keemun", " keEMun", "keeMUN ", " keemun", "KEEMUN ", " keemun ", " KEEmUN ",
" Kee%mun ", "keemun\t", "\t\nkeemun", "Kee-mun"
};
for (values) |value| {
var input = try testCollectInput(value);
defer input.deinit();
try t.expectString("keemun", input.normalized);
try t.expectEqual(1, input.word_count);
try t.expectEqual(1, input.lookup.count());
try input.expectWord("keemun", 0);
}
}
test "input: parse two word" {
const values = [_][]const u8{
"black bear",
" black bear",
"black bear ",
" black Bear ",
" black bear ",
"BLACK BEAR",
" BLACK BEAR ",
};
for (values) |value| {
var input = try testCollectInput(value);
defer input.deinit();
try t.expectString("black bear", input.normalized);
try t.expectEqual(2, input.word_count);
try t.expectEqual(2, input.lookup.count());
try input.expectWord("black", 0);
try input.expectWord("bear", 1);
}
{
// ignore short words
var input = try testCollectInput(" Black at");
defer input.deinit();
try t.expectString("black at", input.normalized);
try t.expectEqual(1, input.word_count);
try t.expectEqual(1, input.lookup.count());
try input.expectWord("black", 0);
}
{
// ignore short words
var input = try testCollectInput(" Black a cat ");
defer input.deinit();
try t.expectString("black a cat", input.normalized);
try t.expectEqual(2, input.word_count);
try t.expectEqual(2, input.lookup.count());
try input.expectWord("black", 0);
try input.expectWord("cat", 1);
}
}
test "input: stops at 8 words" {
var input = try testCollectInput("wrd1 wrd2 wrd3 wrd4 wrd5 wrd6 wrd7 wrd8 wrd9");
defer input.deinit();
try t.expectEqual(7, input.word_count);
}
test "input: stops at 31 character words" {
var input = try testCollectInput("0123456789012345678901234567ABC 0123456789012345678901234567VWXYZ");
defer input.deinit();
try t.expectEqual(2, input.word_count);
try input.expectWord("0123456789012345678901234567abc", 0);
try input.expectWord("0123456789012345678901234567vwx", 1);
}
test "input: normalize non-ascii words" {
var input = try testCollectInput("Cat & Dog");
defer input.deinit();
try t.expectEqual(2, input.word_count);
try input.expectWord("cat", 0);
try input.expectWord("dog", 1);
try t.expectString("cat dog", input.normalized);
}
// Collected result of running Input.next() to exhaustion, for the tests above.
const ParseTestResult = struct {
    word_count: u8,
    normalized: []const u8,
    // Kept so deinit can free the buffer that `normalized` and every stored
    // word slice point into.
    normalized_buffer: []const u8,
    lookup: StringHashMap(ac.WordIndex),

    const Self = @This();

    // Asserts that `word` was yielded with the given 0-based index.
    fn expectWord(self: Self, word: []const u8, word_index: ac.WordIndex) !void {
        var actual = self.lookup.get(word) orelse unreachable;
        try t.expectEqual(word_index, actual);
    }

    fn deinit(self: *Self) void {
        t.allocator.free(self.normalized_buffer);
        self.lookup.deinit();
        self.* = undefined;
    }
};

// Runs the iterator over `value` and captures every yielded word.
// The stored word slices point into input.normalized_buffer, which the
// returned ParseTestResult takes ownership of.
fn testCollectInput(value: []const u8) !ParseTestResult {
    var lookup = StringHashMap(ac.WordIndex).init(t.allocator);
    var input = try Input.parse(t.allocator, value);
    while (input.next()) |word| {
        try lookup.put(word.value, word.index);
    }

    return ParseTestResult{
        .lookup = lookup,
        .word_count = input.word_count,
        .normalized = input.normalized(),
        .normalized_buffer = input.normalized_buffer,
    };
}
// if word.len < 4, 0 entries
// if word.len == 4, 2 entries
// else (word.len - 4) * 2 entries
// fuzzyTrigram(word);
// fn fuzzyTrigram(word: []const u8) void {
// for (0..word.len-3) |i| {
// std.debug.print(" {s}{s}\n", .{word[i..i+1], word[i+2..i+4]});
// std.debug.print(" {s}{s}\n", .{word[i..i+2], word[i+3..i+4]});
// }
// }
| https://raw.githubusercontent.com/goblgobl/autocomplete/3d55d5b4d63df2901b28eb32ac656288be3ed516/src/input.zig |
//
// Now that we have optional types, we can apply them to structs.
// The last time we checked in with our elephants, we had to link
// all three of them together in a "circle" so that the last tail
// linked to the first elephant. This is because we had NO CONCEPT
// of a tail that didn't point to another elephant!
//
// We also introduce the handy ".?" shortcut:
//
// const foo = bar.?;
//
// is the same as
//
// const foo = bar orelse unreachable;
//
// See if you can find where we use this shortcut below.
//
// Now let's make those elephant tails optional!
//
const std = @import("std");
// One node in the chain of elephants.
const Elephant = struct {
    letter: u8,
    // Optional tail: null means this elephant is the last in the chain.
    tail: ?*Elephant = null,
    // Set by visitElephants so each elephant is printed at most once.
    visited: bool = false,
};
pub fn main() void {
    var elephant_a = Elephant{ .letter = 'A' };
    var elephant_b = Elephant{ .letter = 'B' };
    var elephant_c = Elephant{ .letter = 'C' };

    // Wire each tail to the next elephant; C's tail loops back to A,
    // so the chain forms a circle.
    elephant_a.tail = &elephant_b;
    elephant_b.tail = &elephant_c;
    elephant_c.tail = &elephant_a;

    visitElephants(&elephant_a);

    std.debug.print("\n", .{});
}
// Visits every elephant exactly once, starting with `first_elephant` and
// walking tail-to-tail. Stops when a tail is null or when we reach an
// elephant that was already visited (i.e. the chain loops back on itself).
fn visitElephants(first_elephant: *Elephant) void {
    var current = first_elephant;
    while (!current.visited) {
        std.debug.print("Elephant {u}. ", .{current.letter});
        current.visited = true;

        // A null tail means there is no next elephant: we're done.
        current = current.tail orelse return;
    }
}
| https://raw.githubusercontent.com/egonik-unlp/Ziglings---egonik-unlp/1d7e89127f1f905d49e5c2c5e176f17a0fdce7e2/exercises/046_optionals2.zig |
const Builder = @import("std").build.Builder;
/// Registers three test steps for the DES test suite -- debug, release-fast
/// and release-small -- and wires all of them into the default step so a
/// bare `zig build` runs every mode.
pub fn build(b: *Builder) void {
    // Debug build (no explicit build mode => Debug).
    const test_debug = b.addTest("test/des_test.zig");
    test_debug.addPackagePath("zig-crypto", "zig_crypto.zig");
    b.step("test_debug", "Run all tests in debug mode").dependOn(&test_debug.step);

    const test_release_fast = b.addTest("test/des_test.zig");
    test_release_fast.setBuildMode(.ReleaseFast);
    test_release_fast.addPackagePath("zig-crypto", "zig_crypto.zig");
    b.step("test_release_fast", "Run all tests in release-fast mode").dependOn(&test_release_fast.step);

    const test_release_small = b.addTest("test/des_test.zig");
    test_release_small.setBuildMode(.ReleaseSmall);
    test_release_small.addPackagePath("zig-crypto", "zig_crypto.zig");
    b.step("test_release_small", "Run all tests in release-small mode").dependOn(&test_release_small.step);

    b.default_step.dependOn(&test_debug.step);
    b.default_step.dependOn(&test_release_fast.step);
    b.default_step.dependOn(&test_release_small.step);
}
| https://raw.githubusercontent.com/schmee/zig-crypto/951fa4ef3c3e655fcd41760ada97d83c9b3cb2a5/build.zig |
const builtin = @import("builtin");
const std = @import("std");
const Env = @import("Env.zig");
const Eval = @import("Eval.zig");
const PROMPT = "lisp-zig> ";
const DELIMITER = if (builtin.os.tag == .windows) '\r' else '\n';
pub fn start(allocator: std.mem.Allocator, stdin: anytype, stdout: anytype) !void {
var env = try Env.init(allocator);
var input = std.ArrayList(u8).init(allocator);
loop: while (true) {
try stdout.writeAll(PROMPT);
stdin.streamUntilDelimiter(input.writer(), DELIMITER, null) catch |err| switch (err) {
error.EndOfStream => {
input.deinit();
break :loop;
},
else => |x| return x,
};
const space = if (builtin.os.tag == .windows) " \n" else " ";
const line = std.mem.trim(u8, input.items, space);
if (line.len == 0) {
input.clearRetainingCapacity();
continue;
}
if (std.mem.eql(u8, line, "exit")) {
break :loop;
}
const val = try Eval.eval(allocator, line, &env);
try val.inspect(stdout);
try stdout.writeByte('\n');
input.clearRetainingCapacity();
}
try stdout.print("Good bye", .{});
}
| https://raw.githubusercontent.com/doccaico/lisp-zig/2601d2ee953584c31d3508c2bfb57c8579905b5f/src/Repl.zig |
const std = @import("std");
const vmath = @import("vectormath.zig");
const platform = @import("platform.zig");
pub const Gl = @import("opengl_bindings");
pub usingnamespace Gl;
pub const c = @cImport({
if (platform.IS_WEB) {
@cDefine("PLATFORM_WEB", "1");
} else {
@cDefine("PLATFORM_DESKTOP", "1");
}
@cInclude("GL/gl.h");
@cInclude("GL/glext.h");
@cInclude("stb_image.c");
});
/// Signature of a platform "get OpenGL proc address" loader.
pub const GetGlProcAddressSig = fn ([*:0]const u8) callconv(.C) (?fn () callconv(.C) void);

/// Resolves every public var declaration of the generated `Gl` bindings by
/// looking up "gl" ++ name through `get_proc_fn` and storing the casted
/// function pointer back into the binding. Panics (via `.?` unwrap) if the
/// loader is null or cannot resolve a symbol.
pub fn linkGlFunctions(get_proc_fn: ?GetGlProcAddressSig) void {
    inline for (@typeInfo(Gl).Struct.decls) |decl| {
        if (comptime decl.is_pub and decl.data == .Var) {
            // GL entry points carry a "gl" prefix that the binding names omit.
            const proc_name = "gl" ++ decl.name ++ "\x00";
            const bare_proc = get_proc_fn.?(proc_name).?;
            const FnType = @TypeOf(@field(Gl, decl.name));
            @field(Gl, decl.name) = @ptrCast(FnType, bare_proc);
        }
    }
}
// Tracks the most recently bound program so draw calls can reuse it when no
// explicit shader program is supplied.
var currently_bound_shader_program: ?ShaderProgram = null;

/// A linked GL program plus the uniform locations the renderer needs.
pub const ShaderProgram = struct {
    program_id: Gl.Uint,
    mvp_matrix_uniform_id: Gl.Int = 0,
    texture_sampler_uniform_id: Gl.Int = 0,
};
/// Queries compile (or link, when `is_program`) status for `shader_id` and
/// logs the GL info log on failure. Does not abort: callers continue with
/// the (possibly broken) object.
fn checkForShaderCompileErrors(shader_id: Gl.Uint, comptime is_program: bool) void {
    var compile_result: Gl.Int = c.GL_FALSE;
    var info_log_length: Gl.Int = 0;
    // Programs and shaders use different query/info-log entry points.
    const getShaderParam = if (is_program) Gl.GetProgramiv else Gl.GetShaderiv;
    const status_type = if (is_program) c.GL_LINK_STATUS else c.GL_COMPILE_STATUS;
    getShaderParam(shader_id, status_type, &compile_result);
    getShaderParam(shader_id, c.GL_INFO_LOG_LENGTH, &info_log_length);
    if (compile_result == c.GL_FALSE) {
        // +1 leaves room for the terminating NUL the driver writes.
        var err_msg = std.heap.page_allocator.alloc(u8, @intCast(u32, info_log_length) + 1) catch @panic("Failed to alloc.");
        defer std.heap.page_allocator.free(err_msg);
        const getInfoLog = if (is_program) Gl.GetProgramInfoLog else Gl.GetShaderInfoLog;
        getInfoLog(shader_id, @intCast(u32, info_log_length), null, @ptrCast(Gl.String, err_msg));
        std.log.err("OpenGL {s}\n", .{err_msg});
    }
}
/// Compiles a vertex + fragment shader pair and links them into a program.
/// Errors are logged (not returned); the program id is returned regardless.
/// NOTE(review): the two shader objects are never detached/deleted after
/// linking, so their GL handles persist for the process lifetime -- confirm
/// whether glDeleteShader calls should be added here.
pub fn compileShaders(vert_shader_src: []const u8, frag_shader_src: []const u8) Gl.Uint {
    const vert_shader = Gl.CreateShader(c.GL_VERTEX_SHADER);
    Gl.ShaderSource(vert_shader, 1, &@ptrCast([*c]const u8, vert_shader_src), null);
    Gl.CompileShader(vert_shader);
    checkForShaderCompileErrors(vert_shader, false);

    const frag_shader = Gl.CreateShader(c.GL_FRAGMENT_SHADER);
    Gl.ShaderSource(frag_shader, 1, &@ptrCast([*c]const u8, frag_shader_src), null);
    Gl.CompileShader(frag_shader);
    checkForShaderCompileErrors(frag_shader, false);

    // Link the program
    const shader_program_id = Gl.CreateProgram();
    Gl.AttachShader(shader_program_id, vert_shader);
    Gl.AttachShader(shader_program_id, frag_shader);
    Gl.LinkProgram(shader_program_id);
    checkForShaderCompileErrors(shader_program_id, true);
    return shader_program_id;
}
/// Compiles+links the given sources and caches the locations of the
/// "transform" and "texSampler" uniforms used by the renderer.
pub fn createShaderProgram(vert_shader_src: []const u8, frag_shader_src: []const u8) ShaderProgram {
    const shader_program = compileShaders(vert_shader_src, frag_shader_src);
    const mvp_matrix_id = Gl.GetUniformLocation(shader_program, "transform");
    const texture_sampler_id = Gl.GetUniformLocation(shader_program, "texSampler");
    return ShaderProgram{
        .program_id = shader_program,
        .mvp_matrix_uniform_id = mvp_matrix_id,
        .texture_sampler_uniform_id = texture_sampler_id
    };
}
/// Makes `maybe_shader_program` current (when non-null) and returns whichever
/// program is now active. The `.?` unwrap asserts that some program has been
/// bound at least once.
pub fn setActiveShaderProgram(maybe_shader_program: ?ShaderProgram) ShaderProgram {
    if (maybe_shader_program) |shader_program| {
        Gl.UseProgram(shader_program.program_id);
        currently_bound_shader_program = shader_program;
    }
    return currently_bound_shader_program.?;
}

/// Returns the currently bound program; panics if none was ever bound.
pub fn getActiveShaderProgram() ShaderProgram {
    return currently_bound_shader_program.?;
}
/// Uploads a single 4x4 matrix to the given uniform location (no transpose).
pub fn setMatrixUniform(matrix: vmath.Mat4, uniform_id: Gl.Int) void {
    Gl.UniformMatrix4fv(
        uniform_id, // Uniform ID
        1, // count
        c.GL_FALSE, // transpose?
        matrix.rawPtr() // ptr to matrix data
    );
}
/// A GPU-resident mesh: position/uv/color vertex buffers plus an optional
/// texture, the shader to draw with, and a model matrix.
pub const VertexObject = struct {
    vertex_pos_buffer_id: Gl.Uint,
    // 0 means "no such buffer" for the optional attribute buffers below
    // (0 is never a valid buffer name generated by glGenBuffers).
    vertex_uv_buffer_id: Gl.Uint,
    vertex_color_buffer_id: Gl.Uint,
    vertex_count: usize,
    texture_id: Gl.Uint,
    // null means "reuse whatever program is currently bound" (see draw()).
    shader_program: ?ShaderProgram = null,
    model_matrix: vmath.Mat4,
    draw_kind: DrawKind,

    pub const DrawKind = enum {
        triangles,
        lines
    };

    /// Creates GL buffers for the given attribute arrays (uvs/colors
    /// optional) and uploads the texture when `texture_data` decodes.
    /// `vertices` is interpreted as packed xyz triples (vertex_count = len/3).
    /// NOTE(review): texture decode failures are silently swallowed
    /// (`catch null`) -- the object then renders untextured.
    pub fn init(vertices: []const f32,
                uvs: ?[]const f32,
                colors: ?[]const f32,
                texture_data: ?LabeledData,
                shader_program: ?ShaderProgram,
                model_matrix: vmath.Mat4,
                draw_kind: DrawKind) VertexObject
    {
        var texture_id: Gl.Uint = 0;
        const maybe_bitmap = Texture.initFromMemory(texture_data) catch null;
        if (maybe_bitmap) |bitmap| {
            texture_id = bitmap.sendToGpu();
            // CPU-side pixels are no longer needed once uploaded.
            bitmap.deinit();
        }

        return VertexObject{
            .vertex_pos_buffer_id = generateAndSendBufferData(vertices),
            .vertex_uv_buffer_id = generateAndSendBufferData(uvs),
            .vertex_color_buffer_id = generateAndSendBufferData(colors),
            .vertex_count = vertices.len/3,
            .shader_program = shader_program,
            .texture_id = texture_id,
            .model_matrix = model_matrix,
            .draw_kind = draw_kind
        };
    }

    /// Deletes the GL buffers/texture that were actually created (id != 0).
    pub fn deinit(self: VertexObject) void {
        Gl.DeleteBuffers(1, self.vertex_pos_buffer_id);
        if (self.vertex_uv_buffer_id != 0)
            Gl.DeleteBuffers(1, self.vertex_uv_buffer_id);
        if (self.vertex_color_buffer_id != 0)
            Gl.DeleteBuffers(1, self.vertex_color_buffer_id);
        if (self.texture_id != 0)
            Gl.DeleteTextures(1, self.texture_id);
    }

    /// Re-uploads position data and recomputes vertex_count (len/3).
    pub fn updateVertexBuffer(self: *VertexObject, data: []const f32) void {
        if (data.len > 0)
            sendBufferDataToGpu(f32, self.vertex_pos_buffer_id, data);
        self.vertex_count = data.len / 3;
    }

    /// Re-uploads UV data into the existing UV buffer.
    pub fn updateUvBuffer(self: *VertexObject, data: []const f32) void {
        if (data.len > 0)
            sendBufferDataToGpu(f32, self.vertex_uv_buffer_id, data);
    }

    /// Re-uploads color data into the existing color buffer.
    pub fn updateColorBuffer(self: *VertexObject, data: []const f32) void {
        if (data.len > 0)
            sendBufferDataToGpu(f32, self.vertex_color_buffer_id, data);
    }

    /// Binds shader/texture/attribute state and issues the draw call.
    pub fn draw(self: VertexObject, camera_transform: vmath.Mat4) void {
        // Enable shader.
        const program = setActiveShaderProgram(self.shader_program);

        // Send Model matrix to shader.
        // NOTE(review): the model matrix multiply is commented out, so only
        // the camera transform is uploaded and self.model_matrix is
        // effectively unused here -- confirm this is intentional.
        //const transform = self.model_matrix.mul(camera_transform);
        const transform = camera_transform;
        setMatrixUniform(transform, program.mvp_matrix_uniform_id);

        // Bind texture in graphics card.
        if (self.texture_id != 0) {
            Gl.ActiveTexture(c.GL_TEXTURE0);
            Gl.BindTexture(c.GL_TEXTURE_2D, self.texture_id);
            // Set sampler to use bound texture (texture unit 0).
            Gl.Uniform1i(program.texture_sampler_uniform_id, 0);
        }

        // 1st attribute buffer : vertices
        Gl.EnableVertexAttribArray(0);
        Gl.BindBuffer(c.GL_ARRAY_BUFFER, self.vertex_pos_buffer_id);
        Gl.VertexAttribPointer(
            0, // attribute index.
            3, // size
            c.GL_FLOAT, // type
            c.GL_FALSE, // normalized?
            0, // stride
            null // array buffer offset
        );

        // 2nd attribute buffer : UVs
        if (self.vertex_uv_buffer_id != 0) {
            Gl.EnableVertexAttribArray(1);
            Gl.BindBuffer(c.GL_ARRAY_BUFFER, self.vertex_uv_buffer_id);
            Gl.VertexAttribPointer(
                1, // attribute index.
                2, // size
                c.GL_FLOAT, // type
                c.GL_FALSE, // normalized?
                0, // stride
                null // array buffer offset
            );
        }

        // 3rd attribute buffer : Colors
        if (self.vertex_color_buffer_id != 0) {
            Gl.EnableVertexAttribArray(2);
            Gl.BindBuffer(c.GL_ARRAY_BUFFER, self.vertex_color_buffer_id);
            Gl.VertexAttribPointer(
                2, // attribute index.
                4, // size
                c.GL_FLOAT, // type
                c.GL_FALSE, // normalized?
                0, // stride
                null // array buffer offset
            );
        }

        // Draw it.
        const draw_kind_enum: u32 = switch(self.draw_kind) {
            .triangles => c.GL_TRIANGLES,
            .lines => c.GL_LINES
        };
        Gl.DrawArrays(draw_kind_enum, 0, self.vertex_count); // Starting from vertex 0; 3 vertices total -> 1 triangle

        Gl.DisableVertexAttribArray(0);
        if (self.vertex_uv_buffer_id != 0) {
            Gl.DisableVertexAttribArray(1);
        }
        if (self.vertex_color_buffer_id != 0) {
            Gl.DisableVertexAttribArray(2);
        }
    }
};
/// A named blob of bytes (e.g. an embedded texture file).
pub const LabeledData = struct {
    name: []const u8,
    data: []const u8
};

const ReadTextureError = error {
    ReadError
};
/// A CPU-side bitmap that can be uploaded to the GPU with sendToGpu().
pub const Texture = struct {
    width: usize = 0,
    height: usize = 0,
    data: [*c]u8 = undefined,

    // Static all-0xFF pixel data backing init1by1().
    var WHITE_PIXEL = [_]u8{255} ** (4*4);

    /// A 1x1 white texture (useful as a neutral sampler binding).
    pub fn init1by1() Texture {
        return Texture{
            .width = 1,
            .height = 1,
            .data = @ptrCast([*c]u8, WHITE_PIXEL[0..])
        };
    }

    /// Wraps already-decoded pixel data; no copy is made.
    pub fn initFromPixels(pixels: [*c]u8, width: usize, height: usize) Texture {
        return Texture{
            .width = width,
            .height = height,
            .data = pixels
        };
    }

    /// Decodes an image file held in memory via stb_image. Returns null when
    /// `texture_data` is null, error.ReadError when decoding fails.
    pub fn initFromMemory(texture_data: ?LabeledData) !?Texture {
        if (texture_data == null) {
            return null;
        }
        const tdata = texture_data.?;
        var width: i32 = undefined;
        var height: i32 = undefined;
        var pixel_size: i32 = undefined;
        // Last arg 0 = keep the file's native channel count (pixel_size).
        const data = c.stbi_load_from_memory(
            &tdata.data[0],
            @intCast(c_int, tdata.data.len),
            &width,
            &height,
            &pixel_size,
            0
        );
        if (data == 0) {
            std.log.err("Texture.initFromMemory: unable to read texture {s}\n", .{tdata.name});
            return ReadTextureError.ReadError;
        }
        return Texture{
            .width = @intCast(usize, width),
            .height = @intCast(usize, height),
            .data = data
        };
    }

    /// Frees pixels owned by stb_image (only valid for initFromMemory data).
    pub fn deinit(self: *const Texture) void {
        c.stbi_image_free(self.data);
    }

    /// Uploads the bitmap as a GL_RGB texture with nearest filtering and
    /// returns the texture id (0 when width/height are zero).
    /// NOTE(review): upload always assumes 3-channel RGB, but initFromMemory
    /// keeps the file's native channel count -- a 4-channel image would be
    /// misread here. Confirm all source images are RGB.
    pub fn sendToGpu(self: *const Texture) Gl.Uint {
        var texture_id: Gl.Uint = 0;
        if (self.width > 0 and self.height > 0) {
            Gl.GenTextures(1, &texture_id);
            Gl.BindTexture(c.GL_TEXTURE_2D, texture_id);
            Gl.TexImage2D(
                c.GL_TEXTURE_2D,
                0,
                c.GL_RGB,
                @intCast(Gl.Sizei, self.width),
                @intCast(Gl.Sizei, self.height),
                0,
                c.GL_RGB,
                c.GL_UNSIGNED_BYTE,
                &self.data[0]
            );
            Gl.TexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MAG_FILTER, c.GL_NEAREST);
            Gl.TexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MIN_FILTER, c.GL_NEAREST);
        }
        return texture_id;
    }
};
/// Uploads `data` into an existing GL buffer object via GL_ARRAY_BUFFER.
fn sendBufferDataToGpu(comptime data_type: type, buffer_id: Gl.Uint, data: []const data_type) void {
    const size = @intCast(u32, data.len * @sizeOf(data_type));
    // The following commands will talk about our 'vertexbuffer' buffer
    Gl.BindBuffer(c.GL_ARRAY_BUFFER, buffer_id);
    // Give our vertices to OpenGL.
    Gl.BufferData(c.GL_ARRAY_BUFFER, size, &data[0], c.GL_STATIC_DRAW);
}

/// Generates a buffer and uploads `maybe_data` into it. Returns 0 when
/// there is no data (0 is never a name glGenBuffers produces).
fn generateAndSendBufferData(maybe_data: ?[]const f32) Gl.Uint {
    if (maybe_data) |data| {
        var buffer_id: Gl.Uint = 0;
        Gl.GenBuffers(1, &buffer_id);
        sendBufferDataToGpu(f32, buffer_id, data);
        return buffer_id;
    } else {
        return 0;
    }
}
/// Clears the color and depth buffers to the given RGB (alpha forced to 1).
pub fn clear(r: f32, g: f32, b: f32) void {
    c.glClearColor(r, g, b, 1.0);
    c.glClear(c.GL_COLOR_BUFFER_BIT | c.GL_DEPTH_BUFFER_BIT);
}
/// Returns the GLSL version string reported by the driver.
pub fn getShaderVersion() [*c]const u8 {
    return c.glGetString(c.GL_SHADING_LANGUAGE_VERSION);
} | https://raw.githubusercontent.com/perky/ZigGraphics/f89b92f1a893c4df62d9287f376f53c8ea3cc9d1/src/opengl.zig
const std = @import("std");
const Map = @import("./map.zig").Map;
pub fn main() anyerror!void {
    var map = Map.init();
    defer map.deinit();

    // Feed the map one stdin line at a time.
    const stdin = std.io.getStdIn().reader();
    var line_buf: [10240]u8 = undefined;
    while (try stdin.readUntilDelimiterOrEof(&line_buf, '\n')) |line| {
        try map.process_line(line);
    }

    const total_risk = map.get_total_risk();
    const stdout = std.io.getStdOut().writer();
    try stdout.print("Total risk: {}\n", .{total_risk});
}
| https://raw.githubusercontent.com/gonzus/AdventOfCode/7e972b92a23db29461b2869713a3251998af5822/2021/p09/p09a.zig |
const std = @import("std");
const t = @import("../../types.zig");
const vk = @import("vulkan");
const zwin = @import("zwin");
const Allocator = @import("../../allocator.zig");
const Ctx = @import("Context.zig");
const Pipeline = @import("Pipeline.zig");
const shaders = @import("shaders");
const Buffer = @import("Buffer.zig");
// Vertex attribute locations shared with the shaders.
pub const POSITION_ATTRIBUTE = 0;
pub const COLOR_ATTRIBUTE = 1;
pub const TEXTURE_ATTRIBUTE = 2;

/// Per-draw state (model matrix + bound texture handle) read by Mesh.draw.
pub const MeshContext = struct {
    // Identity matrix by default.
    matrix: [16]f32 = [_]f32{
        1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, 0,
        0, 0, 0, 1,
    },
    // 1337 appears to act as a "no texture" sentinel -- TODO confirm
    // against the shader/texture-manager side.
    texture: u32 = 1337,
};

// Global context consumed by every Mesh.draw call.
pub var ActiveMeshContext = MeshContext{};
/// A GPU mesh backed by Vulkan vertex/index buffers. The vertex layout is
/// described dynamically (VertexInput*Description2EXT), set per draw call.
pub const Mesh = struct {
    /// Feature bits pushed to the shader; reinterpreted as a u32 in draw().
    pub const Flags = packed struct {
        texture_enabled: u1,
        color_enabled: u1,
        fixed_point5: u1,
        // NOTE(review): `reserved` is never written before the u32
        // reinterpret in draw(), so those bits are undefined -- confirm the
        // shader masks them out.
        reserved: u29,
    };

    vert_buffer: vk.Buffer = undefined,
    vert_memory: vk.DeviceMemory = undefined,
    idx_buffer: vk.Buffer = undefined,
    idx_memory: vk.DeviceMemory = undefined,
    // Guards the one-time binding/attribute setup in update().
    initialized: bool = false,
    idx_count: usize = 0,
    // Set by deinit(); actual GPU resources are released later in gc().
    dead: bool = false,
    flags: Flags = undefined,
    constants: Pipeline.PushConstants = Pipeline.PushConstants{},
    bindings: std.ArrayList(vk.VertexInputBindingDescription2EXT) = undefined,
    attributes: std.ArrayList(vk.VertexInputAttributeDescription2EXT) = undefined,

    // Maps a layout entry to the matching vk.Format.
    // NOTE(review): only Float and UByte backings are handled explicitly;
    // anything else falls through to r32g32b32_sfloat -- confirm intended.
    fn get_format(dimensions: usize, normalized: bool, backing: t.VertexLayout.Type) vk.Format {
        if (backing == .Float) {
            return switch (dimensions) {
                1 => .r32_sfloat,
                2 => .r32g32_sfloat,
                3 => .r32g32b32_sfloat,
                4 => .r32g32b32a32_sfloat,
                else => unreachable,
            };
        } else if (backing == .UByte) {
            return switch (dimensions) {
                1 => if (normalized) .r8_unorm else .r8_uint,
                2 => if (normalized) .r8g8_unorm else .r8g8_uint,
                3 => if (normalized) .r8g8b8_unorm else .r8g8b8_uint,
                4 => if (normalized) .r8g8b8a8_unorm else .r8g8b8a8_uint,
                else => unreachable,
            };
        }

        return .r32g32b32_sfloat;
    }

    /// Uploads vertex data and u16 index data through staging buffers; on
    /// the first call also builds the binding/attribute descriptions from
    /// `layout` and derives the shader flag bits.
    /// NOTE(review): `initialized` is never set to true in this function, so
    /// the setup block re-runs (and re-appends descriptions) if update() is
    /// called more than once on the same mesh -- confirm single-call usage.
    pub fn update(ctx: *anyopaque, vertices: *anyopaque, vert_count: usize, indices: *anyopaque, ind_count: usize, layout: *const t.VertexLayout) void {
        const self = t.coerce_ptr(Mesh, ctx);

        const alloc = Allocator.allocator() catch unreachable;
        if (!self.initialized) {
            self.bindings = std.ArrayList(vk.VertexInputBindingDescription2EXT).init(alloc);
            self.bindings.append(.{
                .binding = 0,
                .stride = @intCast(layout.size),
                .input_rate = .vertex,
                .divisor = 1,
            }) catch unreachable;

            self.attributes = std.ArrayList(vk.VertexInputAttributeDescription2EXT).init(alloc);

            if (layout.vertex) |entry| {
                self.attributes.append(vk.VertexInputAttributeDescription2EXT{
                    .binding = 0,
                    .location = POSITION_ATTRIBUTE,
                    .offset = @intCast(entry.offset),
                    .format = get_format(entry.dimensions, entry.normalize, entry.backing_type),
                }) catch unreachable;

                // UShort positions signal fixed-point (see flag name) data.
                if (entry.backing_type == t.VertexLayout.Type.UShort) {
                    self.flags.fixed_point5 = 1;
                } else {
                    self.flags.fixed_point5 = 0;
                }
            } else {
                self.flags.fixed_point5 = 0;
            }

            if (layout.color) |entry| {
                self.attributes.append(vk.VertexInputAttributeDescription2EXT{
                    .binding = 0,
                    .location = COLOR_ATTRIBUTE,
                    .offset = @intCast(entry.offset),
                    .format = get_format(entry.dimensions, entry.normalize, entry.backing_type),
                }) catch unreachable;

                self.flags.color_enabled = 1;
            } else {
                self.flags.color_enabled = 0;
            }

            if (layout.texture) |entry| {
                self.attributes.append(vk.VertexInputAttributeDescription2EXT{
                    .binding = 0,
                    .location = TEXTURE_ATTRIBUTE,
                    .offset = @intCast(entry.offset),
                    .format = get_format(entry.dimensions, entry.normalize, entry.backing_type),
                }) catch unreachable;

                self.flags.texture_enabled = 1;
            } else {
                self.flags.texture_enabled = 0;
            }
        }

        const vert_size = layout.size * vert_count;
        const idx_size = @sizeOf(u16) * ind_count;

        {
            // Create staging buffer
            var staging_buffer: vk.Buffer = undefined;
            var staging_buffer_memory: vk.DeviceMemory = undefined;
            Buffer.create(
                vert_size,
                .{ .transfer_src_bit = true },
                .{ .host_visible_bit = true, .host_coherent_bit = true },
                &staging_buffer,
                &staging_buffer_memory,
            ) catch unreachable;
            defer Ctx.vkd.freeMemory(Ctx.device, staging_buffer_memory, null);
            defer Ctx.vkd.destroyBuffer(Ctx.device, staging_buffer, null);

            // Transfer data from RAM to staging buffer
            {
                const data = Ctx.vkd.mapMemory(Ctx.device, staging_buffer_memory, 0, vk.WHOLE_SIZE, .{}) catch unreachable;
                defer Ctx.vkd.unmapMemory(Ctx.device, staging_buffer_memory);

                const gpu_buffer: [*]u8 = @ptrCast(@alignCast(data));
                const vert_buffer: [*]const u8 = @ptrCast(@alignCast(vertices));

                var i: usize = 0;
                while (i < vert_size) : (i += 1) {
                    gpu_buffer[i] = vert_buffer[i];
                }
            }

            // Create vertex buffer
            Buffer.create(
                vert_size,
                .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
                .{ .device_local_bit = true },
                &self.vert_buffer,
                &self.vert_memory,
            ) catch unreachable;

            Buffer.copy(staging_buffer, self.vert_buffer, vert_size) catch unreachable;
        }

        {
            // Create staging buffer
            var staging_buffer: vk.Buffer = undefined;
            var staging_buffer_memory: vk.DeviceMemory = undefined;
            Buffer.create(
                idx_size,
                .{ .transfer_src_bit = true },
                .{ .host_visible_bit = true, .host_coherent_bit = true },
                &staging_buffer,
                &staging_buffer_memory,
            ) catch unreachable;
            defer Ctx.vkd.freeMemory(Ctx.device, staging_buffer_memory, null);
            defer Ctx.vkd.destroyBuffer(Ctx.device, staging_buffer, null);

            // Transfer data from RAM to staging buffer
            {
                const data = Ctx.vkd.mapMemory(Ctx.device, staging_buffer_memory, 0, vk.WHOLE_SIZE, .{}) catch unreachable;
                defer Ctx.vkd.unmapMemory(Ctx.device, staging_buffer_memory);

                const gpu_buffer: [*]u8 = @ptrCast(@alignCast(data));
                const idx_buffer: [*]const u8 = @ptrCast(@alignCast(indices));

                var i: usize = 0;
                while (i < idx_size) : (i += 1) {
                    gpu_buffer[i] = idx_buffer[i];
                }
            }

            // Create index buffer
            Buffer.create(
                idx_size,
                .{ .transfer_dst_bit = true, .index_buffer_bit = true },
                .{ .device_local_bit = true },
                &self.idx_buffer,
                &self.idx_memory,
            ) catch unreachable;

            Buffer.copy(staging_buffer, self.idx_buffer, idx_size) catch unreachable;
            self.idx_count = ind_count;
        }
    }

    /// Records bind + push-constant + indexed-draw commands into the
    /// pipeline's current command buffer, pulling per-draw state from
    /// ActiveMeshContext.
    pub fn draw(ctx: *anyopaque) void {
        const self = t.coerce_ptr(Mesh, ctx);
        const cmdbuf = Pipeline.current_cmd_buffer.?.*;

        const offsets = [_]vk.DeviceSize{0};
        Ctx.vkd.cmdSetVertexInputEXT(
            cmdbuf,
            @intCast(self.bindings.items.len),
            self.bindings.items.ptr,
            @intCast(self.attributes.items.len),
            self.attributes.items.ptr,
        );

        Ctx.vkd.cmdBindVertexBuffers(
            cmdbuf,
            0,
            1,
            @ptrCast(&self.vert_buffer),
            &offsets,
        );

        Ctx.vkd.cmdBindIndexBuffer(
            cmdbuf,
            self.idx_buffer,
            0,
            .uint16,
        );

        self.constants.model = ActiveMeshContext.matrix;
        self.constants.texture = ActiveMeshContext.texture;
        // Pack the flag bits into the push-constant u32.
        self.constants.flags = @as(*u32, @ptrCast(&self.flags)).*;

        Ctx.vkd.cmdPushConstants(
            cmdbuf,
            Pipeline.pipeline_layout,
            .{ .vertex_bit = true },
            0,
            @sizeOf(Pipeline.PushConstants),
            &self.constants,
        );

        Ctx.vkd.cmdDrawIndexed(
            cmdbuf,
            @intCast(self.idx_count),
            1,
            0,
            0,
            0,
        );
    }

    /// Marks the mesh for destruction; resources are freed by gc().
    pub fn deinit(ctx: *anyopaque) void {
        var self = t.coerce_ptr(Mesh, ctx);
        self.dead = true;
    }

    /// Releases GPU buffers and CPU lists once the mesh has been marked dead.
    pub fn gc(self: *Mesh) void {
        if (self.dead) {
            Ctx.vkd.destroyBuffer(Ctx.device, self.idx_buffer, null);
            Ctx.vkd.freeMemory(Ctx.device, self.idx_memory, null);

            Ctx.vkd.destroyBuffer(Ctx.device, self.vert_buffer, null);
            Ctx.vkd.freeMemory(Ctx.device, self.vert_memory, null);

            self.bindings.clearAndFree();
            self.attributes.clearAndFree();
        }
    }

    /// Adapts this mesh to the engine's generic MeshInternal vtable.
    pub fn interface(self: *Mesh) t.MeshInternal {
        return .{
            .ptr = self,
            .size = @sizeOf(Mesh),
            .tab = .{
                .update = update,
                .draw = draw,
                .deinit = Mesh.deinit,
            },
        };
    }
};
/// Owns all Mesh instances; destruction of dead meshes is deferred to gc().
pub const MeshManager = struct {
    list: std.ArrayList(*Mesh) = undefined,

    pub fn init(self: *MeshManager) !void {
        self.list = std.ArrayList(*Mesh).init(try Allocator.allocator());
    }

    /// Frees every mesh marked dead and compacts the list down to the
    /// surviving meshes.
    pub fn gc(self: *MeshManager) void {
        const alloc = Allocator.allocator() catch unreachable;
        var new_list = std.ArrayList(*Mesh).init(alloc);

        for (self.list.items) |mesh| {
            if (mesh.dead) {
                mesh.gc();
                alloc.destroy(mesh);
            } else {
                new_list.append(mesh) catch unreachable;
            }
        }

        self.list.clearAndFree();
        self.list = new_list;
    }

    /// Force-destroys every mesh (marking each dead first) and the list.
    pub fn deinit(self: *MeshManager) void {
        const alloc = Allocator.allocator() catch unreachable;
        for (self.list.items) |mesh| {
            mesh.dead = true;
            mesh.gc();
            alloc.destroy(mesh);
        }
        self.list.clearAndFree();
        self.list.deinit();
    }
};
| https://raw.githubusercontent.com/IridescenceTech/Aether-Platform/f94978e25bc2e532bdf616e06ac621705c96812b/src/graphics/Vulkan/Mesh.zig |
const std = @import("std");
const testing = std.testing;
const print = std.debug.print;
/// Returns true if bit `index` is set in `buf`, counting bits MSB-first
/// within each byte (bit 0 is the high bit of byte 0).
/// Indices past the end of `buf` return false.
pub fn hasPiece(buf: []const u8, index: u32) bool {
    const byte_index = index / 8;
    // byte_index is unsigned, so the original `byte_index < 0` check was
    // always false; only the upper bound matters.
    if (byte_index >= buf.len) {
        return false;
    }
    const offset: u3 = @truncate(index % 8);
    return buf[byte_index] >> (7 - offset) & 1 != 0;
}
// Verifies MSB-first bit indexing across both bytes. Note `outputs` has 20
// entries while the buffer only holds 16 bits: indices 16-19 exercise the
// out-of-range path, which must report false.
test "hasPiece" {
    const buf = [_]u8{ 0b01010100, 0b01010100 };
    const outputs = [_]bool{ false, true, false, true, false, true, false, false, false, true, false, true, false, true, false, false, false, false, false, false };
    for (0..outputs.len) |i| {
        try testing.expectEqual(outputs[i], hasPiece(&buf, @intCast(i)));
    }
}
/// Sets bit `index` in `buf`, counting bits MSB-first within each byte
/// (bit 0 is the high bit of byte 0). Indices past the end of `buf` are
/// silently ignored.
pub fn setPiece(buf: []u8, index: u32) void {
    const byte_index = index / 8;
    // byte_index is unsigned, so the original `byte_index < 0` check was
    // always false; only the upper bound matters.
    if (byte_index >= buf.len) {
        return;
    }
    const offset: u3 = @truncate(index % 8);
    buf[byte_index] |= @as(u8, 1) << (7 - offset);
}
// The original test called setPiece but asserted nothing, so it could never
// fail. Index 4 targets byte 0, bit mask 1 << (7-4) = 0b00001000, turning
// 0b01010100 into 0b01011100; byte 1 must be untouched.
test "setPiece OK" {
    var input = [_]u8{ 0b01010100, 0b01010100 };
    setPiece(&input, 4);
    try testing.expectEqual(@as(u8, 0b01011100), input[0]);
    try testing.expectEqual(@as(u8, 0b01010100), input[1]);
}
| https://raw.githubusercontent.com/duyquang6/zigtor/30984dd24ed912e09c90607438116430b0c8a17c/src/bitfield.zig |
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const maxInt = std.math.maxInt;
// Returns whether x has a normalized representation (i.e. integer part of mantissa is 1).
// Returns whether x has a normalized representation (i.e. integer part of mantissa is 1).
//
// Bit trick, shown here for f16 (5 exponent bits, 10 mantissa bits):
//   - `bits + (1 << 10)` increments the biased exponent field by 1
//     (any overflow spills into the sign bit).
//   - `& (maxInt(u16) >> 1)` clears the sign bit, making the test
//     sign-independent and discarding that overflow.
//   - `>= (1 << 11)` holds iff the incremented exponent field is >= 2,
//     i.e. the original exponent was in [1, max-1]:
//       * zero/subnormal: exponent 0 -> 1, result < 1 << 11 -> false
//       * inf/NaN: exponent all-ones wraps to 0 after masking -> false
//       * normal: exponent >= 1 and < all-ones -> true
// The f32/f64/f128 branches are the same construction with 23/52/112
// mantissa bits respectively.
// NOTE: uses the pre-0.10 two-argument @bitCast form; this file targets an
// older Zig compiler, so the syntax is left as-is.
pub fn isNormal(x: anytype) bool {
    const T = @TypeOf(x);
    switch (T) {
        f16 => {
            const bits = @bitCast(u16, x);
            return (bits + (1 << 10)) & (maxInt(u16) >> 1) >= (1 << 11);
        },
        f32 => {
            const bits = @bitCast(u32, x);
            return (bits + (1 << 23)) & (maxInt(u32) >> 1) >= (1 << 24);
        },
        f64 => {
            const bits = @bitCast(u64, x);
            return (bits + (1 << 52)) & (maxInt(u64) >> 1) >= (1 << 53);
        },
        f128 => {
            const bits = @bitCast(u128, x);
            return (bits + (1 << 112)) & (maxInt(u128) >> 1) >= (1 << 113);
        },
        else => {
            @compileError("isNormal not implemented for " ++ @typeName(T));
        },
    }
}
// Covers the three classes per float width: NaN (not normal), zero
// (not normal — zero/subnormals have a zero exponent field), and 1.0
// (normal). Subnormal and infinity inputs are not exercised here.
test "math.isNormal" {
    try expect(!isNormal(math.nan(f16)));
    try expect(!isNormal(math.nan(f32)));
    try expect(!isNormal(math.nan(f64)));
    try expect(!isNormal(math.nan(f128)));
    try expect(!isNormal(@as(f16, 0)));
    try expect(!isNormal(@as(f32, 0)));
    try expect(!isNormal(@as(f64, 0)));
    try expect(!isNormal(@as(f128, 0)));
    try expect(isNormal(@as(f16, 1.0)));
    try expect(isNormal(@as(f32, 1.0)));
    try expect(isNormal(@as(f64, 1.0)));
    try expect(isNormal(@as(f128, 1.0)));
}
| https://raw.githubusercontent.com/natanalt/zig-x86_16/1b38fc3ef5e539047c76604ffe71b81e246f1a1e/lib/std/math/isnormal.zig |
//
// When Andrew Kelley announced the idea of a new programming language
// - namely Zig - in his blog on February 8, 2016, he also immediately
// stated his ambitious goal: to replace the C language!
//
// In order to be able to achieve this goal at all, Zig should be
// as compatible as possible with its "predecessor".
// Only if it is possible to exchange individual modules in existing
// C programs without having to use complicated wrappers,
// the undertaking has a chance of success.
//
// So it is not surprising that calling C functions and vice versa
// is extremely "smooth".
//
// To call C functions in Zig, you only need to specify the library
// that contains said function. For this purpose there is a built-in
// function corresponding to the well-known @import():
//
// @cImport()
//
// All required libraries can now be included in the usual Zig notation:
//
// const c = @cImport({
// @cInclude("stdio.h");
// @cInclude("...");
// });
//
// Now a function can be called via the (in this example) constant 'c':
//
// c.puts("Hello world!");
//
// By the way, most C functions have return values in the form of an
// integer value. Errors can then be evaluated (return < 0) or other
// information can be obtained. For example, 'puts' returns the number
// of characters output.
//
// So that all this does not remain a dry theory now, let's just start
// and call a C function out of Zig.
// our well-known "import" for Zig
const std = @import("std");
// and here the new import for C
const c = @cImport({
@cInclude("unistd.h");
});
pub fn main() void {
    // In order to output text that can be evaluated by the
    // Zig Builder, we need to write it to the Error output.
    // In Zig, we do this with "std.debug.print" and in C we can
    // specify a file descriptor i.e. 2 for error console.
    //
    // 'write' comes from unistd.h, so it must be reached through the
    // 'c' import namespace — a bare `write` is an unresolved identifier.
    const c_res = c.write(2, "Hello C from Zig!", 17);

    // let's see what the result from C is:
    std.debug.print(" - C result is {d} chars written.\n", .{c_res});
}
//
// Something must be considered when compiling with C functions.
// Namely that the Zig compiler knows that it should include
// corresponding libraries. For this purpose we call the compiler
// with the parameter "lc" for such a program,
// e.g. "zig run -lc hello_c.zig".
//
| https://raw.githubusercontent.com/smn-1/ziglings-smn/b4a446d4317748bd4d01c7b35e857928275fe33e/exercises/093_hello_c.zig |