text
stringlengths
32
314k
url
stringlengths
93
243
//! generated by flatc-zig from Schema.fbs
//! NOTE(review): this file is generator output; edits here may be overwritten
//! on the next regeneration.

const flatbuffers = @import("flatbuffers");

/// A Map is a logical nested type that is represented as
///
/// List<entries: Struct<key: K, value: V>>
///
/// In this layout, the keys and values are each respectively contiguous. We do
/// not constrain the key and value types, so the application is responsible
/// for ensuring that the keys are hashable and unique. Whether the keys are sorted
/// may be set in the metadata for this field.
///
/// In a field with Map type, the field has a child Struct field, which then
/// has two children: key type and the second the value type. The names of the
/// child fields may be respectively "entries", "key", and "value", but this is
/// not enforced.
///
/// Map
/// ```text
/// - child[0] entries: Struct
///   - child[0] key: K
///   - child[1] value: V
/// ```
/// Neither the "entries" field nor the "key" field may be nullable.
///
/// The metadata is structured so that Arrow systems without special handling
/// for Map can make Map an alias for List. The "layout" attribute for the Map
/// field must have the same contents as a List.
pub const Map = struct {
    /// Set to true if the keys within each value are sorted
    keys_sorted: bool = false,

    const Self = @This();

    /// Unpacks a `PackedMap` (a view over raw flatbuffer bytes) into this
    /// owned struct. Propagates any read error from the underlying table.
    pub fn init(packed_: PackedMap) flatbuffers.Error!Self {
        return .{
            .keys_sorted = try packed_.keysSorted(),
        };
    }

    /// Serializes this struct as a flatbuffer table via `builder` and returns
    /// the table offset. `keys_sorted` is written as table field 0 with a
    /// default of `false` (the field is omitted when it equals the default).
    pub fn pack(self: Self, builder: *flatbuffers.Builder) flatbuffers.Error!u32 {
        try builder.startTable();
        try builder.appendTableFieldWithDefault(bool, self.keys_sorted, false);
        return builder.endTable();
    }
};

/// A Map is a logical nested type that is represented as
///
/// List<entries: Struct<key: K, value: V>>
///
/// In this layout, the keys and values are each respectively contiguous. We do
/// not constrain the key and value types, so the application is responsible
/// for ensuring that the keys are hashable and unique. Whether the keys are sorted
/// may be set in the metadata for this field.
///
/// In a field with Map type, the field has a child Struct field, which then
/// has two children: key type and the second the value type. The names of the
/// child fields may be respectively "entries", "key", and "value", but this is
/// not enforced.
///
/// Map
/// ```text
/// - child[0] entries: Struct
///   - child[0] key: K
///   - child[1] value: V
/// ```
/// Neither the "entries" field nor the "key" field may be nullable.
///
/// The metadata is structured so that Arrow systems without special handling
/// for Map can make Map an alias for List. The "layout" attribute for the Map
/// field must have the same contents as a List.
pub const PackedMap = struct {
    /// Lazy view over the raw (size-prefixed) flatbuffer bytes.
    table: flatbuffers.Table,

    const Self = @This();

    /// Wraps `size_prefixed_bytes` without copying; fails if the bytes do not
    /// form a valid size-prefixed flatbuffer table.
    pub fn init(size_prefixed_bytes: []u8) flatbuffers.Error!Self {
        return .{ .table = try flatbuffers.Table.init(size_prefixed_bytes) };
    }

    /// Set to true if the keys within each value are sorted
    pub fn keysSorted(self: Self) flatbuffers.Error!bool {
        // Table field 0, defaulting to false when absent — must match the
        // field index/default used by Map.pack above.
        return self.table.readFieldWithDefault(bool, 0, false);
    }
};
https://raw.githubusercontent.com/clickingbuttons/arrow-zig/c74941d19773f2a905d40cb661a32eb33c63ce33/src/ipc/gen/Map.zig
// Advent of Code 2020, day 19, part A: count how many message lines are
// matched by grammar rule 0. The grammar is compiled into an NFA whose nodes
// are literal characters; matching walks character-transition edges.
//
// NOTE(review): this targets a pre-0.9 Zig (`*std.mem.Allocator` parameters,
// `&gpa.allocator`, `.{ .read = true }` open flags, void `testing.expect`) —
// it will not compile on modern Zig without API updates.
const std = @import("std");
const parseU = std.fmt.parseUnsigned;

// One grammar rule:
//   t == 0: literal rule — matches the single character `c`.
//   t == 1: composite rule — `x` is the first sequence of sub-rule ids and
//           `y` the optional second alternative (empty when there is no '|').
const R = struct { id: u64, t: u8, c: u8, x: []usize, y: []usize };

// Global NFA storage. A node is identified by its index into `letters`
// (the character that node matches); `fact` is the edge list: {from, to}
// pairs of node indices.
var letters: std.ArrayList(u8) = undefined;
var fact: std.ArrayList([2]usize) = undefined;

// Entry/exit node sets of a sub-automaton.
const Ret = struct { starts: []usize, ends: []usize };

// Builds the NFA for a sequence of rule ids: each rule's automaton is built
// in order and every end node of the previous one is wired to every start
// node of the next. The sequence's starts are the first rule's starts and
// its ends are the last rule's ends.
fn nfa_concat(allocator: *std.mem.Allocator, rules: []R, ids: []usize) std.mem.Allocator.Error!Ret {
    var first = try nfa(allocator, rules, ids[0]);
    var last = first;
    for (ids[1..]) |id| {
        var this = try nfa(allocator, rules, id);
        // Connect previous ends to the new sub-automaton's starts.
        for (last.ends) |e| {
            for (this.starts) |s| {
                try fact.append(.{ e, s });
            }
        }
        last = this;
    }
    return Ret{ .starts = first.starts, .ends = last.ends };
}

// Builds the NFA for a single rule id. A literal rule creates one fresh node;
// a composite rule builds each alternative with nfa_concat and takes the
// union of their start/end sets.
fn nfa(allocator: *std.mem.Allocator, rules: []R, id: usize) std.mem.Allocator.Error!Ret {
    var starts = std.ArrayList(usize).init(allocator);
    var ends = std.ArrayList(usize).init(allocator);
    for (rules) |r| {
        if (r.id == id) {
            if (r.t == 0) {
                // New node index is the current length of `letters`.
                try starts.append(letters.items.len);
                try ends.append(letters.items.len);
                try letters.append(r.c);
            } else {
                const left = try nfa_concat(allocator, rules, r.x);
                for (left.starts) |s| try starts.append(s);
                for (left.ends) |e| try ends.append(e);
                if (r.y.len == 0) continue;
                const right = try nfa_concat(allocator, rules, r.y);
                for (right.starts) |s| try starts.append(s);
                for (right.ends) |e| try ends.append(e);
            }
        }
    }
    return Ret{ .starts = starts.items, .ends = ends.items };
}

// Depth-first NFA simulation: does the automaton, starting at `node`, accept
// exactly the string `s`? Acceptance requires consuming every character and
// finishing on a node in `ends`.
fn match(node: usize, ends: []usize, s: []u8) bool {
    if (s[0] != letters.items[node]) return false;
    if (s.len == 1) {
        // Last character consumed — accept only on an end node.
        for (ends) |e| if (e == node) return true;
        return false;
    }
    // Try every outgoing edge with the remainder of the string.
    for (fact.items) |f| {
        if (f[0] == node) {
            if (match(f[1], ends, s[1..])) return true;
        }
    }
    return false;
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = &gpa.allocator;
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);

    // Input file: rule lines, a blank line, then message lines.
    const file = try std.fs.cwd().openFile(args[1], .{ .read = true });
    defer file.close();

    var rules = std.ArrayList(R).init(allocator);
    letters = std.ArrayList(u8).init(allocator);
    fact = std.ArrayList([2]usize).init(allocator);
    const reader = file.reader();

    // Phase 1: parse rules until the blank separator line.
    while (reader.readUntilDelimiterAlloc(allocator, '\n', 10000)) |line| {
        defer allocator.free(line);
        if (line.len == 0) break;
        const id = try parseU(u64, line[0..std.mem.indexOf(u8, line, ":").?], 10);
        // Skip past ": " to the rule body.
        var s = line[std.mem.indexOf(u8, line, ":").? + 2 ..];
        if (s[0] == '"') {
            // Literal rule, e.g. `4: "a"`.
            try rules.append(.{ .id = id, .c = s[1], .t = 0, .x = undefined, .y = undefined });
        } else {
            // First alternative: ids up to '|' (or end of line).
            var x = std.ArrayList(usize).init(allocator);
            while (s.len > 0 and s[0] != '|') {
                const z = try parseU(usize, s[0 .. std.mem.indexOf(u8, s, " ") orelse s.len], 10);
                // Advance past the token; when no space remains, `s.len - 1 + 1`
                // makes `s` empty and ends the loop.
                s = s[(std.mem.indexOf(u8, s, " ") orelse s.len - 1) + 1 ..];
                try x.append(z);
            }
            // Second alternative after "| ", if present.
            var y = std.ArrayList(usize).init(allocator);
            if (s.len > 0) s = s[2..];
            while (s.len > 0) {
                const z = try parseU(usize, s[0 .. std.mem.indexOf(u8, s, " ") orelse s.len], 10);
                s = s[(std.mem.indexOf(u8, s, " ") orelse s.len - 1) + 1 ..];
                try y.append(z);
            }
            try rules.append(.{ .id = id, .c = 0, .t = 1, .x = x.items, .y = y.items });
        }
    } else |err| {
        // EndOfStream is the expected loop terminator.
        std.testing.expect(err == error.EndOfStream);
    }

    // Compile rule 0 into the NFA, then count matching messages.
    const x = try nfa(allocator, rules.items, 0);
    var count: u64 = 0;
    while (reader.readUntilDelimiterAlloc(allocator, '\n', 10000)) |line| {
        defer allocator.free(line);
        for (x.starts) |s| {
            if (match(s, x.ends, line)) count += 1;
        }
    } else |err| {
        std.testing.expect(err == error.EndOfStream);
    }
    std.debug.print("{}\n", .{count});
}
https://raw.githubusercontent.com/jeltz/advent/c2a5671a9db86e73aa12313badb5fe4dfd344847/2020/19/19a.zig
pub const Latch = @import("sync/latch.zig").Latch;
https://raw.githubusercontent.com/tdeebswihart/zdb/1e8ec95f12ef496abb4192b86e23d172c8d0abfc/lib/sync.zig
const print = @import("std").debug.print; const mem = @import("std").mem; // will be used to compare bytes pub fn main() void { const bytes = "hello"; print("{s}\n", .{@typeName(@TypeOf(bytes))}); // *const [5:0]u8 print("{d}\n", .{bytes.len}); // 5 print("{c}\n", .{bytes[1]}); // 'e' print("{d}\n", .{bytes[5]}); // 0 print("{}\n", .{'e' == '\x65'}); // true print("{d}\n", .{'\u{1f4a9}'}); // 128169 print("{d}\n", .{'💯'}); // 128175 print("{}\n", .{mem.eql(u8, "hello", "h\x65llo")}); // true print("0x{x}\n", .{"\xff"[0]}); // non-UTF-8 strings are possible with \xNN notation. print("{u}\n", .{'⚡'}); }
https://raw.githubusercontent.com/tamalsaha/learn-zig/d9ec87d177f2a181bbca06b48cfa38309ecc6812/string_literals.zig
//
// We've absorbed a lot of information about the variations of types
// we can use in Zig. Roughly, in order we have:
//
//                          u8  single item
//                         *u8  single-item pointer
//                        []u8  slice (size known at runtime)
//                       [5]u8  array of 5 u8s
//                       [*]u8  many-item pointer (zero or more)
//                 enum {a, b}  set of unique values a and b
//                error {e, f}  set of unique error values e and f
//      struct {y: u8, z: i32}  group of values y and z
// union(enum) {a: u8, b: i32}  single value either u8 or i32
//
// Values of any of the above types can be assigned as "var" or "const"
// to allow or disallow changes (mutability) via the assigned name:
//
//     const a: u8 = 5; // immutable
//       var b: u8 = 5; //   mutable
//
// We can also make error unions or optional types from any of
// the above:
//
//     var a: E!u8 = 5; // can be u8 or error from set E
//     var b: ?u8 = 5;  // can be u8 or null
//
// Knowing all of this, maybe we can help out a local hermit. He made
// a little Zig program to help him plan his trips through the woods,
// but it has some mistakes.
//
// *************************************************************
// *                A NOTE ABOUT THIS EXERCISE                 *
// *                                                           *
// * You do NOT have to read and understand every bit of this  *
// * program. This is a very big example. Feel free to skim    *
// * through it and then just focus on the few parts that are  *
// * actually broken!                                          *
// *                                                           *
// *************************************************************
//
const print = @import("std").debug.print;

// The grue is a nod to Zork.
const TripError = error{ Unreachable, EatenByAGrue };

// Let's start with the Places on the map. Each has a name and a
// distance or difficulty of travel (as judged by the hermit).
//
// Note that we declare the places as mutable (var) because we need to
// assign the paths later. And why is that? Because paths contain
// pointers to places and assigning them now would create a dependency
// loop!
const Place = struct {
    name: []const u8,
    // Filled in by main() after all Paths exist; undefined until then.
    paths: []const Path = undefined,
};

var a = Place{ .name = "Archer's Point" };
var b = Place{ .name = "Bridge" };
var c = Place{ .name = "Cottage" };
var d = Place{ .name = "Dogwood Grove" };
var e = Place{ .name = "East Pond" };
var f = Place{ .name = "Fox Pond" };

// The hermit's hand-drawn ASCII map
// +---------------------------------------------------+
// | * Archer's Point                        ~~~~      |
// |    ~~~                              ~~~~~~~~      |
// |   ~~~| |~~~~~~~~~~~~      ~~~~~~~                 |
// |  Bridge  ~~~~~~~~                                 |
// |    ^   ^                 ^                        |
// |   ^ ^  ^      ^     / \                           |
// |  ^ ^ ^  ^  ^       |_|  Cottage                   |
// |    Dogwood Grove                                  |
// |  ^     <boat>                                     |
// |   ^ ^  ^ ^   ~~~~~~~~~~~~~    ^   ^               |
// |      ^    ~~ East Pond ~~~                        |
// |   ^ ^ ^    ~~~~~~~~~~~~~~                         |
// |            ~~     ^                               |
// |    ^        ~~~ <-- short waterfall               |
// |     ^        ~~~~~                                |
// |          ~~~~~~~~~~~~~~~~~                        |
// |         ~~~~ Fox Pond ~~~~~~~     ^    ^          |
// |   ^     ~~~~~~~~~~~~~~~       ^    ^              |
// |                 ~~~~~                             |
// +---------------------------------------------------+
//
// We'll be reserving memory in our program based on the number of
// places on the map. Note that we do not have to specify the type of
// this value because we don't actually use it in our program once
// it's compiled! (Don't worry if this doesn't make sense yet.)
const place_count = 6;

// Now let's create all of the paths between sites. A path goes from
// one place to another and has a distance.
const Path = struct {
    from: *const Place,
    to: *const Place,
    dist: u8,
};

// By the way, if the following code seems like a lot of tedious
// manual labor, you're right! One of Zig's killer features is letting
// us write code that runs at compile time to "automate" repetitive
// code (much like macros in other languages), but we haven't learned
// how to do that yet!
const a_paths = [_]Path{ Path{ .from = &a, // from: Archer's Point .to = &b, // to: Bridge .dist = 2, }, }; const b_paths = [_]Path{ Path{ .from = &b, // from: Bridge .to = &a, // to: Archer's Point .dist = 2, }, Path{ .from = &b, // from: Bridge .to = &d, // to: Dogwood Grove .dist = 1, }, }; const c_paths = [_]Path{ Path{ .from = &c, // from: Cottage .to = &d, // to: Dogwood Grove .dist = 3, }, Path{ .from = &c, // from: Cottage .to = &e, // to: East Pond .dist = 2, }, }; const d_paths = [_]Path{ Path{ .from = &d, // from: Dogwood Grove .to = &b, // to: Bridge .dist = 1, }, Path{ .from = &d, // from: Dogwood Grove .to = &c, // to: Cottage .dist = 3, }, Path{ .from = &d, // from: Dogwood Grove .to = &f, // to: Fox Pond .dist = 7, }, }; const e_paths = [_]Path{ Path{ .from = &e, // from: East Pond .to = &c, // to: Cottage .dist = 2, }, Path{ .from = &e, // from: East Pond .to = &f, // to: Fox Pond .dist = 1, // (one-way down a short waterfall!) }, }; const f_paths = [_]Path{ Path{ .from = &f, // from: Fox Pond .to = &d, // to: Dogwood Grove .dist = 7, }, }; // Once we've plotted the best course through the woods, we'll make a // "trip" out of it. A trip is a series of Places connected by Paths. // We use a TripItem union to allow both Places and Paths to be in the // same array. const TripItem = union(enum) { place: *const Place, path: *const Path, // This is a little helper function to print the two different // types of item correctly. fn printMe(self: TripItem) void { switch (self) { // Oops! The hermit forgot how to capture the union values // in a switch statement. Please capture both values as // 'p' so the print statements work! .place => |p| print("{s}", .{p.name}), .path => |p| print("--{}->", .{p.dist}), } } }; // The Hermit's Notebook is where all the magic happens. A notebook // entry is a Place discovered on the map along with the Path taken to // get there and the distance to reach it from the start point. 
// If we find a better Path to reach a Place (shorter distance), we
// update the entry. Entries also serve as a "todo" list which is how
// we keep track of which paths to explore next.
const NotebookEntry = struct {
    place: *const Place,
    // null only for the start entry — every other entry records where
    // we came from and which path we took.
    coming_from: ?*const Place,
    via_path: ?*const Path,
    dist_to_reach: u16,
};

// +------------------------------------------------+
// |              ~ Hermit's Notebook ~             |
// +---+----------------+----------------+----------+
// |   | Place          | From           | Distance |
// +---+----------------+----------------+----------+
// | 0 | Archer's Point | null           |        0 |
// | 1 | Bridge         | Archer's Point |        2 | < next_entry
// | 2 | Dogwood Grove  | Bridge         |        1 |
// | 3 |                |                |          | < end_of_entries
// |                      ...                       |
// +---+----------------+----------------+----------+
//
const HermitsNotebook = struct {
    // Remember the array repetition operator `**`? It is no mere
    // novelty, it's also a great way to assign multiple items in an
    // array without having to list them one by one. Here we use it to
    // initialize an array with null values.
    entries: [place_count]?NotebookEntry = .{null} ** place_count,

    // The next entry keeps track of where we are in our "todo" list.
    next_entry: u8 = 0,

    // Mark the start of empty space in the notebook.
    end_of_entries: u8 = 0,

    // We'll often want to find an entry by Place. If one is not
    // found, we return null.
    fn getEntry(self: *HermitsNotebook, place: *const Place) ?*NotebookEntry {
        for (self.entries) |*entry, i| {
            if (i >= self.end_of_entries) break;

            // "entry" is a pointer to an optional NotebookEntry, but we
            // need an optional pointer to a NotebookEntry. So we
            // dereference (.*), unwrap the optional (.?), and take the
            // address (&) of the result. Entries before end_of_entries
            // are guaranteed non-null, so the unwrap is safe.
            if (place == entry.*.?.place) return &entry.*.?;
        }
        return null;
    }

    // The checkNote() method is the beating heart of the magical
    // notebook. Given a new note in the form of a NotebookEntry
    // struct, we check to see if we already have an entry for the
    // note's Place.
    //
    // If we DON'T, we'll add the entry to the end of the notebook
    // along with the Path taken and distance.
    //
    // If we DO, we check to see if the path is "better" (shorter
    // distance) than the one we'd noted before. If it is, we
    // overwrite the old entry with the new one.
    fn checkNote(self: *HermitsNotebook, note: NotebookEntry) void {
        var existing_entry = self.getEntry(note.place);

        if (existing_entry == null) {
            self.entries[self.end_of_entries] = note;
            self.end_of_entries += 1;
        } else if (note.dist_to_reach < existing_entry.?.dist_to_reach) {
            existing_entry.?.* = note;
        }
    }

    // The next two methods allow us to use the notebook as a "todo"
    // list.
    fn hasNextEntry(self: *HermitsNotebook) bool {
        return self.next_entry < self.end_of_entries;
    }

    fn getNextEntry(self: *HermitsNotebook) *const NotebookEntry {
        defer self.next_entry += 1; // Increment after getting entry
        return &self.entries[self.next_entry].?;
    }

    // After we've completed our search of the map, we'll have
    // computed the shortest Path to every Place. To collect the
    // complete trip from the start to the destination, we need to
    // walk backwards from the destination's notebook entry, following
    // the coming_from pointers back to the start. What we end up with
    // is an array of TripItems with our trip in reverse order.
    //
    // We need to take the trip array as a parameter because we want
    // the main() function to "own" the array memory. (Returning a
    // slice of this function's stack frame would be a dangling
    // reference!) Returns TripError.Unreachable if the destination
    // was never reached.
    fn getTripTo(self: *HermitsNotebook, trip: []?TripItem, dest: *Place) TripError!void {
        // We start at the destination entry.
        const destination_entry = self.getEntry(dest);

        // This function needs to return an error if the requested
        // destination was never reached. (This can't actually happen
        // in our map since every Place is reachable by every other
        // Place.)
        if (destination_entry == null) {
            return TripError.Unreachable;
        }

        // Variables hold the entry we're currently examining and an
        // index to keep track of where we're appending trip items.
        var current_entry = destination_entry.?;
        var i: u8 = 0;

        // At the end of each looping, a continue expression increments
        // our index by two: each iteration appends one Place AND one
        // Path.
        while (true) : (i += 2) {
            trip[i] = TripItem{ .place = current_entry.place };

            // An entry "coming from" nowhere means we've reached the
            // start, so we're done.
            if (current_entry.coming_from == null) break;

            // Otherwise, entries have a path.
            trip[i + 1] = TripItem{ .path = current_entry.via_path.? };

            // Now we follow the entry we're "coming from". If we
            // aren't able to find the entry we're "coming from" by
            // Place, something has gone horribly wrong with our
            // program! (This really shouldn't ever happen. Have you
            // checked for grues?)
            const previous_entry = self.getEntry(current_entry.coming_from.?);
            if (previous_entry == null) return TripError.EatenByAGrue;
            current_entry = previous_entry.?;
        }
    }
};

pub fn main() void {
    // Here's where the hermit decides where he would like to go. Once
    // you get the program working, try some different Places on the
    // map!
    const start = &a; // Archer's Point
    const destination = &f; // Fox Pond

    // Store each Path array as a slice in each Place. As mentioned
    // above, we needed to delay making these references to avoid
    // creating a dependency loop when the compiler is trying to
    // figure out how to allocate space for each item.
    a.paths = a_paths[0..];
    b.paths = b_paths[0..];
    c.paths = c_paths[0..];
    d.paths = d_paths[0..];
    e.paths = e_paths[0..];
    f.paths = f_paths[0..];

    // Now we create an instance of the notebook and add the first
    // "start" entry. Note the null values. Read the comments for the
    // checkNote() method above to see how this entry gets added to
    // the notebook.
    var notebook = HermitsNotebook{};
    var working_note = NotebookEntry{
        .place = start,
        .coming_from = null,
        .via_path = null,
        .dist_to_reach = 0,
    };
    notebook.checkNote(working_note);

    // Get the next entry from the notebook (the first being the
    // "start" entry we just added) until we run out, at which point
    // we'll have checked every reachable Place.
    while (notebook.hasNextEntry()) {
        var place_entry = notebook.getNextEntry();

        // For every Path that leads FROM the current Place, create a
        // new note (in the form of a NotebookEntry) with the
        // destination Place and the total distance from the start to
        // reach that place. Again, read the comments for the
        // checkNote() method to see how this works.
        for (place_entry.place.paths) |*path| {
            working_note = NotebookEntry{
                .place = path.to,
                .coming_from = place_entry.place,
                .via_path = path,
                .dist_to_reach = place_entry.dist_to_reach + path.dist,
            };
            notebook.checkNote(working_note);
        }
    }

    // Once the loop above is complete, we've calculated the shortest
    // path to every reachable Place! What we need to do now is set
    // aside memory for the trip and have the hermit's notebook fill
    // in the trip from the destination back to the path. Note that
    // this is the first time we've actually used the destination!
    var trip = [_]?TripItem{null} ** (place_count * 2);

    notebook.getTripTo(trip[0..], destination) catch |err| {
        print("Oh no! {}\n", .{err});
        return;
    };

    // Print the trip with a little helper function below.
    printTrip(trip[0..]);
}

// Remember that trips will be a series of alternating TripItems
// containing a Place or Path from the destination back to the start.
// The remaining space in the trip array will contain null values, so
// we need to loop through the items in reverse, skipping nulls, until
// we reach the destination at the front of the array.
fn printTrip(trip: []?TripItem) void {
    // We convert the usize length to a u8 with @intCast(), a
    // builtin function just like @import(). We'll learn about
    // these properly in a later exercise.
    var i: u8 = @intCast(u8, trip.len);

    while (i > 0) {
        i -= 1;
        if (trip[i] == null) continue;
        trip[i].?.printMe();
    }

    print("\n", .{});
}

// Going deeper:
//
// In computer science terms, our map places are "nodes" or "vertices" and
// the paths are "edges". Together, they form a "weighted, directed
// graph". It is "weighted" because each path has a distance (also
// known as a "cost"). It is "directed" because each path goes FROM
// one place TO another place (undirected graphs allow you to travel
// on an edge in either direction).
//
// Since we append new notebook entries at the end of the list and
// then explore each sequentially from the beginning (like a "todo"
// list), we are treating the notebook as a "First In, First Out"
// (FIFO) queue.
//
// Since we examine all closest paths first before trying further ones
// (thanks to the "todo" queue), we are performing a "Breadth-First
// Search" (BFS).
//
// By tracking "lowest cost" paths, we can also say that we're
// performing a "least-cost search".
//
// Even more specifically, the Hermit's Notebook most closely
// resembles the Shortest Path Faster Algorithm (SPFA), attributed to
// Edward F. Moore. By replacing our simple FIFO queue with a
// "priority queue", we would basically have Dijkstra's algorithm. A
// priority queue retrieves items sorted by "weight" (in our case, it
// would keep the paths with the shortest distance at the front of the
// queue). Dijkstra's algorithm is more efficient because longer paths
// can be eliminated more quickly. (Work it out on paper to see why!)
https://raw.githubusercontent.com/matthewsimo/ziglings/76b194775858a18b011a0f38206ae16024d2a221/exercises/058_quiz7.zig
//! A set of array and slice types that bit-pack integer elements. A normal [12]u3
//! takes up 12 bytes of memory since u3's alignment is 1. PackedArray(u3, 12) only
//! takes up 4 bytes of memory.

const std = @import("std");
const builtin = @import("builtin");
const debug = std.debug;
const testing = std.testing;
const native_endian = builtin.target.cpu.arch.endian();
const Endian = std.builtin.Endian;

/// Provides a set of functions for reading and writing packed integers from a
/// slice of bytes.
pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
    // The general technique employed here is to cast bytes in the array to a container
    // integer (having bits % 8 == 0) large enough to contain the number of bits we want,
    // then we can retrieve or store the new value with a relative minimum of masking
    // and shifting. In this worst case, this means that we'll need an integer that's
    // actually 1 byte larger than the minimum required to store the bits, because it
    // is possible that the bits start at the end of the first byte, continue through
    // zero or more, then end in the beginning of the last. But, if we try to access
    // a value in the very last byte of memory with that integer size, that extra byte
    // will be out of bounds. Depending on the circumstances of the memory, that might
    // mean the OS fatally kills the program. Thus, we use a larger container (MaxIo)
    // most of the time, but a smaller container (MinIo) when touching the last byte
    // of the memory.
    const int_bits = @bitSizeOf(Int);

    // In the best case, this is the number of bytes we need to touch
    // to read or write a value, as bits.
    const min_io_bits = ((int_bits + 7) / 8) * 8;

    // In the worst case, this is the number of bytes we need to touch
    // to read or write a value, as bits. To calculate for int_bits > 1,
    // set aside 2 bits to touch the first and last bytes, then divide
    // by 8 to see how many bytes can be filled up in between.
    const max_io_bits = switch (int_bits) {
        0 => 0,
        1 => 8,
        else => ((int_bits - 2) / 8 + 2) * 8,
    };

    // We bitcast the desired Int type to an unsigned version of itself
    // to avoid issues with shifting signed ints.
    const UnInt = std.meta.Int(.unsigned, int_bits);

    // The minimum container int type (used near the end of the buffer).
    const MinIo = std.meta.Int(.unsigned, min_io_bits);

    // The maximum container int type (used everywhere else).
    const MaxIo = std.meta.Int(.unsigned, max_io_bits);

    return struct {
        /// Retrieves the integer at `index` from the packed data beginning at `bit_offset`
        /// within `bytes`.
        pub fn get(bytes: []const u8, index: usize, bit_offset: u7) Int {
            if (int_bits == 0) return 0;

            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;

            //using the larger container size will potentially read out of bounds
            if (max_end_byte > bytes.len) return getBits(bytes, MinIo, bit_index);
            return getBits(bytes, MaxIo, bit_index);
        }

        /// Loads a `Container`-sized window at `bit_index` and shifts/masks
        /// the requested `Int` out of it, honoring `endian`.
        fn getBits(bytes: []const u8, comptime Container: type, bit_index: usize) Int {
            const container_bits = @bitSizeOf(Container);

            const start_byte = bit_index / 8;
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);

            //read bytes as container
            const value_ptr: *align(1) const Container = @ptrCast(&bytes[start_byte]);
            var value = value_ptr.*;

            if (endian != native_endian) value = @byteSwap(value);

            switch (endian) {
                .big => {
                    value <<= @intCast(head_keep_bits);
                    value >>= @intCast(head_keep_bits);
                    value >>= @intCast(tail_keep_bits);
                },
                .little => {
                    value <<= @intCast(tail_keep_bits);
                    value >>= @intCast(tail_keep_bits);
                    value >>= @intCast(head_keep_bits);
                },
            }

            return @bitCast(@as(UnInt, @truncate(value)));
        }

        /// Sets the integer at `index` to `val` within the packed data beginning
        /// at `bit_offset` into `bytes`.
        pub fn set(bytes: []u8, index: usize, bit_offset: u3, int: Int) void {
            if (int_bits == 0) return;

            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;

            //using the larger container size will potentially write out of bounds
            if (max_end_byte > bytes.len) return setBits(bytes, MinIo, bit_index, int);
            setBits(bytes, MaxIo, bit_index, int);
        }

        /// Read-modify-write of a `Container`-sized window at `bit_index`:
        /// zeroes the target bit range and merges `int` into it.
        fn setBits(bytes: []u8, comptime Container: type, bit_index: usize, int: Int) void {
            const container_bits = @bitSizeOf(Container);
            const Shift = std.math.Log2Int(Container);

            const start_byte = bit_index / 8;
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
            const keep_shift: Shift = switch (endian) {
                .big => @intCast(tail_keep_bits),
                .little => @intCast(head_keep_bits),
            };

            //position the bits where they need to be in the container
            const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift;

            //read existing bytes
            const target_ptr: *align(1) Container = @ptrCast(&bytes[start_byte]);
            var target = target_ptr.*;

            if (endian != native_endian) target = @byteSwap(target);

            //zero the bits we want to replace in the existing bytes
            const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift;
            const mask = ~inv_mask;
            target &= mask;

            //merge the new value
            target |= value;

            if (endian != native_endian) target = @byteSwap(target);

            //save it back
            target_ptr.* = target;
        }

        /// Provides a PackedIntSlice of the packed integers in `bytes` (which begins at `bit_offset`)
        /// from the element specified by `start` to the element specified by `end`.
        pub fn slice(bytes: []u8, bit_offset: u3, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(end >= start);

            const length = end - start;
            const bit_index = (start * int_bits) + bit_offset;
            const start_byte = bit_index / 8;
            const end_byte = (bit_index + (length * int_bits) + 7) / 8;
            const new_bytes = bytes[start_byte..end_byte];

            if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0);

            var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length);
            new_slice.bit_offset = @intCast((bit_index - (start_byte * 8)));
            return new_slice;
        }

        /// Recasts a packed slice to a version with elements of type `NewInt` and endianness `new_endian`.
        /// Slice will begin at `bit_offset` within `bytes` and the new length will be automatically
        /// calculated from `old_len` using the sizes of the current integer type and `NewInt`.
        pub fn sliceCast(bytes: []u8, comptime NewInt: type, comptime new_endian: Endian, bit_offset: u3, old_len: usize) PackedIntSliceEndian(NewInt, new_endian) {
            const new_int_bits = @bitSizeOf(NewInt);
            const New = PackedIntSliceEndian(NewInt, new_endian);

            const total_bits = (old_len * int_bits);
            const new_int_count = total_bits / new_int_bits;

            debug.assert(total_bits == new_int_count * new_int_bits);

            var new = New.init(bytes, new_int_count);
            new.bit_offset = bit_offset;

            return new;
        }
    };
}

/// Creates a bit-packed array of `Int`. Non-byte-multiple integers
/// will take up less memory in PackedIntArray than in a normal array.
/// Elements are packed using native endianness and without storing any
/// meta data. PackedArray(i3, 8) will occupy exactly 3 bytes
/// of memory.
pub fn PackedIntArray(comptime Int: type, comptime int_count: usize) type {
    return PackedIntArrayEndian(Int, native_endian, int_count);
}

/// Creates a bit-packed array of `Int` with bit order specified by `endian`.
/// Non-byte-multiple integers will take up less memory in PackedIntArrayEndian
/// than in a normal array. Elements are packed without storing any meta data.
/// PackedIntArrayEndian(i3, 8) will occupy exactly 3 bytes of memory.
pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptime int_count: usize) type {
    const int_bits = @bitSizeOf(Int);
    const total_bits = int_bits * int_count;
    const total_bytes = (total_bits + 7) / 8;

    const Io = PackedIntIo(Int, endian);

    return struct {
        const Self = @This();

        /// The byte buffer containing the packed data.
        bytes: [total_bytes]u8,
        /// The number of elements in the packed array.
        comptime len: usize = int_count,

        /// The integer type of the packed array.
        pub const Child = Int;

        /// Initialize a packed array using an unpacked array
        /// or, more likely, an array literal.
        pub fn init(ints: [int_count]Int) Self {
            var self: Self = undefined;
            for (ints, 0..) |int, i| self.set(i, int);
            return self;
        }

        /// Initialize all entries of a packed array to the same value.
        pub fn initAllTo(int: Int) Self {
            var self: Self = undefined;
            self.setAll(int);
            return self;
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < int_count);
            return Io.get(&self.bytes, index, 0);
        }

        ///Copy the value of `int` into the array at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < int_count);
            return Io.set(&self.bytes, index, 0, int);
        }

        /// Set all entries of a packed array to the value of `int`.
        pub fn setAll(self: *Self, int: Int) void {
            var i: usize = 0;
            while (i < int_count) : (i += 1) {
                self.set(i, int);
            }
        }

        /// Create a PackedIntSlice of the array from `start` to `end`.
        pub fn slice(self: *Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < int_count);
            debug.assert(end <= int_count);
            return Io.slice(&self.bytes, 0, start, end);
        }

        /// Create a PackedIntSlice of the array using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the array's `Int`'s total bits.
        pub fn sliceCast(self: *Self, comptime NewInt: type) PackedIntSlice(NewInt) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the array using `NewInt` as the integer type
        /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
        /// within the array's `Int`'s total bits.
        pub fn sliceCastEndian(self: *Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(&self.bytes, NewInt, new_endian, 0, int_count);
        }
    };
}

/// A type representing a sub range of a PackedIntArray.
pub fn PackedIntSlice(comptime Int: type) type {
    return PackedIntSliceEndian(Int, native_endian);
}

/// A type representing a sub range of a PackedIntArrayEndian.
pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: Endian) type {
    const int_bits = @bitSizeOf(Int);
    const Io = PackedIntIo(Int, endian);

    return struct {
        const Self = @This();

        /// Backing storage viewed by this slice (not owned).
        bytes: []u8,
        /// Bit position of element 0 within `bytes[0]`.
        bit_offset: u3,
        /// Number of packed elements visible through this slice.
        len: usize,

        /// The integer type of the packed slice.
        pub const Child = Int;

        /// Calculates the number of bytes required to store a desired count
        /// of `Int`s.
        pub fn bytesRequired(int_count: usize) usize {
            const total_bits = int_bits * int_count;
            const total_bytes = (total_bits + 7) / 8;
            return total_bytes;
        }

        /// Initialize a packed slice using the memory at `bytes`, with `int_count`
        /// elements. `bytes` must be large enough to accommodate the requested
        /// count.
        pub fn init(bytes: []u8, int_count: usize) Self {
            debug.assert(bytes.len >= bytesRequired(int_count));

            return Self{
                .bytes = bytes,
                .len = int_count,
                .bit_offset = 0,
            };
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < self.len);
            return Io.get(self.bytes, index, self.bit_offset);
        }

        /// Copy `int` into the slice at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < self.len);
            return Io.set(self.bytes, index, self.bit_offset, int);
        }

        /// Create a PackedIntSlice of this slice from `start` to `end`.
        pub fn slice(self: Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < self.len);
            debug.assert(end <= self.len);
            return Io.slice(self.bytes, self.bit_offset, start, end);
        }

        /// Create a PackedIntSlice of the slice using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the slice's `Int`'s total bits.
        pub fn sliceCast(self: Self, comptime NewInt: type) PackedIntSliceEndian(NewInt, endian) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the slice using `NewInt` as the integer type
        /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
        /// within the slice's `Int`'s total bits.
        pub fn sliceCastEndian(self: Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(self.bytes, NewInt, new_endian, self.bit_offset, self.len);
        }
    };
}

test "PackedIntArray" {
    // TODO @setEvalBranchQuota generates panics in wasm32. Investigate.
    if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
    // TODO: enable this test
    if (true) return error.SkipZigTest;
    @setEvalBranchQuota(10000);
    const max_bits = 256;
    const int_count = 19;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned;
        const I = std.meta.Int(sign, bits);

        const PackedArray = PackedIntArray(I, int_count);
        const expected_bytes = ((bits * int_count) + 7) / 8;
        try testing.expect(@sizeOf(PackedArray) == expected_bytes);

        var data: PackedArray = undefined;

        //write values, counting up
        var i: usize = 0;
        var count: I = 0;
        while (i < data.len) : (i += 1) {
            data.set(i, count);
            if (bits > 0) count +%= 1;
        }

        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len) : (i += 1) {
            const val = data.get(i);
            try testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}

test "PackedIntIo" {
    const bytes = [_]u8{ 0b01101_000, 0b01011_110, 0b00011_101 };
    try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u17, 0x1abcd), PackedIntIo(u17, .little).get(&bytes, 0, 3));
    try testing.expectEqual(@as(u18, 0x3abcd), PackedIntIo(u18, .little).get(&bytes, 0, 3));
}

test "PackedIntArray init" {
    const S = struct {
        fn doTheTest() !void {
            const PackedArray = PackedIntArray(u3, 8);
            var packed_array = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 });
            var i: usize = 0;
            while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, @intCast(i)), packed_array.get(i));
        }
    };
    try S.doTheTest();
    try comptime S.doTheTest();
}

test "PackedIntArray initAllTo" {
    const S = struct {
        fn doTheTest() !void {
            const PackedArray = PackedIntArray(u3, 8);
            var packed_array = PackedArray.initAllTo(5);
            var i: usize = 0;
            while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, 5), packed_array.get(i));
        }
    };
    try S.doTheTest();
    try comptime S.doTheTest();
}

test "PackedIntSlice" {
    // TODO @setEvalBranchQuota generates panics in wasm32. Investigate.
    if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
    // TODO enable this test
    if (true) return error.SkipZigTest;
    @setEvalBranchQuota(10000);
    const max_bits = 256;
    const int_count = 19;
    const total_bits = max_bits * int_count;
    const total_bytes = (total_bits + 7) / 8;

    var buffer: [total_bytes]u8 = undefined;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned;
        const I = std.meta.Int(sign, bits);
        const P = PackedIntSlice(I);
        var data = P.init(&buffer, int_count);

        //write values, counting up
        var i: usize = 0;
        var count: I = 0;
        while (i < data.len) : (i += 1) {
            data.set(i, count);
            if (bits > 0) count +%= 1;
        }

        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len) : (i += 1) {
            const val = data.get(i);
            try testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}

test "PackedIntSlice of PackedInt(Array/Slice)" {
    // TODO enable this test
    if (true) return error.SkipZigTest;
    const max_bits = 16;
    const int_count = 19;

    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        const Int = std.meta.Int(.unsigned, bits);

        const PackedArray = PackedIntArray(Int, int_count);
        var packed_array: PackedArray = undefined;

        const limit = (1 << bits);

        var i: usize = 0;
        while (i < packed_array.len) : (i += 1) {
            packed_array.set(i, @intCast(i % limit));
        }

        //slice of array
        var packed_slice = packed_array.slice(2, 5);
        try testing.expect(packed_slice.len == 3);
        const ps_bit_count = (bits * packed_slice.len) + packed_slice.bit_offset;
        const ps_expected_bytes = (ps_bit_count + 7) / 8;
        try testing.expect(packed_slice.bytes.len == ps_expected_bytes);
        try testing.expect(packed_slice.get(0) == 2 % limit);
        try testing.expect(packed_slice.get(1) == 3 % limit);
        try testing.expect(packed_slice.get(2) == 4 % limit);
        packed_slice.set(1, 7 % limit);
        try testing.expect(packed_slice.get(1) == 7 % limit);

        //write through slice
        try testing.expect(packed_array.get(3) == 7 % limit);

        //slice of a slice
        const packed_slice_two = packed_slice.slice(0, 3);
        try testing.expect(packed_slice_two.len == 3);
        const ps2_bit_count = (bits * packed_slice_two.len) + packed_slice_two.bit_offset;
        const ps2_expected_bytes = (ps2_bit_count + 7) / 8;
        try testing.expect(packed_slice_two.bytes.len == ps2_expected_bytes);
        try testing.expect(packed_slice_two.get(1) == 7 % limit);
        try testing.expect(packed_slice_two.get(2) == 4 % limit);

        //size one case
        const packed_slice_three = packed_slice_two.slice(1, 2);
        try testing.expect(packed_slice_three.len == 1);
        const ps3_bit_count = (bits * packed_slice_three.len) + packed_slice_three.bit_offset;
        const ps3_expected_bytes = (ps3_bit_count + 7) / 8;
        try testing.expect(packed_slice_three.bytes.len == ps3_expected_bytes);
        try testing.expect(packed_slice_three.get(0) == 7 % limit);

        //empty slice case
        const packed_slice_empty = packed_slice.slice(0, 0);
        try testing.expect(packed_slice_empty.len == 0);
        try testing.expect(packed_slice_empty.bytes.len == 0);

        //slicing at byte boundaries
        const packed_slice_edge = packed_array.slice(8, 16);
        try testing.expect(packed_slice_edge.len == 8);
        const pse_bit_count = (bits * packed_slice_edge.len) + packed_slice_edge.bit_offset;
        const pse_expected_bytes = (pse_bit_count + 7) / 8;
        try testing.expect(packed_slice_edge.bytes.len == pse_expected_bytes);
        try testing.expect(packed_slice_edge.bit_offset == 0);
    }
}

test "PackedIntSlice accumulating bit offsets" {
    //bit_offset is u3, so standard debugging asserts should catch
    // anything
    {
        const PackedArray = PackedIntArray(u3, 16);
        var packed_array: PackedArray = undefined;

        var packed_slice = packed_array.slice(0, packed_array.len);
        var i: usize = 0;
        while (i < packed_array.len - 1) : (i += 1) {
            packed_slice = packed_slice.slice(1, packed_slice.len);
        }
    }
    {
        const PackedArray = PackedIntArray(u11, 88);
        var packed_array: PackedArray = undefined;

        var packed_slice = packed_array.slice(0, packed_array.len);
        var i: usize = 0;
        while (i < packed_array.len - 1) : (i += 1) {
            packed_slice = packed_slice.slice(1, packed_slice.len);
        }
    }
}

test "PackedInt(Array/Slice) sliceCast" {
    const PackedArray = PackedIntArray(u1, 16);
    var packed_array = PackedArray.init([_]u1{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 });
    const packed_slice_cast_2 = packed_array.sliceCast(u2);
    const packed_slice_cast_4 = packed_slice_cast_2.sliceCast(u4);
    var packed_slice_cast_9 = packed_array.slice(0, (packed_array.len / 9) * 9).sliceCast(u9);
    const packed_slice_cast_3 = packed_slice_cast_9.sliceCast(u3);
    var i: usize = 0;
    while (i < packed_slice_cast_2.len) : (i += 1) {
        const val = switch (native_endian) {
            .big => 0b01,
            .little => 0b10,
        };
        try testing.expect(packed_slice_cast_2.get(i) == val);
    }
    i = 0;
    while (i < packed_slice_cast_4.len) : (i += 1) {
        const val = switch (native_endian) {
            .big => 0b0101,
            .little => 0b1010,
        };
        try testing.expect(packed_slice_cast_4.get(i) == val);
    }
    i = 0;
    while (i < packed_slice_cast_9.len) : (i += 1) {
        const val = 0b010101010;
        try testing.expect(packed_slice_cast_9.get(i) == val);
        packed_slice_cast_9.set(i, 0b111000111);
    }
    i = 0;
    while (i < packed_slice_cast_3.len) : (i += 1) {
        const val: u3 = switch (native_endian) {
            .big => if (i % 2 == 0) 0b111 else 0b000,
            .little => if (i % 2 == 0) 0b111 else 0b000,
        };
        try testing.expect(packed_slice_cast_3.get(i) == val);
    }
}

test "PackedInt(Array/Slice)Endian" {
    {
        const PackedArrayBe = PackedIntArrayEndian(u4, .big, 8);
        var packed_array_be = PackedArrayBe.init([_]u4{ 0, 1, 2, 3, 4, 5, 6, 7 });
        try testing.expect(packed_array_be.bytes[0] == 0b00000001);
        try testing.expect(packed_array_be.bytes[1] == 0b00100011);

        var i: usize = 0;
        while (i < packed_array_be.len) : (i += 1) {
            try testing.expect(packed_array_be.get(i) == i);
        }

        var packed_slice_le = packed_array_be.sliceCastEndian(u4, .little);
        i = 0;
        while (i < packed_slice_le.len) : (i += 1) {
            const val = if (i % 2 == 0) i + 1 else i - 1;
            try testing.expect(packed_slice_le.get(i) == val);
        }

        var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u4, .little);
        i = 0;
        while (i < packed_slice_le_shift.len) : (i += 1) {
            const val = if (i % 2 == 0) i else i + 2;
            try testing.expect(packed_slice_le_shift.get(i) == val);
        }
    }
    {
        const PackedArrayBe = PackedIntArrayEndian(u11, .big, 8);
        var packed_array_be = PackedArrayBe.init([_]u11{ 0, 1, 2, 3, 4, 5, 6, 7 });
        try testing.expect(packed_array_be.bytes[0] == 0b00000000);
        try testing.expect(packed_array_be.bytes[1] == 0b00000000);
        try testing.expect(packed_array_be.bytes[2] == 0b00000100);
        try testing.expect(packed_array_be.bytes[3] == 0b00000001);
        try testing.expect(packed_array_be.bytes[4] == 0b00000000);

        var i: usize = 0;
        while (i < packed_array_be.len) : (i += 1) {
            try testing.expect(packed_array_be.get(i) == i);
        }

        var packed_slice_le = packed_array_be.sliceCastEndian(u11, .little);
        try testing.expect(packed_slice_le.get(0) == 0b00000000000);
        try testing.expect(packed_slice_le.get(1) == 0b00010000000);
        try testing.expect(packed_slice_le.get(2) == 0b00000000100);
        try testing.expect(packed_slice_le.get(3) == 0b00000000000);
        try testing.expect(packed_slice_le.get(4) == 0b00010000011);
        try testing.expect(packed_slice_le.get(5) == 0b00000000010);
        try testing.expect(packed_slice_le.get(6) == 0b10000010000);
        try testing.expect(packed_slice_le.get(7) == 0b00000111001);

        var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u11, .little);
        try testing.expect(packed_slice_le_shift.get(0) == 0b00010000000);
        try testing.expect(packed_slice_le_shift.get(1) == 0b00000000100);
        try testing.expect(packed_slice_le_shift.get(2) == 0b00000000000);
        try testing.expect(packed_slice_le_shift.get(3) == 0b00010000011);
    }
}

//@NOTE: Need to manually update this list as more posix os's get
// added to DirectAllocator.

// These tests prove we aren't accidentally accessing memory past
// the end of the array/slice by placing it at the end of a page
// and reading the last element. The assumption is that the page
// after this one is not mapped and will cause a segfault if we
// don't account for the bounds.

test "PackedIntArray at end of available memory" {
    switch (builtin.target.os.tag) {
        .linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {},
        else => return,
    }
    const PackedArray = PackedIntArray(u3, 8);

    const Padded = struct {
        _: [std.mem.page_size - @sizeOf(PackedArray)]u8,
        p: PackedArray,
    };

    const allocator = std.testing.allocator;

    var pad = try allocator.create(Padded);
    defer allocator.destroy(pad);

    pad.p.set(7, std.math.maxInt(u3));
}

test "PackedIntSlice at end of available memory" {
    switch (builtin.target.os.tag) {
        .linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {},
        else => return,
    }
    const PackedSlice = PackedIntSlice(u11);

    const allocator = std.testing.allocator;

    var page = try allocator.alloc(u8, std.mem.page_size);
    defer allocator.free(page);

    var p = PackedSlice.init(page[std.mem.page_size - 2 ..], 1);
    p.set(0, std.math.maxInt(u11));
}
https://raw.githubusercontent.com/ziglang/zig/d9bd34fd0533295044ffb4160da41f7873aff905/lib/std/packed_int_array.zig
const ecs = @import("../mod.zig");
const rl = @import("raylib");
const Components = @import("../components/mod.zig");
const main = @import("../../main.zig");
const tex = @import("../../tex.zig");
const std = @import("std");

/// Aggregates every ECS system. Field declaration order is the update order
/// used elsewhere (presumably the game loop iterates these fields — verify
/// against the caller).
pub const Systems = extern struct {
    // this ordering matters
    mouse: TankMouseSystem,
    keyb: KeyboardMovementSystem,
    mvmt: MovementSystem,
    // render goes last
    der_trans: DerivedTransformSystem,
    spawner: SpawnerSystem,
    rndr: RenderSystem,
    timed_destr: TimedDestructionSystem,

    /// Default-initializes every system field via comptime reflection.
    /// `comp` is currently unused (the component-wiring code below is
    /// commented out) but kept for interface stability.
    pub fn init(comp: *Components) !Systems {
        _ = comp;
        var systems: Systems = undefined;
        // for each system field in Systems
        inline for (std.meta.fields(Systems)) |field| {
            @field(systems, field.name) = (field.type){};
            // // for each component field in a system
            // inline for (std.meta.fields(field.type)) |comp_field| {
            //     // set the value of that field to be a reference to the same field name in comp
            //     @field(system, comp_field.name) = &@field(comp, comp_field.name);
            // }
        }
        // return Systems{ .mvmt = MovementSystem{ .pos = &comp.pos, .vel = &comp.vel }, .rndr = RenderSystem{.pos} };
        return systems;
    }

    /// No-op: systems hold no owned resources.
    pub fn deinit(self: *Systems) void {
        _ = self;
    }
};

/// Destroys entities whose TimeDestruct lifetime has elapsed, by calling
/// each component's `destroy_fn` callback.
pub const TimedDestructionSystem = struct {
    const Self = @This();
    pub fn update(self: *const Self, game: *main.Game) !void {
        _ = self;
        const comps = &game.components;
        const ents = comps.time_destr.data.keys();
        for (ents) |ent| {
            const timed_destr: Components.TimeDestruct = comps.time_destr.get(ent) orelse continue;
            // Entity expires once birth_time + lifetime is in the past.
            if (timed_destr.lifetime + timed_destr.birth_time < rl.getTime()) {
                try timed_destr.destroy_fn(game, ent);
            }
        }
    }
};

/// Rotates every mouse-tracking entity toward the cursor position.
pub const TankMouseSystem = struct {
    const Self = @This();
    pub fn update(self: *const Self, game: *main.Game) !void {
        _ = self;
        const comps = &game.components;
        const ents = comps.mouse.data.keys();
        const mouse_pos = rl.getMousePosition();
        for (ents) |ent| {
            const trans: *Components.TransformObj = comps.trans.getPtr(ent) orelse continue;
            if (trans.dirty) continue; // ignore if dirty
            const wt = trans.getWorld();
            // NOTE(review): angle is measured from mouse toward the entity
            // (pos - mouse), i.e. offset by pi from mouse-pointing; presumably
            // compensated by the sprite's rot_offset — confirm.
            trans.setRot(std.math.atan2(f32, wt.pos.y - mouse_pos.y, wt.pos.x - mouse_pos.x));
        }
    }
};

/// Runs entity factories on a timer; each active spawner fires at most once
/// per `spawn_rate` seconds and records the spawned entity's source.
pub const SpawnerSystem = struct {
    const Self = @This();
    pub fn update(self: *const Self, game: *main.Game) !void {
        _ = self;
        const comps = &game.components;
        const ents = comps.spawner.data.keys();
        const time = rl.getTime();
        for (ents) |ent| {
            const spawner: *Components.Spawner = comps.spawner.getPtr(ent).?;
            if (!spawner.is_active) continue;
            if (time - spawner.last_spawn < spawner.spawn_rate) continue;
            spawner.last_spawn = time;
            const spawn = try spawner.factory_fn(game, ent);
            // Track which spawner produced this entity.
            try comps.spawn_src.add(spawn, ent);
            if (spawner.set_inactive_on_spawn) spawner.is_active = false;
        }
    }
};

/// Applies each entity's velocity component to its transform.
pub const MovementSystem = struct {
    const Self = @This();
    pub fn update(self: *const Self, game: *main.Game) !void {
        _ = self;
        const comps = &game.components;
        const ents = comps.vel.data.keys();
        for (ents) |ent| {
            const vel = comps.vel.get(ent).?;
            const trans = comps.trans.getPtr(ent).?; // anything with vel should have a trans comp
            trans.moveBy(vel);
        }
    }
};

/// Recomputes world-space transforms from the local-transform hierarchy.
pub const DerivedTransformSystem = struct {
    const Self = @This();
    pub fn update(self: *const Self, game: *main.Game) !void {
        _ = self;
        const comps = &game.components;
        const ents = comps.trans.data.keys();
        for (ents) |ent| {
            var trans: *Components.TransformObj = comps.trans.getPtr(ent).?;
            trans.syncWorld(ent, game);
        }
    }
};

/// Draws every textured entity from the shared atlas, plus a small debug
/// circle at each entity's world position.
pub const RenderSystem = struct {
    const Self = @This();
    pub fn update(self: *const Self, game: *main.Game) !void {
        _ = self;
        const comps = &game.components;
        const ents = comps.tex.data.keys();
        for (ents) |ent| {
            const tex_info: *const tex.Info = comps.tex.get(ent).?;
            const trans = comps.trans.get(ent) orelse continue;
            const tfm: Components.Transform2D = trans.getWorld();
            // Scale the destination rectangle and rotation origin together.
            const s_w: f32 = tex_info.width * tfm.scale.x;
            const s_h: f32 = tex_info.height * tfm.scale.y;
            var origin = tex_info.origin;
            origin.x *= tfm.scale.x;
            origin.y *= tfm.scale.y;
            // std.log.info("origin: {d} {d}", .{ origin.x, origin.y });
            // rl.drawRectanglePro(
            //     .{ .x = tfm.pos.x, .y = tfm.pos.y, .width = s_w, .height = s_h },
            //     origin,
            //     std.math.radiansToDegrees(f32, tfm.rot - tex_info.rot_offset),
            //     rl.Color.sky_blue,
            // );
            game.atlas.tex.drawPro(
                tex_info.getSourceRect(),
                .{ .x = tfm.pos.x, .y = tfm.pos.y, .width = s_w, .height = s_h },
                origin,
                std.math.radiansToDegrees(f32, tfm.rot - tex_info.rot_offset),
                rl.Color.white,
            );
            rl.drawCircle(
                @intFromFloat(tfm.pos.x),
                @intFromFloat(tfm.pos.y),
                2,
                rl.Color.sky_blue,
            );
        }
    }
};

/// WASD / arrow-key tank controls: left/right rotate, up/down drive the
/// entity forward/backward along its current local heading.
pub const KeyboardMovementSystem = struct {
    const Self = @This();
    pub fn update(self: *const Self, game: *main.Game) !void {
        _ = self;
        const comps = &game.components;
        const ents = comps.keyb.data.keys();
        for (ents) |ent| {
            const trans = comps.trans.getPtr(ent) orelse continue;
            // Missing speed components default to 0 (entity cannot move).
            const rot_speed = comps.rot_speed.get(ent) orelse 0;
            const speed = comps.speed.get(ent) orelse 0;
            if (rl.isKeyDown(.key_right) or rl.isKeyDown(.key_d)) trans.rotBy(rot_speed);
            if (rl.isKeyDown(.key_left) or rl.isKeyDown(.key_a)) trans.rotBy(-rot_speed);
            if (rl.isKeyDown(.key_up) or rl.isKeyDown(.key_w)) {
                const rot = trans.getLocal().rot;
                trans.moveBy(.{ .x = @cos(rot) * speed, .y = @sin(rot) * speed });
            }
            if (rl.isKeyDown(.key_down) or rl.isKeyDown(.key_s)) {
                const rot = trans.getLocal().rot;
                trans.moveBy(.{ .x = @cos(rot) * -speed, .y = @sin(rot) * -speed });
            }
        }
    }
};
https://raw.githubusercontent.com/candrewlee14/tank-game/ec4cc80681d4ee9a4985355a1075cbaaca8325fb/src/ecs/systems/mod.zig
const std = @import("std");
const wren = @import("wren");

// NOTE: the testing allocator is used for example simplicity; a real
// application would choose its own allocator in main().
pub var alloc = std.testing.allocator;

/// Example: run an intentionally broken Wren script (unterminated string,
/// misspelled `System.prit`) and show how the wrapper reports the failure.
/// The default runner prints a stack trace to stdout and `run` returns a
/// Wren error that we catch and log.
pub fn main() anyerror!void {
    // Initialize the data structures for the wrapper
    wren.init(alloc);
    defer wren.deinit();

    // Set up a VM configuration using the supplied default bindings
    // You can override the bindings after calling this to change them
    var config = wren.util.defaultConfig();

    // Create a new VM from our config we generated previously
    const vm = wren.newVM(&config);
    defer wren.freeVM(vm);

    // This code has some issues, and Wren knows it!
    // The default runner will output a stack trace to stdout, and
    // the returned error will be of type wren.ErrType
    std.debug.print("\n=== Have an Error ===\n", .{});
    wren.util.run(vm, "main",
        \\ System.print("Hello from error!)
        \\ System.prit("Ohno!")
    ) catch |err| {
        // FIX: an error value is not a string, so "{s}" cannot format `err`
        // directly; print its name via @errorName instead.
        std.debug.print("THIS IS FINE - {s}\n", .{@errorName(err)});
    };
}
https://raw.githubusercontent.com/NewbLuck/zig-wren/cdb4ee342c00ce17518720333a8d07203d78bdd2/example/syntax_error.zig
const std = @import("std");
const builtin = @import("builtin");

const cetech1 = @import("cetech1");
const static_modules = @import("static_modules");
const kernel = @import("kernel.zig");

// Route all std.log output through the engine's custom log handler.
pub const std_options = .{
    .logFn = @import("log.zig").zigLogFn,
};

// Build options (generated by the build script) controlling static linking.
const cetech1_options = @import("cetech1_options");

/// Process entry point: boots the kernel. When the build enables
/// `static_modules`, the statically linked module descriptors are passed;
/// otherwise an empty tuple is used. Returns exit code 0 on success; any
/// boot error propagates out of main.
pub fn main() anyerror!u8 {
    const descs = if (!cetech1_options.static_modules) .{} else static_modules.descs;
    try kernel.boot(&descs, .{});
    return 0;
}
https://raw.githubusercontent.com/cyberegoorg/cetech1/6504ecf36b11e394f92af7a88a5ee922f7afac01/src/main.zig
pub const std = @import("std");
pub const Allocator = std.mem.Allocator;
pub const testing = std.testing;
pub const Array = std.ArrayList;

/// C-style alias for `stringDigitsToNumber`.
pub const atoi = stringDigitsToNumber;

/// Parses a string of base-10 digits into a `u32`.
/// An empty string parses to 0. Any non-digit character is logged and
/// returned as `error.InvalidCharacter` (from `std.fmt.charToDigit`).
/// A result that does not fit in u32 is a checked-cast safety panic,
/// matching the original overflow-trap behavior.
pub fn stringDigitsToNumber(input: []const u8) !u32 {
    // Delegate to the generic implementation to avoid duplicating the
    // digit-accumulation logic (the two bodies were previously identical).
    return atoi2(u32, input);
}

/// Generic variant: parses a string of base-10 digits into any integer
/// type `T`. Accumulates in a u64 and casts at the end; values that do
/// not fit in `T` are a checked-cast safety panic.
pub fn atoi2(comptime T: type, input: []const u8) !T {
    var ret: u64 = 0;
    for (input) |c| {
        const d: u8 = std.fmt.charToDigit(c, 10) catch |err| {
            std.debug.print("atoi Invalid string passed: \"{s}\"\n", .{input});
            return err;
        };
        ret = ret * 10 + d;
        // std.debug.print("c: {c} d: {} ret: {}\n", .{ c, d, ret });
    }
    return @as(T, @intCast(ret));
}

test {
    testing.refAllDecls(@This());
}
https://raw.githubusercontent.com/tsunaminoai/aoc/a94bc63fec2a300e248b6052844342468976b498/src/root.zig
const std = @import("std");
const string = []const u8;
const input = @embedFile("../input/day07.txt");

/// AoC 2021 day 7: find the target position minimizing total crab fuel.
/// Part 1 uses linear cost per step; part 2 uses triangular cost
/// (each extra step costs one more than the last).
pub fn main() !void {
    // BUG FIX: this declaration was commented out, but `iter` is used in
    // the loop below — without it the file does not compile.
    var iter = std.mem.split(u8, std.mem.trim(u8, input, "\n"), ",");
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const alloc = &arena.allocator;

    var list = std.ArrayList(u32).init(alloc);
    defer list.deinit();
    while (iter.next()) |num_s| {
        const num = std.fmt.parseUnsigned(u32, num_s, 10) catch @panic("");
        try list.append(num);
    }
    const items = list.toOwnedSlice();

    const min = minL(items);
    const max = maxL(items);

    // Part 1: brute-force every candidate position in [min, max].
    {
        var cheapest: u32 = std.math.maxInt(u32);
        var i = min;
        while (i <= max) : (i += 1) {
            cheapest = std.math.min(cheapest, fuelCost1(items, i));
        }
        std.debug.print("{d}\n", .{cheapest});
    }
    // Part 2: same search with the triangular cost function.
    {
        var cheapest: u32 = std.math.maxInt(u32);
        var i = min;
        while (i <= max) : (i += 1) {
            cheapest = std.math.min(cheapest, fuelCost2(items, i));
        }
        std.debug.print("{d}\n", .{cheapest});
    }
}

/// Minimum of a non-empty slice.
fn minL(slice: []const u32) u32 {
    return minI(slice[0], slice[1..]);
}

// Recursive helper carrying the running minimum.
fn minI(runner: u32, slice: []const u32) u32 {
    if (slice.len == 0) return runner;
    return minI(std.math.min(runner, slice[0]), slice[1..]);
}

/// Maximum of a non-empty slice.
fn maxL(slice: []const u32) u32 {
    return maxI(slice[0], slice[1..]);
}

// Recursive helper carrying the running maximum.
fn maxI(runner: u32, slice: []const u32) u32 {
    if (slice.len == 0) return runner;
    return maxI(std.math.max(runner, slice[0]), slice[1..]);
}

/// Part 1 cost: sum of |item - to| over all positions.
fn fuelCost1(slice: []const u32, to: u32) u32 {
    var ret: u32 = 0;
    for (slice) |item| {
        ret += @intCast(u32, std.math.absInt(@intCast(i32, item) - @intCast(i32, to)) catch @panic(""));
    }
    return ret;
}

/// Part 2 cost: a move of distance d costs 1+2+...+d = d*(d+1)/2.
fn fuelCost2(slice: []const u32, to: u32) u32 {
    var ret: u32 = 0;
    for (slice) |item| {
        const dist = @intCast(u32, std.math.absInt(@intCast(i32, item) - @intCast(i32, to)) catch @panic(""));
        ret += (dist * (dist + 1)) / 2;
    }
    return ret;
}
https://raw.githubusercontent.com/nektro/aoc2021/2d65b46d4aaf485b61fd854da8215a0d5b42f47c/src/day07.zig
const std = @import("std");

/// Truncate `a` from the wider IEEE-754 float `src_t` to the narrower
/// `dst_t`, operating entirely on the unsigned bit representations.
/// Rounds to nearest (ties to even, see the round/halfway branches below)
/// and carries the truncated trailing NaN field across for NaN inputs.
pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
    const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const srcSigBits = std.math.floatMantissaBits(src_t);
    const dstSigBits = std.math.floatMantissaBits(dst_t);

    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const srcBits = @typeInfo(src_t).Float.bits;
    const srcExpBits = srcBits - srcSigBits - 1;
    const srcInfExp = (1 << srcExpBits) - 1;
    const srcExpBias = srcInfExp >> 1;
    const srcMinNormal = 1 << srcSigBits;
    const srcSignificandMask = srcMinNormal - 1;
    const srcInfinity = srcInfExp << srcSigBits;
    const srcSignMask = 1 << (srcSigBits + srcExpBits);
    const srcAbsMask = srcSignMask - 1;
    const roundMask = (1 << (srcSigBits - dstSigBits)) - 1;
    const halfway = 1 << (srcSigBits - dstSigBits - 1);
    const srcQNaN = 1 << (srcSigBits - 1);
    const srcNaNCode = srcQNaN - 1;

    const dstBits = @typeInfo(dst_t).Float.bits;
    const dstExpBits = dstBits - dstSigBits - 1;
    const dstInfExp = (1 << dstExpBits) - 1;
    const dstExpBias = dstInfExp >> 1;
    const underflowExponent = srcExpBias + 1 - dstExpBias;
    const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
    const underflow = underflowExponent << srcSigBits;
    const overflow = overflowExponent << srcSigBits;
    const dstQNaN = 1 << (dstSigBits - 1);
    const dstNaNCode = dstQNaN - 1;

    // Break a into a sign and representation of the absolute value
    const aRep: src_rep_t = @bitCast(a);
    const aAbs: src_rep_t = aRep & srcAbsMask;
    const sign: src_rep_t = aRep & srcSignMask;
    var absResult: dst_rep_t = undefined;

    // The wrapping subtractions make this a single range check:
    // true exactly when underflow <= aAbs < overflow.
    if (aAbs -% underflow < aAbs -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        absResult = @truncate(aAbs >> (srcSigBits - dstSigBits));
        absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits;

        const roundBits: src_rep_t = aAbs & roundMask;
        if (roundBits > halfway) {
            // Round to nearest
            absResult += 1;
        } else if (roundBits == halfway) {
            // Ties to even
            absResult += absResult & 1;
        }
    } else if (aAbs > srcInfinity) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
        absResult |= dstQNaN;
        absResult |= @intCast(((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
    } else if (aAbs >= overflow) {
        // a overflows to infinity.
        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const aExp: u32 = @intCast(aAbs >> srcSigBits);
        const shift: u32 = @intCast(srcExpBias - dstExpBias - aExp + 1);

        const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;

        // Right shift by the denormalization amount with sticky.
        if (shift > srcSigBits) {
            absResult = 0;
        } else {
            // sticky = 1 iff any bit shifted out below is nonzero, so the
            // rounding decision still sees the discarded low bits.
            const sticky: src_rep_t = @intFromBool(significand << @intCast(srcBits - shift) != 0);
            const denormalizedSignificand: src_rep_t = significand >> @intCast(shift) | sticky;
            absResult = @intCast(denormalizedSignificand >> (srcSigBits - dstSigBits));
            const roundBits: src_rep_t = denormalizedSignificand & roundMask;
            if (roundBits > halfway) {
                // Round to nearest
                absResult += 1;
            } else if (roundBits == halfway) {
                // Ties to even
                absResult += absResult & 1;
            }
        }
    }

    // Apply the signbit to (dst_t)abs(a).
    const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @as(dst_rep_t, @truncate(sign >> @intCast(srcBits - dstBits)));
    return @bitCast(result);
}

/// f80 variant of `truncf`: the 80-bit extended format has an explicit
/// integer bit and is split into exponent/fraction via std.math.break_f80,
/// so it cannot share the generic bit-mask path above.
pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
    const dst_sig_bits = std.math.floatMantissaBits(dst_t);

    const src_exp_bias = 16383;

    const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
    const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);

    const dst_bits = @typeInfo(dst_t).Float.bits;
    const dst_exp_bits = dst_bits - dst_sig_bits - 1;
    const dst_inf_exp = (1 << dst_exp_bits) - 1;
    const dst_exp_bias = dst_inf_exp >> 1;

    const underflow = src_exp_bias + 1 - dst_exp_bias;
    const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;

    const dst_qnan = 1 << (dst_sig_bits - 1);
    const dst_nan_mask = dst_qnan - 1;

    // Break a into a sign and representation of the absolute value
    var a_rep = std.math.break_f80(a);
    const sign = a_rep.exp & 0x8000;
    a_rep.exp &= 0x7FFF;
    a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
    var abs_result: dst_rep_t = undefined;

    // Wrapping-subtract range check: true iff underflow <= exp < overflow.
    if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
        abs_result |= @truncate(a_rep.fraction >> (src_sig_bits - dst_sig_bits));
        abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;

        const round_bits = a_rep.fraction & round_mask;
        if (round_bits > halfway) {
            // Round to nearest
            abs_result += 1;
        } else if (round_bits == halfway) {
            // Ties to even
            abs_result += abs_result & 1;
        }
    } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
        abs_result |= dst_qnan;
        abs_result |= @intCast((a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
    } else if (a_rep.exp >= overflow) {
        // a overflows to infinity.
        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const shift = src_exp_bias - dst_exp_bias - a_rep.exp;

        // Right shift by the denormalization amount with sticky.
        if (shift > src_sig_bits) {
            abs_result = 0;
        } else {
            const sticky = @intFromBool(a_rep.fraction << @intCast(shift) != 0);
            const denormalized_significand = a_rep.fraction >> @intCast(shift) | sticky;
            abs_result = @intCast(denormalized_significand >> (src_sig_bits - dst_sig_bits));
            const round_bits = denormalized_significand & round_mask;
            if (round_bits > halfway) {
                // Round to nearest
                abs_result += 1;
            } else if (round_bits == halfway) {
                // Ties to even
                abs_result += abs_result & 1;
            }
        }
    }

    // Apply the signbit to (dst_t)abs(a): the f80 sign lives in bit 15 of
    // `exp`, i.e. bit (dst_bits - 16) positions below the destination's MSB.
    const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
    return @bitCast(result);
}

test {
    _ = @import("truncf_test.zig");
}
https://raw.githubusercontent.com/ziglang/zig/d9bd34fd0533295044ffb4160da41f7873aff905/lib/compiler_rt/truncf.zig
// (Almost-)Concurrent String Hash Count
//
// `key` is a short string (average length ~15 bytes), stored separately in
// the `keys_bytes` array.
// Each hashtable entry holds:
// * `hash`: u64
// * `count`: u32
// * `offset`: u24, pointing at the first byte of the key inside `keys_bytes`
//   when the key is stored as a string
// => 15 bytes total (~23% of a cache line)
//
// HashCount only needs two operations: `insert` and `count`.
// HashCount allows access from multiple threads.
//
// With `count` performed concurrently by several threads without a lock, an
// update may occasionally be duplicated or lost => acceptable! With large
// inputs the statistical error does not matter.
//
// `insert` must handle the race condition around growing the table. Options:
// * 1/ init the hashtable large enough that it never has to grow
// * 2/ take a lock when growing (not implemented)
//
// - - -
//
// See https://github.com/VKCOM/YouTokenToMe/blob/master/youtokentome/cpp/third_party/flat_hash_map.h
//
// >> small strings: 1_099_201, ss puts: 38_210_356, ss bytes: 6_788_771
//    total          2_051_991           43_811_775           21_215_772
// => small strings are ~50% of the types, ~87% of the puts into HashCount,
//    and ~32% of the bytes

const std = @import("std");
const builtin = @import("builtin");
const Prime = @import("primes.zig").Prime;

pub const HashType = u64;
pub const CountType = u32;
pub const IndexType = u24;
pub const KeyType = []const u8;

// Tokens never contain a space, so use 32 (' ') as the guard byte to keep
// debug dumps readable.
pub const GUARD_BYTE = 32;

pub const MAX_CAPACITY: usize = std.math.maxInt(IndexType);
pub const MAX_KEY_LEN: usize = 32; // must be <= 63 (one byte reserved for the guard byte)
pub const AVG_KEY_LEN: usize = 15;

pub const MAXX_HASH = std.math.maxInt(HashType);
pub const MAXX_INDEX = std.math.maxInt(IndexType);

// One table slot: hash (also the sort key of the table), occurrence count,
// and the key's offset into keys_bytes (or the key length for short keys).
pub const Entry = packed struct {
    hash: HashType = MAXX_HASH,
    count: CountType = 0,
    offset: IndexType = 0,
};

pub fn HashCount(comptime capacity: IndexType) type {
    const bits = std.math.log2_int(HashType, capacity);
    const shift = 63 - bits;
    // Twice the next power of two plus some slack for the linear-probing
    // overflow region at the end of the array.
    const size = (@as(usize, 2) << bits) + (capacity / 8);
    // const prime = Prime.pick((capacity / 2) * 3);
    // const size = prime.value;
    std.debug.assert(size < MAX_CAPACITY);
    std.debug.assert(size > capacity);

    return struct {
        // In single-threaded builds the spinlock degenerates to void.
        const lock_init = if (builtin.single_threaded) {} else false;

        allocator: std.mem.Allocator,
        spinlock: @TypeOf(lock_init),

        entries: []Entry,
        len: usize,

        keys_bytes: []u8,
        keys_bytes_len: usize,

        // Statistic information
        max_probs: usize,
        total_probs: usize,
        total_puts: usize,

        const Self = @This();

        pub fn deinit(self: *Self) void {
            if (self.len > 0) {
                self.allocator.free(self.entries);
                self.allocator.free(self.keys_bytes);
                self.len = 0;
            }
        }

        pub fn init(self: *Self, init_allocator: std.mem.Allocator) !void {
            self.max_probs = 0;
            self.total_probs = 0;
            self.total_puts = 1; // avoid division by zero in showStats
            self.len = 0;
            self.keys_bytes_len = MAX_KEY_LEN + 1;
            // Ensure entry.offset > MAX_KEY_LEN so that for a long string
            // entry.offset is always greater than any key.len (short keys
            // store key.len, <= 8, in offset instead).

            self.spinlock = lock_init;
            self.allocator = init_allocator;

            var n: usize = capacity * AVG_KEY_LEN + MAX_KEY_LEN;
            if (n > std.math.maxInt(IndexType)) n = std.math.maxInt(IndexType);

            self.keys_bytes = try self.allocator.alloc(u8, n);
            std.mem.set(u8, self.keys_bytes, GUARD_BYTE);

            self.entries = try self.allocator.alloc(Entry, size);
            std.mem.set(Entry, self.entries, .{ .hash = MAXX_HASH, .count = 0, .offset = 0 });
        }

        inline fn recordStats(self: *Self, _probs: usize) void {
            const probs = _probs + 1;
            self.total_probs += probs;
            self.total_puts += 1;
            if (probs > self.max_probs) self.max_probs = probs;
        }

        // Reconstruct the key bytes of an entry. Short keys (offset <= 8)
        // are recovered from the hash itself (the mixing multiply below is
        // the inverse of the one in _hash); long keys are sliced out of
        // keys_bytes using the length byte stored just before the offset.
        fn keyStr(self: Self, entry: *const Entry, ss_ptr: *HashType) []const u8 {
            const offset = entry.offset;
            if (offset <= 8) { // small string
                ss_ptr.* = entry.hash *% 0x2040003d780970bd;
                return std.mem.asBytes(ss_ptr)[0..offset];
            }
            const ending: usize = offset + self.keys_bytes[offset - 1];
            return self.keys_bytes[offset..ending];
        }

        // x == (x * 0x517cc1b727220a95) * 0x2040003d780970bd // wrapping_mul
        // Keys of <= 8 bytes are packed little-endian into a u64 and mixed
        // with an invertible multiply, so the hash encodes the key exactly.
        inline fn _hash(key: KeyType) HashType {
            if (key.len <= 8) {
                var value: HashType = 0;
                for (key) |byte, i| value += @intCast(HashType, byte) << @intCast(u6, i) * 8;
                return value *% 0x517cc1b727220a95;
            }
            return std.hash.Wyhash.hash(key[0], key);
        }

        pub fn put(self: *Self, key: KeyType) void {
            if (key.len > MAX_KEY_LEN) return; // reject
            if (self.len == capacity) {
                std.debug.print("`hash_count_str.zig`: hashtable bị đầy.", .{});
                unreachable;
            }

            var it: Entry = .{ .hash = _hash(key), .count = 1 };
            // Home slot: top bits of the hash; entries are kept sorted by
            // hash from here on, so probing can stop early.
            var i: IndexType = @intCast(IndexType, it.hash >> shift);
            // var i = prime.mod(it.hash);
            // const _i = i;

            while (self.entries[i].hash < it.hash) : (i += 1) {}

            var ss: HashType = undefined;
            const ss_ptr = &ss;
            var entry = &self.entries[i];
            while (entry.hash == it.hash) : (i += 1) {
                // For a short key, offset == key.len is enough (the hash is
                // invertible); otherwise compare the whole key.
                const found = (entry.offset == key.len) or (std.mem.eql(u8, self.keyStr(entry, ss_ptr), key));
                if (found) { // key already present
                    entry.count += 1; // the only mutation is bumping count
                    // self.recordStats(i - _i);
                    return;
                }
                entry = &self.entries[i + 1];
            }

            { // Take the lock only for the large mutation below
                while (@atomicRmw(bool, &self.spinlock, .Xchg, true, .SeqCst)) {}
                defer std.debug.assert(@atomicRmw(bool, &self.spinlock, .Xchg, false, .SeqCst));

                // First occurrence of this key: record its bytes.
                // Short keys are fully encoded in the hash, nothing to copy;
                // their offset stores the key length instead.
                if (key.len <= 8) {
                    it.offset = @intCast(IndexType, key.len);
                } else {
                    var ending = self.keys_bytes_len;
                    self.keys_bytes[ending] = @intCast(u8, key.len);
                    it.offset = @intCast(IndexType, ending + 1);
                    ending += 1;
                    for (key) |byte| {
                        self.keys_bytes[ending] = byte;
                        ending += 1;
                    }
                    self.keys_bytes[ending] = GUARD_BYTE;
                    self.keys_bytes_len = ending + 1;
                }

                while (true) : (i += 1) {
                    // Swap `it` and entries[i] so hashes stay in ascending order.
                    const tmp = self.entries[i];
                    self.entries[i] = it;
                    // !! Always check hash == MAXX_HASH to detect an empty slot !!
                    // The offset comparison covers the edge case of a real
                    // entry whose hash happens to equal MAXX_HASH.
                    if (tmp.hash == MAXX_HASH and tmp.offset == 0) { // empty slot: done
                        self.len += 1; // one new element written to HashCount
                        // self.recordStats(i - _i);
                        return;
                    }
                    it = tmp;
                } // while
            } // spinlock context
        }

        pub fn get(self: Self, key: KeyType) CountType {
            if (key.len > MAX_KEY_LEN) return 0;

            const hash = _hash(key);
            var i = hash >> shift;
            // var i = prime.mod(hash);

            while (self.entries[i].hash < hash) : (i += 1) {}

            var entry = &self.entries[i];
            var ss: HashType = undefined;
            const ss_ptr = &ss;
            while (entry.hash == hash) : (i += 1) {
                const found = std.mem.eql(u8, self.keyStr(entry, ss_ptr), key);
                if (found) return entry.count;
                entry = &self.entries[i + 1];
            }
            return 0;
        }

        // Consistency check: hashes must be ascending and each occupied
        // entry's stored key must hash back to the entry's hash.
        pub fn validate(self: *Self) bool {
            var prev: HashType = 0;
            var ss: HashType = undefined;
            const ss_ptr = &ss;

            for (self.entries[0..]) |*entry| {
                const curr = entry.hash;
                if (curr < MAXX_HASH and prev < MAXX_HASH) {
                    if (prev > curr) {
                        std.debug.print("\n!! hash ko tăng dần !!\n", .{});
                        return false;
                    }
                    prev = curr;
                    const hash = _hash(self.keyStr(entry, ss_ptr));
                    if (curr != hash) {
                        std.debug.print("\n!! hash ko trùng với key !!\n", .{});
                        return false;
                    }
                }
            }
            return true;
        }

        pub fn showStats(self: *Self) void {
            std.debug.print("\n\n(( HASH COUNT STATS ))\n", .{});
            const avg_probs = self.total_probs / self.total_puts;
            std.debug.print(
                "\nTOTAL {d} entries, max_probs: {d}, avg_probs: {d} ({d} / {d}).",
                .{ self.len, self.max_probs, avg_probs, self.total_probs, self.total_puts },
            );
            std.debug.print("\nHash Count Validation: {}\n", .{self.validate()});
        }
    };
}

test "HashCount for string" {
    const HC1024 = HashCount(1024);
    var counters: HC1024 = undefined;
    try counters.init(std.testing.allocator);
    defer counters.deinit();
    counters.put("a");
    try std.testing.expectEqual(@as(CountType, 1), counters.get("a"));
    try std.testing.expectEqual(@as(CountType, 1), counters.get("a"));
    try std.testing.expectEqual(@as(CountType, 0), counters.get("b"));
    counters.put("a");
    try std.testing.expectEqual(@as(CountType, 2), counters.get("a"));
    counters.put("b");
    try std.testing.expectEqual(@as(CountType, 1), counters.get("b"));
}
https://raw.githubusercontent.com/telexyz/bon/819e71c2e7195c5fe350be1434a67e142eafb184/src/hash_count_str.zig
const std = @import("std");
const buf = @import("ringbuffer.zig");
const gpio = @import("gpio.zig");
const stm32 = @import("stm32f103.zig");

pub const Error = error{ParityAndWordsizeNotSupportedByHw};

pub const Parity = enum { None, Even, Odd };
pub const WordSize = enum { Bit7, Bit8 };
pub const StopBits = enum { Stop05, Stop10, Stop15, Stop20 };
pub const PinMapping = enum { Standart, Remap, Uart3RemapToGpioD };

/// Comptime factory for an interrupt-driven, ring-buffered USART driver
/// bound to one STM32F103 USART peripheral (`baseAdr`), one pin mapping and
/// the peripheral clock frequency used to derive the baud-rate divisor.
pub fn NewUsart(comptime baseAdr: *volatile USART_t, comptime mapping: PinMapping, comptime UartClkFreq: u32) type {
    return struct {
        const Self = @This();
        fmt_buffer: [30]u8 = undefined,
        tx_buffer: buf.RingBuffer(30, u8) = buf.RingBuffer(30, u8){},

        /// Enable clocks, configure TX/RX pins and program baud rate, word
        /// size, parity and stop bits. Returns an error for frame formats
        /// the hardware cannot express (only 8 or 9 total data bits).
        pub fn init(self: *Self, comptime baudrate: u32, comptime size: WordSize, comptime parity: Parity, comptime stopBits: StopBits) Error!void {
            // Total bits on the wire = data bits + parity bit (if any).
            const wordSize = switch (size) {
                .Bit7 => 7,
                .Bit8 => 8,
            } + switch (parity) {
                .None => 0,
                .Even => 1,
                .Odd => 1,
            };
            const M = switch (wordSize) {
                8 => 0,
                9 => 1,
                else => return Error.ParityAndWordsizeNotSupportedByHw,
            };

            if (mapping != .Standart) {
                // BUGFIX: was `RCC.APB2ENR |= RCC_APB2Periph_AFIO;` — the
                // unqualified `RCC` is not in scope in this file; every other
                // register access goes through `stm32`.
                stm32.RCC.APB2ENR |= stm32.RCC_APB2Periph_AFIO;
            }

            const pins = switch (mapping) {
                .Standart => switch (baseAdr) {
                    USART1 => .{
                        .txd = gpio.Pin{ .gpio = gpio.GPIOA, .nr = 9 },
                        .rxd = gpio.Pin{ .gpio = gpio.GPIOA, .nr = 10 },
                    },
                    USART2 => .{
                        .txd = gpio.Pin{ .gpio = gpio.GPIOA, .nr = 2 },
                        .rxd = gpio.Pin{ .gpio = gpio.GPIOA, .nr = 3 },
                    },
                    USART3 => .{
                        .txd = gpio.Pin{ .gpio = gpio.GPIOB, .nr = 10 },
                        .rxd = gpio.Pin{ .gpio = gpio.GPIOB, .nr = 11 },
                    },
                    else => unreachable, // unknown USART
                },
                .Remap => switch (baseAdr) {
                    USART1 => .{
                        .txd = gpio.Pin{ .gpio = gpio.GPIOB, .nr = 6 },
                        .rxd = gpio.Pin{ .gpio = gpio.GPIOB, .nr = 7 },
                    },
                    USART2 => .{
                        .txd = gpio.Pin{ .gpio = gpio.GPIOD, .nr = 5 },
                        .rxd = gpio.Pin{ .gpio = gpio.GPIOD, .nr = 6 },
                    },
                    USART3 => .{
                        .txd = gpio.Pin{ .gpio = gpio.GPIOC, .nr = 10 },
                        .rxd = gpio.Pin{ .gpio = gpio.GPIOC, .nr = 11 },
                    },
                    else => unreachable, // unknown USART
                },
                .Uart3RemapToGpioD => switch (baseAdr) {
                    USART3 => .{
                        .txd = gpio.Pin{ .gpio = gpio.GPIOD, .nr = 8 },
                        .rxd = gpio.Pin{ .gpio = gpio.GPIOD, .nr = 9 },
                    },
                    else => unreachable, // unknown USART
                },
            };

            // BUGFIX: all three prongs read `USART1 =>`, which is a duplicate
            // switch value (compile error) and would never have enabled the
            // clocks for USART2/USART3.
            // NOTE(review): on the STM32F103, USART2/USART3 are APB1
            // peripherals (their base addresses above use PERIPH_BASE, not
            // APB2PERIPH_BASE), so these two enables should most likely be
            // `stm32.RCC.APB1ENR |= stm32.RCC_APB1Periph_USART2/3` — verify
            // against stm32f103.zig and RM0008 before relying on them.
            switch (baseAdr) {
                USART1 => stm32.RCC.APB2ENR |= stm32.RCC_APB2Periph_USART1,
                USART2 => stm32.RCC.APB2ENR |= stm32.RCC_APB2Periph_USART2,
                USART3 => stm32.RCC.APB2ENR |= stm32.RCC_APB2Periph_USART3,
                else => unreachable,
            }

            gpio.enableClk(pins.rxd.gpio);
            gpio.configInput(pins.rxd, .Pullup);
            gpio.configOutput(pins.txd, .AlternatePushPull, .MHz10);

            // 19200 Baud:
            // 72Mhz/16/19200 = 234.375
            // 234 = 0xEA
            // .375*16 = 6 => BRR = 0xEA6
            //
            // 115200 Baud:
            // 72MHz/16/115200 = 39.0625
            // 39 = 0x27
            // 0.0625*16 = 1
            // BRR = 0x271
            // Rounded integer division: BRR holds mantissa+fraction as one
            // 16x fixed-point divisor.
            baseAdr.BRR = (UartClkFreq + baudrate / 2) / baudrate;

            // UE (bit 13) | TE (bit 3) | TXEIE (bit 7)
            var cr1: u32 = (1 << 13 | 1 << 3 | 1 << 7);
            cr1 |= M << 12;
            cr1 |= switch (parity) {
                .None => 0b00,
                .Even => 0b10,
                .Odd => 0b11,
            } << 9;
            const cr2 = switch (stopBits) {
                .Stop10 => 0b00,
                .Stop05 => 0b01,
                .Stop15 => 0b10,
                .Stop20 => 0b11,
            } << 12;
            baseAdr.CR2 = cr2;
            baseAdr.CR1 = cr1;
        }

        /// Format into the small internal buffer and queue the result for
        /// transmission. Falls back to a fixed message when the format
        /// result does not fit into fmt_buffer.
        pub fn print(self: *Self, comptime fmt: []const u8, args: anytype) void {
            var fba = std.heap.FixedBufferAllocator.init(&self.fmt_buffer);
            var allocator = &fba.allocator;
            // BUGFIX: was `catch |_|` — discarding an error capture with
            // `|_|` is rejected by the compiler; a plain `catch` block is
            // the equivalent, valid form.
            const string = std.fmt.allocPrint(allocator, fmt, args) catch {
                writeText(self, "fmt_buffer too small");
                return;
            };
            defer allocator.free(string);
            writeText(self, string);
        }

        /// Queue text in the TX ring buffer (silently truncating when full)
        /// and enable the TX-empty interrupt so the ISR drains it.
        pub fn writeText(self: *Self, txt: []const u8) void {
            for (txt) |c| {
                if (self.tx_buffer.write(c) == false) break;
            }
            if (!self.tx_buffer.empty())
                baseAdr.CR1 |= 1 << 7; // TXEIE on: ISR drains the buffer
        }

        /// Blocking drain of the TX ring buffer (polling, no interrupt).
        pub fn send(self: *Self) void {
            while (self.tx_buffer.read()) |c| {
                writeChar(self, c);
            }
        }

        /// Busy-wait until the data register is free, then write one byte.
        pub fn writeChar(self: *Self, c: u8) void {
            while ((baseAdr.SR & 128) == 0) {} // wait for SR bit 7 (TXE)
            baseAdr.DR = c;
        }

        /// Interrupt handler: feed the next queued byte to the data
        /// register, or disable the TX-empty interrupt when drained.
        pub fn Isr(self: *Self) void {
            if ((baseAdr.SR & 128) == 128) {
                if (self.tx_buffer.read()) |c|
                    baseAdr.DR = c
                else
                    baseAdr.CR1 &= ~@as(u32, (1 << 7));
            }
        }
    };
}

/// USART register block as laid out in the STM32F103 memory map.
pub const USART_t = packed struct { SR: u32, DR: u32, BRR: u32, CR1: u32, CR2: u32, CR3: u32, GTPR: u32 };

const USART1_BASE: u32 = stm32.APB2PERIPH_BASE + 0x3800;
pub const USART1 = @intToPtr(*volatile USART_t, USART1_BASE);
const USART2_BASE: u32 = stm32.PERIPH_BASE + 0x4400;
pub const USART2 = @intToPtr(*volatile USART_t, USART2_BASE);
const USART3_BASE: u32 = stm32.PERIPH_BASE + 0x4800;
pub const USART3 = @intToPtr(*volatile USART_t, USART3_BASE);
https://raw.githubusercontent.com/WoodyAtHome/Stm32BluePill/128d7abc5bf8fc1a1b4f4d0fd50827329dd8f937/usart.zig
//! __emutls_get_address specific builtin
//!
//! derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
//! https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c

const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");

const abort = std.os.abort;
const assert = std.debug.assert;
const expect = std.testing.expect;

/// defined in C as:
/// typedef unsigned int gcc_word __attribute__((mode(word)));
const gcc_word = usize;

pub const panic = common.panic;

comptime {
    if (builtin.link_libc and (builtin.abi == .android or builtin.os.tag == .openbsd)) {
        @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage, .visibility = common.visibility });
    }
}

/// public entrypoint for generated code using EmulatedTLS
pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *anyopaque {
    return control.getPointer();
}

/// Simple allocator interface, to avoid pulling in the whole
/// std allocator implementation.
const simple_allocator = struct {
    /// Allocate a memory chunk for requested type. Return a pointer on the data.
    pub fn alloc(comptime T: type) *T {
        return @ptrCast(@alignCast(advancedAlloc(@alignOf(T), @sizeOf(T))));
    }

    /// Allocate a slice of T, with len elements.
    pub fn allocSlice(comptime T: type, len: usize) []T {
        // BUGFIX: was `[0 .. len - 1]`, which returned one element fewer
        // than both the allocation above and this function's documented
        // contract ("with len elements") provide.
        return @as([*]T, @ptrCast(@alignCast(
            advancedAlloc(@alignOf(T), @sizeOf(T) * len),
        )))[0..len];
    }

    /// Allocate a memory chunk.
    pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 {
        const minimal_alignment = @max(@alignOf(usize), alignment);

        var aligned_ptr: ?*anyopaque = undefined;
        if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) {
            abort();
        }

        return @as([*]u8, @ptrCast(aligned_ptr));
    }

    /// Resize a slice.
    pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T {
        var c_ptr: *anyopaque = @as(*anyopaque, @ptrCast(slice.ptr));
        var new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort()));
        return new_array[0..len];
    }

    /// Free a memory chunk allocated with simple_allocator.
    pub fn free(ptr: anytype) void {
        std.c.free(@as(*anyopaque, @ptrCast(ptr)));
    }
};

/// Simple array of ?ObjectPointer with automatic resizing and
/// automatic storage allocation.
const ObjectArray = struct {
    const ObjectPointer = *anyopaque;

    // content of the array
    slots: []?ObjectPointer,

    /// create a new ObjectArray with n slots. must call deinit() to deallocate.
    pub fn init(n: usize) *ObjectArray {
        var array = simple_allocator.alloc(ObjectArray);
        array.* = ObjectArray{
            .slots = simple_allocator.allocSlice(?ObjectPointer, n),
        };
        for (array.slots) |*object| {
            object.* = null;
        }
        return array;
    }

    /// deallocate the ObjectArray.
    pub fn deinit(self: *ObjectArray) void {
        // deallocate used objects in the array
        // NOTE(review): slots that were never populated are still null here;
        // confirm that passing them through simple_allocator.free (and thus
        // @ptrCast of a null optional) is intended.
        for (self.slots) |*object| {
            simple_allocator.free(object.*);
        }
        simple_allocator.free(self.slots);
        simple_allocator.free(self);
    }

    /// resize the ObjectArray if needed.
    pub fn ensureLength(self: *ObjectArray, new_len: usize) *ObjectArray {
        const old_len = self.slots.len;

        if (old_len > new_len) {
            return self;
        }

        // reallocate
        self.slots = simple_allocator.reallocSlice(?ObjectPointer, self.slots, new_len);

        // init newly added slots
        for (self.slots[old_len..]) |*object| {
            object.* = null;
        }

        return self;
    }

    /// Retrieve the pointer at request index, using control to initialize it if needed.
    pub fn getPointer(self: *ObjectArray, index: usize, control: *emutls_control) ObjectPointer {
        if (self.slots[index] == null) {
            // initialize the slot
            const size = control.size;
            const alignment: u29 = @truncate(control.alignment);

            var data = simple_allocator.advancedAlloc(alignment, size);
            errdefer simple_allocator.free(data);

            if (control.default_value) |value| {
                // default value: copy the content to newly allocated object.
                @memcpy(data[0..size], @as([*]const u8, @ptrCast(value)));
            } else {
                // no default: return zeroed memory.
                @memset(data[0..size], 0);
            }

            self.slots[index] = @as(*anyopaque, @ptrCast(data));
        }

        return self.slots[index].?;
    }
};

// Global structure for Thread Storage.
// It provides thread-safety for on-demand storage of Thread Objects.
const current_thread_storage = struct {
    var key: std.c.pthread_key_t = undefined;
    var init_once = std.once(current_thread_storage.init);

    /// Return a per thread ObjectArray with at least the expected index.
    pub fn getArray(index: usize) *ObjectArray {
        if (current_thread_storage.getspecific()) |array| {
            // we already have a specific. just ensure the array is
            // big enough for the wanted index.
            return array.ensureLength(index);
        }

        // no specific. we need to create a new array.
        // make it to contains at least 16 objects (to avoid too much
        // reallocation at startup).
        const size = @max(16, index);

        // create a new array and store it.
        var array: *ObjectArray = ObjectArray.init(size);
        current_thread_storage.setspecific(array);
        return array;
    }

    /// Return casted thread specific value.
    fn getspecific() ?*ObjectArray {
        return @ptrCast(@alignCast(std.c.pthread_getspecific(current_thread_storage.key)));
    }

    /// Set casted thread specific value.
    fn setspecific(new: ?*ObjectArray) void {
        if (std.c.pthread_setspecific(current_thread_storage.key, @as(*anyopaque, @ptrCast(new))) != 0) {
            abort();
        }
    }

    /// Initialize pthread_key_t.
    fn init() void {
        if (std.c.pthread_key_create(&current_thread_storage.key, current_thread_storage.deinit) != .SUCCESS) {
            abort();
        }
    }

    /// Invoked by pthread specific destructor. the passed argument is the ObjectArray pointer.
    fn deinit(arrayPtr: *anyopaque) callconv(.C) void {
        var array: *ObjectArray = @ptrCast(@alignCast(arrayPtr));
        array.deinit();
    }
};

const emutls_control = extern struct {
    // A emutls_control value is a global value across all
    // threads. The threads shares the index of TLS variable. The data
    // array (containing address of allocated variables) is thread
    // specific and stored using pthread_setspecific().

    // size of the object in bytes
    size: gcc_word,

    // alignment of the object in bytes
    alignment: gcc_word,

    object: extern union {
        // data[index-1] is the object address / 0 = uninit
        index: usize,

        // object address, when in single thread env (not used)
        address: *anyopaque,
    },

    // null or non-zero initial value for the object
    default_value: ?*const anyopaque,

    // global Mutex used to serialize control.index initialization.
    var mutex: std.c.pthread_mutex_t = std.c.PTHREAD_MUTEX_INITIALIZER;

    // global counter for keeping track of requested indexes.
    // access should be done with mutex held.
    var next_index: usize = 1;

    /// Simple wrapper for global lock.
    fn lock() void {
        if (std.c.pthread_mutex_lock(&emutls_control.mutex) != .SUCCESS) {
            abort();
        }
    }

    /// Simple wrapper for global unlock.
    fn unlock() void {
        if (std.c.pthread_mutex_unlock(&emutls_control.mutex) != .SUCCESS) {
            abort();
        }
    }

    /// Helper to retrieve and initialize global unique index per emutls variable.
    pub fn getIndex(self: *emutls_control) usize {
        // Two threads could race against the same emutls_control.
        // Use atomic for reading coherent value lockless.
        const index_lockless = @atomicLoad(usize, &self.object.index, .Acquire);

        if (index_lockless != 0) {
            // index is already initialized, return it.
            return index_lockless;
        }

        // index is uninitialized: take global lock to avoid possible race.
        emutls_control.lock();
        defer emutls_control.unlock();

        const index_locked = self.object.index;

        if (index_locked != 0) {
            // we lost a race, but index is already initialized: nothing particular to do.
            return index_locked;
        }

        // Store a new index atomically (for having coherent index_lockless reading).
        @atomicStore(usize, &self.object.index, emutls_control.next_index, .Release);

        // Increment the next available index
        emutls_control.next_index += 1;

        return self.object.index;
    }

    /// Simple helper for testing purpose.
    pub fn init(comptime T: type, default_value: ?*const T) emutls_control {
        return emutls_control{
            .size = @sizeOf(T),
            .alignment = @alignOf(T),
            .object = .{ .index = 0 },
            .default_value = @as(?*const anyopaque, @ptrCast(default_value)),
        };
    }

    /// Get the pointer on allocated storage for emutls variable.
    pub fn getPointer(self: *emutls_control) *anyopaque {
        // ensure current_thread_storage initialization is done
        current_thread_storage.init_once.call();

        const index = self.getIndex();
        var array = current_thread_storage.getArray(index);

        return array.getPointer(index - 1, self);
    }

    /// Testing helper for retrieving typed pointer.
    pub fn get_typed_pointer(self: *emutls_control, comptime T: type) *T {
        assert(self.size == @sizeOf(T));
        assert(self.alignment == @alignOf(T));
        return @ptrCast(@alignCast(self.getPointer()));
    }
};

test "simple_allocator" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    var data1: *[64]u8 = simple_allocator.alloc([64]u8);
    defer simple_allocator.free(data1);
    for (data1) |*c| {
        c.* = 0xff;
    }

    var data2: [*]u8 = simple_allocator.advancedAlloc(@alignOf(u8), 64);
    defer simple_allocator.free(data2);
    for (data2[0..63]) |*c| {
        c.* = 0xff;
    }
}

test "__emutls_get_address zeroed" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    var ctl = emutls_control.init(usize, null);
    try expect(ctl.object.index == 0);

    // retrieve a variable from ctl
    var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(ctl.object.index != 0); // index has been allocated for this ctl
    try expect(x.* == 0); // storage has been zeroed

    // modify the storage
    x.* = 1234;

    // retrieve a variable from ctl (same ctl)
    var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(y.* == 1234); // same content that x.*
    try expect(x == y); // same pointer
}

test "__emutls_get_address with default_value" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    const value: usize = 5678; // default value
    var ctl = emutls_control.init(usize, &value);
    try expect(ctl.object.index == 0);

    var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(ctl.object.index != 0);
    try expect(x.* == 5678); // storage initialized with default value

    // modify the storage
    x.* = 9012;
    try expect(value == 5678); // the default value didn't change

    var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(y.* == 9012); // the modified storage persists
}

test "test default_value with differents sizes" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    const testType = struct {
        fn _testType(comptime T: type, value: T) !void {
            var ctl = emutls_control.init(T, &value);
            var x = ctl.get_typed_pointer(T);
            try expect(x.* == value);
        }
    }._testType;

    try testType(usize, 1234);
    try testType(u32, 1234);
    try testType(i16, -12);
    try testType(f64, -12.0);
    try testType(
        @TypeOf("012345678901234567890123456789"),
        "012345678901234567890123456789",
    );
}
https://raw.githubusercontent.com/mundusnine/FoundryTools_windows_x64/b64cdb7e56db28eb710a05a089aed0daff8bc8be/lib/compiler_rt/emutls.zig
//! Character-frequency counting and Huffman-tree construction over the
//! UTF-8 text embedded from `sample.txt`. Uses a project-local MinHeap
//! (heap.zig) to repeatedly merge the two least-frequent nodes.
const std = @import("std");
const print = std.debug.print;
const heap = @import("heap.zig");
const MinHeap = heap.MinHeap;

const CharTableError = error{OutOfSpace};

const sample = @embedFile("sample.txt");

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    // var utf8_iterator = (try std.unicode.Utf8View.init(sample_string)).iterator();
    // while (utf8_iterator.nextCodepoint()) |code_point| {
    //     _ = code_point;
    //     // print("type: {}\n", .{@TypeOf(code_point)});
    //     // print("0x{x} is {u} \n", .{ code_point, code_point });
    //     // TODO: add to hash table here
    // }
    var ct = try char_table.init(arena.allocator(), prospectorHash, 4096);
    defer ct.deinit(arena.allocator());
    try ct.add('a');
    try ct.add('b');
    try ct.add('c');
    const head = (try createTree(arena.allocator(), ct)).?;
    print("head count: {}\n", .{head.count});
}

/// Binary-tree node for the Huffman tree. Leaves carry a codepoint and its
/// frequency; internal nodes use char 0x00 and the sum of child counts.
const Node = struct {
    left: ?*Node = null,
    right: ?*Node = null,
    char: u21, // char represents a Unicode codepoint
    count: u32,

    /// In-order traversal that prints each node's codepoint and count.
    fn traverse(head: *Node) void {
        if (head.left) |left| {
            traverse(left);
        }
        print("current char: {u}\tcurrent count: {d}\n", .{ head.char, head.count });
        if (head.right) |right| {
            traverse(right);
        }
    }
};

const TreeError = error{
    EmptyTable,
};

/// Builds a Huffman tree from the frequencies in `ct` and returns its root.
/// All nodes are allocated from `allocator`; with an arena the whole tree is
/// freed in one deinit. Returns TreeError.EmptyTable if `ct` has no entries.
fn createTree(allocator: std.mem.Allocator, ct: char_table) !?*Node {
    if (ct.distinct_char_count == 0) { // no chars in char table
        return TreeError.EmptyTable;
    }
    if (ct.distinct_char_count == 1) {
        // NOTE(review): this returns a Node with undefined fields — callers
        // reading .count or .char get garbage (see existing TODO).
        return allocator.create(Node); // TODO: make this return a useful pointer to a node
    }

    // TODO: allocate all nodes at once and create tree in one pass
    const CharHeap = MinHeap(Node);
    var target_cap = ct.distinct_char_count;
    var char_heap: CharHeap = try CharHeap.init(allocator, target_cap);
    // defer heap.deinit();
    // print("heap array len: {}\n", .{heap.array.items.len});
    // print("heap cap: {}\n", .{heap.array.capacity});
    var heap_ptr = &char_heap;
    // Seed the min-heap with one leaf node per distinct character.
    for (ct.table) |e| {
        if (e.isInitialized()) {
            // print("element: {}:{d}\n\n", .{ e.char, e.count });
            var new_node = try allocator.create(Node);
            new_node.char = e.char;
            new_node.count = e.count;
            new_node.left = null;
            new_node.right = null;
            heap_ptr.insert(new_node);
            // print("heap array: {any}\n\n", .{heap_ptr.array.items});
        }
    }

    // NOTE(review): this pre-allocated node is never used — `head` is
    // overwritten by the loop below on every iteration. Harmless with an
    // arena, but a leak with a general-purpose allocator.
    var head = try allocator.create(Node);
    // print("heap array: {any}\n", .{heap_ptr.array.items});
    // Classic Huffman merge: extract the two smallest, link them under a new
    // internal node, and reinsert until one node remains (the root).
    while (char_heap.array.items.len > 1) {
        // print("heap before: {any}\n\n", .{heap_ptr.array.items});
        const left_child = heap_ptr.extract() catch unreachable;
        const right_child = heap_ptr.extract() catch unreachable;
        const sum_counts: u32 = left_child.count + right_child.count;
        const new_node = try allocator.create(Node);
        new_node.* = Node{ .left = left_child, .right = right_child, .char = 0x00, .count = sum_counts };
        head = new_node;
        // print("new node: {any}\n\n", .{new_node});
        heap_ptr.insert(new_node);
    }
    // const ret = Tree{ .head = head, .size = ct.cap };
    return head;
}

// fn freeTree(allocator: std.mem.Allocator, head: ?*Node) void {
//     var not_null_head = head orelse return;
//     print("head stuff: {}\n", .{not_null_head});
//     freeTree(allocator, not_null_head.left);
//     freeTree(allocator, not_null_head.right);
//     allocator.destroy(not_null_head);
// }

test "create tree" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    var ct = try char_table.init(arena.allocator(), prospectorHash, 4096);
    // defer ct.deinit(std.testing.allocator);
    try ct.add('a');
    // try ct.add('a');
    try ct.add('b');
    try ct.add('c');
    // for (ct.table) |item| {
    //     if (item.isInitialized()) {
    //         print("ct item: {any}\n", .{item});
    //     }
    // }
    const head = try createTree(arena.allocator(), ct);
    head.?.traverse();
    // defer freeTree(std.testing.allocator, head);
    const node = head.?;
    print("head count: {}\n", .{node.count});
}

// ----------------------------------------
// Counting chars
// ----------------------------------------

/// a char_table is used to count the number of UTF-8 characters in a string
pub const char_table = struct {
    cap: u32,
    distinct_char_count: u32,
    table: []char_table_entry,
    hash_fn: *const fn (char: u21) u32,

    /// hash_fn can be any function that hashes Unicode code points to u32. The char_table
    /// capacity is determined by initial_cap and currently doesn't grow. TODO: make it grow
    pub fn init(allocator: std.mem.Allocator, hash_fn: *const fn (char: u21) u32, initial_cap: u32) !char_table {
        // NOTE: use a std.ArrayList here (like the min heap implementation)
        var table = try allocator.alloc(char_table_entry, initial_cap);
        for (0..table.len) |i| {
            // these values for char and count represent an uninitialized entry
            table[i] = char_table_entry{ .char = 0x00, .count = std.math.maxInt(u32) };
        }
        return char_table{
            .cap = initial_cap,
            .distinct_char_count = 0,
            .table = table,
            .hash_fn = hash_fn,
        };
    }

    /// Frees the backing array. Must be called with the same allocator
    /// passed to init.
    pub fn deinit(Self: @This(), allocator: std.mem.Allocator) void {
        allocator.free(Self.table);
    }

    /// add one to the count of char in the table
    pub fn add(Self: *@This(), char: u21) !void {
        const idx = try Self.lookup(char);
        // initialize if needed
        if (Self.table[idx].char == 0x00 and Self.table[idx].count == std.math.maxInt(u32)) {
            Self.table[idx] = char_table_entry{ .char = char, .count = 0 };
            Self.distinct_char_count += 1;
        }
        Self.table[idx].count += 1;
    }

    /// returns the index where the char is located in the backing array. If the char hasn't
    /// been added yet, returns the index of an uninitialized entry.
    fn lookup(Self: @This(), char: u21) !usize {
        // Open addressing with linear probing, starting at hash % cap.
        const hash = Self.hash_fn(char);
        var candidate_idx = @rem(hash, Self.cap);
        var candidate = Self.table[candidate_idx];
        while (candidate.count != std.math.maxInt(u32) and candidate.char != char) {
            candidate_idx += 1;
            // TODO: use a better heuristic here, like if array is 80% full or something
            if (candidate_idx >= Self.cap) {
                // NOTE(review): the probe runs off the end instead of
                // wrapping to index 0, so OutOfSpace can be reported while
                // free slots remain earlier in the table — confirm intended.
                // TODO: grow the array instead of returning error
                return CharTableError.OutOfSpace;
            }
            candidate = Self.table[candidate_idx];
        }
        return candidate_idx;
    }
};

test "basic hash table ops" {
    // var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    // defer arena.deinit();
    // var allocator = arena.allocator();
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    var ct = try char_table.init(arena.allocator(), prospectorHash, 4096);
    var utf8_iterator = (try std.unicode.Utf8View.init(sample)).iterator();
    while (utf8_iterator.nextCodepoint()) |code_point| {
        try ct.add(code_point);
    }
    // Summed per-character counts must equal the total codepoint count.
    var total_codepoint_count: u32 = 0;
    for (ct.table) |e| {
        if (e.count != std.math.maxInt(u32)) {
            total_codepoint_count += e.count;
            // var s: [4]u8 = undefined;
            // _ = try std.unicode.utf8Encode(e.char, &s);
            // print("char: {s} \t count: {d}\n", .{ s, e.count });
        }
    }
    const expected_count = try std.unicode.utf8CountCodepoints(sample);
    try std.testing.expectEqual(expected_count, total_codepoint_count);
    print("expected: {d}, actual: {d}\n", .{ expected_count, total_codepoint_count });
}

// can I embed this in the char table?
/// One slot of the hash table. The pair (char == 0x00, count == maxInt(u32))
/// is the "uninitialized" sentinel, which means the NUL codepoint itself
/// cannot be counted — presumably acceptable for text input.
const char_table_entry = struct {
    char: u21,
    count: u32,

    fn isInitialized(Self: @This()) bool {
        return Self.char != 0x00 and Self.count != std.math.maxInt(u32);
    }
};

/// taken from https://nullprogram.com/blog/2018/07/31/
fn prospectorHash(c: u21) u32 {
    // Wrapping multiplies (*%) are required: the intermediate products
    // intentionally overflow u32.
    var x = @as(u32, c);
    x ^= x >> 16;
    x *%= @as(u32, 0x45d9f3b);
    x ^= x >> 16;
    x *%= @as(u32, 0x45d9f3b);
    x ^= x >> 16;
    return x;
}
https://raw.githubusercontent.com/benmuth/huffman-zig/d573d327888517295267bcb67196bdf1d2836c80/src/main.zig
//! virtio-gpu display driver: negotiates a 2D framebuffer resource over the
//! virtio control queue (resource create / attach backing / set scanout) and
//! flushes dirty rectangles to the host with transfer + flush commands.
const os = @import("root").os;
const lib = @import("lib");
const config = @import("config");

const log = lib.output.log.scoped(.{
    .prefix = "gpu/virtio",
    .filter = .info,
}).write;

const virtio_pci = os.drivers.misc.virtio_pci;

/// virtio-gpu driver instance
const Driver = struct {
    transport: virtio_pci.Driver,
    // Number of submitted requests not yet completed by the device.
    inflight: u32 = 0,
    display_region: lib.graphics.image_region.ImageRegion = undefined,

    // Initialize the virtio transport, but don't change modes
    pub fn init(pciaddr: os.platform.pci.Addr) !Driver {
        var v = try virtio_pci.Driver.init(pciaddr, 0, 0);
        var d: Driver = .{ .transport = v };
        return d;
    }

    /// Invalidate callback installed on `display_region`: recovers the Driver
    /// via @fieldParentPtr and pushes the dirty rectangle to the host. The
    /// first argument to updateRect is the byte offset of (x, y) within the
    /// backing store, computed from the subregion's base pointer.
    fn invalidateRectFunc(region: *lib.graphics.image_region.ImageRegion, x: usize, y: usize, width: usize, height: usize) void {
        const self = @fieldParentPtr(Driver, "display_region", region);
        self.updateRect(@ptrToInt(self.display_region.subregion(x, y, width, height).bytes.ptr) - @ptrToInt(self.display_region.bytes.ptr), .{
            .x = @intCast(u32, x),
            .y = @intCast(u32, y),
            .width = @intCast(u32, width),
            .height = @intCast(u32, height),
        });
    }

    // Do a modeswitch to the described mode
    // Submits three chained commands on queue 0 (create 2D resource, attach
    // the guest page at `phys` as backing, set scanout 0) then blocks in
    // wait() until the device has answered all of them.
    pub fn modeset(self: *Driver, phys: usize) void {
        var iter = self.transport.iter(0);
        {
            var msg: ResourceCreate2D = .{
                .hdr = .{
                    .cmdtype = virtio_gpu_cmd_res_create_2d,
                    .flags = 0,
                    .fenceid = 0,
                    .ctxid = 0,
                },
                .resid = 1,
                // NOTE(review): format 1 is a hard-coded pixel format value;
                // the region below is configured as .rgbx — confirm they match.
                .format = 1,
                .width = @intCast(u32, self.display_region.width),
                .height = @intCast(u32, self.display_region.height),
            };
            var resp: ConfHdr = undefined;
            iter.begin();
            iter.put(&msg, @sizeOf(ResourceCreate2D), virtio_pci.vring_desc_flag_next);
            iter.put(&resp, @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write);
            self.inflight += 1;
        }
        {
            var msg: ResourceAttachBacking = .{
                .hdr = .{
                    .cmdtype = virtio_gpu_cmd_res_attach_backing,
                    .flags = 0,
                    .fenceid = 0,
                    .ctxid = 0,
                },
                .resid = 1,
                .entrynum = 1,
            };
            // One backing entry covering the whole framebuffer (4 bytes/px).
            var msg1: ResourceAttachBackingEntry = .{
                .addr = phys,
                .len = @intCast(u32, self.display_region.width) * @intCast(u32, self.display_region.height) * 4,
            };
            var resp: ConfHdr = undefined;
            iter.begin();
            iter.put(&msg, @sizeOf(ResourceAttachBacking), virtio_pci.vring_desc_flag_next);
            iter.put(&msg1, @sizeOf(ResourceAttachBackingEntry), virtio_pci.vring_desc_flag_next);
            iter.put(&resp, @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write);
            self.inflight += 1;
        }
        {
            var msg: SetScanout = .{
                .hdr = .{
                    .cmdtype = virtio_gpu_cmd_set_scanout,
                    .flags = 0,
                    .fenceid = 0,
                    .ctxid = 0,
                },
                .resid = 1,
                .scanid = 0,
                .rect = .{
                    .x = 0,
                    .y = 0,
                    .width = @intCast(u32, self.display_region.width),
                    .height = @intCast(u32, self.display_region.height),
                },
            };
            var resp: ConfHdr = undefined;
            iter.begin();
            iter.put(&msg, @sizeOf(SetScanout), virtio_pci.vring_desc_flag_next);
            iter.put(&resp, @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write);
            self.inflight += 1;
        }
        self.transport.start(0);
        self.wait();
    }

    /// Update *only* the rectangle
    /// Two commands: transfer the guest bytes at `offset` into the host
    /// resource, then flush that rectangle to the scanout. Blocks until done.
    pub fn updateRect(self: *Driver, offset: u64, rect: Rect) void {
        var iter = self.transport.iter(0);
        {
            var msg: TransferHost2D = .{
                .hdr = .{
                    .cmdtype = virtio_gpu_cmd_transfer_to_host_2d,
                    .flags = 0,
                    .fenceid = 0,
                    .ctxid = 0,
                },
                .resid = 1,
                .offset = offset,
                .rect = rect,
            };
            var resp: ConfHdr = undefined;
            iter.begin();
            iter.put(&msg, @sizeOf(TransferHost2D), virtio_pci.vring_desc_flag_next);
            iter.put(&resp, @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write);
            self.inflight += 1;
        }
        {
            var msg: ResourceFlush = .{
                .hdr = .{
                    .cmdtype = virtio_gpu_cmd_res_flush,
                    .flags = 0,
                    .fenceid = 0,
                    .ctxid = 0,
                },
                .resid = 1,
                .rect = rect,
            };
            var resp: ConfHdr = undefined;
            iter.begin();
            iter.put(&msg, @sizeOf(ResourceFlush), virtio_pci.vring_desc_flag_next);
            iter.put(&resp, @sizeOf(ConfHdr), virtio_pci.vring_desc_flag_write);
            self.inflight += 1;
        }
        self.transport.start(0);
        self.wait();
    }

    /// Wait for request to finish.
    /// Busy-polls the queue, processing completions until `inflight`
    /// drops to zero. The volatile pointer prevents the load from being
    /// hoisted out of the loop.
    fn wait(self: *Driver) void {
        while (true) {
            var a: *volatile u32 = &self.inflight;
            if (a.* == 0)
                break;
            self.transport.process(0, process, self);
        }
    }
};

// Wire-format structs for the virtio-gpu control queue. `packed` keeps the
// in-memory layout matching what the device expects.
const ConfHdr = packed struct {
    cmdtype: u32,
    flags: u32,
    fenceid: u64,
    ctxid: u32,
    _: u32 = 0,
};

const ResourceCreate2D = packed struct { hdr: ConfHdr, resid: u32, format: u32, width: u32, height: u32 };

const ResourceAttachBacking = packed struct { hdr: ConfHdr, resid: u32, entrynum: u32 };

const ResourceAttachBackingEntry = packed struct {
    addr: u64,
    len: u32,
    _: u32 = 0,
};

const Rect = packed struct { x: u32, y: u32, width: u32, height: u32 };

const SetScanout = packed struct {
    hdr: ConfHdr,
    rect: Rect,
    scanid: u32,
    resid: u32,
};

const TransferHost2D = packed struct { hdr: ConfHdr, rect: Rect, offset: u64, resid: u32, _: u32 = 0 };

const ResourceFlush = packed struct { hdr: ConfHdr, rect: Rect, resid: u32, _: u32 = 0 };

// Feature bits
const virtio_feature_version_1 = 32;
const virtio_feature_access_platform = 33;
const virtio_feature_ring_packed = 34;
const virtio_feature_order_platform = 36;
const virtio_feature_sr_iov = 37;

// 2D cmds
const virtio_gpu_cmd_get_display_info = 0x0100;
const virtio_gpu_cmd_res_create_2d = 0x101;
const virtio_gpu_cmd_res_unref = 0x102;
const virtio_gpu_cmd_set_scanout = 0x103;
const virtio_gpu_cmd_res_flush = 0x104;
const virtio_gpu_cmd_transfer_to_host_2d = 0x105;
const virtio_gpu_cmd_res_attach_backing = 0x106;
const virtio_gpu_cmd_res_detatch_backing = 0x107;
const virtio_gpu_cmd_get_capset_info = 0x108;
const virtio_gpu_cmd_get_capset = 0x109;
const virtio_gpu_cmd_get_edid = 0x10A;

// Cursor cmds
const virtio_gpu_cmd_update_cursor = 0x0300;
const virtio_gpu_cmd_move_cursor = 0x301;

// Success
const virtio_gpu_resp_ok_nodata = 0x1100;
const virtio_gpu_resp_ok_display_info = 0x1101;
const virtio_gpu_resp_ok_capset_info = 0x1102;
const virtio_gpu_resp_ok_capset = 0x1103;
const virtio_gpu_resp_ok_edid = 0x1104;

// Error
const virtio_gpu_resp_err_unspecified = 0x1200;
const virtio_gpu_resp_err_out_of_mem = 0x1201;
const virtio_gpu_resp_err_invalid_scanout_id = 0x1202;
const virtio_gpu_resp_err_invalid_res_id = 0x1203;
const virtio_gpu_resp_err_invalid_ctx_id = 0x1204;
const virtio_gpu_resp_err_invalid_parameter = 0x1205;

const virtio_gpu_flag_fence = (1 << 0);

/// Completion callback passed to transport.process: recycles the descriptor
/// chain and decrements the in-flight counter that wait() polls.
fn process(self: *Driver, i: u8, head: virtio_pci.Descriptor) void {
    self.transport.freeChain(i, head);
    self.inflight -= 1;
}

/// Global rectangle update, but with a global context
// NOTE(review): `Driver` has no `pitch` or `width` fields (they live on
// `display_region`), so `self.pitch` / `self.width` below cannot resolve.
// This presumably only compiles because the function is never referenced;
// likely should be `self.display_region.pitch` / `.width` — confirm.
fn updater(
    bb: [*]u8,
    yoff_src: usize,
    yoff_dest: usize,
    ysize: usize,
    pitch: usize,
    ctx: usize,
) void {
    var self = @intToPtr(*Driver, ctx);
    _ = bb;
    _ = pitch;
    self.updateRect(self.pitch * yoff_src, .{
        .x = 0,
        .y = @truncate(u32, yoff_dest),
        .width = self.width,
        .height = @truncate(u32, ysize),
    });
}

/// Probes and registers a virtio-gpu controller at the given PCI address:
/// allocates the driver and framebuffer memory, performs the modeset, and
/// hands the framebuffer to the kernel. Failures are logged and abandoned.
pub fn registerController(addr: os.platform.pci.Addr) void {
    if (comptime (!config.drivers.gpu.virtio_gpu.enable))
        return;

    const drv = os.memory.pmm.physHeap().create(Driver) catch {
        log(.err, "Virtio display controller: Allocation failure", .{});
        return;
    };
    errdefer os.memory.pmm.physHeap().destroy(drv);

    drv.* = Driver.init(addr) catch {
        log(.err, "Virtio display controller: Init has failed!", .{});
        return;
    };
    // NOTE(review): Driver declares no deinit() in this file — verify this
    // errdefer resolves (it is only analyzed if a later statement can fail).
    errdefer drv.deinit();

    // @TODO: Get the actual screen resolution
    const res = config.drivers.gpu.virtio_gpu.default_resolution;
    const num_bytes = res.width * res.height * 4;

    const phys = os.memory.pmm.allocPhys(num_bytes) catch return;
    errdefer os.memory.pmm.freePhys(phys, num_bytes);

    drv.display_region = .{
        .bytes = os.platform.phys_ptr([*]u8).from_int(phys).get_writeback()[0..num_bytes],
        .pitch = res.width * 4,
        .width = res.width,
        .height = res.height,
        .invalidateRectFunc = Driver.invalidateRectFunc,
        .pixel_format = .rgbx,
    };

    drv.modeset(phys);
    log(.info, "Modeset done!", .{});
    os.kernel.addFramebuffer(&drv.display_region);
}

/// General callback on an interrupt, context is a pointer to a Driver structure
pub fn interrupt(_: *os.platform.InterruptFrame, context: u64) void {
    var driver = @intToPtr(*Driver, context);
    driver.transport.acknowledge();
    driver.transport.process(0, process, driver);
}
https://raw.githubusercontent.com/FlorenceOS/Florence/aaa5a9e568197ad24780ec9adb421217530d4466/subprojects/flork/src/drivers/gpu/virtio_gpu.zig
//! Run-length-encoded array: stores consecutive equal values as a single
//! (value, run-length) entry while exposing flat indexing via get().
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;
const expectError = testing.expectError;
const Allocator = std.mem.Allocator;
const memory_package = @import("memory.zig");
const Memory = memory_package.Memory;
const test_suite_package = @import("test_util.zig");
const GeneralSuite = test_suite_package.GeneralSuite;

const RleError = error{IndexOutOfBounds};

/// Returns a run-length-encoded array type over element type `T`.
pub fn RleArray(comptime T: type) type {
    return struct {
        const Self = @This();
        // Tuple of (value, run length).
        const Entry = struct { T, usize };

        memory: *Memory,
        // Number of entries in use; `entries.len` is the capacity.
        entries_count: usize,
        entries: []Entry,
        // Total logical element count (sum of all run lengths).
        count: usize,

        /// Allocates with an initial capacity of 8 entries.
        /// Caller must deinit() to release the backing storage.
        pub fn init(memory: *Memory) !Self {
            return .{
                .memory = memory,
                .entries_count = 0,
                .entries = try memory.alloc(Entry, 8),
                .count = 0,
            };
        }

        pub fn deinit(self: Self) void {
            self.memory.free(self.entries);
        }

        /// Appends `value`: extends the current run if it matches,
        /// otherwise starts a new run.
        pub fn push(self: *Self, value: T) !void {
            // Fix: the previous guard tested `self.entries.len <= 0`, which
            // is never true (init always allocates capacity 8), so the very
            // first push read the uninitialized `entries[0]`. A new run must
            // start when no entry exists yet (`entries_count == 0`) or the
            // top run holds a different value.
            if (self.entries_count == 0 or self.entries[self.entries_count - 1][0] != value) {
                try self.push_entry(.{ value, 1 });
            } else {
                self.entries[self.entries_count - 1][1] += 1;
            }
            self.count += 1;
        }

        /// Returns the element at flat index `index`, walking the runs.
        /// Returns error.IndexOutOfBounds for index >= count.
        pub fn get(self: *const Self, index: usize) !T {
            // `index` is unsigned, so only the upper bound needs checking
            // (the previous `index < 0` test was always false on usize).
            if (index >= self.count) {
                return RleError.IndexOutOfBounds;
            }
            var remaining = index;
            for (0..self.entries_count) |i| {
                const entry = self.entries[i];
                if (remaining < entry[1]) {
                    return entry[0];
                }
                remaining -= entry[1];
            }
            // The run lengths sum to `count`, so a bounds-checked index
            // always resolves inside the loop.
            unreachable;
        }

        /// Appends a raw entry, doubling capacity via Memory.growCapacity
        /// when full.
        fn push_entry(self: *Self, entry: Entry) !void {
            const capacity = self.entries.len;
            if (capacity < self.entries_count + 1) {
                const new_capacity = Memory.growCapacity(capacity);
                self.entries = try self.memory.realloc(self.entries, new_capacity);
            }
            self.entries[self.entries_count] = entry;
            self.entries_count += 1;
        }
    };
}

test "array size gets properly reallocated" {
    const allocator = testing.allocator;
    var s = try GeneralSuite.init(allocator);
    defer s.deinit();

    var array = try RleArray(u8).init(s.memory);
    defer array.deinit();

    try expect(array.entries.len == 8);
    for (0..8) |i| {
        try array.push(@intCast(i));
        try expect(array.entries.len == 8);
        try expect(array.entries[i][0] == i);
    }
    try array.push(8);
    try expect(array.entries.len == 16);
    try expect(array.entries[8][0] == 8);
    for (9..16) |i| {
        try array.push(@intCast(i));
        try expect(array.entries.len == 16);
        try expect(array.entries[i][0] == i);
    }
    try array.push(16);
    try expect(array.entries.len == 32);
    try expect(array.entries[16][0] == 16);
}

test "run-length encoding should work as expected" {
    const allocator = testing.allocator;
    var s = try GeneralSuite.init(allocator);
    defer s.deinit();

    var array = try RleArray(u8).init(s.memory);
    defer array.deinit();

    try expect(array.entries_count == 0);
    try array.push(10);
    try expect(array.entries_count == 1);
    try expect(array.entries[0][0] == 10);
    try expect(array.entries[0][1] == 1);
    for (2..21) |i| {
        try array.push(10);
        try expect(array.entries_count == 1);
        try expect(array.entries[0][0] == 10);
        try expect(array.entries[0][1] == @as(u8, @intCast(i)));
    }
    try array.push(20);
    try expect(array.entries_count == 2);
    try expect(array.entries[1][0] == 20);
    try expect(array.entries[1][1] == 1);
    for (2..21) |i| {
        try array.push(20);
        try expect(array.entries_count == 2);
        try expect(array.entries[1][0] == 20);
        try expect(array.entries[1][1] == @as(u8, @intCast(i)));
    }
    try array.push(10);
    try expect(array.entries_count == 3);
    try expect(array.entries[2][0] == 10);
    try expect(array.entries[2][1] == 1);
}

test "sequencing works properly" {
    const allocator = testing.allocator;
    var s = try GeneralSuite.init(allocator);
    defer s.deinit();

    var array = try RleArray(u8).init(s.memory);
    defer array.deinit();

    try expectError(error.IndexOutOfBounds, array.get(0));
    try array.push(10);
    try expect(try array.get(0) == 10);
    try expectError(error.IndexOutOfBounds, array.get(1));
    try array.push(10);
    try expect(try array.get(1) == 10);
    try expectError(error.IndexOutOfBounds, array.get(2));
    for (2..20) |i| {
        try array.push(10);
        try expect(try array.get(i) == 10);
        try expectError(error.IndexOutOfBounds, array.get(i + 1));
    }
    try array.push(20);
    try expect(try array.get(19) == 10);
    try expect(try array.get(20) == 20);
    try expectError(error.IndexOutOfBounds, array.get(21));
    try array.push(10);
    try expect(try array.get(20) == 20);
    try expect(try array.get(21) == 10);
    try expectError(error.IndexOutOfBounds, array.get(22));
}
https://raw.githubusercontent.com/InfiniteRain/zig-lox/c97c367d5f849a102d7e0c838d740a7a2d5fe3ad/src/rle_array.zig
//! GraphicsPipeline: a file-level struct implementing a Vulkan graphics
//! pipeline that draws one sampled texture onto a fullscreen quad, with an
//! anti-aliasing fragment shader driven by PushConstant parameters.
const std = @import("std");
const Allocator = std.mem.Allocator;

const vk = @import("vulkan");
const render = @import("../render.zig");
const Context = render.Context;
const GpuBufferMemory = render.GpuBufferMemory;
const Swapchain = render.swapchain.Data;
const memory = render.memory;
const shaders = @import("shaders");

// Quad vertex: position plus texture coordinate. `extern` fixes the layout
// to match the vertex shader's input bindings.
const Vertex = extern struct {
    pos: [3]f32,
    uv: [2]f32,
};

pub const vertex_size = vertices.len * @sizeOf(Vertex);
pub const indices_size = indices.len * @sizeOf(u16);

// Fullscreen quad in normalized device coordinates.
pub const vertices = [_]Vertex{
    .{ .pos = .{ 1.0, 1.0, 0.0 }, .uv = .{ 1.0, 1.0 } },
    .{ .pos = .{ -1.0, 1.0, 0.0 }, .uv = .{ 0.0, 1.0 } },
    .{ .pos = .{ -1.0, -1.0, 0.0 }, .uv = .{ 0.0, 0.0 } },
    .{ .pos = .{ 1.0, -1.0, 0.0 }, .uv = .{ 1.0, 0.0 } },
};
// Two triangles forming the quad.
pub const indices = [_]u16{ 0, 1, 2, 2, 3, 0 };

// Fragment-shader push constants; must match the shader's push-constant
// block layout.
pub const PushConstant = extern struct {
    samples: i32,
    distribution_bias: f32,
    pixel_multiplier: f32,
    inverse_hue_tolerance: f32,
};

/// Tunable defaults for the anti-aliasing shader.
pub const Config = struct {
    samples: i32 = 20, // HIGHER = NICER = SLOWER
    distribution_bias: f32 = 0.6, // between 0. and 1.
    pixel_multiplier: f32 = 1.5, // between 1. and 3. (keep low)
    inverse_hue_tolerance: f32 = 20, // (2. - 30.)
};

/// Pipeline to draw a single texture to screen
const GraphicsPipeline = @This();

bytes_used_in_buffer: vk.DeviceSize,
pipeline_cache: vk.PipelineCache,
pipeline_layout: vk.PipelineLayout,
pipeline: vk.Pipeline,
descriptor_set_layout: vk.DescriptorSetLayout,
descriptor_set: vk.DescriptorSet,
descriptor_pool: vk.DescriptorPool,
current_frame: usize,
// One command pool + buffer per swapchain image.
command_pools: []vk.CommandPool,
command_buffers: []vk.CommandBuffer,
framebuffers: []vk.Framebuffer,
shader_constants: *PushConstant,
// shader modules stored for cleanup
shader_modules: [2]vk.ShaderModule,

/// Builds the complete pipeline: uploads quad geometry into
/// `vertex_index_buffer`, creates descriptor pool/layout/set bound to the
/// draw image sampler, compiles the vertex/fragment shader stages, and
/// allocates per-swapchain-image command pools/buffers and framebuffers.
/// Every resource is released via errdefer on failure; on success ownership
/// moves to the returned struct (release with deinit()).
pub fn init(
    allocator: Allocator,
    ctx: Context,
    swapchain: Swapchain,
    render_pass: vk.RenderPass,
    draw_sampler: vk.Sampler,
    draw_image_view: vk.ImageView,
    vertex_index_buffer: *GpuBufferMemory,
    config: Config,
) !GraphicsPipeline {
    // Vertices at offset 0, indices packed immediately after them.
    try vertex_index_buffer.transferToDevice(ctx, Vertex, 0, vertices[0..]);
    try vertex_index_buffer.transferToDevice(ctx, u16, vertex_size, indices[0..]);
    // NOTE(review): `vertex_size * indices_size` multiplies two byte counts;
    // the bytes actually used are `vertex_size + indices_size` — confirm
    // whether the product is intentional (it inflates the usage estimate).
    const bytes_used_in_buffer = memory.nonCoherentAtomSize(ctx, vertex_size * indices_size);
    if (bytes_used_in_buffer > vertex_index_buffer.size) {
        return error.OutOfDeviceMemory;
    }

    const descriptor_pool = blk: {
        const pool_sizes = [_]vk.DescriptorPoolSize{.{
            .type = .combined_image_sampler,
            .descriptor_count = 1, // TODO: swap image size ?
        }};
        const descriptor_pool_info = vk.DescriptorPoolCreateInfo{
            .flags = .{},
            .max_sets = @intCast(u32, swapchain.images.len),
            .pool_size_count = pool_sizes.len,
            .p_pool_sizes = &pool_sizes,
        };
        break :blk try ctx.vkd.createDescriptorPool(ctx.logical_device, &descriptor_pool_info, null);
    };
    errdefer ctx.vkd.destroyDescriptorPool(ctx.logical_device, descriptor_pool, null);

    // Single binding: the draw image sampled in the fragment stage.
    const descriptor_set_layout = blk: {
        const set_layout_bindings = [_]vk.DescriptorSetLayoutBinding{.{
            .binding = 0,
            .descriptor_type = .combined_image_sampler,
            .descriptor_count = 1,
            .stage_flags = .{
                .fragment_bit = true,
            },
            .p_immutable_samplers = null,
        }};
        const set_layout_info = vk.DescriptorSetLayoutCreateInfo{
            .flags = .{},
            .binding_count = set_layout_bindings.len,
            .p_bindings = &set_layout_bindings,
        };
        break :blk try ctx.vkd.createDescriptorSetLayout(ctx.logical_device, &set_layout_info, null);
    };
    errdefer ctx.vkd.destroyDescriptorSetLayout(ctx.logical_device, descriptor_set_layout, null);

    const descriptor_set = blk: {
        const alloc_info = vk.DescriptorSetAllocateInfo{
            .descriptor_pool = descriptor_pool,
            .descriptor_set_count = 1,
            .p_set_layouts = @ptrCast([*]const vk.DescriptorSetLayout, &descriptor_set_layout),
        };
        var descriptor_set_tmp: vk.DescriptorSet = undefined;
        try ctx.vkd.allocateDescriptorSets(
            ctx.logical_device,
            &alloc_info,
            @ptrCast([*]vk.DescriptorSet, &descriptor_set_tmp),
        );
        break :blk descriptor_set_tmp;
    };
    errdefer ctx.vkd.freeDescriptorSets(
        ctx.logical_device,
        descriptor_pool,
        1,
        @ptrCast([*]const vk.DescriptorSet, &descriptor_set),
    ) catch {};

    // Point the descriptor set at the sampled draw image.
    {
        const descriptor_info = vk.DescriptorImageInfo{
            .sampler = draw_sampler,
            .image_view = draw_image_view,
            .image_layout = .shader_read_only_optimal,
        };
        const write_descriptor_sets = [_]vk.WriteDescriptorSet{.{
            .dst_set = descriptor_set,
            .dst_binding = 0,
            .dst_array_element = 0,
            .descriptor_count = 1,
            .descriptor_type = .combined_image_sampler,
            .p_image_info = @ptrCast([*]const vk.DescriptorImageInfo, &descriptor_info),
            .p_buffer_info = undefined,
            .p_texel_buffer_view = undefined,
        }};
        ctx.vkd.updateDescriptorSets(
            ctx.logical_device,
            write_descriptor_sets.len,
            @ptrCast([*]const vk.WriteDescriptorSet, &write_descriptor_sets),
            0,
            undefined,
        );
    }

    // Layout: one descriptor set + one fragment-stage push-constant range.
    const pipeline_layout = blk: {
        const push_constant_range = vk.PushConstantRange{
            .stage_flags = .{ .fragment_bit = true },
            .offset = 0,
            .size = @sizeOf(PushConstant),
        };
        const pipeline_layout_info = vk.PipelineLayoutCreateInfo{
            .flags = .{},
            .set_layout_count = 1,
            .p_set_layouts = @ptrCast([*]const vk.DescriptorSetLayout, &descriptor_set_layout),
            .push_constant_range_count = 1,
            .p_push_constant_ranges = @ptrCast([*]const vk.PushConstantRange, &push_constant_range),
        };
        break :blk try ctx.vkd.createPipelineLayout(ctx.logical_device, &pipeline_layout_info, null);
    };
    errdefer ctx.vkd.destroyPipelineLayout(ctx.logical_device, pipeline_layout, null);

    const input_assembly_state = vk.PipelineInputAssemblyStateCreateInfo{
        .flags = .{},
        .topology = .triangle_list,
        .primitive_restart_enable = vk.FALSE,
    };
    const rasterization_state = vk.PipelineRasterizationStateCreateInfo{
        .flags = .{},
        .depth_clamp_enable = vk.FALSE,
        .rasterizer_discard_enable = vk.FALSE,
        .polygon_mode = .fill,
        .cull_mode = .{}, // no culling: the quad must always be visible
        .front_face = .counter_clockwise,
        .depth_bias_enable = 0,
        .depth_bias_constant_factor = 0,
        .depth_bias_clamp = 0,
        .depth_bias_slope_factor = 0,
        .line_width = 1,
    };
    // Standard alpha blending over the swapchain image.
    const blend_attachment_state = vk.PipelineColorBlendAttachmentState{
        .blend_enable = vk.TRUE,
        .src_color_blend_factor = .src_alpha,
        .dst_color_blend_factor = .one_minus_src_alpha,
        .color_blend_op = .add,
        .src_alpha_blend_factor = .one_minus_src_alpha,
        .dst_alpha_blend_factor = .zero,
        .alpha_blend_op = .add,
        .color_write_mask = .{
            .r_bit = true,
            .g_bit = true,
            .b_bit = true,
            .a_bit = true,
        },
    };
    const color_blend_state = vk.PipelineColorBlendStateCreateInfo{
        .flags = .{},
        .logic_op_enable = vk.FALSE,
        .logic_op = .clear,
        .attachment_count = 1,
        .p_attachments = @ptrCast([*]const vk.PipelineColorBlendAttachmentState, &blend_attachment_state),
        .blend_constants = [4]f32{ 0, 0, 0, 0 },
    };
    // TODO: deviation from guide. Validate that still valid!
    const depth_stencil_state: ?*vk.PipelineDepthStencilStateCreateInfo = null;
    const viewport_state = vk.PipelineViewportStateCreateInfo{
        .flags = .{},
        .viewport_count = 1,
        .p_viewports = null, // viewport is created on draw
        .scissor_count = 1,
        .p_scissors = null, // scissor is created on draw
    };
    const multisample_state = vk.PipelineMultisampleStateCreateInfo{
        .flags = .{},
        .rasterization_samples = .{ .@"1_bit" = true },
        .sample_shading_enable = vk.FALSE,
        .min_sample_shading = 0,
        .p_sample_mask = null,
        .alpha_to_coverage_enable = vk.FALSE,
        .alpha_to_one_enable = vk.FALSE,
    };
    // Viewport and scissor are set per-draw, so they are dynamic state.
    const dynamic_state_enabled = [_]vk.DynamicState{
        .viewport,
        .scissor,
    };
    const dynamic_state = vk.PipelineDynamicStateCreateInfo{
        .flags = .{},
        .dynamic_state_count = dynamic_state_enabled.len,
        .p_dynamic_states = &dynamic_state_enabled,
    };
    const vert = blk: {
        const create_info = vk.ShaderModuleCreateInfo{
            .flags = .{},
            .p_code = @ptrCast([*]const u32, &shaders.image_vert_spv),
            .code_size = shaders.image_vert_spv.len,
        };
        const module = try ctx.vkd.createShaderModule(ctx.logical_device, &create_info, null);
        break :blk vk.PipelineShaderStageCreateInfo{
            .flags = .{},
            .stage = .{ .vertex_bit = true },
            .module = module,
            .p_name = "main",
            .p_specialization_info = null,
        };
    };
    errdefer ctx.vkd.destroyShaderModule(ctx.logical_device, vert.module, null);
    const frag = blk: {
        const create_info = vk.ShaderModuleCreateInfo{
            .flags = .{},
            .p_code = @ptrCast([*]const u32, &shaders.image_frag_spv),
            .code_size = shaders.image_frag_spv.len,
        };
        const module = try ctx.vkd.createShaderModule(ctx.logical_device, &create_info, null);
        break :blk vk.PipelineShaderStageCreateInfo{
            .flags = .{},
            .stage = .{ .fragment_bit = true },
            .module = module,
            .p_name = "main",
            .p_specialization_info = null,
        };
    };
    errdefer ctx.vkd.destroyShaderModule(ctx.logical_device, frag.module, null);
    const shader_stages = [_]vk.PipelineShaderStageCreateInfo{ vert, frag };

    const vertex_input_bindings = [_]vk.VertexInputBindingDescription{.{
        .binding = 0,
        .stride = @sizeOf(Vertex),
        .input_rate = .vertex,
    }};
    const vertex_input_attributes = [_]vk.VertexInputAttributeDescription{ .{
        .location = 0,
        .binding = 0,
        // NOTE(review): `pos` is [3]f32 but this declares a 2-component
        // format (.r32g32_sfloat) — verify against the vertex shader input
        // (likely should be .r32g32b32_sfloat if the shader reads vec3).
        .format = .r32g32_sfloat,
        .offset = @offsetOf(Vertex, "pos"),
    }, .{
        .location = 1,
        .binding = 0,
        .format = .r32g32_sfloat,
        .offset = @offsetOf(Vertex, "uv"),
    } };
    const vertex_input_state = vk.PipelineVertexInputStateCreateInfo{
        .flags = .{},
        .vertex_binding_description_count = vertex_input_bindings.len,
        .p_vertex_binding_descriptions = &vertex_input_bindings,
        .vertex_attribute_description_count = vertex_input_attributes.len,
        .p_vertex_attribute_descriptions = &vertex_input_attributes,
    };
    const pipeline_cache = blk: {
        const pipeline_cache_info = vk.PipelineCacheCreateInfo{
            .flags = .{},
            .initial_data_size = 0,
            .p_initial_data = undefined,
        };
        break :blk try ctx.vkd.createPipelineCache(ctx.logical_device, &pipeline_cache_info, null);
    };
    errdefer ctx.vkd.destroyPipelineCache(ctx.logical_device, pipeline_cache, null);

    const pipeline_create_info = vk.GraphicsPipelineCreateInfo{
        .flags = .{},
        .stage_count = shader_stages.len,
        .p_stages = &shader_stages,
        .p_vertex_input_state = &vertex_input_state,
        .p_input_assembly_state = &input_assembly_state,
        .p_tessellation_state = null,
        .p_viewport_state = &viewport_state,
        .p_rasterization_state = &rasterization_state,
        .p_multisample_state = &multisample_state,
        .p_depth_stencil_state = depth_stencil_state,
        .p_color_blend_state = &color_blend_state,
        .p_dynamic_state = &dynamic_state,
        .layout = pipeline_layout,
        .render_pass = render_pass,
        .subpass = 0,
        .base_pipeline_handle = vk.Pipeline.null_handle,
        .base_pipeline_index = -1,
    };
    var pipeline: vk.Pipeline = undefined;
    _ = try ctx.vkd.createGraphicsPipelines(
        ctx.logical_device,
        pipeline_cache,
        1,
        @ptrCast([*]const vk.GraphicsPipelineCreateInfo, &pipeline_create_info),
        null,
        @ptrCast([*]vk.Pipeline, &pipeline),
    );
    errdefer ctx.vkd.destroyPipeline(ctx.logical_device, pipeline, null);

    const pool_info = vk.CommandPoolCreateInfo{
        .flags = .{ .transient_bit = true },
        .queue_family_index = ctx.queue_indices.graphics,
    };
    const command_pools = try allocator.alloc(vk.CommandPool, swapchain.images.len);
    errdefer allocator.free(command_pools);
    const command_buffers = try allocator.alloc(vk.CommandBuffer, swapchain.images.len);
    errdefer allocator.free(command_buffers);
    // Track how many pools/buffers were created so the errdefer below only
    // destroys the ones that actually exist if a mid-loop create fails.
    var initialized_pools: usize = 0;
    var initialized_buffers: usize = 0;
    for (command_pools, 0..) |*command_pool, i| {
        command_pool.* = try ctx.vkd.createCommandPool(ctx.logical_device, &pool_info, null);
        initialized_pools = i + 1;
        command_buffers[i] = try render.pipeline.createCmdBuffer(ctx, command_pool.*);
        initialized_buffers = i + 1;
    }
    errdefer {
        var i: usize = 0;
        while (i < initialized_buffers) : (i += 1) {
            ctx.vkd.freeCommandBuffers(
                ctx.logical_device,
                command_pools[i],
                1,
                @ptrCast([*]const vk.CommandBuffer, &command_buffers[i]),
            );
        }
        i = 0;
        while (i < initialized_pools) : (i += 1) {
            ctx.vkd.destroyCommandPool(ctx.logical_device, command_pools[i], null);
        }
    }

    const framebuffers = try render.pipeline.createFramebuffers(allocator, ctx, &swapchain, render_pass, null);
    errdefer {
        for (framebuffers) |buffer| {
            ctx.vkd.destroyFramebuffer(ctx.logical_device, buffer, null);
        }
        allocator.free(framebuffers);
    }

    // Heap-allocated so draw code can mutate the constants in place.
    const shader_constants = try allocator.create(PushConstant);
    shader_constants.* = .{
        .samples = config.samples,
        .distribution_bias = config.distribution_bias,
        .pixel_multiplier = config.pixel_multiplier,
        .inverse_hue_tolerance = config.inverse_hue_tolerance,
    };

    return GraphicsPipeline{
        .bytes_used_in_buffer = bytes_used_in_buffer,
        .pipeline_cache = pipeline_cache,
        .pipeline_layout = pipeline_layout,
        .pipeline = pipeline,
        .descriptor_set_layout = descriptor_set_layout,
        .descriptor_set = descriptor_set,
        .descriptor_pool = descriptor_pool,
        .current_frame = 0,
        .command_pools = command_pools,
        .command_buffers = command_buffers,
        .framebuffers = framebuffers,
        .shader_modules = [2]vk.ShaderModule{ vert.module, frag.module },
        .shader_constants = shader_constants,
    };
}

/// Destroys every Vulkan object and frees every allocation made by init(),
/// in reverse acquisition order. `allocator` must be the one passed to init.
pub fn deinit(self: GraphicsPipeline, allocator: Allocator, ctx: Context) void {
    for (self.framebuffers) |buffer| {
        ctx.vkd.destroyFramebuffer(ctx.logical_device, buffer, null);
    }
    allocator.free(self.framebuffers);
    for (self.command_buffers, 0..) |command_buffer, i| {
        ctx.vkd.freeCommandBuffers(
            ctx.logical_device,
            self.command_pools[i],
            1,
            @ptrCast([*]const vk.CommandBuffer, &command_buffer),
        );
    }
    for (self.command_pools) |command_pool| {
        ctx.vkd.destroyCommandPool(ctx.logical_device, command_pool, null);
    }
    allocator.free(self.command_pools);
    allocator.free(self.command_buffers);
    ctx.vkd.destroyPipeline(ctx.logical_device, self.pipeline, null);
    ctx.vkd.destroyPipelineCache(ctx.logical_device, self.pipeline_cache, null);
    ctx.vkd.destroyShaderModule(ctx.logical_device, self.shader_modules[0], null);
    ctx.vkd.destroyShaderModule(ctx.logical_device, self.shader_modules[1], null);
    ctx.vkd.destroyPipelineLayout(ctx.logical_device, self.pipeline_layout, null);
    ctx.vkd.destroyDescriptorSetLayout(ctx.logical_device, self.descriptor_set_layout, null);
    ctx.vkd.destroyDescriptorPool(ctx.logical_device, self.descriptor_pool, null);
    allocator.destroy(self.shader_constants);
}
https://raw.githubusercontent.com/Avokadoen/zig_vulkan/d54b1c364d6f2f394343cdbbaf6dbd37699821ac/src/modules/voxel_rt/GraphicsPipeline.zig
//! Interner for types and constant values.
//!
//! A `Key` is deduplicated into the interner and identified from then on by a
//! stable `Ref` index. Frequently used keys (small integer/float types, zero,
//! one, null, ...) map to fixed sentinel `Ref` values near `maxInt(u32)` and
//! are never stored; everything else is encoded as a `(tag, data)` item plus
//! out-of-line payload in `extra`, `limbs`, or `strings`.

const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Hash = std.hash.Wyhash;
const Limb = std.math.big.Limb;

const Interner = @This();

// The map stores no key/value data of its own; only its entry index is used
// (the index IS the `Ref`). Lookups go through `KeyAdapter`, which re-derives
// the `Key` from `items`/`extra` on demand.
map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
items: std.MultiArrayList(struct {
    tag: Tag,
    data: u32,
}) = .{},
// Out-of-line storage for multi-word payloads, indexed by `items.data`.
extra: std.ArrayListUnmanaged(u32) = .{},
// Big-integer limb storage referenced by `Tag.Int`.
limbs: std.ArrayListUnmanaged(Limb) = .{},
// Byte storage referenced by `Tag.Bytes`.
strings: std.ArrayListUnmanaged(u8) = .{},

/// Adapter so `map.getOrPutAdapted` can compare an incoming `Key` against
/// already-interned entries without materialized map keys.
const KeyAdapter = struct {
    interner: *const Interner,

    pub fn eql(adapter: KeyAdapter, a: Key, b_void: void, b_map_index: usize) bool {
        _ = b_void;
        return adapter.interner.get(@as(Ref, @enumFromInt(b_map_index))).eql(a);
    }

    pub fn hash(adapter: KeyAdapter, a: Key) u32 {
        _ = adapter;
        return a.hash();
    }
};

/// The logical, decoded form of an interned entry.
pub const Key = union(enum) {
    int_ty: u16,
    float_ty: u16,
    ptr_ty,
    noreturn_ty,
    void_ty,
    func_ty,
    array_ty: struct {
        len: u64,
        child: Ref,
    },
    vector_ty: struct {
        len: u32,
        child: Ref,
    },
    record_ty: []const Ref,
    /// May not be zero
    null,
    int: union(enum) {
        u64: u64,
        i64: i64,
        big_int: BigIntConst,

        /// View any representation as a `BigIntConst`; small values are
        /// materialized into `space`.
        pub fn toBigInt(repr: @This(), space: *Tag.Int.BigIntSpace) BigIntConst {
            return switch (repr) {
                .big_int => |x| x,
                inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
            };
        }
    },
    float: Float,
    bytes: []const u8,

    pub const Float = union(enum) {
        f16: f16,
        f32: f32,
        f64: f64,
        f80: f80,
        f128: f128,
    };

    /// Structural hash; must agree with `eql` (ints hash by sign + limbs so
    /// that equal values hash equally regardless of representation).
    pub fn hash(key: Key) u32 {
        var hasher = Hash.init(0);
        const tag = std.meta.activeTag(key);
        std.hash.autoHash(&hasher, tag);
        switch (key) {
            .bytes => |bytes| {
                hasher.update(bytes);
            },
            .record_ty => |elems| for (elems) |elem| {
                std.hash.autoHash(&hasher, elem);
            },
            .float => |repr| switch (repr) {
                // Hash the raw bit pattern, not the float value.
                inline else => |data| std.hash.autoHash(
                    &hasher,
                    @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)),
                ),
            },
            .int => |repr| {
                var space: Tag.Int.BigIntSpace = undefined;
                const big = repr.toBigInt(&space);
                std.hash.autoHash(&hasher, big.positive);
                for (big.limbs) |limb| std.hash.autoHash(&hasher, limb);
            },
            inline else => |info| {
                std.hash.autoHash(&hasher, info);
            },
        }
        return @truncate(hasher.final());
    }

    /// Structural equality; slices are compared element-wise and ints by
    /// big-integer value.
    pub fn eql(a: Key, b: Key) bool {
        const KeyTag = std.meta.Tag(Key);
        const a_tag: KeyTag = a;
        const b_tag: KeyTag = b;
        if (a_tag != b_tag) return false;
        switch (a) {
            .record_ty => |a_elems| {
                const b_elems = b.record_ty;
                if (a_elems.len != b_elems.len) return false;
                for (a_elems, b_elems) |a_elem, b_elem| {
                    if (a_elem != b_elem) return false;
                }
                return true;
            },
            .bytes => |a_bytes| {
                const b_bytes = b.bytes;
                return std.mem.eql(u8, a_bytes, b_bytes);
            },
            .int => |a_repr| {
                var a_space: Tag.Int.BigIntSpace = undefined;
                const a_big = a_repr.toBigInt(&a_space);
                var b_space: Tag.Int.BigIntSpace = undefined;
                const b_big = b.int.toBigInt(&b_space);
                return a_big.eql(b_big);
            },
            inline else => |a_info, tag| {
                const b_info = @field(b, @tagName(tag));
                return std.meta.eql(a_info, b_info);
            },
        }
    }

    /// Map common keys onto fixed sentinel refs so they never occupy storage.
    /// Returns null when the key must actually be interned.
    fn toRef(key: Key) ?Ref {
        switch (key) {
            .int_ty => |bits| switch (bits) {
                1 => return .i1,
                8 => return .i8,
                16 => return .i16,
                32 => return .i32,
                64 => return .i64,
                128 => return .i128,
                else => {},
            },
            .float_ty => |bits| switch (bits) {
                16 => return .f16,
                32 => return .f32,
                64 => return .f64,
                80 => return .f80,
                128 => return .f128,
                else => unreachable,
            },
            .ptr_ty => return .ptr,
            .func_ty => return .func,
            .noreturn_ty => return .noreturn,
            .void_ty => return .void,
            .int => |repr| {
                var space: Tag.Int.BigIntSpace = undefined;
                const big = repr.toBigInt(&space);
                if (big.eqlZero()) return .zero;
                const big_one = BigIntConst{ .limbs = &.{1}, .positive = true };
                if (big.eql(big_one)) return .one;
            },
            .float => |repr| switch (repr) {
                inline else => |data| {
                    if (std.math.isPositiveZero(data)) return .zero;
                    if (data == 1) return .one;
                },
            },
            .null => return .null,
            else => {},
        }
        return null;
    }
};

/// Index of an interned entry. Values near `maxInt(u32)` are reserved
/// sentinels (see `Key.toRef`); everything else is an index into `items`.
pub const Ref = enum(u32) {
    const max = std.math.maxInt(u32);
    ptr = max - 1,
    noreturn = max - 2,
    void = max - 3,
    i1 = max - 4,
    i8 = max - 5,
    i16 = max - 6,
    i32 = max - 7,
    i64 = max - 8,
    i128 = max - 9,
    f16 = max - 10,
    f32 = max - 11,
    f64 = max - 12,
    f80 = max - 13,
    f128 = max - 14,
    func = max - 15,
    zero = max - 16,
    one = max - 17,
    null = max - 18,
    _,
};

/// Same layout as `Ref` plus a `none` sentinel for optional references.
pub const OptRef = enum(u32) {
    const max = std.math.maxInt(u32);
    none = max - 0,
    ptr = max - 1,
    noreturn = max - 2,
    void = max - 3,
    i1 = max - 4,
    i8 = max - 5,
    i16 = max - 6,
    i32 = max - 7,
    i64 = max - 8,
    i128 = max - 9,
    f16 = max - 10,
    f32 = max - 11,
    f64 = max - 12,
    f80 = max - 13,
    f128 = max - 14,
    func = max - 15,
    zero = max - 16,
    one = max - 17,
    null = max - 18,
    _,
};

/// Storage tag for an `items` entry; documents how `data` is interpreted.
pub const Tag = enum(u8) {
    /// `data` is `u16`
    int_ty,
    /// `data` is `u16`
    float_ty,
    /// `data` is index to `Array`
    array_ty,
    /// `data` is index to `Vector`
    vector_ty,
    /// `data` is `u32`
    u32,
    /// `data` is `i32`
    i32,
    /// `data` is `Int`
    int_positive,
    /// `data` is `Int`
    int_negative,
    /// `data` is `f16`
    f16,
    /// `data` is `f32`
    f32,
    /// `data` is `F64`
    f64,
    /// `data` is `F80`
    f80,
    /// `data` is `F128`
    f128,
    /// `data` is `Bytes`
    bytes,
    /// `data` is `Record`
    record_ty,

    /// 64-bit array length split into two u32s so it fits `extra`.
    pub const Array = struct {
        len0: u32,
        len1: u32,
        child: Ref,

        pub fn getLen(a: Array) u64 {
            return (PackedU64{
                .a = a.len0,
                .b = a.len1,
            }).get();
        }
    };

    pub const Vector = struct {
        len: u32,
        child: Ref,
    };

    /// Slice into `Interner.limbs` holding a big integer's magnitude.
    pub const Int = struct {
        limbs_index: u32,
        limbs_len: u32,

        /// Big enough to fit any non-BigInt value
        pub const BigIntSpace = struct {
            /// The +1 is headroom so that operations such as incrementing once
            /// or decrementing once are possible without using an allocator.
            limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
        };
    };

    /// f64 bit pattern split into two u32 pieces for `extra` storage.
    pub const F64 = struct {
        piece0: u32,
        piece1: u32,

        pub fn get(self: F64) f64 {
            const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32);
            return @bitCast(int_bits);
        }

        fn pack(val: f64) F64 {
            const bits = @as(u64, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
            };
        }
    };

    /// f80 bit pattern split into three pieces for `extra` storage.
    pub const F80 = struct {
        piece0: u32,
        piece1: u32,
        piece2: u32, // u16 part, top bits

        pub fn get(self: F80) f80 {
            const int_bits = @as(u80, self.piece0) |
                (@as(u80, self.piece1) << 32) |
                (@as(u80, self.piece2) << 64);
            return @bitCast(int_bits);
        }

        fn pack(val: f80) F80 {
            const bits = @as(u80, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
                .piece2 = @as(u16, @truncate(bits >> 64)),
            };
        }
    };

    /// f128 bit pattern split into four u32 pieces for `extra` storage.
    pub const F128 = struct {
        piece0: u32,
        piece1: u32,
        piece2: u32,
        piece3: u32,

        pub fn get(self: F128) f128 {
            const int_bits = @as(u128, self.piece0) |
                (@as(u128, self.piece1) << 32) |
                (@as(u128, self.piece2) << 64) |
                (@as(u128, self.piece3) << 96);
            return @bitCast(int_bits);
        }

        fn pack(val: f128) F128 {
            const bits = @as(u128, @bitCast(val));
            return .{
                .piece0 = @as(u32, @truncate(bits)),
                .piece1 = @as(u32, @truncate(bits >> 32)),
                .piece2 = @as(u32, @truncate(bits >> 64)),
                .piece3 = @as(u32, @truncate(bits >> 96)),
            };
        }
    };

    /// Slice into `Interner.strings`.
    pub const Bytes = struct {
        strings_index: u32,
        len: u32,
    };

    pub const Record = struct {
        elements_len: u32,

        // trailing
        // [elements_len]Ref
    };
};

/// Helper for round-tripping a u64 through two u32 `extra` slots.
pub const PackedU64 = packed struct(u64) {
    a: u32,
    b: u32,

    pub fn get(x: PackedU64) u64 {
        return @bitCast(x);
    }

    pub fn init(x: u64) PackedU64 {
        return @bitCast(x);
    }
};

/// Frees all interner storage. The interner is unusable afterwards.
pub fn deinit(i: *Interner, gpa: Allocator) void {
    i.map.deinit(gpa);
    i.items.deinit(gpa);
    i.extra.deinit(gpa);
    i.limbs.deinit(gpa);
    i.strings.deinit(gpa);
}

/// Interns `key`, returning the existing `Ref` if an equal key is already
/// present. Slices inside `key` (record elements, bytes) are copied; the
/// caller keeps ownership of its input.
pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
    // Fast path: keys with fixed sentinel refs are never stored.
    if (key.toRef()) |some| return some;
    const adapter: KeyAdapter = .{ .interner = i };
    const gop = try i.map.getOrPutAdapted(gpa, key, adapter);
    if (gop.found_existing) return @enumFromInt(gop.index);
    try i.items.ensureUnusedCapacity(gpa, 1);
    switch (key) {
        .int_ty => |bits| {
            i.items.appendAssumeCapacity(.{
                .tag = .int_ty,
                .data = bits,
            });
        },
        .float_ty => |bits| {
            i.items.appendAssumeCapacity(.{
                .tag = .float_ty,
                .data = bits,
            });
        },
        .array_ty => |info| {
            const split_len = PackedU64.init(info.len);
            i.items.appendAssumeCapacity(.{
                .tag = .array_ty,
                .data = try i.addExtra(gpa, Tag.Array{
                    .len0 = split_len.a,
                    .len1 = split_len.b,
                    .child = info.child,
                }),
            });
        },
        .vector_ty => |info| {
            i.items.appendAssumeCapacity(.{
                .tag = .vector_ty,
                .data = try i.addExtra(gpa, Tag.Vector{
                    .len = info.len,
                    .child = info.child,
                }),
            });
        },
        .int => |repr| int: {
            var space: Tag.Int.BigIntSpace = undefined;
            const big = repr.toBigInt(&space);
            // Values that fit in 32 bits are stored inline in `data`.
            switch (repr) {
                .u64 => |data| if (std.math.cast(u32, data)) |small| {
                    i.items.appendAssumeCapacity(.{
                        .tag = .u32,
                        .data = small,
                    });
                    break :int;
                },
                .i64 => |data| if (std.math.cast(i32, data)) |small| {
                    i.items.appendAssumeCapacity(.{
                        .tag = .i32,
                        .data = @bitCast(small),
                    });
                    break :int;
                },
                .big_int => |data| {
                    if (data.fitsInTwosComp(.unsigned, 32)) {
                        i.items.appendAssumeCapacity(.{
                            .tag = .u32,
                            .data = data.to(u32) catch unreachable,
                        });
                        break :int;
                    } else if (data.fitsInTwosComp(.signed, 32)) {
                        i.items.appendAssumeCapacity(.{
                            .tag = .i32,
                            .data = @bitCast(data.to(i32) catch unreachable),
                        });
                        break :int;
                    }
                },
            }
            // Anything larger goes into the shared limb buffer.
            const limbs_index: u32 = @intCast(i.limbs.items.len);
            try i.limbs.appendSlice(gpa, big.limbs);
            i.items.appendAssumeCapacity(.{
                .tag = if (big.positive) .int_positive else .int_negative,
                .data = try i.addExtra(gpa, Tag.Int{
                    .limbs_index = limbs_index,
                    .limbs_len = @intCast(big.limbs.len),
                }),
            });
        },
        .float => |repr| switch (repr) {
            // f16/f32 bit patterns fit directly into `data`; wider floats are
            // split into u32 pieces in `extra`.
            .f16 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f16,
                .data = @as(u16, @bitCast(data)),
            }),
            .f32 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f32,
                .data = @as(u32, @bitCast(data)),
            }),
            .f64 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f64,
                .data = try i.addExtra(gpa, Tag.F64.pack(data)),
            }),
            .f80 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f80,
                .data = try i.addExtra(gpa, Tag.F80.pack(data)),
            }),
            .f128 => |data| i.items.appendAssumeCapacity(.{
                .tag = .f128,
                .data = try i.addExtra(gpa, Tag.F128.pack(data)),
            }),
        },
        .bytes => |bytes| {
            const strings_index: u32 = @intCast(i.strings.items.len);
            try i.strings.appendSlice(gpa, bytes);
            i.items.appendAssumeCapacity(.{
                .tag = .bytes,
                .data = try i.addExtra(gpa, Tag.Bytes{
                    .strings_index = strings_index,
                    .len = @intCast(bytes.len),
                }),
            });
        },
        .record_ty => |elems| {
            // Reserve space for the header and trailing refs in one go so the
            // two writes below cannot fail separately.
            try i.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.Record).Struct.fields.len +
                elems.len);
            i.items.appendAssumeCapacity(.{
                .tag = .record_ty,
                .data = i.addExtraAssumeCapacity(Tag.Record{
                    .elements_len = @intCast(elems.len),
                }),
            });
            i.extra.appendSliceAssumeCapacity(@ptrCast(elems));
        },
        // These always have sentinel refs, so `toRef` already returned.
        .ptr_ty,
        .noreturn_ty,
        .void_ty,
        .func_ty,
        .null,
        => unreachable,
    }
    return @enumFromInt(gop.index);
}

/// Serializes `extra`'s fields (all u32-sized) into `i.extra` and returns the
/// starting index.
fn addExtra(i: *Interner, gpa: Allocator, extra: anytype) Allocator.Error!u32 {
    const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
    try i.extra.ensureUnusedCapacity(gpa, fields.len);
    return i.addExtraAssumeCapacity(extra);
}

fn addExtraAssumeCapacity(i: *Interner, extra: anytype) u32 {
    const result = @as(u32, @intCast(i.extra.items.len));
    inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
        i.extra.appendAssumeCapacity(switch (field.type) {
            Ref => @intFromEnum(@field(extra, field.name)),
            u32 => @field(extra, field.name),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        });
    }
    return result;
}

/// Decodes `ref` back into its logical `Key`. Returned slices
/// (`record_ty`, `bytes`, big-int limbs) point into interner storage and are
/// invalidated by further `put` calls.
pub fn get(i: *const Interner, ref: Ref) Key {
    // Sentinel refs decode without touching storage.
    switch (ref) {
        .ptr => return .ptr_ty,
        .func => return .func_ty,
        .noreturn => return .noreturn_ty,
        .void => return .void_ty,
        .i1 => return .{ .int_ty = 1 },
        .i8 => return .{ .int_ty = 8 },
        .i16 => return .{ .int_ty = 16 },
        .i32 => return .{ .int_ty = 32 },
        .i64 => return .{ .int_ty = 64 },
        .i128 => return .{ .int_ty = 128 },
        .f16 => return .{ .float_ty = 16 },
        .f32 => return .{ .float_ty = 32 },
        .f64 => return .{ .float_ty = 64 },
        .f80 => return .{ .float_ty = 80 },
        .f128 => return .{ .float_ty = 128 },
        .zero => return .{ .int = .{ .u64 = 0 } },
        .one => return .{ .int = .{ .u64 = 1 } },
        .null => return .null,
        else => {},
    }
    const item = i.items.get(@intFromEnum(ref));
    const data = item.data;
    return switch (item.tag) {
        .int_ty => .{ .int_ty = @intCast(data) },
        .float_ty => .{ .float_ty = @intCast(data) },
        .array_ty => {
            const array_ty = i.extraData(Tag.Array, data);
            return .{ .array_ty = .{
                .len = array_ty.getLen(),
                .child = array_ty.child,
            } };
        },
        .vector_ty => {
            const vector_ty = i.extraData(Tag.Vector, data);
            return .{ .vector_ty = .{
                .len = vector_ty.len,
                .child = vector_ty.child,
            } };
        },
        .u32 => .{ .int = .{ .u64 = data } },
        .i32 => .{ .int = .{ .i64 = @as(i32, @bitCast(data)) } },
        .int_positive, .int_negative => {
            const int_info = i.extraData(Tag.Int, data);
            const limbs = i.limbs.items[int_info.limbs_index..][0..int_info.limbs_len];
            return .{ .int = .{
                .big_int = .{
                    .positive = item.tag == .int_positive,
                    .limbs = limbs,
                },
            } };
        },
        .f16 => .{ .float = .{ .f16 = @bitCast(@as(u16, @intCast(data))) } },
        .f32 => .{ .float = .{ .f32 = @bitCast(data) } },
        .f64 => {
            const float = i.extraData(Tag.F64, data);
            return .{ .float = .{ .f64 = float.get() } };
        },
        .f80 => {
            const float = i.extraData(Tag.F80, data);
            return .{ .float = .{ .f80 = float.get() } };
        },
        .f128 => {
            const float = i.extraData(Tag.F128, data);
            return .{ .float = .{ .f128 = float.get() } };
        },
        .bytes => {
            const bytes = i.extraData(Tag.Bytes, data);
            return .{ .bytes = i.strings.items[bytes.strings_index..][0..bytes.len] };
        },
        .record_ty => {
            const extra = i.extraDataTrail(Tag.Record, data);
            return .{
                .record_ty = @ptrCast(i.extra.items[extra.end..][0..extra.data.elements_len]),
            };
        },
    };
}

/// Deserializes a `T` previously written by `addExtra` at `index`.
fn extraData(i: *const Interner, comptime T: type, index: usize) T {
    return i.extraDataTrail(T, index).data;
}

/// Like `extraData`, but also returns the index just past the decoded fields
/// so callers can read trailing data (see `Tag.Record`).
fn extraDataTrail(i: *const Interner, comptime T: type, index: usize) struct { data: T, end: u32 } {
    var result: T = undefined;
    const fields = @typeInfo(T).Struct.fields;
    inline for (fields, 0..) |field, field_i| {
        const int32 = i.extra.items[field_i + index];
        @field(result, field.name) = switch (field.type) {
            Ref => @enumFromInt(int32),
            u32 => int32,
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        };
    }
    return .{
        .data = result,
        .end = @intCast(index + fields.len),
    };
}
https://raw.githubusercontent.com/ziglang/zig/d9bd34fd0533295044ffb4160da41f7873aff905/lib/compiler/aro/backend/Interner.zig
//! Minimal wrappers over the GDI+ flat API used by the win32 backend.
const win32 = @import("win32.zig");

/// GDI+ startup token; written by whoever initializes GDI+.
pub var token: win32.ULONG = undefined;

/// One Zig error per GDI+ `Status` code.
pub const GpError = error{
    Ok,
    GenericError,
    InvalidParameter,
    OutOfMemory,
    ObjectBusy,
    InsufficientBuffer,
    NotImplemented,
    Win32Error,
    WrongState,
    Aborted,
    FileNotFound,
    ValueOverflow,
    AccessDenied,
    UnknownImageFormat,
    FontFamilyNotFound,
    FontStyleNotFound,
    NotTrueTypeFont,
    UnsupportedGdiplusVersion,
    GdiplusNotInitialized,
    PropertyNotFound,
    PropertyNotSupported,
    ProfileNotFound,
};

/// Checks a GDI+ status code. A successful status returns normally; any
/// failure currently panics instead of mapping to the matching `GpError`.
pub fn gdipWrap(status: win32.GpStatus) GpError!void {
    if (status == .Ok) return;
    // TODO: return error type
    @panic("TODO: correctly handle GDI+ errors");
}

/// Wrapper around a native GDI+ graphics context.
pub const Graphics = struct {
    peer: win32.GpGraphics,

    /// Creates a graphics context drawing onto the given device context.
    pub fn createFromHdc(hdc: win32.HDC) GpError!Graphics {
        var result = Graphics{ .peer = undefined };
        try gdipWrap(win32.GdipCreateFromHDC(hdc, &result.peer));
        return result;
    }
};
https://raw.githubusercontent.com/SaicharanKandukuri/capy-299f994/512c88540a160cea2e824c363e7261679b558add/src/backends/win32/gdip.zig
const std = @import("std");
const initial = @import("initial");
const preprocessor = @import("preprocessor");

/// Entry point: reads the source file named by the first CLI argument, runs
/// the initial translation phases (line splicing, comment removal), lexes the
/// result, and prints each token kind to stderr.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.detectLeaks();
    const a = gpa.allocator();

    var args = std.process.args();
    _ = args.next(); // skip program name
    const filename = args.next() orelse return error.InvalidInput;

    // Scratch arena for the initial phases; only `input` (allocated from `a`)
    // survives past them.
    var initial_arena = std.heap.ArenaAllocator.init(a);
    // Fix: previously the arena was deinitialized manually after the last
    // phase, leaking everything in it if any `try` below failed first.
    defer initial_arena.deinit();

    const raw = try initial.readIn(initial_arena.allocator(), filename);
    var lines = try initial.breakLines(initial_arena.allocator(), raw);
    try initial.mergeEscapedNewlines(&lines);
    try initial.delComments(&lines);
    const input = try initial.unlines(a, lines);
    defer a.free(input);

    const tokens = try preprocessor.lex(a, input);
    defer tokens.deinit();
    for (tokens.items) |tok| {
        std.debug.print("{}\n", .{tok.kind});
    }
}
https://raw.githubusercontent.com/Phytolizer/zparser/f7e0d15371a20a80949642ee0bd0bbf15a603ea5/src/main.zig
const std = @import("std");

/// Builder for Valve VPK archives: files are accumulated in memory via
/// `addFile` and then serialized by `write` into a `<name>_dir.vpk` plus
/// numbered data archives when the content exceeds `max_archive_size`.
pub const Vpk = struct {
    arena: std.heap.ArenaAllocator,
    dir_tree: DirTree1,

    // indexed by file extensions
    const DirTree1 = std.StringArrayHashMapUnmanaged(DirTree2);
    // indexed by directory path
    const DirTree2 = std.StringArrayHashMapUnmanaged(DirTree3);
    // indexed by filename
    const DirTree3 = std.StringArrayHashMapUnmanaged(DirEntry);

    const DirEntry = struct {
        crc: u32,
        data: []u8,
    };

    /// Which entries end up in which data archive, computed by `writeDirTree`.
    const ArchiveWriteInfo = struct {
        files: []Entry,

        const Entry = struct {
            skip_preload: u16,
            entry: *DirEntry,
        };
    };

    const max_archive_size = 256 * 1024 * 1024; // 256 MiB

    pub fn init(allocator: std.mem.Allocator) Vpk {
        return Vpk{
            .arena = std.heap.ArenaAllocator.init(allocator),
            .dir_tree = .{},
        };
    }

    /// Frees all builder state (everything lives in the arena).
    pub fn deinit(self: *Vpk) void {
        self.arena.deinit();
        self.* = undefined;
    }

    /// Registers `data` under `path` ('\' separators are normalized to '/').
    /// Both path and data are copied into the arena; returns
    /// `error.FileInArchive` on duplicate paths and `error.BadPath` for empty
    /// paths or paths with a leading/trailing separator.
    pub fn addFile(self: *Vpk, path: []const u8, data: []const u8) !void {
        if (path.len == 0) return error.BadPath;
        if (path[0] == '/' or path[path.len - 1] == '/') return error.BadPath;
        if (path[0] == '\\' or path[path.len - 1] == '\\') return error.BadPath;
        const path1 = try self.arena.allocator().alloc(u8, path.len);
        _ = std.mem.replace(u8, path, "\\", "/", path1);
        var dir_str: []const u8 = undefined;
        var filename: []const u8 = undefined;
        if (std.mem.lastIndexOfScalar(u8, path1, '/')) |idx| {
            dir_str = path1[0..idx];
            filename = path1[idx + 1 ..];
        } else {
            dir_str = "";
            filename = path1;
        }
        var file_str: []const u8 = undefined;
        var ext_str: []const u8 = undefined;
        // filenames like 'foo.' should include the dot in the name part
        if (std.mem.indexOfScalar(u8, filename[0 .. filename.len - 1], '.')) |first_idx| {
            // replicate a bug in valve's code: middle extension parts are lost.
            // e.g. 'model.dx90.vtx' gets split into 'model' and 'vtx'
            const last_idx = first_idx + std.mem.lastIndexOfScalar(u8, filename[first_idx .. filename.len - 1], '.').?;
            file_str = filename[0..first_idx];
            ext_str = filename[last_idx + 1 ..];
        } else {
            file_str = filename;
            ext_str = "";
        }
        const ext = try self.dir_tree.getOrPut(self.arena.allocator(), ext_str);
        if (!ext.found_existing) ext.value_ptr.* = .{};
        const dir = try ext.value_ptr.getOrPut(self.arena.allocator(), dir_str);
        if (!dir.found_existing) dir.value_ptr.* = .{};
        const file = try dir.value_ptr.getOrPut(self.arena.allocator(), file_str);
        if (file.found_existing) {
            return error.FileInArchive;
        } else {
            const data1 = try self.arena.allocator().dupe(u8, data);
            file.value_ptr.* = .{
                .crc = std.hash.Crc32.hash(data1),
                .data = data1,
            };
        }
    }

    /// Serializes the archive set into `dir`: `<name>_dir.vpk` holds the
    /// directory tree plus archive 0's data; each further archive becomes
    /// `<name>_NNN.vpk`.
    pub fn write(self: *Vpk, dir: std.fs.Dir, comptime name: []const u8) !void {
        var dir_file = try dir.createFile(name ++ "_dir.vpk", .{});
        defer dir_file.close();
        const archives = try self.writeDir(dir_file);
        if (archives.len > 0) {
            // Archive 0 is embedded in the dir file (entries use index 0x7FFF).
            try writeArchive(dir_file.writer(), archives[0]);
        }
        for (archives[1..]) |archive, i| {
            var name_buf: [name.len + 8]u8 = undefined; // "_NNN.vpk" = 8 chars
            std.io.fixedBufferStream(&name_buf).writer().print("{s}_{d:0>3}.vpk", .{ name, i }) catch unreachable;
            var f = try dir.createFile(&name_buf, .{});
            defer f.close();
            try writeArchive(f.writer(), archive);
        }
    }

    /// Writes the VPK header and directory tree to `f`, backpatching the tree
    /// size, and returns the archive layout for the data-writing phase.
    fn writeDir(self: *Vpk, f: std.fs.File) ![]ArchiveWriteInfo {
        // Header
        try f.writer().writeIntLittle(u32, 0x55aa1234); // Signature
        try f.writer().writeIntLittle(u32, 1); // Version
        // Write placeholder size to be filled in later
        const dir_size_pos = try f.getPos();
        try f.writer().writeIntLittle(u32, 0);
        // Write dir tree, tracking its size
        var counting = std.io.countingWriter(f.writer());
        const archives = try self.writeDirTree(counting.writer());
        // Overwrite dir tree size
        try f.seekTo(dir_size_pos);
        try f.writer().writeIntLittle(u32, @intCast(u32, counting.bytes_written));
        // Skip back past dir tree
        try f.seekBy(@intCast(i64, counting.bytes_written));
        return archives;
    }

    /// Dumps one archive's file contents back-to-back (preload bytes, if any,
    /// already live in the dir file and are skipped here).
    fn writeArchive(w: anytype, archive: ArchiveWriteInfo) !void {
        var bw = std.io.bufferedWriter(w);
        for (archive.files) |file| {
            try bw.writer().writeAll(file.entry.data[file.skip_preload..]);
        }
        try bw.flush();
    }

    // Write the directory tree and returns information on the position of
    // every file in the archive
    fn writeDirTree(self: *Vpk, w: anytype) ![]ArchiveWriteInfo {
        // archive number 0 = dir
        var archives = std.ArrayList(ArchiveWriteInfo).init(self.arena.allocator());
        defer archives.deinit();
        var cur_archive = std.ArrayList(ArchiveWriteInfo.Entry).init(self.arena.allocator());
        // Fix: this defer previously read `archives.deinit()` (copy-paste
        // typo), double-deiniting `archives` and never cleaning `cur_archive`.
        defer cur_archive.deinit();
        var cur_archive_off: u32 = 0;
        var it1 = self.dir_tree.iterator();
        while (it1.next()) |extension| {
            try writeName(w, extension.key_ptr.*);
            var it2 = extension.value_ptr.iterator();
            while (it2.next()) |directory| {
                try writeName(w, directory.key_ptr.*);
                var it3 = directory.value_ptr.iterator();
                while (it3.next()) |file| {
                    try writeName(w, file.key_ptr.*);
                    const size = @intCast(u32, file.value_ptr.data.len);
                    if (size > max_archive_size) {
                        return error.FileTooLarge;
                    }
                    // Go to the next archive if necessary
                    if (cur_archive_off + size > max_archive_size) {
                        try archives.append(.{
                            .files = cur_archive.toOwnedSlice(),
                        });
                        cur_archive_off = 0;
                    }
                    // 0x7FFF marks "data follows the directory in _dir.vpk".
                    const archive_idx: u16 = if (archives.items.len == 0)
                        0x7FFF
                    else
                        @intCast(u16, archives.items.len - 1);
                    // Write the entry data
                    try w.writeIntLittle(u32, file.value_ptr.crc);
                    try w.writeIntLittle(u16, 0); // PreloadBytes
                    try w.writeIntLittle(u16, archive_idx); // ArchiveIndex
                    try w.writeIntLittle(u32, cur_archive_off); // EntryOffset
                    try w.writeIntLittle(u32, size); // EntryLength
                    try w.writeIntLittle(u16, 0xFFFF); // Terminator
                    // Append the record to the archive
                    try cur_archive.append(.{
                        .skip_preload = 0,
                        .entry = file.value_ptr,
                    });
                    cur_archive_off += size;
                }
                try writeString(w, ""); // terminator
            }
            try writeString(w, ""); // terminator
        }
        try writeString(w, ""); // terminator
        if (cur_archive.items.len > 0) {
            // Flush final archive
            try archives.append(.{
                .files = cur_archive.toOwnedSlice(),
            });
        }
        return archives.toOwnedSlice();
    }

    /// Writes a tree-level name; empty names are encoded as a single space.
    fn writeName(w: anytype, str: []const u8) !void {
        if (std.mem.eql(u8, str, "")) {
            try writeString(w, " "); // empty names represented by a single space
        } else {
            try writeString(w, str);
        }
    }

    /// Writes a NUL-terminated string.
    fn writeString(w: anytype, str: []const u8) !void {
        try w.writeAll(str);
        try w.writeByte(0);
    }
};
https://raw.githubusercontent.com/mlugg/zvpk/389105e2fcd7ac82eb29867932a44c9fb37a0026/src/vpk.zig
//! Disassembler / debug printing for zlox bytecode chunks and values.
//! All output goes to stderr via `std.debug.print`.

const std = @import("std");
const builtin = @import("builtin");
const zlox = struct {
    usingnamespace @import("chunk.zig");
    usingnamespace @import("value.zig");
};
const Chunk = zlox.Chunk;
const OpCode = zlox.OpCode;
const Value = zlox.Value;
const Object = zlox.Object;
const Function = zlox.Function;

pub const log = std.log.scoped(.zlox);

/// How much debug output to produce.
pub const Verbosity = enum(u8) {
    Silent,
    Normal,
    Verbose,
};

/// Enable/Disable extra verbosity in output
pub const verbosity: Verbosity = if (builtin.mode == .Debug) .Normal else .Silent;

/// Print the disassembled instruction to the console
/// Disabled in non-Debug builds
pub fn disassembleChunk(chunk: *const Chunk, name: []const u8) void {
    if (verbosity != .Silent) {
        std.debug.print("== {s} ==\n", .{name});
        std.debug.print("{s:<4} {s:<4} {s:<16} Data\n", .{ "ip", "Line", "OpCode" });
        var offset: usize = 0;
        // Each instruction reports where the next one starts.
        while (offset < chunk.code.items.len) {
            offset = disassembleInstruction(chunk, offset);
        }
    }
}

/// Prints one instruction at `offset` and returns the offset of the next
/// instruction (instructions have variable operand counts).
pub fn disassembleInstruction(chunk: *const Chunk, offset: usize) usize {
    std.debug.print("{d:0>4} ", .{offset});
    // Repeat marker when the source line is the same as the previous slot.
    if (offset > 0 and chunk.lines.items[offset] == chunk.lines.items[offset - 1]) {
        std.debug.print(" | ", .{});
    } else if (offset < chunk.lines.items.len) {
        std.debug.print("{d:>4} ", .{chunk.lines.items[offset]});
    }
    std.debug.assert(chunk.code.items.len > 0);
    const op: OpCode = chunk.code.items[offset];
    // Dispatch on operand shape: constant index, stack slot, jump offset, ...
    switch (op) {
        .OP_CONSTANT => return constantInstruction(op, chunk, offset),
        .OP_DEFINE_GLOBAL => return constantInstruction(op, chunk, offset),
        .OP_GET_GLOBAL, .OP_SET_GLOBAL => return constantInstruction(op, chunk, offset),
        .OP_GET_LOCAL, .OP_SET_LOCAL => return byteInstruction(op, chunk, offset),
        .OP_GET_UPVALUE, .OP_SET_UPVALUE => return byteInstruction(op, chunk, offset),
        .OP_JUMP_IF_FALSE, .OP_JUMP => return jumpInstruction(op, 1, chunk, offset),
        .OP_LOOP => return jumpInstruction(op, -1, chunk, offset),
        .OP_CALL => return byteInstruction(op, chunk, offset),
        .OP_CLOSURE => return closureInstruction(op, chunk, offset),
        else => return simpleInstruction(op, offset),
    }
}

/// Opcode with a one-byte constant-table index operand; prints the constant.
fn constantInstruction(op: OpCode, chunk: *const Chunk, offset: usize) usize {
    const constant: u8 = chunk.code.items[offset + 1].byte();
    const value: Value = chunk.constants.items[constant];
    std.debug.print("{s:<16} {d:>4} ", .{ @tagName(op), constant });
    printValue(value);
    std.debug.print("\n", .{});
    return offset + 2;
}

/// OP_CLOSURE: constant operand naming the function, followed by two bytes
/// (is_local, index) per upvalue — hence the variable return offset.
fn closureInstruction(op: OpCode, chunk: *const Chunk, in_offset: usize) usize {
    var offset: usize = in_offset + 1;
    const constant: u8 = chunk.code.items[offset].byte();
    const value: Value = chunk.constants.items[constant];
    offset += 1;
    // Print the function name
    std.debug.print("{s:<16} {d:>4} ", .{ @tagName(op), constant });
    printValue(value);
    std.debug.print("\n", .{});
    // Print the closure values
    std.debug.assert(value == .object);
    std.debug.assert((value.object.* == .function));
    const function: *Function = &value.object.function;
    var i: u8 = 0;
    while (i < function.upvalueCount) : (i += 1) {
        const isLocal: bool = (chunk.code.items[offset].byte() == 1);
        const kind: []const u8 = if (isLocal) "local" else "upvalue";
        const idx: u8 = chunk.code.items[offset + 1].byte();
        std.debug.print("{d:0>4} | {s:<16} ", .{ offset, " " });
        std.debug.print("{s} {d}\n", .{ kind, idx });
        offset += 2;
    }
    return offset;
}

/// Opcode with a one-byte slot operand (locals, upvalues, call arg count).
fn byteInstruction(op: OpCode, chunk: *const Chunk, offset: usize) usize {
    const slot: u8 = chunk.code.items[offset + 1].byte();
    std.debug.print("{s:<16} {d:>4}\n", .{ @tagName(op), slot });
    return offset + 2;
}

/// Opcode with a big-endian u16 jump distance; `sign` is +1 for forward
/// jumps and -1 for OP_LOOP. Prints the resolved destination offset.
fn jumpInstruction(op: OpCode, sign: i8, chunk: *const Chunk, offset: usize) usize {
    const jump: u16 = (@as(u16, chunk.code.items[offset + 1].byte()) << 8) | chunk.code.items[offset + 2].byte();
    var dest: i64 = @intCast(offset + 3);
    dest += @as(i64, sign) * jump;
    std.debug.print("{s:<16} {d:>4} -> {d:<4}\n", .{ @tagName(op), offset, dest });
    return offset + 3;
}

/// Opcode with no operands.
fn simpleInstruction(op: OpCode, offset: usize) usize {
    std.debug.print("{s:<16}\n", .{@tagName(op)});
    return offset + 1;
}

/// Prints an object; the `Verbose` path prints explicit type names, the
/// normal path mimics clox-style output.
pub fn printObject(obj: *Object) void {
    if (verbosity == .Verbose) {
        // More verbose type output for debugging
        switch (obj.*) {
            .string => |s| std.debug.print("Obj.String: '{s}'", .{s}),
            .upvalue => |_| std.debug.print("upvalue", .{}),
            .function => |f| {
                if (f.name) |str_obj| {
                    if (str_obj.string.len > 0) {
                        std.debug.print("Obj.Function: '{s}'", .{str_obj.string});
                    } else {
                        std.debug.print("Obj.Function: '<invalid>'", .{});
                    }
                } else {
                    // NOTE(review): missing closing quote in this output string
                    // — looks unintentional; confirm before changing.
                    std.debug.print("Obj.Function: '<script>", .{});
                }
            },
            .closure => |c| {
                printObject(c.obj); // the enclosed function object
            },
            .native => std.debug.print("<native fn>", .{}),
        }
    } else {
        // Normal output for normal usage
        switch (obj.*) {
            .string => |s| std.debug.print("'{s}'", .{s}),
            .upvalue => |_| std.debug.print("upvalue", .{}),
            .function => |f| {
                if (f.name) |str_obj| {
                    std.debug.print("<fn {s}>", .{str_obj.string});
                } else {
                    // A nameless function is the top-level script.
                    std.debug.print("<script>", .{});
                }
            },
            .closure => |f| {
                if (f.obj.function.name) |str_obj| {
                    std.debug.print("<fn {s}>", .{str_obj.string});
                } else {
                    std.debug.print("<script>", .{});
                }
            },
            .native => std.debug.print("<native fn>", .{}),
        }
    }
}

/// Prints any runtime value; objects delegate to `printObject`.
pub fn printValue(value: Value) void {
    switch (value) {
        .number => |num| std.debug.print("'{d:.3}'", .{num}),
        .bool => |b| std.debug.print("'{}'", .{b}),
        .none => std.debug.print("'none'", .{}),
        .object => |obj| printObject(obj),
    }
}
https://raw.githubusercontent.com/JacobCrabill/zLox/113adf5ee5dc626162a1d094c8bdbdabbe5a1b2d/src/debug.zig
// Have a good evening, and thank you for choosing the Curly Bracket Format const std = @import("std"); const meta = std.meta; const mem = std.mem; const unicode = std.unicode; const fmt = std.fmt; const math = std.math; const assert = std.debug.assert; const testing = std.testing; const LinkedList = @import("list.zig").LinkedList; const StackBuffer = @import("buffer.zig").StackBuffer; const KVList = std.ArrayList(KV); const StringBuffer = StackBuffer(u8, 2048); const Value = union(enum) { True, False, None, String: StringBuffer, Char: u21, List: KVList, Usize: usize, Isize: isize, }; const Key = union(enum) { Numeric: usize, String: []const u8, }; const KV = struct { key: Key, value: Value, }; fn _lastNumericKey(list: *KVList) ?usize { var last: ?usize = null; for (list.items) |*node| switch (node.key) { .Numeric => |n| last = n, else => {}, }; return last; } test "_lastNumericKey()" { var gpa = GPA{}; var list = KVList.init(gpa.allocator()); try testing.expectEqual(_lastNumericKey(&list), null); try list.append(KV{ .key = Key{ .String = "hai" }, .value = .None }); try testing.expectEqual(_lastNumericKey(&list), null); try list.append(KV{ .key = Key{ .Numeric = 0 }, .value = .None }); try testing.expectEqual(_lastNumericKey(&list), 0); try list.append(KV{ .key = Key{ .String = "abcd" }, .value = .None }); try testing.expectEqual(_lastNumericKey(&list), 0); try list.append(KV{ .key = Key{ .String = "foobarbaz" }, .value = .None }); try testing.expectEqual(_lastNumericKey(&list), 0); try list.append(KV{ .key = Key{ .Numeric = 3 }, .value = .None }); try testing.expectEqual(_lastNumericKey(&list), 3); try list.append(KV{ .key = Key{ .String = "bazbarfoo" }, .value = .None }); try testing.expectEqual(_lastNumericKey(&list), 3); try list.append(KV{ .key = Key{ .Numeric = 6 }, .value = .None }); try testing.expectEqual(_lastNumericKey(&list), 6); try list.append(KV{ .key = Key{ .Numeric = 7 }, .value = .None }); try testing.expectEqual(_lastNumericKey(&list), 7); 
list.deinit(); try testing.expect(!gpa.deinit()); } pub const Parser = struct { input: []const u8, index: usize = 0, stack: usize = 0, const Self = @This(); const StringParserError = error{ UnterminatedString, StringTooLong, InvalidEscape, TooManyCodepointsInChar, InvalidUnicode, }; const ParserError = error{ NoMatchingParen, UnknownToken, OutOfMemory, UnexpectedClosingParen, InvalidKeyChar, NoMatchingBrace, UnexpectedKey, } || StringParserError || std.fmt.ParseIntError; pub fn deinit(data: *KVList) void { for (data.items) |*node| switch (node.value) { .List => |*l| deinit(l), else => {}, }; data.deinit(); } fn parseKey(self: *Self) ParserError![]const u8 { assert(self.input[self.index] == '['); self.index += 1; const oldi = self.index; while (self.index < self.input.len) : (self.index += 1) { switch (self.input[self.index]) { ']' => return self.input[oldi..self.index], 'a'...'z', '_', '0'...'9', 'A'...'Z' => {}, else => return error.InvalidKeyChar, } } return error.NoMatchingBrace; } fn parseValue(self: *Self) ParserError!Value { const oldi = self.index; while (self.index < self.input.len) : (self.index += 1) { switch (self.input[self.index]) { 0x09, 0x0a...0x0d, 0x20, '(', ')', '[', ']' => break, else => {}, } } const word = self.input[oldi..self.index]; assert(word.len > 0); // parse() expects index to point to last non-word char, so move index // back self.index -= 1; if (mem.eql(u8, word, "yea")) { return .True; } else if (mem.eql(u8, word, "nah")) { return .False; } else if (mem.eql(u8, word, "nil")) { return .None; } else if (word[0] >= '0' and word[0] <= '9') { // TODO: u8, u16, u21, u32, u64, u128, and signed variants. // (add them as needed...) // TODO: multibase (0x, 0o, 0b) if (mem.endsWith(u8, word, "z")) { const num = try std.fmt.parseInt(usize, word[0 .. word.len - 1], 10); return Value{ .Usize = num }; } else if (mem.endsWith(u8, word, "i")) { const num = try std.fmt.parseInt(isize, word[0 .. 
word.len - 1], 10); return Value{ .Isize = num }; } else { return Value{ .String = StringBuffer.init(word) }; } } else { return Value{ .String = StringBuffer.init(word) }; } } fn parseString(self: *Self, mode: u8) StringParserError!Value { assert(self.input[self.index] == mode); self.index += 1; var buf = StringBuffer.init(""); while (self.index < self.input.len) : (self.index += 1) { switch (self.input[self.index]) { '\'' => { if ((unicode.utf8CountCodepoints(buf.constSlice()) catch return error.InvalidUnicode) > 1) return error.TooManyCodepointsInChar; return Value{ .Char = unicode.utf8Decode(buf.constSlice()) catch return error.InvalidUnicode }; }, '"' => { return Value{ .String = buf }; }, '\\' => { self.index += 1; const esc: u8 = switch (self.input[self.index]) { '"' => '"', '\'' => '\'', '\\' => '\\', 'n' => '\n', 'r' => '\r', 'a' => 0x07, '0' => 0x00, 't' => '\t', else => return error.InvalidEscape, }; buf.append(esc) catch return error.StringTooLong; }, else => buf.append(self.input[self.index]) catch return error.StringTooLong, } } return error.UnterminatedString; } pub fn parse(self: *Self, alloc: mem.Allocator) ParserError!KVList { self.stack += 1; if (self.stack > 1) { assert(self.input[self.index] == '('); self.index += 1; } var list = KVList.init(alloc); var next_key: ?[]const u8 = null; while (self.index < self.input.len) : (self.index += 1) { const v: ?Value = switch (self.input[self.index]) { '(' => Value{ .List = try self.parse(alloc) }, ')' => { if (self.stack <= 1) { return error.UnexpectedClosingParen; } self.stack -= 1; return list; }, '[' => c: { if (next_key) |_| { return error.UnexpectedKey; } next_key = try self.parseKey(); break :c null; }, ']', 0x09, 0x0a...0x0d, 0x20 => continue, '"' => try self.parseString('"'), '\'' => try self.parseString('\''), else => try self.parseValue(), }; if (v) |value| { var key: Key = undefined; if (next_key) |nk| { key = Key{ .String = nk }; next_key = null; } else if (_lastNumericKey(&list)) |ln| { key = 
Key{ .Numeric = ln + 1 }; } else { key = Key{ .Numeric = 0 }; } list.append(KV{ .key = key, .value = value }) catch { return error.OutOfMemory; }; } } if (self.stack > 1) { // We didn't find a matching paren return error.NoMatchingParen; } else { return list; } } }; // Use a GPA for tests as then we get an error when there's a memory leak. // Also, the StringBuffers are too big for a FBA. const GPA = std.heap.GeneralPurposeAllocator(.{}); test "parse values" { var gpa = GPA{}; const input = "yea nah nil 'f' 129z 0z"; const output = [_]Value{ .True, .False, .None, .{ .Char = 'f' }, .{ .Usize = 129 }, .{ .Usize = 0 } }; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); for (res.items) |kv, i| { const key = Key{ .Numeric = i }; try testing.expectEqual(KV{ .key = key, .value = output[i] }, kv); } Parser.deinit(&res); try testing.expect(!gpa.deinit()); } test "parse strings" { var gpa = GPA{}; const Case = struct { input: []const u8, output: []const u8 }; const cases = [_]Case{ Case{ .input = "\"test\"", .output = "test" }, Case{ .input = "\"henlo world\"", .output = "henlo world" }, Case{ .input = "\"hi\n\n\"", .output = "hi\n\n" }, Case{ .input = "\"abcd\r\nabcd\r\n\\\\\"", .output = "abcd\r\nabcd\r\n\\" }, Case{ .input = "\"\\\" \\\" \\\" \\\\ \"", .output = "\" \" \" \\ " }, }; for (&cases) |case| { var p = Parser{ .input = case.input }; var res = try p.parse(gpa.allocator()); try testing.expectEqual(meta.activeTag(res.items[0].value), .String); try testing.expectEqualSlices( u8, res.items[0].value.String.slice(), case.output, ); Parser.deinit(&res); } try testing.expect(!gpa.deinit()); } test "parse basic list" { var gpa = GPA{}; const input = "yea (nah nil) nah"; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); try testing.expectEqual(res.items[0].value, .True); try testing.expectEqual(meta.activeTag(res.items[1].value), .List); try testing.expectEqual(res.items[1].value.List.items[0].value, .False); try 
testing.expectEqual(res.items[1].value.List.items[1].value, .None); try testing.expectEqual(res.items[2].value, .False); Parser.deinit(&res); try testing.expect(!gpa.deinit()); } test "parse nested list" { var gpa = GPA{}; const input = "yea ((nah nil) nah (nah yea )) nah"; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); try testing.expectEqual(res.items[0].value, .True); try testing.expectEqual(meta.activeTag(res.items[1].value), .List); var list1 = res.items[1].value.List; try testing.expectEqual(meta.activeTag(list1.items[0].value), .List); try testing.expectEqual(list1.items[1].value, .False); try testing.expectEqual(meta.activeTag(list1.items[2].value), .List); try testing.expectEqual(list1.items[0].value.List.items[0].value, .False); try testing.expectEqual(list1.items[0].value.List.items[1].value, .None); try testing.expectEqual(list1.items[2].value.List.items[0].value, .False); try testing.expectEqual(list1.items[2].value.List.items[1].value, .True); Parser.deinit(&res); try testing.expect(!gpa.deinit()); } test "parse values with tags" { var gpa = GPA{}; const input = "nil [frobnicate]yea [confuzzlementate]nah [fillibigimentate]nil"; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); // Keys try testing.expectEqual(res.items[0].key.Numeric, 0); try testing.expectEqualSlices(u8, res.items[1].key.String, "frobnicate"); try testing.expectEqualSlices(u8, res.items[2].key.String, "confuzzlementate"); try testing.expectEqualSlices(u8, res.items[3].key.String, "fillibigimentate"); // Values try testing.expectEqual(res.items[0].value, .None); try testing.expectEqual(res.items[1].value, .True); try testing.expectEqual(res.items[2].value, .False); try testing.expectEqual(res.items[3].value, .None); Parser.deinit(&res); try testing.expect(!gpa.deinit()); } test "parse lists with tags" { var gpa = GPA{}; const input = "[xyz](nil [foo]yea [bar]nah [baz]nil nil)"; var p = Parser{ .input = input }; var res = try 
p.parse(gpa.allocator()); try testing.expectEqualSlices(u8, res.items[0].key.String, "xyz"); try testing.expectEqual(meta.activeTag(res.items[0].value), .List); var list = res.items[0].value.List; // Keys try testing.expectEqual(list.items[0].key.Numeric, 0); try testing.expectEqualSlices(u8, list.items[1].key.String, "foo"); try testing.expectEqualSlices(u8, list.items[2].key.String, "bar"); try testing.expectEqualSlices(u8, list.items[3].key.String, "baz"); try testing.expectEqual(list.items[4].key.Numeric, 1); // Values try testing.expectEqual(list.items[0].value, .None); try testing.expectEqual(list.items[1].value, .True); try testing.expectEqual(list.items[2].value, .False); try testing.expectEqual(list.items[3].value, .None); try testing.expectEqual(list.items[4].value, .None); Parser.deinit(&res); try testing.expect(!gpa.deinit()); } pub fn deserializeValue(comptime T: type, val: Value, default: ?T) !T { return switch (@typeInfo(T)) { .NoReturn, .Void, .Type => error.E, .Vector, .ComptimeInt, .ComptimeFloat, .Undefined => error.E, .Bool => switch (val) { .True => true, .False => false, else => error.E, }, .Int => switch (val) { .Char => |c| @intCast(T, c), .String => |s| fmt.parseInt(T, s.constSlice(), 0) catch error.E, .Usize => |u| if (T == usize) u else error.E, .Isize => |u| if (T == isize) u else error.E, else => error.E, }, .Float => switch (val) { .String => |s| fmt.parseFloat(T, s.constSlice()) catch error.E, else => error.E, }, .Enum => switch (val) { .String => |s| std.meta.stringToEnum(T, s.constSlice()) orelse error.E, else => error.E, }, .Union => switch (val) { .List => |l| { if (l.items.len > 2 or l.items.len == 0) return error.E; const tag = try deserializeValue(std.meta.Tag(T), l.items[0].value, null); inline for (std.meta.fields(T)) |field| { if (mem.eql(u8, field.name, @tagName(tag))) { if (field.field_type == void) { if (l.items.len == 2) return error.E; return @unionInit(T, field.name, {}); } else { if (l.items.len == 1) return error.E; 
const fld = try deserializeValue(field.field_type, l.items[1].value, null); return @unionInit(T, field.name, fld); } } } return error.E; }, else => error.E, }, .Optional => |optional| switch (val) { .None => null, else => try deserializeValue(optional.child, val, null), }, .Struct => switch (val) { .List => |l| try deserializeStruct(T, l, default.?), else => error.E, }, else => @panic("TODO"), }; } pub fn deserializeStruct(comptime T: type, data: KVList, initial: T) !T { const struct_info = @typeInfo(T).Struct; const fields = struct_info.fields; var output: T = initial; for (data.items) |node| { // Using block labels and 'break :block;' instead of the clunky 'found' // variables segfaults Zig (See: #2727) // // https://github.com/ziglang/zig/issues/2727 switch (node.key) { .Numeric => |n| { var found: bool = false; inline for (fields) |f, i| { if (n == i) { @field(output, f.name) = try deserializeValue(f.field_type, node.value, f.default_value); found = true; } } if (!found) return error.TooManyItems; }, .String => |s| { var found: bool = false; inline for (fields) |f| { if (mem.eql(u8, s, f.name)) { @field(output, f.name) = try deserializeValue(f.field_type, node.value, f.default_value); found = true; } } if (!found) return error.NoSuchTag; }, } } return output; } test "value deserial" { try testing.expectEqual(deserializeValue(bool, .True, null), true); try testing.expectEqual(deserializeValue(bool, .False, null), false); try testing.expectEqual(deserializeValue(usize, Value{ .String = StringBuffer.init("0") }, null), 0); try testing.expectEqual(deserializeValue(usize, Value{ .String = StringBuffer.init("231") }, null), 231); try testing.expectEqual(deserializeValue(isize, Value{ .String = StringBuffer.init("-1") }, null), -1); try testing.expectEqual(deserializeValue(isize, Value{ .String = StringBuffer.init("91") }, null), 91); try testing.expectEqual(deserializeValue(f64, Value{ .String = StringBuffer.init("15.21") }, null), 15.21); } test "struct deserial" { 
const Type = struct { foo: usize = 0, bar: bool = true, baz: isize = 0 }; var gpa = GPA{}; const input = "([foo]12 [bar]yea [baz]-2)"; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); const r = try deserializeStruct(Type, res.items[0].value.List, .{}); try testing.expectEqual(r.foo, 12); try testing.expectEqual(r.bar, true); try testing.expectEqual(r.baz, -2); Parser.deinit(&res); try testing.expect(!gpa.deinit()); } test "union deserial" { const Type = union(enum) { foo: usize, bar: bool, baz: isize, flu }; var gpa = GPA{}; { const input = "(foo 12)"; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); defer Parser.deinit(&res); try testing.expectEqual(deserializeValue(Type, res.items[0].value, null), .{ .foo = 12 }); } { const input = "(bar nah)"; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); defer Parser.deinit(&res); try testing.expectEqual(deserializeValue(Type, res.items[0].value, null), .{ .bar = false }); } { const input = "(baz -49)"; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); defer Parser.deinit(&res); try testing.expectEqual(deserializeValue(Type, res.items[0].value, null), .{ .baz = -49 }); } { const input = "(flu)"; var p = Parser{ .input = input }; var res = try p.parse(gpa.allocator()); defer Parser.deinit(&res); try testing.expectEqual(deserializeValue(Type, res.items[0].value, null), .flu); } try testing.expect(!gpa.deinit()); }
https://raw.githubusercontent.com/kiedtl/roguelike/ce3a7aab0dcac8da9f0bc75fe79d6a1dc1f78604/src/cbf.zig
const std = @import("std");
const math = std.math;
const print = std.debug.print;
const Complex = std.math.Complex;

const bit_reverse = @import("bit_reverse").bit_reverse;
const ct_dit_bf2_0 = @import("butterflies").ct_dit_bf2_0;
const ct_dit_bf2 = @import("butterflies").ct_dit_bf2;
const sr_dit_bf4_0 = @import("butterflies").sr_dit_bf4_0;
const sr_dit_bf4_pi4 = @import("butterflies").sr_dit_bf4_pi4;
const sr_dit_bf4 = @import("butterflies").sr_dit_bf4;
const get_twiddle = @import("Twiddles").Std.get;

/// Out-of-place FFT of length N (N must be a power of two, since log2(N)
/// stages are taken) using the split-radix decimation-in-time kernel below.
/// `w1`/`w3` are the precomputed twiddle-factor tables consumed by
/// `get_twiddle`; `in` is read, `out` is written.
pub fn fft(
    comptime C: type,
    N: usize,
    w1: [*]C,
    w3: [*]C,
    out: [*]C,
    in: [*]C,
) void {
    const log2_N: usize = math.log2(N);
    sr_dit_bi_G_P(C, w1, w3, out, in, N, log2_N);
}

/// Split-radix DIT FFT: bit-reversal permutation, then length-2 butterflies,
/// then log2_N - 1 stages of L-shaped (radix-4) butterflies.
/// The `is`/`id` pair implements the standard skip pattern of split-radix
/// implementations: visit indices `is, is+id, is+2*id, ...`, then advance
/// `is` and quadruple `id` to cover the remaining positions of the stage.
pub fn sr_dit_bi_G_P(
    comptime C: type,
    w1: [*]C,
    w3: [*]C,
    out: [*]C,
    in: [*]C,
    N: usize,
    log2_N: usize,
) void {
    // Copy input into output in bit-reversed order so the DIT stages
    // can operate in place on `out`.
    var i: usize = 0;
    var j: usize = 0;
    while (i < N) : (i += 1) {
        j = bit_reverse(i, log2_N);
        out[i] = in[j];
    }

    // length 2 transforms
    var is: usize = 0;
    var id: usize = 4;
    var i_0: usize = undefined;
    while (is < N) {
        i_0 = is;
        while (i_0 < N) : (i_0 += id) {
            ct_dit_bf2_0(C, 1, 1, out + i_0, out + i_0);
        }
        is = 2 * id - 2;
        id = 4 * id;
    }

    if (log2_N < 2) {
        return;
    }

    // L shaped butterflies (first radix-4 stage: trivial twiddles)
    is = 0;
    id = 8;
    while (is < N) {
        i_0 = is;
        while (i_0 < N) : (i_0 += id) {
            sr_dit_bf4_0(C, 1, out + i_0);
        }
        is = 2 * id - 4 + 0;
        id = 4 * id;
    }

    // Remaining stages k = 3 .. log2_N.
    var k: usize = 3;
    while (k <= log2_N) : (k += 1) {
        // Stage sub-lengths; never mutated, so declared const
        // (unmutated `var` is a compile error in current Zig).
        const n2: usize = math.shl(usize, 1, k); // current transform length
        const n4: usize = math.shr(usize, n2, 2); // n2 / 4
        const n8: usize = math.shr(usize, n2, 3); // n2 / 8

        // j == 0 butterflies: twiddle factor is 1.
        is = 0;
        id = math.shl(usize, 2, k);
        while (is < N) {
            i_0 = is;
            while (i_0 < N) : (i_0 += id) {
                sr_dit_bf4_0(C, n4, out + i_0);
            }
            is = 2 * id - n2 + 0;
            id = 4 * id;
        }

        // j == n8 butterflies: twiddle angle pi/4 (special-cased kernel).
        is = n8;
        id = math.shl(usize, 2, k);
        while (is < N) {
            i_0 = is;
            while (i_0 < N) : (i_0 += id) {
                sr_dit_bf4_pi4(C, n4, out + i_0);
            }
            is = 2 * id - n2 + n8;
            id = 4 * id;
        }

        // General butterflies: each j uses twiddles w^t and w^(3t).
        j = 1;
        while (j < n8) : (j += 1) {
            var t: usize = math.shl(usize, j, log2_N - k);
            var tw1: C = get_twiddle(C, t, log2_N, w1);
            var tw3: C = get_twiddle(C, t, log2_N, w3);

            is = j;
            id = math.shl(usize, 2, k);
            while (is < N) {
                i_0 = is;
                while (i_0 < N) : (i_0 += id) {
                    sr_dit_bf4(C, n4, out + i_0, tw1, tw3);
                }
                is = 2 * id - n2 + j;
                id = 4 * id;
            }

            // Mirrored index j + n8 reuses the loop with shifted twiddles.
            t += math.shl(usize, n8, log2_N - k);
            tw1 = get_twiddle(C, t, log2_N, w1);
            tw3 = get_twiddle(C, t, log2_N, w3);

            is = j;
            id = math.shl(usize, 2, k);
            while (is < N) {
                i_0 = is;
                while (i_0 < N) : (i_0 += id) {
                    sr_dit_bf4(C, n4, out + i_0 + n8, tw1, tw3);
                }
                is = 2 * id - n2 + j;
                id = 4 * id;
            }
        }
    }
}
https://raw.githubusercontent.com/BlueAlmost/zfft-orchard/b83445ab809b26e5e5dfd3092a2d695355cab02a/src/ffts/sr_dit_bi_G_P.zig
const std = @import("std"); const StaticStringMap = std.StaticStringMap; const Token = @import("token.zig").Token; /// current position in input (points to current char) position: usize = 0, /// iterator over the utf8 codepoints utf8: std.unicode.Utf8Iterator, const Lexer = @This(); // pub const SpannedToken = struct { // tok: Token, // loc: Loc, // span: Span, // pub const Loc = struct { // line: u32 = 1, // col: u32 = 1, // }; // pub const Span = struct { // start: u32, // end: u32, // }; // }; pub fn init(input: []const u8) !Lexer { return .{ .utf8 = (try std.unicode.Utf8View.init(input)).iterator() }; } pub fn nextToken(self: *Lexer) ?Token { self.skipWhitespace(); return switch (self.readChar() orelse return null) { '+' => .plus, '*' => .star, '/' => .slash, '%' => .percent, ',' => .comma, ';' => .semicolon, '(' => .lparen, ')' => .rparen, '{' => .lbrace, '}' => .rbrace, '=' => if (self.peekChar() == '=') blk: { _ = self.readChar(); break :blk .eq; } else .assign, '!' => if (self.peekChar() == '=') blk: { _ = self.readChar(); break :blk .neq; } else .bang, '<' => if (self.peekChar() == '=') blk: { _ = self.readChar(); break :blk .leq; } else .lt, '>' => if (self.peekChar() == '=') blk: { _ = self.readChar(); break :blk .geq; } else .gt, '-' => if (isDigit(self.peekChar())) blk: { const start = self.position; const end = self.endOfInt(); const literal = self.utf8.bytes[start..end]; break :blk .{ .int = literal }; } else .minus, else => |ch| if (isIdentStart(ch)) blk: { const start = self.position; const end = self.endOfIdentifier(); const literal = self.utf8.bytes[start..end]; break :blk fromIdentifier(literal); } else if (isDigit(ch)) blk: { const start = self.position; const end = self.endOfInt(); const literal = self.utf8.bytes[start..end]; break :blk .{ .int = literal }; } else blk: { const len = std.unicode.utf8CodepointSequenceLength(ch) catch unreachable; break :blk .{ .illegal = self.utf8.bytes[self.position..][0..len] }; }, }; } fn readChar(self: 
*Lexer) ?u21 { self.position = self.utf8.i; return self.utf8.nextCodepoint(); } fn peekChar(self: *Lexer) ?u21 { const slice = self.utf8.peek(1); if (slice.len == 0) return null; return std.unicode.utf8Decode(slice) catch unreachable; } fn skipWhitespace(self: *Lexer) void { while (isWhitespace(self.peekChar())) { _ = self.readChar(); } } fn endOfIdentifier(self: *Lexer) usize { while (isIdentContinue(self.peekChar())) { _ = self.readChar(); } return self.utf8.i; } fn endOfInt(self: *Lexer) usize { while (isDigit(self.peekChar())) { _ = self.readChar(); } return self.utf8.i; } fn isWhitespace(char: ?u21) bool { return switch (char orelse return false) { ' ', '\t', '\r', '\n' => true, else => false, }; } const isIdentStart = isLetter; fn isIdentContinue(char: ?u21) bool { return isLetter(char) or isDigit(char); } fn isLetter(char: ?u21) bool { return switch (char orelse return false) { 'a'...'z', 'A'...'Z', '_' => true, else => false, }; } fn isDigit(char: ?u21) bool { return switch (char orelse return false) { '0'...'9' => true, else => false, }; } fn fromIdentifier(literal: []const u8) Token { const keywords = comptime StaticStringMap(Token).initComptime(.{ .{ "fn", .func }, .{ "let", .let }, .{ "true", .true }, .{ "false", .false }, .{ "if", .@"if" }, .{ "else", .@"else" }, .{ "return", .@"return" }, .{ "break", .@"break" }, }); return keywords.get(literal) orelse .{ .ident = literal }; } const testing = std.testing; test "next token" { const input = \\let five = 5; \\let ten = 10; \\ \\break; \\ \\let add = fn(x, y) { \\ x + y \\}; \\ \\let result = add(five, ten); \\ \\let assert = fn(cond) { \\ if (cond) true else false \\}; \\ \\let assertNot = fn(cond) { \\ return !assert(cond); \\}; \\ \\assert(five <= ten); \\assert(five != ten); \\assertNot(five == ten); \\assertNot(five >= ten); \\!-5 < 10 > 5 / 2 * 1 ; const tests = [_]?Token{ .let, .{ .ident = "five" }, .assign, .{ .int = "5" }, .semicolon, .let, .{ .ident = "ten" }, .assign, .{ .int = "10" }, 
.semicolon, .@"break", .semicolon, .let, .{ .ident = "add" }, .assign, .func, .lparen, .{ .ident = "x" }, .comma, .{ .ident = "y" }, .rparen, .lbrace, .{ .ident = "x" }, .plus, .{ .ident = "y" }, .rbrace, .semicolon, .let, .{ .ident = "result" }, .assign, .{ .ident = "add" }, .lparen, .{ .ident = "five" }, .comma, .{ .ident = "ten" }, .rparen, .semicolon, .let, .{ .ident = "assert" }, .assign, .func, .lparen, .{ .ident = "cond" }, .rparen, .lbrace, .@"if", .lparen, .{ .ident = "cond" }, .rparen, .true, .@"else", .false, .rbrace, .semicolon, .let, .{ .ident = "assertNot" }, .assign, .func, .lparen, .{ .ident = "cond" }, .rparen, .lbrace, .@"return", .bang, .{ .ident = "assert" }, .lparen, .{ .ident = "cond" }, .rparen, .semicolon, .rbrace, .semicolon, .{ .ident = "assert" }, .lparen, .{ .ident = "five" }, .leq, .{ .ident = "ten" }, .rparen, .semicolon, .{ .ident = "assert" }, .lparen, .{ .ident = "five" }, .neq, .{ .ident = "ten" }, .rparen, .semicolon, .{ .ident = "assertNot" }, .lparen, .{ .ident = "five" }, .eq, .{ .ident = "ten" }, .rparen, .semicolon, .{ .ident = "assertNot" }, .lparen, .{ .ident = "five" }, .geq, .{ .ident = "ten" }, .rparen, .semicolon, .bang, .{ .int = "-5" }, .lt, .{ .int = "10" }, .gt, .{ .int = "5" }, .slash, .{ .int = "2" }, .star, .{ .int = "1" }, null, }; var lexer = Lexer.init(input) catch unreachable; inline for (0.., tests) |i, expected| { const tok = lexer.nextToken(); testing.expectEqualDeep(expected, tok) catch |err| switch (err) { error.TestExpectedEqual => std.debug.panic("[test #{d}] actual: {?}", .{ i, tok }), }; } }
https://raw.githubusercontent.com/DanikVitek/monkey-lang/ae9ab1453190c79135b1352cd6a79afb0892a7a1/src/Lexer.zig
const std = @import("std");

/// Build script for the lc3-zig executable (legacy pre-0.10 build API:
/// `standardReleaseOptions`, `addExecutable(name, root)`).
/// Defines the default install step plus a `zig build run` step that
/// forwards any `-- args` to the program.
pub fn build(b: *std.build.Builder) void {
    // Standard target options allows the person running `zig build` to choose
    // what target to build for. Here we do not override the defaults, which
    // means any target is allowed, and the default is native. Other options
    // for restricting supported target set are available.
    const target = b.standardTargetOptions(.{});

    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const mode = b.standardReleaseOptions();

    const exe = b.addExecutable("lc3-zig", "src/main.zig");
    exe.setTarget(target);
    exe.setBuildMode(mode);
    // Install the artifact into zig-out on `zig build`.
    exe.install();

    // `zig build run` executes the freshly built binary; depending on the
    // install step ensures the artifact exists first.
    const run_cmd = exe.run();
    run_cmd.step.dependOn(b.getInstallStep());
    if (b.args) |args| {
        // Forward command-line arguments given after `--` to the program.
        run_cmd.addArgs(args);
    }

    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);
}
https://raw.githubusercontent.com/deckarep/lc3-zig/6d38a33097cb31d8d89f8ca1ba7030492bee09aa/build.zig
const vk = @import("vk");

/// Create a depth-only render pass for shadow-map rendering.
/// The single attachment is a cleared depth buffer of the given `format`
/// that ends the pass in SHADER_READ layout so the main pass can sample it.
/// Caller owns the returned VkRenderPass.
pub fn createShadowRenderPass(device: vk.VkDevice, format: vk.VkFormat) vk.VkRenderPass {
    const attachment = vk.VkAttachmentDescription{
        .format = format,
        .samples = vk.VK_SAMPLE_COUNT_1_BIT,
        .loadOp = vk.VK_ATTACHMENT_LOAD_OP_CLEAR,
        .storeOp = vk.VK_ATTACHMENT_STORE_OP_STORE,
        .stencilLoadOp = vk.VK_ATTACHMENT_LOAD_OP_DONT_CARE,
        .stencilStoreOp = vk.VK_ATTACHMENT_STORE_OP_DONT_CARE,
        .initialLayout = vk.VK_IMAGE_LAYOUT_UNDEFINED,
        // Transitioned to shader read at render pass end.
        .finalLayout = vk.VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
        .flags = 0,
    };

    // Attachment 0 is bound as the subpass's depth/stencil attachment.
    const ds_attachment = vk.VkAttachmentReference{
        .attachment = 0,
        .layout = vk.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
    };

    // Single subpass with no color attachments — depth output only.
    const subpasses = [_]vk.VkSubpassDescription{vk.VkSubpassDescription{
        .pipelineBindPoint = vk.VK_PIPELINE_BIND_POINT_GRAPHICS,
        .colorAttachmentCount = 0,
        .pColorAttachments = null,
        .flags = 0,
        .inputAttachmentCount = 0,
        .pInputAttachments = null,
        .pResolveAttachments = null,
        .pDepthStencilAttachment = &ds_attachment,
        .preserveAttachmentCount = 0,
        .pPreserveAttachments = null,
    }};

    const dependencies = [_]vk.VkSubpassDependency{
        // External -> subpass: wait for any prior fragment-shader reads of the
        // shadow map before writing depth this frame.
        vk.VkSubpassDependency{
            .srcSubpass = vk.VK_SUBPASS_EXTERNAL,
            .dstSubpass = 0,
            .srcStageMask = vk.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            .dstStageMask = vk.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
            .srcAccessMask = vk.VK_ACCESS_SHADER_READ_BIT,
            .dstAccessMask = vk.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            .dependencyFlags = vk.VK_DEPENDENCY_BY_REGION_BIT,
        },
        // Subpass -> external: make the depth writes visible to later
        // fragment-shader sampling.
        vk.VkSubpassDependency{
            .srcSubpass = 0,
            .dstSubpass = vk.VK_SUBPASS_EXTERNAL,
            .srcStageMask = vk.VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
            .dstStageMask = vk.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            .srcAccessMask = vk.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            .dstAccessMask = vk.VK_ACCESS_SHADER_READ_BIT,
            .dependencyFlags = vk.VK_DEPENDENCY_BY_REGION_BIT,
        },
    };

    const attachments = [_]vk.VkAttachmentDescription{attachment};
    const info = vk.VkRenderPassCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
        .attachmentCount = attachments.len,
        .pAttachments = &attachments,
        .subpassCount = subpasses.len,
        .pSubpasses = &subpasses,
        .dependencyCount = dependencies.len,
        .pDependencies = &dependencies,
        .pNext = null,
        .flags = 0,
    };

    var ret: vk.VkRenderPass = undefined;
    const res = vk.createRenderPass(device, &info, null, &ret);
    // assertSuccess panics/asserts on a non-success VkResult.
    vk.assertSuccess(res);
    return ret;
}

/// Create the main presentation render pass: one color attachment in the
/// swapchain `format` (cleared, stored, ends in PRESENT_SRC layout) and a
/// transient D32 depth attachment (cleared, discarded after the pass).
/// Caller owns the returned VkRenderPass.
pub fn createRenderPass(device: vk.VkDevice, format: vk.VkFormat) vk.VkRenderPass {
    const attachment = vk.VkAttachmentDescription{
        .format = format,
        .samples = vk.VK_SAMPLE_COUNT_1_BIT,
        // Clear at beginning of the render pass.
        .loadOp = vk.VK_ATTACHMENT_LOAD_OP_CLEAR,
        // Store for reading.
        .storeOp = vk.VK_ATTACHMENT_STORE_OP_STORE,
        .stencilLoadOp = vk.VK_ATTACHMENT_LOAD_OP_DONT_CARE,
        .stencilStoreOp = vk.VK_ATTACHMENT_STORE_OP_DONT_CARE,
        .initialLayout = vk.VK_IMAGE_LAYOUT_UNDEFINED,
        .finalLayout = vk.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
        .flags = 0,
    };

    const attachment_ref = [1]vk.VkAttachmentReference{vk.VkAttachmentReference{
        .attachment = 0,
        .layout = vk.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
    }};

    // Depth buffer: contents are only needed within the pass, so storeOp is
    // DONT_CARE (not sampled afterwards).
    const depth_attachment = vk.VkAttachmentDescription{
        .format = vk.VK_FORMAT_D32_SFLOAT,
        .samples = vk.VK_SAMPLE_COUNT_1_BIT,
        .loadOp = vk.VK_ATTACHMENT_LOAD_OP_CLEAR,
        .storeOp = vk.VK_ATTACHMENT_STORE_OP_DONT_CARE,
        .stencilLoadOp = vk.VK_ATTACHMENT_LOAD_OP_DONT_CARE,
        .stencilStoreOp = vk.VK_ATTACHMENT_STORE_OP_DONT_CARE,
        .initialLayout = vk.VK_IMAGE_LAYOUT_UNDEFINED,
        .finalLayout = vk.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
        .flags = 0,
    };

    const depth_attachment_ref = vk.VkAttachmentReference{
        .attachment = 1,
        .layout = vk.VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
    };

    const subpasses = [_]vk.VkSubpassDescription{vk.VkSubpassDescription{
        .pipelineBindPoint = vk.VK_PIPELINE_BIND_POINT_GRAPHICS,
        .colorAttachmentCount = 1,
        .pColorAttachments = @as(*const [1]vk.VkAttachmentReference, &attachment_ref),
        .flags = 0,
        .inputAttachmentCount = 0,
        .pInputAttachments = null,
        .pResolveAttachments = null,
        .pDepthStencilAttachment = &depth_attachment_ref,
        .preserveAttachmentCount = 0,
        .pPreserveAttachments = null,
    }};

    // Single external dependency covering both color output and early depth
    // tests, so the pass waits for the swapchain image to become available.
    const dependencies = [_]vk.VkSubpassDependency{vk.VkSubpassDependency{
        .srcSubpass = vk.VK_SUBPASS_EXTERNAL,
        .dstSubpass = 0,
        .srcStageMask = vk.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | vk.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
        .srcAccessMask = 0,
        .dstStageMask = vk.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | vk.VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
        .dstAccessMask = vk.VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | vk.VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
        .dependencyFlags = 0,
    }};

    const attachments = [_]vk.VkAttachmentDescription{ attachment, depth_attachment };
    const info = vk.VkRenderPassCreateInfo{
        .sType = vk.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
        .attachmentCount = attachments.len,
        .pAttachments = &attachments,
        .subpassCount = subpasses.len,
        .pSubpasses = &subpasses,
        .dependencyCount = dependencies.len,
        .pDependencies = &dependencies,
        .pNext = null,
        .flags = 0,
    };

    var ret: vk.VkRenderPass = undefined;
    const res = vk.createRenderPass(device, &info, null, &ret);
    vk.assertSuccess(res);
    return ret;
}
https://raw.githubusercontent.com/fubark/cosmic/15f85e4bf5c6618c237d6f11f540717f7df16eb6/graphics/src/backend/vk/renderpass.zig
const std = @import("std");

/// Hand categories for Camel Cards, ordered weakest to strongest so that
/// @intFromEnum yields a directly comparable rank.
const HandType = enum { highCard, onePair, twoPair, threeOfAKind, fullHouse, fourOfAKind, fiveOfAKind };

/// Maps a card character to its strength ('2' -> 1 ... '9' -> 8, T/J/Q/K/A ->
/// 9..13). Any other character maps to 0.
fn cardValue(card: u8) u8 {
    return switch (card) {
        'A' => 13,
        'K' => 12,
        'Q' => 11,
        'J' => 10,
        'T' => 9,
        '2'...'9' => card - '0' - 1,
        else => 0,
    };
}

const Hand = struct {
    cards: []const u8,
    bid: u32,
    type: HandType,

    /// Classifies the hand from card multiplicities. `pop_card` tracks the
    /// most frequent card seen so far; it is only assigned once a card appears
    /// more than once, so for pop_count >= 2 branches it is non-null.
    fn getType(self: Hand) HandType {
        var pop_count: u32 = 0;
        var pop_card: ?u8 = null;
        for (self.cards) |card| {
            // Don't re-count the current most-popular card.
            if (pop_card != null and pop_card.? == card) {
                continue;
            }
            const card_count = self.count(card);
            if (card_count >= pop_count) {
                pop_count = card_count;
                if (card_count > 1) {
                    pop_card = card;
                }
            }
        }
        switch (pop_count) {
            5 => {
                return HandType.fiveOfAKind;
            },
            4 => {
                return HandType.fourOfAKind;
            },
            3 => {
                // Full house iff the remaining two cards form a pair; the
                // first card outside the triple decides it.
                for (self.cards) |card| {
                    if (card != pop_card.?) {
                        if (self.count(card) == 2) {
                            return HandType.fullHouse;
                        }
                        return HandType.threeOfAKind;
                    }
                }
            },
            2 => {
                // Two pair iff a second pair exists among the cards outside
                // the first pair; otherwise one pair.
                var found_card = false;
                for (self.cards) |card| {
                    if (card != pop_card.?) {
                        if (self.count(card) == 2) {
                            return HandType.twoPair;
                        }
                        if (found_card) {
                            return HandType.onePair;
                        }
                        found_card = true;
                    }
                }
                return HandType.onePair;
            },
            else => {
                return HandType.highCard;
            },
        }
        // Unreachable in practice (pop_count == 3 always returns in the loop
        // above for 5-card hands); kept as a safe fallback.
        return HandType.highCard;
    }

    /// Reads "<cards> <bid>" lines from `reader`, classifies each hand, and
    /// returns the hands sorted weakest-first. Caller owns the returned slice
    /// and the duplicated card strings (allocated with `allocator`).
    pub fn parseHands(reader: anytype, allocator: std.mem.Allocator) ![]Hand {
        var buf: [64]u8 = undefined;
        var hands = std.ArrayList(Hand).init(allocator);
        defer hands.deinit();
        while (true) {
            const line = (try reader.readUntilDelimiterOrEof(&buf, '\n')) orelse break;
            var splitter = std.mem.split(u8, line, " ");
            const cards = splitter.next() orelse return error.ParseError;
            const bid_str = splitter.next() orelse return error.ParseError;
            const bid = try std.fmt.parseUnsigned(u32, bid_str, 10);
            var hand: Hand = undefined;
            // `line` points into `buf` and is reused next iteration, so the
            // cards must be copied.
            hand.cards = try allocator.dupe(u8, cards);
            hand.bid = bid;
            hand.type = hand.getType();
            try hands.append(hand);
        }
        // BUGFIX: removed a leftover no-op self-assignment
        // (`sorted_hands[0] = sorted_hands[0];`) that did nothing.
        const sorted_hands = try hands.toOwnedSlice();
        std.mem.sort(Hand, sorted_hands, {}, Hand.lessThan);
        return sorted_hands;
    }

    /// Sort predicate: orders by hand type first, then card-by-card strength.
    pub fn lessThan(context: void, a: Hand, b: Hand) bool {
        _ = context;
        const a_type = @intFromEnum(a.type);
        const b_type = @intFromEnum(b.type);
        if (a_type < b_type) {
            return true;
        }
        if (a_type > b_type) {
            return false;
        }
        for (a.cards, b.cards) |a_card, b_card| {
            const a_rank = cardValue(a_card);
            const b_rank = cardValue(b_card);
            if (a_rank < b_rank) {
                return true;
            }
            if (a_rank > b_rank) {
                return false;
            }
        }
        return false;
    }

    /// Number of times `card` occurs in this hand.
    pub fn count(self: Hand, card: u8) u32 {
        var num: u32 = 0;
        for (self.cards) |c| {
            if (c == card) {
                num += 1;
            }
        }
        return num;
    }
};

/// Reads hands from stdin, ranks them weakest-first, and prints the total
/// winnings (sum of rank * bid). An arena frees all parse allocations at exit.
pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    const stdin = std.io.getStdIn();
    const stdout = std.io.getStdOut();
    const sorted_hands = try Hand.parseHands(stdin.reader(), allocator);
    var sum: u32 = 0;
    var rank: u32 = 1;
    for (sorted_hands) |hand| {
        defer rank += 1;
        std.debug.print("{} * {}: {s} ({})\n", .{ rank, hand.bid, hand.cards, hand.type });
        sum += rank * hand.bid;
    }
    try stdout.writer().print("Sum: {}\n", .{sum});
}
https://raw.githubusercontent.com/spicydll/AdventOfCode/e6fa45eb3264315c805aea72c1464ce291f46aed/2023/7/zig/d7p1.zig
//! Advent of Code 2021 day 19: places scanners relative to scanner 0 by
//! trying all 24 axis rotations and matching at least 12 overlapping beacons.
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const int = i64;
const util = @import("util.zig");
// NOTE(review): gpa comes from util.zig — presumably a general-purpose
// allocator; not visible here.
const gpa = util.gpa;
const data = @embedFile("../data/day19.txt");

/// Integer 3-D point/vector with componentwise add/sub, the 24 axis-aligned
/// rotations, cross product, and equality.
const P3 = struct {
    x: i32,
    y: i32,
    z: i32,

    /// Componentwise sum.
    pub fn add(self: @This(), b: @This()) @This() {
        const result = P3{
            .x = self.x + b.x,
            .y = self.y + b.y,
            .z = self.z + b.z,
        };
        return result;
    }

    /// Componentwise difference.
    pub fn sub(self: @This(), b: @This()) @This() {
        const result = P3{
            .x = self.x - b.x,
            .y = self.y - b.y,
            .z = self.z - b.z,
        };
        return result;
    }

    /// Applies one of the 24 proper axis-aligned rotations (0..23); any other
    /// value is unreachable. The "rotations" test below checks handedness of
    /// every case via cross(x, y) == z.
    pub fn rotate(self: @This(), rotation: u32) P3 {
        const x = self.x;
        const y = self.y;
        const z = self.z;
        const result = switch (rotation) {
            0 => P3{ .x = x, .y = y, .z = z },
            1 => P3{ .x = x, .y = z, .z = -y },
            2 => P3{ .x = x, .y = -y, .z = -z },
            3 => P3{ .x = x, .y = -z, .z = y },
            4 => P3{ .x = -x, .y = y, .z = -z },
            5 => P3{ .x = -x, .y = z, .z = y },
            6 => P3{ .x = -x, .y = -y, .z = z },
            7 => P3{ .x = -x, .y = -z, .z = -y },
            8 => P3{ .x = y, .y = -x, .z = z },
            9 => P3{ .x = y, .y = -z, .z = -x },
            10 => P3{ .x = y, .y = x, .z = -z },
            11 => P3{ .x = y, .y = z, .z = x },
            12 => P3{ .x = -y, .y = -x, .z = -z },
            13 => P3{ .x = -y, .y = -z, .z = x },
            14 => P3{ .x = -y, .y = x, .z = z },
            15 => P3{ .x = -y, .y = z, .z = -x },
            16 => P3{ .x = z, .y = x, .z = y },
            17 => P3{ .x = z, .y = y, .z = -x },
            18 => P3{ .x = z, .y = -x, .z = -y },
            19 => P3{ .x = z, .y = -y, .z = x },
            20 => P3{ .x = -z, .y = x, .z = -y },
            21 => P3{ .x = -z, .y = y, .z = x },
            22 => P3{ .x = -z, .y = -x, .z = y },
            23 => P3{ .x = -z, .y = -y, .z = -x },
            else => unreachable,
        };
        return result;
    }

    /// Cross product a x b.
    pub fn cross(a: P3, b: P3) P3 {
        const result = P3{
            .x = a.y * b.z - a.z * b.y,
            .y = a.z * b.x - a.x * b.z,
            .z = a.x * b.y - a.y * b.x,
        };
        return result;
    }

    /// Componentwise equality.
    pub fn eql(a: P3, b: P3) bool {
        return a.x == b.x and a.y == b.y and a.z == b.z;
    }
};

// Sanity-check all 24 rotations preserve right-handedness:
// rotate(x) x rotate(y) must equal rotate(z).
test "rotations" {
    var rot: u32 = 0;
    var any_failed = false;
    while (rot < 24) : (rot += 1) {
        const x = P3{ .x = 1, .y = 0, .z = 0 };
        const y = P3{ .x = 0, .y = 1, .z = 0 };
        const z = P3{ .x = 0, .y = 0, .z = 1 };
        const rx = x.rotate(rot);
        const ry = y.rotate(rot);
        const rz = z.rotate(rot);
        if (!rx.cross(ry).eql(rz)) {
            print("\nwrong: {}\n", .{rot});
            any_failed = true;
        }
    }
    if (any_failed) unreachable;
}

/// One scanner's beacon readings in its own coordinate frame.
const Scanner = struct {
    beacons: []const P3,
};

/// Placement state: `placed` has a SET bit for every scanner NOT yet placed
/// (initFull sets all bits; `add` unsets the bit when a scanner is placed).
/// `rotations`/`positions` record each placed scanner's orientation and
/// offset; `known_beacons` is the union of all placed beacons in scanner-0
/// coordinates.
const ScanMap = struct {
    placed: std.DynamicBitSet,
    rotations: []u32,
    positions: []P3,
    known_beacons: std.AutoArrayHashMap(P3, void),

    pub fn init(count: usize) !ScanMap {
        return ScanMap{
            .placed = try std.DynamicBitSet.initFull(gpa, count),
            .rotations = try gpa.alloc(u32, count),
            .positions = try gpa.alloc(P3, count),
            .known_beacons = std.AutoArrayHashMap(P3, void).init(gpa),
        };
    }

    pub fn deinit(self: *@This()) void {
        self.known_beacons.deinit();
        gpa.free(self.positions);
        gpa.free(self.rotations);
        self.placed.deinit();
        self.* = undefined;
    }

    /// Marks scanner `index` as placed and merges its beacons (rotated and
    /// translated into scanner-0 space) into known_beacons.
    pub fn add(self: *@This(), scanner: Scanner, rotation: u32, position: P3, index: usize) void {
        self.placed.unset(index);
        self.rotations[index] = rotation;
        self.positions[index] = position;
        for (scanner.beacons) |be| {
            self.known_beacons.put(be.rotate(rotation).add(position), {}) catch unreachable;
        }
    }
};

/// Parses the embedded input, places every scanner by 12-beacon overlap
/// voting, then prints part 1 (distinct beacons) and part 2 (max Manhattan
/// distance between scanners) with timings.
pub fn main() !void {
    var timer = try std.time.Timer.start();
    // Parse: each blank-separated section becomes one Scanner.
    var recs = blk: {
        var lines = tokenize(u8, data, "\r\n");
        var beacons = std.ArrayList(Scanner).init(gpa);
        errdefer beacons.deinit();
        var points = std.ArrayList(P3).init(gpa);
        defer points.deinit();
        while (lines.next()) |line| {
            if (line.len == 0) {
                continue;
            }
            // Second char '-' — presumably a "--- scanner N ---" header:
            // flush the previous scanner's points.
            if (line[1] == '-') {
                if (points.items.len > 0) {
                    const items = points.toOwnedSlice();
                    try beacons.append(.{ .beacons = items });
                }
                continue;
            }
            var parts = split(u8, line, ",");
            try points.append(.{
                .x = parseInt(i32, parts.next().?, 10) catch unreachable,
                .y = parseInt(i32, parts.next().?, 10) catch unreachable,
                .z = parseInt(i32, parts.next().?, 10) catch unreachable,
            });
            assert(parts.next() == null);
        }
        // Flush the final scanner.
        if (points.items.len > 0) {
            const items = points.toOwnedSlice();
            try beacons.append(.{ .beacons = items });
        }
        break :blk beacons.toOwnedSlice();
    };
    defer gpa.free(recs);
    const parse_time = timer.lap();
    var count_table = Map(P3, u8).init(gpa);
    defer count_table.deinit();
    var maps = try ScanMap.init(recs.len);
    defer maps.deinit();
    // Scanner 0 anchors the coordinate system.
    maps.add(recs[0], 0, .{ .x = 0, .y = 0, .z = 0 }, 0);
    // Repeatedly try every unplaced scanner against the known beacons: for
    // each rotation, vote on the implied scanner position; 12 agreeing
    // beacon pairs confirm a placement.
    placed: while (maps.placed.count() != 0) {
        var it = maps.placed.iterator(.{});
        while (it.next()) |idx| {
            const scanner = recs[idx];
            var rotation: u32 = 0;
            while (rotation < 24) : (rotation += 1) {
                count_table.clearRetainingCapacity();
                for (scanner.beacons) |ank_be| {
                    for (maps.known_beacons.keys()) |ank_raw| {
                        // If ank_be and ank_raw are the same physical beacon,
                        // this difference is the scanner's position.
                        const scanner_pos = ank_raw.sub(ank_be.rotate(rotation));
                        const entry = try count_table.getOrPut(scanner_pos);
                        if (entry.found_existing) {
                            entry.value_ptr.* += 1;
                            if (entry.value_ptr.* >= 12) {
                                //print("Scanner {} @{},{},{}\n", .{idx, scanner_pos.x, scanner_pos.y, scanner_pos.z});
                                maps.add(scanner, rotation, scanner_pos, idx);
                                continue :placed;
                            }
                        } else {
                            entry.value_ptr.* = 1;
                        }
                    }
                }
            }
        }
        // A full sweep placed nothing: the input is inconsistent.
        unreachable;
    }
    const part1 = maps.known_beacons.count();
    const part1_time = timer.lap();
    // Part 2: maximum Manhattan distance over all scanner pairs.
    var part2: int = 0;
    for (maps.positions[0 .. maps.positions.len - 1]) |p0, i| {
        for (maps.positions[i + 1 ..]) |p1| {
            const delta = p0.sub(p1);
            const manh = (std.math.absInt(delta.x) catch unreachable) + (std.math.absInt(delta.y) catch unreachable) + (std.math.absInt(delta.z) catch unreachable);
            if (manh > part2) part2 = manh;
        }
    }
    const part2_time = timer.read();
    print("part1={}, part2={}\n", .{ part1, part2 });
    print("Timing: parse={}, part1={}, part2={}, total={}\n", .{ parse_time, part1_time, part2_time, parse_time + part1_time + part2_time });
}

// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const eql = std.mem.eql;
const parseEnum = std.meta.stringToEnum;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc;
https://raw.githubusercontent.com/SpexGuy/Advent2021/2cb7b10b412fe6c67c1ae57f32a4b791bd9a41a2/src/day19.zig
const std = @import("std");
const config = @import("config.zig");
const Todo = @import("todo.zig");

const Allocator = std.mem.Allocator;
const Dir = std.fs.Dir;
const File = std.fs.File;
const OpenDirOptions = Dir.OpenDirOptions;
const OpenFlags = File.OpenFlags;
const CreateFlags = File.CreateFlags;

pub const MAX_PATH_BYTES = std.fs.MAX_PATH_BYTES;

/// Opens (creating it if needed) the directory containing the executable;
/// the to-do file lives next to the binary.
pub fn getRootDir() !Dir {
    var buffer: [MAX_PATH_BYTES]u8 = undefined;
    const root_path = try std.fs.selfExeDirPath(&buffer);
    return try std.fs.cwd().makeOpenPath(root_path, OpenDirOptions{});
}

/// Save Todo to a file
pub fn save(todo: Todo) !void {
    var dir = try getRootDir();
    const file = try dir.createFile(config.FILE_NAME, CreateFlags{});
    // BUGFIX: the created file handle used to leak; close it on every path.
    defer file.close();
    var buffer: [config.MAX_LINE * 100]u8 = undefined;
    const string = try Todo.Parser.str(todo, buffer[0..]);
    // BUGFIX: `write` may perform a short write and its count was ignored;
    // writeAll guarantees the whole serialization reaches the file.
    try file.writeAll(string);
}

/// Parse Todo from file. Returns a null Todo if file is not found.
pub fn read(allocator: *Allocator) !?Todo {
    // BUGFIX: +1 reserves room for the 0 sentinel — previously
    // `buffer[tail] = 0` was out of bounds whenever the file filled the
    // buffer exactly.
    var buffer = try allocator.alloc(u8, config.MAX_LINE * 100 + 1);
    var dir = try getRootDir();
    const file = dir.openFile(config.FILE_NAME, OpenFlags{}) catch return null;
    // BUGFIX: the opened file handle used to leak.
    defer file.close();
    // Read at most the original capacity so behavior is unchanged.
    const tail = try file.reader().read(buffer[0 .. buffer.len - 1]);
    buffer[tail] = 0;
    return Todo.Parser.parse(allocator, buffer[0..tail :0]) catch blk: {
        try @import("cli.zig").printFail("File {s} is corrupted.\n", .{config.FILE_NAME});
        break :blk null;
    };
}

/// Deletes the to-do file from the root directory.
/// BUGFIX: the old signature `delete(self: Self) Dir.DeleteFileError!void`
/// referenced an undeclared `Self` and an error set that could not cover
/// getRootDir's errors — it did not compile, so no caller can depend on it.
pub fn delete() !void {
    var dir = try getRootDir();
    try dir.deleteFile(config.FILE_NAME);
}
https://raw.githubusercontent.com/amrojjeh/Zig-to-do-list/3426534f217e41b0c63ad5b0deaf9a31773b4363/src/io.zig
//! One file entry of a package: its path, the location of its bytes, and an
//! optionally opened handle to the backing file.
const std = @import("std");

/// File path; owned by this struct (freed in deinit).
path: []const u8,
/// Offset of this file's data — presumably within the package; confirm
/// against the reader/writer code.
data_offset: u32,
/// Length in bytes of this file's data.
data_length: u32,
/// Open handle to the backing file, if one has been opened.
fd: ?std.fs.File = null,

/// Closes the file handle (when present) and frees the owned path.
pub fn deinit(self: @This(), allocator: std.mem.Allocator) void {
    if (self.fd != null) {
        self.fd.?.close();
    }
    allocator.free(self.path);
}
https://raw.githubusercontent.com/jvyden/SPK/a7371f63fbcbe7cf73ae852ed469a4d61274f903/src/types/package_file/package_file.zig
//! NOTE: this file is autogenerated, DO NOT MODIFY //-------------------------------------------------------------------------------- // Section: Constants (92) //-------------------------------------------------------------------------------- pub const ASN_UNIVERSAL = @as(u32, 0); pub const ASN_APPLICATION = @as(u32, 64); pub const ASN_CONTEXT = @as(u32, 128); pub const ASN_PRIVATE = @as(u32, 192); pub const ASN_PRIMITIVE = @as(u32, 0); pub const ASN_CONSTRUCTOR = @as(u32, 32); pub const SNMP_ACCESS_NONE = @as(u32, 0); pub const SNMP_ACCESS_NOTIFY = @as(u32, 1); pub const SNMP_ACCESS_READ_ONLY = @as(u32, 2); pub const SNMP_ACCESS_READ_WRITE = @as(u32, 3); pub const SNMP_ACCESS_READ_CREATE = @as(u32, 4); pub const SNMPAPI_NOERROR = @as(u32, 1); pub const SNMPAPI_ERROR = @as(u32, 0); pub const SNMP_OUTPUT_TO_EVENTLOG = @as(u32, 4); pub const DEFAULT_SNMP_PORT_UDP = @as(u32, 161); pub const DEFAULT_SNMP_PORT_IPX = @as(u32, 36879); pub const DEFAULT_SNMPTRAP_PORT_UDP = @as(u32, 162); pub const DEFAULT_SNMPTRAP_PORT_IPX = @as(u32, 36880); pub const SNMP_MAX_OID_LEN = @as(u32, 128); pub const SNMP_MEM_ALLOC_ERROR = @as(u32, 1); pub const SNMP_BERAPI_INVALID_LENGTH = @as(u32, 10); pub const SNMP_BERAPI_INVALID_TAG = @as(u32, 11); pub const SNMP_BERAPI_OVERFLOW = @as(u32, 12); pub const SNMP_BERAPI_SHORT_BUFFER = @as(u32, 13); pub const SNMP_BERAPI_INVALID_OBJELEM = @as(u32, 14); pub const SNMP_PDUAPI_UNRECOGNIZED_PDU = @as(u32, 20); pub const SNMP_PDUAPI_INVALID_ES = @as(u32, 21); pub const SNMP_PDUAPI_INVALID_GT = @as(u32, 22); pub const SNMP_AUTHAPI_INVALID_VERSION = @as(u32, 30); pub const SNMP_AUTHAPI_INVALID_MSG_TYPE = @as(u32, 31); pub const SNMP_AUTHAPI_TRIV_AUTH_FAILED = @as(u32, 32); pub const ASN_CONTEXTSPECIFIC = @as(u32, 128); pub const ASN_PRIMATIVE = @as(u32, 0); pub const SNMP_MGMTAPI_TIMEOUT = @as(u32, 40); pub const SNMP_MGMTAPI_SELECT_FDERRORS = @as(u32, 41); pub const SNMP_MGMTAPI_TRAP_ERRORS = @as(u32, 42); pub const SNMP_MGMTAPI_TRAP_DUPINIT = 
@as(u32, 43); pub const SNMP_MGMTAPI_NOTRAPS = @as(u32, 44); pub const SNMP_MGMTAPI_AGAIN = @as(u32, 45); pub const SNMP_MGMTAPI_INVALID_CTL = @as(u32, 46); pub const SNMP_MGMTAPI_INVALID_SESSION = @as(u32, 47); pub const SNMP_MGMTAPI_INVALID_BUFFER = @as(u32, 48); pub const MGMCTL_SETAGENTPORT = @as(u32, 1); pub const MAXOBJIDSIZE = @as(u32, 128); pub const MAXOBJIDSTRSIZE = @as(u32, 1408); pub const SNMPLISTEN_USEENTITY_ADDR = @as(u32, 0); pub const SNMPLISTEN_ALL_ADDR = @as(u32, 1); pub const SNMP_TRAP_COLDSTART = @as(u32, 0); pub const SNMP_TRAP_WARMSTART = @as(u32, 1); pub const SNMP_TRAP_LINKDOWN = @as(u32, 2); pub const SNMP_TRAP_LINKUP = @as(u32, 3); pub const SNMP_TRAP_AUTHFAIL = @as(u32, 4); pub const SNMP_TRAP_EGPNEIGHBORLOSS = @as(u32, 5); pub const SNMP_TRAP_ENTERPRISESPECIFIC = @as(u32, 6); pub const SNMPAPI_NO_SUPPORT = @as(u32, 0); pub const SNMPAPI_V1_SUPPORT = @as(u32, 1); pub const SNMPAPI_V2_SUPPORT = @as(u32, 2); pub const SNMPAPI_M2M_SUPPORT = @as(u32, 3); pub const SNMPAPI_FAILURE = @as(u32, 0); pub const SNMPAPI_SUCCESS = @as(u32, 1); pub const SNMPAPI_ALLOC_ERROR = @as(u32, 2); pub const SNMPAPI_CONTEXT_INVALID = @as(u32, 3); pub const SNMPAPI_CONTEXT_UNKNOWN = @as(u32, 4); pub const SNMPAPI_ENTITY_INVALID = @as(u32, 5); pub const SNMPAPI_ENTITY_UNKNOWN = @as(u32, 6); pub const SNMPAPI_INDEX_INVALID = @as(u32, 7); pub const SNMPAPI_NOOP = @as(u32, 8); pub const SNMPAPI_OID_INVALID = @as(u32, 9); pub const SNMPAPI_OPERATION_INVALID = @as(u32, 10); pub const SNMPAPI_OUTPUT_TRUNCATED = @as(u32, 11); pub const SNMPAPI_PDU_INVALID = @as(u32, 12); pub const SNMPAPI_SESSION_INVALID = @as(u32, 13); pub const SNMPAPI_SYNTAX_INVALID = @as(u32, 14); pub const SNMPAPI_VBL_INVALID = @as(u32, 15); pub const SNMPAPI_MODE_INVALID = @as(u32, 16); pub const SNMPAPI_SIZE_INVALID = @as(u32, 17); pub const SNMPAPI_NOT_INITIALIZED = @as(u32, 18); pub const SNMPAPI_MESSAGE_INVALID = @as(u32, 19); pub const SNMPAPI_HWND_INVALID = @as(u32, 20); pub const 
SNMPAPI_OTHER_ERROR = @as(u32, 99); pub const SNMPAPI_TL_NOT_INITIALIZED = @as(u32, 100); pub const SNMPAPI_TL_NOT_SUPPORTED = @as(u32, 101); pub const SNMPAPI_TL_NOT_AVAILABLE = @as(u32, 102); pub const SNMPAPI_TL_RESOURCE_ERROR = @as(u32, 103); pub const SNMPAPI_TL_UNDELIVERABLE = @as(u32, 104); pub const SNMPAPI_TL_SRC_INVALID = @as(u32, 105); pub const SNMPAPI_TL_INVALID_PARAM = @as(u32, 106); pub const SNMPAPI_TL_IN_USE = @as(u32, 107); pub const SNMPAPI_TL_TIMEOUT = @as(u32, 108); pub const SNMPAPI_TL_PDU_TOO_BIG = @as(u32, 109); pub const SNMPAPI_TL_OTHER = @as(u32, 199); pub const MAXVENDORINFO = @as(u32, 32); //-------------------------------------------------------------------------------- // Section: Types (29) //-------------------------------------------------------------------------------- pub const SNMP_PDU_TYPE = enum(u32) { GET = 160, GETNEXT = 161, RESPONSE = 162, SET = 163, GETBULK = 165, TRAP = 167, }; pub const SNMP_PDU_GET = SNMP_PDU_TYPE.GET; pub const SNMP_PDU_GETNEXT = SNMP_PDU_TYPE.GETNEXT; pub const SNMP_PDU_RESPONSE = SNMP_PDU_TYPE.RESPONSE; pub const SNMP_PDU_SET = SNMP_PDU_TYPE.SET; pub const SNMP_PDU_GETBULK = SNMP_PDU_TYPE.GETBULK; pub const SNMP_PDU_TRAP = SNMP_PDU_TYPE.TRAP; pub const SNMP_EXTENSION_REQUEST_TYPE = enum(u32) { GET = 160, GET_NEXT = 161, SET_TEST = 224, SET_COMMIT = 163, SET_UNDO = 225, SET_CLEANUP = 226, }; pub const SNMP_EXTENSION_GET = SNMP_EXTENSION_REQUEST_TYPE.GET; pub const SNMP_EXTENSION_GET_NEXT = SNMP_EXTENSION_REQUEST_TYPE.GET_NEXT; pub const SNMP_EXTENSION_SET_TEST = SNMP_EXTENSION_REQUEST_TYPE.SET_TEST; pub const SNMP_EXTENSION_SET_COMMIT = SNMP_EXTENSION_REQUEST_TYPE.SET_COMMIT; pub const SNMP_EXTENSION_SET_UNDO = SNMP_EXTENSION_REQUEST_TYPE.SET_UNDO; pub const SNMP_EXTENSION_SET_CLEANUP = SNMP_EXTENSION_REQUEST_TYPE.SET_CLEANUP; pub const SNMP_API_TRANSLATE_MODE = enum(u32) { TRANSLATED = 0, UNTRANSLATED_V1 = 1, UNTRANSLATED_V2 = 2, }; pub const SNMPAPI_TRANSLATED = SNMP_API_TRANSLATE_MODE.TRANSLATED; 
pub const SNMPAPI_UNTRANSLATED_V1 = SNMP_API_TRANSLATE_MODE.UNTRANSLATED_V1; pub const SNMPAPI_UNTRANSLATED_V2 = SNMP_API_TRANSLATE_MODE.UNTRANSLATED_V2; pub const SNMP_GENERICTRAP = enum(u32) { COLDSTART = 0, WARMSTART = 1, LINKDOWN = 2, LINKUP = 3, AUTHFAILURE = 4, EGPNEIGHLOSS = 5, ENTERSPECIFIC = 6, }; pub const SNMP_GENERICTRAP_COLDSTART = SNMP_GENERICTRAP.COLDSTART; pub const SNMP_GENERICTRAP_WARMSTART = SNMP_GENERICTRAP.WARMSTART; pub const SNMP_GENERICTRAP_LINKDOWN = SNMP_GENERICTRAP.LINKDOWN; pub const SNMP_GENERICTRAP_LINKUP = SNMP_GENERICTRAP.LINKUP; pub const SNMP_GENERICTRAP_AUTHFAILURE = SNMP_GENERICTRAP.AUTHFAILURE; pub const SNMP_GENERICTRAP_EGPNEIGHLOSS = SNMP_GENERICTRAP.EGPNEIGHLOSS; pub const SNMP_GENERICTRAP_ENTERSPECIFIC = SNMP_GENERICTRAP.ENTERSPECIFIC; pub const SNMP_ERROR_STATUS = enum(u32) { NOERROR = 0, TOOBIG = 1, NOSUCHNAME = 2, BADVALUE = 3, READONLY = 4, GENERR = 5, NOACCESS = 6, WRONGTYPE = 7, WRONGLENGTH = 8, WRONGENCODING = 9, WRONGVALUE = 10, NOCREATION = 11, INCONSISTENTVALUE = 12, RESOURCEUNAVAILABLE = 13, COMMITFAILED = 14, UNDOFAILED = 15, AUTHORIZATIONERROR = 16, NOTWRITABLE = 17, INCONSISTENTNAME = 18, }; pub const SNMP_ERRORSTATUS_NOERROR = SNMP_ERROR_STATUS.NOERROR; pub const SNMP_ERRORSTATUS_TOOBIG = SNMP_ERROR_STATUS.TOOBIG; pub const SNMP_ERRORSTATUS_NOSUCHNAME = SNMP_ERROR_STATUS.NOSUCHNAME; pub const SNMP_ERRORSTATUS_BADVALUE = SNMP_ERROR_STATUS.BADVALUE; pub const SNMP_ERRORSTATUS_READONLY = SNMP_ERROR_STATUS.READONLY; pub const SNMP_ERRORSTATUS_GENERR = SNMP_ERROR_STATUS.GENERR; pub const SNMP_ERRORSTATUS_NOACCESS = SNMP_ERROR_STATUS.NOACCESS; pub const SNMP_ERRORSTATUS_WRONGTYPE = SNMP_ERROR_STATUS.WRONGTYPE; pub const SNMP_ERRORSTATUS_WRONGLENGTH = SNMP_ERROR_STATUS.WRONGLENGTH; pub const SNMP_ERRORSTATUS_WRONGENCODING = SNMP_ERROR_STATUS.WRONGENCODING; pub const SNMP_ERRORSTATUS_WRONGVALUE = SNMP_ERROR_STATUS.WRONGVALUE; pub const SNMP_ERRORSTATUS_NOCREATION = SNMP_ERROR_STATUS.NOCREATION; pub const 
SNMP_ERRORSTATUS_INCONSISTENTVALUE = SNMP_ERROR_STATUS.INCONSISTENTVALUE; pub const SNMP_ERRORSTATUS_RESOURCEUNAVAILABLE = SNMP_ERROR_STATUS.RESOURCEUNAVAILABLE; pub const SNMP_ERRORSTATUS_COMMITFAILED = SNMP_ERROR_STATUS.COMMITFAILED; pub const SNMP_ERRORSTATUS_UNDOFAILED = SNMP_ERROR_STATUS.UNDOFAILED; pub const SNMP_ERRORSTATUS_AUTHORIZATIONERROR = SNMP_ERROR_STATUS.AUTHORIZATIONERROR; pub const SNMP_ERRORSTATUS_NOTWRITABLE = SNMP_ERROR_STATUS.NOTWRITABLE; pub const SNMP_ERRORSTATUS_INCONSISTENTNAME = SNMP_ERROR_STATUS.INCONSISTENTNAME; pub const SNMP_STATUS = enum(u32) { N = 1, FF = 0, }; pub const SNMPAPI_ON = SNMP_STATUS.N; pub const SNMPAPI_OFF = SNMP_STATUS.FF; pub const SNMP_OUTPUT_LOG_TYPE = enum(u32) { CONSOLE = 1, LOGFILE = 2, DEBUGGER = 8, }; pub const SNMP_OUTPUT_TO_CONSOLE = SNMP_OUTPUT_LOG_TYPE.CONSOLE; pub const SNMP_OUTPUT_TO_LOGFILE = SNMP_OUTPUT_LOG_TYPE.LOGFILE; pub const SNMP_OUTPUT_TO_DEBUGGER = SNMP_OUTPUT_LOG_TYPE.DEBUGGER; pub const SNMP_LOG = enum(u32) { SILENT = 0, FATAL = 1, ERROR = 2, WARNING = 3, TRACE = 4, VERBOSE = 5, }; pub const SNMP_LOG_SILENT = SNMP_LOG.SILENT; pub const SNMP_LOG_FATAL = SNMP_LOG.FATAL; pub const SNMP_LOG_ERROR = SNMP_LOG.ERROR; pub const SNMP_LOG_WARNING = SNMP_LOG.WARNING; pub const SNMP_LOG_TRACE = SNMP_LOG.TRACE; pub const SNMP_LOG_VERBOSE = SNMP_LOG.VERBOSE; pub const SNMP_ERROR = enum(u32) { NOERROR = 0, TOOBIG = 1, NOSUCHNAME = 2, BADVALUE = 3, READONLY = 4, GENERR = 5, NOACCESS = 6, WRONGTYPE = 7, WRONGLENGTH = 8, WRONGENCODING = 9, WRONGVALUE = 10, NOCREATION = 11, INCONSISTENTVALUE = 12, RESOURCEUNAVAILABLE = 13, COMMITFAILED = 14, UNDOFAILED = 15, AUTHORIZATIONERROR = 16, NOTWRITABLE = 17, INCONSISTENTNAME = 18, }; pub const SNMP_ERROR_NOERROR = SNMP_ERROR.NOERROR; pub const SNMP_ERROR_TOOBIG = SNMP_ERROR.TOOBIG; pub const SNMP_ERROR_NOSUCHNAME = SNMP_ERROR.NOSUCHNAME; pub const SNMP_ERROR_BADVALUE = SNMP_ERROR.BADVALUE; pub const SNMP_ERROR_READONLY = SNMP_ERROR.READONLY; pub const SNMP_ERROR_GENERR 
= SNMP_ERROR.GENERR; pub const SNMP_ERROR_NOACCESS = SNMP_ERROR.NOACCESS; pub const SNMP_ERROR_WRONGTYPE = SNMP_ERROR.WRONGTYPE; pub const SNMP_ERROR_WRONGLENGTH = SNMP_ERROR.WRONGLENGTH; pub const SNMP_ERROR_WRONGENCODING = SNMP_ERROR.WRONGENCODING; pub const SNMP_ERROR_WRONGVALUE = SNMP_ERROR.WRONGVALUE; pub const SNMP_ERROR_NOCREATION = SNMP_ERROR.NOCREATION; pub const SNMP_ERROR_INCONSISTENTVALUE = SNMP_ERROR.INCONSISTENTVALUE; pub const SNMP_ERROR_RESOURCEUNAVAILABLE = SNMP_ERROR.RESOURCEUNAVAILABLE; pub const SNMP_ERROR_COMMITFAILED = SNMP_ERROR.COMMITFAILED; pub const SNMP_ERROR_UNDOFAILED = SNMP_ERROR.UNDOFAILED; pub const SNMP_ERROR_AUTHORIZATIONERROR = SNMP_ERROR.AUTHORIZATIONERROR; pub const SNMP_ERROR_NOTWRITABLE = SNMP_ERROR.NOTWRITABLE; pub const SNMP_ERROR_INCONSISTENTNAME = SNMP_ERROR.INCONSISTENTNAME; pub const AsnOctetString = extern struct { stream: ?*u8 align(4), length: u32 align(4), dynamic: BOOL align(4), }; pub const AsnObjectIdentifier = extern struct { idLength: u32 align(4), ids: ?*u32 align(4), }; pub const AsnAny = extern struct { asnType: u8, asnValue: extern union { number: i32 align(4), unsigned32: u32 align(4), counter64: ULARGE_INTEGER align(4), string: AsnOctetString align(4), bits: AsnOctetString align(4), object: AsnObjectIdentifier align(4), sequence: AsnOctetString align(4), address: AsnOctetString align(4), counter: u32 align(4), gauge: u32 align(4), ticks: u32 align(4), arbitrary: AsnOctetString align(4), }, }; pub const SnmpVarBind = extern struct { name: AsnObjectIdentifier, value: AsnAny, }; pub const SnmpVarBindList = extern struct { list: ?*SnmpVarBind align(4), len: u32 align(4), }; pub const PFNSNMPEXTENSIONINIT = switch (@import("builtin").zig_backend) { .stage1 => fn( dwUpTimeReference: u32, phSubagentTrapEvent: ?*?HANDLE, pFirstSupportedRegion: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) BOOL, else => *const fn( dwUpTimeReference: u32, phSubagentTrapEvent: ?*?HANDLE, pFirstSupportedRegion: 
?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) BOOL, } ; pub const PFNSNMPEXTENSIONINITEX = switch (@import("builtin").zig_backend) { .stage1 => fn( pNextSupportedRegion: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) BOOL, else => *const fn( pNextSupportedRegion: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) BOOL, } ; pub const PFNSNMPEXTENSIONMONITOR = switch (@import("builtin").zig_backend) { .stage1 => fn( pAgentMgmtData: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) BOOL, else => *const fn( pAgentMgmtData: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) BOOL, } ; pub const PFNSNMPEXTENSIONQUERY = switch (@import("builtin").zig_backend) { .stage1 => fn( bPduType: u8, pVarBindList: ?*SnmpVarBindList, pErrorStatus: ?*i32, pErrorIndex: ?*i32, ) callconv(@import("std").os.windows.WINAPI) BOOL, else => *const fn( bPduType: u8, pVarBindList: ?*SnmpVarBindList, pErrorStatus: ?*i32, pErrorIndex: ?*i32, ) callconv(@import("std").os.windows.WINAPI) BOOL, } ; pub const PFNSNMPEXTENSIONQUERYEX = switch (@import("builtin").zig_backend) { .stage1 => fn( nRequestType: u32, nTransactionId: u32, pVarBindList: ?*SnmpVarBindList, pContextInfo: ?*AsnOctetString, pErrorStatus: ?*i32, pErrorIndex: ?*i32, ) callconv(@import("std").os.windows.WINAPI) BOOL, else => *const fn( nRequestType: u32, nTransactionId: u32, pVarBindList: ?*SnmpVarBindList, pContextInfo: ?*AsnOctetString, pErrorStatus: ?*i32, pErrorIndex: ?*i32, ) callconv(@import("std").os.windows.WINAPI) BOOL, } ; pub const PFNSNMPEXTENSIONTRAP = switch (@import("builtin").zig_backend) { .stage1 => fn( pEnterpriseOid: ?*AsnObjectIdentifier, pGenericTrapId: ?*i32, pSpecificTrapId: ?*i32, pTimeStamp: ?*u32, pVarBindList: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) BOOL, else => *const fn( pEnterpriseOid: ?*AsnObjectIdentifier, pGenericTrapId: ?*i32, pSpecificTrapId: ?*i32, pTimeStamp: ?*u32, pVarBindList: ?*SnmpVarBindList, 
) callconv(@import("std").os.windows.WINAPI) BOOL, } ; pub const PFNSNMPEXTENSIONCLOSE = switch (@import("builtin").zig_backend) { .stage1 => fn( ) callconv(@import("std").os.windows.WINAPI) void, else => *const fn( ) callconv(@import("std").os.windows.WINAPI) void, } ; pub const smiOCTETS = extern struct { len: u32, ptr: ?*u8, }; pub const smiOID = extern struct { len: u32, ptr: ?*u32, }; pub const smiCNTR64 = extern struct { hipart: u32, lopart: u32, }; pub const smiVALUE = extern struct { syntax: u32, value: extern union { sNumber: i32, uNumber: u32, hNumber: smiCNTR64, string: smiOCTETS, oid: smiOID, empty: u8, }, }; pub const smiVENDORINFO = extern struct { vendorName: [64]CHAR, vendorContact: [64]CHAR, vendorVersionId: [32]CHAR, vendorVersionDate: [32]CHAR, vendorEnterprise: u32, }; pub const SNMPAPI_CALLBACK = switch (@import("builtin").zig_backend) { .stage1 => fn( hSession: isize, hWnd: ?HWND, wMsg: u32, wParam: WPARAM, lParam: LPARAM, lpClientData: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) u32, else => *const fn( hSession: isize, hWnd: ?HWND, wMsg: u32, wParam: WPARAM, lParam: LPARAM, lpClientData: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) u32, } ; pub const PFNSNMPSTARTUPEX = switch (@import("builtin").zig_backend) { .stage1 => fn( param0: ?*u32, param1: ?*u32, param2: ?*u32, param3: ?*u32, param4: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32, else => *const fn( param0: ?*u32, param1: ?*u32, param2: ?*u32, param3: ?*u32, param4: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32, } ; pub const PFNSNMPCLEANUPEX = switch (@import("builtin").zig_backend) { .stage1 => fn( ) callconv(@import("std").os.windows.WINAPI) u32, else => *const fn( ) callconv(@import("std").os.windows.WINAPI) u32, } ; //-------------------------------------------------------------------------------- // Section: Functions (84) //-------------------------------------------------------------------------------- // TODO: this type is limited 
to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidCpy( pOidDst: ?*AsnObjectIdentifier, pOidSrc: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidAppend( pOidDst: ?*AsnObjectIdentifier, pOidSrc: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidNCmp( pOid1: ?*AsnObjectIdentifier, pOid2: ?*AsnObjectIdentifier, nSubIds: u32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidCmp( pOid1: ?*AsnObjectIdentifier, pOid2: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidFree( pOid: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOctetsCmp( pOctets1: ?*AsnOctetString, pOctets2: ?*AsnOctetString, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOctetsNCmp( pOctets1: ?*AsnOctetString, pOctets2: ?*AsnOctetString, nChars: u32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOctetsCpy( pOctetsDst: ?*AsnOctetString, pOctetsSrc: ?*AsnOctetString, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOctetsFree( pOctets: ?*AsnOctetString, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilAsnAnyCpy( pAnyDst: ?*AsnAny, pAnySrc: ?*AsnAny, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this 
type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilAsnAnyFree( pAny: ?*AsnAny, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilVarBindCpy( pVbDst: ?*SnmpVarBind, pVbSrc: ?*SnmpVarBind, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilVarBindFree( pVb: ?*SnmpVarBind, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilVarBindListCpy( pVblDst: ?*SnmpVarBindList, pVblSrc: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilVarBindListFree( pVbl: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilMemFree( pMem: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilMemAlloc( nBytes: u32, ) callconv(@import("std").os.windows.WINAPI) ?*anyopaque; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilMemReAlloc( pMem: ?*anyopaque, nBytes: u32, ) callconv(@import("std").os.windows.WINAPI) ?*anyopaque; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidToA( Oid: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) ?PSTR; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilIdsToA( Ids: ?*u32, IdLength: u32, ) callconv(@import("std").os.windows.WINAPI) ?PSTR; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilPrintOid( Oid: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 
'windows5.0' pub extern "snmpapi" fn SnmpUtilPrintAsnAny( pAny: ?*AsnAny, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpSvcGetUptime( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpSvcSetLogLevel( nLogLevel: SNMP_LOG, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpSvcSetLogType( nLogType: SNMP_OUTPUT_LOG_TYPE, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilDbgPrint( nLogLevel: SNMP_LOG, szFormat: ?PSTR, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrOpen( lpAgentAddress: ?PSTR, lpAgentCommunity: ?PSTR, nTimeOut: i32, nRetries: i32, ) callconv(@import("std").os.windows.WINAPI) ?*anyopaque; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrCtl( session: ?*anyopaque, dwCtlCode: u32, lpvInBuffer: ?*anyopaque, cbInBuffer: u32, lpvOUTBuffer: ?*anyopaque, cbOUTBuffer: u32, lpcbBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrClose( session: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrRequest( session: ?*anyopaque, requestType: u8, variableBindings: ?*SnmpVarBindList, errorStatus: ?*SNMP_ERROR_STATUS, errorIndex: ?*i32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrStrToOid( string: ?PSTR, oid: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to 
platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrOidToStr( oid: ?*AsnObjectIdentifier, string: ?*?PSTR, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrTrapListen( phTrapAvailable: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrGetTrap( enterprise: ?*AsnObjectIdentifier, IPAddress: ?*AsnOctetString, genericTrap: ?*SNMP_GENERICTRAP, specificTrap: ?*i32, timeStamp: ?*u32, variableBindings: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrGetTrapEx( enterprise: ?*AsnObjectIdentifier, agentAddress: ?*AsnOctetString, sourceAddress: ?*AsnOctetString, genericTrap: ?*SNMP_GENERICTRAP, specificTrap: ?*i32, community: ?*AsnOctetString, timeStamp: ?*u32, variableBindings: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetTranslateMode( nTranslateMode: ?*SNMP_API_TRANSLATE_MODE, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetTranslateMode( nTranslateMode: SNMP_API_TRANSLATE_MODE, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetRetransmitMode( nRetransmitMode: ?*SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetRetransmitMode( nRetransmitMode: SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetTimeout( hEntity: isize, nPolicyTimeout: ?*u32, nActualTimeout: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; 
// TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetTimeout( hEntity: isize, nPolicyTimeout: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetRetry( hEntity: isize, nPolicyRetry: ?*u32, nActualRetry: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetRetry( hEntity: isize, nPolicyRetry: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetVendorInfo( vendorInfo: ?*smiVENDORINFO, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStartup( nMajorVersion: ?*u32, nMinorVersion: ?*u32, nLevel: ?*u32, nTranslateMode: ?*SNMP_API_TRANSLATE_MODE, nRetransmitMode: ?*SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCleanup( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpOpen( hWnd: ?HWND, wMsg: u32, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpClose( session: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSendMsg( session: isize, srcEntity: isize, dstEntity: isize, context: isize, PDU: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpRecvMsg( session: isize, srcEntity: ?*isize, dstEntity: ?*isize, context: ?*isize, PDU: ?*isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern 
"wsnmp32" fn SnmpRegister( session: isize, srcEntity: isize, dstEntity: isize, context: isize, notification: ?*smiOID, state: SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCreateSession( hWnd: ?HWND, wMsg: u32, fCallBack: ?SNMPAPI_CALLBACK, lpClientData: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpListen( hEntity: isize, lStatus: SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "wsnmp32" fn SnmpListenEx( hEntity: isize, lStatus: u32, nUseEntityAddr: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCancelMsg( session: isize, reqId: i32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStartupEx( nMajorVersion: ?*u32, nMinorVersion: ?*u32, nLevel: ?*u32, nTranslateMode: ?*SNMP_API_TRANSLATE_MODE, nRetransmitMode: ?*SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCleanupEx( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStrToEntity( session: isize, string: ?[*:0]const u8, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpEntityToStr( entity: isize, size: u32, string: [*:0]u8, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreeEntity( entity: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStrToContext( session: isize, string: 
?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpContextToStr( context: isize, string: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreeContext( context: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetPort( hEntity: isize, nPort: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCreatePdu( session: isize, PDU_type: SNMP_PDU_TYPE, request_id: i32, error_status: i32, error_index: i32, varbindlist: isize, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetPduData( PDU: isize, PDU_type: ?*SNMP_PDU_TYPE, request_id: ?*i32, error_status: ?*SNMP_ERROR, error_index: ?*i32, varbindlist: ?*isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetPduData( PDU: isize, PDU_type: ?*const i32, request_id: ?*const i32, non_repeaters: ?*const i32, max_repetitions: ?*const i32, varbindlist: ?*const isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpDuplicatePdu( session: isize, PDU: isize, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreePdu( PDU: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCreateVbl( session: isize, name: ?*smiOID, value: ?*smiVALUE, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern 
"wsnmp32" fn SnmpDuplicateVbl( session: isize, vbl: isize, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreeVbl( vbl: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCountVbl( vbl: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetVb( vbl: isize, index: u32, name: ?*smiOID, value: ?*smiVALUE, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetVb( vbl: isize, index: u32, name: ?*smiOID, value: ?*smiVALUE, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpDeleteVb( vbl: isize, index: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetLastError( session: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStrToOid( string: ?[*:0]const u8, dstOID: ?*smiOID, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpOidToStr( srcOID: ?*smiOID, size: u32, string: [*:0]u8, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpOidCopy( srcOID: ?*smiOID, dstOID: ?*smiOID, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpOidCompare( xOID: ?*smiOID, yOID: ?*smiOID, maxlen: u32, result: ?*i32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpEncodeMsg( 
session: isize, srcEntity: isize, dstEntity: isize, context: isize, pdu: isize, msgBufDesc: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpDecodeMsg( session: isize, srcEntity: ?*isize, dstEntity: ?*isize, context: ?*isize, pdu: ?*isize, msgBufDesc: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreeDescriptor( syntax: u32, descriptor: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) u32; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (8) //-------------------------------------------------------------------------------- const BOOL = @import("../foundation.zig").BOOL; const CHAR = @import("../foundation.zig").CHAR; const HANDLE = @import("../foundation.zig").HANDLE; const HWND = @import("../foundation.zig").HWND; const LPARAM = @import("../foundation.zig").LPARAM; const PSTR = @import("../foundation.zig").PSTR; const ULARGE_INTEGER = @import("../foundation.zig").ULARGE_INTEGER; const WPARAM = @import("../foundation.zig").WPARAM; test { // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476 if (@hasDecl(@This(), "PFNSNMPEXTENSIONINIT")) { _ = PFNSNMPEXTENSIONINIT; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONINITEX")) { _ = PFNSNMPEXTENSIONINITEX; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONMONITOR")) { _ = PFNSNMPEXTENSIONMONITOR; } if (@hasDecl(@This(), 
"PFNSNMPEXTENSIONQUERY")) { _ = PFNSNMPEXTENSIONQUERY; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONQUERYEX")) { _ = PFNSNMPEXTENSIONQUERYEX; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONTRAP")) { _ = PFNSNMPEXTENSIONTRAP; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONCLOSE")) { _ = PFNSNMPEXTENSIONCLOSE; } if (@hasDecl(@This(), "SNMPAPI_CALLBACK")) { _ = SNMPAPI_CALLBACK; } if (@hasDecl(@This(), "PFNSNMPSTARTUPEX")) { _ = PFNSNMPSTARTUPEX; } if (@hasDecl(@This(), "PFNSNMPCLEANUPEX")) { _ = PFNSNMPCLEANUPEX; } @setEvalBranchQuota( comptime @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (comptime @import("std").meta.declarations(@This())) |decl| { _ = @field(@This(), decl.name); } }
https://raw.githubusercontent.com/marlersoft/zigwin32/8e780b4482955c66d1509575938252f5938ccfd4/win32/network_management/snmp.zig
const std = @import("std");
const runner = @import("runner.zig");

pub const main = runner.run(solve);

/// Advent of Code 2022 day 1: the input is groups of calorie counts, one
/// number per line, with groups separated by blank lines. Part 1 is the
/// largest group total; part 2 is the sum of the three largest totals.
fn solve(_: std.mem.Allocator, input: []const u8) anyerror!void {
    const mostTotal = try mostTotalCalories(input);
    std.debug.print("most total: {}\n", .{mostTotal});

    const topThree = try topThreeCaloriesTotal(input);
    std.debug.print("top three total: {}\n", .{topThree});
}

/// Largest single group total (part 1).
fn mostTotalCalories(input: []const u8) !u32 {
    var buf = [1]u32{0};
    try mostTotalCaloriesTop(&buf, input);
    return buf[0];
}

/// Sum of the three largest group totals (part 2).
fn topThreeCaloriesTotal(input: []const u8) !u32 {
    var buf = [1]u32{0} ** 3;
    try mostTotalCaloriesTop(&buf, input);
    var total: u32 = 0;
    for (buf) |n| total += n;
    return total;
}

/// A null line (iterator exhausted) counts as empty so end-of-input closes
/// the final group exactly like a blank separator line does.
fn lineEmpty(line: ?[]const u8) bool {
    const l = line orelse return true;
    return l.len == 0;
}

/// Shift `slice` right by one element (dropping the last) and store `n` in
/// front. Renamed from `push_front_slice`: Zig functions are camelCase, and
/// every other function in this file already follows that convention.
fn pushFrontSlice(slice: []u32, n: u32) void {
    std.mem.copyBackwards(u32, slice[1..], slice[0..(slice.len - 1)]);
    slice[0] = n;
}

/// Fill `buf` with the `buf.len` largest group totals, in descending order.
/// `buf` must be zero-initialized by the caller.
fn mostTotalCaloriesTop(buf: []u32, input: []const u8) !void {
    var iter = std.mem.split(u8, input, "\n");
    var current: u32 = 0;
    while (true) {
        const line = iter.next();
        if (!lineEmpty(line)) {
            const amount = try std.fmt.parseInt(u32, line.?, 10);
            current += amount;
            continue;
        }
        // Group finished: insert `current` at the first slot it beats,
        // shifting smaller totals down to keep `buf` sorted descending.
        for (buf) |n, i| {
            if (n > current) continue;
            pushFrontSlice(buf[i..], current);
            break;
        }
        if (line == null) break;
        current = 0;
    }
}

test {
    const input =
        \\1000
        \\2000
        \\3000
        \\
        \\4000
        \\
        \\5000
        \\6000
        \\
        \\7000
        \\8000
        \\9000
        \\
        \\10000
    ;
    try std.testing.expectEqual(try mostTotalCalories(input), 24000);
    try std.testing.expectEqual(try topThreeCaloriesTotal(input), 45000);
}
https://raw.githubusercontent.com/JasperDeSutter/advent2022/07ef1a793619a7de1f130c57e568a683e52cbae5/src/day01.zig
const std = @import("std");

/// A rectangular field of '.' (empty), '#' (fixed rock) and 'O' (rolling
/// rock), stored row-major in one flat buffer of width * height bytes.
const Platform = struct {
    grid: []u8,
    width: usize,
    height: usize,
    allocator: std.mem.Allocator,

    fn init(width: usize, height: usize, allocator: std.mem.Allocator) !Platform {
        return Platform{
            .grid = try allocator.alloc(u8, width * height),
            .width = width,
            .height = height,
            .allocator = allocator,
        };
    }

    fn deinit(self: Platform) void {
        self.allocator.free(self.grid);
    }

    /// Deep copy; caller owns the returned platform.
    fn clone(self: Platform) !Platform {
        const platform = try Platform.init(self.width, self.height, self.allocator);
        @memcpy(platform.grid, self.grid);
        return platform;
    }

    /// Parse newline-separated rows into a Platform. Assumes every row,
    /// including the last, is terminated by '\n' (height = len / (width+1)).
    fn parse(input: []const u8, allocator: std.mem.Allocator) !Platform {
        const width = std.mem.indexOfScalar(u8, input, '\n').?;
        const height = input.len / (width + 1);
        const platform = try Platform.init(width, height, allocator);
        for (0..height) |y| {
            const dest = platform.grid[y * width ..][0..width];
            const source = input[y * (width + 1) ..][0..width];
            @memcpy(dest, source);
        }
        return platform;
    }

    /// Load on the north beam: each 'O' contributes (height - its row).
    fn getNorthLoad(self: Platform) usize {
        var load: usize = 0;
        for (self.grid, 0..) |char, i| {
            // Row of flat index i. Simplified from the original
            // (i - (i % width)) / width, which is just integer division.
            const y = i / self.width;
            if (char == 'O') {
                load += self.height - y;
            }
        }
        return load;
    }

    /// Roll every 'O' as far north as it can go. `end` tracks, per column,
    /// the flat index of the next free resting slot.
    fn tiltNorth(self: Platform) void {
        for (0..self.width) |x| {
            var end: usize = x;
            for (0..self.height) |y| {
                const index = x + y * self.width;
                const char = self.grid[index];
                if (char == 'O') {
                    self.grid[index] = '.';
                    self.grid[end] = 'O';
                    end += self.width;
                } else if (char == '#') {
                    end = index + self.width;
                }
            }
        }
    }

    /// Roll every 'O' as far west as it can go.
    fn tiltWest(self: Platform) void {
        for (0..self.height) |y| {
            var end: usize = y * self.width;
            for (0..self.width) |x| {
                const index = x + y * self.width;
                const char = self.grid[index];
                if (char == 'O') {
                    self.grid[index] = '.';
                    self.grid[end] = 'O';
                    end += 1;
                } else if (char == '#') {
                    end = index + 1;
                }
            }
        }
    }

    /// Roll every 'O' as far south as it can go (scan rows bottom-up).
    fn tiltSouth(self: Platform) void {
        for (0..self.width) |x| {
            var end = self.height;
            var y = self.height;
            while (y > 0) {
                y -= 1;
                const index = x + y * self.width;
                const char = self.grid[index];
                if (char == 'O') {
                    end -= 1;
                    self.grid[index] = '.';
                    self.grid[x + end * self.width] = 'O';
                } else if (char == '#') {
                    end = y;
                }
            }
        }
    }

    /// Roll every 'O' as far east as it can go (scan columns right-to-left).
    fn tiltEast(self: Platform) void {
        for (0..self.height) |y| {
            var end = self.width;
            var x = self.width;
            while (x > 0) {
                x -= 1;
                const index = x + y * self.width;
                const char = self.grid[index];
                if (char == 'O') {
                    end -= 1;
                    self.grid[index] = '.';
                    self.grid[end + y * self.width] = 'O';
                } else if (char == '#') {
                    end = x;
                }
            }
        }
    }

    /// One spin cycle: north, west, south, east, in that order.
    fn cycle(self: Platform) void {
        self.tiltNorth();
        self.tiltWest();
        self.tiltSouth();
        self.tiltEast();
    }
};

/// Part 1: tilt north once and measure the north-beam load.
fn part1(input: []const u8, allocator: std.mem.Allocator) !usize {
    const platform = try Platform.parse(input, allocator);
    defer platform.deinit();
    platform.tiltNorth();
    return platform.getNorthLoad();
}

/// Part 2: find the spin-cycle period with tortoise-and-hare detection, then
/// skip ahead to the state after 1,000,000,000 cycles.
fn part2(input: []const u8, allocator: std.mem.Allocator) !usize {
    const hare = try Platform.parse(input, allocator);
    // Register the deinit before the fallible clone below; previously a
    // failing clone() leaked `hare`.
    defer hare.deinit();
    const tortoise = try hare.clone();
    defer tortoise.deinit();
    var i: usize = 1;
    while (true) : (i += 1) {
        hare.cycle();
        if (i & 1 == 0) {
            tortoise.cycle();
        }
        if (std.mem.eql(u8, hare.grid, tortoise.grid)) {
            break;
        }
    }
    // hare is i cycles ahead of start, tortoise i/2; equal grids mean
    // (i - i/2) is a multiple of the true period, which suffices here.
    const period = i - (i / 2);
    const remaining_cycles = 1_000_000_000 - i;
    for (0..(remaining_cycles % period)) |_| {
        hare.cycle();
    }
    return hare.getNorthLoad();
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    // Report leaks on exit; the GPA was previously never deinitialized.
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    const input = @embedFile("input.txt");
    std.debug.print("Part 1: {}\n", .{try part1(input, allocator)});
    std.debug.print("Part 2: {}\n", .{try part2(input, allocator)});
}
https://raw.githubusercontent.com/Chickenkeeper/advent-of-code-2023/79f3ce8b20c13e25be01a5512370c8a7642c08d1/day-14/src/main.zig
//! A chat-message display panel rendered with raylib. This file is itself a
//! struct type (file-as-struct): instances hold the panel rectangle and the
//! list of messages to draw.

const std = @import("std");
const rl = @import("raylib");

/// One chat entry; the struct does not own `author`/`text` — the caller must
/// keep the backing memory alive for as long as the message is displayed.
pub const Message = struct {
    author: []const u8,
    text: []const u8,
};

// Screen-space bounds of the panel. `undefined` until setRec() is called.
rec: rl.Rectangle = undefined,
// Messages to render, in insertion order. `undefined` until allocMessages().
messages: std.ArrayList(Message) = undefined,

/// Set the panel's bounding rectangle and return it for convenience.
pub fn setRec(self: *@This(), x: f32, y: f32, w: f32, h: f32) rl.Rectangle {
    self.rec = rl.Rectangle.init(x, y, w, h);
    return self.rec;
}

/// Initialize the message list with `allocator`. Must be called before
/// render(); there is no matching deinit in this file — TODO confirm who
/// frees the list.
pub fn allocMessages(self: *@This(), allocator: std.mem.Allocator) void {
    self.messages = std.ArrayList(Message).init(allocator);
}

/// True when the left mouse button was pressed this frame while the cursor
/// is inside the panel rectangle.
pub fn isClicked(self: @This()) bool {
    if (rl.checkCollisionPointRec(rl.getMousePosition(), self.rec)) {
        if (rl.isMouseButtonPressed(.mouse_button_left)) {
            return true;
        }
    }
    return false;
}

/// Not implemented: logs an error and terminates the process. The declared
/// [256]u8 return value is never produced.
pub fn clean(self: *@This()) [256]u8 {
    _ = self;
    std.log.err("not implemented", .{});
    std.posix.exit(1);
}

/// Draw the panel background and every message as "author: text", one line
/// per message, offset vertically by the measured line height.
/// `allocator` is used for per-message scratch strings (freed before return);
/// `frame_counter` is currently unused.
pub fn render(self: *@This(), allocator: std.mem.Allocator, font: rl.Font, font_size: f32, frame_counter: usize) !void {
    _ = frame_counter;
    rl.drawRectangleRounded(self.rec, 0.05, 0, rl.Color.black);
    // Inner padding, in pixels, between the panel edge and the text.
    const padd = 40;
    for (self.messages.items, 0..) |msg, i| {
        // Null-terminated because raylib's text APIs expect C strings.
        const msgg = try std.fmt.allocPrintZ(allocator, "{s}: {s}", .{msg.author, msg.text});
        defer allocator.free(msgg);
        const msg_height = rl.measureTextEx(font, msgg, font_size, 0).y;
        const msg_pos = rl.Vector2{.x = self.rec.x + padd, .y = self.rec.y + padd + msg_height*@as(f32, @floatFromInt(i))};
        rl.drawTextEx(font, msgg, msg_pos, font_size, 0, rl.Color.ray_white);
    }
}
https://raw.githubusercontent.com/tayoldotdev/TsockM/c7d685cdce905730bf0abd00cf4182addad253fa/src/client/ui/display.zig
//! An RGBA8 image (file-as-struct) with a minimal QOI decoder.

const std = @import("std");

const Image = @This();

/// Bytes per pixel: images are always stored as RGBA8.
pub const PIXEL_SIZE = 4;

pub const Format = enum {
    Srgb,
    Linear,
};

pub const Filter = enum {
    Nearest,
    Linear,
};

allocator: std.mem.Allocator,
data: []u8,
width: u32,
height: u32,
format: Format = .Srgb,
filter: Filter = .Nearest,

/// Create an image from raw RGBA8 pixels. `data` is copied; the image owns
/// the copy and frees it in `deinit`.
pub fn init(
    allocator: std.mem.Allocator,
    data: []const u8,
    width: u32,
    height: u32,
) !Image {
    return .{
        .allocator = allocator,
        // Fixed: Allocator.dupe takes the element type as its first
        // argument; `allocator.dupe(data)` did not compile.
        .data = try allocator.dupe(u8, data),
        .width = width,
        .height = height,
    };
}

/// Free the pixel buffer.
pub fn deinit(self: *Image) void {
    self.allocator.free(self.data);
}

/// Open and decode a QOI file at `path`. Caller owns the returned image.
pub fn load_qoi(
    allocator: std.mem.Allocator,
    path: []const u8,
) !Image {
    const file = try std.fs.cwd().openFile(path, .{});
    // Fixed: the file handle was previously never closed (fd leak).
    defer file.close();
    const reader = file.reader().any();
    return read_qoi(allocator, reader);
}

/// Decode a QOI stream into an RGBA8 image. Caller owns the returned image
/// and must call `deinit`. Returns InvalidMagic / InvalidFormat on a bad
/// header, or any reader error on truncated input.
pub fn read_qoi(
    allocator: std.mem.Allocator,
    reader: std.io.AnyReader,
) !Image {
    const Error = error{
        InvalidMagic,
        InvalidFormat,
    };

    // First three big-endian u32 fields of the QOI header.
    const Header = extern struct {
        magic: u32,
        width: u32,
        height: u32,
    };

    const header: Header = try reader.readStructEndian(Header, .big);
    const channels = try reader.readByte();
    const colorspace = try reader.readByte();
    _ = channels; // output is always RGBA regardless of source channels

    // check that the magic is correct ("qoif" as a big-endian u32)
    if (header.magic != 0x716f6966) {
        return Error.InvalidMagic;
    }

    const format = switch (colorspace) {
        0 => Format.Srgb,
        1 => Format.Linear,
        else => return Error.InvalidFormat,
    };

    const data = try allocator.alloc(u8, header.width * header.height * PIXEL_SIZE);
    // Fixed: a read error while decoding previously leaked `data`.
    errdefer allocator.free(data);

    var index: usize = 0;
    // Running index of previously-seen pixels, keyed by qoi_hash.
    var seen: [64][4]u8 = .{.{ 0, 0, 0, 0 }} ** 64;
    // Current pixel state; QOI starts from opaque black.
    var r: u8 = 0;
    var g: u8 = 0;
    var b: u8 = 0;
    var a: u8 = 255;
    var run: u8 = 0;

    while (true) {
        if (run == 0) {
            const tag = try reader.readByte();
            const two_bits = tag & 0b1100_0000;
            const six_bits = tag & 0b0011_1111;
            if (tag == 0b1111_1110) {
                // QOI_OP_RGB: full RGB follows, alpha unchanged.
                r = try reader.readByte();
                g = try reader.readByte();
                b = try reader.readByte();
            } else if (tag == 0b1111_1111) {
                // QOI_OP_RGBA: full RGBA follows.
                r = try reader.readByte();
                g = try reader.readByte();
                b = try reader.readByte();
                a = try reader.readByte();
            } else if (two_bits == 0b0000_0000) {
                // QOI_OP_INDEX: replay a previously-seen pixel.
                const pixel = seen[six_bits];
                r = pixel[0];
                g = pixel[1];
                b = pixel[2];
                a = pixel[3];
            } else if (two_bits == 0b0100_0000) {
                // QOI_OP_DIFF: 2-bit per-channel deltas, bias 2.
                const dr = ((six_bits >> 4) & 0b11) -% 2;
                const dg = ((six_bits >> 2) & 0b11) -% 2;
                const db = ((six_bits >> 0) & 0b11) -% 2;
                r +%= dr;
                g +%= dg;
                b +%= db;
            } else if (two_bits == 0b1000_0000) {
                // QOI_OP_LUMA: 6-bit green delta (bias 32), red/blue deltas
                // relative to green (bias 8) in the next byte.
                const dg = six_bits -% 32;
                const drdb = try reader.readByte();
                const dr = ((drdb >> 4) & 0b1111) -% 8;
                const db = ((drdb >> 0) & 0b1111) -% 8;
                r +%= dg +% dr;
                g +%= dg;
                b +%= dg +% db;
            } else if (two_bits == 0b1100_0000) {
                // QOI_OP_RUN: repeat the current pixel six_bits more times
                // after the copy written below (run length is biased by -1).
                run = six_bits;
            }
        } else {
            run -= 1;
        }

        data[index + 0] = r;
        data[index + 1] = g;
        data[index + 2] = b;
        data[index + 3] = a;
        index += PIXEL_SIZE;

        const hash = qoi_hash(r, g, b, a);
        seen[hash] = .{ r, g, b, a };

        if (index >= data.len) {
            break;
        }
    }

    return .{
        .allocator = allocator,
        .data = data,
        .width = header.width,
        .height = header.height,
        .format = format,
        .filter = Filter.Linear,
    };
}

/// QOI index-cache hash as defined by the specification.
fn qoi_hash(r: u8, g: u8, b: u8, a: u8) u8 {
    return (r *% 3 +% g *% 5 +% b *% 7 +% a *% 11) % 64;
}
https://raw.githubusercontent.com/Sudoku-Boys/Sudoku/218ea16ecc86507698861b59a496ff178a7a7587/src/engine/Image.zig
const std = @import("../../../std.zig"); const kern = @import("kern.zig"); const PtRegs = @compileError("TODO missing os bits: PtRegs"); const TcpHdr = @compileError("TODO missing os bits: TcpHdr"); const SkFullSock = @compileError("TODO missing os bits: SkFullSock"); // in BPF, all the helper calls // TODO: when https://github.com/ziglang/zig/issues/1717 is here, make a nice // function that uses the Helper enum // // Note, these function signatures were created from documentation found in // '/usr/include/linux/bpf.h' pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1)); pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2)); pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3)); pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4)); pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5)); pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6)); pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7)); pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8)); pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9)); pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10)); pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11)); pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12)); pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, 
flags: u64) c_long, @ptrFromInt(13)); pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14)); pub const get_current_uid_gid = @as(*const fn () u64, @ptrFromInt(15)); pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16)); pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17)); // Note vlan_proto is big endian pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18)); pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19)); pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20)); pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21)); pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22)); pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23)); pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24)); pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25)); pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26)); pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27)); // from and to point to __be32 pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28)); pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29)); pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30)); // proto is __be16 pub const 
skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31)); pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32)); pub const skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33)); pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34)); pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35)); pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36)); pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37)); pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38)); pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39)); pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40)); pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41)); pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42)); pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43)); pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44)); pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45)); pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46)); pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47)); pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48)); pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49)); pub const skb_adjust_room = @as(*const fn (skb: 
*kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50)); pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51)); pub const sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52)); pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53)); pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54)); pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55)); pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56)); pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57)); pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58)); pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59)); pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60)); pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61)); pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62)); pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63)); pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64)); pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65)); pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, 
xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66)); pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(67)); pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68)); pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69)); pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70)); pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71)); pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72)); pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73)); pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74)); pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75)); pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76)); pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77)); pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, @ptrFromInt(78)); pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79)); pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80)); pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81)); pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: 
// NOTE(review): continuation of the eBPF helper-function pointer table; the first
// declaration below is the tail of a helper whose head lies above this chunk, so it
// must not be reformatted independently.
// Each `pub const` binds a helper name to a small sequential integer via
// @ptrFromInt(N) — presumably the kernel's BPF helper IDs (the in-kernel call
// numbers from linux/bpf.h), not real addresses; confirm against bpf-helpers(7).
*const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82)); pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, @ptrFromInt(83)); pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84)); pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85)); pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86)); pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87)); pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88)); pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89)); pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90)); pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91)); pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92)); pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93)); pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94)); pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95)); pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96)); pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97)); pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98)); pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99)); pub const tcp_check_syncookie = 
// Helpers 100-115: sysctl accessors, string parsing, socket-local storage,
// signals, SYN-cookie generation, perf output and the probe_read_* family.
@as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100)); pub const sysctl_get_name = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101)); pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102)); pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103)); pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104)); pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105)); pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106)); pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107)); pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108)); pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109)); pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110)); pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111)); pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112)); pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113)); pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114)); pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const 
// Helpers 116-133: TCP ack, jiffies, branch records, namespace cookies,
// iterator seq_file output, and the BPF ring-buffer API (output/reserve/submit/discard).
anyopaque) c_long, @ptrFromInt(115)); pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116)); pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117)); pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118)); pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119)); pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120)); pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121)); pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122)); pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123)); pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124)); pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125)); pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126)); pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127)); pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128)); pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129)); pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130)); pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131)); pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132)); pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133)); pub 
// Helpers 134-141: ring-buffer introspection, checksum level, the skc_to_* socket
// downcast family, and task stack capture.
const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134)); pub const csum_level = @as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135)); pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136)); pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137)); pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138)); pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139)); pub const skc_to_udp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140)); pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141));
https://raw.githubusercontent.com/beingofexistence13/multiversal-lang/dd769e3fc6182c23ef43ed4479614f43f29738c9/zig/lib/std/os/linux/bpf/helpers.zig
// Declarative description of a client-library's documentation and CI recipe.
// `Docs` is pure data: every field is either prose (Markdown/String) or a runnable
// snippet (Code); the `?fn` hook fields let a client customize the
// install/build/run steps when testing against the current commit.
const std = @import("std"); // The purpose of these types is to help in reading this doc, not // because the types matter. const String = []const u8; // All Code variables are potentially tested and run in CI. const Code = []const u8; // All Markdown strings are never tested and run in CI. const Markdown = []const u8; pub const Docs = struct { // Name of the directory (relative to /src/clients) directory: String, // Package name (i.e. tigerbeetle-go, tigerbeetle-node, etc.) name: String, // Name for syntax highlighting (i.e. javascript for node, go for go, etc.) markdown_name: String, // File extension without dot (i.e. js, go, etc.) extension: String, // For the title of the page on the docs site. proper_name: String, // Introduction to the client. Links to docs or build badges or // whatnot. description: Markdown, // Any libraries or languages and their required versions for // using, not necessarily hacking on, this client. prerequisites: Markdown, // If you need an additional project file like pom.xml or // package.json. Leave blank if not needed. project_file_name: String, // The actual contents of the file. Leave blank if not needed. project_file: Code, // If you need to override the default name of test.${extension} // such as when file names have meaning (i.e. Java). test_file_name: String, // Alpine instructions to get the environment set up. install_prereqs: Code, // Alters the project to use a package built from the current commit // Runs before .install_commands current_commit_pre_install_hook: ?fn (*std.heap.ArenaAllocator, []const u8, []const u8) anyerror!void, // Runs after .install_commands current_commit_post_install_hook: ?fn (*std.heap.ArenaAllocator, []const u8, []const u8) anyerror!void, // Any setup needed for a project before compiling and running // such as `go mod init myProject && go mod tidy` or `npm install // tigerbeetle-node`. install_commands: Code, // Minimal code just to test importing the package works. 
// Per-operation example snippets below (create_accounts, create_transfers, …)
// are paired with optional Markdown commentary fields of the same stem.
install_sample_file: Code, // Commands for building code without running it. build_commands: Code, // Commands for building and running code. run_commands: Code, current_commit_install_commands_hook: ?fn (*std.heap.ArenaAllocator, Code) anyerror!Code, current_commit_build_commands_hook: ?fn (*std.heap.ArenaAllocator, Code) anyerror!Code, current_commit_run_commands_hook: ?fn (*std.heap.ArenaAllocator, Code) anyerror!Code, // Additional instructions for install. install_documentation: Markdown, // If you want to include links to examples. examples: Markdown, client_object_example: Code, client_object_documentation: Markdown, create_accounts_example: Code, create_accounts_documentation: Markdown, create_accounts_errors_example: Code, create_accounts_errors_documentation: Markdown, account_flags_example: Code, account_flags_documentation: Markdown, lookup_accounts_example: Code, create_transfers_example: Code, create_transfers_documentation: Markdown, create_transfers_errors_example: Code, create_transfers_errors_documentation: Markdown, // Good example of using batches to create transfers. batch_example: Code, // Bad example of not using batches well to create transfers. no_batch_example: Code, transfer_flags_documentation: Markdown, transfer_flags_link_example: Code, transfer_flags_post_example: Code, transfer_flags_void_example: Code, lookup_transfers_example: Code, linked_events_example: Code, developer_setup_documentation: Markdown, developer_setup_sh_commands: Code, developer_setup_pwsh_commands: Code, // Optional prefix if test code must be in a certain directory // (e.g. Java and `src/main/java`). test_source_path: String, // All code that must exist before sample code is concatenated // such as main function declaration, import declaration, etc. test_main_prefix: Code, // All code that must exist after sample code is concatenated such // as closing braces. 
// `Sample` describes one runnable sample program shipped with a client.
test_main_suffix: Code, }; pub const Sample = struct { // Capitalized name of the sample program proper_name: String, // e.g. `basic`, `two-phase`, etc. directory: String, // For use in the language primary README short_description: String, // For use as the introduction on the individual sample README long_description: String, };
https://raw.githubusercontent.com/eatonphil/zig-segfault/e15bdacee14d5aae38ce5668597e53bd1403867f/src/clients/docs_types.zig
// In-memory catalog: namespace -> database -> table, where each table owns a
// StringHashMap from a 10-byte padded hash key to a list of `unitUnion` entries
// (either a 4-byte extension tag or a 5-byte extension+index pair).
// NOTE(review): all allocations use std.heap.page_allocator and are never freed —
// presumably intentional for a long-lived process, but worth confirming.
// FIXME(review): getRandomNumber formats into a function-local `buffer: [10]u8`
// and returns a slice into it — that slice dangles once the function returns.
// The hash-padding loops below copy up to 10 bytes of `hash` into a fresh
// allocation, mapping the byte 170 (0xAA, Zig's "undefined" fill pattern in
// debug builds) and any bytes past `hash.len` to 0.
const std = @import("std"); const tableStruct = struct { name: []const u8, units: *std.StringHashMap(*std.ArrayList(*unitUnion)), rules: *rulesStruct }; const rulesStruct = struct { accept_extension: ?[4][4]u8, accept_size: ?u64, accept_format: ?[4][4]u8, accept_optimise: ?u5, mark: ?[3][3]u8, }; const UNIT = enum { extension, index }; const unitUnion = union(UNIT) { extension: [4]u8, index: [5]u8 }; var root: *std.StringHashMap(*std.StringHashMap(*std.StringHashMap(*tableStruct))) = undefined; pub fn getRandomNumber() ![]const u8 { var rnd = std.rand.DefaultPrng.init(blk: { var seed: u32 = undefined; try std.os.getrandom(std.mem.asBytes(&seed)); break :blk seed; }); var buffer: [10]u8 = undefined; return try std.fmt.bufPrint(&buffer, "{d}", .{rnd.random().intRangeAtMost(u32, 0, 4_294_967_295)}); } pub fn init_table_map() !void { const rootHashMap = try std.heap.page_allocator.create(std.StringHashMap(*std.StringHashMap(*std.StringHashMap(*tableStruct)))); rootHashMap.* = std.StringHashMap(*std.StringHashMap(*std.StringHashMap(*tableStruct))).init(std.heap.page_allocator); root = rootHashMap; } pub fn push_hash(namespace: []const u8, database: []const u8, table: []const u8, hash: []const u8) !void { const unit_array_prt = try std.heap.page_allocator.create(std.ArrayList(*unitUnion)); unit_array_prt.* = std.ArrayList(*unitUnion).init(std.heap.page_allocator); var filledHash: []u8 = try std.heap.page_allocator.alloc(u8, 10); var number: u4 = 0; while (number < 10) { if (number > hash.len - 1) { filledHash[number] = 0; } else { if (hash[number] != 170) { filledHash[number] = hash[number]; } else filledHash[number] = 0; } number += 1; } try (((root.get(namespace).?).get(database).?).get(table).?).units.put(filledHash, unit_array_prt); } pub fn push_unit(namespace: []const u8, database: []const u8, table: []const u8, hash: []const u8, extension: []const u8, index: ?u8) !void { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |tbl| { var 
// push_unit (cont.): pads the hash key, then appends either an `.index`
// (extension + index byte) or a plain `.extension` unit to the matching list.
// NOTE(review): the `while (true) { ... if (number == 3) break; }` padding loops
// copy only extension[0..3] when extension.len < 4 — byte 3 stays 0; confirm
// whether 3-byte extensions are the only short case intended.
filledHash: []u8 = try std.heap.page_allocator.alloc(u8, 10); var number: u4 = 0; while (number < 10) { if (number > hash.len - 1) { filledHash[number] = 0; } else { if (hash[number] != 170) { filledHash[number] = hash[number]; } else filledHash[number] = 0; } number += 1; } // var keys = tbl.units.keyIterator(); // while (keys.next()) |key| { // std.debug.print("{s}\n", .{key.*}); // } if (tbl.units.get(filledHash)) |hs| { const unit = try std.heap.page_allocator.create(unitUnion); if (index) |i| { if (extension.len == 4) { unit.* = unitUnion{ .index = [5]u8{ extension[0], extension[1], extension[2], extension[3], i } }; } else { number = 0; var filledExtension: [4]u8 = [4]u8{ 0, 0, 0, 0 }; while (true) { filledExtension[number] = extension[number]; number += 1; if (number == 3) break; } unit.* = unitUnion{ .index = [5]u8{ filledExtension[0], filledExtension[1], filledExtension[2], filledExtension[3], i } }; } } else { if (extension.len == 4) { unit.* = unitUnion{ .extension = extension[0..4].* }; } else { number = 0; var filledExtension: [4]u8 = [4]u8{ 0, 0, 0, 0 }; while (true) { filledExtension[number] = extension[number]; number += 1; if (number == 3) break; } unit.* = unitUnion{ .extension = filledExtension }; } } try hs.append(unit); } else return error.NoUnitFound; } else return error.NoTableFound; } else return error.NoDataBaseFound; } else return error.NoNameSpaceFound; } pub fn get_units_by_hash(namespace: []const u8, database: []const u8, table: []const u8, hash: []const u8) !*std.ArrayList(*unitUnion) { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |tbl| { var filledHash: []u8 = try std.heap.page_allocator.alloc(u8, 10); var number: u4 = 0; while (number < 10) { if (number > hash.len - 1) { filledHash[number] = 0; } else { if (hash[number] != 170) { filledHash[number] = hash[number]; } else filledHash[number] = 0; } number += 1; } if (tbl.units.get(filledHash)) |hs| { return hs; } else return error.NoUnitFound; } else 
// define(): creates a namespace (all args null), a database (table null), or a
// table, keyed by a 24-byte page_allocator copy of `name` sliced to name.len.
// NOTE(review): copyForwards copies name.len bytes into a 24-byte buffer —
// names longer than 24 bytes would overrun; presumably callers guarantee <= 24.
return error.NoTableFound; } else return error.NoDataBaseFound; } else return error.NoNameSpaceFound; } pub fn define(namespace: ?[]const u8, database: ?[]const u8, table: ?[]const u8, name: []const u8) !void { if (namespace) |ns| { if (database) |db| { if (table) |_| {} else { if (root.get(ns)) |rns| { if (rns.get(db)) |rdb| { const units_prt = try std.heap.page_allocator.create(std.StringHashMap(*std.ArrayList(*unitUnion))); const rules_prt = try std.heap.page_allocator.create(rulesStruct); const table_prt = try std.heap.page_allocator.create(tableStruct); units_prt.* = std.StringHashMap(*std.ArrayList(*unitUnion)).init(std.heap.page_allocator); rules_prt.* = rulesStruct{ .accept_extension = null, .accept_size = null, .accept_format = null, .accept_optimise = null, .mark = null }; table_prt.* = tableStruct{ .name = name, .units = units_prt, .rules = rules_prt }; var buffer = try std.heap.page_allocator.alloc(u8, 24); std.mem.copyForwards(u8, buffer[0..], name[0..]); try rdb.put(buffer[0..name.len], table_prt); } } } } else { if (root.get(ns)) |rns| { const databaseHashMap = try std.heap.page_allocator.create(std.StringHashMap(*tableStruct)); databaseHashMap.* = std.StringHashMap(*tableStruct).init(std.heap.page_allocator); var buffer = try std.heap.page_allocator.alloc(u8, 24); std.mem.copyForwards(u8, buffer[0..], name[0..]); try rns.put(buffer[0..name.len], databaseHashMap); } } } else { const namespaceHashMap = try std.heap.page_allocator.create(std.StringHashMap(*std.StringHashMap(*tableStruct))); namespaceHashMap.* = std.StringHashMap(*std.StringHashMap(*tableStruct)).init(std.heap.page_allocator); var buffer = try std.heap.page_allocator.alloc(u8, 24); std.mem.copyForwards(u8, buffer[0..], name[0..]); try root.put(buffer[0..name.len], namespaceHashMap); } } pub fn set_format(namespace: []const u8, database: []const u8, table: []const u8, format: [4][4]u8) !void { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |t| { 
// Rule setters: each silently does nothing when namespace/database/table is
// missing (no error), unlike the getters below which return Not*Found errors.
// set_mark parses "char:quality:scale,..." into a [3][3]u8 table; non-numeric
// first tokens store their first byte as a type tag.
// NOTE(review): "get_rules_sturct" is a typo for "struct" — kept because it is
// part of the public interface.
t.rules.accept_format = format; } } } } pub fn set_possible_input_extensions(namespace: []const u8, database: []const u8, table: []const u8, format: [4][4]u8) !void { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |t| { t.rules.accept_extension = format; } } } } pub fn set_input_size(namespace: []const u8, database: []const u8, table: []const u8, size: u64) !void { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |t| { t.rules.accept_size = size; } } } } pub fn set_optimisation(namespace: []const u8, database: []const u8, table: []const u8, level: u5) !void { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |t| { t.rules.accept_optimise = level; } } } } pub fn set_mark(namespace: []const u8, database: []const u8, table: []const u8, mark: []const u8) !void { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |t| { var markArray: [3][3]u8 = [3][3]u8{ [3]u8{ 0, 0, 0 }, [3]u8{ 0, 0, 0 }, [3]u8{ 0, 0, 0 } }; var parseMarkConfigs = std.mem.split(u8, mark, ","); var number: u2 = 0; while (parseMarkConfigs.next()) |config| { var parseMarkConfigsToken = std.mem.split(u8, config, ":"); var index: u2 = 0; while (parseMarkConfigsToken.next()) |pmcp| { const level: ?u8 = std.fmt.parseInt(u8, pmcp, 10) catch null; if (level) |lvl| { markArray[number][index] = lvl; } else { if (pmcp.len == 0) break; markArray[number][0] = pmcp[0]; } index += 1; } number += 1; } t.rules.mark = markArray; } } } } pub fn get_rules_sturct(namespace: []const u8, database: []const u8, table: []const u8) !*rulesStruct { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |tbl| { return tbl.rules; } else return error.NoTableFound; } else return error.NoDataBaseFound; } else return error.NoNameSpaceFound; } pub fn get_rules_portable(namespace: []const u8, database: []const u8, table: []const u8) ![]const u8 { if (root.get(namespace)) |ns| { if 
// get_rules_portable: serializes a table's rules into a compact one-line wire
// form — "E<ext,...>S<bytes>F<fmt,...>O<level>M<tag:q:s,...>" — by repeatedly
// re-joining with page_allocator (each join allocates a fresh string; the
// intermediates are never freed — see module NOTE above).
(ns.get(database)) |db| { if (db.get(table)) |tbl| { var rules: []const u8 = ""; if (tbl.rules.accept_extension) |tr_ae| { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, "E" }); if (tr_ae[0][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, tr_ae[0][0..] }); if (tr_ae[1][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, ",", tr_ae[1][0..], "" }); if (tr_ae[2][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, ",", tr_ae[2][0..], "" }); if (tr_ae[3][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, ",", tr_ae[3][0..], "" }); } if (tbl.rules.accept_size) |tr_as| { var sizeBuffer: [13]u8 = undefined; rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, "S", try std.fmt.bufPrint(&sizeBuffer, "{d}", .{tr_as}) }); } if (tbl.rules.accept_format) |tr_af| { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, "F" }); if (tr_af[0][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, tr_af[0][0..] 
// get_rules_portable (cont.): mark entries are emitted as "tag:quality:scale,"
// and the trailing comma is sliced off at the end.
}); if (tr_af[1][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, ",", tr_af[1][0..], "" }); if (tr_af[2][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, ",", tr_af[2][0..], "" }); if (tr_af[3][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, ",", tr_af[3][0..], "" }); } if (tbl.rules.accept_optimise) |tr_ao| { var sizeBuffer: [8]u8 = undefined; rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, "O", try std.fmt.bufPrint(&sizeBuffer, "{d}", .{tr_ao}) }); } if (tbl.rules.mark) |tr_m| { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, "M" }); for (tr_m) |array| { var qualityBuffer: [2]u8 = undefined; var scaleBuffer: [2]u8 = undefined; if (array[0] == 0) continue; const formatted: []const u8 = &[1]u8{array[0]}; rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, formatted, ":" }); rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, try std.fmt.bufPrint(&qualityBuffer, "{d}", .{array[1]}), ":" }); rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, try std.fmt.bufPrint(&scaleBuffer, "{d}", .{array[2]}), "," }); } rules = rules[0..(rules.len - 1)]; } return rules; } else return error.NoTableFound; } else return error.NoDataBaseFound; } else return error.NoNameSpaceFound; } pub fn get_rules(namespace: []const u8, database: []const u8, table: []const u8) ![]const u8 { if (root.get(namespace)) |ns| { if (ns.get(database)) |db| { if (db.get(table)) |tbl| { var rules: []const u8 = ""; rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, "> ", table, "\n" }); if (tbl.rules.accept_extension) |tr_ae| { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " EXTENSION [\n" }); if (tr_ae[0][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " #1:[", 
// get_rules: human-readable multi-line rendering of the same rule set, with
// SIZE scaled to B/KB/MB/GB/TB by successive power-of-1024 thresholds.
tr_ae[0][0..], "]\n" }); if (tr_ae[1][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " #2:[", tr_ae[1][0..], "]\n" }); if (tr_ae[2][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " #3:[", tr_ae[2][0..], "]\n" }); if (tr_ae[3][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " #4:[", tr_ae[3][0..], "]\n" }); rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " ]\n" }); } if (tbl.rules.accept_size) |tr_as| { var sizeBuffer: [8]u8 = undefined; if (tr_as > 1024 * 1024 * 1024 * 1024) { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " SIZE [", try std.fmt.bufPrint(&sizeBuffer, "{d}", .{tr_as / (1024 * 1024 * 1024 * 1024)}), "TB]\n" }); } else if (tr_as > 1024 * 1024 * 1024) { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " SIZE [", try std.fmt.bufPrint(&sizeBuffer, "{d}", .{tr_as / (1024 * 1024 * 1024)}), "GB]\n" }); } else if (tr_as > 1024 * 1024) { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " SIZE [", try std.fmt.bufPrint(&sizeBuffer, "{d}", .{tr_as / (1024 * 1024)}), "MB]\n" }); } else if (tr_as > 1024) { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " SIZE [", try std.fmt.bufPrint(&sizeBuffer, "{d}", .{tr_as / 1024}), "KB]\n" }); } else { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " SIZE [", try std.fmt.bufPrint(&sizeBuffer, "{d}", .{tr_as}), "B]\n" }); } } if (tbl.rules.accept_format) |tr_af| { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " FORMAT [\n" }); if (tr_af[0][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " #1:[", tr_af[0][0..], "]\n" }); if (tr_af[1][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " #2:[", tr_af[1][0..], "]\n" }); if (tr_af[2][0] != 0) 
// get_rules (cont.): OPTIMISE, MARK block, closing "<" sentinel, then the same
// Not*Found error ladder as the other getters.
rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " #3:[", tr_af[2][0..], "]\n" }); if (tr_af[3][0] != 0) rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " #4:[", tr_af[3][0..], "]\n" }); rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " ]\n" }); } if (tbl.rules.accept_optimise) |tr_ao| { var sizeBuffer: [8]u8 = undefined; rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " OPTIMISE [", try std.fmt.bufPrint(&sizeBuffer, "{d}", .{tr_ao}), "]\n" }); } if (tbl.rules.mark) |tr_m| { rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " MARK [\n" }); for (tr_m) |array| { var qualityBuffer: [2]u8 = undefined; var scaleBuffer: [2]u8 = undefined; if (array[0] == 0) continue; const formatted: []const u8 = &[1]u8{array[0]}; rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " [", formatted, " -- " }); rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, try std.fmt.bufPrint(&qualityBuffer, "{d}", .{array[1]}), " -- " }); rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, try std.fmt.bufPrint(&scaleBuffer, "{d}", .{array[2]}), "]\n" }); } rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, " ]\n" }); } rules = try std.mem.join(std.heap.page_allocator, "", &[_][]const u8{ rules, "<\n" }); return rules; } else return error.NoTableFound; } else return error.NoDataBaseFound; } else return error.NoNameSpaceFound; }
https://raw.githubusercontent.com/CrusaderOfTheWhiteRose/IshiaDB/c74398e67bd077bbde1f16383ad68342411a0a75/database/core.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const util = @import("util.zig");
const gpa = util.gpa;

// Useful stdlib functions
const eql = std.mem.eql;
const tokenize = std.mem.tokenize;
const tokenizeAny = std.mem.tokenizeAny;
const tokenizeSeq = std.mem.tokenizeSequence;
const tokenizeSca = std.mem.tokenizeScalar;
const splitAny = std.mem.splitAny;
const splitSeq = std.mem.splitSequence;
const splitSca = std.mem.splitScalar;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;

const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;

const print = std.debug.print;
const assert = std.debug.assert;

const sort = std.sort.block;
const asc = std.sort.asc;
const desc = std.sort.desc;

const data = @embedFile("data/day14.txt");

/// Solve part 1 of day 14 over the embedded puzzle input.
/// FIX: the original body was empty, which is a compile error for a
/// non-void return type (`!u32`). Return an explicit error until the
/// puzzle is implemented; main's `{any}` formatting prints it as-is.
fn part1() !u32 {
    // TODO: parse `data` and compute the part-1 answer.
    return error.NotImplemented;
}

/// Solve part 2 of day 14 over the embedded puzzle input.
/// Same fix as part1: an empty body cannot satisfy the `!u32` return type.
fn part2() !u32 {
    // TODO: parse `data` and compute the part-2 answer.
    return error.NotImplemented;
}

pub fn main() !void {
    print("Day 14 (Part 1) answer: {any}\n", .{part1()});
    print("Day 14 (Part 2) answer: {any}\n", .{part2()});
}
https://raw.githubusercontent.com/brandonland/adventofcode2023-zig/ccb9c086f25cfebbab620884cf0ff622cc1b63ed/src/day14.zig
// Minimal INI parser: a hand-rolled character tokenizer (`consume`) drives either
// comptime-capable struct population (`parse`) or a runtime "section.key" -> value
// map (`parseIntoMap`). Comments start with ';' or '#'; booleans accept the
// spellings in `booleanMap`.
// NOTE(review): this targets an older Zig — `std.ComptimeStringMap` and the
// `ff.field_type` std.builtin field were renamed in later releases; confirm the
// toolchain version before touching.
const std = @import("std"); const Token = struct { kind: enum { nil, comment, section, identifier, value, }, value: ?[]const u8, }; const TokenizerState = enum(u3) { nil, comment, section, identifier, value, string, }; const booleanMap = std.ComptimeStringMap(bool, .{ .{ "1", true }, .{ "enabled", true }, .{ "Enabled", true }, .{ "on", true }, .{ "On", true }, .{ "true", true }, .{ "t", true }, .{ "True", true }, .{ "T", true }, .{ "yes", true }, .{ "y", true }, .{ "Yes", true }, .{ "Y", true }, .{ "0", false }, .{ "disabled", false }, .{ "Disabled", false }, .{ "off", false }, .{ "Off", false }, .{ "false", false }, .{ "f", false }, .{ "False", false }, .{ "F", false }, .{ "no", false }, .{ "n", false }, .{ "No", false }, .{ "N", false }, }); pub fn parse(comptime T: type, data: []const u8) !T { var seek: usize = 0; var state = TokenizerState.nil; var val = std.mem.zeroes(T); var csec: []const u8 = undefined; var cid: []const u8 = undefined; while (consume(data[0..], &seek, &state)) |token| { switch (token.kind) { .nil, .comment => {}, .section => csec = token.value.?, .identifier => { cid = token.value.?; const tk = consume(data[0..], &seek, &state).?; if (tk.kind != .value) return error.IniSyntaxError; const info1 = @typeInfo(T); if (info1 != .Struct) @compileError("Invalid Archetype"); inline for (info1.Struct.fields) |f| { if (std.mem.eql(u8, f.name, csec)) { const info2 = @typeInfo(@TypeOf(@field(val, f.name))); if (info2 != .Struct) @compileError("Naked field in archetype"); inline for (info2.Struct.fields) |ff| { if (std.mem.eql(u8, ff.name, cid)) { const TT = ff.field_type; @field(@field(val, f.name), ff.name) = coerce(TT, tk.value.?) 
// parse (cont.): coercion failures are swallowed with `catch unreachable` —
// a malformed value is UB in release builds rather than a reported error
// (the commented-out error.IniInvalidCoerce hints this was known).
// `coerce` falls through to `@as(T, v)` for string-typed fields.
// IniResult.deinit frees the allocPrint'd "section.key" keys; the values
// still alias the caller's `data` buffer, so `data` must outlive the map.
catch unreachable; // error.IniInvalidCoerce; } } } } }, else => return error.IniSyntaxError, } } return val; } fn coerce(comptime T: type, v: []const u8) !T { return switch (@typeInfo(T)) { .Bool => booleanMap.get(v).?, .Float, .ComptimeFloat => try std.fmt.parseFloat(T, v), .Int, .ComptimeInt => try std.fmt.parseInt(T, v, 10), else => @as(T, v), }; } const IniMap = std.StringHashMap([]const u8); pub const IniResult = struct { map: IniMap, allocator: std.mem.Allocator, pub fn deinit(self: *IniResult) void { defer self.map.deinit(); var iter = self.map.iterator(); while (iter.next()) |i| self.allocator.free(i.key_ptr.*); } }; pub fn parseIntoMap(data: []const u8, allocator: std.mem.Allocator) !IniResult { var seek: usize = 0; var state = TokenizerState.nil; var csec: []const u8 = undefined; var cid: []const u8 = undefined; var map = IniMap.init(allocator); while (consume(data[0..], &seek, &state)) |token| { switch (token.kind) { .nil, .comment => {}, .section => csec = token.value.?, .identifier => { cid = token.value.?; const tk = consume(data[0..], &seek, &state).?; if (tk.kind != .value) return error.IniSyntaxError; const coc = try std.fmt.allocPrint(allocator, "{s}.{s}", .{ csec, cid }); try map.put(coc, tk.value.?); }, else => return error.IniSyntaxError, } } return Ini Result{ .map = map, .allocator = allocator }; } fn consume(data: []const u8, seek: *usize, state: *TokenizerState) ?Token { if (seek.* >= data.len) return null; const token: Token = std.mem.zeroes(Token); var start = seek.*; var end = start; var char: u8 = 0; @setEvalBranchQuota(100000); while (char != '\n') { char = data[seek.*]; seek.* += 1; switch (state.*) { .nil => { switch (char) { ';', '#' => { state.* = .comment; start = seek.*; if (std.ascii.isWhitespace(data[start])) start += 1; end = start; }, '[' => { state.* = .section; start = seek.*; end = start; }, '=' => { state.* = .value; start = seek.*; if (std.ascii.isWhitespace(data[start])) start += 1; end = start; }, else => { if 
// consume (cont.): per-state scanning; slices are trimmed by fixed offsets
// (end - 1 / end - 2) to drop the delimiter and trailing newline/CR.
// NOTE(review): `start = start;` in the identifier arm is a no-op — likely a
// leftover; behavior is unchanged either way.
(!std.ascii.isWhitespace(char)) { state.* = .identifier; start = start; end = start; } else { start += 1; end += 1; } }, } }, .identifier => { end += 1; if (!(std.ascii.isAlphanumeric(char) or char == '_')) { state.* = .nil; return Token{ .kind = .identifier, .value = data[start..end], }; } }, .comment => { end += 1; switch (char) { '\n' => { state.* = .nil; return Token{ .kind = .comment, .value = data[start..@max(start, end - 2)], }; }, else => {}, } }, .section => { end += 1; switch (char) { ']' => { state.* = .nil; return Token{ .kind = .section, .value = data[start .. end - 1], }; }, else => {}, } }, .value => { switch (char) { ';', '#' => { state.* = .comment; return Token{ .kind = .value, .value = data[start .. end - 2], }; }, else => { end += 1; switch (char) { '\n' => { state.* = .nil; return Token{ .kind = .value, .value = data[start .. end - 2], }; }, else => {}, } }, } }, else => {}, } } return token; } test "parse into map" { var file = try std.fs.cwd().openFile("src/test.ini", .{ .mode = .read_only }); defer file.close(); const data = try std.testing.allocator.alloc(u8, try file.getEndPos()); defer std.testing.allocator.free(data); _ = try file.read(data); var ini = try parseIntoMap(data, std.testing.allocator); defer ini.deinit(); try std.testing.expectEqualStrings("John Doe", ini.map.get("owner.name").?); try std.testing.expectEqualStrings("Acme Widgets Inc.", ini.map.get("owner.organization").?); try std.testing.expectEqualStrings("192.0.2.62", ini.map.get("database.server").?); try std.testing.expectEqualStrings("143", ini.map.get("database.port").?); try std.testing.expectEqualStrings("payroll.dat", ini.map.get("database.file").?); try std.testing.expectEqualStrings("yes", ini.map.get("database.use").?); try std.testing.expectEqualStrings("bar", ini.map.get("withtabs.foo").?); } test "parse into struct" { var file = try std.fs.cwd().openFile("src/test.ini", .{ .mode = .read_only }); defer file.close(); const data = try 
// Tests read src/test.ini at runtime; the comptime test embeds the same file
// and runs the full parser at compile time (hence the @setEvalBranchQuota above).
std.testing.allocator.alloc(u8, try file.getEndPos()); defer std.testing.allocator.free(data); _ = try file.read(data); const Config = struct { owner: struct { name: []const u8, organization: []const u8, }, database: struct { server: []const u8, port: usize, file: []const u8, use: bool, }, }; const config = try parse(Config, data); try std.testing.expectEqualStrings("John Doe", config.owner.name); try std.testing.expectEqualStrings("Acme Widgets Inc.", config.owner.organization); try std.testing.expectEqualStrings("192.0.2.62", config.database.server); try std.testing.expectEqual(@as(usize, 143), config.database.port); try std.testing.expectEqualStrings("payroll.dat", config.database.file); try std.testing.expectEqual(true, config.database.use); } test "parse in comptime into struct" { const config = comptime block: { const data = @embedFile("test.ini"); const Config = struct { owner: struct { name: []const u8, organization: []const u8, }, database: struct { server: []const u8, port: usize, file: []const u8, use: bool, }, }; const config = try parse(Config, data); break :block config; }; try std.testing.expectEqualStrings("John Doe", config.owner.name); try std.testing.expectEqualStrings("Acme Widgets Inc.", config.owner.organization); try std.testing.expectEqualStrings("192.0.2.62", config.database.server); try std.testing.expectEqual(@as(usize, 143), config.database.port); try std.testing.expectEqualStrings("payroll.dat", config.database.file); try std.testing.expectEqual(true, config.database.use); }
https://raw.githubusercontent.com/nektro/arqv-ini/38a018ad3a19d5b4663a5364d2d31271f250846b/src/ini.zig
const std = @import("std"); const GL = @import("../util/opengl.zig"); const Ring = @import("../util/ring.zig"); const shared = @import("../shared.zig"); const zigimg = @import("zigimg"); const UI = @import("../util/ui.zig"); const GLRenderer = @import("../gl_renderer.zig"); const Editor = @import("../editor.zig"); usingnamespace std.os.windows; const ProcedureData = struct { var wndproc_atom_ptr: ?LPCSTR = null; msg_queue: *MessageQueue, old_wnd_proc: LONG_PTR, }; const Message = union(enum) { MouseMove: UI.MousePos, MouseDown: void, MouseUp: void, KeyDown: usize, KeyUp: usize, }; const DraggingInfo = struct { start: UI.MousePos, current: UI.MousePos, ui_id: u32, }; const MessageQueue = std.fifo.LinearFifo(Message, .Dynamic); pub const Renderer = struct { allocator: *std.mem.Allocator, thread: ?*std.Thread = null, should_close: bool = false, ring: Ring, images: ?GLRenderer.Images = null, editor: *Editor, msg_queue: MessageQueue, pub fn init(allocator: *std.mem.Allocator, editor: *Editor) !Renderer { return Renderer{ .allocator = allocator, .ring = try Ring.init(allocator, 65536), .msg_queue = MessageQueue.init(allocator), .editor = editor, }; } pub fn editorOpen(self: *Renderer, ptr: *c_void) !void { const hwnd = @ptrCast(HWND, ptr); self.thread = try std.Thread.spawn(renderLoop, .{ .self = self, .hwnd = hwnd, }); } pub fn editorClose(self: *Renderer) void { if (self.thread) |thread| { self.should_close = true; thread.wait(); self.thread = null; self.should_close = false; } } pub fn update(self: *Renderer, buffer: []f32) void { self.ring.write(buffer); } const RenderArgs = struct { self: *Renderer, hwnd: HWND, }; fn renderLoop(args: RenderArgs) !void { const self = args.self; const hwnd = args.hwnd; var proc_data = ProcedureData{ .msg_queue = &self.msg_queue, .old_wnd_proc = GetWindowLongPtrA(hwnd, GWLP_WNDPROC), }; if (ProcedureData.wndproc_atom_ptr == null) { const atom = GlobalAddAtomA("zig-analyzer-wndproc-storage"); // High word needs to be all 0 and the low 
word needs to be the atom identifier. const atom_ptr_int = @as(usize, atom & 0xffff); ProcedureData.wndproc_atom_ptr = @intToPtr([*:0]const CHAR, atom_ptr_int); } if (SetPropA(hwnd, ProcedureData.wndproc_atom_ptr.?, @ptrCast(*c_void, &proc_data)) != TRUE) { std.log.crit("SetPropA failed", .{}); } const proc_ptr = @intCast(isize, @ptrToInt(customWindowProcedure)); if (SetWindowLongPtrA(hwnd, GWLP_WNDPROC, proc_ptr) == 0) { std.log.crit("Failed to set the custom window procedure: {}", .{GetLastError()}); } defer { if (SetWindowLongPtrA(hwnd, GWLP_WNDPROC, proc_data.old_wnd_proc) == 0) { std.log.debug("Failed to reset the window procedure: {}", .{GetLastError()}); } } var instance = try wglSetup(self.allocator, hwnd); defer instance.deinit(); _ = instance.makeCurrent(); var opengl = try instance.loadFunctions(); self.images = self.images orelse try self.loadImages(); var gl_renderer = try GLRenderer.init(self.allocator, &opengl, &self.ring, self.images.?); defer gl_renderer.deinit(); const viewport = GLRenderer.Viewport{ .width = 900, .height = 900 }; gl_renderer.resize(viewport); const time_ms = 800; const frame_count = 48_000 * (time_ms / @as(f64, 1000.0)); gl_renderer.gpu_ring.resize(@floatToInt(usize, frame_count) * 2); var mouse_pos: UI.MousePos = .{ .x = 0, .y = 0 }; var mouse_down: bool = false; var dragging: ?DraggingInfo = null; while (!self.should_close) { var mouse_moved_this_frame = false; var mouse_down_this_frame = false; while (self.msg_queue.readItem()) |msg| switch (msg) { .KeyDown => |code| switch (code) { 84 => { const mode = self.editor.lissajous_mode; const mode_int = @enumToInt(mode); var new_int = mode_int + 1; if (new_int >= comptime std.meta.fields(Editor.LissajousMode).len) { new_int = 0; } self.editor.lissajous_mode = @intToEnum(Editor.LissajousMode, new_int); }, else => {}, }, .MouseMove => |move_pos| { mouse_pos = move_pos; mouse_moved_this_frame = true; }, .MouseDown => { mouse_down = true; mouse_down_this_frame = true; }, .MouseUp => { 
mouse_down = false; dragging = null; }, else => {}, }; const picked_id = gl_renderer.render(self.editor.*, viewport, mouse_pos); if (dragging) |*drag_info| { drag_info.current = mouse_pos; std.log.debug("{}", .{drag_info}); } else if (mouse_down and mouse_moved_this_frame and picked_id != null) { dragging = DraggingInfo{ .start = mouse_pos, .current = mouse_pos, .ui_id = picked_id.?, }; } else if (mouse_down_this_frame and picked_id != null) { std.log.debug("Clicked {}", .{picked_id.?}); } _ = instance.swapBuffers(); } std.log.debug("Cleanup", .{}); } fn loadImages(self: *Renderer) !GLRenderer.Images { var img = try zigimg.Image.fromMemory(self.allocator, @embedFile("../../resources/scale.png")); var pixels = img.pixels orelse return error.ImageNoPixels; var buffer = try self.allocator.alloc(u8, pixels.len() * 4); var it = img.iterator(); var i: usize = 0; while (it.next()) |value| { buffer[i] = @floatToInt(u8, std.math.floor(value.R * 255)); buffer[i + 1] = @floatToInt(u8, std.math.floor(value.G * 255)); buffer[i + 2] = @floatToInt(u8, std.math.floor(value.B * 255)); buffer[i + 3] = @floatToInt(u8, std.math.floor(value.A * 255)); i += 4; } return GLRenderer.Images{ .scale = buffer, }; } }; fn customWindowProcedure(hwnd: HWND, msg: UINT, wparam: WPARAM, lparam: LPARAM) LRESULT { var proc_data = getProcDataProp(hwnd); var msg_queue = proc_data.msg_queue; switch (msg) { 0x100 => { msg_queue.writeItem(.{ .KeyDown = wparam }) catch unreachable; }, 0x101 => { msg_queue.writeItem(.{ .KeyUp = wparam }) catch unreachable; }, 512 => { const lo = @bitCast(i16, @intCast(u16, lparam & 0xffff)); const hi = @bitCast(i16, @intCast(u16, (lparam >> 16) & 0xffff)); const mouse_pos = UI.MousePos{ .x = lo, .y = hi }; msg_queue.writeItem(.{ .MouseMove = mouse_pos }) catch unreachable; }, 513 => { msg_queue.writeItem(.{ .MouseDown = {} }) catch unreachable; }, 514 => { msg_queue.writeItem(.{ .MouseUp = {} }) catch unreachable; }, else => {}, } const old_proc = @intToPtr(WNDPROC, 
@intCast(usize, proc_data.old_wnd_proc)); return CallWindowProcA(old_proc, hwnd, msg, wparam, lparam); } fn getProcDataProp(hwnd: HWND) *ProcedureData { const handle = GetPropA(hwnd, ProcedureData.wndproc_atom_ptr.?); const aligned_ptr = @alignCast(@alignOf(ProcedureData), handle); return @ptrCast(*ProcedureData, aligned_ptr); } extern "user32" fn GetDC(hwnd: HWND) HDC; extern "user32" fn GetWindowLongPtrA(HWND, c_int) LONG_PTR; extern "user32" fn CallWindowProcA(WNDPROC, HWND, UINT, WPARAM, LPARAM) LRESULT; extern "user32" fn SetWindowLongPtrA(HWND, c_int, LONG_PTR) LONG_PTR; extern "user32" fn GlobalAddAtomA(LPCSTR) ATOM; extern "user32" fn SetPropA(HWND, LPCSTR, HANDLE) BOOL; extern "user32" fn GetPropA(HWND, LPCSTR) HANDLE; extern "gdi32" fn ChoosePixelFormat(hdc: HDC, ppfd: *const PixelFormatDescriptor) c_int; extern "gdi32" fn SetPixelFormat(hdc: HDC, format: c_int, ppfd: *const PixelFormatDescriptor) BOOL; extern "opengl32" fn wglCreateContext(hdc: HDC) ?HGLRC; extern "opengl32" fn wglDeleteContext(HGLRC) BOOL; extern "opengl32" fn wglMakeCurrent(hdc: HDC, glrc: ?HGLRC) BOOL; extern "opengl32" fn wglGetProcAddress(name: LPCSTR) ?*c_void; extern "opengl32" fn SwapBuffers(hdc: HDC) BOOL; extern fn SetLastError(DWORD) void; extern fn GetLastError() DWORD; const WNDPROC = fn (hwnd: HWND, msg: UINT, wparam: WPARAM, lparam: LPARAM) callconv(.C) LRESULT; const GWLP_WNDPROC = -4; const PixelFormatDescriptor = extern struct { Size: WORD, Version: WORD, wFlags: DWORD, PixelType: BYTE, ColorBits: BYTE, RedBits: BYTE, RedShift: BYTE, GreenBits: BYTE, GreenShift: BYTE, BlueBits: BYTE, BlueShift: BYTE, AlphaBits: BYTE, AlphaShift: BYTE, AccumBits: BYTE, AccumRedBits: BYTE, AccumGreenBits: BYTE, AccumBlueBits: BYTE, AccumAlphaBits: BYTE, DepthBits: BYTE, StencilBits: BYTE, AuxBuffers: BYTE, LayerType: BYTE, Reserved: BYTE, wLayerMask: DWORD, wVisibleMask: DWORD, wDamageMask: DWORD, }; const PFD_DRAW_TO_WINDOW: DWORD = 4; const PFD_SUPPORT_OPENGL: DWORD = 32; const 
PFD_DOUBLEBUFFER: DWORD = 1; const PFD_TYPE_RGBA: DWORD = 0; const PFD_MAIN_PLANE: DWORD = 1; const WGL_CONTEXT_MAJOR_VERSION_ARB = 0x2091; const WGL_CONTEXT_MINOR_VERSION_ARB = 0x2092; const WGL_CONTEXT_PROFILE_MASK_ARB = 0x9126; const WGL_CONTEXT_CORE_PROFILE_BIT_ARB = 0x00000001; fn wglSetup(allocator: *std.mem.Allocator, hwnd: HWND) !Instance { const pfd = PixelFormatDescriptor{ .Size = @sizeOf(PixelFormatDescriptor), .Version = 1, .wFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER, .PixelType = PFD_TYPE_RGBA, .ColorBits = 32, .RedBits = 0, .RedShift = 0, .GreenBits = 0, .GreenShift = 0, .BlueBits = 0, .BlueShift = 0, .AlphaBits = 0, .AlphaShift = 0, .AccumBits = 0, .AccumRedBits = 0, .AccumGreenBits = 0, .AccumBlueBits = 0, .AccumAlphaBits = 0, .DepthBits = 24, .StencilBits = 8, .AuxBuffers = 0, .LayerType = PFD_MAIN_PLANE, .Reserved = 0, .wLayerMask = 0, .wVisibleMask = 0, .wDamageMask = 0, }; const hdc = GetDC(hwnd); const pixel_format = ChoosePixelFormat(hdc, &pfd); if (pixel_format == 0) { return error.PixelFormatIsZero; } if (SetPixelFormat(hdc, pixel_format, &pfd) == FALSE) { return error.SetPixelFormatFailed; } var gl_lib = try std.DynLib.open("opengl32.dll"); const gl_ctx = wglCreateContext(hdc) orelse return error.wglCreateContextFailed; defer std.debug.assert(wglDeleteContext(gl_ctx) == TRUE); if (wglMakeCurrent(hdc, gl_ctx) == FALSE) return error.wglMakeCurrentFailed; const wglCreateContextAttribsARB = getOpenGLProc(fn (HDC, ?HGLRC, ?[*:0]const i32) ?HGLRC, "wglCreateContextAttribsARB").?; _ = setupSwapInterval(&gl_lib, hdc); const attribs = [_]i32{ WGL_CONTEXT_MAJOR_VERSION_ARB, 3, WGL_CONTEXT_MINOR_VERSION_ARB, 3, WGL_CONTEXT_PROFILE_MASK_ARB, WGL_CONTEXT_CORE_PROFILE_BIT_ARB, } ++ [_]i32{0}; const c_attribs = @ptrCast([*:0]const i32, &attribs); const modern_ctx = wglCreateContextAttribsARB(hdc, null, c_attribs) orelse return error.CreateContextARBNull; if (wglMakeCurrent(hdc, modern_ctx) == FALSE) return 
error.wglMakeModernCurrentFailed; return Instance{ .allocator = allocator, .hdc = hdc, .ctx = modern_ctx, .dynlib = gl_lib, }; } fn setupSwapInterval(gl_lib: *std.DynLib, hdc: HDC) bool { const glGetString = gl_lib.lookup(fn (GL.Enum) [*:0]GL.char, "glGetString").?; const wglGetExtensionsStringARB = getOpenGLProc(fn (HDC) [*:0]u8, "wglGetExtensionsStringARB").?; const extensions = std.mem.span(glGetString(GL.EXTENSIONS)); const wgl_extensions = std.mem.span(wglGetExtensionsStringARB(hdc)); const ext_supported = std.mem.indexOf(u8, extensions, "WGL_EXT_swap_control") != null; const wgl_ext_supported = std.mem.indexOf(u8, wgl_extensions, "WGL_EXT_swap_control") != null; if (ext_supported and wgl_ext_supported) { const wglSwapIntervalEXT = getOpenGLProc(fn (c_int) BOOL, "wglSwapIntervalEXT").?; return wglSwapIntervalEXT(1) == TRUE; } return false; } fn getOpenGLProc(comptime T: type, name: [*:0]const u8) ?T { const ptr = wglGetProcAddress(name) orelse return null; return @ptrCast(T, ptr); } const Instance = struct { allocator: *std.mem.Allocator, dynlib: std.DynLib, hdc: HDC, ctx: HGLRC, pub fn deinit(self: *Instance) void { self.dynlib.close(); if (wglDeleteContext(self.ctx) != TRUE) std.log.crit("wglDeleteContext failed", .{}); } pub fn makeCurrent(self: *Instance) bool { return wglMakeCurrent(self.hdc, self.ctx) == TRUE; } pub fn swapBuffers(self: *Instance) bool { return SwapBuffers(self.hdc) == TRUE; } pub fn loadFunctions(self: *Instance) !GL { var gl: GL = undefined; var success: bool = true; inline for (std.meta.fields(GL)) |field| { const info = @typeInfo(field.field_type); switch (info) { .Fn => { var buf = try self.allocator.allocSentinel(u8, field.name.len, 0); defer self.allocator.free(buf); std.mem.copy(u8, buf, field.name); buf[buf.len] = 0; if (getOpenGLProc(field.field_type, buf)) |fn_ptr| { @field(gl, field.name) = fn_ptr; } else if (self.dynlib.lookup(field.field_type, buf)) |fn_ptr| { @field(gl, field.name) = fn_ptr; } else { std.log.crit("Unable 
to get a valid pointer for '{s}'", .{field.name}); success = false; } }, else => {}, } } return if (success) gl else error.UnableToGetAllFunctions; } };
https://raw.githubusercontent.com/schroffl/zig-analyzer-vst/f71084f1312657c131c1d393e58b2a9f20575cd9/src/windows/gl_wrapper.zig
//! sRGB / linear-RGB / HSV color conversion helpers.
const std = @import("std");
const zlm = @import("zlm");

/// Convert an sRGB color to linear RGB by removing gamma per channel.
pub fn lrgbFromSrgb(srgb: zlm.Vec3) zlm.Vec3 {
    const r = inverseGamma(srgb.x);
    const g = inverseGamma(srgb.y);
    const b = inverseGamma(srgb.z);
    return zlm.vec3(r, g, b);
}

/// Convert an HSV color (hue in radians) to linear RGB.
pub fn lrgbFromHsv(hsv: zlm.Vec3) zlm.Vec3 {
    return lrgbFromSrgb(srgbFromHsv(hsv));
}

/// Convert an HSV color (hue in radians) to sRGB.
pub fn srgbFromHsv(hsv: zlm.Vec3) zlm.Vec3 {
    const full_turn = zlm.toRadians(360.0);
    // Wrap the hue into [0, 2*pi) before evaluating the channel formula.
    const wrapped = zlm.vec3(@mod(hsv.x, full_turn), hsv.y, hsv.z);
    return zlm.vec3(hsvF(wrapped, 5.0), hsvF(wrapped, 3.0), hsvF(wrapped, 1.0));
}

// One channel of the standard HSV-to-RGB formula (n = 5, 3, 1 for R, G, B).
fn hsvF(hsv: zlm.Vec3, n: f32) f32 {
    const k = @mod(n + hsv.x / zlm.toRadians(60.0), 6.0);
    const t = @max(0.0, @min(k, @min(4.0 - k, 1.0)));
    return hsv.z - hsv.z * hsv.y * t;
}

// Inverse of the sRGB transfer function for a single channel.
fn inverseGamma(c: f32) f32 {
    return if (c <= 0.04045)
        c / 12.92
    else
        std.math.pow(f32, (c + 0.055) / 1.055, 2.4);
}
https://raw.githubusercontent.com/Sirius902/acid-breakout-vk/5805d1f6f8c4eb5ea1f307f8b2c929416d492084/src/color.zig
const std = @import("std");
const object = @import("object.zig");
const lexer = @import("lexer.zig");
const parser = @import("parser.zig");
const compiler = @import("compiler.zig");
const vm = @import("vm.zig");
const evaluator = @import("evaluator.zig");
const env = @import("environment.zig");
const code = @import("code.zig");

// Benchmark program: naive doubly-recursive fibonacci, exercised through
// either execution engine.
const input =
    \\
    \\let fibonacci = fn(x) {
    \\if (x == 0) { 0
    \\ } else {
    \\ if (x == 1) {
    \\ return 1;
    \\ } else {
    \\ fibonacci(x - 1) + fibonacci(x - 2);
    \\ }
    \\} };
    \\ fibonacci(50);
;

/// Benchmark entry point. Pass an argument containing "vm" to run the
/// bytecode VM; any other argument selects the tree-walking evaluator.
/// Prints the chosen engine, the result object and the elapsed time in ms.
pub fn main() !void {
    var cli_args = std.process.args();
    _ = cli_args.skip(); //to skip the zig call

    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    var arena = std.heap.ArenaAllocator.init(gpa.allocator());
    defer arena.deinit();
    const alloc = arena.allocator();

    const engine = cli_args.next();

    // Filled in by whichever engine branch runs below.
    var duration: i64 = undefined;
    var result: *object.Object = undefined;

    var lex = lexer.Lexer.init(alloc, input);
    defer lex.deinit();
    var prs = parser.Parser.init(lex, alloc);
    defer prs.deinit();
    const prog = prs.parseProgram();

    var definitions = try code.initDefinitions(alloc);
    defer definitions.deinit(alloc);

    if (engine) |eng| {
        std.debug.print("running with {s}\n", .{eng});
        if (std.mem.containsAtLeast(u8, eng, 1, "vm")) {
            var comp = compiler.Compiler.init(alloc, definitions);
            defer comp.deinit();
            try comp.compile(.{ .program = prog });

            var machine = vm.VM.init(alloc, comp.bytecode());
            defer machine.deinit();

            // Only the run itself is timed, not compilation.
            const start = std.time.milliTimestamp();
            machine.run() catch |e| {
                std.debug.print("VM Error: {any}\n", .{e});
                return;
            };
            const end = std.time.milliTimestamp();
            duration = end - start;

            const top = machine.lastPoppedStackElem();
            result = try alloc.create(object.Object);
            result.* = top;
        } else {
            std.debug.print("Using Evaluator\n", .{});
            var eval = evaluator.Evaluator.init(alloc);
            defer eval.deinit();
            var e = env.Environment.init(alloc);
            defer e.deinit();

            const start = std.time.milliTimestamp();
            result = eval.eval(.{ .program = prog }, e).?;
            const end = std.time.milliTimestamp();
            duration = end - start;
        }
    }

    std.debug.print("engine={any} result={s} duration={any}\n", .{ engine, result, duration });
}
https://raw.githubusercontent.com/JonnyWalker81/monkey-zig/0a06f5c33ccde0aa36fb97d612ea48bee2344f28/src/bench.zig
//! Interactive trivia quiz driven by the Open Trivia Database (example for evil-json).
const std = @import("std");
const json = @import("evil-json");

const Uri = std.Uri;
const http = std.http;
const Client = std.http.Client;
const RequestOptions = Client.RequestOptions;
const Allocator = std.mem.Allocator;
const Arena = std.heap.ArenaAllocator;

pub fn main() !void {
    // Everything is arena-allocated and released in one shot at exit.
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    const stdin = std.io.getStdIn().reader();
    const stdout = std.io.getStdOut().writer();
    var prng = std.rand.DefaultPrng.init(@intCast(std.time.milliTimestamp()));
    const random = prng.random();
    // Batches 20 questions per HTTP request.
    var trivia_provider = TriviaProvider(20).init(allocator);
    while (true) {
        if (trivia_provider.next()) |trivia| {
            // Three wrong answers plus the right one, shuffled before display.
            var answers: [4][]const u8 = trivia.incorrect_answers ++ .{trivia.correct_answer};
            try stdout.print("\n{s}\n", .{trivia.question});
            std.rand.shuffle(random, []const u8, answers[0..]);
            for (0.., answers) |choice, answer| {
                try stdout.print("({d}) {s}\n", .{ choice, answer });
            }
            // Prompt until the user enters a valid choice (0-3).
            while (true) {
                try stdout.print("Your answer: ", .{});
                var buf: [3]u8 = undefined;
                // Read errors fall back to "" (treated as invalid input below);
                // null means end-of-input.
                const input = stdin.readUntilDelimiterOrEof(&buf, '\n') catch "";
                if (input == null) {
                    try stdout.print("\nError: Couldn't take input.\n", .{});
                    return;
                }
                // NOTE(review): the last byte is sliced off before parsing, which
                // assumes a trailing '\r' (Windows line endings) — on platforms
                // where the line is just "0", len < 2 rejects valid input; confirm.
                if (input.?.len < 2) {
                    try stdout.print("Answer with a number between 0 and 3!\n", .{});
                    continue;
                }
                const index: ?usize = std.fmt.parseInt(
                    usize,
                    input.?[0 .. input.?.len - 1],
                    10,
                ) catch null;
                if (index == null or 4 <= index.?) {
                    try stdout.print("Answer with a number between 0 and 3!\n", .{});
                    continue;
                }
                if (std.mem.eql(u8, answers[index.?], trivia.correct_answer)) {
                    try stdout.print("Good!\n", .{});
                    break;
                } else {
                    try stdout.print("Wrong: It is {s}\n", .{trivia.correct_answer});
                    break;
                }
            }
        } else |_| {
            // Any provider failure (network, JSON shape) is retried forever.
            try stdout.print("\nThere was some problem getting the trivia, retrying...\n", .{});
        }
    }
}

/// One multiple-choice question as extracted from the API response.
const Trivia = struct {
    question: []const u8,
    correct_answer: []const u8,
    incorrect_answers: [3][]const u8,
};

/// Makes obtaining new Trivias convenient.
pub fn TriviaProvider(buffer_size: comptime_int) type {
    // The Open Trivia DB API serves at most 50 questions per request.
    if (buffer_size < 1 or 50 < buffer_size) {
        @compileError("buffer_size should be between 1 and 50");
    }
    return struct {
        // Index of the next trivia to hand out; starts past the end so the
        // first next() call triggers a fetch.
        current: usize,
        trivias: [buffer_size]Trivia,
        allocator: Allocator,

        /// Initializes a new TriviaProvider.
        pub fn init(allocator: Allocator) @This() {
            return @This(){
                .current = buffer_size,
                .trivias = std.mem.zeroes([buffer_size]Trivia),
                .allocator = allocator,
            };
        }

        /// Obtains a new trivia, refilling the buffer over HTTP when empty.
        pub fn next(self: *@This()) !Trivia {
            if (buffer_size <= self.current) {
                // Get new trivias whenever we run out.
                try self.newTrivias();
                self.current = 0;
            }
            defer self.current += 1;
            return self.trivias[self.current];
        }

        /// Fills the trivias array with new HISTORY trivias obtained from the trivia database.
        fn newTrivias(self: *@This()) !void {
            // Here we get the questions, answers and some incorrect answers through an http request.
            // data is a []const u8 containing JSON as text.
            // 23 here refers to the category HISTORY.
            const data = try requestTrivias(self.allocator, buffer_size, 23);
            defer self.allocator.free(data);
            // THE MOST IMPORTANT PART: parsing the trivias
            self.trivias = try parseTrivias(buffer_size, data, self.allocator);
        }
    };
}

// This is where the evil-json magic happens.
fn parseTrivias(amount: comptime_int, data: []const u8, allocator: Allocator) ![amount]Trivia {
    // This parses the data into a Parsed struct
    const parsed = try json.parse(data, allocator);
    // After obtaining all the necessary data we will free the parsed JSON.
    defer parsed.deinit();
    var result: [amount]Trivia = std.mem.zeroes([amount]Trivia);
    for (0..amount) |i| {
        // We create a json.Access from the parsed data, it uses the same allocator that parsed was allocated with.
        // accessUnmanaged() is also available if we want a custom allocator.
        // Or we could also use Access.init(...) passing both the value and allocator.
        var a = parsed.access();
        // We want to drop this access after the end of the scope.
        // This is necessary because we won't use get_and_deinit() on this access.
        defer a.deinit();
        // We progress the access by selecting the value with key "results" and then selecting the
        // item at index i. Open Trivia DB returns multiple questions
        // (we can specify how many in the request)
        _ = a.o("results").a(i);
        // We clone the access here which allows us to keep our "progress" in a, while progressing
        // further on a1 to get the value at the "question" key.
        //
        // There are two methods get() and get_and_deinit() that one can use to evaluate an
        // access (obtain the actual JSON value).
        var a1 = try a.clone();
        const question = try allocator.dupe(u8, (try a1.o("question").get_and_deinit()).string);
        // Notice we also dupe the memory because we will free the parsed JSON,
        // as it contains more information than what we need.
        // Again we clone a and progress down the JSON object to get the value at the "correct_answer" key.
        var a2 = try a.clone();
        const correct_answer = try allocator.dupe(u8, (try a2.o("correct_answer").get_and_deinit()).string);
        // We get the values at "incorrect_answers" similarly to the other two above; "array" here is an std.ArrayList(Value).
        var a3 = try a.clone();
        const incorrect_answers_array = (try a3.o("incorrect_answers").get_and_deinit()).array;
        var incorrect_answers: [3][]const u8 = std.mem.zeroes([3][]const u8);
        for (incorrect_answers_array.items, 0..) |answer, j| {
            incorrect_answers[j] = try allocator.dupe(u8, answer.string);
        }
        result[i] = Trivia{
            .question = question,
            .correct_answer = correct_answer,
            .incorrect_answers = incorrect_answers,
        };
    }
    return result;
}

// Requests trivias from the Open Trivia Database, the response is a slice containing JSON data.
/// Fetches `amount` multiple-choice trivias in `category` from the
/// Open Trivia Database and returns the raw JSON response body.
/// Caller owns the returned slice. Fixes over the previous version:
/// the HTTP client and request are now deinitialized, the server
/// header buffer lives on the stack instead of being a leaked heap
/// allocation, and the formatted query values are freed via defer so
/// they cannot leak on error paths.
fn requestTrivias(
    allocator: Allocator,
    amount: u8,
    category: u8,
) ![]const u8 {
    const amount_val = try std.fmt.allocPrint(allocator, "{d}", .{amount});
    defer allocator.free(amount_val);
    const category_val = try std.fmt.allocPrint(allocator, "{d}", .{category});
    defer allocator.free(category_val);

    const host = "https://opentdb.com/api.php";
    const params: [3][]const u8 = .{ "amount", "category", "type" };
    const values: [3][]const u8 = .{ amount_val, category_val, "multiple" };

    var url = std.ArrayList(u8).init(allocator);
    defer url.deinit();
    try url.appendSlice(host);
    try url.append('?');
    for (params, values) |param, value| {
        try url.appendSlice(param);
        try url.append('=');
        try url.appendSlice(value);
        // A trailing '&' after the last pair is accepted by the API.
        try url.append('&');
    }

    // The URL is built from fixed keys and "{d}"-formatted integers,
    // so it always parses.
    const uri = Uri.parse(url.items) catch unreachable;

    var client = Client{ .allocator = allocator };
    defer client.deinit();

    // Scratch space for response headers; no heap allocation needed.
    var header_buf: [4096]u8 = undefined;
    var request = try client.open(http.Method.GET, uri, .{
        .server_header_buffer = &header_buf,
    });
    defer request.deinit();
    try request.send(.{});
    try request.wait();
    return try request.reader().readAllAlloc(allocator, 10240);
}
https://raw.githubusercontent.com/Constanze3/evil-json/527da20e0719a8a32c3d4eb1d01e5bda8076372e/examples/trivia/main.zig
//! RFC2109 Cookie Syntax
const std = @import("std");

/// RFC2109 Cookie
///
/// Cookies are submitted by the server using the `Set-Cookie` header
/// and they can be provided to the server using the `Cookie` header.
pub const Cookie = struct {
    /// Name of the cookie
    name: []const u8,
    /// Value of the cookie
    value: []const u8,
    /// Optional comment, e.g. the intended use of a cookie
    comment: ?[]const u8 = null,
    /// Optional domain for which the cookie is valid
    domain: ?[]const u8 = null,
    /// Optional lifetime of the cookie in seconds
    max_age: ?usize = null,
    /// Optional secure attribute (attribute with no value)
    secure: ?bool = null,
    /// Optional subset of URLs to which this cookie applies
    path: ?[]const u8 = null,
    /// Decimal integer that identifies to which version of the
    /// state management specification the cookie conforms.
    version: usize,
    allocator: std.mem.Allocator,

    /// Free all memory owned by this cookie.
    pub fn deinit(self: *const @This()) void {
        self.allocator.free(self.name);
        self.allocator.free(self.value);
        if (self.comment) |v| {
            self.allocator.free(v);
        }
        if (self.domain) |v| {
            self.allocator.free(v);
        }
        if (self.path) |v| {
            self.allocator.free(v);
        }
    }

    /// Parse a cookie string returned by a server into a Cookie struct.
    ///
    /// Expects `name=value` followed by `;`-separated attributes; the
    /// `Version` attribute is mandatory. Returns `error.Malformed` on
    /// syntax errors. All indexing is bounds-checked: the previous
    /// version read past the end of `in` for inputs without a trailing
    /// attribute (e.g. "a=b; Version=1"), and value-less attributes such
    /// as `Secure` were only recognized when immediately followed by a
    /// second ';'.
    pub fn parse(in: []const u8, allocator: std.mem.Allocator) !@This() {
        var i: usize = 0;
        var j: usize = 0;
        var version_found: bool = false;

        // Cookie name: runs up to '='; ';' or ' ' here is malformed.
        while (j < in.len and in[j] != '=' and in[j] != ';' and in[j] != ' ') : (j += 1) {}
        if (j >= in.len or in[j] != '=') return error.Malformed;
        var k = in[i..j];
        j += 1;
        i = j;

        // Cookie value: must be terminated by ';' (attributes follow).
        while (j < in.len and in[j] != '=' and in[j] != ';' and in[j] != ' ') : (j += 1) {}
        if (j >= in.len or in[j] != ';') return error.Malformed;
        var v = in[i..j];
        j += 1;
        if (j < in.len and in[j] == ' ') j += 1;
        i = j;

        var self = @This(){
            .name = try allocator.dupe(u8, k),
            .value = try allocator.dupe(u8, v),
            .version = undefined,
            .allocator = allocator,
        };
        errdefer self.deinit();

        // Attribute list: `Key=Value` pairs or bare flags, separated by "; ".
        while (true) {
            if (j >= in.len) break;

            // Attribute key.
            while (j < in.len and in[j] != '=' and in[j] != ';') : (j += 1) {}
            k = in[i..j];
            // If the key scan stopped at ';' or the end of input, this is a
            // value-less attribute such as `Secure` or `HttpOnly`.
            const valueless = j >= in.len or in[j] == ';';
            j += 1;
            i = j;
            if (valueless) {
                if (std.mem.eql(u8, "Secure", k)) {
                    self.secure = true;
                }
                if (j >= in.len) break;
                if (in[j] == ' ') j += 1;
                i = j;
                continue;
            }

            // Attribute value: everything up to the next ';' or end of input.
            while (j < in.len and in[j] != ';') : (j += 1) {}
            v = in[i..j];
            j += 1;
            if (j < in.len and in[j] == ' ') j += 1;
            i = j;

            // First occurrence wins for every attribute except Version.
            if (std.mem.eql(u8, "Comment", k) and self.comment == null) {
                self.comment = try allocator.dupe(u8, v);
            } else if (std.mem.eql(u8, "Domain", k) and self.domain == null) {
                self.domain = try allocator.dupe(u8, v);
            } else if (std.mem.eql(u8, "Max-Age", k) and self.max_age == null) {
                self.max_age = try std.fmt.parseInt(usize, v, 10);
            } else if (std.mem.eql(u8, "Path", k) and self.path == null) {
                self.path = try allocator.dupe(u8, v);
            } else if (std.mem.eql(u8, "Version", k)) {
                self.version = try std.fmt.parseInt(usize, v, 10);
                version_found = true;
            }
        }

        if (!version_found) return error.Malformed;
        return self;
    }

    /// Create a cookie string suitable for a `Cookie` request header.
    /// Caller owns the returned slice.
    pub fn stringify(self: *const @This(), allocator: std.mem.Allocator) ![]const u8 {
        var ret = std.ArrayList(u8).init(allocator);
        const writer = ret.writer();
        try writer.print("$Version={d}; {s}={s}", .{
            self.version,
            self.name,
            self.value,
        });
        if (self.path) |path| {
            try writer.print("; $Path={s}", .{path});
        }
        if (self.domain) |domain| {
            try writer.print("; $Domain={s}", .{domain});
        }
        return try ret.toOwnedSlice();
    }
};

test "parse cookie #1" {
    const allocator = std.testing.allocator;
    const cookie = try Cookie.parse("AuthSession=YWRtaW46NjRERTFEM0E6GcJ5S_VKUNOihqqwnNFiVMoMLr7T0Knn2-bFaYTsz7U; Version=1; Expires=Thu, 17-Aug-2023 13:24:34 GMT; Max-Age=600; Path=/; HttpOnly", allocator);
    defer cookie.deinit();
    try std.testing.expectEqualStrings("AuthSession", cookie.name);
    try std.testing.expectEqualStrings("YWRtaW46NjRERTFEM0E6GcJ5S_VKUNOihqqwnNFiVMoMLr7T0Knn2-bFaYTsz7U", cookie.value);
    try std.testing.expectEqual(@as(usize, @intCast(1)), cookie.version);
    try std.testing.expectEqual(@as(usize, @intCast(600)), cookie.max_age.?);
    try std.testing.expectEqualStrings("/", cookie.path.?);
}

test "parse cookie without trailing attribute" {
    // Regression test: this input used to index past the end of the string.
    const allocator = std.testing.allocator;
    const cookie = try Cookie.parse("SID=abc123; Secure; Version=1", allocator);
    defer cookie.deinit();
    try std.testing.expectEqualStrings("SID", cookie.name);
    try std.testing.expectEqualStrings("abc123", cookie.value);
    try std.testing.expectEqual(true, cookie.secure.?);
    try std.testing.expectEqual(@as(usize, 1), cookie.version);
}

test "invalid cookie #1" {
    const allocator = std.testing.allocator;
    try std.testing.expectError(error.Malformed, Cookie.parse("AuthSessionYWRtaW46NjRERTFEM0E6GcJ5S_VKUNOihqqwnNFiVMoMLr7T0Knn2-bFaYTsz7U; Version=1; Expires=Thu, 17-Aug-2023 13:24:34 GMT; Max-Age=600; Path=/; HttpOnly", allocator));
}

test "invalid cookie #2" {
    const allocator = std.testing.allocator;
    try std.testing.expectError(error.Malformed, Cookie.parse("AuthSession=YWRtaW46NjRERTFEM0E6GcJ5S_VKUNOihqqwnNFiVMoMLr7T0Knn2-bFaYTsz7U Version=1; Expires=Thu, 17-Aug-2023 13:24:34 GMT; Max-Age=600; Path=/; HttpOnly", allocator));
}

test "stringify cookie #1" {
    const allocator = std.testing.allocator;
    const cookie = try Cookie.parse("AuthSession=YWRtaW46NjRERTFEM0E6GcJ5S_VKUNOihqqwnNFiVMoMLr7T0Knn2-bFaYTsz7U; Version=1; Expires=Thu, 17-Aug-2023 13:24:34 GMT; Max-Age=600; Path=/; HttpOnly", allocator);
    defer cookie.deinit();
    const s = try cookie.stringify(allocator);
    defer allocator.free(s);
    try std.testing.expectEqualStrings("$Version=1; AuthSession=YWRtaW46NjRERTFEM0E6GcJ5S_VKUNOihqqwnNFiVMoMLr7T0Knn2-bFaYTsz7U; $Path=/", s);
}
https://raw.githubusercontent.com/r4gus/snorlax/cb9359bf07ae838e52fd3151c8bceee3022bc641/src/cookie.zig
const std = @import("std");
const arbor = @import("arbor");

/// Build script: registers the "Example Filter" audio plugin with arbor's
/// plugin build helper, forwarding the standard target/optimize options.
pub fn build(b: *std.Build) !void {
    try arbor.addPlugin(b, .{
        // Plugin metadata exposed to plugin hosts.
        // NOTE(review): `.id` still says "ExDist" and the description reads
        // like the Distortion example — confirm these are intended for the
        // Filter example.
        .description = .{
            .name = "Example Filter",
            .id = "com.Arbor.ExDist",
            .company = "Arboreal Audio",
            .version = "0.1.0",
            .copyright = "(c) 2024 Arboreal Audio, LLC",
            .url = "",
            .manual = "",
            .contact = "",
            .description = "Vintage analog warmth",
        },
        // Stereo audio effect (as opposed to an instrument).
        .features = arbor.features.STEREO | arbor.features.EFFECT,
        .root_source_file = "plugin.zig",
        .target = b.standardTargetOptions(.{}),
        .optimize = b.standardOptimizeOption(.{}),
    });
}
https://raw.githubusercontent.com/ArborealAudio/arbor/6619708f4e859226a4f1d9e041f4d86dd78fe059/examples/Filter/build.zig
//! Display module: re-exports the available display backends.
pub const backends = @import("display/backends.zig");
https://raw.githubusercontent.com/PhantomUIx/display-uefi/f7bc713d8ec5f65cb30261acb3e6714953c8c118/src/phantom/display.zig
//! Main entry point for the xxhash-simple library
//!
//! Re-exports all supported variants of the hashing library.

// Currently the only implemented variant: 64-bit XXH3.
pub const xxh3_64b = @import("./xxh3_64b.zig");

/// Enumerates the hash variants this library provides.
pub const HashVariant = enum {
    xxh3_64b,
};

test "force usage" {
    // Reference the sub-module so its own tests are compiled and run.
    _ = xxh3_64b;
}

test "comptime hash" {
    // Verifies the hash is evaluable at compile time.
    comptime {
        const text = "foo bar baz";
        _ = xxh3_64b.xxh3_64bits(text);
        // test larger
        _ = xxh3_64b.xxh3_64bits(text ** 20);
    }
}
https://raw.githubusercontent.com/Techcable/zig-xxhash-simple/35da1034b4d31f75879f7b4b59f9335c392b250d/src/xxhash.zig
//! Build script for the wasm raytracer.
//! NOTE: uses the legacy (pre-0.11) std.build API.
const std = @import("std");

// Size of one wasm linear-memory page.
const page_size = 65536; // in bytes

pub fn build(b: *std.build.Builder) void {
    // Adds the option -Drelease=[bool] to create a release build, which we set to be ReleaseSmall by default.
    b.setPreferredReleaseMode(.ReleaseSmall);
    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const mode = b.standardReleaseOptions();
    const raytracer_step = b.step("raytracer", "Compiles raytracer.zig");
    const raytracer_lib = b.addSharedLibrary("raytracer", "./raytracer.zig", .unversioned);
    raytracer_lib.setBuildMode(mode);
    // Freestanding wasm32 so the module can be loaded by a browser host.
    raytracer_lib.setTarget(.{
        .cpu_arch = .wasm32,
        .os_tag = .freestanding,
        .abi = .musl,
    });
    // Emit next to the sources; see https://github.com/ziglang/zig/issues/8633
    raytracer_lib.setOutputDir(".");
    raytracer_lib.import_memory = true; // import linear memory from the environment
    raytracer_lib.initial_memory = 32 * page_size; // initial size of the linear memory (1 page = 64kB)
    raytracer_lib.max_memory = 512 * page_size; // maximum size of the linear memory
    raytracer_lib.global_base = 6560; // offset in linear memory to place global data
    raytracer_lib.install();
    raytracer_step.dependOn(&raytracer_lib.step);
}
https://raw.githubusercontent.com/tmcw/zig-raytracer/8a08e00727910d7ff94a9298acde07ccecbf307f/build.zig
const std = @import("std");
const io = std.io;
const testing = std.testing;

/// A reader wrapper that can look up to `max_peek_len` bytes ahead
/// without consuming them from the underlying reader.
pub fn PeekReader(comptime ReaderType: type, comptime max_peek_len: usize) type {
    return struct {
        inner_reader: ReaderType,
        peek_buffer: [max_peek_len]u8 = undefined,
        peek_count: usize = 0,

        pub const Error = ReaderType.Error;
        pub const PeekError = Error || error{PeekLengthTooLarge};
        pub const Reader = io.Reader(*Self, Error, read);

        const Self = @This();

        /// Drains previously peeked bytes first, then forwards to the
        /// inner reader for the remainder of `dest`.
        pub fn read(self: *Self, dest: []u8) Error!usize {
            const from_peek = @min(self.peek_count, dest.len);
            @memcpy(dest[0..from_peek], self.peek_buffer[0..from_peek]);
            // Shift any still-buffered bytes to the front.
            self.peek_count -= from_peek;
            std.mem.copyForwards(u8, &self.peek_buffer, self.peek_buffer[from_peek..]);
            const from_inner = try self.inner_reader.read(dest[from_peek..]);
            return from_peek + from_inner;
        }

        /// Peek at the next byte; null at end of stream.
        pub fn peekByte(self: *Self) PeekError!?u8 {
            const bytes = (try self.peek(1)) orelse return null;
            return bytes[0];
        }

        /// Peek at the next `bytes_to_peek` bytes without consuming them.
        /// Returns null if the stream ends before that many bytes are
        /// available; the returned slice is valid until the next read/peek.
        pub fn peek(self: *Self, bytes_to_peek: usize) PeekError!?[]const u8 {
            if (bytes_to_peek > max_peek_len) return error.PeekLengthTooLarge;
            if (bytes_to_peek > self.peek_count) {
                // Top up the buffer from the inner reader.
                const target = self.peek_buffer[self.peek_count..bytes_to_peek];
                self.peek_count += try self.inner_reader.readAll(target);
            }
            if (self.peek_count < bytes_to_peek) return null;
            return self.peek_buffer[0..bytes_to_peek];
        }

        pub fn reader(self: *Self) Reader {
            return .{ .context = self };
        }
    };
}

/// Returns an initialised `PeekReader` wrapping `inner_reader`.
pub fn peekReader(inner_reader: anytype, comptime max_peek_len: u64) PeekReader(@TypeOf(inner_reader), max_peek_len) {
    return .{ .inner_reader = inner_reader };
}

test "peekByte" {
    var fbs = std.io.fixedBufferStream("hello world");
    var peeker = peekReader(fbs.reader(), 3);

    // Peeking repeatedly must not consume.
    try testing.expectEqual('h', (try peeker.peekByte()).?);
    try testing.expectEqual('h', (try peeker.peekByte()).?);
    try testing.expectEqual('h', try peeker.reader().readByte());
    try testing.expectEqual('e', (try peeker.peekByte()).?);
    try testing.expectEqual('e', (try peeker.peekByte()).?);
    try testing.expectEqual('e', try peeker.reader().readByte());
}

test "peek" {
    var fbs = std.io.fixedBufferStream("hello world");
    var peeker = peekReader(fbs.reader(), 3);

    try testing.expectEqualStrings("he", (try peeker.peek(2)).?);
    try testing.expectEqualStrings("hel", (try peeker.peek(3)).?);
    try testing.expectEqualStrings("he", (try peeker.peek(2)).?);
    try testing.expectEqual('h', try peeker.reader().readByte());
    try testing.expectEqualStrings("el", (try peeker.peek(2)).?);
    try testing.expectEqualStrings("el", (try peeker.peek(2)).?);
    try testing.expectEqualStrings("el", &try peeker.reader().readBytesNoEof(2));
    try testing.expectEqualStrings("lo", (try peeker.peek(2)).?);
    // Exhaust the stream; further peeks must report end of stream.
    try peeker.reader().skipUntilDelimiterOrEof(0);
    try testing.expectEqual(null, try peeker.peekByte());
}
https://raw.githubusercontent.com/jjkh/jlang/9c8277ced864e700e8bf64d6d612440c1bedffc8/src/helpers/peek_reader.zig
//! Advent of Code 2016, day 02: walk instructions ('U','D','L','R') over a
//! keypad, one line per code digit. Part 1 uses a 3x3 numeric pad; part 2
//! uses a diamond-shaped 5x5 pad.
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;

const util = @import("util.zig");
const gpa = util.gpa;

pub const data = @embedFile("data/day02.txt");
const testdata = "ULL\r\nRRDDD\r\nLURDL\r\nUUUUD";

test "day02_part1" {
    const res = part1(testdata);
    assert(std.mem.eql(u8, res, "1985"));
}

/// Part 1: 3x3 pad with digits 1-9; moves that would leave the pad are
/// ignored. Returns the code as a gpa-allocated string (caller frees).
pub fn part1(input: []const u8) []u8 {
    // One output digit per input line; puzzle input has well under 16 lines.
    var buffer: [16]u8 = undefined;
    var lines = splitSeq(u8, input, "\r\n");
    // Start on the '5' key (centre of the 3x3 grid).
    var x: u8 = 1;
    var y: u8 = 1;
    var bi: u8 = 0;
    while (lines.next()) |line| {
        for (line) |c| {
            switch (c) {
                'U' => if (y > 0) {
                    y -= 1;
                },
                'D' => if (y < 2) {
                    y += 1;
                },
                'L' => if (x > 0) {
                    x -= 1;
                },
                'R' => if (x < 2) {
                    x += 1;
                },
                else => unreachable,
            }
        }
        // Row-major position -> digit character ('1'..'9').
        buffer[bi] = '1' + (y * 3 + x);
        bi += 1;
    }
    return gpa.dupe(u8, buffer[0..bi]) catch unreachable;
}

test "day02_part2" {
    const res = part2(testdata);
    assert(std.mem.eql(u8, res, "5DB3"));
}

/// Part 2: diamond-shaped pad embedded in a 5x5 grid (0 = no key); moves
/// onto a 0 cell or off the grid are ignored. Returns a gpa-allocated
/// string (caller frees).
pub fn part2(input: []const u8) []u8 {
    var buffer: [16]u8 = undefined;
    var lines = splitSeq(u8, input, "\r\n");
    // Start on the '5' key at (x=0, y=2).
    var x: u8 = 0;
    var y: u8 = 2;
    var bi: u8 = 0;
    const pad: [5][5]u8 = .{
        [_]u8{ 0, 0, '1', 0, 0 },
        [_]u8{ 0, '2', '3', '4', 0 },
        [_]u8{ '5', '6', '7', '8', '9' },
        [_]u8{ 0, 'A', 'B', 'C', 0 },
        [_]u8{ 0, 0, 'D', 0, 0 },
    };
    while (lines.next()) |line| {
        for (line) |c| {
            // Compute the candidate move, then only commit it if it lands
            // on an actual key. `-|=` saturates at 0 so 'U'/'L' at the edge
            // stay in bounds without a separate check.
            var nx = x;
            var ny = y;
            switch (c) {
                'U' => ny -|= 1,
                'D' => ny += 1,
                'L' => nx -|= 1,
                'R' => nx += 1,
                else => unreachable,
            }
            if (nx < 5 and ny < 5 and pad[ny][nx] != 0) {
                x = nx;
                y = ny;
            }
        }
        buffer[bi] = pad[y][x];
        bi += 1;
    }
    //print("buf: {s}\n", .{buffer[0..bi]});
    return gpa.dupe(u8, buffer[0..bi]) catch unreachable;
}

/// Runs both parts against the embedded puzzle input and prints results
/// with per-part wall-clock timings.
pub fn main() !void {
    var timer = std.time.Timer.start() catch unreachable;
    const res = part1(data);
    const time = timer.lap();
    const res2 = part2(data);
    const time2 = timer.lap();
    print("Day 02:\n", .{});
    print("\tPart 1: {s}\n", .{res});
    print("\tPart 2: {s}\n", .{res2});
    print("\tTime: {}ns\n", .{time});
    print("\tTime: {}ns\n", .{time2});
}

// Useful stdlib functions
const tokenizeAny = std.mem.tokenizeAny;
const tokenizeSeq = std.mem.tokenizeSequence;
const tokenizeSca = std.mem.tokenizeScalar;
const splitAny = std.mem.splitAny;
const splitSeq = std.mem.splitSequence;
const splitSca = std.mem.splitScalar;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;

const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;

const print = std.debug.print;
const assert = std.debug.assert;

const sort = std.sort.block;
const asc = std.sort.asc;
const desc = std.sort.desc;

// Generated from template/template.zig.
// Run `zig build generate` to update.
// Only unmodified days will be updated.
https://raw.githubusercontent.com/jgh713/aoc/9ce203e4d6f53a81ad14e7b0eb095657df4f18e6/2016/src/day02.zig
//! Kernel entry point and early initialization (Limine boot protocol).
//! Sets up per-CPU state, physical/virtual memory, ACPI/APIC/HPET, the VFS
//! and the scheduler, then hands off to `mainThread`.
const logger = std.log.scoped(.main);

const C = @cImport({
    @cInclude("flanterm.h");
    @cInclude("backends/fb.h");
});

const std = @import("std");
const limine = @import("limine");

const acpi = @import("./acpi.zig");
const apic = @import("./apic.zig");
const arch = @import("./arch.zig");
const debug = @import("./debug.zig");
const hpet = @import("./hpet.zig");
const interrupts = @import("./interrupts.zig");
const lock = @import("./lock.zig");
const per_cpu = @import("./per_cpu.zig");
const pci = @import("./pci.zig");
const phys = @import("./phys.zig");
const scheduler = @import("./scheduler.zig");
const time = @import("./time.zig");
const utils = @import("./utils.zig");
const vfs = @import("./vfs.zig");
const virt = @import("./virt.zig");

/// Page-granular allocator backed by `kernel_paged_arena`, adapted to the
/// `std.mem.Allocator` vtable. `resize` always fails and `free` is a no-op:
/// paged-arena memory is reclaimed through the arena, not per-allocation.
const PageAllocator = struct {
    pub fn allocate(_: *@This(), pages: usize) ?u64 {
        return kernel_paged_arena.allocate(pages * std.mem.page_size);
    }

    fn alloc(ctx: *anyopaque, len: usize, _: u8, _: usize) ?[*]u8 {
        const self = @as(*PageAllocator, @ptrCast(@alignCast(ctx)));
        // Round the requested byte length up to whole pages.
        const pages = std.math.divCeil(usize, len, std.mem.page_size) catch unreachable;
        return @ptrFromInt(self.allocate(pages) orelse return null);
    }

    fn resize(_: *anyopaque, _: []u8, _: u8, _: usize, _: usize) bool {
        return false;
    }

    fn free(_: *anyopaque, _: []u8, _: u8, _: usize) void {}
};

// Optional framebuffer terminal; null until the framebuffer is initialized.
var flanterm_ctx: ?*C.flanterm_context = null;
var print_lock: lock.Spinlock = .{};

pub const log_level = std.log.Level.debug;

/// Minimal `std.os` shim so std's `page_allocator` routes into the kernel
/// paged heap in this freestanding environment.
pub const os = struct {
    pub const system = struct {};

    pub const heap = struct {
        pub const page_allocator = std.mem.Allocator{
            .ptr = &page_heap_allocator,
            .vtable = &std.mem.Allocator.VTable{
                .alloc = PageAllocator.alloc,
                .resize = PageAllocator.resize,
                .free = PageAllocator.free,
            },
        };
    };
};

pub const std_options: std.Options = .{ .logFn = log };

pub var kernel_va_arena: virt.Arena = .{};
pub var kernel_paged_arena: virt.Arena = .{};
pub var page_heap_allocator: PageAllocator = .{};
pub var gp_allocator = std.heap.GeneralPurposeAllocator(.{ .thread_safe = true, .MutexType = lock.Spinlock }){};
pub var allocator = gp_allocator.allocator();

// Limine boot protocol requests; exported so the bootloader locates them.
pub export var boot_info_req: limine.BootloaderInfoRequest = .{};
pub export var hhdm_req: limine.HhdmRequest = .{};
pub export var memory_map_req: limine.MemoryMapRequest = .{};
pub export var modules_req: limine.ModuleRequest = .{};
pub export var kernel_file_req: limine.KernelFileRequest = .{};
pub export var rsdp_req: limine.RsdpRequest = .{};
pub export var kernel_addr_req: limine.KernelAddressRequest = .{};
pub export var framebuffer_req: limine.FramebufferRequest = .{};
pub export var boot_time_req: limine.BootTimeRequest = .{};

/// Bootloader entry point. Initialization failure is logged (with a stack
/// trace when available) rather than panicking, then the CPU halts forever.
export fn _start() callconv(.C) noreturn {
    main() catch |err| {
        logger.err("Failed to initialize: {any}", .{err});

        if (@errorReturnTrace()) |stack_trace| {
            debug.printStackTrace(stack_trace);
        }
    };

    while (true) {
        arch.hlt();
    }
}

/// First scheduled kernel thread: brings up PCI and ACPI device enumeration,
/// then yields forever. Userspace init spawning is still disabled below.
fn mainThread(_: u8) !void {
    try pci.init();
    try acpi.enumerateDevices();

    // const process = try scheduler.spawnProcess(null);
    // const thread = try scheduler.spawnThread(process);
    // const init = try vfs.resolve(null, "/usr/bin/init", 0);

    // try thread.exec(init, &.{"/usr/bin/init"}, &.{});
    // scheduler.enqueue(thread);

    while (true) {
        scheduler.yield();
    }
}

/// Arena backing-store callback: allocates virtual space from `source` and
/// maps fresh physical pages into it. `size` must be page-aligned.
/// NOTE(review): pages already mapped are not unwound if a later map fails —
/// confirm whether leaking them here is acceptable.
fn pagedAlloc(source: *virt.Arena, size: usize) ?u64 {
    const page_table = virt.kernel_address_space.page_table;
    const pages = @divExact(size, std.mem.page_size);
    const address = source.allocate(pages * std.mem.page_size) orelse return null;

    for (0..pages) |i| {
        const page = phys.allocate(1, .conventional) orelse return null;

        page_table.mapPage(
            address + i * std.mem.page_size,
            page,
            virt.PTEFlags.present | virt.PTEFlags.writable,
        ) catch return null;
    }

    return address;
}

/// Arena backing-store callback: unmaps and frees each page of the range,
/// then returns the virtual space to `source`.
fn pagedFree(source: *virt.Arena, address: u64, size: usize) void {
    const page_table = virt.kernel_address_space.page_table;
    const pages = std.math.divCeil(usize, size, std.mem.page_size) catch unreachable;

    for (0..pages) |i| {
        const addr = address + i * std.mem.page_size;
        const phys_addr = page_table.translate(addr) orelse unreachable;

        page_table.unmapPage(addr) catch unreachable;
        phys.free(phys_addr, 1);
    }

    source.free(address, size);
}

/// Kernel initialization, run with interrupts disabled. Order matters:
/// per-CPU state, framebuffer console, VA arenas, physical memory, paging,
/// ACPI, timers, VFS, scheduler.
fn main() !void {
    asm volatile ("cli");
    defer asm volatile ("sti");

    per_cpu.initBsp();
    per_cpu.initFeatures();

    if (framebuffer_req.response) |fb_res| {
        const framebuffer = fb_res.framebuffers()[0];

        flanterm_ctx = C.flanterm_fb_init(null, null, @ptrCast(@alignCast(framebuffer.address)), //
            framebuffer.width, framebuffer.height, framebuffer.pitch, framebuffer.red_mask_size, framebuffer.red_mask_shift, //
            framebuffer.green_mask_size, framebuffer.green_mask_shift, framebuffer.blue_mask_size, framebuffer.blue_mask_shift, //
            null, null, null, null, null, null, null, null, 0, 0, 1, 0, 0, 0);
    }

    virt.bootstrapArena();

    kernel_va_arena = virt.Arena.init("kernel-va", 0xFFFF_A000_0000_0000, utils.tib(16));
    kernel_paged_arena = virt.Arena.initWithSource("kernel-paged", &kernel_va_arena, pagedAlloc, pagedFree);

    const boot_info_res = boot_info_req.response.?;
    const hhdm_res = hhdm_req.response.?;
    const memory_map_res = memory_map_req.response.?;
    const kernel_addr_res = kernel_addr_req.response.?;
    const modules_res = modules_req.response.?;
    const rsdp_res = rsdp_req.response.?;

    if (kernel_file_req.response) |res| {
        // Fixed typo in the log message ("parsee" -> "parse").
        debug.init(res) catch |err| logger.warn("Failed to parse debug information: {any}", .{err});
    }

    if (boot_time_req.response) |res| {
        time.init(res);
    }

    std.debug.assert(hhdm_res.offset == virt.asHigherHalf(u64, 0));

    logger.info("Booted using {s} {s}", .{ boot_info_res.name, boot_info_res.version });

    try phys.init(memory_map_res);
    try virt.init(memory_map_res, kernel_addr_res);
    try per_cpu.init();
    try acpi.init(rsdp_res);
    try hpet.init(false);

    apic.init();

    try vfs.init(modules_res);
    try scheduler.init();

    _ = try scheduler.startKernelThread(mainThread, 0);
}

/// Panic handler: print the message and a stack walk, then halt forever.
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, ret_addr: ?usize) noreturn {
    asm volatile ("cli");

    _ = error_return_trace;
    _ = ret_addr;

    logger.err("Kernel panic: {s}", .{msg});
    debug.printStackIterator(std.debug.StackIterator.init(@returnAddress(), @frameAddress()));

    while (true) {
        arch.hlt();
    }
}

/// Zero-size writer that fans log output out to the debug port and, when
/// present, the flanterm framebuffer console.
const LogWriter = struct {
    pub const Error = error{};

    pub fn write(_: @This(), bytes: []const u8) Error!usize {
        debug.debugPrint(bytes);

        if (flanterm_ctx) |ctx| {
            C.flanterm_write(ctx, bytes.ptr, bytes.len);
        }

        return bytes.len;
    }

    pub fn writeByte(self: @This(), byte: u8) Error!void {
        _ = try self.write(&.{byte});
    }

    pub fn writeBytesNTimes(self: @This(), bytes: []const u8, n: usize) Error!void {
        for (0..n) |_| {
            _ = try self.write(bytes);
        }
    }

    pub fn writeAll(self: @This(), bytes: []const u8) Error!void {
        _ = try self.write(bytes);
    }
};

/// `std.Options.logFn` implementation: serializes output with a spinlock
/// and prefixes each line with a monotonic timestamp, CPU id, level, scope.
pub fn log(
    comptime level: std.log.Level,
    comptime scope: anytype,
    comptime fmt: []const u8,
    args: anytype,
) void {
    print_lock.lock();
    defer print_lock.unlock();

    const current_time = time.getClock(.monotonic);

    // LogWriter is zero-size, so an `undefined` instance is fine here.
    std.fmt.format(
        @as(LogWriter, undefined),
        "[{d:>5}.{d:0>6}] [CPU{d}] {s}({s}): " ++ fmt ++ "\n",
        .{
            @as(u64, @intCast(current_time.seconds)),
            @as(u64, @intCast(current_time.nanoseconds)) / std.time.ns_per_us,
            per_cpu.get().lapic_id,
            @tagName(level),
            @tagName(scope),
        } ++ args,
    ) catch unreachable;
}
https://raw.githubusercontent.com/48cf/zigux/ad2f32b6ef9767af8ced34368b8aa2578f9a14a2/kernel/src/main.zig
const std = @import("std"); const macho = std.macho; const OData = @import("odata.zig").OData; fn format_prot(prot: std.macho.vm_prot_t) [3]u8 { const PROT = std.macho.PROT; var buffer = [3]u8{ '-', '-', '-' }; if ((prot & PROT.READ) != 0) { buffer[0] = 'R'; } if ((prot & PROT.WRITE) != 0) { buffer[1] = 'W'; } if ((prot & PROT.EXEC) != 0) { buffer[2] = 'X'; } return buffer; } pub fn segment_cmds(odata: *OData) void { for (odata.load_cmds.items) |seg| { std.debug.print("{s:<15}fileoff: {x:<7}filesize: {d:<7}vmaddr: {x:<12}vmsize: {x:<12}maxprot: {s:<7}initprot: {s}\n", .{ seg.segment_cmd.segName(), seg.segment_cmd.fileoff, seg.segment_cmd.filesize, seg.segment_cmd.vmaddr, seg.segment_cmd.vmsize, format_prot(seg.segment_cmd.maxprot), format_prot(seg.segment_cmd.initprot), }); for (seg.sections.items) |sec| { std.debug.print(" {s:<20}addr: {x}\n", .{ sec.sectName(), sec.addr }); } std.debug.print("\n", .{}); } } // pub fn symtab(odata: *OData) void { // std.debug.print("Symtab:\n", .{}); // for (odata.symtab_entries.items) |nlist| { // if (nlist.sect()) { // std.debug.print("nlist value: 0x{x}\n", .{nlist.n_value}); // const seg = odata.segment_at(nlist.n_value); // if (seg != null) // std.debug.print(">> @ 0x{x}\tin {s}\n", .{ nlist.n_value, seg.?.segname }); // } // } // }
https://raw.githubusercontent.com/leBolideur/opack/fa6480a7475a16dbe61cd864a3478b93105f8e5d/src/printer.zig
//! Advent of Code 2023, day 24: hailstone trajectory intersections.
//! This part holds the data types, input parsing, and the 2D (XY-plane)
//! pairwise intersection used for part 1.
const std = @import("std");
const bufIter = @import("./buf-iter.zig");
const util = @import("./util.zig");

const assert = std.debug.assert;

// alternative with arena allocator:
// pub fn main(in_allocator: std.mem.Allocator, args: []const [:0]u8) anyerror!void {
//     var arena = std.heap.ArenaAllocator.init(in_allocator);
//     defer arena.deinit();
//     var allocator = arena.allocator();

/// 3D point/vector with custom "x,y,z" formatting.
const Coord3 = struct {
    x: f64,
    y: f64,
    z: f64,

    pub fn format(self: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: std.fs.File.Writer) !void {
        _ = fmt;
        _ = options;
        try std.fmt.format(writer, "{d},{d},{d}", .{ self.x, self.y, self.z });
    }
};

/// A hailstone: position `p` at t=0 and constant velocity `v`.
const Hailstone = struct {
    p: Coord3,
    v: Coord3,

    pub fn format(self: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: std.fs.File.Writer) !void {
        _ = fmt;
        _ = options;
        try std.fmt.format(writer, "{any} @ {any}", .{ self.p, self.v });
    }
};

/// Parses a "px, py, pz @ vx, vy, vz" line into a Hailstone.
fn parseHailstone(line: []const u8) !Hailstone {
    var intBuf: [6]i64 = undefined;
    const xs = try util.extractIntsIntoBuf(i64, line, &intBuf);
    // Fixed: assert on the number of ints actually parsed (xs.len), not on
    // the backing array's length, which is always 6 by construction.
    assert(xs.len == 6);
    return Hailstone{
        .p = Coord3{
            .x = @floatFromInt(xs[0]),
            .y = @floatFromInt(xs[1]),
            .z = @floatFromInt(xs[2]),
        },
        .v = Coord3{
            .x = @floatFromInt(xs[3]),
            .y = @floatFromInt(xs[4]),
            .z = @floatFromInt(xs[5]),
        },
    };
}

// Derivation of the closed-form solution used below:
// x1 = px1 + vx1 * t1
// y1 = py1 + vy1 * t1
// x2 = px2 + vx2 * t2
// y2 = py2 + vy2 * t2

// px1 + vx1 * t1 = px2 + vx2 * t2
// py1 + vy1 * t1 = py2 + vy2 * t2

// (px1 - px2) = vx2 * t2 - vx1 * t1
// (py1 - py2) = vy2 * t2 - vy1 * t1

// ((px1 - px2) + vx1 * t1)/vx2 = t2

// vy2(px1 - px2) = vy2(vx2 * t2 - vx1 * t1)
// vx2(py1 - py2) = vx2(vy2 * t2 - vy1 * t1)
// vy2(px1 - px2) - vx2(py1 - py2) = (vx2*vy1-vy2*vx1)*t1
// t1 = (vy2(px1 - px2) - vx2(py1 - py2)) / (vx2*vy1-vy2*vx1)

/// Intersection point of two 2D paths, plus the time each stone reaches it.
const CoordTimes = struct {
    x: f64,
    y: f64,
    z: f64,
    t1: f64,
    t2: f64,

    pub fn format(self: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: std.fs.File.Writer) !void {
        _ = fmt;
        _ = options;
        try std.fmt.format(writer, "{d},{d},{d} @ {d}/{d}", .{ self.x, self.y, self.z, self.t1, self.t2 });
    }
};

/// Intersects the XY-plane projections of two hailstone paths.
/// Returns null for parallel paths; z is carried along from stone `a`.
fn intersectionXY(a: Hailstone, b: Hailstone) ?CoordTimes {
    const px1: f64 = a.p.x;
    const vx1: f64 = a.v.x;
    const py1: f64 = a.p.y;
    const vy1: f64 = a.v.y;
    const px2: f64 = b.p.x;
    const vx2: f64 = b.v.x;
    const py2: f64 = b.p.y;
    const vy2: f64 = b.v.y;

    // Parallel (or identical) direction vectors: no unique intersection.
    const den: f64 = vx2 * vy1 - vy2 * vx1;
    if (den == 0) {
        return null;
    }
    const num: f64 = vy2 * (px1 - px2) - vx2 * (py1 - py2);
    const t1: f64 = num / den;

    // Fixed: guard the vx2 == 0 case *before* dividing by vx2. When b moves
    // vertically in XY, swap the stones so t2 comes out of the other frame.
    if (vx2 == 0) {
        assert(vx1 != 0);
        const flip = intersectionXY(b, a);
        if (flip) |h| {
            return CoordTimes{ .x = h.x, .y = h.y, .z = h.z, .t1 = h.t2, .t2 = h.t1 };
        }
        return null;
    }
    const t2 = ((px1 - px2) + vx1 * t1) / vx2;

    const x = a.p.x + a.v.x * t1;
    const y = a.p.y + a.v.y * t1;
    const z = a.p.z + a.v.z * t1;
    return CoordTimes{ .x = x, .y = y, .z = z, .t1 = t1, .t2 = t2 };
}

// Sample-input bounds kept for reference:
// const min = 7;
// const max = 27;
const min = 200000000000000;
const max = 400000000000000;

/// Part-1 predicate: intersection lies inside the test area and occurs in
/// the future for both stones.
fn isValid(hit: CoordTimes) bool {
    return (hit.x >= min and hit.y >= min and hit.x <= max and hit.y <= max and hit.t1 > 0 and hit.t2 > 0);
}

/// Position of hailstone `h` at time `t`.
fn whereAt(h: Hailstone, t: f64) Coord3 {
    return Coord3{
        .x = h.p.x + t * h.v.x,
        .y = h.p.y + t * h.v.y,
        .z = h.p.z + t * h.v.z,
    };
}

// if a hailstone hit a at t=0 and b at t=1, what would it be?
/// Constructs the hailstone that would pass through stone `a`'s position at
/// t=0 and stone `b`'s position at t=1.
fn impliedHailstone(a: Hailstone, b: Hailstone) Hailstone {
    const p1 = whereAt(a, 0);
    const p2 = whereAt(b, 1);
    const vx = p2.x - p1.x;
    const vy = p2.y - p1.y;
    const vz = p2.z - p1.z;
    return Hailstone{
        .p = p1,
        // Fixed: the y-component previously reused vx (vy was computed and
        // then discarded with `_ = vy;`).
        .v = Coord3{ .x = vx, .y = vy, .z = vz },
    };
}

/// Plane z = a*x + b*y + c, in extended precision.
const Plane = struct {
    a: f128,
    b: f128,
    c: f128,
};

/// Time at which hailstone `s` crosses plane `p` (computed in f128 to limit
/// rounding error, returned as f64).
fn timeForStone(p: Plane, s: Hailstone) f64 {
    const a = p.a;
    const b = p.b;
    const c = p.c;
    const px: f128 = s.p.x;
    const py: f128 = s.p.y;
    const pz: f128 = s.p.z;
    const vx: f128 = s.v.x;
    const vy: f128 = s.v.y;
    const vz: f128 = s.v.z;

    const num = pz - a * px - b * py - c;
    const den = a * vx + b * vy - vz;
    const result = num / den;
    return @floatCast(result);
}

/// Recovers a hailstone from two (time, position) samples on its path.
fn findOrigin(t1: f64, pt1: Coord3, t2: f64, pt2: Coord3) Hailstone {
    const dt = t2 - t1;
    const dx = (pt2.x - pt1.x) / dt;
    const dy = (pt2.y - pt1.y) / dt;
    const dz = (pt2.z - pt1.z) / dt;

    const x0 = pt1.x - dx * t1;
    const y0 = pt1.y - dy * t1;
    const z0 = pt1.z - dz * t1;
    return Hailstone{
        .p = Coord3{ .x = x0, .y = y0, .z = z0 },
        .v = Coord3{ .x = dx, .y = dy, .z = dz },
    };
}

/// True when `v` is within tolerance of an integer.
fn isInt(v: f64) bool {
    return areClose(v, std.math.round(v));
}

/// Approximate float equality with a fixed absolute tolerance.
fn areClose(a: f64, b: f64) bool {
    return @fabs(a - b) < 0.01;
}

/// Part 1: counts future pairwise XY intersections inside the test area.
/// Part 2: brute-forces integer rock velocities (vx, vy) in [-1000, 1000],
/// solves for the throw position, and validates against a third stone.
pub fn main(allocator: std.mem.Allocator, args: []const [:0]u8) anyerror!void {
    const filename = args[0];

    var stones = std.ArrayList(Hailstone).init(allocator);
    defer stones.deinit();
    var iter = try bufIter.iterLines(filename);
    while (try iter.next()) |line| {
        const stone = try parseHailstone(line);
        // std.debug.print("{any}\n", .{stone});
        try stones.append(stone);
    }

    var part1: usize = 0;
    var parallel: struct { usize, usize } = undefined;
    for (stones.items, 0..) |a, i| {
        for (stones.items[(i + 1)..], (i + 1)..) |b, j| {
            // std.debug.print("A: {any}\n", .{a});
            // std.debug.print("B: {any}\n", .{b});
            const int = intersectionXY(a, b);
            // std.debug.print("-> {?any}\n", .{int});
            if (int) |hit| {
                if (isValid(hit)) {
                    part1 += 1;
                }
            } else {
                std.debug.print("parallel hailstones:\n", .{});
                std.debug.print("A {d}: {any}\n", .{ i, a });
                std.debug.print("B {d}: {any}\n", .{ j, b });
                parallel = .{ i, j };
                std.debug.print("{d} = {d} = {d}?\n", .{ a.v.x / b.v.x, a.v.y / b.v.y, a.v.z / b.v.z });
            }
        }
    }
    std.debug.print("part 1: {d}\n", .{part1});

    // NOTE(review): indices 1..3 skip stone 0 — presumably to avoid a
    // degenerate/parallel stone in this puzzle input; confirm.
    const s0 = stones.items[1];
    const s1 = stones.items[2];
    const s2 = stones.items[3];
    const p1 = s0.p;
    const p2 = s1.p;
    const v1 = s0.v;
    const v2 = s1.v;

    for (0..2001) |avx| {
        const vx: f64 = @as(f64, @floatFromInt(avx)) - 1000;
        for (0..2001) |avy| {
            const vy: f64 = @as(f64, @floatFromInt(avy)) - 1000;

            // In the rock's reference frame, both stones must pass through
            // the throw position; solve the resulting 2x2 linear system.
            const num = (p2.y - p1.y) * (vx - v1.x) - (p2.x - p1.x) * (vy - v1.y);
            const den = (vy - v2.y) * (vx - v1.x) - (vx - v2.x) * (vy - v1.y);
            const t2 = num / den;
            const t1 = ((vy - v2.y) * t2 - (p2.y - p1.y)) / (vy - v1.y);

            const px = p1.x + (v1.x - vx) * t1;
            const py = p1.y + (v1.y - vy) * t1;

            const z1 = p1.z + t1 * v1.z;
            const z2 = p2.z + t2 * v2.z;
            const vz = (z2 - z1) / (t2 - t1);
            const pz = p1.z + (v1.z - vz) * t1;

            if (isInt(t1) and isInt(t2) and isInt(vz)) {
                // Candidate rock; confirm it also meets a third stone at a
                // single consistent time in all three dimensions.
                const s = Hailstone{ .p = Coord3{ .x = px, .y = py, .z = pz }, .v = Coord3{ .x = vx, .y = vy, .z = vz } };
                if (intersectionXY(s, s2)) |hit2| {
                    if (areClose(hit2.t1, hit2.t2)) {
                        const t3 = hit2.t1;
                        const z3 = s2.p.z + t3 * s2.v.z;
                        const sz3 = s.p.z + t3 * s.v.z;
                        if (areClose(z3, sz3)) {
                            // std.debug.print("vx={d}, vy={d}: t1={d}/t2={d}: {any} {any}, p=({d},{d},{d})\n", .{ vx, vy, t1, t2, whereAt(s0, t1), whereAt(s1, t2), px, py, pz });
                            std.debug.print("part 2: {d}\n", .{px + py + pz});
                        }
                    }
                }
            }
        }
    }

    // std.debug.print("part 2: {d}\n", .{sum2});
}

// 697869420003814.2
// 642678186425533.1

const expectEqualDeep = std.testing.expectEqualDeep;
const expectEqual = std.testing.expectEqual;
const expect = std.testing.expect;

test "intersect" {
    const a = try parseHailstone("19, 13, 30 @ -2, 1, -2");
    const b = try parseHailstone("18, 19, 22 @ -1, -1, -2");

    const int = intersectionXY(a, b);
    _ = int;
    // try expectEqual(int, Coord3{ .x = 14.333, .y = 15.333, .z = 0 });
}
https://raw.githubusercontent.com/danvk/aoc2023/2376e663be8bfbf92c1ae50716b49500d96c966d/src/day24.zig
pub const __builtin_bswap16 = @import("std").zig.c_builtins.__builtin_bswap16; pub const __builtin_bswap32 = @import("std").zig.c_builtins.__builtin_bswap32; pub const __builtin_bswap64 = @import("std").zig.c_builtins.__builtin_bswap64; pub const __builtin_signbit = @import("std").zig.c_builtins.__builtin_signbit; pub const __builtin_signbitf = @import("std").zig.c_builtins.__builtin_signbitf; pub const __builtin_popcount = @import("std").zig.c_builtins.__builtin_popcount; pub const __builtin_ctz = @import("std").zig.c_builtins.__builtin_ctz; pub const __builtin_clz = @import("std").zig.c_builtins.__builtin_clz; pub const __builtin_sqrt = @import("std").zig.c_builtins.__builtin_sqrt; pub const __builtin_sqrtf = @import("std").zig.c_builtins.__builtin_sqrtf; pub const __builtin_sin = @import("std").zig.c_builtins.__builtin_sin; pub const __builtin_sinf = @import("std").zig.c_builtins.__builtin_sinf; pub const __builtin_cos = @import("std").zig.c_builtins.__builtin_cos; pub const __builtin_cosf = @import("std").zig.c_builtins.__builtin_cosf; pub const __builtin_exp = @import("std").zig.c_builtins.__builtin_exp; pub const __builtin_expf = @import("std").zig.c_builtins.__builtin_expf; pub const __builtin_exp2 = @import("std").zig.c_builtins.__builtin_exp2; pub const __builtin_exp2f = @import("std").zig.c_builtins.__builtin_exp2f; pub const __builtin_log = @import("std").zig.c_builtins.__builtin_log; pub const __builtin_logf = @import("std").zig.c_builtins.__builtin_logf; pub const __builtin_log2 = @import("std").zig.c_builtins.__builtin_log2; pub const __builtin_log2f = @import("std").zig.c_builtins.__builtin_log2f; pub const __builtin_log10 = @import("std").zig.c_builtins.__builtin_log10; pub const __builtin_log10f = @import("std").zig.c_builtins.__builtin_log10f; pub const __builtin_abs = @import("std").zig.c_builtins.__builtin_abs; pub const __builtin_fabs = @import("std").zig.c_builtins.__builtin_fabs; pub const __builtin_fabsf = 
@import("std").zig.c_builtins.__builtin_fabsf; pub const __builtin_floor = @import("std").zig.c_builtins.__builtin_floor; pub const __builtin_floorf = @import("std").zig.c_builtins.__builtin_floorf; pub const __builtin_ceil = @import("std").zig.c_builtins.__builtin_ceil; pub const __builtin_ceilf = @import("std").zig.c_builtins.__builtin_ceilf; pub const __builtin_trunc = @import("std").zig.c_builtins.__builtin_trunc; pub const __builtin_truncf = @import("std").zig.c_builtins.__builtin_truncf; pub const __builtin_round = @import("std").zig.c_builtins.__builtin_round; pub const __builtin_roundf = @import("std").zig.c_builtins.__builtin_roundf; pub const __builtin_strlen = @import("std").zig.c_builtins.__builtin_strlen; pub const __builtin_strcmp = @import("std").zig.c_builtins.__builtin_strcmp; pub const __builtin_object_size = @import("std").zig.c_builtins.__builtin_object_size; pub const __builtin___memset_chk = @import("std").zig.c_builtins.__builtin___memset_chk; pub const __builtin_memset = @import("std").zig.c_builtins.__builtin_memset; pub const __builtin___memcpy_chk = @import("std").zig.c_builtins.__builtin___memcpy_chk; pub const __builtin_memcpy = @import("std").zig.c_builtins.__builtin_memcpy; pub const __builtin_expect = @import("std").zig.c_builtins.__builtin_expect; pub const __builtin_nanf = @import("std").zig.c_builtins.__builtin_nanf; pub const __builtin_huge_valf = @import("std").zig.c_builtins.__builtin_huge_valf; pub const __builtin_inff = @import("std").zig.c_builtins.__builtin_inff; pub const __builtin_isnan = @import("std").zig.c_builtins.__builtin_isnan; pub const __builtin_isinf = @import("std").zig.c_builtins.__builtin_isinf; pub const __builtin_isinf_sign = @import("std").zig.c_builtins.__builtin_isinf_sign; pub const __builtin_va_list = [*c]u8; pub const va_list = __builtin_va_list; pub const __gnuc_va_list = __builtin_va_list; pub const struct_Vector2 = extern struct { x: f32, y: f32, }; pub const Vector2 = struct_Vector2; pub const 
struct_Vector3 = extern struct { x: f32, y: f32, z: f32, }; pub const Vector3 = struct_Vector3; pub const struct_Vector4 = extern struct { x: f32, y: f32, z: f32, w: f32, }; pub const Vector4 = struct_Vector4; pub const Quaternion = Vector4; pub const struct_Matrix = extern struct { m0: f32, m4: f32, m8: f32, m12: f32, m1: f32, m5: f32, m9: f32, m13: f32, m2: f32, m6: f32, m10: f32, m14: f32, m3: f32, m7: f32, m11: f32, m15: f32, }; pub const Matrix = struct_Matrix; pub const struct_Color = extern struct { r: u8, g: u8, b: u8, a: u8, }; pub const Color = struct_Color; pub const struct_Rectangle = extern struct { x: f32, y: f32, width: f32, height: f32, }; pub const Rectangle = struct_Rectangle; pub const struct_Image = extern struct { data: ?*anyopaque, width: c_int, height: c_int, mipmaps: c_int, format: c_int, }; pub const Image = struct_Image; pub const struct_Texture = extern struct { id: c_uint, width: c_int, height: c_int, mipmaps: c_int, format: c_int, }; pub const Texture = struct_Texture; pub const Texture2D = Texture; pub const TextureCubemap = Texture; pub const struct_RenderTexture = extern struct { id: c_uint, texture: Texture, depth: Texture, }; pub const RenderTexture = struct_RenderTexture; pub const RenderTexture2D = RenderTexture; pub const struct_NPatchInfo = extern struct { source: Rectangle, left: c_int, top: c_int, right: c_int, bottom: c_int, layout: c_int, }; pub const NPatchInfo = struct_NPatchInfo; pub const struct_GlyphInfo = extern struct { value: c_int, offsetX: c_int, offsetY: c_int, advanceX: c_int, image: Image, }; pub const GlyphInfo = struct_GlyphInfo; pub const struct_Font = extern struct { baseSize: c_int, glyphCount: c_int, glyphPadding: c_int, texture: Texture2D, recs: [*c]Rectangle, glyphs: [*c]GlyphInfo, }; pub const Font = struct_Font; pub const struct_Camera3D = extern struct { position: Vector3, target: Vector3, up: Vector3, fovy: f32, projection: c_int, }; pub const Camera3D = struct_Camera3D; pub const Camera = Camera3D; 
pub const struct_Camera2D = extern struct { offset: Vector2, target: Vector2, rotation: f32, zoom: f32, }; pub const Camera2D = struct_Camera2D; pub const struct_Mesh = extern struct { vertexCount: c_int, triangleCount: c_int, vertices: [*c]f32, texcoords: [*c]f32, texcoords2: [*c]f32, normals: [*c]f32, tangents: [*c]f32, colors: [*c]u8, indices: [*c]c_ushort, animVertices: [*c]f32, animNormals: [*c]f32, boneIds: [*c]u8, boneWeights: [*c]f32, vaoId: c_uint, vboId: [*c]c_uint, }; pub const Mesh = struct_Mesh; pub const struct_Shader = extern struct { id: c_uint, locs: [*c]c_int, }; pub const Shader = struct_Shader; pub const struct_MaterialMap = extern struct { texture: Texture2D, color: Color, value: f32, }; pub const MaterialMap = struct_MaterialMap; pub const struct_Material = extern struct { shader: Shader, maps: [*c]MaterialMap, params: [4]f32, }; pub const Material = struct_Material; pub const struct_Transform = extern struct { translation: Vector3, rotation: Quaternion, scale: Vector3, }; pub const Transform = struct_Transform; pub const struct_BoneInfo = extern struct { name: [32]u8, parent: c_int, }; pub const BoneInfo = struct_BoneInfo; pub const struct_Model = extern struct { transform: Matrix, meshCount: c_int, materialCount: c_int, meshes: [*c]Mesh, materials: [*c]Material, meshMaterial: [*c]c_int, boneCount: c_int, bones: [*c]BoneInfo, bindPose: [*c]Transform, }; pub const Model = struct_Model; pub const struct_ModelAnimation = extern struct { boneCount: c_int, frameCount: c_int, bones: [*c]BoneInfo, framePoses: [*c][*c]Transform, }; pub const ModelAnimation = struct_ModelAnimation; pub const struct_Ray = extern struct { position: Vector3, direction: Vector3, }; pub const Ray = struct_Ray; pub const struct_RayCollision = extern struct { hit: bool, distance: f32, point: Vector3, normal: Vector3, }; pub const RayCollision = struct_RayCollision; pub const struct_BoundingBox = extern struct { min: Vector3, max: Vector3, }; pub const BoundingBox = 
struct_BoundingBox; pub const struct_Wave = extern struct { frameCount: c_uint, sampleRate: c_uint, sampleSize: c_uint, channels: c_uint, data: ?*anyopaque, }; pub const Wave = struct_Wave; pub const struct_rAudioBuffer = opaque {}; pub const rAudioBuffer = struct_rAudioBuffer; pub const struct_AudioStream = extern struct { buffer: ?*rAudioBuffer, sampleRate: c_uint, sampleSize: c_uint, channels: c_uint, }; pub const AudioStream = struct_AudioStream; pub const struct_Sound = extern struct { stream: AudioStream, frameCount: c_uint, }; pub const Sound = struct_Sound; pub const struct_Music = extern struct { stream: AudioStream, frameCount: c_uint, looping: bool, ctxType: c_int, ctxData: ?*anyopaque, }; pub const Music = struct_Music; pub const struct_VrDeviceInfo = extern struct { hResolution: c_int, vResolution: c_int, hScreenSize: f32, vScreenSize: f32, vScreenCenter: f32, eyeToScreenDistance: f32, lensSeparationDistance: f32, interpupillaryDistance: f32, lensDistortionValues: [4]f32, chromaAbCorrection: [4]f32, }; pub const VrDeviceInfo = struct_VrDeviceInfo; pub const struct_VrStereoConfig = extern struct { projection: [2]Matrix, viewOffset: [2]Matrix, leftLensCenter: [2]f32, rightLensCenter: [2]f32, leftScreenCenter: [2]f32, rightScreenCenter: [2]f32, scale: [2]f32, scaleIn: [2]f32, }; pub const VrStereoConfig = struct_VrStereoConfig; pub const FLAG_VSYNC_HINT: c_int = 64; pub const FLAG_FULLSCREEN_MODE: c_int = 2; pub const FLAG_WINDOW_RESIZABLE: c_int = 4; pub const FLAG_WINDOW_UNDECORATED: c_int = 8; pub const FLAG_WINDOW_HIDDEN: c_int = 128; pub const FLAG_WINDOW_MINIMIZED: c_int = 512; pub const FLAG_WINDOW_MAXIMIZED: c_int = 1024; pub const FLAG_WINDOW_UNFOCUSED: c_int = 2048; pub const FLAG_WINDOW_TOPMOST: c_int = 4096; pub const FLAG_WINDOW_ALWAYS_RUN: c_int = 256; pub const FLAG_WINDOW_TRANSPARENT: c_int = 16; pub const FLAG_WINDOW_HIGHDPI: c_int = 8192; pub const FLAG_MSAA_4X_HINT: c_int = 32; pub const FLAG_INTERLACED_HINT: c_int = 65536; pub const 
ConfigFlags = c_uint; pub const LOG_ALL: c_int = 0; pub const LOG_TRACE: c_int = 1; pub const LOG_DEBUG: c_int = 2; pub const LOG_INFO: c_int = 3; pub const LOG_WARNING: c_int = 4; pub const LOG_ERROR: c_int = 5; pub const LOG_FATAL: c_int = 6; pub const LOG_NONE: c_int = 7; pub const TraceLogLevel = c_uint; pub const KEY_NULL: c_int = 0; pub const KEY_APOSTROPHE: c_int = 39; pub const KEY_COMMA: c_int = 44; pub const KEY_MINUS: c_int = 45; pub const KEY_PERIOD: c_int = 46; pub const KEY_SLASH: c_int = 47; pub const KEY_ZERO: c_int = 48; pub const KEY_ONE: c_int = 49; pub const KEY_TWO: c_int = 50; pub const KEY_THREE: c_int = 51; pub const KEY_FOUR: c_int = 52; pub const KEY_FIVE: c_int = 53; pub const KEY_SIX: c_int = 54; pub const KEY_SEVEN: c_int = 55; pub const KEY_EIGHT: c_int = 56; pub const KEY_NINE: c_int = 57; pub const KEY_SEMICOLON: c_int = 59; pub const KEY_EQUAL: c_int = 61; pub const KEY_A: c_int = 65; pub const KEY_B: c_int = 66; pub const KEY_C: c_int = 67; pub const KEY_D: c_int = 68; pub const KEY_E: c_int = 69; pub const KEY_F: c_int = 70; pub const KEY_G: c_int = 71; pub const KEY_H: c_int = 72; pub const KEY_I: c_int = 73; pub const KEY_J: c_int = 74; pub const KEY_K: c_int = 75; pub const KEY_L: c_int = 76; pub const KEY_M: c_int = 77; pub const KEY_N: c_int = 78; pub const KEY_O: c_int = 79; pub const KEY_P: c_int = 80; pub const KEY_Q: c_int = 81; pub const KEY_R: c_int = 82; pub const KEY_S: c_int = 83; pub const KEY_T: c_int = 84; pub const KEY_U: c_int = 85; pub const KEY_V: c_int = 86; pub const KEY_W: c_int = 87; pub const KEY_X: c_int = 88; pub const KEY_Y: c_int = 89; pub const KEY_Z: c_int = 90; pub const KEY_LEFT_BRACKET: c_int = 91; pub const KEY_BACKSLASH: c_int = 92; pub const KEY_RIGHT_BRACKET: c_int = 93; pub const KEY_GRAVE: c_int = 96; pub const KEY_SPACE: c_int = 32; pub const KEY_ESCAPE: c_int = 256; pub const KEY_ENTER: c_int = 257; pub const KEY_TAB: c_int = 258; pub const KEY_BACKSPACE: c_int = 259; pub const 
KEY_INSERT: c_int = 260; pub const KEY_DELETE: c_int = 261; pub const KEY_RIGHT: c_int = 262; pub const KEY_LEFT: c_int = 263; pub const KEY_DOWN: c_int = 264; pub const KEY_UP: c_int = 265; pub const KEY_PAGE_UP: c_int = 266; pub const KEY_PAGE_DOWN: c_int = 267; pub const KEY_HOME: c_int = 268; pub const KEY_END: c_int = 269; pub const KEY_CAPS_LOCK: c_int = 280; pub const KEY_SCROLL_LOCK: c_int = 281; pub const KEY_NUM_LOCK: c_int = 282; pub const KEY_PRINT_SCREEN: c_int = 283; pub const KEY_PAUSE: c_int = 284; pub const KEY_F1: c_int = 290; pub const KEY_F2: c_int = 291; pub const KEY_F3: c_int = 292; pub const KEY_F4: c_int = 293; pub const KEY_F5: c_int = 294; pub const KEY_F6: c_int = 295; pub const KEY_F7: c_int = 296; pub const KEY_F8: c_int = 297; pub const KEY_F9: c_int = 298; pub const KEY_F10: c_int = 299; pub const KEY_F11: c_int = 300; pub const KEY_F12: c_int = 301; pub const KEY_LEFT_SHIFT: c_int = 340; pub const KEY_LEFT_CONTROL: c_int = 341; pub const KEY_LEFT_ALT: c_int = 342; pub const KEY_LEFT_SUPER: c_int = 343; pub const KEY_RIGHT_SHIFT: c_int = 344; pub const KEY_RIGHT_CONTROL: c_int = 345; pub const KEY_RIGHT_ALT: c_int = 346; pub const KEY_RIGHT_SUPER: c_int = 347; pub const KEY_KB_MENU: c_int = 348; pub const KEY_KP_0: c_int = 320; pub const KEY_KP_1: c_int = 321; pub const KEY_KP_2: c_int = 322; pub const KEY_KP_3: c_int = 323; pub const KEY_KP_4: c_int = 324; pub const KEY_KP_5: c_int = 325; pub const KEY_KP_6: c_int = 326; pub const KEY_KP_7: c_int = 327; pub const KEY_KP_8: c_int = 328; pub const KEY_KP_9: c_int = 329; pub const KEY_KP_DECIMAL: c_int = 330; pub const KEY_KP_DIVIDE: c_int = 331; pub const KEY_KP_MULTIPLY: c_int = 332; pub const KEY_KP_SUBTRACT: c_int = 333; pub const KEY_KP_ADD: c_int = 334; pub const KEY_KP_ENTER: c_int = 335; pub const KEY_KP_EQUAL: c_int = 336; pub const KEY_BACK: c_int = 4; pub const KEY_MENU: c_int = 82; pub const KEY_VOLUME_UP: c_int = 24; pub const KEY_VOLUME_DOWN: c_int = 25; pub const 
// Generated raylib bindings: C enums are flattened into plain `c_int`
// constants, and each enum type itself is aliased to `c_uint`.
// NOTE(review): `KeyboardKey = c_uint;` completes a `pub const` declaration
// that begins on the previous (unseen) line of this generated file.
// Mouse buttons (0-6), mouse cursors (0-10), gamepad buttons (0-17) and the
// first four gamepad axes.
KeyboardKey = c_uint; pub const MOUSE_BUTTON_LEFT: c_int = 0; pub const MOUSE_BUTTON_RIGHT: c_int = 1; pub const MOUSE_BUTTON_MIDDLE: c_int = 2; pub const MOUSE_BUTTON_SIDE: c_int = 3; pub const MOUSE_BUTTON_EXTRA: c_int = 4; pub const MOUSE_BUTTON_FORWARD: c_int = 5; pub const MOUSE_BUTTON_BACK: c_int = 6; pub const MouseButton = c_uint; pub const MOUSE_CURSOR_DEFAULT: c_int = 0; pub const MOUSE_CURSOR_ARROW: c_int = 1; pub const MOUSE_CURSOR_IBEAM: c_int = 2; pub const MOUSE_CURSOR_CROSSHAIR: c_int = 3; pub const MOUSE_CURSOR_POINTING_HAND: c_int = 4; pub const MOUSE_CURSOR_RESIZE_EW: c_int = 5; pub const MOUSE_CURSOR_RESIZE_NS: c_int = 6; pub const MOUSE_CURSOR_RESIZE_NWSE: c_int = 7; pub const MOUSE_CURSOR_RESIZE_NESW: c_int = 8; pub const MOUSE_CURSOR_RESIZE_ALL: c_int = 9; pub const MOUSE_CURSOR_NOT_ALLOWED: c_int = 10; pub const MouseCursor = c_uint; pub const GAMEPAD_BUTTON_UNKNOWN: c_int = 0; pub const GAMEPAD_BUTTON_LEFT_FACE_UP: c_int = 1; pub const GAMEPAD_BUTTON_LEFT_FACE_RIGHT: c_int = 2; pub const GAMEPAD_BUTTON_LEFT_FACE_DOWN: c_int = 3; pub const GAMEPAD_BUTTON_LEFT_FACE_LEFT: c_int = 4; pub const GAMEPAD_BUTTON_RIGHT_FACE_UP: c_int = 5; pub const GAMEPAD_BUTTON_RIGHT_FACE_RIGHT: c_int = 6; pub const GAMEPAD_BUTTON_RIGHT_FACE_DOWN: c_int = 7; pub const GAMEPAD_BUTTON_RIGHT_FACE_LEFT: c_int = 8; pub const GAMEPAD_BUTTON_LEFT_TRIGGER_1: c_int = 9; pub const GAMEPAD_BUTTON_LEFT_TRIGGER_2: c_int = 10; pub const GAMEPAD_BUTTON_RIGHT_TRIGGER_1: c_int = 11; pub const GAMEPAD_BUTTON_RIGHT_TRIGGER_2: c_int = 12; pub const GAMEPAD_BUTTON_MIDDLE_LEFT: c_int = 13; pub const GAMEPAD_BUTTON_MIDDLE: c_int = 14; pub const GAMEPAD_BUTTON_MIDDLE_RIGHT: c_int = 15; pub const GAMEPAD_BUTTON_LEFT_THUMB: c_int = 16; pub const GAMEPAD_BUTTON_RIGHT_THUMB: c_int = 17; pub const GamepadButton = c_uint; pub const GAMEPAD_AXIS_LEFT_X: c_int = 0; pub const GAMEPAD_AXIS_LEFT_Y: c_int = 1; pub const GAMEPAD_AXIS_RIGHT_X: c_int = 2; pub const GAMEPAD_AXIS_RIGHT_Y: c_int = 3; pub 
// Remaining gamepad axes (triggers), material map indices
// (MaterialMapIndex, 0-10), shader vertex/matrix/map locations
// (ShaderLocationIndex, 0-25) and the start of ShaderUniformDataType.
const GAMEPAD_AXIS_LEFT_TRIGGER: c_int = 4; pub const GAMEPAD_AXIS_RIGHT_TRIGGER: c_int = 5; pub const GamepadAxis = c_uint; pub const MATERIAL_MAP_ALBEDO: c_int = 0; pub const MATERIAL_MAP_METALNESS: c_int = 1; pub const MATERIAL_MAP_NORMAL: c_int = 2; pub const MATERIAL_MAP_ROUGHNESS: c_int = 3; pub const MATERIAL_MAP_OCCLUSION: c_int = 4; pub const MATERIAL_MAP_EMISSION: c_int = 5; pub const MATERIAL_MAP_HEIGHT: c_int = 6; pub const MATERIAL_MAP_CUBEMAP: c_int = 7; pub const MATERIAL_MAP_IRRADIANCE: c_int = 8; pub const MATERIAL_MAP_PREFILTER: c_int = 9; pub const MATERIAL_MAP_BRDF: c_int = 10; pub const MaterialMapIndex = c_uint; pub const SHADER_LOC_VERTEX_POSITION: c_int = 0; pub const SHADER_LOC_VERTEX_TEXCOORD01: c_int = 1; pub const SHADER_LOC_VERTEX_TEXCOORD02: c_int = 2; pub const SHADER_LOC_VERTEX_NORMAL: c_int = 3; pub const SHADER_LOC_VERTEX_TANGENT: c_int = 4; pub const SHADER_LOC_VERTEX_COLOR: c_int = 5; pub const SHADER_LOC_MATRIX_MVP: c_int = 6; pub const SHADER_LOC_MATRIX_VIEW: c_int = 7; pub const SHADER_LOC_MATRIX_PROJECTION: c_int = 8; pub const SHADER_LOC_MATRIX_MODEL: c_int = 9; pub const SHADER_LOC_MATRIX_NORMAL: c_int = 10; pub const SHADER_LOC_VECTOR_VIEW: c_int = 11; pub const SHADER_LOC_COLOR_DIFFUSE: c_int = 12; pub const SHADER_LOC_COLOR_SPECULAR: c_int = 13; pub const SHADER_LOC_COLOR_AMBIENT: c_int = 14; pub const SHADER_LOC_MAP_ALBEDO: c_int = 15; pub const SHADER_LOC_MAP_METALNESS: c_int = 16; pub const SHADER_LOC_MAP_NORMAL: c_int = 17; pub const SHADER_LOC_MAP_ROUGHNESS: c_int = 18; pub const SHADER_LOC_MAP_OCCLUSION: c_int = 19; pub const SHADER_LOC_MAP_EMISSION: c_int = 20; pub const SHADER_LOC_MAP_HEIGHT: c_int = 21; pub const SHADER_LOC_MAP_CUBEMAP: c_int = 22; pub const SHADER_LOC_MAP_IRRADIANCE: c_int = 23; pub const SHADER_LOC_MAP_PREFILTER: c_int = 24; pub const SHADER_LOC_MAP_BRDF: c_int = 25; pub const ShaderLocationIndex = c_uint; pub const SHADER_UNIFORM_FLOAT: c_int = 0; pub const SHADER_UNIFORM_VEC2: c_int = 1; pub 
// Shader uniform and vertex-attribute data types, pixel formats
// (PixelFormat -- note values start at 1, not 0) and the start of the
// TextureFilter enum.
const SHADER_UNIFORM_VEC3: c_int = 2; pub const SHADER_UNIFORM_VEC4: c_int = 3; pub const SHADER_UNIFORM_INT: c_int = 4; pub const SHADER_UNIFORM_IVEC2: c_int = 5; pub const SHADER_UNIFORM_IVEC3: c_int = 6; pub const SHADER_UNIFORM_IVEC4: c_int = 7; pub const SHADER_UNIFORM_SAMPLER2D: c_int = 8; pub const ShaderUniformDataType = c_uint; pub const SHADER_ATTRIB_FLOAT: c_int = 0; pub const SHADER_ATTRIB_VEC2: c_int = 1; pub const SHADER_ATTRIB_VEC3: c_int = 2; pub const SHADER_ATTRIB_VEC4: c_int = 3; pub const ShaderAttributeDataType = c_uint; pub const PIXELFORMAT_UNCOMPRESSED_GRAYSCALE: c_int = 1; pub const PIXELFORMAT_UNCOMPRESSED_GRAY_ALPHA: c_int = 2; pub const PIXELFORMAT_UNCOMPRESSED_R5G6B5: c_int = 3; pub const PIXELFORMAT_UNCOMPRESSED_R8G8B8: c_int = 4; pub const PIXELFORMAT_UNCOMPRESSED_R5G5B5A1: c_int = 5; pub const PIXELFORMAT_UNCOMPRESSED_R4G4B4A4: c_int = 6; pub const PIXELFORMAT_UNCOMPRESSED_R8G8B8A8: c_int = 7; pub const PIXELFORMAT_UNCOMPRESSED_R32: c_int = 8; pub const PIXELFORMAT_UNCOMPRESSED_R32G32B32: c_int = 9; pub const PIXELFORMAT_UNCOMPRESSED_R32G32B32A32: c_int = 10; pub const PIXELFORMAT_COMPRESSED_DXT1_RGB: c_int = 11; pub const PIXELFORMAT_COMPRESSED_DXT1_RGBA: c_int = 12; pub const PIXELFORMAT_COMPRESSED_DXT3_RGBA: c_int = 13; pub const PIXELFORMAT_COMPRESSED_DXT5_RGBA: c_int = 14; pub const PIXELFORMAT_COMPRESSED_ETC1_RGB: c_int = 15; pub const PIXELFORMAT_COMPRESSED_ETC2_RGB: c_int = 16; pub const PIXELFORMAT_COMPRESSED_ETC2_EAC_RGBA: c_int = 17; pub const PIXELFORMAT_COMPRESSED_PVRT_RGB: c_int = 18; pub const PIXELFORMAT_COMPRESSED_PVRT_RGBA: c_int = 19; pub const PIXELFORMAT_COMPRESSED_ASTC_4x4_RGBA: c_int = 20; pub const PIXELFORMAT_COMPRESSED_ASTC_8x8_RGBA: c_int = 21; pub const PixelFormat = c_uint; pub const TEXTURE_FILTER_POINT: c_int = 0; pub const TEXTURE_FILTER_BILINEAR: c_int = 1; pub const TEXTURE_FILTER_TRILINEAR: c_int = 2; pub const TEXTURE_FILTER_ANISOTROPIC_4X: c_int = 3; pub const TEXTURE_FILTER_ANISOTROPIC_8X: c_int 
// Texture filters/wraps, cubemap layouts, font types, blend modes, gesture
// bit flags (powers of two, combinable), camera modes/projections and the
// start of the NPatch layout enum (completed on the next line).
= 4; pub const TEXTURE_FILTER_ANISOTROPIC_16X: c_int = 5; pub const TextureFilter = c_uint; pub const TEXTURE_WRAP_REPEAT: c_int = 0; pub const TEXTURE_WRAP_CLAMP: c_int = 1; pub const TEXTURE_WRAP_MIRROR_REPEAT: c_int = 2; pub const TEXTURE_WRAP_MIRROR_CLAMP: c_int = 3; pub const TextureWrap = c_uint; pub const CUBEMAP_LAYOUT_AUTO_DETECT: c_int = 0; pub const CUBEMAP_LAYOUT_LINE_VERTICAL: c_int = 1; pub const CUBEMAP_LAYOUT_LINE_HORIZONTAL: c_int = 2; pub const CUBEMAP_LAYOUT_CROSS_THREE_BY_FOUR: c_int = 3; pub const CUBEMAP_LAYOUT_CROSS_FOUR_BY_THREE: c_int = 4; pub const CUBEMAP_LAYOUT_PANORAMA: c_int = 5; pub const CubemapLayout = c_uint; pub const FONT_DEFAULT: c_int = 0; pub const FONT_BITMAP: c_int = 1; pub const FONT_SDF: c_int = 2; pub const FontType = c_uint; pub const BLEND_ALPHA: c_int = 0; pub const BLEND_ADDITIVE: c_int = 1; pub const BLEND_MULTIPLIED: c_int = 2; pub const BLEND_ADD_COLORS: c_int = 3; pub const BLEND_SUBTRACT_COLORS: c_int = 4; pub const BLEND_ALPHA_PREMUL: c_int = 5; pub const BLEND_CUSTOM: c_int = 6; pub const BlendMode = c_uint; pub const GESTURE_NONE: c_int = 0; pub const GESTURE_TAP: c_int = 1; pub const GESTURE_DOUBLETAP: c_int = 2; pub const GESTURE_HOLD: c_int = 4; pub const GESTURE_DRAG: c_int = 8; pub const GESTURE_SWIPE_RIGHT: c_int = 16; pub const GESTURE_SWIPE_LEFT: c_int = 32; pub const GESTURE_SWIPE_UP: c_int = 64; pub const GESTURE_SWIPE_DOWN: c_int = 128; pub const GESTURE_PINCH_IN: c_int = 256; pub const GESTURE_PINCH_OUT: c_int = 512; pub const Gesture = c_uint; pub const CAMERA_CUSTOM: c_int = 0; pub const CAMERA_FREE: c_int = 1; pub const CAMERA_ORBITAL: c_int = 2; pub const CAMERA_FIRST_PERSON: c_int = 3; pub const CAMERA_THIRD_PERSON: c_int = 4; pub const CameraMode = c_uint; pub const CAMERA_PERSPECTIVE: c_int = 0; pub const CAMERA_ORTHOGRAPHIC: c_int = 1; pub const CameraProjection = c_uint; pub const NPATCH_NINE_PATCH: c_int = 0; pub const NPATCH_THREE_PATCH_VERTICAL: c_int = 1; pub const 
NPATCH_THREE_PATCH_HORIZONTAL: c_int = 2; pub const NPatchLayout = c_uint; pub const TraceLogCallback = ?fn (c_int, [*c]const u8, va_list) callconv(.C) void; pub const LoadFileDataCallback = ?fn ([*c]const u8, [*c]c_uint) callconv(.C) [*c]u8; pub const SaveFileDataCallback = ?fn ([*c]const u8, ?*anyopaque, c_uint) callconv(.C) bool; pub const LoadFileTextCallback = ?fn ([*c]const u8) callconv(.C) [*c]u8; pub const SaveFileTextCallback = ?fn ([*c]const u8, [*c]u8) callconv(.C) bool; pub extern fn InitWindow(width: c_int, height: c_int, title: [*c]const u8) void; pub extern fn WindowShouldClose() bool; pub extern fn CloseWindow() void; pub extern fn IsWindowReady() bool; pub extern fn IsWindowFullscreen() bool; pub extern fn IsWindowHidden() bool; pub extern fn IsWindowMinimized() bool; pub extern fn IsWindowMaximized() bool; pub extern fn IsWindowFocused() bool; pub extern fn IsWindowResized() bool; pub extern fn IsWindowState(flag: c_uint) bool; pub extern fn SetWindowState(flags: c_uint) void; pub extern fn ClearWindowState(flags: c_uint) void; pub extern fn ToggleFullscreen() void; pub extern fn MaximizeWindow() void; pub extern fn MinimizeWindow() void; pub extern fn RestoreWindow() void; pub extern fn SetWindowIcon(image: Image) void; pub extern fn SetWindowTitle(title: [*c]const u8) void; pub extern fn SetWindowPosition(x: c_int, y: c_int) void; pub extern fn SetWindowMonitor(monitor: c_int) void; pub extern fn SetWindowMinSize(width: c_int, height: c_int) void; pub extern fn SetWindowSize(width: c_int, height: c_int) void; pub extern fn SetWindowOpacity(opacity: f32) void; pub extern fn GetWindowHandle() ?*anyopaque; pub extern fn GetScreenWidth() c_int; pub extern fn GetScreenHeight() c_int; pub extern fn GetRenderWidth() c_int; pub extern fn GetRenderHeight() c_int; pub extern fn GetMonitorCount() c_int; pub extern fn GetCurrentMonitor() c_int; pub extern fn GetMonitorPosition(monitor: c_int) Vector2; pub extern fn GetMonitorWidth(monitor: c_int) c_int; pub 
// Monitor queries, clipboard, frame control (SwapScreenBuffer /
// PollInputEvents / WaitTime), cursor visibility, the drawing-mode
// begin/end pairs (2D/3D/texture/shader/blend/scissor/VR) and shader
// loading.
// NOTE(review): WaitTime takes `ms: f32` here; later raylib releases
// changed this parameter to seconds (double) -- the signature must match
// the linked raylib version.
extern fn GetMonitorHeight(monitor: c_int) c_int; pub extern fn GetMonitorPhysicalWidth(monitor: c_int) c_int; pub extern fn GetMonitorPhysicalHeight(monitor: c_int) c_int; pub extern fn GetMonitorRefreshRate(monitor: c_int) c_int; pub extern fn GetWindowPosition() Vector2; pub extern fn GetWindowScaleDPI() Vector2; pub extern fn GetMonitorName(monitor: c_int) [*c]const u8; pub extern fn SetClipboardText(text: [*c]const u8) void; pub extern fn GetClipboardText() [*c]const u8; pub extern fn SwapScreenBuffer() void; pub extern fn PollInputEvents() void; pub extern fn WaitTime(ms: f32) void; pub extern fn ShowCursor() void; pub extern fn HideCursor() void; pub extern fn IsCursorHidden() bool; pub extern fn EnableCursor() void; pub extern fn DisableCursor() void; pub extern fn IsCursorOnScreen() bool; pub extern fn ClearBackground(color: Color) void; pub extern fn BeginDrawing() void; pub extern fn EndDrawing() void; pub extern fn BeginMode2D(camera: Camera2D) void; pub extern fn EndMode2D() void; pub extern fn BeginMode3D(camera: Camera3D) void; pub extern fn EndMode3D() void; pub extern fn BeginTextureMode(target: RenderTexture2D) void; pub extern fn EndTextureMode() void; pub extern fn BeginShaderMode(shader: Shader) void; pub extern fn EndShaderMode() void; pub extern fn BeginBlendMode(mode: c_int) void; pub extern fn EndBlendMode() void; pub extern fn BeginScissorMode(x: c_int, y: c_int, width: c_int, height: c_int) void; pub extern fn EndScissorMode() void; pub extern fn BeginVrStereoMode(config: VrStereoConfig) void; pub extern fn EndVrStereoMode() void; pub extern fn LoadVrStereoConfig(device: VrDeviceInfo) VrStereoConfig; pub extern fn UnloadVrStereoConfig(config: VrStereoConfig) void; pub extern fn LoadShader(vsFileName: [*c]const u8, fsFileName: [*c]const u8) Shader; pub extern fn LoadShaderFromMemory(vsCode: [*c]const u8, fsCode: [*c]const u8) Shader; pub extern fn GetShaderLocation(shader: Shader, uniformName: [*c]const u8) c_int; pub extern fn 
// Shader uniform setters, camera ray/matrix helpers, world<->screen
// transforms, timing (SetTargetFPS/GetFPS/GetFrameTime/GetTime), random
// values, screenshots, config flags and variadic TraceLog.
GetShaderLocationAttrib(shader: Shader, attribName: [*c]const u8) c_int; pub extern fn SetShaderValue(shader: Shader, locIndex: c_int, value: ?*const anyopaque, uniformType: c_int) void; pub extern fn SetShaderValueV(shader: Shader, locIndex: c_int, value: ?*const anyopaque, uniformType: c_int, count: c_int) void; pub extern fn SetShaderValueMatrix(shader: Shader, locIndex: c_int, mat: Matrix) void; pub extern fn SetShaderValueTexture(shader: Shader, locIndex: c_int, texture: Texture2D) void; pub extern fn UnloadShader(shader: Shader) void; pub extern fn GetMouseRay(mousePosition: Vector2, camera: Camera) Ray; pub extern fn GetCameraMatrix(camera: Camera) Matrix; pub extern fn GetCameraMatrix2D(camera: Camera2D) Matrix; pub extern fn GetWorldToScreen(position: Vector3, camera: Camera) Vector2; pub extern fn GetWorldToScreenEx(position: Vector3, camera: Camera, width: c_int, height: c_int) Vector2; pub extern fn GetWorldToScreen2D(position: Vector2, camera: Camera2D) Vector2; pub extern fn GetScreenToWorld2D(position: Vector2, camera: Camera2D) Vector2; pub extern fn SetTargetFPS(fps: c_int) void; pub extern fn GetFPS() c_int; pub extern fn GetFrameTime() f32; pub extern fn GetTime() f64; pub extern fn GetRandomValue(min: c_int, max: c_int) c_int; pub extern fn SetRandomSeed(seed: c_uint) void; pub extern fn TakeScreenshot(fileName: [*c]const u8) void; pub extern fn SetConfigFlags(flags: c_uint) void; pub extern fn TraceLog(logLevel: c_int, text: [*c]const u8, ...) 
// Log level control, memory helpers, callback registration, file data/text
// loading and saving (buffers returned by LoadFileData/LoadFileText are
// released with the matching Unload* functions) and path/directory
// utilities.
void; pub extern fn SetTraceLogLevel(logLevel: c_int) void; pub extern fn MemAlloc(size: c_int) ?*anyopaque; pub extern fn MemRealloc(ptr: ?*anyopaque, size: c_int) ?*anyopaque; pub extern fn MemFree(ptr: ?*anyopaque) void; pub extern fn SetTraceLogCallback(callback: TraceLogCallback) void; pub extern fn SetLoadFileDataCallback(callback: LoadFileDataCallback) void; pub extern fn SetSaveFileDataCallback(callback: SaveFileDataCallback) void; pub extern fn SetLoadFileTextCallback(callback: LoadFileTextCallback) void; pub extern fn SetSaveFileTextCallback(callback: SaveFileTextCallback) void; pub extern fn LoadFileData(fileName: [*c]const u8, bytesRead: [*c]c_uint) [*c]u8; pub extern fn UnloadFileData(data: [*c]u8) void; pub extern fn SaveFileData(fileName: [*c]const u8, data: ?*anyopaque, bytesToWrite: c_uint) bool; pub extern fn LoadFileText(fileName: [*c]const u8) [*c]u8; pub extern fn UnloadFileText(text: [*c]u8) void; pub extern fn SaveFileText(fileName: [*c]const u8, text: [*c]u8) bool; pub extern fn FileExists(fileName: [*c]const u8) bool; pub extern fn DirectoryExists(dirPath: [*c]const u8) bool; pub extern fn IsFileExtension(fileName: [*c]const u8, ext: [*c]const u8) bool; pub extern fn GetFileLength(fileName: [*c]const u8) c_int; pub extern fn GetFileExtension(fileName: [*c]const u8) [*c]const u8; pub extern fn GetFileName(filePath: [*c]const u8) [*c]const u8; pub extern fn GetFileNameWithoutExt(filePath: [*c]const u8) [*c]const u8; pub extern fn GetDirectoryPath(filePath: [*c]const u8) [*c]const u8; pub extern fn GetPrevDirectoryPath(dirPath: [*c]const u8) [*c]const u8; pub extern fn GetWorkingDirectory() [*c]const u8; pub extern fn GetApplicationDirectory() [*c]const u8; pub extern fn GetDirectoryFiles(dirPath: [*c]const u8, count: [*c]c_int) [*c][*c]u8; pub extern fn ClearDirectoryFiles() void; pub extern fn ChangeDirectory(dir: [*c]const u8) bool; pub extern fn IsFileDropped() bool; pub extern fn GetDroppedFiles(count: [*c]c_int) [*c][*c]u8; pub extern fn 
// Dropped-file cleanup, file mod time, compression/Base64 helpers,
// persistent storage values, URL opening, and keyboard/gamepad/mouse
// input queries.
ClearDroppedFiles() void; pub extern fn GetFileModTime(fileName: [*c]const u8) c_long; pub extern fn CompressData(data: [*c]const u8, dataLength: c_int, compDataLength: [*c]c_int) [*c]u8; pub extern fn DecompressData(compData: [*c]const u8, compDataLength: c_int, dataLength: [*c]c_int) [*c]u8; pub extern fn EncodeDataBase64(data: [*c]const u8, dataLength: c_int, outputLength: [*c]c_int) [*c]u8; pub extern fn DecodeDataBase64(data: [*c]const u8, outputLength: [*c]c_int) [*c]u8; pub extern fn SaveStorageValue(position: c_uint, value: c_int) bool; pub extern fn LoadStorageValue(position: c_uint) c_int; pub extern fn OpenURL(url: [*c]const u8) void; pub extern fn IsKeyPressed(key: c_int) bool; pub extern fn IsKeyDown(key: c_int) bool; pub extern fn IsKeyReleased(key: c_int) bool; pub extern fn IsKeyUp(key: c_int) bool; pub extern fn SetExitKey(key: c_int) void; pub extern fn GetKeyPressed() c_int; pub extern fn GetCharPressed() c_int; pub extern fn IsGamepadAvailable(gamepad: c_int) bool; pub extern fn GetGamepadName(gamepad: c_int) [*c]const u8; pub extern fn IsGamepadButtonPressed(gamepad: c_int, button: c_int) bool; pub extern fn IsGamepadButtonDown(gamepad: c_int, button: c_int) bool; pub extern fn IsGamepadButtonReleased(gamepad: c_int, button: c_int) bool; pub extern fn IsGamepadButtonUp(gamepad: c_int, button: c_int) bool; pub extern fn GetGamepadButtonPressed() c_int; pub extern fn GetGamepadAxisCount(gamepad: c_int) c_int; pub extern fn GetGamepadAxisMovement(gamepad: c_int, axis: c_int) f32; pub extern fn SetGamepadMappings(mappings: [*c]const u8) c_int; pub extern fn IsMouseButtonPressed(button: c_int) bool; pub extern fn IsMouseButtonDown(button: c_int) bool; pub extern fn IsMouseButtonReleased(button: c_int) bool; pub extern fn IsMouseButtonUp(button: c_int) bool; pub extern fn GetMouseX() c_int; pub extern fn GetMouseY() c_int; pub extern fn GetMousePosition() Vector2; pub extern fn GetMouseDelta() Vector2; pub extern fn SetMousePosition(x: c_int, y: 
// Mouse offset/scale/wheel/cursor, touch input, gesture detection
// (flags from the Gesture constants above), legacy camera-control
// setters, shapes texture and basic pixel/line drawing primitives.
c_int) void; pub extern fn SetMouseOffset(offsetX: c_int, offsetY: c_int) void; pub extern fn SetMouseScale(scaleX: f32, scaleY: f32) void; pub extern fn GetMouseWheelMove() f32; pub extern fn SetMouseCursor(cursor: c_int) void; pub extern fn GetTouchX() c_int; pub extern fn GetTouchY() c_int; pub extern fn GetTouchPosition(index: c_int) Vector2; pub extern fn GetTouchPointId(index: c_int) c_int; pub extern fn GetTouchPointCount() c_int; pub extern fn SetGesturesEnabled(flags: c_uint) void; pub extern fn IsGestureDetected(gesture: c_int) bool; pub extern fn GetGestureDetected() c_int; pub extern fn GetGestureHoldDuration() f32; pub extern fn GetGestureDragVector() Vector2; pub extern fn GetGestureDragAngle() f32; pub extern fn GetGesturePinchVector() Vector2; pub extern fn GetGesturePinchAngle() f32; pub extern fn SetCameraMode(camera: Camera, mode: c_int) void; pub extern fn UpdateCamera(camera: [*c]Camera) void; pub extern fn SetCameraPanControl(keyPan: c_int) void; pub extern fn SetCameraAltControl(keyAlt: c_int) void; pub extern fn SetCameraSmoothZoomControl(keySmoothZoom: c_int) void; pub extern fn SetCameraMoveControls(keyFront: c_int, keyBack: c_int, keyRight: c_int, keyLeft: c_int, keyUp: c_int, keyDown: c_int) void; pub extern fn SetShapesTexture(texture: Texture2D, source: Rectangle) void; pub extern fn DrawPixel(posX: c_int, posY: c_int, color: Color) void; pub extern fn DrawPixelV(position: Vector2, color: Color) void; pub extern fn DrawLine(startPosX: c_int, startPosY: c_int, endPosX: c_int, endPosY: c_int, color: Color) void; pub extern fn DrawLineV(startPos: Vector2, endPos: Vector2, color: Color) void; pub extern fn DrawLineEx(startPos: Vector2, endPos: Vector2, thick: f32, color: Color) void; pub extern fn DrawLineBezier(startPos: Vector2, endPos: Vector2, thick: f32, color: Color) void; pub extern fn DrawLineBezierQuad(startPos: Vector2, endPos: Vector2, controlPos: Vector2, thick: f32, color: Color) void; pub extern fn 
// Cubic Bezier / line strip, circles (sector, gradient, lines), ellipses,
// rings and the rectangle-drawing family (solid, V, rec, pro, gradients).
DrawLineBezierCubic(startPos: Vector2, endPos: Vector2, startControlPos: Vector2, endControlPos: Vector2, thick: f32, color: Color) void; pub extern fn DrawLineStrip(points: [*c]Vector2, pointCount: c_int, color: Color) void; pub extern fn DrawCircle(centerX: c_int, centerY: c_int, radius: f32, color: Color) void; pub extern fn DrawCircleSector(center: Vector2, radius: f32, startAngle: f32, endAngle: f32, segments: c_int, color: Color) void; pub extern fn DrawCircleSectorLines(center: Vector2, radius: f32, startAngle: f32, endAngle: f32, segments: c_int, color: Color) void; pub extern fn DrawCircleGradient(centerX: c_int, centerY: c_int, radius: f32, color1: Color, color2: Color) void; pub extern fn DrawCircleV(center: Vector2, radius: f32, color: Color) void; pub extern fn DrawCircleLines(centerX: c_int, centerY: c_int, radius: f32, color: Color) void; pub extern fn DrawEllipse(centerX: c_int, centerY: c_int, radiusH: f32, radiusV: f32, color: Color) void; pub extern fn DrawEllipseLines(centerX: c_int, centerY: c_int, radiusH: f32, radiusV: f32, color: Color) void; pub extern fn DrawRing(center: Vector2, innerRadius: f32, outerRadius: f32, startAngle: f32, endAngle: f32, segments: c_int, color: Color) void; pub extern fn DrawRingLines(center: Vector2, innerRadius: f32, outerRadius: f32, startAngle: f32, endAngle: f32, segments: c_int, color: Color) void; pub extern fn DrawRectangle(posX: c_int, posY: c_int, width: c_int, height: c_int, color: Color) void; pub extern fn DrawRectangleV(position: Vector2, size: Vector2, color: Color) void; pub extern fn DrawRectangleRec(rec: Rectangle, color: Color) void; pub extern fn DrawRectanglePro(rec: Rectangle, origin: Vector2, rotation: f32, color: Color) void; pub extern fn DrawRectangleGradientV(posX: c_int, posY: c_int, width: c_int, height: c_int, color1: Color, color2: Color) void; pub extern fn DrawRectangleGradientH(posX: c_int, posY: c_int, width: c_int, height: c_int, color1: Color, color2: Color) void; pub extern fn 
// Gradient/outlined/rounded rectangles, triangles (fan/strip), polygons
// and 2D collision checks between rectangles, circles, points and lines.
DrawRectangleGradientEx(rec: Rectangle, col1: Color, col2: Color, col3: Color, col4: Color) void; pub extern fn DrawRectangleLines(posX: c_int, posY: c_int, width: c_int, height: c_int, color: Color) void; pub extern fn DrawRectangleLinesEx(rec: Rectangle, lineThick: f32, color: Color) void; pub extern fn DrawRectangleRounded(rec: Rectangle, roundness: f32, segments: c_int, color: Color) void; pub extern fn DrawRectangleRoundedLines(rec: Rectangle, roundness: f32, segments: c_int, lineThick: f32, color: Color) void; pub extern fn DrawTriangle(v1: Vector2, v2: Vector2, v3: Vector2, color: Color) void; pub extern fn DrawTriangleLines(v1: Vector2, v2: Vector2, v3: Vector2, color: Color) void; pub extern fn DrawTriangleFan(points: [*c]Vector2, pointCount: c_int, color: Color) void; pub extern fn DrawTriangleStrip(points: [*c]Vector2, pointCount: c_int, color: Color) void; pub extern fn DrawPoly(center: Vector2, sides: c_int, radius: f32, rotation: f32, color: Color) void; pub extern fn DrawPolyLines(center: Vector2, sides: c_int, radius: f32, rotation: f32, color: Color) void; pub extern fn DrawPolyLinesEx(center: Vector2, sides: c_int, radius: f32, rotation: f32, lineThick: f32, color: Color) void; pub extern fn CheckCollisionRecs(rec1: Rectangle, rec2: Rectangle) bool; pub extern fn CheckCollisionCircles(center1: Vector2, radius1: f32, center2: Vector2, radius2: f32) bool; pub extern fn CheckCollisionCircleRec(center: Vector2, radius: f32, rec: Rectangle) bool; pub extern fn CheckCollisionPointRec(point: Vector2, rec: Rectangle) bool; pub extern fn CheckCollisionPointCircle(point: Vector2, center: Vector2, radius: f32) bool; pub extern fn CheckCollisionPointTriangle(point: Vector2, p1: Vector2, p2: Vector2, p3: Vector2) bool; pub extern fn CheckCollisionLines(startPos1: Vector2, endPos1: Vector2, startPos2: Vector2, endPos2: Vector2, collisionPoint: [*c]Vector2) bool; pub extern fn CheckCollisionPointLine(point: Vector2, p1: Vector2, p2: Vector2, threshold: c_int) 
// Collision overlap rectangle, image loading (file/raw/anim/memory/
// texture/screen), export, procedural image generation (solid color,
// gradients, checked, noise, cellular) and the start of the in-place
// Image manipulation functions (pointer-to-Image parameters mutate).
bool; pub extern fn GetCollisionRec(rec1: Rectangle, rec2: Rectangle) Rectangle; pub extern fn LoadImage(fileName: [*c]const u8) Image; pub extern fn LoadImageRaw(fileName: [*c]const u8, width: c_int, height: c_int, format: c_int, headerSize: c_int) Image; pub extern fn LoadImageAnim(fileName: [*c]const u8, frames: [*c]c_int) Image; pub extern fn LoadImageFromMemory(fileType: [*c]const u8, fileData: [*c]const u8, dataSize: c_int) Image; pub extern fn LoadImageFromTexture(texture: Texture2D) Image; pub extern fn LoadImageFromScreen() Image; pub extern fn UnloadImage(image: Image) void; pub extern fn ExportImage(image: Image, fileName: [*c]const u8) bool; pub extern fn ExportImageAsCode(image: Image, fileName: [*c]const u8) bool; pub extern fn GenImageColor(width: c_int, height: c_int, color: Color) Image; pub extern fn GenImageGradientV(width: c_int, height: c_int, top: Color, bottom: Color) Image; pub extern fn GenImageGradientH(width: c_int, height: c_int, left: Color, right: Color) Image; pub extern fn GenImageGradientRadial(width: c_int, height: c_int, density: f32, inner: Color, outer: Color) Image; pub extern fn GenImageChecked(width: c_int, height: c_int, checksX: c_int, checksY: c_int, col1: Color, col2: Color) Image; pub extern fn GenImageWhiteNoise(width: c_int, height: c_int, factor: f32) Image; pub extern fn GenImageCellular(width: c_int, height: c_int, tileSize: c_int) Image; pub extern fn ImageCopy(image: Image) Image; pub extern fn ImageFromImage(image: Image, rec: Rectangle) Image; pub extern fn ImageText(text: [*c]const u8, fontSize: c_int, color: Color) Image; pub extern fn ImageTextEx(font: Font, text: [*c]const u8, fontSize: f32, spacing: f32, tint: Color) Image; pub extern fn ImageFormat(image: [*c]Image, newFormat: c_int) void; pub extern fn ImageToPOT(image: [*c]Image, fill: Color) void; pub extern fn ImageCrop(image: [*c]Image, crop: Rectangle) void; pub extern fn ImageAlphaCrop(image: [*c]Image, threshold: f32) void; pub extern fn 
// In-place Image pixel operations: alpha handling, resize, mipmaps,
// dithering, flips/rotations, color adjustments; plus pixel-color /
// palette extraction (LoadImageColors/LoadImagePalette pair with the
// matching Unload* functions) and the first image-drawing primitives.
ImageAlphaClear(image: [*c]Image, color: Color, threshold: f32) void; pub extern fn ImageAlphaMask(image: [*c]Image, alphaMask: Image) void; pub extern fn ImageAlphaPremultiply(image: [*c]Image) void; pub extern fn ImageResize(image: [*c]Image, newWidth: c_int, newHeight: c_int) void; pub extern fn ImageResizeNN(image: [*c]Image, newWidth: c_int, newHeight: c_int) void; pub extern fn ImageResizeCanvas(image: [*c]Image, newWidth: c_int, newHeight: c_int, offsetX: c_int, offsetY: c_int, fill: Color) void; pub extern fn ImageMipmaps(image: [*c]Image) void; pub extern fn ImageDither(image: [*c]Image, rBpp: c_int, gBpp: c_int, bBpp: c_int, aBpp: c_int) void; pub extern fn ImageFlipVertical(image: [*c]Image) void; pub extern fn ImageFlipHorizontal(image: [*c]Image) void; pub extern fn ImageRotateCW(image: [*c]Image) void; pub extern fn ImageRotateCCW(image: [*c]Image) void; pub extern fn ImageColorTint(image: [*c]Image, color: Color) void; pub extern fn ImageColorInvert(image: [*c]Image) void; pub extern fn ImageColorGrayscale(image: [*c]Image) void; pub extern fn ImageColorContrast(image: [*c]Image, contrast: f32) void; pub extern fn ImageColorBrightness(image: [*c]Image, brightness: c_int) void; pub extern fn ImageColorReplace(image: [*c]Image, color: Color, replace: Color) void; pub extern fn LoadImageColors(image: Image) [*c]Color; pub extern fn LoadImagePalette(image: Image, maxPaletteSize: c_int, colorCount: [*c]c_int) [*c]Color; pub extern fn UnloadImageColors(colors: [*c]Color) void; pub extern fn UnloadImagePalette(colors: [*c]Color) void; pub extern fn GetImageAlphaBorder(image: Image, threshold: f32) Rectangle; pub extern fn GetImageColor(image: Image, x: c_int, y: c_int) Color; pub extern fn ImageClearBackground(dst: [*c]Image, color: Color) void; pub extern fn ImageDrawPixel(dst: [*c]Image, posX: c_int, posY: c_int, color: Color) void; pub extern fn ImageDrawPixelV(dst: [*c]Image, position: Vector2, color: Color) void; pub extern fn ImageDrawLine(dst: 
// Remaining image-drawing functions (lines, circles, rectangles, image-
// into-image, text) and texture / render-texture loading, updating and
// configuration (mipmaps, filter, wrap).
[*c]Image, startPosX: c_int, startPosY: c_int, endPosX: c_int, endPosY: c_int, color: Color) void; pub extern fn ImageDrawLineV(dst: [*c]Image, start: Vector2, end: Vector2, color: Color) void; pub extern fn ImageDrawCircle(dst: [*c]Image, centerX: c_int, centerY: c_int, radius: c_int, color: Color) void; pub extern fn ImageDrawCircleV(dst: [*c]Image, center: Vector2, radius: c_int, color: Color) void; pub extern fn ImageDrawRectangle(dst: [*c]Image, posX: c_int, posY: c_int, width: c_int, height: c_int, color: Color) void; pub extern fn ImageDrawRectangleV(dst: [*c]Image, position: Vector2, size: Vector2, color: Color) void; pub extern fn ImageDrawRectangleRec(dst: [*c]Image, rec: Rectangle, color: Color) void; pub extern fn ImageDrawRectangleLines(dst: [*c]Image, rec: Rectangle, thick: c_int, color: Color) void; pub extern fn ImageDraw(dst: [*c]Image, src: Image, srcRec: Rectangle, dstRec: Rectangle, tint: Color) void; pub extern fn ImageDrawText(dst: [*c]Image, text: [*c]const u8, posX: c_int, posY: c_int, fontSize: c_int, color: Color) void; pub extern fn ImageDrawTextEx(dst: [*c]Image, font: Font, text: [*c]const u8, position: Vector2, fontSize: f32, spacing: f32, tint: Color) void; pub extern fn LoadTexture(fileName: [*c]const u8) Texture2D; pub extern fn LoadTextureFromImage(image: Image) Texture2D; pub extern fn LoadTextureCubemap(image: Image, layout: c_int) TextureCubemap; pub extern fn LoadRenderTexture(width: c_int, height: c_int) RenderTexture2D; pub extern fn UnloadTexture(texture: Texture2D) void; pub extern fn UnloadRenderTexture(target: RenderTexture2D) void; pub extern fn UpdateTexture(texture: Texture2D, pixels: ?*const anyopaque) void; pub extern fn UpdateTextureRec(texture: Texture2D, rec: Rectangle, pixels: ?*const anyopaque) void; pub extern fn GenTextureMipmaps(texture: [*c]Texture2D) void; pub extern fn SetTextureFilter(texture: Texture2D, filter: c_int) void; pub extern fn SetTextureWrap(texture: Texture2D, wrap: c_int) void; pub extern fn 
// Texture drawing variants (V/Ex/Rec/Quad/Tiled/Pro/NPatch/Poly), color
// conversion and blending helpers (Fade, HSV, normalize, alpha blend),
// raw pixel get/set and font loading.
DrawTexture(texture: Texture2D, posX: c_int, posY: c_int, tint: Color) void; pub extern fn DrawTextureV(texture: Texture2D, position: Vector2, tint: Color) void; pub extern fn DrawTextureEx(texture: Texture2D, position: Vector2, rotation: f32, scale: f32, tint: Color) void; pub extern fn DrawTextureRec(texture: Texture2D, source: Rectangle, position: Vector2, tint: Color) void; pub extern fn DrawTextureQuad(texture: Texture2D, tiling: Vector2, offset: Vector2, quad: Rectangle, tint: Color) void; pub extern fn DrawTextureTiled(texture: Texture2D, source: Rectangle, dest: Rectangle, origin: Vector2, rotation: f32, scale: f32, tint: Color) void; pub extern fn DrawTexturePro(texture: Texture2D, source: Rectangle, dest: Rectangle, origin: Vector2, rotation: f32, tint: Color) void; pub extern fn DrawTextureNPatch(texture: Texture2D, nPatchInfo: NPatchInfo, dest: Rectangle, origin: Vector2, rotation: f32, tint: Color) void; pub extern fn DrawTexturePoly(texture: Texture2D, center: Vector2, points: [*c]Vector2, texcoords: [*c]Vector2, pointCount: c_int, tint: Color) void; pub extern fn Fade(color: Color, alpha: f32) Color; pub extern fn ColorToInt(color: Color) c_int; pub extern fn ColorNormalize(color: Color) Vector4; pub extern fn ColorFromNormalized(normalized: Vector4) Color; pub extern fn ColorToHSV(color: Color) Vector3; pub extern fn ColorFromHSV(hue: f32, saturation: f32, value: f32) Color; pub extern fn ColorAlpha(color: Color, alpha: f32) Color; pub extern fn ColorAlphaBlend(dst: Color, src: Color, tint: Color) Color; pub extern fn GetColor(hexValue: c_uint) Color; pub extern fn GetPixelColor(srcPtr: ?*anyopaque, format: c_int) Color; pub extern fn SetPixelColor(dstPtr: ?*anyopaque, color: Color, format: c_int) void; pub extern fn GetPixelDataSize(width: c_int, height: c_int, format: c_int) c_int; pub extern fn GetFontDefault() Font; pub extern fn LoadFont(fileName: [*c]const u8) Font; pub extern fn LoadFontEx(fileName: [*c]const u8, fontSize: c_int, fontChars: 
// Font loading from image/memory, glyph data and atlas generation
// (UnloadFontData pairs with LoadFontData), text drawing/measuring,
// glyph lookups and codepoint loading.
[*c]c_int, glyphCount: c_int) Font; pub extern fn LoadFontFromImage(image: Image, key: Color, firstChar: c_int) Font; pub extern fn LoadFontFromMemory(fileType: [*c]const u8, fileData: [*c]const u8, dataSize: c_int, fontSize: c_int, fontChars: [*c]c_int, glyphCount: c_int) Font; pub extern fn LoadFontData(fileData: [*c]const u8, dataSize: c_int, fontSize: c_int, fontChars: [*c]c_int, glyphCount: c_int, @"type": c_int) [*c]GlyphInfo; pub extern fn GenImageFontAtlas(chars: [*c]const GlyphInfo, recs: [*c][*c]Rectangle, glyphCount: c_int, fontSize: c_int, padding: c_int, packMethod: c_int) Image; pub extern fn UnloadFontData(chars: [*c]GlyphInfo, glyphCount: c_int) void; pub extern fn UnloadFont(font: Font) void; pub extern fn ExportFontAsCode(font: Font, fileName: [*c]const u8) bool; pub extern fn DrawFPS(posX: c_int, posY: c_int) void; pub extern fn DrawText(text: [*c]const u8, posX: c_int, posY: c_int, fontSize: c_int, color: Color) void; pub extern fn DrawTextEx(font: Font, text: [*c]const u8, position: Vector2, fontSize: f32, spacing: f32, tint: Color) void; pub extern fn DrawTextPro(font: Font, text: [*c]const u8, position: Vector2, origin: Vector2, rotation: f32, fontSize: f32, spacing: f32, tint: Color) void; pub extern fn DrawTextCodepoint(font: Font, codepoint: c_int, position: Vector2, fontSize: f32, tint: Color) void; pub extern fn DrawTextCodepoints(font: Font, codepoints: [*c]const c_int, count: c_int, position: Vector2, fontSize: f32, spacing: f32, tint: Color) void; pub extern fn MeasureText(text: [*c]const u8, fontSize: c_int) c_int; pub extern fn MeasureTextEx(font: Font, text: [*c]const u8, fontSize: f32, spacing: f32) Vector2; pub extern fn GetGlyphIndex(font: Font, codepoint: c_int) c_int; pub extern fn GetGlyphInfo(font: Font, codepoint: c_int) GlyphInfo; pub extern fn GetGlyphAtlasRec(font: Font, codepoint: c_int) Rectangle; pub extern fn LoadCodepoints(text: [*c]const u8, count: [*c]c_int) [*c]c_int; pub extern fn UnloadCodepoints(codepoints: 
// Codepoint/UTF-8 helpers and C-string text utilities.
// NOTE(review): in C raylib many Text* functions return pointers into
// internal static buffers that are overwritten on the next call -- TODO
// confirm lifetimes against the bound raylib version before caching
// returned pointers. Ends with the first basic 3D drawing primitives.
[*c]c_int) void; pub extern fn GetCodepointCount(text: [*c]const u8) c_int; pub extern fn GetCodepoint(text: [*c]const u8, bytesProcessed: [*c]c_int) c_int; pub extern fn CodepointToUTF8(codepoint: c_int, byteSize: [*c]c_int) [*c]const u8; pub extern fn TextCodepointsToUTF8(codepoints: [*c]const c_int, length: c_int) [*c]u8; pub extern fn TextCopy(dst: [*c]u8, src: [*c]const u8) c_int; pub extern fn TextIsEqual(text1: [*c]const u8, text2: [*c]const u8) bool; pub extern fn TextLength(text: [*c]const u8) c_uint; pub extern fn TextFormat(text: [*c]const u8, ...) [*c]const u8; pub extern fn TextSubtext(text: [*c]const u8, position: c_int, length: c_int) [*c]const u8; pub extern fn TextReplace(text: [*c]u8, replace: [*c]const u8, by: [*c]const u8) [*c]u8; pub extern fn TextInsert(text: [*c]const u8, insert: [*c]const u8, position: c_int) [*c]u8; pub extern fn TextJoin(textList: [*c][*c]const u8, count: c_int, delimiter: [*c]const u8) [*c]const u8; pub extern fn TextSplit(text: [*c]const u8, delimiter: u8, count: [*c]c_int) [*c][*c]const u8; pub extern fn TextAppend(text: [*c]u8, append: [*c]const u8, position: [*c]c_int) void; pub extern fn TextFindIndex(text: [*c]const u8, find: [*c]const u8) c_int; pub extern fn TextToUpper(text: [*c]const u8) [*c]const u8; pub extern fn TextToLower(text: [*c]const u8) [*c]const u8; pub extern fn TextToPascal(text: [*c]const u8) [*c]const u8; pub extern fn TextToInteger(text: [*c]const u8) c_int; pub extern fn DrawLine3D(startPos: Vector3, endPos: Vector3, color: Color) void; pub extern fn DrawPoint3D(position: Vector3, color: Color) void; pub extern fn DrawCircle3D(center: Vector3, radius: f32, rotationAxis: Vector3, rotationAngle: f32, color: Color) void; pub extern fn DrawTriangle3D(v1: Vector3, v2: Vector3, v3: Vector3, color: Color) void; pub extern fn DrawTriangleStrip3D(points: [*c]Vector3, pointCount: c_int, color: Color) void; pub extern fn DrawCube(position: Vector3, width: f32, height: f32, length: f32, color: Color) void; 
// NOTE(review): machine-generated C bindings — appears to be Zig translate-c output for
// raylib (paths later in this file reference raylib.h); prefer regenerating from the header
// over hand-editing these declarations.
// This section declares externs for: 3D shape drawing (cubes, spheres, cylinders, planes,
// rays, grid), model loading/unloading/drawing, billboards, mesh upload/draw/generation,
// materials, model animations, 3D collision checks (sphere/box/ray), and audio-device
// init/close. Pointer parameters use C-interop pointers ([*c]T); ownership/lifetime rules
// follow the underlying raylib C API — not visible from these declarations.
pub extern fn DrawCubeV(position: Vector3, size: Vector3, color: Color) void; pub extern fn DrawCubeWires(position: Vector3, width: f32, height: f32, length: f32, color: Color) void; pub extern fn DrawCubeWiresV(position: Vector3, size: Vector3, color: Color) void; pub extern fn DrawCubeTexture(texture: Texture2D, position: Vector3, width: f32, height: f32, length: f32, color: Color) void; pub extern fn DrawCubeTextureRec(texture: Texture2D, source: Rectangle, position: Vector3, width: f32, height: f32, length: f32, color: Color) void; pub extern fn DrawSphere(centerPos: Vector3, radius: f32, color: Color) void; pub extern fn DrawSphereEx(centerPos: Vector3, radius: f32, rings: c_int, slices: c_int, color: Color) void; pub extern fn DrawSphereWires(centerPos: Vector3, radius: f32, rings: c_int, slices: c_int, color: Color) void; pub extern fn DrawCylinder(position: Vector3, radiusTop: f32, radiusBottom: f32, height: f32, slices: c_int, color: Color) void; pub extern fn DrawCylinderEx(startPos: Vector3, endPos: Vector3, startRadius: f32, endRadius: f32, sides: c_int, color: Color) void; pub extern fn DrawCylinderWires(position: Vector3, radiusTop: f32, radiusBottom: f32, height: f32, slices: c_int, color: Color) void; pub extern fn DrawCylinderWiresEx(startPos: Vector3, endPos: Vector3, startRadius: f32, endRadius: f32, sides: c_int, color: Color) void; pub extern fn DrawPlane(centerPos: Vector3, size: Vector2, color: Color) void; pub extern fn DrawRay(ray: Ray, color: Color) void; pub extern fn DrawGrid(slices: c_int, spacing: f32) void; pub extern fn LoadModel(fileName: [*c]const u8) Model; pub extern fn LoadModelFromMesh(mesh: Mesh) Model; pub extern fn UnloadModel(model: Model) void; pub extern fn UnloadModelKeepMeshes(model: Model) void; pub extern fn GetModelBoundingBox(model: Model) BoundingBox; pub extern fn DrawModel(model: Model, position: Vector3, scale: f32, tint: Color) void; pub extern fn DrawModelEx(model: Model, position: Vector3, rotationAxis: 
Vector3, rotationAngle: f32, scale: Vector3, tint: Color) void; pub extern fn DrawModelWires(model: Model, position: Vector3, scale: f32, tint: Color) void; pub extern fn DrawModelWiresEx(model: Model, position: Vector3, rotationAxis: Vector3, rotationAngle: f32, scale: Vector3, tint: Color) void; pub extern fn DrawBoundingBox(box: BoundingBox, color: Color) void; pub extern fn DrawBillboard(camera: Camera, texture: Texture2D, position: Vector3, size: f32, tint: Color) void; pub extern fn DrawBillboardRec(camera: Camera, texture: Texture2D, source: Rectangle, position: Vector3, size: Vector2, tint: Color) void; pub extern fn DrawBillboardPro(camera: Camera, texture: Texture2D, source: Rectangle, position: Vector3, up: Vector3, size: Vector2, origin: Vector2, rotation: f32, tint: Color) void; pub extern fn UploadMesh(mesh: [*c]Mesh, dynamic: bool) void; pub extern fn UpdateMeshBuffer(mesh: Mesh, index: c_int, data: ?*const anyopaque, dataSize: c_int, offset: c_int) void; pub extern fn UnloadMesh(mesh: Mesh) void; pub extern fn DrawMesh(mesh: Mesh, material: Material, transform: Matrix) void; pub extern fn DrawMeshInstanced(mesh: Mesh, material: Material, transforms: [*c]const Matrix, instances: c_int) void; pub extern fn ExportMesh(mesh: Mesh, fileName: [*c]const u8) bool; pub extern fn GetMeshBoundingBox(mesh: Mesh) BoundingBox; pub extern fn GenMeshTangents(mesh: [*c]Mesh) void; pub extern fn GenMeshBinormals(mesh: [*c]Mesh) void; pub extern fn GenMeshPoly(sides: c_int, radius: f32) Mesh; pub extern fn GenMeshPlane(width: f32, length: f32, resX: c_int, resZ: c_int) Mesh; pub extern fn GenMeshCube(width: f32, height: f32, length: f32) Mesh; pub extern fn GenMeshSphere(radius: f32, rings: c_int, slices: c_int) Mesh; pub extern fn GenMeshHemiSphere(radius: f32, rings: c_int, slices: c_int) Mesh; pub extern fn GenMeshCylinder(radius: f32, height: f32, slices: c_int) Mesh; pub extern fn GenMeshCone(radius: f32, height: f32, slices: c_int) Mesh; pub extern fn 
GenMeshTorus(radius: f32, size: f32, radSeg: c_int, sides: c_int) Mesh; pub extern fn GenMeshKnot(radius: f32, size: f32, radSeg: c_int, sides: c_int) Mesh; pub extern fn GenMeshHeightmap(heightmap: Image, size: Vector3) Mesh; pub extern fn GenMeshCubicmap(cubicmap: Image, cubeSize: Vector3) Mesh; pub extern fn LoadMaterials(fileName: [*c]const u8, materialCount: [*c]c_int) [*c]Material; pub extern fn LoadMaterialDefault() Material; pub extern fn UnloadMaterial(material: Material) void; pub extern fn SetMaterialTexture(material: [*c]Material, mapType: c_int, texture: Texture2D) void; pub extern fn SetModelMeshMaterial(model: [*c]Model, meshId: c_int, materialId: c_int) void; pub extern fn LoadModelAnimations(fileName: [*c]const u8, animCount: [*c]c_uint) [*c]ModelAnimation; pub extern fn UpdateModelAnimation(model: Model, anim: ModelAnimation, frame: c_int) void; pub extern fn UnloadModelAnimation(anim: ModelAnimation) void; pub extern fn UnloadModelAnimations(animations: [*c]ModelAnimation, count: c_uint) void; pub extern fn IsModelAnimationValid(model: Model, anim: ModelAnimation) bool; pub extern fn CheckCollisionSpheres(center1: Vector3, radius1: f32, center2: Vector3, radius2: f32) bool; pub extern fn CheckCollisionBoxes(box1: BoundingBox, box2: BoundingBox) bool; pub extern fn CheckCollisionBoxSphere(box: BoundingBox, center: Vector3, radius: f32) bool; pub extern fn GetRayCollisionSphere(ray: Ray, center: Vector3, radius: f32) RayCollision; pub extern fn GetRayCollisionBox(ray: Ray, box: BoundingBox) RayCollision; pub extern fn GetRayCollisionModel(ray: Ray, model: Model) RayCollision; pub extern fn GetRayCollisionMesh(ray: Ray, mesh: Mesh, transform: Matrix) RayCollision; pub extern fn GetRayCollisionTriangle(ray: Ray, p1: Vector3, p2: Vector3, p3: Vector3) RayCollision; pub extern fn GetRayCollisionQuad(ray: Ray, p1: Vector3, p2: Vector3, p3: Vector3, p4: Vector3) RayCollision; pub extern fn InitAudioDevice() void; pub extern fn CloseAudioDevice() void; 
// NOTE(review): machine-generated C bindings (Zig translate-c output — see the
// `@compileError("unable to translate macro ...")` placeholders below, which translate-c
// emits for C macros it cannot convert; calling or referencing those names is a compile
// error by design). Prefer regenerating from the headers over hand-editing.
// This section declares: wave/sound/music/audio-stream externs; the GuiStyleProp extern
// struct; raygui state/alignment/control enums and per-control style property constants
// (values 0-21 mapped to c_int, grouped by control); raygui immediate-mode control
// externs (GuiButton, GuiSlider, GuiTextBox, ...); the full ICON_* icon-id table
// (0-255); and compiler-predefined macro constants (__clang__, __INT_MAX__, etc.)
// captured at translate time. Source-path comments (raylib.h/raygui.c line numbers)
// are translate-c provenance markers.
pub extern fn IsAudioDeviceReady() bool; pub extern fn SetMasterVolume(volume: f32) void; pub extern fn LoadWave(fileName: [*c]const u8) Wave; pub extern fn LoadWaveFromMemory(fileType: [*c]const u8, fileData: [*c]const u8, dataSize: c_int) Wave; pub extern fn LoadSound(fileName: [*c]const u8) Sound; pub extern fn LoadSoundFromWave(wave: Wave) Sound; pub extern fn UpdateSound(sound: Sound, data: ?*const anyopaque, sampleCount: c_int) void; pub extern fn UnloadWave(wave: Wave) void; pub extern fn UnloadSound(sound: Sound) void; pub extern fn ExportWave(wave: Wave, fileName: [*c]const u8) bool; pub extern fn ExportWaveAsCode(wave: Wave, fileName: [*c]const u8) bool; pub extern fn PlaySound(sound: Sound) void; pub extern fn StopSound(sound: Sound) void; pub extern fn PauseSound(sound: Sound) void; pub extern fn ResumeSound(sound: Sound) void; pub extern fn PlaySoundMulti(sound: Sound) void; pub extern fn StopSoundMulti() void; pub extern fn GetSoundsPlaying() c_int; pub extern fn IsSoundPlaying(sound: Sound) bool; pub extern fn SetSoundVolume(sound: Sound, volume: f32) void; pub extern fn SetSoundPitch(sound: Sound, pitch: f32) void; pub extern fn SetSoundPan(sound: Sound, pan: f32) void; pub extern fn WaveCopy(wave: Wave) Wave; pub extern fn WaveCrop(wave: [*c]Wave, initSample: c_int, finalSample: c_int) void; pub extern fn WaveFormat(wave: [*c]Wave, sampleRate: c_int, sampleSize: c_int, channels: c_int) void; pub extern fn LoadWaveSamples(wave: Wave) [*c]f32; pub extern fn UnloadWaveSamples(samples: [*c]f32) void; pub extern fn LoadMusicStream(fileName: [*c]const u8) Music; pub extern fn LoadMusicStreamFromMemory(fileType: [*c]const u8, data: [*c]const u8, dataSize: c_int) Music; pub extern fn UnloadMusicStream(music: Music) void; pub extern fn PlayMusicStream(music: Music) void; pub extern fn IsMusicStreamPlaying(music: Music) bool; pub extern fn UpdateMusicStream(music: Music) void; pub extern fn StopMusicStream(music: Music) void; pub extern fn 
PauseMusicStream(music: Music) void; pub extern fn ResumeMusicStream(music: Music) void; pub extern fn SeekMusicStream(music: Music, position: f32) void; pub extern fn SetMusicVolume(music: Music, volume: f32) void; pub extern fn SetMusicPitch(music: Music, pitch: f32) void; pub extern fn SetMusicPan(music: Music, pan: f32) void; pub extern fn GetMusicTimeLength(music: Music) f32; pub extern fn GetMusicTimePlayed(music: Music) f32; pub extern fn LoadAudioStream(sampleRate: c_uint, sampleSize: c_uint, channels: c_uint) AudioStream; pub extern fn UnloadAudioStream(stream: AudioStream) void; pub extern fn UpdateAudioStream(stream: AudioStream, data: ?*const anyopaque, frameCount: c_int) void; pub extern fn IsAudioStreamProcessed(stream: AudioStream) bool; pub extern fn PlayAudioStream(stream: AudioStream) void; pub extern fn PauseAudioStream(stream: AudioStream) void; pub extern fn ResumeAudioStream(stream: AudioStream) void; pub extern fn IsAudioStreamPlaying(stream: AudioStream) bool; pub extern fn StopAudioStream(stream: AudioStream) void; pub extern fn SetAudioStreamVolume(stream: AudioStream, volume: f32) void; pub extern fn SetAudioStreamPitch(stream: AudioStream, pitch: f32) void; pub extern fn SetAudioStreamPan(stream: AudioStream, pan: f32) void; pub extern fn SetAudioStreamBufferSizeDefault(size: c_int) void; pub const struct_GuiStyleProp = extern struct { controlId: c_ushort, propertyId: c_ushort, propertyValue: c_uint, }; pub const GuiStyleProp = struct_GuiStyleProp; pub const STATE_NORMAL: c_int = 0; pub const STATE_FOCUSED: c_int = 1; pub const STATE_PRESSED: c_int = 2; pub const STATE_DISABLED: c_int = 3; pub const GuiState = c_uint; pub const TEXT_ALIGN_LEFT: c_int = 0; pub const TEXT_ALIGN_CENTER: c_int = 1; pub const TEXT_ALIGN_RIGHT: c_int = 2; pub const GuiTextAlignment = c_uint; pub const DEFAULT: c_int = 0; pub const LABEL: c_int = 1; pub const BUTTON: c_int = 2; pub const TOGGLE: c_int = 3; pub const SLIDER: c_int = 4; pub const PROGRESSBAR: 
c_int = 5; pub const CHECKBOX: c_int = 6; pub const COMBOBOX: c_int = 7; pub const DROPDOWNBOX: c_int = 8; pub const TEXTBOX: c_int = 9; pub const VALUEBOX: c_int = 10; pub const SPINNER: c_int = 11; pub const LISTVIEW: c_int = 12; pub const COLORPICKER: c_int = 13; pub const SCROLLBAR: c_int = 14; pub const STATUSBAR: c_int = 15; pub const GuiControl = c_uint; pub const BORDER_COLOR_NORMAL: c_int = 0; pub const BASE_COLOR_NORMAL: c_int = 1; pub const TEXT_COLOR_NORMAL: c_int = 2; pub const BORDER_COLOR_FOCUSED: c_int = 3; pub const BASE_COLOR_FOCUSED: c_int = 4; pub const TEXT_COLOR_FOCUSED: c_int = 5; pub const BORDER_COLOR_PRESSED: c_int = 6; pub const BASE_COLOR_PRESSED: c_int = 7; pub const TEXT_COLOR_PRESSED: c_int = 8; pub const BORDER_COLOR_DISABLED: c_int = 9; pub const BASE_COLOR_DISABLED: c_int = 10; pub const TEXT_COLOR_DISABLED: c_int = 11; pub const BORDER_WIDTH: c_int = 12; pub const TEXT_PADDING: c_int = 13; pub const TEXT_ALIGNMENT: c_int = 14; pub const RESERVED: c_int = 15; pub const GuiControlProperty = c_uint; pub const TEXT_SIZE: c_int = 16; pub const TEXT_SPACING: c_int = 17; pub const LINE_COLOR: c_int = 18; pub const BACKGROUND_COLOR: c_int = 19; pub const GuiDefaultProperty = c_uint; pub const GROUP_PADDING: c_int = 16; pub const GuiToggleProperty = c_uint; pub const SLIDER_WIDTH: c_int = 16; pub const SLIDER_PADDING: c_int = 17; pub const GuiSliderProperty = c_uint; pub const PROGRESS_PADDING: c_int = 16; pub const GuiProgressBarProperty = c_uint; pub const ARROWS_SIZE: c_int = 16; pub const ARROWS_VISIBLE: c_int = 17; pub const SCROLL_SLIDER_PADDING: c_int = 18; pub const SCROLL_SLIDER_SIZE: c_int = 19; pub const SCROLL_PADDING: c_int = 20; pub const SCROLL_SPEED: c_int = 21; pub const GuiScrollBarProperty = c_uint; pub const CHECK_PADDING: c_int = 16; pub const GuiCheckBoxProperty = c_uint; pub const COMBO_BUTTON_WIDTH: c_int = 16; pub const COMBO_BUTTON_SPACING: c_int = 17; pub const GuiComboBoxProperty = c_uint; pub const 
ARROW_PADDING: c_int = 16; pub const DROPDOWN_ITEMS_SPACING: c_int = 17; pub const GuiDropdownBoxProperty = c_uint; pub const TEXT_INNER_PADDING: c_int = 16; pub const TEXT_LINES_SPACING: c_int = 17; pub const GuiTextBoxProperty = c_uint; pub const SPIN_BUTTON_WIDTH: c_int = 16; pub const SPIN_BUTTON_SPACING: c_int = 17; pub const GuiSpinnerProperty = c_uint; pub const LIST_ITEMS_HEIGHT: c_int = 16; pub const LIST_ITEMS_SPACING: c_int = 17; pub const SCROLLBAR_WIDTH: c_int = 18; pub const SCROLLBAR_SIDE: c_int = 19; pub const GuiListViewProperty = c_uint; pub const COLOR_SELECTOR_SIZE: c_int = 16; pub const HUEBAR_WIDTH: c_int = 17; pub const HUEBAR_PADDING: c_int = 18; pub const HUEBAR_SELECTOR_HEIGHT: c_int = 19; pub const HUEBAR_SELECTOR_OVERFLOW: c_int = 20; pub const GuiColorPickerProperty = c_uint; pub extern fn GuiEnable() void; pub extern fn GuiDisable() void; pub extern fn GuiLock() void; pub extern fn GuiUnlock() void; pub extern fn GuiIsLocked() bool; pub extern fn GuiFade(alpha: f32) void; pub extern fn GuiSetState(state: c_int) void; pub extern fn GuiGetState() c_int; pub extern fn GuiSetFont(font: Font) void; pub extern fn GuiGetFont() Font; pub extern fn GuiSetStyle(control: c_int, property: c_int, value: c_int) void; pub extern fn GuiGetStyle(control: c_int, property: c_int) c_int; pub extern fn GuiWindowBox(bounds: Rectangle, title: [*c]const u8) bool; pub extern fn GuiGroupBox(bounds: Rectangle, text: [*c]const u8) void; pub extern fn GuiLine(bounds: Rectangle, text: [*c]const u8) void; pub extern fn GuiPanel(bounds: Rectangle, text: [*c]const u8) void; pub extern fn GuiScrollPanel(bounds: Rectangle, text: [*c]const u8, content: Rectangle, scroll: [*c]Vector2) Rectangle; pub extern fn GuiLabel(bounds: Rectangle, text: [*c]const u8) void; pub extern fn GuiButton(bounds: Rectangle, text: [*c]const u8) bool; pub extern fn GuiLabelButton(bounds: Rectangle, text: [*c]const u8) bool; pub extern fn GuiToggle(bounds: Rectangle, text: [*c]const u8, active: 
bool) bool; pub extern fn GuiToggleGroup(bounds: Rectangle, text: [*c]const u8, active: c_int) c_int; pub extern fn GuiCheckBox(bounds: Rectangle, text: [*c]const u8, checked: bool) bool; pub extern fn GuiComboBox(bounds: Rectangle, text: [*c]const u8, active: c_int) c_int; pub extern fn GuiDropdownBox(bounds: Rectangle, text: [*c]const u8, active: [*c]c_int, editMode: bool) bool; pub extern fn GuiSpinner(bounds: Rectangle, text: [*c]const u8, value: [*c]c_int, minValue: c_int, maxValue: c_int, editMode: bool) bool; pub extern fn GuiValueBox(bounds: Rectangle, text: [*c]const u8, value: [*c]c_int, minValue: c_int, maxValue: c_int, editMode: bool) bool; pub extern fn GuiTextBox(bounds: Rectangle, text: [*c]u8, textSize: c_int, editMode: bool) bool; pub extern fn GuiTextBoxMulti(bounds: Rectangle, text: [*c]u8, textSize: c_int, editMode: bool) bool; pub extern fn GuiSlider(bounds: Rectangle, textLeft: [*c]const u8, textRight: [*c]const u8, value: f32, minValue: f32, maxValue: f32) f32; pub extern fn GuiSliderBar(bounds: Rectangle, textLeft: [*c]const u8, textRight: [*c]const u8, value: f32, minValue: f32, maxValue: f32) f32; pub extern fn GuiProgressBar(bounds: Rectangle, textLeft: [*c]const u8, textRight: [*c]const u8, value: f32, minValue: f32, maxValue: f32) f32; pub extern fn GuiStatusBar(bounds: Rectangle, text: [*c]const u8) void; pub extern fn GuiDummyRec(bounds: Rectangle, text: [*c]const u8) void; pub extern fn GuiGrid(bounds: Rectangle, text: [*c]const u8, spacing: f32, subdivs: c_int) Vector2; pub extern fn GuiListView(bounds: Rectangle, text: [*c]const u8, scrollIndex: [*c]c_int, active: c_int) c_int; pub extern fn GuiListViewEx(bounds: Rectangle, text: [*c][*c]const u8, count: c_int, focus: [*c]c_int, scrollIndex: [*c]c_int, active: c_int) c_int; pub extern fn GuiMessageBox(bounds: Rectangle, title: [*c]const u8, message: [*c]const u8, buttons: [*c]const u8) c_int; pub extern fn GuiTextInputBox(bounds: Rectangle, title: [*c]const u8, message: [*c]const 
u8, buttons: [*c]const u8, text: [*c]u8, textMaxSize: c_int, secretViewActive: [*c]c_int) c_int; pub extern fn GuiColorPicker(bounds: Rectangle, text: [*c]const u8, color: Color) Color; pub extern fn GuiColorPanel(bounds: Rectangle, text: [*c]const u8, color: Color) Color; pub extern fn GuiColorBarAlpha(bounds: Rectangle, text: [*c]const u8, alpha: f32) f32; pub extern fn GuiColorBarHue(bounds: Rectangle, text: [*c]const u8, value: f32) f32; pub extern fn GuiLoadStyle(fileName: [*c]const u8) void; pub extern fn GuiLoadStyleDefault() void; pub extern fn GuiIconText(iconId: c_int, text: [*c]const u8) [*c]const u8; pub extern fn GuiDrawIcon(iconId: c_int, posX: c_int, posY: c_int, pixelSize: c_int, color: Color) void; pub extern fn GuiGetIcons() [*c]c_uint; pub extern fn GuiGetIconData(iconId: c_int) [*c]c_uint; pub extern fn GuiSetIconData(iconId: c_int, data: [*c]c_uint) void; pub extern fn GuiSetIconScale(scale: c_uint) void; pub extern fn GuiSetIconPixel(iconId: c_int, x: c_int, y: c_int) void; pub extern fn GuiClearIconPixel(iconId: c_int, x: c_int, y: c_int) void; pub extern fn GuiCheckIconPixel(iconId: c_int, x: c_int, y: c_int) bool; pub const ICON_NONE: c_int = 0; pub const ICON_FOLDER_FILE_OPEN: c_int = 1; pub const ICON_FILE_SAVE_CLASSIC: c_int = 2; pub const ICON_FOLDER_OPEN: c_int = 3; pub const ICON_FOLDER_SAVE: c_int = 4; pub const ICON_FILE_OPEN: c_int = 5; pub const ICON_FILE_SAVE: c_int = 6; pub const ICON_FILE_EXPORT: c_int = 7; pub const ICON_FILE_ADD: c_int = 8; pub const ICON_FILE_DELETE: c_int = 9; pub const ICON_FILETYPE_TEXT: c_int = 10; pub const ICON_FILETYPE_AUDIO: c_int = 11; pub const ICON_FILETYPE_IMAGE: c_int = 12; pub const ICON_FILETYPE_PLAY: c_int = 13; pub const ICON_FILETYPE_VIDEO: c_int = 14; pub const ICON_FILETYPE_INFO: c_int = 15; pub const ICON_FILE_COPY: c_int = 16; pub const ICON_FILE_CUT: c_int = 17; pub const ICON_FILE_PASTE: c_int = 18; pub const ICON_CURSOR_HAND: c_int = 19; pub const ICON_CURSOR_POINTER: c_int = 20; pub 
const ICON_CURSOR_CLASSIC: c_int = 21; pub const ICON_PENCIL: c_int = 22; pub const ICON_PENCIL_BIG: c_int = 23; pub const ICON_BRUSH_CLASSIC: c_int = 24; pub const ICON_BRUSH_PAINTER: c_int = 25; pub const ICON_WATER_DROP: c_int = 26; pub const ICON_COLOR_PICKER: c_int = 27; pub const ICON_RUBBER: c_int = 28; pub const ICON_COLOR_BUCKET: c_int = 29; pub const ICON_TEXT_T: c_int = 30; pub const ICON_TEXT_A: c_int = 31; pub const ICON_SCALE: c_int = 32; pub const ICON_RESIZE: c_int = 33; pub const ICON_FILTER_POINT: c_int = 34; pub const ICON_FILTER_BILINEAR: c_int = 35; pub const ICON_CROP: c_int = 36; pub const ICON_CROP_ALPHA: c_int = 37; pub const ICON_SQUARE_TOGGLE: c_int = 38; pub const ICON_SYMMETRY: c_int = 39; pub const ICON_SYMMETRY_HORIZONTAL: c_int = 40; pub const ICON_SYMMETRY_VERTICAL: c_int = 41; pub const ICON_LENS: c_int = 42; pub const ICON_LENS_BIG: c_int = 43; pub const ICON_EYE_ON: c_int = 44; pub const ICON_EYE_OFF: c_int = 45; pub const ICON_FILTER_TOP: c_int = 46; pub const ICON_FILTER: c_int = 47; pub const ICON_TARGET_POINT: c_int = 48; pub const ICON_TARGET_SMALL: c_int = 49; pub const ICON_TARGET_BIG: c_int = 50; pub const ICON_TARGET_MOVE: c_int = 51; pub const ICON_CURSOR_MOVE: c_int = 52; pub const ICON_CURSOR_SCALE: c_int = 53; pub const ICON_CURSOR_SCALE_RIGHT: c_int = 54; pub const ICON_CURSOR_SCALE_LEFT: c_int = 55; pub const ICON_UNDO: c_int = 56; pub const ICON_REDO: c_int = 57; pub const ICON_REREDO: c_int = 58; pub const ICON_MUTATE: c_int = 59; pub const ICON_ROTATE: c_int = 60; pub const ICON_REPEAT: c_int = 61; pub const ICON_SHUFFLE: c_int = 62; pub const ICON_EMPTYBOX: c_int = 63; pub const ICON_TARGET: c_int = 64; pub const ICON_TARGET_SMALL_FILL: c_int = 65; pub const ICON_TARGET_BIG_FILL: c_int = 66; pub const ICON_TARGET_MOVE_FILL: c_int = 67; pub const ICON_CURSOR_MOVE_FILL: c_int = 68; pub const ICON_CURSOR_SCALE_FILL: c_int = 69; pub const ICON_CURSOR_SCALE_RIGHT_FILL: c_int = 70; pub const 
ICON_CURSOR_SCALE_LEFT_FILL: c_int = 71; pub const ICON_UNDO_FILL: c_int = 72; pub const ICON_REDO_FILL: c_int = 73; pub const ICON_REREDO_FILL: c_int = 74; pub const ICON_MUTATE_FILL: c_int = 75; pub const ICON_ROTATE_FILL: c_int = 76; pub const ICON_REPEAT_FILL: c_int = 77; pub const ICON_SHUFFLE_FILL: c_int = 78; pub const ICON_EMPTYBOX_SMALL: c_int = 79; pub const ICON_BOX: c_int = 80; pub const ICON_BOX_TOP: c_int = 81; pub const ICON_BOX_TOP_RIGHT: c_int = 82; pub const ICON_BOX_RIGHT: c_int = 83; pub const ICON_BOX_BOTTOM_RIGHT: c_int = 84; pub const ICON_BOX_BOTTOM: c_int = 85; pub const ICON_BOX_BOTTOM_LEFT: c_int = 86; pub const ICON_BOX_LEFT: c_int = 87; pub const ICON_BOX_TOP_LEFT: c_int = 88; pub const ICON_BOX_CENTER: c_int = 89; pub const ICON_BOX_CIRCLE_MASK: c_int = 90; pub const ICON_POT: c_int = 91; pub const ICON_ALPHA_MULTIPLY: c_int = 92; pub const ICON_ALPHA_CLEAR: c_int = 93; pub const ICON_DITHERING: c_int = 94; pub const ICON_MIPMAPS: c_int = 95; pub const ICON_BOX_GRID: c_int = 96; pub const ICON_GRID: c_int = 97; pub const ICON_BOX_CORNERS_SMALL: c_int = 98; pub const ICON_BOX_CORNERS_BIG: c_int = 99; pub const ICON_FOUR_BOXES: c_int = 100; pub const ICON_GRID_FILL: c_int = 101; pub const ICON_BOX_MULTISIZE: c_int = 102; pub const ICON_ZOOM_SMALL: c_int = 103; pub const ICON_ZOOM_MEDIUM: c_int = 104; pub const ICON_ZOOM_BIG: c_int = 105; pub const ICON_ZOOM_ALL: c_int = 106; pub const ICON_ZOOM_CENTER: c_int = 107; pub const ICON_BOX_DOTS_SMALL: c_int = 108; pub const ICON_BOX_DOTS_BIG: c_int = 109; pub const ICON_BOX_CONCENTRIC: c_int = 110; pub const ICON_BOX_GRID_BIG: c_int = 111; pub const ICON_OK_TICK: c_int = 112; pub const ICON_CROSS: c_int = 113; pub const ICON_ARROW_LEFT: c_int = 114; pub const ICON_ARROW_RIGHT: c_int = 115; pub const ICON_ARROW_DOWN: c_int = 116; pub const ICON_ARROW_UP: c_int = 117; pub const ICON_ARROW_LEFT_FILL: c_int = 118; pub const ICON_ARROW_RIGHT_FILL: c_int = 119; pub const ICON_ARROW_DOWN_FILL: c_int 
= 120; pub const ICON_ARROW_UP_FILL: c_int = 121; pub const ICON_AUDIO: c_int = 122; pub const ICON_FX: c_int = 123; pub const ICON_WAVE: c_int = 124; pub const ICON_WAVE_SINUS: c_int = 125; pub const ICON_WAVE_SQUARE: c_int = 126; pub const ICON_WAVE_TRIANGULAR: c_int = 127; pub const ICON_CROSS_SMALL: c_int = 128; pub const ICON_PLAYER_PREVIOUS: c_int = 129; pub const ICON_PLAYER_PLAY_BACK: c_int = 130; pub const ICON_PLAYER_PLAY: c_int = 131; pub const ICON_PLAYER_PAUSE: c_int = 132; pub const ICON_PLAYER_STOP: c_int = 133; pub const ICON_PLAYER_NEXT: c_int = 134; pub const ICON_PLAYER_RECORD: c_int = 135; pub const ICON_MAGNET: c_int = 136; pub const ICON_LOCK_CLOSE: c_int = 137; pub const ICON_LOCK_OPEN: c_int = 138; pub const ICON_CLOCK: c_int = 139; pub const ICON_TOOLS: c_int = 140; pub const ICON_GEAR: c_int = 141; pub const ICON_GEAR_BIG: c_int = 142; pub const ICON_BIN: c_int = 143; pub const ICON_HAND_POINTER: c_int = 144; pub const ICON_LASER: c_int = 145; pub const ICON_COIN: c_int = 146; pub const ICON_EXPLOSION: c_int = 147; pub const ICON_1UP: c_int = 148; pub const ICON_PLAYER: c_int = 149; pub const ICON_PLAYER_JUMP: c_int = 150; pub const ICON_KEY: c_int = 151; pub const ICON_DEMON: c_int = 152; pub const ICON_TEXT_POPUP: c_int = 153; pub const ICON_GEAR_EX: c_int = 154; pub const ICON_CRACK: c_int = 155; pub const ICON_CRACK_POINTS: c_int = 156; pub const ICON_STAR: c_int = 157; pub const ICON_DOOR: c_int = 158; pub const ICON_EXIT: c_int = 159; pub const ICON_MODE_2D: c_int = 160; pub const ICON_MODE_3D: c_int = 161; pub const ICON_CUBE: c_int = 162; pub const ICON_CUBE_FACE_TOP: c_int = 163; pub const ICON_CUBE_FACE_LEFT: c_int = 164; pub const ICON_CUBE_FACE_FRONT: c_int = 165; pub const ICON_CUBE_FACE_BOTTOM: c_int = 166; pub const ICON_CUBE_FACE_RIGHT: c_int = 167; pub const ICON_CUBE_FACE_BACK: c_int = 168; pub const ICON_CAMERA: c_int = 169; pub const ICON_SPECIAL: c_int = 170; pub const ICON_LINK_NET: c_int = 171; pub const 
ICON_LINK_BOXES: c_int = 172; pub const ICON_LINK_MULTI: c_int = 173; pub const ICON_LINK: c_int = 174; pub const ICON_LINK_BROKE: c_int = 175; pub const ICON_TEXT_NOTES: c_int = 176; pub const ICON_NOTEBOOK: c_int = 177; pub const ICON_SUITCASE: c_int = 178; pub const ICON_SUITCASE_ZIP: c_int = 179; pub const ICON_MAILBOX: c_int = 180; pub const ICON_MONITOR: c_int = 181; pub const ICON_PRINTER: c_int = 182; pub const ICON_PHOTO_CAMERA: c_int = 183; pub const ICON_PHOTO_CAMERA_FLASH: c_int = 184; pub const ICON_HOUSE: c_int = 185; pub const ICON_HEART: c_int = 186; pub const ICON_CORNER: c_int = 187; pub const ICON_VERTICAL_BARS: c_int = 188; pub const ICON_VERTICAL_BARS_FILL: c_int = 189; pub const ICON_LIFE_BARS: c_int = 190; pub const ICON_INFO: c_int = 191; pub const ICON_CROSSLINE: c_int = 192; pub const ICON_HELP: c_int = 193; pub const ICON_FILETYPE_ALPHA: c_int = 194; pub const ICON_FILETYPE_HOME: c_int = 195; pub const ICON_LAYERS_VISIBLE: c_int = 196; pub const ICON_LAYERS: c_int = 197; pub const ICON_WINDOW: c_int = 198; pub const ICON_HIDPI: c_int = 199; pub const ICON_FILETYPE_BINARY: c_int = 200; pub const ICON_HEX: c_int = 201; pub const ICON_SHIELD: c_int = 202; pub const ICON_FILE_NEW: c_int = 203; pub const ICON_FOLDER_ADD: c_int = 204; pub const ICON_ALARM: c_int = 205; pub const ICON_206: c_int = 206; pub const ICON_207: c_int = 207; pub const ICON_208: c_int = 208; pub const ICON_209: c_int = 209; pub const ICON_210: c_int = 210; pub const ICON_211: c_int = 211; pub const ICON_212: c_int = 212; pub const ICON_213: c_int = 213; pub const ICON_214: c_int = 214; pub const ICON_215: c_int = 215; pub const ICON_216: c_int = 216; pub const ICON_217: c_int = 217; pub const ICON_218: c_int = 218; pub const ICON_219: c_int = 219; pub const ICON_220: c_int = 220; pub const ICON_221: c_int = 221; pub const ICON_222: c_int = 222; pub const ICON_223: c_int = 223; pub const ICON_224: c_int = 224; pub const ICON_225: c_int = 225; pub const ICON_226: c_int = 
226; pub const ICON_227: c_int = 227; pub const ICON_228: c_int = 228; pub const ICON_229: c_int = 229; pub const ICON_230: c_int = 230; pub const ICON_231: c_int = 231; pub const ICON_232: c_int = 232; pub const ICON_233: c_int = 233; pub const ICON_234: c_int = 234; pub const ICON_235: c_int = 235; pub const ICON_236: c_int = 236; pub const ICON_237: c_int = 237; pub const ICON_238: c_int = 238; pub const ICON_239: c_int = 239; pub const ICON_240: c_int = 240; pub const ICON_241: c_int = 241; pub const ICON_242: c_int = 242; pub const ICON_243: c_int = 243; pub const ICON_244: c_int = 244; pub const ICON_245: c_int = 245; pub const ICON_246: c_int = 246; pub const ICON_247: c_int = 247; pub const ICON_248: c_int = 248; pub const ICON_249: c_int = 249; pub const ICON_250: c_int = 250; pub const ICON_251: c_int = 251; pub const ICON_252: c_int = 252; pub const ICON_253: c_int = 253; pub const ICON_254: c_int = 254; pub const ICON_255: c_int = 255; pub const GuiIconName = c_uint; pub const __INTMAX_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `LL`"); // (no file):66:9 pub const __UINTMAX_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `ULL`"); // (no file):72:9 pub const __INT64_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `LL`"); // (no file):164:9 pub const __UINT32_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `U`"); // (no file):186:9 pub const __UINT64_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `ULL`"); // (no file):194:9 pub const __seg_gs = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):314:9 pub const __seg_fs = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):315:9 pub const __declspec = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):380:9 pub const _cdecl = 
@compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):381:9 pub const __cdecl = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):382:9 pub const _stdcall = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):383:9 pub const __stdcall = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):384:9 pub const _fastcall = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):385:9 pub const __fastcall = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):386:9 pub const _thiscall = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):387:9 pub const __thiscall = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):388:9 pub const _pascal = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):389:9 pub const __pascal = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):390:9 pub const va_start = @compileError("unable to translate macro: undefined identifier `__builtin_va_start`"); // D:\Programs\zig-windows-x86_64-0.9.1\lib\include\stdarg.h:17:9 pub const va_end = @compileError("unable to translate macro: undefined identifier `__builtin_va_end`"); // D:\Programs\zig-windows-x86_64-0.9.1\lib\include\stdarg.h:18:9 pub const va_arg = @compileError("unable to translate macro: undefined identifier `__builtin_va_arg`"); // D:\Programs\zig-windows-x86_64-0.9.1\lib\include\stdarg.h:19:9 pub const __va_copy = @compileError("unable to translate macro: undefined identifier `__builtin_va_copy`"); // D:\Programs\zig-windows-x86_64-0.9.1\lib\include\stdarg.h:24:9 pub const va_copy = @compileError("unable to translate macro: undefined identifier `__builtin_va_copy`"); 
// D:\Programs\zig-windows-x86_64-0.9.1\lib\include\stdarg.h:27:9 pub const RL_MALLOC = @compileError("unable to translate macro: undefined identifier `malloc`"); // .\libs\raylib-zig\raylib\src\raylib.h:114:13 pub const RL_CALLOC = @compileError("unable to translate macro: undefined identifier `calloc`"); // .\libs\raylib-zig\raylib\src\raylib.h:117:13 pub const RL_REALLOC = @compileError("unable to translate macro: undefined identifier `realloc`"); // .\libs\raylib-zig\raylib\src\raylib.h:120:13 pub const RL_FREE = @compileError("unable to translate macro: undefined identifier `free`"); // .\libs\raylib-zig\raylib\src\raylib.h:123:13 pub const RAYGUI_MALLOC = @compileError("unable to translate macro: undefined identifier `malloc`"); // .\src\raygui\src\raygui.c:225:13 pub const RAYGUI_CALLOC = @compileError("unable to translate macro: undefined identifier `calloc`"); // .\src\raygui\src\raygui.c:228:13 pub const RAYGUI_FREE = @compileError("unable to translate macro: undefined identifier `free`"); // .\src\raygui\src\raygui.c:231:13 pub const RAYGUI_LOG = @compileError("unable to translate C expr: expected ')'"); // .\src\raygui\src\raygui.c:238:11 pub const __llvm__ = @as(c_int, 1); pub const __clang__ = @as(c_int, 1); pub const __clang_major__ = @as(c_int, 13); pub const __clang_minor__ = @as(c_int, 0); pub const __clang_patchlevel__ = @as(c_int, 1); pub const __clang_version__ = "13.0.1 (git@github.com:ziglang/zig-bootstrap.git 74211dd7f7e7174a2027641dfcfdb3fc5df62f0c)"; pub const __GNUC__ = @as(c_int, 4); pub const __GNUC_MINOR__ = @as(c_int, 2); pub const __GNUC_PATCHLEVEL__ = @as(c_int, 1); pub const __GXX_ABI_VERSION = @as(c_int, 1002); pub const __ATOMIC_RELAXED = @as(c_int, 0); pub const __ATOMIC_CONSUME = @as(c_int, 1); pub const __ATOMIC_ACQUIRE = @as(c_int, 2); pub const __ATOMIC_RELEASE = @as(c_int, 3); pub const __ATOMIC_ACQ_REL = @as(c_int, 4); pub const __ATOMIC_SEQ_CST = @as(c_int, 5); pub const __OPENCL_MEMORY_SCOPE_WORK_ITEM = @as(c_int, 0); 
pub const __OPENCL_MEMORY_SCOPE_WORK_GROUP = @as(c_int, 1); pub const __OPENCL_MEMORY_SCOPE_DEVICE = @as(c_int, 2); pub const __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES = @as(c_int, 3); pub const __OPENCL_MEMORY_SCOPE_SUB_GROUP = @as(c_int, 4); pub const __PRAGMA_REDEFINE_EXTNAME = @as(c_int, 1); pub const __VERSION__ = "Clang 13.0.1 (git@github.com:ziglang/zig-bootstrap.git 74211dd7f7e7174a2027641dfcfdb3fc5df62f0c)"; pub const __OBJC_BOOL_IS_BOOL = @as(c_int, 0); pub const __CONSTANT_CFSTRINGS__ = @as(c_int, 1); pub const __SEH__ = @as(c_int, 1); pub const __clang_literal_encoding__ = "UTF-8"; pub const __clang_wide_literal_encoding__ = "UTF-16"; pub const __OPTIMIZE__ = @as(c_int, 1); pub const __ORDER_LITTLE_ENDIAN__ = @as(c_int, 1234); pub const __ORDER_BIG_ENDIAN__ = @as(c_int, 4321); pub const __ORDER_PDP_ENDIAN__ = @as(c_int, 3412); pub const __BYTE_ORDER__ = __ORDER_LITTLE_ENDIAN__; pub const __LITTLE_ENDIAN__ = @as(c_int, 1); pub const __CHAR_BIT__ = @as(c_int, 8); pub const __SCHAR_MAX__ = @as(c_int, 127); pub const __SHRT_MAX__ = @as(c_int, 32767); pub const __INT_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal); pub const __LONG_MAX__ = @as(c_long, 2147483647); pub const __LONG_LONG_MAX__ = @as(c_longlong, 9223372036854775807); pub const __WCHAR_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal); pub const __WINT_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal); pub const __INTMAX_MAX__ = @as(c_longlong, 9223372036854775807); pub const __SIZE_MAX__ = @as(c_ulonglong, 18446744073709551615); pub const __UINTMAX_MAX__ = @as(c_ulonglong, 18446744073709551615); pub const __PTRDIFF_MAX__ = @as(c_longlong, 9223372036854775807); pub const __INTPTR_MAX__ = @as(c_longlong, 9223372036854775807); pub const __UINTPTR_MAX__ = @as(c_ulonglong, 18446744073709551615); pub const __SIZEOF_DOUBLE__ = @as(c_int, 8); pub const __SIZEOF_FLOAT__ = @as(c_int, 4); pub const 
__SIZEOF_INT__ = @as(c_int, 4); pub const __SIZEOF_LONG__ = @as(c_int, 4); pub const __SIZEOF_LONG_DOUBLE__ = @as(c_int, 16); pub const __SIZEOF_LONG_LONG__ = @as(c_int, 8); pub const __SIZEOF_POINTER__ = @as(c_int, 8); pub const __SIZEOF_SHORT__ = @as(c_int, 2); pub const __SIZEOF_PTRDIFF_T__ = @as(c_int, 8); pub const __SIZEOF_SIZE_T__ = @as(c_int, 8); pub const __SIZEOF_WCHAR_T__ = @as(c_int, 2); pub const __SIZEOF_WINT_T__ = @as(c_int, 2); pub const __SIZEOF_INT128__ = @as(c_int, 16); pub const __INTMAX_TYPE__ = c_longlong; pub const __INTMAX_FMTd__ = "lld"; pub const __INTMAX_FMTi__ = "lli"; pub const __UINTMAX_TYPE__ = c_ulonglong; pub const __UINTMAX_FMTo__ = "llo"; pub const __UINTMAX_FMTu__ = "llu"; pub const __UINTMAX_FMTx__ = "llx"; pub const __UINTMAX_FMTX__ = "llX"; pub const __INTMAX_WIDTH__ = @as(c_int, 64); pub const __PTRDIFF_TYPE__ = c_longlong; pub const __PTRDIFF_FMTd__ = "lld"; pub const __PTRDIFF_FMTi__ = "lli"; pub const __PTRDIFF_WIDTH__ = @as(c_int, 64); pub const __INTPTR_TYPE__ = c_longlong; pub const __INTPTR_FMTd__ = "lld"; pub const __INTPTR_FMTi__ = "lli"; pub const __INTPTR_WIDTH__ = @as(c_int, 64); pub const __SIZE_TYPE__ = c_ulonglong; pub const __SIZE_FMTo__ = "llo"; pub const __SIZE_FMTu__ = "llu"; pub const __SIZE_FMTx__ = "llx"; pub const __SIZE_FMTX__ = "llX"; pub const __SIZE_WIDTH__ = @as(c_int, 64); pub const __WCHAR_TYPE__ = c_ushort; pub const __WCHAR_WIDTH__ = @as(c_int, 16); pub const __WINT_TYPE__ = c_ushort; pub const __WINT_WIDTH__ = @as(c_int, 16); pub const __SIG_ATOMIC_WIDTH__ = @as(c_int, 32); pub const __SIG_ATOMIC_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal); pub const __CHAR16_TYPE__ = c_ushort; pub const __CHAR32_TYPE__ = c_uint; pub const __UINTMAX_WIDTH__ = @as(c_int, 64); pub const __UINTPTR_TYPE__ = c_ulonglong; pub const __UINTPTR_FMTo__ = "llo"; pub const __UINTPTR_FMTu__ = "llu"; pub const __UINTPTR_FMTx__ = "llx"; pub const __UINTPTR_FMTX__ = "llX"; pub const 
__UINTPTR_WIDTH__ = @as(c_int, 64); pub const __FLT_DENORM_MIN__ = @as(f32, 1.40129846e-45); pub const __FLT_HAS_DENORM__ = @as(c_int, 1); pub const __FLT_DIG__ = @as(c_int, 6); pub const __FLT_DECIMAL_DIG__ = @as(c_int, 9); pub const __FLT_EPSILON__ = @as(f32, 1.19209290e-7); pub const __FLT_HAS_INFINITY__ = @as(c_int, 1); pub const __FLT_HAS_QUIET_NAN__ = @as(c_int, 1); pub const __FLT_MANT_DIG__ = @as(c_int, 24); pub const __FLT_MAX_10_EXP__ = @as(c_int, 38); pub const __FLT_MAX_EXP__ = @as(c_int, 128); pub const __FLT_MAX__ = @as(f32, 3.40282347e+38); pub const __FLT_MIN_10_EXP__ = -@as(c_int, 37); pub const __FLT_MIN_EXP__ = -@as(c_int, 125); pub const __FLT_MIN__ = @as(f32, 1.17549435e-38); pub const __DBL_DENORM_MIN__ = 4.9406564584124654e-324; pub const __DBL_HAS_DENORM__ = @as(c_int, 1); pub const __DBL_DIG__ = @as(c_int, 15); pub const __DBL_DECIMAL_DIG__ = @as(c_int, 17); pub const __DBL_EPSILON__ = 2.2204460492503131e-16; pub const __DBL_HAS_INFINITY__ = @as(c_int, 1); pub const __DBL_HAS_QUIET_NAN__ = @as(c_int, 1); pub const __DBL_MANT_DIG__ = @as(c_int, 53); pub const __DBL_MAX_10_EXP__ = @as(c_int, 308); pub const __DBL_MAX_EXP__ = @as(c_int, 1024); pub const __DBL_MAX__ = 1.7976931348623157e+308; pub const __DBL_MIN_10_EXP__ = -@as(c_int, 307); pub const __DBL_MIN_EXP__ = -@as(c_int, 1021); pub const __DBL_MIN__ = 2.2250738585072014e-308; pub const __LDBL_DENORM_MIN__ = @as(c_longdouble, 3.64519953188247460253e-4951); pub const __LDBL_HAS_DENORM__ = @as(c_int, 1); pub const __LDBL_DIG__ = @as(c_int, 18); pub const __LDBL_DECIMAL_DIG__ = @as(c_int, 21); pub const __LDBL_EPSILON__ = @as(c_longdouble, 1.08420217248550443401e-19); pub const __LDBL_HAS_INFINITY__ = @as(c_int, 1); pub const __LDBL_HAS_QUIET_NAN__ = @as(c_int, 1); pub const __LDBL_MANT_DIG__ = @as(c_int, 64); pub const __LDBL_MAX_10_EXP__ = @as(c_int, 4932); pub const __LDBL_MAX_EXP__ = @as(c_int, 16384); pub const __LDBL_MAX__ = @as(c_longdouble, 1.18973149535723176502e+4932); pub const 
__LDBL_MIN_10_EXP__ = -@as(c_int, 4931); pub const __LDBL_MIN_EXP__ = -@as(c_int, 16381); pub const __LDBL_MIN__ = @as(c_longdouble, 3.36210314311209350626e-4932); pub const __POINTER_WIDTH__ = @as(c_int, 64); pub const __BIGGEST_ALIGNMENT__ = @as(c_int, 16); pub const __WCHAR_UNSIGNED__ = @as(c_int, 1); pub const __WINT_UNSIGNED__ = @as(c_int, 1); pub const __INT8_TYPE__ = i8; pub const __INT8_FMTd__ = "hhd"; pub const __INT8_FMTi__ = "hhi"; pub const __INT8_C_SUFFIX__ = ""; pub const __INT16_TYPE__ = c_short; pub const __INT16_FMTd__ = "hd"; pub const __INT16_FMTi__ = "hi"; pub const __INT16_C_SUFFIX__ = ""; pub const __INT32_TYPE__ = c_int; pub const __INT32_FMTd__ = "d"; pub const __INT32_FMTi__ = "i"; pub const __INT32_C_SUFFIX__ = ""; pub const __INT64_TYPE__ = c_longlong; pub const __INT64_FMTd__ = "lld"; pub const __INT64_FMTi__ = "lli"; pub const __UINT8_TYPE__ = u8; pub const __UINT8_FMTo__ = "hho"; pub const __UINT8_FMTu__ = "hhu"; pub const __UINT8_FMTx__ = "hhx"; pub const __UINT8_FMTX__ = "hhX"; pub const __UINT8_C_SUFFIX__ = ""; pub const __UINT8_MAX__ = @as(c_int, 255); pub const __INT8_MAX__ = @as(c_int, 127); pub const __UINT16_TYPE__ = c_ushort; pub const __UINT16_FMTo__ = "ho"; pub const __UINT16_FMTu__ = "hu"; pub const __UINT16_FMTx__ = "hx"; pub const __UINT16_FMTX__ = "hX"; pub const __UINT16_C_SUFFIX__ = ""; pub const __UINT16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal); pub const __INT16_MAX__ = @as(c_int, 32767); pub const __UINT32_TYPE__ = c_uint; pub const __UINT32_FMTo__ = "o"; pub const __UINT32_FMTu__ = "u"; pub const __UINT32_FMTx__ = "x"; pub const __UINT32_FMTX__ = "X"; pub const __UINT32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal); pub const __INT32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal); pub const __UINT64_TYPE__ = c_ulonglong; pub const __UINT64_FMTo__ = "llo"; pub const __UINT64_FMTu__ = "llu"; 
pub const __UINT64_FMTx__ = "llx"; pub const __UINT64_FMTX__ = "llX"; pub const __UINT64_MAX__ = @as(c_ulonglong, 18446744073709551615); pub const __INT64_MAX__ = @as(c_longlong, 9223372036854775807); pub const __INT_LEAST8_TYPE__ = i8; pub const __INT_LEAST8_MAX__ = @as(c_int, 127); pub const __INT_LEAST8_FMTd__ = "hhd"; pub const __INT_LEAST8_FMTi__ = "hhi"; pub const __UINT_LEAST8_TYPE__ = u8; pub const __UINT_LEAST8_MAX__ = @as(c_int, 255); pub const __UINT_LEAST8_FMTo__ = "hho"; pub const __UINT_LEAST8_FMTu__ = "hhu"; pub const __UINT_LEAST8_FMTx__ = "hhx"; pub const __UINT_LEAST8_FMTX__ = "hhX"; pub const __INT_LEAST16_TYPE__ = c_short; pub const __INT_LEAST16_MAX__ = @as(c_int, 32767); pub const __INT_LEAST16_FMTd__ = "hd"; pub const __INT_LEAST16_FMTi__ = "hi"; pub const __UINT_LEAST16_TYPE__ = c_ushort; pub const __UINT_LEAST16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal); pub const __UINT_LEAST16_FMTo__ = "ho"; pub const __UINT_LEAST16_FMTu__ = "hu"; pub const __UINT_LEAST16_FMTx__ = "hx"; pub const __UINT_LEAST16_FMTX__ = "hX"; pub const __INT_LEAST32_TYPE__ = c_int; pub const __INT_LEAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal); pub const __INT_LEAST32_FMTd__ = "d"; pub const __INT_LEAST32_FMTi__ = "i"; pub const __UINT_LEAST32_TYPE__ = c_uint; pub const __UINT_LEAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal); pub const __UINT_LEAST32_FMTo__ = "o"; pub const __UINT_LEAST32_FMTu__ = "u"; pub const __UINT_LEAST32_FMTx__ = "x"; pub const __UINT_LEAST32_FMTX__ = "X"; pub const __INT_LEAST64_TYPE__ = c_longlong; pub const __INT_LEAST64_MAX__ = @as(c_longlong, 9223372036854775807); pub const __INT_LEAST64_FMTd__ = "lld"; pub const __INT_LEAST64_FMTi__ = "lli"; pub const __UINT_LEAST64_TYPE__ = c_ulonglong; pub const __UINT_LEAST64_MAX__ = @as(c_ulonglong, 18446744073709551615); pub const __UINT_LEAST64_FMTo__ = "llo"; pub 
const __UINT_LEAST64_FMTu__ = "llu"; pub const __UINT_LEAST64_FMTx__ = "llx"; pub const __UINT_LEAST64_FMTX__ = "llX"; pub const __INT_FAST8_TYPE__ = i8; pub const __INT_FAST8_MAX__ = @as(c_int, 127); pub const __INT_FAST8_FMTd__ = "hhd"; pub const __INT_FAST8_FMTi__ = "hhi"; pub const __UINT_FAST8_TYPE__ = u8; pub const __UINT_FAST8_MAX__ = @as(c_int, 255); pub const __UINT_FAST8_FMTo__ = "hho"; pub const __UINT_FAST8_FMTu__ = "hhu"; pub const __UINT_FAST8_FMTx__ = "hhx"; pub const __UINT_FAST8_FMTX__ = "hhX"; pub const __INT_FAST16_TYPE__ = c_short; pub const __INT_FAST16_MAX__ = @as(c_int, 32767); pub const __INT_FAST16_FMTd__ = "hd"; pub const __INT_FAST16_FMTi__ = "hi"; pub const __UINT_FAST16_TYPE__ = c_ushort; pub const __UINT_FAST16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal); pub const __UINT_FAST16_FMTo__ = "ho"; pub const __UINT_FAST16_FMTu__ = "hu"; pub const __UINT_FAST16_FMTx__ = "hx"; pub const __UINT_FAST16_FMTX__ = "hX"; pub const __INT_FAST32_TYPE__ = c_int; pub const __INT_FAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal); pub const __INT_FAST32_FMTd__ = "d"; pub const __INT_FAST32_FMTi__ = "i"; pub const __UINT_FAST32_TYPE__ = c_uint; pub const __UINT_FAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal); pub const __UINT_FAST32_FMTo__ = "o"; pub const __UINT_FAST32_FMTu__ = "u"; pub const __UINT_FAST32_FMTx__ = "x"; pub const __UINT_FAST32_FMTX__ = "X"; pub const __INT_FAST64_TYPE__ = c_longlong; pub const __INT_FAST64_MAX__ = @as(c_longlong, 9223372036854775807); pub const __INT_FAST64_FMTd__ = "lld"; pub const __INT_FAST64_FMTi__ = "lli"; pub const __UINT_FAST64_TYPE__ = c_ulonglong; pub const __UINT_FAST64_MAX__ = @as(c_ulonglong, 18446744073709551615); pub const __UINT_FAST64_FMTo__ = "llo"; pub const __UINT_FAST64_FMTu__ = "llu"; pub const __UINT_FAST64_FMTx__ = "llx"; pub const __UINT_FAST64_FMTX__ = "llX"; pub 
const __USER_LABEL_PREFIX__ = ""; pub const __FINITE_MATH_ONLY__ = @as(c_int, 0); pub const __GNUC_STDC_INLINE__ = @as(c_int, 1); pub const __GCC_ATOMIC_TEST_AND_SET_TRUEVAL = @as(c_int, 1); pub const __CLANG_ATOMIC_BOOL_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_CHAR_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_CHAR16_T_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_CHAR32_T_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_WCHAR_T_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_SHORT_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_INT_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_LONG_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_LLONG_LOCK_FREE = @as(c_int, 2); pub const __CLANG_ATOMIC_POINTER_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_BOOL_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_CHAR_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_CHAR16_T_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_CHAR32_T_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_WCHAR_T_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_SHORT_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_INT_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_LONG_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_LLONG_LOCK_FREE = @as(c_int, 2); pub const __GCC_ATOMIC_POINTER_LOCK_FREE = @as(c_int, 2); pub const __PIC__ = @as(c_int, 2); pub const __pic__ = @as(c_int, 2); pub const __FLT_EVAL_METHOD__ = @as(c_int, 0); pub const __FLT_RADIX__ = @as(c_int, 2); pub const __DECIMAL_DIG__ = __LDBL_DECIMAL_DIG__; pub const __GCC_ASM_FLAG_OUTPUTS__ = @as(c_int, 1); pub const __code_model_small__ = @as(c_int, 1); pub const __amd64__ = @as(c_int, 1); pub const __amd64 = @as(c_int, 1); pub const __x86_64 = @as(c_int, 1); pub const __x86_64__ = @as(c_int, 1); pub const __SEG_GS = @as(c_int, 1); pub const __SEG_FS = @as(c_int, 1); pub const __znver2 = @as(c_int, 1); pub const __znver2__ = @as(c_int, 1); pub const __tune_znver2__ = @as(c_int, 1); pub const 
__REGISTER_PREFIX__ = ""; pub const __NO_MATH_INLINES = @as(c_int, 1); pub const __AES__ = @as(c_int, 1); pub const __PCLMUL__ = @as(c_int, 1); pub const __LAHF_SAHF__ = @as(c_int, 1); pub const __LZCNT__ = @as(c_int, 1); pub const __RDRND__ = @as(c_int, 1); pub const __FSGSBASE__ = @as(c_int, 1); pub const __BMI__ = @as(c_int, 1); pub const __BMI2__ = @as(c_int, 1); pub const __POPCNT__ = @as(c_int, 1); pub const __PRFCHW__ = @as(c_int, 1); pub const __RDSEED__ = @as(c_int, 1); pub const __ADX__ = @as(c_int, 1); pub const __MWAITX__ = @as(c_int, 1); pub const __MOVBE__ = @as(c_int, 1); pub const __SSE4A__ = @as(c_int, 1); pub const __FMA__ = @as(c_int, 1); pub const __F16C__ = @as(c_int, 1); pub const __SHA__ = @as(c_int, 1); pub const __FXSR__ = @as(c_int, 1); pub const __XSAVE__ = @as(c_int, 1); pub const __XSAVEOPT__ = @as(c_int, 1); pub const __XSAVEC__ = @as(c_int, 1); pub const __XSAVES__ = @as(c_int, 1); pub const __CLFLUSHOPT__ = @as(c_int, 1); pub const __CLWB__ = @as(c_int, 1); pub const __WBNOINVD__ = @as(c_int, 1); pub const __CLZERO__ = @as(c_int, 1); pub const __RDPID__ = @as(c_int, 1); pub const __AVX2__ = @as(c_int, 1); pub const __AVX__ = @as(c_int, 1); pub const __SSE4_2__ = @as(c_int, 1); pub const __SSE4_1__ = @as(c_int, 1); pub const __SSSE3__ = @as(c_int, 1); pub const __SSE3__ = @as(c_int, 1); pub const __SSE2__ = @as(c_int, 1); pub const __SSE2_MATH__ = @as(c_int, 1); pub const __SSE__ = @as(c_int, 1); pub const __SSE_MATH__ = @as(c_int, 1); pub const __MMX__ = @as(c_int, 1); pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 = @as(c_int, 1); pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 = @as(c_int, 1); pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 = @as(c_int, 1); pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 = @as(c_int, 1); pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 = @as(c_int, 1); pub const __SIZEOF_FLOAT128__ = @as(c_int, 16); pub const _WIN32 = @as(c_int, 1); pub const _WIN64 = @as(c_int, 1); pub const WIN32 = @as(c_int, 1); pub const 
__WIN32 = @as(c_int, 1); pub const __WIN32__ = @as(c_int, 1); pub const WINNT = @as(c_int, 1); pub const __WINNT = @as(c_int, 1); pub const __WINNT__ = @as(c_int, 1); pub const WIN64 = @as(c_int, 1); pub const __WIN64 = @as(c_int, 1); pub const __WIN64__ = @as(c_int, 1); pub const __MINGW64__ = @as(c_int, 1); pub const __MSVCRT__ = @as(c_int, 1); pub const __MINGW32__ = @as(c_int, 1); pub const __STDC__ = @as(c_int, 1); pub const __STDC_HOSTED__ = @as(c_int, 1); pub const __STDC_VERSION__ = @as(c_long, 201710); pub const __STDC_UTF_16__ = @as(c_int, 1); pub const __STDC_UTF_32__ = @as(c_int, 1); pub const _DEBUG = @as(c_int, 1); pub const RAYGUI_H = ""; pub const RAYGUI_VERSION = "3.2"; pub const RAYLIB_H = ""; pub const __STDARG_H = ""; pub const _VA_LIST = ""; pub const __GNUC_VA_LIST = @as(c_int, 1); pub const RAYLIB_VERSION = "4.1-dev"; pub const RLAPI = ""; pub const PI = @as(f32, 3.14159265358979323846); pub const DEG2RAD = PI / @as(f32, 180.0); pub const RAD2DEG = @as(f32, 180.0) / PI; pub inline fn CLITERAL(@"type": anytype) @TypeOf(@"type") { return @"type"; } pub const RL_COLOR_TYPE = ""; pub const RL_RECTANGLE_TYPE = ""; pub const RL_VECTOR2_TYPE = ""; pub const RL_VECTOR3_TYPE = ""; pub const RL_VECTOR4_TYPE = ""; pub const RL_QUATERNION_TYPE = ""; pub const RL_MATRIX_TYPE = ""; pub const LIGHTGRAY = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 200), @as(c_int, 200), @as(c_int, 200), @as(c_int, 255) }); pub const GRAY = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 130), @as(c_int, 130), @as(c_int, 130), @as(c_int, 255) }); pub const DARKGRAY = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 80), @as(c_int, 80), @as(c_int, 80), @as(c_int, 255) }); pub const YELLOW = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 253), @as(c_int, 249), @as(c_int, 0), @as(c_int, 255) }); pub const GOLD = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 255), @as(c_int, 203), @as(c_int, 0), @as(c_int, 255) }); pub 
const ORANGE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 255), @as(c_int, 161), @as(c_int, 0), @as(c_int, 255) }); pub const PINK = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 255), @as(c_int, 109), @as(c_int, 194), @as(c_int, 255) }); pub const RED = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 230), @as(c_int, 41), @as(c_int, 55), @as(c_int, 255) }); pub const MAROON = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 190), @as(c_int, 33), @as(c_int, 55), @as(c_int, 255) }); pub const GREEN = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 0), @as(c_int, 228), @as(c_int, 48), @as(c_int, 255) }); pub const LIME = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 0), @as(c_int, 158), @as(c_int, 47), @as(c_int, 255) }); pub const DARKGREEN = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 0), @as(c_int, 117), @as(c_int, 44), @as(c_int, 255) }); pub const SKYBLUE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 102), @as(c_int, 191), @as(c_int, 255), @as(c_int, 255) }); pub const BLUE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 0), @as(c_int, 121), @as(c_int, 241), @as(c_int, 255) }); pub const DARKBLUE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 0), @as(c_int, 82), @as(c_int, 172), @as(c_int, 255) }); pub const PURPLE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 200), @as(c_int, 122), @as(c_int, 255), @as(c_int, 255) }); pub const VIOLET = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 135), @as(c_int, 60), @as(c_int, 190), @as(c_int, 255) }); pub const DARKPURPLE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 112), @as(c_int, 31), @as(c_int, 126), @as(c_int, 255) }); pub const BEIGE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 211), @as(c_int, 176), @as(c_int, 131), @as(c_int, 255) }); pub const BROWN = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 127), @as(c_int, 
106), @as(c_int, 79), @as(c_int, 255) }); pub const DARKBROWN = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 76), @as(c_int, 63), @as(c_int, 47), @as(c_int, 255) }); pub const WHITE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 255), @as(c_int, 255), @as(c_int, 255), @as(c_int, 255) }); pub const BLACK = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 0), @as(c_int, 0), @as(c_int, 0), @as(c_int, 255) }); pub const BLANK = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 0), @as(c_int, 0), @as(c_int, 0), @as(c_int, 0) }); pub const MAGENTA = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 255), @as(c_int, 0), @as(c_int, 255), @as(c_int, 255) }); pub const RAYWHITE = @import("std").mem.zeroInit(CLITERAL(Color), .{ @as(c_int, 245), @as(c_int, 245), @as(c_int, 245), @as(c_int, 255) }); pub const __STDBOOL_H = ""; pub const @"bool" = bool; pub const @"true" = @as(c_int, 1); pub const @"false" = @as(c_int, 0); pub const __bool_true_false_are_defined = @as(c_int, 1); pub const MOUSE_LEFT_BUTTON = MOUSE_BUTTON_LEFT; pub const MOUSE_RIGHT_BUTTON = MOUSE_BUTTON_RIGHT; pub const MOUSE_MIDDLE_BUTTON = MOUSE_BUTTON_MIDDLE; pub const MATERIAL_MAP_DIFFUSE = MATERIAL_MAP_ALBEDO; pub const MATERIAL_MAP_SPECULAR = MATERIAL_MAP_METALNESS; pub const SHADER_LOC_MAP_DIFFUSE = SHADER_LOC_MAP_ALBEDO; pub const SHADER_LOC_MAP_SPECULAR = SHADER_LOC_MAP_METALNESS; pub const RAYGUIAPI = ""; pub const RAYGUI_SUPPORT_LOG_INFO = ""; pub const SCROLLBAR_LEFT_SIDE = @as(c_int, 0); pub const SCROLLBAR_RIGHT_SIDE = @as(c_int, 1);
https://raw.githubusercontent.com/rcmagic/ZigFightingGame/98b84bb20998969fa0e1a4ad7ad3ea91ad634021/src/raygui.zig
const std = @import("std");
const common = @import("../common.zig");
const typed = @import("typed");
const rangeArrayList = @import("../util.zig").rangeArrayList;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;

/// Create new slice filtered from `slice` using function `pred` as predicate.
/// Additionally supply some arguments to `pred` via `args`; `pred` is invoked
/// as `pred(item, args...)` and its result is used as a boolean filter.
/// Caller owns the returned slice and must free it with `allocator`.
pub fn filterSlice(
    allocator: Allocator,
    comptime pred: anytype,
    slice: []const typed.ParamType(pred, 0),
    args: anytype,
) ![]typed.ParamType(pred, 0) {
    const T = typed.ParamType(pred, 0);
    // Reserve the upper bound (every item kept) up front so appends below
    // cannot fail mid-loop.
    var filtered_list = try ArrayList(T).initCapacity(allocator, slice.len);
    // `toOwnedSlice` may allocate and fail; make sure the list is released
    // on the error path instead of leaking.
    errdefer filtered_list.deinit();
    for (slice) |item| {
        if (@call(.auto, pred, .{item} ++ args)) {
            filtered_list.appendAssumeCapacity(item);
        }
    }
    // Shrinks the allocation to the kept items; ownership moves to the caller.
    return try filtered_list.toOwnedSlice();
}

/// Create new array list filtered from `arr` using function `pred` as predicate.
/// Additionally supply some arguments to `pred`.
/// Consumer must make sure to free returned array list.
/// Create new array list filtered from `arr` using function `pred` as
/// predicate, invoked as `pred(item, args...)`.
/// Consumer must make sure to free (deinit) the returned array list.
pub fn filterArrayList(
    allocator: Allocator,
    comptime pred: anytype,
    arr: ArrayList(typed.ParamType(pred, 0)),
    args: anytype,
) !ArrayList(typed.ParamType(pred, 0)) {
    const T = typed.ParamType(pred, 0);
    // Reserve for the element count, not `arr.capacity`: the backing capacity
    // may be much larger than the number of live items and would over-allocate
    // the result. `items.len` is the true upper bound on kept elements.
    var filtered = try ArrayList(T).initCapacity(allocator, arr.items.len);
    for (arr.items) |item| {
        if (@call(.auto, pred, .{item} ++ args)) {
            // Cannot fail: capacity was reserved above.
            filtered.appendAssumeCapacity(item);
        }
    }
    return filtered;
}

const CommonPredicates = common.CommonPredicates;

const Point2D = struct {
    x: i32,
    y: i32,
};

test "test filter on i32 slice" {
    const slice = [_]i32{ 1, 2, 3, 4, 5 };
    const allocator = testing.allocator;
    const even = try filterSlice(
        allocator,
        CommonPredicates.even(i32),
        &slice,
        .{},
    );
    defer allocator.free(even);

    try testing.expectEqualSlices(i32, even, &[_]i32{ 2, 4 });
}

test "test filter on Point2D slice" {
    const slice = [_]Point2D{ .{ .x = 2, .y = 2 }, .{ .x = 0, .y = 3 }, .{ .x = 2, .y = 4 } };
    const allocator = testing.allocator;
    const x_coord_eq_2 = try filterSlice(
        allocator,
        CommonPredicates.fieldEq(Point2D, .x),
        &slice,
        .{2},
    );
    defer allocator.free(x_coord_eq_2);

    try testing.expectEqualSlices(Point2D, x_coord_eq_2, &[_]Point2D{
        .{ .x = 2, .y = 2 },
        .{ .x = 2, .y = 4 },
    });
}

test "test filter on i32 array list" {
    const allocator = testing.allocator;
    const arr = try rangeArrayList(allocator, i32, 6);
    defer arr.deinit();

    const even = try filterArrayList(
        allocator,
        CommonPredicates.even(i32),
        arr,
        .{},
    );
    defer even.deinit();

    try testing.expectEqualSlices(i32, even.items, &[_]i32{ 0, 2, 4 });
}
https://raw.githubusercontent.com/ali-shahwali/zig-functools/1fd61180520d64de7bad7db3b8d2987559ec5da2/src/core/filter.zig
const std = @import("std");
const mem = std.mem;
const parser = @import("parser.zig");
const Parser = parser.Parser;
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;

/// AST node for the reader: either a collection of child nodes or a leaf
/// carrying the raw token text.
pub const Node = union(enum) {
    list: Coll,
    set: Coll,
    vector: Coll,
    map: Coll,
    symbol: Symbol,
    keyword: Keyword,
    number: Number,
    string: String,
    char: Char,
};

/// Payload for the collection variants (list/set/vector/map).
pub const Coll = struct {
    children: []Node,
};

// Leaf payloads. Each holds the token text verbatim as produced by the parser
// (no unescaping or normalization is done here).
pub const Symbol = struct {
    token: []u8,
};

pub const Keyword = struct {
    token: []u8,
};

pub const Number = struct {
    token: []u8,
};

pub const String = struct {
    token: []u8,
};

pub const Char = struct {
    token: []u8,
};

/// Print each node in `nodes` followed by a single space separator.
/// Write errors are swallowed deliberately: on failure a message is printed
/// to stderr and the remaining children are abandoned.
pub fn print_node_children(nodes: []Node, w: anytype) void {
    for (nodes) |node| {
        print_node(node, w) catch {
            std.debug.print("error\n", .{});
            return;
        };
        w.writeByte(' ') catch {
            return;
        };
    }
}

/// Write a textual rendering of `node` to writer `w`, recursing through
/// collections via `print_node_children`. Leaf tokens are written verbatim.
pub fn print_node(node: Node, w: anytype) !void {
    switch (node) {
        .list => |coll| {
            try w.writeByte('(');
            print_node_children(coll.children, w);
            try w.writeByte(')');
        },
        .vector => |coll| {
            try w.writeByte('[');
            print_node_children(coll.children, w);
            try w.writeByte(']');
        },
        .map => |coll| {
            try w.writeByte('{');
            print_node_children(coll.children, w);
            try w.writeByte('}');
        },
        .set => |coll| {
            // Sets render with the `#{...}` reader syntax.
            try w.writeByte('#');
            try w.writeByte('{');
            print_node_children(coll.children, w);
            try w.writeByte('}');
        },
        .keyword => |kw| {
            // No leading ':' is written here — presumably the token already
            // contains it (TODO confirm against the parser).
            // try w.writeByte(':');
            try w.writeAll(kw.token);
        },
        .number => |kw| {
            try w.writeAll(kw.token);
        },
        .symbol => |kw| {
            try w.writeAll(kw.token);
        },
        .string => |kw| {
            try w.writeAll(kw.token);
        },
        .char => |kw| {
            // Character literals render with the `\x` reader syntax.
            try w.writeByte('\\');
            try w.writeAll(kw.token);
        },
    }
}

/// In-progress node on the builder stack: a collection being filled, a
/// wrapper awaiting one child (e.g. quote), a discard marker that drops the
/// next form, or the top-level root.
const NodeBuilder = union(enum) {
    coll: CollBuilder,
    wrapper: WrapperBuilder,
    discard,
    root: RootBuilder,

    /// Start an empty collection builder of the given kind.
    pub fn of_coll(allocator: Allocator, tag: CollTag) NodeBuilder {
        return .{ .coll = .{
            .tag = tag,
            .children = ArrayList(Node).init(allocator),
        } };
    }

    /// Start a wrapper builder (consumes exactly one following form).
    pub fn of_wrapper(tag: WrapperTag) NodeBuilder {
        return .{ .wrapper = .{
            .tag = tag,
        } };
    }
};

const CollTag = enum {
    list,
    vector,
    set,
    map,
};
/// Accumulates children for one collection until its closing delimiter.
const CollBuilder = struct {
    tag: CollTag,
    children: ArrayList(Node),

    /// Finalize into a `Node` of the tagged variant; ownership of the child
    /// slice transfers to the returned node.
    pub fn build(self: *CollBuilder) !Node {
        const children = try self.children.toOwnedSlice();
        const coll = Coll{ .children = children };
        return switch (self.tag) {
            .list => .{ .list = coll },
            .vector => .{ .vector = coll },
            .set => .{ .set = coll },
            .map => .{ .map = coll },
        };
    }
};

// const MetaBuilder = struct {
//     tags: ArrayList(Node),
// };

const WrapperTag = enum {
    quote,
};

/// A prefix form (currently only quote) that wraps the next parsed node.
const WrapperBuilder = struct {
    tag: WrapperTag,

    /// Expand the wrapper around `child`, e.g. 'x => (squawk.lang/quote x).
    /// Both the symbol text and the two-element child slice are allocated
    /// with `allocator`.
    pub fn build(self: *WrapperBuilder, allocator: Allocator, child: Node) !Node {
        const presym = switch (self.tag) {
            .quote => blk: {
                var s: []u8 = try allocator.dupe(u8, "squawk.lang/quote");
                break :blk Node{ .symbol = .{ .token = s } };
            },
        };
        var children = try allocator.alloc(Node, 2);
        children[0] = presym;
        children[1] = child;
        return .{ .list = .{ .children = children } };
    }
};

/// Top-level accumulator for the forms of a whole source unit.
const RootBuilder = struct {
    children: ArrayList(Node),
};

/// Builds an AST incrementally from parser messages using an explicit stack:
/// `current` is the innermost open builder, `stack` holds its ancestors.
pub const AstBuilder = struct {
    stack: ArrayList(NodeBuilder),
    current: NodeBuilder,
    allocator: Allocator,

    pub fn init(allocator: Allocator) AstBuilder {
        return .{
            .stack = ArrayList(NodeBuilder).init(allocator),
            .current = .{ .root = .{ .children = ArrayList(Node).init(allocator) } },
            .allocator = allocator,
        };
    }

    /// Finish building and return the top-level forms.
    /// Errors with `NotAtRoot` if some collection/wrapper was never closed.
    pub fn build(self: *AstBuilder) ![]Node {
        if (self.current != .root) return error.NotAtRoot;
        return try self.current.root.children.toOwnedSlice();
    }

    /// Attach a completed node at the current position. For wrappers this
    /// consumes the wrapper (pop, wrap, re-add); for a pending discard the
    /// node is dropped entirely and the discard marker is popped.
    pub fn add_sibling(self: *AstBuilder, node: Node) !void {
        switch (self.current) {
            .root => |*root| try root.children.append(node),
            .coll => |*coll| try coll.children.append(node),
            .wrapper => |*w| {
                const wnode = try w.build(self.allocator, node);
                self.pop();
                // Recurse: the wrapped node becomes a sibling of whatever
                // enclosed the wrapper (which may itself be a wrapper/discard).
                try self.add_sibling(wnode);
            },
            .discard => {
                // #_ form: intentionally drop `node`.
                self.pop();
            },
        }
    }

    /// Open a new nested builder, saving the current one on the stack.
    pub fn push(self: *AstBuilder, node: NodeBuilder) !void {
        try self.stack.append(self.current);
        self.current = node;
    }

    /// Close the current builder, restoring its parent.
    /// Assumes the stack is non-empty (callers only pop what they pushed).
    pub fn pop(self: *AstBuilder) void {
        self.current = self.stack.pop();
    }

    /// Dispatch one parser event into the builder. Note the `try switch`:
    /// each non-`return` branch is an error-union expression and the single
    /// leading `try` propagates whichever branch was taken.
    pub fn handle_parser_message(self: *AstBuilder, msg: Parser.Message) !void {
        try switch (msg) {
            .none => return,
            .keyword => |token| add_sibling(self, .{ .keyword = .{ .token = token.text } }),
            .symbol => |token| add_sibling(self, .{ .symbol = .{ .token = token.text } }),
            .number => |token| add_sibling(self, .{ .number = .{ .token = token.text } }),
            .string => |token| add_sibling(self, .{ .string = .{ .token = token.text } }),
            .char => |token| add_sibling(self, .{ .char = .{ .token = token.text } }),
            .comment => return,
            .special_comment => return,
            .quote => {
                try push(self, NodeBuilder.of_wrapper(.quote));
            },
            // Metadata is currently ignored (see commented-out MetaBuilder).
            .meta => return,
            .coll_start => |c| push(self, NodeBuilder.of_coll(self.allocator, switch (c.variant) {
                .list => .list,
                .vector => .vector,
                .map => .map,
                .set => .set,
            })),
            .coll_end => {
                // Finalize the open collection, then attach it to its parent.
                // Assumes the parser balances start/end, so `current` is
                // `.coll` here — TODO confirm; otherwise this is a safety panic.
                const coll = try self.current.coll.build();
                self.pop();
                try self.add_sibling(coll);
            },
            .discard => self.push(.discard),
        };
    }
};
https://raw.githubusercontent.com/LuisThiamNye/squawk-native-compiler/0f822f83dd127469b3c6f9b320c7b36f2c1e7b53/src/compast.zig
// Options.zig — this file is itself a struct (file-as-struct): the fields
// below are the cookie options for a session.
const Cookie = @import("Cookie.zig");
const SameSiteOption = Cookie.SameSiteOption;

// Only transmit the cookie over secure (HTTPS) connections.
secure: bool = true,
// When true, the cookie is hidden from client-side scripts (HttpOnly flag).
http_only: bool = false,
// SameSite attribute for the cookie; defaults to `.lax`.
same_site: SameSiteOption = .lax,
https://raw.githubusercontent.com/joachimschmidt557/nochfragen/26610de2347d55bad3db5775619670a5ec855d55/backend/sessions/Options.zig
// zig fmt: off
const std = @import("std");

/// Terminal colors addressable by `Style`. `.Reset` means "no explicit
/// color"; `Gray`/`Bright*` map to the high-intensity SGR codes.
pub const Color = enum {
    Reset,
    Black, Red, Green, Yellow, Blue, Magenta, Cyan, White,
    Gray, BrightRed, BrightGreen, BrightYellow, BrightBlue,
    BrightMagenta, BrightCyan, BrightWhite
};

/// Text attribute flags, packed so the whole set fits in 4 bits.
pub const TextStyle = packed struct {
    dim: bool = false,
    bold: bool = false,
    underline: bool = false,
    italic: bool = false,

    /// True when no attribute flag is set (all 4 bits zero).
    pub fn none(self: TextStyle) bool {
        const val: u4 = @bitCast(self);
        return val == 0;
    }
};

/// A foreground/background/attribute triple that can be written to a
/// `Printer` as ANSI SGR escape sequences.
pub const Style = struct {
    fg: Color,
    bg: Color,
    mod: TextStyle,

    /// Emit SGR 0: clear all colors and attributes.
    pub fn reset(printer: Printer) !void {
        try printer.print("\x1b[0m", .{});
    }

    /// Emit the escape sequences for this style. A reset is emitted first
    /// whenever either color is `.Reset` or no attribute is set; colors that
    /// are `.Reset` then fall through the `else` arms and emit nothing.
    pub fn set(self: *const Style, printer: Printer) !void {
        if(self.fg == .Reset or self.bg == .Reset or self.mod.none()) {
            try reset(printer);
        }
        // Attribute codes: 2=dim, 1=bold, 3=italic, 4=underline.
        if(self.mod.dim) {
            try printer.print("\x1b[2m", .{});
        }
        if(self.mod.bold) {
            try printer.print("\x1b[1m", .{});
        }
        if(self.mod.italic) {
            try printer.print("\x1b[3m", .{});
        }
        if(self.mod.underline) {
            try printer.print("\x1b[4m", .{});
        }
        // Background: SGR 40-47 (normal) and 100-107 (bright).
        switch(self.bg) {
            .Black => try printer.print("\x1b[40m", .{}),
            .Red => try printer.print("\x1b[41m", .{}),
            .Green => try printer.print("\x1b[42m", .{}),
            .Yellow => try printer.print("\x1b[43m", .{}),
            .Blue => try printer.print("\x1b[44m", .{}),
            .Magenta => try printer.print("\x1b[45m", .{}),
            .Cyan => try printer.print("\x1b[46m", .{}),
            .White => try printer.print("\x1b[47m", .{}),
            .Gray => try printer.print("\x1b[100m", .{}),
            .BrightRed => try printer.print("\x1b[101m", .{}),
            .BrightGreen => try printer.print("\x1b[102m", .{}),
            .BrightYellow => try printer.print("\x1b[103m", .{}),
            .BrightBlue => try printer.print("\x1b[104m", .{}),
            .BrightMagenta => try printer.print("\x1b[105m", .{}),
            .BrightCyan => try printer.print("\x1b[106m", .{}),
            .BrightWhite => try printer.print("\x1b[107m", .{}),
            else => {},
        }
        // Foreground: SGR 30-37 (normal) and 90-97 (bright).
        switch(self.fg) {
            .Black => try printer.print("\x1b[30m", .{}),
            .Red => try printer.print("\x1b[31m", .{}),
            .Green => try printer.print("\x1b[32m", .{}),
            .Yellow => try printer.print("\x1b[33m", .{}),
            .Blue => try printer.print("\x1b[34m", .{}),
            .Magenta => try printer.print("\x1b[35m", .{}),
            .Cyan => try printer.print("\x1b[36m", .{}),
            .White => try printer.print("\x1b[37m", .{}),
            .Gray => try printer.print("\x1b[90m", .{}),
            .BrightRed => try printer.print("\x1b[91m", .{}),
            .BrightGreen => try printer.print("\x1b[92m", .{}),
            .BrightYellow => try printer.print("\x1b[93m", .{}),
            .BrightBlue => try printer.print("\x1b[94m", .{}),
            .BrightMagenta => try printer.print("\x1b[95m", .{}),
            .BrightCyan => try printer.print("\x1b[96m", .{}),
            .BrightWhite => try printer.print("\x1b[97m", .{}),
            else => {},
        }
    }
};

/// Heap-allocated state for a file-backed printer (e.g. stdout).
const FilePrinterData = struct {
    alloc: std.mem.Allocator,
    file: std.fs.File,
    bufferWriter: std.io.BufferedWriter(4096, std.fs.File.Writer),
};

/// Heap-allocated state for an in-memory printer.
const ArrayPrinterData = struct {
    array: std.ArrayList(u8),
    bufferWriter: std.io.BufferedWriter(4096, std.ArrayList(u8).Writer),
    alloc: std.mem.Allocator,
};

// An adapter for printing either to an ArrayList or to a File like stdout.
pub const Printer = union(enum) {
    file: *FilePrinterData,
    array: *ArrayPrinterData,
    debug: bool,

    /// Create a buffered printer over stdout. `deinit` releases the state
    /// (note: it does not flush — call `flush` first if output matters).
    pub fn stdout(alloc: std.mem.Allocator) !Printer {
        var f = try alloc.create(FilePrinterData);
        f.alloc = alloc;
        f.file = std.io.getStdOut();
        f.bufferWriter = std.io.bufferedWriter(f.file.writer());
        return .{.file = f};
    }

    /// Create a printer that accumulates output in a heap ArrayList.
    pub fn memory(alloc: std.mem.Allocator) !Printer {
        var a = try alloc.create(ArrayPrinterData);
        a.alloc = alloc;
        a.array = std.ArrayList(u8).init(alloc);
        a.bufferWriter = std.io.bufferedWriter(a.array.writer());
        return .{.array = a};
    }

    /// Create a printer that forwards everything to std.debug.print (stderr).
    pub fn debug() Printer {
        return .{ .debug = true };
    }

    /// Release the heap state created by `stdout`/`memory`. No-op for debug.
    pub fn deinit(self: *Printer) void {
        switch(self.*) {
            .array => |arr| {
                arr.array.deinit();
                arr.alloc.destroy(self.array);
            },
            .file => |f| {
                // The file handle itself (stdout) is not closed here.
                f.alloc.destroy(self.file);
            },
            else => {}
        }
    }

    /// Format and write to the underlying sink (buffered for file/array).
    pub fn print(self: *const Printer, comptime format: []const u8, args: anytype) anyerror!void {
        switch(self.*) {
            .array => |_| try self.array.bufferWriter.writer().print(format, args),
            .file => |_| try self.file.bufferWriter.writer().print(format, args),
            .debug => |_| std.debug.print(format, args),
        }
    }

    /// Write `s` exactly `num` times (used for indentation padding).
    pub fn printNum(self: *const Printer, s: []const u8, num: usize) !void {
        var n = num;
        while (n > 0) {
            try self.print("{s}", .{s});
            n -= 1;
        }
    }

    /// Flush buffered output to the underlying file/array sink.
    pub fn flush(self: *const Printer) anyerror!void {
        switch(self.*) {
            .array => |_| try self.array.bufferWriter.flush(),
            .file => |_| try self.file.bufferWriter.flush(),
            .debug => {},
        }
    }

    /// Word-wrap `value` onto lines of at most `maxLineLength` columns,
    /// indenting continuation lines by `currIndentAmount` spaces.
    /// `startLineLen` is how many columns the current line already holds.
    /// Breaks at the last space when possible, otherwise hyphenates.
    /// Returns the column length of the final (unterminated) line.
    pub fn printWrapped(
        self: *Printer,
        value: []const u8,
        startLineLen: usize,
        currIndentAmount: usize,
        maxLineLength: usize
    ) !usize {
        var start: usize = 0;
        var currLineLen = startLineLen;
        var prevWordLoc: usize = 0;
        var idx: usize = 0;
        while(idx < value.len) {
            if(value[idx] == ' ') {
                // Remember the most recent break opportunity.
                prevWordLoc = idx;
            }
            else if(value[idx] == '\n') {
                // Hard newline in the input: emit the segment and re-indent.
                try self.print("{s}", .{value[start..idx]});
                try self.printNum(" ", currIndentAmount);
                currLineLen = currIndentAmount;
                start = idx + 1;
                idx += 1;
            }
            if((currLineLen + idx - start) >= maxLineLength) {
                // hyphenation here.
                if(start >= prevWordLoc) {
                    // No space seen in this segment: split mid-word with '-'.
                    try self.print("{s}-\n", .{value[start..idx-1]});
                    try self.printNum(" ", currIndentAmount);
                    currLineLen = currIndentAmount;
                    start = idx - 1;
                    prevWordLoc = start;
                }
                // word wrap break
                else {
                    try self.print("{s}\n", .{value[start..prevWordLoc]});
                    try self.printNum(" ", currIndentAmount);
                    currLineLen = currIndentAmount;
                    start = prevWordLoc + 1;
                }
            }
            idx += 1;
        }
        // TODO: add print rest of value in word wrap case.
        //if((currLineLen + left) >= maxLine)
        const left: usize = value.len - start;
        try self.print("{s}", .{value[start..start+left]});
        currLineLen += left;
        return currLineLen;
    }
};
https://raw.githubusercontent.com/srjilarious/testz/8d08bb61e2a9cb6dbf6fde9283d6b48d09ae7714/src/printer.zig
const std = @import("std");

/// Declaratively assembles the build graph for the foxwhale-pool library:
/// publishes the `foxwhale-pool` module and wires a `zig build test` step
/// that compiles and runs the unit tests in src/root.zig.
pub fn build(b: *std.Build) void {
    // Target and optimization mode come from the command line; defaults are
    // the native target and Debug.
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Expose the library root as a module that dependents can import.
    _ = b.addModule("foxwhale-pool", .{
        .root_source_file = .{ .path = "src/root.zig" },
    });

    // Build (but do not yet run) the unit-test executable.
    const unit_tests = b.addTest(.{
        .root_source_file = .{ .path = "src/root.zig" },
        .target = target,
        .optimize = optimize,
    });

    // Running the artifact is a separate step so `zig build test` triggers it.
    const run_unit_tests = b.addRunArtifact(unit_tests);
    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&run_unit_tests.step);
}
https://raw.githubusercontent.com/malcolmstill/foxwhale/bd1d32111c87846b5d265db1010fa72d173ae45f/foxwhale-pool/build.zig
const std = @import("std");

// Static backing storage for the allocator below: 1 MiB reserved at
// link time, never heap-allocated.
var buffer: [1024 * 1024]u8 = undefined;

// Globally shared fixed-buffer allocator carving allocations out of `buffer`.
// FixedBufferAllocator never touches the heap; once `buffer` is exhausted,
// allocations fail with OutOfMemory.
pub var fixed_alloc = std.heap.FixedBufferAllocator.init(&buffer);
https://raw.githubusercontent.com/1nwf/zkernel/6ee69f01b0423799f0f8dc70d0a2a682f4f5eba2/src/arch/paging/allocator.zig
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;

const root = @import("../root.zig");
const PieceKind = root.pieces.PieceKind;
const SplitMix64 = root.bags.SplitMix64;

/// Draws from a bag of N pieces without replacement. The bag is refilled with
/// all pieces evenly. If `N` is not a multiple of 7, the excess pieces will be
/// drawn randomly from a 7-bag.
pub fn NBag(comptime N: usize) type {
    return struct {
        const Self = @This();

        // `index == N` marks an empty bag, so the first `next()` refills.
        pieces: [N]PieceKind = undefined,
        index: usize = N,
        random: SplitMix64,

        pub fn init(seed: u64) Self {
            return Self{ .random = SplitMix64.init(seed) };
        }

        /// Refill the bag: shuffle a 7-bag, tile it cyclically across all N
        /// slots (so counts differ by at most one per piece kind), then
        /// shuffle the whole bag so the draw order is uniform.
        fn refill(self: *Self) void {
            var pieces: [7]PieceKind = .{ .i, .o, .t, .s, .z, .j, .l };
            const random = self.random.random();
            random.shuffle(PieceKind, &pieces);

            for (0..self.pieces.len) |i| {
                self.pieces[i] = pieces[i % 7];
            }
            random.shuffle(PieceKind, &self.pieces);
        }

        /// Returns the next piece in the bag.
        pub fn next(self: *Self) PieceKind {
            if (self.index >= self.pieces.len) {
                self.refill();
                self.index = 0;
            }
            // Advance the cursor after returning the current piece.
            defer self.index += 1;
            return self.pieces[self.index];
        }

        /// Sets the seed of the bag. The current bag will be discarded and refilled.
        pub fn setSeed(self: *Self, seed: u64) void {
            self.* = init(seed);
        }
    };
}

test "N-bag (100) randomizer" {
    var bag = NBag(100).init(42);

    var actual = std.AutoHashMap(PieceKind, i32).init(testing.allocator);
    defer actual.deinit();

    // Exhaust the bag
    for (0..100) |_| {
        const piece = bag.next();
        const count = actual.get(piece) orelse 0;
        try actual.put(piece, count + 1);
    }

    // Should have 14 or 15 of each piece (100 = 7*14 + 2, so two kinds get 15)
    const expected = [_]PieceKind{ .i, .o, .t, .s, .z, .j, .l };
    for (expected) |piece| {
        const count = actual.get(piece) orelse 0;
        try expect(count == 14 or count == 15);
    }
}
https://raw.githubusercontent.com/TemariVirus/Budget-Tetris-Engine/c2cb87a8c73c3a2284cc6185c2dbf74f58a6d3c0/src/bags/n_bag.zig
// TODO: Can we use macros to do these definition exports for wasm functions?
//
// Code generator: scans Zig source files for declarations tagged with a
// `// @wasm` comment and emits (a) a Zig file of `export fn` wrappers and
// (b) a JS object listing the generated wasm import names.
const std = @import("std");
const mem = @import("std").mem;
const ArrayList = std.ArrayList;

var gpa_allocator = std.heap.GeneralPurposeAllocator(.{}){};
var allocator = gpa_allocator.allocator();

// NOTE(review): `prefix` is not referenced anywhere in this file — confirm
// whether it is dead or consumed by generated code conventions.
const prefix: []const u8 = "game";

/// Usage: <out.zig> <out.js> [input.zig ...]
/// For each input file, every line immediately following a `// @wasm` marker
/// is either re-exported (for `pub const`) or wrapped in an `export fn`
/// whose name is `<file>_<fn>`, with a matching stub in the JS import table.
pub fn main() !void {
    const cmdline_args = (try std.process.argsAlloc(allocator))[1..];
    if (cmdline_args.len < 2) {
        std.log.err("expected at least 2 cmdline args", .{});
        // NOTE(review): std.os.exit is the pre-0.12 API (now std.process.exit).
        std.os.exit(0xff);
    }
    const out_file_zig = cmdline_args[0];
    const out_file_js = cmdline_args[1];
    const PP_file = try std.fs.cwd().createFile(out_file_zig, .{ .read = true });
    defer PP_file.close();
    const js_file = try std.fs.cwd().createFile(out_file_js, .{});
    defer js_file.close();
    const js_writer = js_file.writer();
    try js_writer.writeAll("const _WASM_IMPORTS = {\n");
    for (cmdline_args[2..]) |full_file| {
        const base_name = std.fs.path.basename(full_file);
        // Strip the ".zig" extension to get the module name.
        const file_name = base_name[0 .. base_name.len - 4];
        var file = try std.fs.cwd().openFile(full_file, .{});
        // defer file.close();
        var buf_reader = std.io.bufferedReader(file.reader());
        var in_stream = buf_reader.reader();
        // const first_line = try std.fmt.allocPrint(allocator, "{s}{s}{s}{s}{s}", .{"const ", file_name, " = @import(\"", file_name, ".zig\");\n"});
        // TODO: Update other files where you use allocPrint with this
        try PP_file.writer().print("const {s} = @import(\"root\").{0s};\n", .{file_name});
        // try PP_file.writeAll(first_line);
        var buf: [1024]u8 = undefined;
        var is_line_we_want: bool = false;
        var was_line_we_want: bool = false;
        while (try in_stream.readUntilDelimiterOrEof(&buf, '\n')) |line| {
            // std.debug.print("Line Length: {s}\n", .{line});
            // One-line lookbehind: act on the line AFTER the `// @wasm` marker.
            is_line_we_want = std.mem.startsWith(u8, line, "// @wasm");
            if (is_line_we_want) {
                was_line_we_want = true;
            } else if (was_line_we_want) {
                was_line_we_want = false;
                const decl_start = "pub const ";
                if (std.mem.startsWith(u8, line, decl_start)) {
                    // Tagged constant: just re-export it under the same name.
                    const line_at_id = line[decl_start.len..];
                    const id = line_at_id[0 .. std.mem.indexOf(u8, line_at_id, " ").?];
                    try PP_file.writer().print("pub const {s} = {s}.{0s};\n", .{id, file_name});
                    continue;
                }
                // std.debug.print("Line: {s}\n", .{line});
                // std.debug.print("Test {any}\n", .{std.mem.indexOf(u8, line, ": u16")});
                // Tagged function: emit `export fn <file>_<name>(...)`.
                const export_line = try std.fmt.allocPrint(allocator, "{s}{s}{s}", .{"export fn ", file_name, "_"});
                try PP_file.writeAll(export_line);
                const start = std.mem.indexOf(u8, line, "(").?;
                // Skip "pub fn " (7 chars) to get the function name.
                const fn_name = line[7..start];
                // std.debug.print("FN NAME: {s}\n", .{fn_name});
                try js_writer.print(" {s}_{s}(", .{file_name, fn_name});
                try PP_file.writeAll(fn_name);
                try PP_file.writeAll("(");
                const end = std.mem.indexOf(u8, line, ")").?;
                const squirelly = std.mem.indexOf(u8, line, "{").?;
                // Return type is the text between ')' and '{'; a leading '!'
                // means the wrapper must catch the error (see catch_clause).
                const return_type: struct { without_error: []const u8, has_error: bool} = blk: {
                    const return_type_part = std.mem.trimLeft(u8, line[end..squirelly], ")");
                    const return_type_full = std.mem.trimLeft(u8, return_type_part, " ");
                    if (std.mem.startsWith(u8, return_type_full, "!")) break :blk .{ .without_error = return_type_full[1..], .has_error = true };
                    break :blk .{ .without_error = return_type_full, .has_error = false };
                };
                // Copy the parameter list "name: Type, ..." into the wrapper
                // signature, collecting names for the forwarding call.
                var iter = std.mem.split(u8, line[start..end], ",");
                var param_index: u8 = 0;
                var param_list: ArrayList([]const u8) = ArrayList([]const u8).init(allocator);
                defer param_list.deinit();
                while (iter.next()) |param| {
                    var param_iter = std.mem.split(u8, param, ":");
                    var param_iter_index: u8 = 0;
                    var param_name: []const u8 = undefined;
                    var param_type: []const u8 = undefined;
                    if (param_index != 0) {
                        try PP_file.writeAll(", ");
                    }
                    while (param_iter.next()) |pi| {
                        if (param_iter_index == 0) {
                            param_name = pi;
                            param_name = std.mem.trimLeft(u8, param_name, "( ");
                            try param_list.append(param_name);
                            try PP_file.writeAll(param_name);
                        } else if (param_iter_index == 1) {
                            param_type = pi;
                            try PP_file.writeAll(":");
                            try PP_file.writeAll(param_type);
                        }
                        param_iter_index += 1;
                    }
                    param_index += 1;
                }
                try PP_file.writeAll(")");
                try PP_file.writeAll(return_type.without_error);
                try PP_file.writeAll("{\n");
                // Body: forward to the original function with the same args.
                const return_line = try std.fmt.allocPrint(allocator, "{s}{s}{s}", .{" return ", file_name, "."});
                try PP_file.writeAll(return_line);
                try PP_file.writeAll(fn_name);
                try PP_file.writeAll("(");
                if (param_list.items.len > 0) {
                    for (param_list.items, 0..) |param, i| {
                        if (i != 0) {
                            try PP_file.writeAll(", ");
                        }
                        try PP_file.writeAll(param);
                    }
                }
                // Mirror the parameter names into the JS stub signature.
                for (param_list.items, 0..) |name, i| {
                    const sep: []const u8 = if (i == 0) "" else ", ";
                    try js_writer.print("{s}{s}", .{sep, name});
                }
                try js_writer.writeAll(") {},\n");
                // Error unions cannot cross the wasm boundary, so wrap them.
                const catch_clause: []const u8 = if (return_type.has_error) " catch |e| game.uncaughtError(e)" else "";
                try PP_file.writer().print("){s};\n", .{catch_clause});
                try PP_file.writeAll("}");
                try PP_file.writeAll("\n");
            }
        }
        file.close();
    }
    try js_writer.writeAll("};\n");
}
https://raw.githubusercontent.com/spirodonfl/uwnh-remake/2a2dfd7d593e3d6f4a2ec63d48b0862bb9c4b582/scripts/output_wasm_exports.zig
const std = @import("std");
const fs = std.fs;
const Type = std.builtin.Type;

const cnf = @import("config.zig");
const tokenize = @import("fast_tokenize.zig");
// const tokenize = @import("csv/tokenize.zig");

// ============================================================================
// Utils

/// Parse one CSV field into an atomic value of type T (bool, int, float, or
/// enum). `field_name` is only used in compile errors. Returns
/// error.BadInput when the text does not parse as T.
pub inline fn parseAtomic(
    comptime T: type,
    comptime field_name: []const u8,
    input_val: []const u8,
) !T {
    switch (@typeInfo(T)) {
        .Bool => {
            if (std.mem.eql(u8, "true", input_val)) {
                return true;
            } else if (std.mem.eql(u8, "false", input_val)) {
                return false;
            } else {
                return error.BadInput;
            }
        },
        .Int => {
            // Base 0: accepts 0x/0o/0b prefixes as well as decimal.
            return std.fmt.parseInt(T, input_val, 0) catch {
                return error.BadInput;
            };
        },
        .Float => {
            return std.fmt.parseFloat(T, input_val) catch {
                return error.BadInput;
            };
        },
        .Enum => |Enum| {
            // After the loop, `i` equals the number of named variants.
            comptime var i = 0;
            inline for (Enum.fields) |EnumField| {
                if (std.mem.eql(u8, input_val, EnumField.name)) {
                    return std.enums.nameCast(T, EnumField.name);
                }
                comptime i = i + 1;
            }
            if (Enum.is_exhaustive) {
                return error.BadInput;
            } else {
                // we generate the first enum value outside of the named variants
                return @intToEnum(T, i);
            }
        },
        else => {
            @compileError("Unsupported type " ++ @typeName(T) ++ " for field " ++ field_name);
        },
    }
}

// ============================================================================
// Parser

// Want to do something that feels like a JIT
// 1. Read a schema from a file
// 2. Load a CSV file containing data that matches the schema
// 3. Print that
// I can start by doing that for a known schema and then seeing how to read the schema

// const csv_config = csv_mod.CsvConfig{
//     .col_sep = ',',
//     .row_sep = '\n',
//     .quote = '"',
// };

// Writing a CSV library that knew how to read directly into Structs would be cool
// something like
// readCSV(StructType, allocator, path) -> std.ArrayList(StructType)
// bonus points if it can be called during comptime to get a compile time array
// what is the ideal API?
// 1. Streaming so that the user can control how much memory to consume
// 2. Coerces to the types you already want
// 3. Efficient so that you can do it quickly if you want
// 4. Can read files partially

// var csv_reader = csv.Reader.init(T, allocator, file_reader);
// csv_reader.nextRow() -> ?T
// if ?T is null, we are done

const InternalParseError = error{
    // Should these be named CSV_ as to not pollute the overall error set?
    BadInput,
    MissingFields,
    ExtraFields,
    OutOfMemory,
};

// put this under CsvParser
pub const CsvParseError = tokenize.TokenizeError || InternalParseError;

// Errors from reading a File:
// 'error.MisplacedQuote' not a member of destination error set
// 'error.NoSeparatorAfterField' not a member of destination error set
// 'error.ShortBuffer' not a member of destination error set
// 'error.AccessDenied' not a member of destination error set
// 'error.BrokenPipe' not a member of destination error set
// 'error.ConnectionResetByPeer' not a member of destination error set
// 'error.ConnectionTimedOut' not a member of destination error set
// 'error.InputOutput' not a member of destination error set
// 'error.IsDir' not a member of destination error set
// 'error.NotOpenForReading' not a member of destination error set
// 'error.OperationAborted' not a member of destination error set
// 'error.SystemResources' not a member of destination error set
// 'error.Unexpected' not a member of destination error set
// 'error.WouldBlock' not a member of destination error set

/// Streaming CSV-to-struct parser. Each call to `next`/`nextInto` consumes
/// one row from `Reader` and fills one `T`. String fields are copied with
/// the supplied allocator; the caller owns that memory (an arena works well).
pub fn CsvParser(
    comptime T: type,
    comptime Reader: type,
    comptime config: cnf.CsvConfig,
) type {
    return struct {
        const Self = @This();

        const Tokenizer = tokenize.CsvTokenizer(Reader, config);

        const Fields: []const Type.StructField = switch (@typeInfo(T)) {
            .Struct => |S| S.fields,
            else => @compileError("T needs to be a struct"),
        };

        const number_of_fields: usize = Fields.len;

        allocator: std.mem.Allocator,
        reader: Reader,
        tokenizer: Tokenizer,

        // The caller has to free the allocator when it is done with everything that
        // was parsed / allocated
        pub fn init(
            allocator: std.mem.Allocator,
            reader: Reader,
        ) CsvParseError!Self {
            var tokenizer = Tokenizer{ .reader = reader };
            var self = Self{
                .reader = reader,
                .tokenizer = tokenizer,
                .allocator = allocator,
            };
            if (config.skip_first_row) {
                try self.consume_row();
            }
            return self;
        }

        /// Try to read a row and return a parsed T out of it if possible
        /// Returns null if the iterator is done
        pub fn next(self: *Self) CsvParseError!?T {
            // TODO: Who should be managing draft_struct's memory?
            var draft_struct: T = undefined;
            const maybe = try self.nextInto(&draft_struct);
            if (maybe) |_| {
                return draft_struct;
            } else {
                return null;
            }
        }

        // Try to read a row into draft_struct and re-return it it if possible
        pub fn nextInto(self: *Self, draft_struct: *T) CsvParseError!?*T {
            var fields_added: u32 = 0;
            inline for (Fields) |F| {
                const token = try self.tokenizer.next();
                // tokenize.debugToken(token);
                switch (token) {
                    .row_end => return error.MissingFields,
                    .eof => return null,
                    .field => |field| {
                        // the user wants an immutable slice
                        // we need to grab what we read, copy it somewhere it will remain valid
                        // and then give them that slice
                        const FieldInfo = @typeInfo(F.field_type);
                        switch (FieldInfo) {
                            .Void => {
                                @field(draft_struct, F.name) = {};
                            },
                            .Pointer => |info| {
                                switch (info.size) {
                                    .Slice => {
                                        if (info.child != u8) {
                                            @compileError("Slices can only be u8 and '" ++ F.name ++ "' is " ++ @typeName(info.child));
                                        } else if (info.is_const) {
                                            const mutable_slice = self.allocator.alloc(u8, field.len) catch {
                                                return error.OutOfMemory;
                                            };
                                            std.mem.copy(u8, mutable_slice, field);
                                            @field(draft_struct, F.name) = mutable_slice[0..field.len];
                                        } else {
                                            @compileError("Mutable slices are not implemented and '" ++ F.name ++ "' is a mutable slice");
                                        }
                                    },
                                    else => @compileError("Pointer not implemented yet and '" ++ F.name ++ "'' is a pointer."),
                                }
                            },
                            .Optional => |Optional| {
                                // Unwrap the optional: empty field means null.
                                // const NestedFieldType: type = Optional.child;
                                if (field.len == 0) {
                                    @field(draft_struct, F.name) = null;
                                } else {
                                    @field(draft_struct, F.name) = try parseAtomic(Optional.child, F.name, field);
                                }
                            },
                            .Union => |U| {
                                // Try each union member in declaration order;
                                // the first that parses wins.
                                var maybe_out: ?F.field_type = null;
                                inline for (U.fields) |UF| {
                                    const maybe_atomic = parseAtomic(UF.field_type, UF.name, field) catch null;
                                    if (maybe_atomic) |atomic| {
                                        maybe_out = @unionInit(F.field_type, UF.name, atomic);
                                        break; // stop when one succeeds
                                    }
                                }
                                if (maybe_out) |out| {
                                    @field(draft_struct, F.name) = out;
                                } else {
                                    return error.BadInput;
                                }
                            },
                            else => {
                                @field(draft_struct, F.name) = try parseAtomic(F.field_type, F.name, field);
                            },
                        }
                        fields_added = fields_added + 1;
                    },
                }
            }

            // consume the row_end
            const token = try self.tokenizer.next();
            switch (token) {
                .field => {
                    if (token.field.len > 0) {
                        std.debug.print("Extra fields {s}\n", .{token.field});
                        return error.ExtraFields;
                    }
                },
                .row_end => {},
                .eof => {},
            }

            // were all the fields added?
            if (fields_added == number_of_fields) {
                return draft_struct;
            } else {
                return error.MissingFields;
            }
        }

        // Discard tokens until (and including) the next row_end/eof.
        fn consume_row(self: *Self) !void {
            var token = try self.tokenizer.next();
            var continue_loop = true;
            while (continue_loop) {
                switch (token) {
                    .field => {
                        token = try self.tokenizer.next();
                        continue;
                    },
                    .row_end, .eof => {
                        continue_loop = false;
                        break;
                    },
                }
            }
        }
    };
}

// Deep equality assertion used by the tests below; recurses through structs,
// optionals, and atoms.
// NOTE(review): for []const u8 struct fields this compares `a.name`/`b.name`
// rather than `@field(a, Field.name)` — it works here only because every test
// struct's sole string field is called `name`; verify before reusing.
fn testStructEql(comptime T: type, a: T, b: T) !void {
    const TypeInfo = @typeInfo(T);
    switch (TypeInfo) {
        .Void => {},
        .Optional => {
            const NestedFieldType: type = TypeInfo.Optional.child;
            if (a) |def_a| {
                if (b) |def_b| {
                    try testStructEql(NestedFieldType, def_a, def_b);
                } else {
                    try std.testing.expect(false);
                }
            } else {
                if (b) |_| {
                    try std.testing.expect(false);
                } else {
                    try std.testing.expect(true);
                }
            }
        },
        .Union => {
            try std.testing.expect(std.meta.eql(a, b));
            // inline for (U.fields) |UF| {
            //     if (std.meta.isTag(a, UF.name)) {
            //         const def_a = @field(a, UF.name);
            //         if (std.meta.isTag(b, UF.name)) {
            //             const def_b = @field(b, UF.name);
            //             try testStructEql(UF.field_type, def_a, def_b);
            //         } else {
            //             try std.testing.expect(false);
            //         }
            //     } else {
            //         if (std.meta.isTag(b, UF.name)) {
            //             try std.testing.expect(false);
            //         } else {
            //             try std.testing.expect(true);
            //         }
            //     }
            // }
        },
        .Struct => {
            const Fields = TypeInfo.Struct.fields;
            inline for (Fields) |Field| {
                if (comptime Field.field_type == []const u8) {
                    // std.debug.print("Comparing {s} and {s}\n", .{ @field(a, Field.name), @field(b, Field.name) });
                    try std.testing.expect(std.mem.eql(u8, a.name, b.name));
                } else {
                    // try std.testing.expect(std.meta.eql(@field(a, Field.name), @field(b, Field.name)));
                    try testStructEql(Field.field_type, @field(a, Field.name), @field(b, Field.name));
                }
            }
        },
        .Int, .Float, .Bool, .Enum => {
            try std.testing.expectEqual(a, b);
        },
        else => @compileError("Invalid type: " ++ @typeName(T) ++ ". Should be void, struct, enum, union, optional, int, float, or bool"),
    }
}

// const builtin = @import("std").builtin;
//
// fn structEql(comptime T: type, a: T, b: T) bool {
//     const Fields = @typeInfo(T).Struct.fields;
//     inline for (Fields) |Field| {
//         if (comptime Field.field_type == []const u8) {
//             if (!std.mem.eql(u8, @field(a, Field.name), @field(b, Field.name))) {
//                 return false;
//             }
//         } else {
//             if (!builtin.eql(@field(a, Field.name), @field(b, Field.name))) {
//                 return false;
//             }
//         }
//     }
//     return true;
// }

test "parse" {
    var allocator = std.testing.allocator;
    const file_path = "test/data/simple_parse.csv";
    var file = try fs.cwd().openFile(file_path, .{});
    defer file.close();
    const SimpleParse = struct {
        id: u32,
        name: []const u8,
        unit: f32,
        nilable: ?u64,
    };

    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();

    var parser = try CsvParser(SimpleParse, fs.File.Reader, .{}).init(arena.allocator(), file.reader());
    const maybe_first_row = try parser.next();
    // we read the second struct before testing to see if the first row keeps its contents
    const maybe_second_row = try parser.next();
    if (maybe_first_row) |row| {
        const expected_row: SimpleParse = SimpleParse{
            .id = 1,
            .name = "abc",
            .unit = 1.1,
            .nilable = 111,
        };
        try testStructEql(SimpleParse, expected_row, row);
    } else {
        std.debug.print("Error parsing first row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    if (maybe_second_row) |row| {
        const expected_row: SimpleParse = SimpleParse{
            .id = 22,
            .name = "cdef",
            .unit = 22.2,
            .nilable = null,
        };
        try testStructEql(SimpleParse, expected_row, row);
    } else {
        std.debug.print("Error parsing second row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    const maybe_third_row = try parser.next();
    // we read the fourth struct before testing to see if the third row keeps its contents
    const maybe_fourth_row = try parser.next();
    if (maybe_third_row) |row| {
        const expected_row: SimpleParse = SimpleParse{
            .id = 333,
            .name = "ghijk",
            .unit = 33.33,
            .nilable = 3333,
        };
        try testStructEql(SimpleParse, expected_row, row);
    } else {
        std.debug.print("Error parsing third row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    if (maybe_fourth_row) |_| {
        std.debug.print("Error parsing fourth row, expected null\n", .{});
        try std.testing.expectEqual(false, true);
    }
}

test "parse mutable slices" {
    const SliceParse = struct {
        id: u32,
        name: []const u8,
        unit: f32,
        nilable: ?u64,
    };
    var allocator = std.testing.allocator;
    const file_path = "test/data/simple_parse.csv";
    var file = try fs.cwd().openFile(file_path, .{});
    defer file.close();

    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();

    var parser = try CsvParser(SliceParse, fs.File.Reader, .{}).init(arena.allocator(), file.reader());
    const maybe_first_row = try parser.next();
    const maybe_second_row = try parser.next();
    if (maybe_first_row) |row| {
        const expected_row = SliceParse{
            .id = 1,
            .name = "abc",
            .unit = 1.1,
            .nilable = 111,
        };
        try testStructEql(SliceParse, expected_row, row);
    } else {
        std.debug.print("Error parsing first row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    if (maybe_second_row) |row| {
        const expected_row = SliceParse{
            .id = 22,
            .name = "cdef",
            .unit = 22.2,
            .nilable = null,
        };
        try testStructEql(SliceParse, expected_row, row);
    } else {
        std.debug.print("Error parsing second row\n", .{});
        try std.testing.expectEqual(false, true);
    }
}

test "parse into previously allocated structs" {
    const TightStruct = struct { id: i64, age: u32 };

    var allocator = std.testing.allocator;
    const file_path = "test/data/simple_end_to_end.csv";
    var file = try fs.cwd().openFile(file_path, .{});
    defer file.close();

    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();
    const arena_allocator = arena.allocator();

    const tight_array: []TightStruct = try arena_allocator.alloc(TightStruct, 17);

    var parser = try CsvParser(TightStruct, fs.File.Reader, .{}).init(arena_allocator, file.reader());
    const maybe_first_row = try parser.nextInto(&tight_array[0]);
    const maybe_second_row = try parser.nextInto(&tight_array[1]);

    if (maybe_first_row) |_| {
        const expected_row = TightStruct{ .id = 1, .age = 32 };
        try testStructEql(TightStruct, expected_row, tight_array[0]);
    } else {
        std.debug.print("Error parsing first row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    if (maybe_second_row) |_| {
        const expected_row = TightStruct{ .id = 1, .age = 28 };
        try testStructEql(TightStruct, expected_row, tight_array[1]);
    } else {
        std.debug.print("Error parsing second row\n", .{});
        try std.testing.expectEqual(false, true);
    }

    var i: usize = 2; // we already advanced the parser twice
    while (i < 17) {
        const maybe_result = try parser.nextInto(&tight_array[i]);
        if (maybe_result == null) try std.testing.expect(false);
        i += 1;
    }

    const expected_last_row = TightStruct{ .id = 10, .age = 29 };
    try testStructEql(TightStruct, expected_last_row, tight_array[16]);
}

test "parse into arraylist" {
    const TightStruct = struct { id: i64, age: u32 };

    var allocator = std.testing.allocator;
    const file_path = "test/data/simple_end_to_end.csv";
    var file = try fs.cwd().openFile(file_path, .{});
    defer file.close();

    var list = std.ArrayList(TightStruct).init(allocator);
    defer list.deinit();

    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();

    var parser = try CsvParser(TightStruct, fs.File.Reader, .{}).init(arena.allocator(), file.reader());

    // We can use parser.nextInto with list.addOne
    {
        const elem = try list.addOne();
        const maybe_row = try parser.nextInto(elem);
        if (maybe_row) |_| {
            const expected_row = TightStruct{ .id = 1, .age = 32 };
            try testStructEql(TightStruct, expected_row, elem.*);
        } else {
            std.debug.print("Error parsing first row\n", .{});
            try std.testing.expectEqual(false, true);
        }
    }
    {
        const elem = try list.addOne();
        const maybe_row = try parser.nextInto(elem);
        if (maybe_row) |_| {
            const expected_row = TightStruct{ .id = 1, .age = 28 };
            try testStructEql(TightStruct, expected_row, elem.*);
        } else {
            std.debug.print("Error parsing second row\n", .{});
            try std.testing.expectEqual(false, true);
        }
    }

    // We can use parser.next with list.append
    while (try parser.next()) |row| {
        try list.append(row);
    }

    try std.testing.expectEqual(list.items.len, 17);

    const expected_last_row = TightStruct{ .id = 10, .age = 29 };
    try testStructEql(TightStruct, expected_last_row, list.pop());
}

test "parse enums" {
    var allocator = std.testing.allocator;
    const file_path = "test/data/parse_enum.csv";
    var file = try fs.cwd().openFile(file_path, .{});
    defer file.close();
    const OnOff = enum { ON, OFF };
    const Color = enum(u2) {
        red, // 00 -> 0
        blue, // 01 -> 1
        green, // 10 -> 2
        _, // 11 -> 3
    };
    const EnumParse = struct {
        id: u32,
        is_on: OnOff,
        color: Color,
        unit: void,
        nilable: ?u64,
    };

    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();

    var parser = try CsvParser(EnumParse, fs.File.Reader, .{}).init(arena.allocator(), file.reader());
    const maybe_first_row = try parser.next();
    // we read the second struct before testing to see if the first row keeps its contents
    const maybe_second_row = try parser.next();
    if (maybe_first_row) |row| {
        const expected_row = EnumParse{
            .id = 1,
            .is_on = OnOff.ON,
            .color = Color.red,
            .unit = {},
            .nilable = 111,
        };
        try testStructEql(EnumParse, expected_row, row);
    } else {
        std.debug.print("Error parsing first row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    if (maybe_second_row) |row| {
        const expected_row = EnumParse{
            .id = 22,
            .is_on = OnOff.OFF,
            .color = Color.blue,
            .unit = {},
            .nilable = null,
        };
        try testStructEql(EnumParse, expected_row, row);
    } else {
        std.debug.print("Error parsing second row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    const maybe_third_row = try parser.next();
    // we read the fourth struct before testing to see if the third row keeps its contents
    const maybe_fourth_row = try parser.next();
    if (maybe_third_row) |row| {
        const expected_row = EnumParse{
            .id = 333,
            .is_on = OnOff.ON,
            .color = @intToEnum(Color, 3),
            .unit = {},
            .nilable = 3333,
        };
        try testStructEql(EnumParse, expected_row, row);
    } else {
        std.debug.print("Error parsing third row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    if (maybe_fourth_row) |_| {
        std.debug.print("Error parsing fourth row, expected null\n", .{});
        try std.testing.expectEqual(false, true);
    }
}

test "parse unions" {
    var allocator = std.testing.allocator;
    const file_path = "test/data/parse_union.csv";
    var file = try fs.cwd().openFile(file_path, .{});
    defer file.close();

    const Tag = enum { int, float, boolean };
    const SampleUnion = union(Tag) {
        int: i32,
        float: f64,
        boolean: bool,
    };
    const UnionStruct = struct { union_field: SampleUnion };

    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();

    var parser = try CsvParser(UnionStruct, fs.File.Reader, .{}).init(arena.allocator(), file.reader());
    const maybe_first_row = try parser.next();
    // we read the second struct before testing to see if the first row keeps its contents
    const maybe_second_row = try parser.next();
    if (maybe_first_row) |row| {
        const expected_row = UnionStruct{ .union_field = SampleUnion{ .int = 1 } };
        try testStructEql(UnionStruct, expected_row, row);
    } else {
        std.debug.print("Error parsing first row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    if (maybe_second_row) |row| {
        const expected_row = UnionStruct{ .union_field = SampleUnion{ .float = 2.3 } };
        try testStructEql(UnionStruct, expected_row, row);
    } else {
        std.debug.print("Error parsing second row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    const maybe_third_row = try parser.next();
    // we read the fourth struct before testing to see if the third row keeps its contents
    const maybe_fourth_row = try parser.next();
    if (maybe_third_row) |row| {
        const expected_row = UnionStruct{ .union_field = SampleUnion{ .boolean = true } };
        try testStructEql(UnionStruct, expected_row, row);
    } else {
        std.debug.print("Error parsing third row\n", .{});
        try std.testing.expectEqual(false, true);
    }
    if (maybe_fourth_row) |_| {
        std.debug.print("Error parsing fourth row, expected null\n", .{});
        try std.testing.expectEqual(false, true);
    }
}
https://raw.githubusercontent.com/bensu/zig-csv/792019939e7d6a8d4e77a5b0a7c003884a452f7d/src/csv/parse.zig
//! Public entry point for the flx fuzzy-matching library: re-exports the
//! core types and the scoring function from `flx.zig`.

const std = @import("std");
const flx = @import("flx.zig");
const testing = std.testing;

/// String type used throughout the library (re-exported from `flx.zig`).
pub const String = flx.String;

/// Result container.
pub const Result = flx.Result;

/// Return best score matching QUERY against STR.
/// Returns null when there is no match; on a match, the Result is produced by
/// `flx.score` using `allocator` — see `flx.zig` for ownership details
/// (NOTE(review): confirm who frees the Result's internals).
pub fn score(allocator: std.mem.Allocator, str: String, query: String) ?Result {
    return flx.score(allocator, str, query);
}
https://raw.githubusercontent.com/jcs090218/flx-zig/8ef44a659f54eff58817537d7d15ffbee16eb78c/src/root.zig
const std = @import("std");
const testing = std.testing;
const command = @import("./util/command.zig");
const Engine = @import("./island.zig").Engine;

/// Entry point: reads the puzzle input from stdin into the Engine, solves the
/// part selected on the command line, validates the result against the known
/// answer, and prints the answer plus elapsed time.
pub fn main() anyerror!u8 {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const alloc = arena.allocator();

    const part = command.choosePart();

    var engine = Engine.init(alloc);
    defer engine.deinit();

    // Feed stdin to the engine one line at a time.
    const stdin = std.io.getStdIn().reader();
    var line_buf: [1024]u8 = undefined;
    while (try stdin.readUntilDelimiterOrEof(&line_buf, '\n')) |line| {
        try engine.addLine(line);
    }
    // engine.show();

    // Solve the requested part; each arm checks against the known-good value
    // before yielding the answer.
    const answer: usize = switch (part) {
        .part1 => blk: {
            const got = try engine.getSumPartNumbers();
            try testing.expectEqual(@as(usize, 520135), got);
            break :blk got;
        },
        .part2 => blk: {
            const got = try engine.getSumGearRatios();
            try testing.expectEqual(@as(usize, 72514855), got);
            break :blk got;
        },
    };

    const stdout = std.io.getStdOut().writer();
    try stdout.print("=== {s} ===\n", .{@tagName(part)});
    try stdout.print("Answer: {}\n", .{answer});
    try stdout.print("Elapsed: {}ms\n", .{command.getElapsedMs()});
    return 0;
}
https://raw.githubusercontent.com/gonzus/AdventOfCode/7e972b92a23db29461b2869713a3251998af5822/2023/p03/p03.zig
const std = @import("std");
const ArrayList = std.ArrayList;

/// Ordered collection of translation windows between a parent bus and a
/// child bus.
pub const AddressTranslations = ArrayList(AddressTranslation);

/// Translate `parent_addr` into the child address space using the first
/// window that contains it. An address outside every window passes through
/// unchanged.
pub fn toChild(translations: *const AddressTranslations, parent_addr: u64) u64 {
    for (translations.items) |window| {
        if (window.parentBusContains(parent_addr)) return window.parentToChild(parent_addr);
    }
    return parent_addr;
}

/// Translate `child_addr` into the parent address space using the first
/// window that contains it. An address outside every window passes through
/// unchanged.
pub fn toParent(translations: *const AddressTranslations, child_addr: u64) u64 {
    for (translations.items) |window| {
        if (window.childBusContains(child_addr)) return window.childToParent(child_addr);
    }
    return child_addr;
}

/// Build a translation window of `length` bytes that maps
/// [child_address, child_address + length) onto
/// [parent_address, parent_address + length).
pub fn translation(child_address: u64, parent_address: u64, length: usize) AddressTranslation {
    return .{
        .parent_space_begin = parent_address,
        .parent_space_end = parent_address + length,
        .child_space_begin = child_address,
        .child_space_end = child_address + length,
        .length = length,
    };
}

/// One window mapping a contiguous parent-bus range onto an equally sized
/// child-bus range. Both ranges are half-open: [begin, end).
pub const AddressTranslation = struct {
    parent_space_begin: u64,
    parent_space_end: u64,
    child_space_begin: u64,
    child_space_end: u64,
    length: usize,

    // Half-open interval membership test.
    inline fn contains(addr: u64, lo: u64, hi: u64) bool {
        return lo <= addr and addr < hi;
    }

    pub fn childBusContains(self: *const AddressTranslation, address: u64) bool {
        return contains(address, self.child_space_begin, self.child_space_end);
    }

    pub fn parentBusContains(self: *const AddressTranslation, address: u64) bool {
        return contains(address, self.parent_space_begin, self.parent_space_end);
    }

    // Rebase `addr` from one window origin to the other; subtracting before
    // adding keeps the intermediate value from overflowing u64.
    inline fn translate(addr: u64, from_origin: u64, to_origin: u64) u64 {
        return to_origin + (addr - from_origin);
    }

    pub fn parentToChild(self: *const AddressTranslation, address: u64) u64 {
        return translate(address, self.parent_space_begin, self.child_space_begin);
    }

    pub fn childToParent(self: *const AddressTranslation, address: u64) u64 {
        return translate(address, self.child_space_begin, self.parent_space_begin);
    }
};
https://raw.githubusercontent.com/aapen/aapen/c02010f570ec1f69905afe607d2ed4080c2e8edb/src/memory/translations.zig
//! forwarded namespaces for module
//! (central re-export hub: utilities, subsystems, and compilation stages)

// utilities
const scoped_map = @import("lib/scoped_map.zig");
pub const ScopedMap = scoped_map.ScopedMap;
pub const ScopedMapUnmanaged = scoped_map.ScopedMapUnmanaged;
pub const ErrorBuf = @import("lib/error_buf.zig").ErrorBuf;

// subsystems
pub const sources = @import("sources.zig");
pub const Source = sources.Source;
pub const Loc = sources.Loc;
pub const env = @import("env/env.zig");
pub const Ident = @import("env/idents.zig").Ident;
pub const Name = @import("env/names.zig").Name;
pub const Value = @import("env/value.zig").Value;

// compilation stages
pub const Ast = @import("ast/Ast.zig");
pub const Type = @import("types/type.zig").Type;
pub const typer = @import("types/typer.zig");
const parser = @import("parser/parser.zig");
pub const SyntaxErrorMeta = parser.SyntaxErrorMeta;
pub const SyntaxErrorBuf = parser.SyntaxErrorBuf;
pub const ParseError = parser.Error;
pub const parse = parser.parse;
const sema = @import("sema/sema.zig");
pub const TypeErrorMeta = sema.TypeErrorMeta;
pub const TypeErrorBuf = sema.TypeErrorBuf;
pub const SemaError = sema.Error;
pub const analyze = sema.analyze;
pub const ssa = @import("ssa/ssa.zig");
const assembler = @import("jit/assemble.zig");
pub const AssembleError = assembler.Error;
pub const assemble = assembler.assemble;
const lowering = @import("ssa/lower.zig");
pub const LowerError = lowering.Error;
pub const LowerInto = lowering.LowerInto;
pub const lower = lowering.lower;

// pipes =======================================================================

const std = @import("std");
const stderr = std.io.getStdErr().writer();
const Allocator = std.mem.Allocator;
const builtin = @import("builtin");
const options = @import("options");
const blox = @import("blox");

/// Initializes global subsystems (`typer`, then `env`).
/// If the `log_options` build option is set, first dumps every build option
/// to a scoped logger. The only error is allocation failure from
/// `typer.init`.
pub fn init() Allocator.Error!void {
    // log options if requested
    if (options.log_options) {
        const logger = std.log.scoped(.options);
        const decls = @typeInfo(options).Struct.decls;
        inline for (decls) |decl| {
            logger.info("{s}: {any}\n", .{
                decl.name,
                @field(options, decl.name),
            });
        }
    }

    // call init functions
    try typer.init();
    env.init();
}

/// Tears down global subsystems: `env`, `sources`, then `typer`.
/// (Note: `sources` is deinitialized here even though `init` does not
/// initialize it — presumably it is lazily initialized elsewhere; confirm.)
pub fn deinit() void {
    env.deinit();
    sources.deinit();
    typer.deinit();
}

/// put this in root!
/// Supplies `std.log` configuration and a custom `logFn` that renders log
/// records with blox decorations before writing them to stderr.
pub const std_options = struct {
    // Log verbosity follows the optimization mode.
    pub const log_level = switch (builtin.mode) {
        .Debug => .debug,
        .ReleaseSafe => .info,
        .ReleaseSmall, .ReleaseFast => .err,
    };

    // Color used for the level name inside the rendered log tag.
    fn logLevelColor(comptime level: std.log.Level) blox.Color {
        const c = blox.Color.init;
        return comptime switch (level) {
            .debug => c(.normal, .green),
            .info => c(.bright, .cyan),
            .warn => c(.bright, .magenta),
            .err => c(.bright, .red),
        };
    }

    // Human-readable text for the level inside the rendered log tag.
    fn logLevelText(comptime level: std.log.Level) []const u8 {
        return comptime switch (level) {
            .debug, .info => @tagName(level),
            .warn => "warning",
            .err => "error",
        };
    }

    /// Formats one log record and writes it to stderr.
    /// Single-line messages are printed as `[scope|level] message`;
    /// multi-line messages get the tag on its own line followed by a "| "
    /// gutter on each message line.
    fn innerLogFn(
        comptime level: std.log.Level,
        comptime scope: @TypeOf(.EnumLiteral),
        comptime format: []const u8,
        args: anytype,
    ) !void {
        // per-scope opt-out driven by build options
        const enabled = comptime switch (scope) {
            .lexer => options.log_lexer,
            else => true,
        };
        if (!enabled) return;

        // context
        // NOTE(review): a fresh GeneralPurposeAllocator is created per log
        // call — simple and leak-checked, but heavyweight; confirm intended.
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        const ally = gpa.allocator();

        var mason = blox.Mason.init(ally);
        defer mason.deinit();

        // render logger tag
        const tag = try mason.newBox(&.{
            try mason.newPre("[", .{}),
            try mason.newPre(@tagName(scope), .{}),
            try mason.newPre("|", .{}),
            try mason.newPre(logLevelText(level), .{
                .fg = logLevelColor(level),
            }),
            try mason.newPre("] ", .{}),
        }, .{ .direction = .right });

        // render lines of text with my blox decorators
        // this is done by intermixing std.fmt with blox in order to maintain
        // compatibility with consumer expectations
        const text = try std.fmt.allocPrint(ally, format, args);
        defer ally.free(text);

        // prune extra lines (strip all trailing newlines before layout)
        var slice = text;
        while (std.mem.endsWith(u8, slice, "\n")) {
            slice.len -= 1;
        }

        const is_single_line = std.mem.count(u8, slice, "\n") == 0;
        if (is_single_line) {
            const deco_fmt = mason.fmt(tag, .{ .print_final_newline = false });
            try stderr.print("{}{s}\n", .{ deco_fmt, slice });
        } else {
            // multiline: tag first, then each line behind a dim "| " gutter
            const indent = try mason.newPre("| ", .{
                .fg = blox.Color.init(.bright, .black),
            });

            try mason.write(tag, stderr, .{});

            var lines = std.mem.splitScalar(u8, slice, '\n');
            var i: usize = 0;
            while (lines.next()) |line| : (i += 1) {
                const deco_fmt = mason.fmt(indent, .{
                    .print_final_newline = false,
                });
                try stderr.print("{}{s}\n", .{ deco_fmt, line });
            }

            try stderr.writeByte('\n');
        }
    }

    /// std.log entry point; panics if rendering/writing the log record
    /// itself fails.
    pub fn logFn(
        comptime level: std.log.Level,
        comptime scope: @TypeOf(.EnumLiteral),
        comptime format: []const u8,
        args: anytype,
    ) void {
        innerLogFn(level, scope, format, args) catch |e| {
            std.debug.panic("error in logFn: {s}\n", .{@errorName(e)});
        };
    }
};
https://raw.githubusercontent.com/garrisonhh/fluent/2d3ceb9f817681c135d8b6e978b1494c77d31a56/src/mod.zig
//!
//! Small Zig reimplementation of gcc's libssp.
//!
//! This library implements most of the builtins required by the stack smashing
//! protection as implemented by gcc&clang.
//! Missing exports:
//! - __gets_chk
//! - __mempcpy_chk
//! - __snprintf_chk
//! - __sprintf_chk
//! - __stpcpy_chk
//! - __vsnprintf_chk
//! - __vsprintf_chk

const std = @import("std");

// libc routines that the fortified wrappers below delegate to.
extern fn strncpy(dest: [*:0]u8, src: [*:0]const u8, n: usize) callconv(.C) [*:0]u8;
extern fn memset(dest: ?[*]u8, c: u8, n: usize) callconv(.C) ?[*]u8;
extern fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8;
extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8;

// Avoid dragging in the runtime safety mechanisms into this .o file.
// Custom panic handler: abort immediately instead of pulling in the default
// panic machinery and its dependencies.
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
    _ = msg;
    _ = error_return_trace;
    @setCold(true);
    std.os.abort();
}

// Called by compiler-inserted prologue/epilogue checks when the stack canary
// has been clobbered.
export fn __stack_chk_fail() callconv(.C) noreturn {
    @panic("stack smashing detected");
}

// Called by the fortified (_chk) wrappers when a destination buffer is too
// small for the requested operation.
export fn __chk_fail() callconv(.C) noreturn {
    @panic("buffer overflow detected");
}

// Emitted when targeting some architectures (eg. i386)
// XXX: This symbol should be hidden
export fn __stack_chk_fail_local() callconv(.C) noreturn {
    __stack_chk_fail();
}

// XXX: Initialize the canary with random data
// Terminator-style canary: mostly zero bytes plus '\n' and 0xff, which tend
// to stop str*/ gets-style overwrites from reproducing the value.
export var __stack_chk_guard: usize = blk: {
    var buf = [1]u8{0} ** @sizeOf(usize);
    buf[@sizeOf(usize) - 1] = 255;
    buf[@sizeOf(usize) - 2] = '\n';
    break :blk @as(usize, @bitCast(buf));
};

// Fortified strcpy: copies src (including NUL) into dest, aborting via
// __chk_fail if dest_n bytes cannot hold the string and its terminator.
export fn __strcpy_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [*:0]u8 {
    @setRuntimeSafety(false); // manual bounds logic below
    var i: usize = 0;
    while (i < dest_n and src[i] != 0) : (i += 1) {
        dest[i] = src[i];
    }
    // i == dest_n means there is no room left for the NUL terminator.
    if (i == dest_n) __chk_fail();
    dest[i] = 0;
    return dest;
}

// Fortified strncpy: the only check is that the destination can hold n bytes.
export fn __strncpy_chk(dest: [*:0]u8, src: [*:0]const u8, n: usize, dest_n: usize) callconv(.C) [*:0]u8 {
    if (dest_n < n) __chk_fail();
    return strncpy(dest, src, n);
}

// Fortified strcat: appends src to dest, aborting if the concatenated string
// plus NUL does not fit within dest_n bytes total.
export fn __strcat_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [*:0]u8 {
    @setRuntimeSafety(false);
    var avail = dest_n;
    // find the current end of dest, consuming the available budget
    var dest_end: usize = 0;
    while (avail > 0 and dest[dest_end] != 0) : (dest_end += 1) {
        avail -= 1;
    }
    if (avail < 1) __chk_fail();
    // append src while budget remains
    var i: usize = 0;
    while (avail > 0 and src[i] != 0) : (i += 1) {
        dest[dest_end + i] = src[i];
        avail -= 1;
    }
    // must still have room for the NUL terminator
    if (avail < 1) __chk_fail();
    dest[dest_end + i] = 0;
    return dest;
}

// Fortified strncat: like __strcat_chk but copies at most n bytes of src.
export fn __strncat_chk(dest: [*:0]u8, src: [*:0]const u8, n: usize, dest_n: usize) callconv(.C) [*:0]u8 {
    @setRuntimeSafety(false);
    var avail = dest_n;
    var dest_end: usize = 0;
    while (avail > 0 and dest[dest_end] != 0) : (dest_end += 1) {
        avail -= 1;
    }
    if (avail < 1) __chk_fail();
    var i: usize = 0;
    while (avail > 0 and i < n and src[i] != 0) : (i += 1) {
        dest[dest_end + i] = src[i];
        avail -= 1;
    }
    if (avail < 1) __chk_fail();
    dest[dest_end + i] = 0;
    return dest;
}

// Fortified memcpy: aborts if the destination object is smaller than n.
export fn __memcpy_chk(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
    if (dest_n < n) __chk_fail();
    return memcpy(dest, src, n);
}

// Fortified memmove: aborts if the destination object is smaller than n.
export fn __memmove_chk(dest: ?[*]u8, src: ?[*]const u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
    if (dest_n < n) __chk_fail();
    return memmove(dest, src, n);
}

// Fortified memset: aborts if the destination object is smaller than n.
export fn __memset_chk(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
    if (dest_n < n) __chk_fail();
    return memset(dest, c, n);
}
https://raw.githubusercontent.com/ziglang/gotta-go-fast/c915c45c5afed9a2e2de4f4484acba2df5090c3a/src/self-hosted-parser/input_dir/special/ssp.zig
const std = @import("std");

// Implemented externally; each is compiled under a different float mode.
extern fn foo_strict(x: f64) f64;
extern fn foo_optimized(x: f64) f64;

/// Evaluates the same computation under optimized and strict floating-point
/// modes and prints both results for comparison.
pub fn main() void {
    const input: f64 = 0.001;
    std.debug.print("optimized = {}\n", .{foo_optimized(input)});
    std.debug.print("strict = {}\n", .{foo_strict(input)});
}

// syntax
https://raw.githubusercontent.com/ziglang/zig/d9bd34fd0533295044ffb4160da41f7873aff905/doc/langref/float_mode_exe.zig
const std = @import("std");

/// Number of characters needed to print `v` in base 10, including a leading
/// '-' for negative values (e.g. 0 -> 1, 123 -> 3, -5 -> 2).
pub fn digitCount(v: anytype) usize {
    // Widen through u64 (previously u32) so magnitudes >= 2^32 from 64-bit
    // inputs do not trip the @intCast range check.
    const abs: u64 = @intCast(@abs(v));
    if (abs == 0) return 1;
    const width: usize = @intCast(std.math.log10_int(abs));
    if (v < 0) return width + 2;
    return width + 1;
}

/// Coarse classification of ASCII characters used by the matcher.
/// Any byte not otherwise listed is treated as `Lower`.
pub const CharacterType = enum {
    Empty,
    Upper,
    Lower,
    Number,
    HardSeperator,
    SoftSeperator,

    /// Convert an ASCII character to a `CharacterType`
    pub fn fromAscii(c: u8) CharacterType {
        return switch (c) {
            'a'...'z' => .Lower,
            'A'...'Z' => .Upper,
            '0'...'9' => .Number,
            ' ', '\\', '/', '|', '(', ')', '[', ']', '{', '}' => .HardSeperator,
            '!', '*'...'.', ':'...'@', '^'...'`', '~' => .SoftSeperator,
            0 => .Empty,
            else => .Lower,
        };
    }

    /// Positional role of a character relative to its neighbour.
    const Role = enum {
        Head,
        Break,
        Camel,
        Tail,
    };

    /// Get the `Role` of the current character positioned next to another
    /// character
    pub fn roleNextTo(s: CharacterType, o: CharacterType) Role {
        return switch (s) {
            .Empty, .HardSeperator => .Head,
            .SoftSeperator => .Break,
            .Lower => if (o == .Upper) .Camel else .Tail,
            .Number => if (o == .Upper) .Camel else .Tail,
            else => .Tail,
        };
    }
};

/// Walks `haystack` once, recording into `indices` the index of the first
/// occurrence matching each successive element of `needle` (compared via
/// `eqlFunc` with `ctx`). Returns the filled prefix of `indices`, an empty
/// slice for an empty needle, or null when the needle cannot be matched in
/// order (including needle longer than haystack).
pub fn firstMatchesGeneric(
    comptime T: type,
    ctx: anytype,
    comptime eqlFunc: fn (@TypeOf(ctx), h: T, n: T) bool,
    indices: []usize,
    haystack: []const T,
    needle: []const T,
) ?[]const usize {
    if (needle.len == 0) {
        return &.{};
    }
    if (needle.len > haystack.len) {
        return null;
    }

    var index: usize = 0;
    for (0.., haystack) |i, h| {
        const n = needle[index];
        if (eqlFunc(ctx, h, n)) {
            indices[index] = i;
            index += 1;
            if (index >= needle.len) break;
        }
    } else return null; // haystack exhausted before the whole needle matched

    return indices[0..index];
}

/// Context-free `==` comparator adapter for `firstMatchesGeneric`.
fn simpleEql(comptime T: type) fn (void, T, T) bool {
    return struct {
        fn f(_: void, h: T, n: T) bool {
            return h == n;
        }
    }.f;
}

/// Computes a simple equality check, recording the successive indices of haystack
/// that match successive characters in needle.
pub fn firstMatches(
    comptime T: type,
    indices: []usize,
    haystack: []const T,
    needle: []const T,
) ?[]const usize {
    return firstMatchesGeneric(T, {}, simpleEql(T), indices, haystack, needle);
}

/// Allocating variant of `firstMatches`. On a match the caller owns the
/// returned slice and must free it with `allocator`; on `null` nothing is
/// retained by the caller.
pub fn firstMatchesAlloc(
    comptime T: type,
    allocator: std.mem.Allocator,
    haystack: []const T,
    needle: []const T,
) !?[]const usize {
    const indices = try allocator.alloc(usize, needle.len);
    errdefer allocator.free(indices);
    const matches = firstMatches(T, indices, haystack, needle) orelse {
        // Fix: the null (no-match) return is a *success* path, so the
        // errdefer above never ran and `indices` leaked. Callers only free a
        // non-null result, so release the buffer here.
        allocator.free(indices);
        return null;
    };
    return matches;
}

// Test helper: asserts firstMatchesAlloc produces exactly `expected`.
fn testFirstMatch(
    haystack: []const u8,
    needle: []const u8,
    comptime expected: []const usize,
) !void {
    const inds = try firstMatchesAlloc(u8, std.testing.allocator, haystack, needle);
    defer if (inds) |x| std.testing.allocator.free(x);
    try std.testing.expectEqualSlices(usize, expected, inds.?);
}

test "firstMatches" {
    try testFirstMatch("axbycz", "xyz", &.{ 1, 3, 5 });
    try testFirstMatch("axbycz", "abc", &.{ 0, 2, 4 });
    try testFirstMatch("", "", &.{});
}
https://raw.githubusercontent.com/fjebaker/fuzzig/0fd156d5097365151e85a85eef9d8cf0eebe7b00/src/utils.zig
const std = @import("std");
const papyrus = @import("../../modules/ui/papyrus/papyrus.zig");

const logger = std.log.scoped(.main);

// a little utility to extract fonts from ttf files and create both
// a png file and a json file with corresponding font vertex coordinates.

/// Parsed command-line options. All string fields are heap-allocated and
/// owned by this struct; release them with `deinit`.
const ProgramOptions = struct {
    outputFile: []u8,
    verbose: bool = false,
    sourceFile: ?[]u8 = null,
    errorMsg: ?[]u8 = null,

    pub fn deinit(self: @This(), allocator: std.mem.Allocator) void {
        allocator.free(self.outputFile);
        if (self.sourceFile) |src| {
            allocator.free(src);
        }
        if (self.errorMsg) |msg| {
            allocator.free(msg);
        }
    }
};

/// Duplicate `str` into a freshly allocated buffer owned by the caller.
fn dupe(allocator: std.mem.Allocator, str: []const u8) ![]u8 {
    return try std.fmt.allocPrint(allocator, "{s}", .{str});
}

/// Returns true when `arg` is `--<long>` or `-<first char of short>`.
fn matchArgOpt(arg: []const u8, long: []const u8, short: ?[]const u8) bool {
    if (arg.len < 2) {
        return false;
    }
    if (arg[0] == '-') {
        if (arg[1] == '-') {
            return std.mem.eql(u8, arg[2..], long);
        } else if (short) |s| {
            return arg[1] == s[0];
        }
    }
    return false;
}

/// Parse process arguments into a `ProgramOptions`.
/// Recognized: `-o/--output <file>`, `-v/--verbose`; any other non-flag
/// argument is the (single) source file. Problems are reported through
/// `errorMsg` rather than an error return.
fn parseArgs(allocator: std.mem.Allocator) !ProgramOptions {
    const ParseState = enum {
        zero, // still looking at argv[0] (the program name)
        default,
        output, // previous arg was -o/--output; expecting a file name
    };

    var state: ParseState = .zero;
    var opts: ProgramOptions = .{
        .outputFile = try dupe(allocator, "bitmap_font"),
    };

    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);

    for (args) |arg| {
        switch (state) {
            .zero => {
                state = .default;
            },
            .default => {
                // Fix: guard against empty argv strings before indexing.
                if (arg.len == 0) continue;
                if (arg[0] == '-') {
                    if (matchArgOpt(arg, "output", "o")) state = .output;
                    if (matchArgOpt(arg, "verbose", "v")) opts.verbose = true;
                } else {
                    if (opts.sourceFile) |src| {
                        allocator.free(src);
                        opts.sourceFile = null;
                        opts.errorMsg = try dupe(allocator, "Too many source files, this is not supported");
                        return opts;
                    }
                    opts.sourceFile = try dupe(allocator, arg);
                }
            },
            .output => {
                allocator.free(opts.outputFile);
                opts.outputFile = try dupe(allocator, arg);
                state = .default;
            },
        }
    }

    // Fix: a trailing -o/--output with no value was previously ignored
    // silently; report it so the user sees why the default name was kept.
    if (state == .output) {
        opts.errorMsg = try dupe(allocator, "Missing value for --output");
    }

    return opts;
}

// TODO: print usage/help text.
pub fn usage() void {}

pub fn main() anyerror!void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    defer _ = gpa.deinit(); // runs after opts.deinit below (LIFO)

    const opts = try parseArgs(allocator);
    defer opts.deinit(allocator);

    logger.info("\n", .{});
    logger.info("fontExtractor: extract bitmaps and texture coordinates into bmps and json files", .{});

    if (opts.errorMsg) |msg| {
        logger.err("error message detected in font extractor: {s}", .{msg});
    }
}
https://raw.githubusercontent.com/peterino2/NeonWood/705a04e06aa60736e0af02ad00cd0d4c37f07921/projects/utils/fontExtractor.zig
//! Lightweight section profiler for the Playdate build: records per-section
//! timing stats and spall-format trace events. Compiles to no-ops when
//! ENABLE_PROFILING is false.

const toolbox = @import("toolbox");
const pdapi = @import("playdate_api.zig");
const game = @import("game.zig");
const std = @import("std");
const GlobalState = @import("root").GlobalState;

// Master switch: when false, every public fn here compiles to an empty body.
pub const ENABLE_PROFILING = false; // !toolbox.IS_DEBUG;

const MAX_SECTIONS = 64;

var section_map: toolbox.HashMap([]const u8, Section) = undefined;
var section_stack: [MAX_SECTIONS][]const u8 = undefined;
var stack_index: usize = 0;
var profiler_arena: toolbox.Arena = undefined;
var g_spall_events: toolbox.RingQueue(SpallEvent) = undefined;

// One start/end interval, queued for spall JSON export.
const SpallEvent = struct {
    name: []const u8,
    start: toolbox.Milliseconds,
    end: toolbox.Milliseconds,
};

// Running statistics for one named section.
const Section = struct {
    start: toolbox.Milliseconds = 0,
    last_time: toolbox.Milliseconds = 0,
    total_time: toolbox.Milliseconds = 0,
    max_time: toolbox.Milliseconds = 0,
    count: usize = 0,
};

/// Drops all per-section stats. Panics if any section is still open.
pub fn clear() void {
    if (comptime !ENABLE_PROFILING) {
        return;
    }
    if (stack_index != 0) {
        @panic("section stack should be empty");
    }
    section_map.clear();
}

/// Allocates the profiler arena, section map, and spall event queue.
/// Presumably called once at startup — confirm against the caller.
pub fn init() void {
    if (comptime !ENABLE_PROFILING) {
        return;
    }
    profiler_arena = toolbox.Arena.init(toolbox.mb(2));
    section_map = toolbox.HashMap([]const u8, Section).init(MAX_SECTIONS, &profiler_arena);
    g_spall_events = toolbox.RingQueue(SpallEvent).init(1 << 16, &profiler_arena);
}

/// Opens a named section; must be paired with end().
/// NOTE(review): more than MAX_SECTIONS simultaneously-open sections would
/// index past section_stack — no bounds check here; confirm callers.
pub fn start(comptime section_name: []const u8) void {
    if (comptime !ENABLE_PROFILING) {
        return;
    }
    section_stack[stack_index] = section_name;
    stack_index += 1;
    var section = section_map.get_or_put_ptr(section_name, .{}, &profiler_arena);
    section.start = toolbox.milliseconds();
}

/// Closes the most recently started section, updating its stats and queueing
/// a spall event.
pub fn end() void {
    if (comptime !ENABLE_PROFILING) {
        return;
    }
    const end_time = toolbox.milliseconds();
    stack_index -= 1;
    const section_name = section_stack[stack_index];
    var section = section_map.get_ptr(section_name).?;
    const time_taken = end_time - section.start;
    section.last_time = time_taken;
    section.max_time = @max(time_taken, section.max_time);
    // +% wraps on overflow; a negative total signals wraparound, so the
    // running total/count are reset rather than left corrupted.
    const new_total = section.total_time +% time_taken;
    if (new_total >= 0) {
        section.total_time = new_total;
        section.count += 1;
    } else {
        section.total_time = 0;
        section.count = 0;
    }
    g_spall_events.enqueue(.{
        .name = section_name,
        .start = section.start,
        .end = end_time,
    });
}

/// Drains all queued events into a "spall_<ms>.json" file on the device,
/// writing begin ("ph":"B") and end ("ph":"E") records per event.
/// NOTE(review): the output ends with a trailing comma before ']' — confirm
/// the spall importer tolerates this.
pub fn flush_spall_json(scratch_arena: *toolbox.Arena) void {
    const save_point = scratch_arena.create_save_point();
    defer scratch_arena.restore_save_point(save_point);
    const file_name_buffer = scratch_arena.push_slice(u8, 256);
    const file_name = std.fmt.bufPrintZ(
        file_name_buffer,
        "spall_{}.json",
        .{toolbox.milliseconds()},
    ) catch |e| toolbox.panic("Error constructing spall file name: {}", .{e});
    const spall_json_file = switch (pdapi.open_file(file_name, pdapi.FILE_WRITE)) {
        .Ok => |file| file,
        .Error => |err| toolbox.panic(
            "Error opening spall.json: {s}",
            .{err},
        ),
    };
    _ = pdapi.write_file(spall_json_file, "[");
    while (!g_spall_events.is_empty()) {
        // Buffer up to ~64KB of JSON per write to limit file-API calls;
        // the +128 margin leaves room for one more pair of records.
        const tmp_save_point = scratch_arena.create_save_point();
        defer scratch_arena.restore_save_point(tmp_save_point);
        var bytes_consumed: usize = 0;
        var buffer = scratch_arena.push_slice(u8, toolbox.kb(64));
        while (!g_spall_events.is_empty() and bytes_consumed + 128 < buffer.len) {
            const event = g_spall_events.dequeue();
            const begin_line_buffer = buffer[bytes_consumed..];
            const begin_json = std.fmt.bufPrintZ(
                begin_line_buffer,
                "{{\"cat\":\"function\",\"name\":\"{s}\",\"ph\":\"B\",\"pid\":0,\"tid\":0,\"ts\":{}}},\n",
                .{
                    event.name,
                    event.start,
                },
            ) catch |e| toolbox.panic("Error constructing spall JSON: {}", .{e});
            bytes_consumed += begin_json.len;
            const end_line_buffer = buffer[bytes_consumed..];
            const end_json = std.fmt.bufPrintZ(
                end_line_buffer,
                "{{\"ph\":\"E\",\"pid\":0,\"tid\":0,\"ts\":{}}},\n",
                .{event.end},
            ) catch |e| toolbox.panic("Error constructing spall JSON: {}", .{e});
            bytes_consumed += end_json.len;
        }
        _ = pdapi.write_file(spall_json_file, buffer[0..bytes_consumed]);
    }
    _ = pdapi.write_file(spall_json_file, "]");
    pdapi.close_file(spall_json_file);
    toolbox.println("Profiler file {s} written!", .{file_name});
}

/// Renders one line of stats per section (sorted by descending average time)
/// into the top-left of the screen. Asserts all sections are closed.
pub fn draw_stats(font: *pdapi.LCDFont, scratch_arena: *toolbox.Arena) void {
    if (comptime !ENABLE_PROFILING) {
        return;
    }
    const save_point = scratch_arena.create_save_point();
    defer {
        scratch_arena.restore_save_point(save_point);
    }
    var width: pdapi.Pixel = 0;
    const LineEntry = struct {
        line: []const u8,
        sort_key: f32,
    };
    var sorted_lines = scratch_arena.push_slice([]const u8, section_map.len());
    // sort sections (insertion sort into a ?LineEntry scratch array)
    {
        const sort_scratch = scratch_arena.push_slice(?LineEntry, section_map.len());
        for (sort_scratch) |*entry| entry.* = null;
        var it = section_map.iterator();
        while (it.next()) |kv| {
            const average = if (kv.v.count != 0) @as(f32, @floatFromInt(kv.v.total_time)) / @as(f32, @floatFromInt(kv.v.count)) else 0;
            // +1 for the NUL terminator bufPrintZ appends.
            const len = std.fmt.count("{s}: Last: {d:.2}ms, Avg: {d:.2}ms, Max: {d:.2}ms", .{ kv.k, kv.v.last_time, average, kv.v.max_time }) + 1;
            const line_buffer = scratch_arena.push_slice(u8, @as(usize, @intCast(len)));
            const line = std.fmt.bufPrintZ(line_buffer, "{s}: Last: {d:.2}ms, Avg: {d:.2}ms, Max: {d:.2}ms", .{ kv.k, kv.v.last_time, average, kv.v.max_time }) catch "Unknown error!";
            {
                var i: usize = 0;
                const sort_key = average;
                insert_loop: while (i < sort_scratch.len) : (i += 1) {
                    const entry_opt = &sort_scratch[i];
                    if (entry_opt.*) |entry| {
                        // sort with largest towards the beginning
                        if (sort_key > entry.sort_key) {
                            // should be inserted in this spot:
                            // shift everything over to the right
                            var j: usize = sort_scratch.len - 1;
                            while (j > i) : (j -= 1) {
                                sort_scratch[j] = sort_scratch[j - 1];
                            }
                            entry_opt.* = .{
                                .line = line,
                                .sort_key = sort_key,
                            };
                            break :insert_loop;
                        }
                    } else {
                        // first empty slot: append here
                        entry_opt.* = .{
                            .line = line,
                            .sort_key = sort_key,
                        };
                        break :insert_loop;
                    }
                }
            }
        }
        for (sort_scratch, 0..) |entry, i| sorted_lines[i] = entry.?.line;
    }
    pdapi.push_drawing_context(null);
    defer pdapi.pop_drawing_context();
    const font_to_restore = pdapi.get_font();
    pdapi.set_font(font);
    defer {
        pdapi.set_font(font_to_restore);
        game.set_main_game_clip_rect();
    }
    // widest line determines the clip-rect width
    {
        for (sorted_lines) |line| {
            width = @max(width, pdapi.get_text_width(line));
        }
    }
    const line_height = pdapi.get_font_height() + 4;
    pdapi.set_draw_offset(0, 0);
    pdapi.set_screen_clip_rect(0, 0, width, (@as(i32, @intCast(sorted_lines.len)) + 1) * line_height);
    pdapi.clear_screen(.ColorWhite);
    {
        var i: pdapi.Pixel = 0;
        // var it = lines.iterator();
        // while (it.next()) |line| {
        for (sorted_lines) |line| {
            _ = pdapi.draw_text(line, 0, i * line_height);
            i += 1;
        }
    }
    toolbox.asserteq(stack_index, 0, "section stack should be empty");
}
https://raw.githubusercontent.com/DanB91/UPWARD-for-Playdate/5d87bf7ef4ad30983ba24ad536228e4ca4ce8b63/src/profiler.zig
// // ____ ______ ____ __ __ ______ __ __ ///\ _`\ /\ _ \/\ _`\ /\ \ __/\ \/\__ _\ /\ \/\ \ //\ \ \/\ \ \ \L\ \ \ \L\ \ \ \/\ \ \ \/_/\ \/ \ \ `\\ \ // \ \ \ \ \ \ __ \ \ , /\ \ \ \ \ \ \ \ \ \ \ \ , ` \ // \ \ \_\ \ \ \/\ \ \ \\ \\ \ \_/ \_\ \ \_\ \__\ \ \`\ \ // \ \____/\ \_\ \_\ \_\ \_\ `\___x___/ /\_____\\ \_\ \_\ // \/___/ \/_/\/_/\/_/\/ /'\/__//__/ \/_____/ \/_/\/_/ // a llama communication layer written in zig. // const c = @cImport({ @cInclude("llama.h"); }); const std = @import("std"); const mem = std.mem; const heap = std.heap; const process = std.process; const GPTParams = struct { const Self = @This(); ally: mem.Allocator, /// RNG seed seed: c_uint = 1, n_threads: c_int = 0, /// new tokens to predict n_predict: c_int = -1, /// context size n_ctx: c_int = 512, /// batch size for prompt processing (must be >=32 to use BLAS) n_batch: c_int = 512, /// grouped-query attention factor (TODO: move to hparams) n_gqa: c_int = 1, /// number of tokens to keep from initial prompt n_keep: c_int = 0, /// max number of chunks to process (-1 = unlimited) n_chunks: c_int = -1, /// number of layers to store in VRAM n_gpu_layers: c_int = 0, /// the GPU that is used for scratch and small tensors main_gpu: c_int = 0, /// how split tensors should be distributed across GPUs tensor_split: [c.LLAMA_MAX_DEVICES]f32 = [c.LLAMA_MAX_DEVICES]f32{0}, /// if greater than 0, output the probabilities of top n_probs tokens. 
n_probs: c_int = 0, /// rms norm epsilon rms_norm_eps: f32 = c.LLAMA_DEFAULT_RMS_EPS, /// RoPE base frequency rope_freq_base: f32 = 10000.0, /// RoPE frequency scaling factor rope_freq_scale: f32 = 1.0, // Sampling parameters ------------------------------------------------------------------------ /// logit bias for specific tokens logit_bias: std.AutoHashMap(c.llama_token, f32) = undefined, /// <= 0 to use vocab size top_k: c_int = 40, /// 1.0 = disabled top_p: f32 = 0.95, /// 1.0 = disabled tfs_z: f32 = 1.00, /// 1.0 = disabled typical_p: f32 = 1.00, /// 1.0 = disabled temp: f32 = 0.80, /// 1.0 = disabled repeat_penalty: f32 = 1.10, /// last n tokens to penalize (0 = disable penalty, -1 = context size) repeat_last_n: c_int = 64, /// 0.0 = disabled frequency_penalty: f32 = 0.00, /// 0.0 = disabled presence_penalty: f32 = 0.00, /// 0 = disabled, 1 = mirostat, 2 = mirostat 2.0 mirostat: c_int = 0, /// target entropy mirostat_tau: f32 = 5.00, /// learning rate mirostat_eta: f32 = 0.10, /// Classifier-Free Guidance /// https:///arxiv.org/abs/2306.17806 /// string to help guidance cfg_negative_prompt: []u8 = undefined, /// How strong is guidance cfg_scale: f32 = 1.0, /// model path model: [:0]const u8 = "models/7B/ggml-model.bin", /// model alias model_alias: [:0]const u8 = "unknown", prompt: [:0]const u8 = "", /// path to file for saving/loading prompt eval state path_prompt_cache: []const u8 = "", /// string to prefix user inputs with input_prefix: []const u8 = "", /// string to suffix user inputs with input_suffix: []const u8 = "", /// optional BNF-like grammar to constrain sampling grammar: []const u8 = "", /// string upon seeing which more user input is prompted antiprompt: std.ArrayList([]u8) = undefined, /// lora adapter path lora_adapter: [:0]const u8 = "", /// base model path for the lora adapter lora_base: [:0]const u8 = "", /// compute HellaSwag score over random tasks from datafile supplied in prompt hellaswag: bool = false, /// number of tasks to use when 
computing the HellaSwag score hellaswag_tasks: isize = 400, /// if true, reduce VRAM usage at the cost of performance low_vram: bool = false, /// if true, use experimental mul_mat_q kernels mul_mat_q: bool = false, /// use f16 instead of f32 for memory kv memory_f16: bool = true, /// do not randomize prompt if none provided random_prompt: bool = false, /// use color to distinguish generations and inputs use_color: bool = false, /// interactive mode interactive: bool = false, /// save user input and generations to prompt cache prompt_cache_all: bool = false, /// open the prompt cache read-only and do not update it prompt_cache_ro: bool = false, /// get only sentence embedding embedding: bool = false, /// wait for user input immediately interactive_first: bool = false, /// reverse the usage of `\` multiline_input: bool = false, /// prefix BOS to user inputs, preceding input_prefix input_prefix_bos: bool = false, /// instruction mode (used for Alpaca models) instruct: bool = false, /// consider newlines as a repeatable token penalize_nl: bool = true, /// compute perplexity over the prompt perplexity: bool = false, /// use mmap for faster loads use_mmap: bool = true, /// use mlock to keep model in memory use_mlock: bool = false, /// compute maximum memory usage mem_test: bool = false, /// attempt optimizations that help on some NUMA systems numa: bool = false, /// export the computation graph export_cgraph: bool = false, /// Print prompt tokens before generation verbose_prompt: bool = false, pub fn init(ally: mem.Allocator) Self { var result = Self{ .ally = ally }; result.n_threads = @as(i32, @intCast(std.Thread.getCpuCount() catch unreachable)); return result; } pub fn parse(self: *Self, arg_iter: *process.ArgIterator) !void { while (arg_iter.next()) |arg| { if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--prompt")) { self.prompt = try self.ally.dupeZ(u8, arg_iter.next() orelse unreachable); } else if (mem.eql(u8, arg, "-m") or mem.eql(u8, arg, "--model")) { 
self.model = try self.ally.dupeZ(u8, arg_iter.next() orelse unreachable);
            }
        }
    }
};

/// Tokenize `text` with the model loaded into `ctx`. When `bos` is true one
/// extra slot is reserved for the BOS (beginning-of-sequence) token. The list
/// is sized up front and shrunk to the count reported by llama_tokenize.
/// Caller owns the returned list.
fn tokenize(ally: std.mem.Allocator, ctx: ?*c.llama_context, text: [:0]const u8, bos: bool) !std.ArrayList(c.llama_token) {
    var embd_inp = std.ArrayList(c.llama_token).init(ally);
    try embd_inp.resize(text.len + if (bos) @as(usize, 1) else @as(usize, 0)); // NOTE(caleb): +1 for BOS (beginning of sequence token)
    const n = c.llama_tokenize(ctx, text.ptr, embd_inp.items.ptr, @as(c_int, @intCast(embd_inp.items.len)), bos);
    std.debug.assert(n >= 0);
    try embd_inp.resize(@as(usize, @intCast(n)));
    return embd_inp;
}

var ctx_ptr: *?*c.llama_context = undefined;

/// Entry point: parses GPT parameters from argv, loads the model, tokenizes
/// the prompt, and runs the generation loop (with context swapping when the
/// context window fills up).
pub fn main() !void {
    var arena_instance = heap.ArenaAllocator.init(heap.page_allocator);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();

    var stderr_file = std.io.getStdErr();
    const stderr = stderr_file.writer();
    var stdout_file = std.io.getStdOut();
    const stdout = stdout_file.writer();

    var gpt_params = GPTParams.init(arena);
    var arg_iter = try process.argsWithAllocator(arena);
    try gpt_params.parse(&arg_iter);

    c.llama_backend_init(gpt_params.numa);

    // Translate parsed CLI options into llama context parameters.
    var lparams = mem.zeroes(c.llama_context_params);
    lparams.n_ctx = gpt_params.n_ctx;
    lparams.n_batch = gpt_params.n_batch;
    lparams.n_gqa = gpt_params.n_gqa;
    lparams.rms_norm_eps = gpt_params.rms_norm_eps;
    lparams.n_gpu_layers = gpt_params.n_gpu_layers;
    lparams.main_gpu = gpt_params.main_gpu;
    lparams.tensor_split = &gpt_params.tensor_split;
    lparams.low_vram = gpt_params.low_vram;
    lparams.mul_mat_q = gpt_params.mul_mat_q;
    lparams.seed = gpt_params.seed;
    lparams.f16_kv = gpt_params.memory_f16;
    lparams.use_mmap = gpt_params.use_mmap;
    lparams.use_mlock = gpt_params.use_mlock;
    lparams.logits_all = gpt_params.perplexity;
    lparams.embedding = gpt_params.embedding;
    lparams.rope_freq_base = gpt_params.rope_freq_base;
    lparams.rope_freq_scale = gpt_params.rope_freq_scale;

    var model = c.llama_load_model_from_file(gpt_params.model, lparams);
    if (model == null) {
        try stderr.print("failed to load model '{s}'\n", .{gpt_params.model});
        process.exit(1);
    }
    var lctx = c.llama_new_context_with_model(model, lparams);
    if (lctx == null) {
        try stderr.print("failed to create context with model '{s}'\n", .{gpt_params.model});
        process.exit(1);
    }
    ctx_ptr = &lctx;

    if (gpt_params.lora_adapter.len != 0) {
        var err = c.llama_model_apply_lora_from_file(model, gpt_params.lora_adapter, if (gpt_params.lora_base.len == 0) null else gpt_params.lora_base, gpt_params.n_threads);
        if (err != 0) {
            try stderr.print("failed to apply lora adapter\n", .{});
            c.llama_free(lctx);
            c.llama_free_model(model);
            process.exit(1);
        }
    }

    // Optional second context used for classifier-free guidance.
    var ctx_guidance: ?*c.llama_context = null;
    if (gpt_params.cfg_scale > 1.0) {
        ctx_guidance = c.llama_new_context_with_model(model, lparams);
    }
    if (model == null) {
        try stderr.print("unable to load model\n", .{});
        process.exit(1);
    }

    _ = c.llama_print_system_info();

    // TODO(caleb): see llama.cpp:145
    // determine the maximum memory usage needed to do inference for the given n_batch and n_ctx parameters
    // uncomment the "used_mem" line in llama.cpp to see the results

    var path_session = gpt_params.path_prompt_cache;
    var session_tokens = std.ArrayList(c.llama_token).init(arena);
    // FIX: was an immediate `session_tokens.deinit();`, which leaves the list
    // undefined while it is still appended to in the generation loop below.
    defer session_tokens.deinit();
    // TODO(caleb): load session tokens see llama.cpp's main.cpp:172

    // Add a space in front of the first character to match OG llama tokenizer behavior
    gpt_params.prompt = try std.fmt.allocPrintZ(arena, " {s}", .{gpt_params.prompt});

    // Tokenize the prompt
    var embd_inp = try tokenize(arena, lctx, gpt_params.prompt, true);
    defer embd_inp.deinit();

    var guidance_inp = std.ArrayList(c.llama_token).init(arena);
    _ = guidance_inp;
    var guidance_offset: c_int = 0;
    var original_prompt_len: c_int = 0;
    _ = original_prompt_len;
    // TODO(caleb): Tokenize negative prompt see main.cpp:207

    const n_ctx = c.llama_n_ctx(lctx);
    if (embd_inp.items.len > n_ctx - 4) {
        try stderr.print("prompt is too long ({d} tokens, max {d})\n", .{ embd_inp.items.len, n_ctx - 4 });
        process.exit(1);
    }

    // Debug message about similarity of saved session, if applicable
    var n_matching_session_tokens: usize = 0;

    // const inp_pfx = try tokenizeInput(arena, lctx, "\n\n### Instruction:\n\n", true);
    // _ = inp_pfx;
    // const inp_sfx = try tokenizeInput(arena, lctx, "\n\n### Response:\n\n", false);
    // _ = inp_sfx;
    const llama_token_newline = try tokenize(arena, lctx, "\n", false);
    _ = llama_token_newline;

    try stderr.print("sampling: repeat_last_n = {d}, repeat_penalty = {d:.3}, presence_penalty = {d:.3}, frequency_penalty = {d:.3}, top_k = {d}, tfs_z = {d:.3}, top_p = {d:.3}, typical_p = {d:.3}, temp = {d:.3}, mirostat = {d}, mirostat_lr = {d:.3}, mirostat_ent = {d:.3}\n", .{ gpt_params.repeat_last_n, gpt_params.repeat_penalty, gpt_params.presence_penalty, gpt_params.frequency_penalty, gpt_params.top_k, gpt_params.tfs_z, gpt_params.top_p, gpt_params.typical_p, gpt_params.temp, gpt_params.mirostat, gpt_params.mirostat_eta, gpt_params.mirostat_tau });
    try stderr.print("generate: n_ctx = {d}, n_batch = {d}, n_predict = {d}, n_keep = {d}\n", .{ n_ctx, gpt_params.n_batch, gpt_params.n_predict, gpt_params.n_keep });
    try stderr.writeAll("\n\n");

    // TODO(llama.cpp author): replace with ring-buffer
    var last_n_tokens = try std.ArrayList(c.llama_token).initCapacity(arena, @as(usize, @intCast(n_ctx)));
    defer last_n_tokens.deinit();
    // FIX: initCapacity leaves len == 0, so the previous zero-fill loop over
    // `.items` was a no-op and indexing last_n_tokens.items during context
    // swapping would go out of bounds. Match llama.cpp: n_ctx zeroed entries.
    last_n_tokens.appendNTimesAssumeCapacity(mem.zeroes(c.llama_token), @as(usize, @intCast(n_ctx)));

    var is_antiprompt: bool = false;
    var input_echo: bool = true;
    _ = input_echo;
    var need_to_save_session: bool = path_session.len != 0 and n_matching_session_tokens < embd_inp.items.len;
    _ = need_to_save_session;
    var n_past: c_int = 0;
    var n_remain: c_int = gpt_params.n_predict;
    var n_consumed: c_int = 0;
    _ = n_consumed;
    var n_session_consumed: c_int = 0;
    var n_past_guidance: c_int = 0;

    var embd = std.ArrayList(c.llama_token).init(arena);
    defer embd.deinit();
    var embd_guidance = std.ArrayList(c.llama_token).init(arena);
    defer embd_guidance.deinit();

    // Do one empty run to warm up the model
    {
        const restore_state = arena_instance.state;
        defer arena_instance.state = restore_state;
        var tmp = try std.ArrayList(c.llama_token).initCapacity(arena, 1);
        tmp.insertAssumeCapacity(0, c.llama_token_bos());
        _ = c.llama_eval(lctx, tmp.items.ptr, @as(c_int, @intCast(tmp.items.len)), 0, gpt_params.n_threads);
        _ = c.llama_reset_timings(lctx);
    }

    while ((n_remain != 0 and !is_antiprompt) or gpt_params.interactive) {
        // predict
        if (embd.items.len > 0) {
            // NOTE(llama.cpp author): n_ctx - 4 here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            const max_embd_size = n_ctx - 4;
            // Ensure the input doesn't exceed the context size by truncating embd if necessary.
            if (@as(c_int, @intCast(embd.items.len)) > max_embd_size) {
                const skipped_tokens = @as(c_int, @intCast(embd.items.len)) - max_embd_size;
                try stdout.print("<<input too long: skipped {d} token{s}>>", .{ skipped_tokens, if (skipped_tokens != 1) "s" else "" });
                try embd.resize(@as(usize, @intCast(max_embd_size)));
            }

            // infinite text generation via context swapping
            // if we run out of context:
            // - take the n_keep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
            if (n_past + @as(c_int, @intCast(embd.items.len)) + @max(0, guidance_offset) > n_ctx) {
                const n_left = n_past - gpt_params.n_keep;
                // always keep the first token - BOS
                n_past = @max(1, gpt_params.n_keep);
                n_past_guidance = @max(1, gpt_params.n_keep + guidance_offset);
                // insert n_left/2 tokens at the start of embd from last_n_tokens
                for (0..@as(usize, @intCast(@divTrunc(n_left, 2)))) |tok_index| try embd.insert(tok_index, last_n_tokens.items[tok_index]);
                // stop saving session if we run out of context
                path_session = "";
                try stdout.writeAll("\n---\n");
                try stdout.writeAll("resetting: '");
                for (embd.items) |tok| try stdout.print("{s}", .{c.llama_token_to_str(lctx, tok)});
                try stdout.writeAll("'\n");
                try stdout.writeAll("\n---\n");
            }

            //TODO(caleb): reuse matching prefix from loaded session main.cpp:458

            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
            //TODO(caleb): Handle context_guidance main.cpp:483
            // FIX: previously this looped over every token and re-evaluated an
            // overlapping window starting at each index (and treated eval
            // return 0 — success — as failure). Step by n_eval as upstream
            // main.cpp does, and fail on a nonzero return.
            var tok_start: usize = 0;
            while (tok_start < embd.items.len) {
                var n_eval = @as(c_int, @intCast(embd.items.len)) - @as(c_int, @intCast(tok_start));
                if (n_eval > gpt_params.n_batch) n_eval = gpt_params.n_batch;
                if (c.llama_eval(lctx, embd.items.ptr + tok_start, n_eval, n_past, gpt_params.n_threads) != 0) {
                    try stderr.writeAll("failed to eval\n");
                    process.exit(1);
                }
                n_past += n_eval;
                tok_start += @as(usize, @intCast(n_eval));
            }

            if (embd.items.len > 0 and path_session.len != 0) {
                for (embd.items, 0..) |tok, tok_index| try session_tokens.insert(tok_index, tok);
                n_session_consumed = @intCast(session_tokens.items.len);
            }
        } // predict

        embd.clearRetainingCapacity();
        embd_guidance.clearRetainingCapacity();
    }
}
https://raw.githubusercontent.com/polymorphicgamestudio/DARWIN/58e9492929a0df94e9cc1ca6e9025f0efd5fbd0e/src/main.zig
const std = @import("std");
const stdout = std.io.getStdOut().writer();

/// Tagged-union polymorphism: dispatch `bark` to the active variant.
pub const Animal = union(enum) {
    const Self = @This();

    dog: Dog,
    cat: Cat,

    /// Forwards to the active variant's `bark`.
    pub fn bark(self: Self) []const u8 {
        return switch (self) {
            inline else => |variant| variant.bark(),
        };
    }
};

pub const Dog = struct {
    const Self = @This();

    buffer: [64]u8,
    len: usize,

    /// Builds a Dog whose bark is "<name> - wan wan" and wraps it in an Animal.
    pub fn init(name: []const u8) !Animal {
        var self = Self{
            .buffer = undefined,
            .len = 0,
        };
        const written = try std.fmt.bufPrint(&self.buffer, "{s} - {s}", .{ name, "wan wan" });
        self.len = written.len;
        return .{ .dog = self };
    }

    /// Returns the formatted bark stored in the fixed buffer.
    pub fn bark(self: Self) []const u8 {
        return self.buffer[0..self.len];
    }
};

pub const Cat = struct {
    const Self = @This();

    buffer: [64]u8,
    len: usize,

    /// Builds a Cat whose bark is "<name> - nyan nyan" and wraps it in an Animal.
    pub fn init(name: []const u8) !Animal {
        var self = Self{
            .buffer = undefined,
            .len = 0,
        };
        const written = try std.fmt.bufPrint(&self.buffer, "{s} - {s}", .{ name, "nyan nyan" });
        self.len = written.len;
        return .{ .cat = self };
    }

    /// Returns the formatted bark stored in the fixed buffer.
    pub fn bark(self: Self) []const u8 {
        return self.buffer[0..self.len];
    }
};

test "tagged_union" {
    const animals = [_]Animal{
        try Dog.init("pochi"),
        try Cat.init("mike"),
    };
    try std.testing.expectEqualStrings("pochi - wan wan", animals[0].bark());
    try std.testing.expectEqualStrings("mike - nyan nyan", animals[1].bark());
}
https://raw.githubusercontent.com/ar90n/lab/6623f927466522ab5a47dfe67a5903da7c97d48b/sandbox/zig_polymorphism/src/tagged_union.zig
const std = @import("std");
const rc = @import("zigrc");
const zt = @import("../zt.zig");
// const cuda = @import("CUDAUtils.zig");
const ZT_BACKEND_CUDA = @import("build_options").ZT_BACKEND_CUDA;
const Device = zt.runtime.Device;
const Arc = rc.Arc;
const X64Device = zt.runtime.X64Device;
const DeviceType = zt.runtime.DeviceType;
const getDeviceTypes = zt.runtime.getDeviceTypes;

/// Device id for the single CPU device.
pub const kX64DeviceId: i32 = 0;

/// Returns the id of the currently active device for `device_type`.
/// CPU always reports kX64DeviceId; CUDA is not wired up yet and errors.
fn getActiveDeviceId(device_type: DeviceType) !i32 {
    return switch (device_type) {
        .x64 => kX64DeviceId,
        .CUDA => {
            // TODO: support CUDA backend
            // if (ZT_BACKEND_CUDA) {
            //     return cuda.getActiveDeviceId();
            // }
            std.log.debug("CUDA is not supported\n", .{});
            return error.CUDABackendUnsupported;
        },
    };
}

// Process-wide singleton created lazily by DeviceManager.getInstance.
var deviceManagerSingleton: ?*DeviceManager = null;

/// Tears down the singleton DeviceManager (if it was ever created).
pub fn deinitDeviceManager() void {
    if (deviceManagerSingleton != null) {
        deviceManagerSingleton.?.deinit();
        deviceManagerSingleton = null;
    }
}

/// Registry of available compute devices, keyed by device type then device id.
pub const DeviceManager = struct {
    pub const DeviceTypeInfo = std.AutoHashMap(i32, Device);

    deviceTypeToInfo_: std.EnumMap(DeviceType, DeviceTypeInfo),
    allocator: std.mem.Allocator,

    /// Allocates the manager and registers the always-present x64 device.
    /// Private: use getInstance.
    fn init(allocator: std.mem.Allocator) !*DeviceManager {
        var x64Info = DeviceTypeInfo.init(allocator);
        const x64Device = Device.init(try X64Device.init(allocator));
        try x64Info.put(kX64DeviceId, x64Device);
        var deviceTypeToInfo_ = std.EnumMap(DeviceType, DeviceTypeInfo){};
        deviceTypeToInfo_.put(.x64, x64Info);
        if (ZT_BACKEND_CUDA) {
            // TODO: add cuda device to deviceTypeToInfo_
        }
        const self = try allocator.create(DeviceManager);
        self.* = .{
            .allocator = allocator,
            .deviceTypeToInfo_ = deviceTypeToInfo_,
        };
        return self;
    }

    /// Releases every registered device, the per-type maps, and the manager itself.
    pub fn deinit(self: *DeviceManager) void {
        var type_info_iterator = self.deviceTypeToInfo_.iterator();
        while (type_info_iterator.next()) |type_info| {
            var device_iterator = type_info.value.valueIterator();
            while (device_iterator.next()) |dev| {
                dev.deinit();
            }
            type_info.value.deinit();
        }
        self.allocator.destroy(self);
    }

    /// Errors with DeviceTypeUnavailable (after logging `error_prefix`) when
    /// `device_type` has no registered devices.
    pub fn enforceDeviceTypeAvailable(self: *DeviceManager, error_prefix: []const u8, device_type: DeviceType) !void {
        if (!self.isDeviceTypeAvailable(device_type)) {
            std.log.debug("{s} device type `{s}` unavailable\n", .{ error_prefix, @tagName(device_type) });
            return error.DeviceTypeUnavailable;
        }
    }

    /// Returns the lazily-created singleton instance.
    pub fn getInstance(allocator: std.mem.Allocator) !*DeviceManager {
        if (deviceManagerSingleton == null) {
            deviceManagerSingleton = try DeviceManager.init(allocator);
        }
        return deviceManagerSingleton.?;
    }

    /// True if at least one device of `device_type` is registered.
    pub fn isDeviceTypeAvailable(self: *DeviceManager, device_type: DeviceType) bool {
        return self.deviceTypeToInfo_.contains(device_type);
    }

    /// Number of registered devices of `device_type`; errors if the type is unavailable.
    pub fn getDeviceCount(self: *DeviceManager, device_type: DeviceType) !usize {
        try self.enforceDeviceTypeAvailable("[DeviceManager.getDeviceCount]", device_type);
        return self.deviceTypeToInfo_.get(device_type).?.count();
    }

    /// Returns all devices of `device_type`. Caller owns the returned slice
    /// (free with `allocator.free`); errors if the type is unavailable.
    pub fn getDevicesOfType(self: *DeviceManager, allocator: std.mem.Allocator, device_type: DeviceType) ![]*Device {
        try self.enforceDeviceTypeAvailable("[DeviceManager.getDevicesOfType]", device_type);
        var device_list = std.ArrayList(*Device).init(allocator);
        var devices = self.deviceTypeToInfo_.get(device_type).?;
        var device_iterator = devices.valueIterator();
        while (device_iterator.next()) |d| {
            try device_list.append(d);
        }
        return device_list.toOwnedSlice();
    }

    /// Looks up a single device by type and id; errors if either is unknown.
    pub fn getDevice(self: *DeviceManager, device_type: DeviceType, id: i32) !Device {
        try self.enforceDeviceTypeAvailable("[DeviceManager.getDevice]", device_type);
        var idToDevice = self.deviceTypeToInfo_.get(device_type).?;
        if (!idToDevice.contains(id)) {
            std.log.debug("[DeviceManager::getDevice] unknown device id: [{d}]\n", .{id});
            return error.DeviceNotFound;
        }
        return idToDevice.get(id).?;
    }

    /// Returns the active device of `device_type` (per getActiveDeviceId).
    pub fn getActiveDevice(self: *DeviceManager, device_type: DeviceType) !Device {
        try self.enforceDeviceTypeAvailable("[DeviceManager.getActiveDevice]", device_type);
        const active_device_id = try getActiveDeviceId(device_type);
        return self.deviceTypeToInfo_.get(device_type).?.get(active_device_id).?;
    }
};

test "DeviceManager getInstance" {
    const allocator = std.testing.allocator;
    zt.tensor.init(allocator);
    defer zt.tensor.deinit();
    const mgr1 = try DeviceManager.getInstance(allocator);
    try std.testing.expectEqual(mgr1, try DeviceManager.getInstance(allocator));
}

test "DeviceManager isDeviceTypeAvailable" {
    const allocator = std.testing.allocator;
    zt.tensor.init(allocator);
    defer zt.tensor.deinit();
    var mgr = try DeviceManager.getInstance(allocator);
    // x64 (CPU) should be always available
    try std.testing.expect(mgr.isDeviceTypeAvailable(.x64));
    // CUDA availability depends on compilation
    try std.testing.expect(mgr.isDeviceTypeAvailable(.CUDA) == ZT_BACKEND_CUDA);
}

test "DeviceManager getDeviceCount" {
    const allocator = std.testing.allocator;
    zt.tensor.init(allocator);
    defer zt.tensor.deinit();
    var mgr = try DeviceManager.getInstance(allocator);
    // For now we always treat CPU as a single device
    try std.testing.expect(try mgr.getDeviceCount(.x64) == 1);
    if (mgr.isDeviceTypeAvailable(.CUDA)) {
        try std.testing.expect(try mgr.getDeviceCount(.CUDA) != 0);
    } else {
        try std.testing.expectError(error.DeviceTypeUnavailable, mgr.getDeviceCount(.CUDA));
    }
}

test "DeviceManager getDevicesOfType" {
    const allocator = std.testing.allocator;
    zt.tensor.init(allocator);
    defer zt.tensor.deinit();
    var mgr = try DeviceManager.getInstance(allocator);
    // For now we always treat CPU as a single device
    const devices = try mgr.getDevicesOfType(allocator, .x64);
    defer allocator.free(devices);
    try std.testing.expect(devices.len == 1);
    var device_type_set = getDeviceTypes();
    var type_iterator = device_type_set.iterator();
    while (type_iterator.next()) |t| {
        // FIX: the loop previously tested `.CUDA` instead of the loop
        // variable `t`, leaked `dev_list`, and asserted on getDeviceCount
        // in the unavailable branch rather than the function under test.
        if (mgr.isDeviceTypeAvailable(t)) {
            const dev_list = try mgr.getDevicesOfType(allocator, t);
            defer allocator.free(dev_list);
            for (dev_list) |dev| {
                try std.testing.expect(dev.deviceType() == t);
            }
        } else {
            try std.testing.expectError(error.DeviceTypeUnavailable, mgr.getDevicesOfType(allocator, t));
        }
    }
}
test "DeviceManager getDevice" {
    const alloc = std.testing.allocator;
    zt.tensor.init(alloc);
    defer zt.tensor.deinit();
    var manager = try DeviceManager.getInstance(alloc);
    // The CPU device must be reachable by its well-known id.
    var cpu_device = try manager.getDevice(.x64, kX64DeviceId);
    try std.testing.expect(cpu_device.deviceType() == .x64);
}

test "DeviceManager getActiveDevice" {
    const alloc = std.testing.allocator;
    zt.tensor.init(alloc);
    defer zt.tensor.deinit();
    var manager = try DeviceManager.getInstance(alloc);
    var known_types = getDeviceTypes();
    var it = known_types.iterator();
    // Every available type must report an active device of that same type;
    // unavailable types must error.
    while (it.next()) |t| {
        if (!manager.isDeviceTypeAvailable(t)) {
            try std.testing.expectError(error.DeviceTypeUnavailable, manager.getActiveDevice(.CUDA));
            continue;
        }
        var active = try manager.getActiveDevice(t);
        try std.testing.expect(active.deviceType() == t);
    }
}
https://raw.githubusercontent.com/cryptodeal/zigTensor/4a2ef170c6bd81d7f747403f98c00a3065a74100/src/runtime/device_manager.zig
//
// Now that we've seen how methods work, let's see if we can help
// our elephants out a bit more with some Elephant methods.
//
const std = @import("std");

const Elephant = struct {
    letter: u8,
    tail: ?*Elephant = null,
    visited: bool = false,

    // New Elephant methods!

    /// Unwraps the tail pointer; only call when hasTail() is true.
    pub fn getTail(self: *Elephant) *Elephant {
        return self.tail.?; // Remember, this means "orelse unreachable"
    }

    /// True when this elephant points at another one.
    pub fn hasTail(self: *Elephant) bool {
        return self.tail != null;
    }

    /// Marks this elephant as seen.
    pub fn visit(self: *Elephant) void {
        self.visited = true;
    }

    /// Prints elephant letter and [v]isited flag.
    pub fn print(self: *Elephant) void {
        const flag: u8 = if (self.visited) 'v' else ' ';
        std.debug.print("{u}{u} ", .{ self.letter, flag });
    }
};

pub fn main() void {
    var elephantA = Elephant{ .letter = 'A' };
    var elephantB = Elephant{ .letter = 'B' };
    var elephantC = Elephant{ .letter = 'C' };

    // This links the elephants so that each tail "points" to the next.
    elephantA.tail = &elephantB;
    elephantB.tail = &elephantC;
    // elephantC.tail = &elephantA;

    visitElephants(&elephantA);
    std.debug.print("\n", .{});
}

// This function visits all elephants once, starting with the
// first elephant and following the tails to the next elephant.
fn visitElephants(first_elephant: *Elephant) void {
    var current = first_elephant;
    while (true) {
        current.print();
        current.visit();
        // Stop at the last elephant, otherwise follow the tail.
        if (!current.hasTail()) break;
        current = current.getTail();
    }
}

// Zig's enums can also have methods! This comment originally asked
// if anyone could find instances of enum methods in the wild. The
// first five pull requests were accepted and here they are:
//
// 1) drforester - I found one in the Zig source:
// https://github.com/ziglang/zig/blob/041212a41cfaf029dc3eb9740467b721c76f406c/src/Compilation.zig#L2495
//
// 2) bbuccianti - I found one!
// https://github.com/ziglang/zig/blob/6787f163eb6db2b8b89c2ea6cb51d63606487e12/lib/std/debug.zig#L477 // // 3) GoldsteinE - Found many, here's one // https://github.com/ziglang/zig/blob/ce14bc7176f9e441064ffdde2d85e35fd78977f2/lib/std/target.zig#L65 // // 4) SpencerCDixon - Love this language so far :-) // https://github.com/ziglang/zig/blob/a502c160cd51ce3de80b3be945245b7a91967a85/src/zir.zig#L530 // // 5) tomkun - here's another enum method // https://github.com/ziglang/zig/blob/4ca1f4ec2e3ae1a08295bc6ed03c235cb7700ab9/src/codegen/aarch64.zig#L24
https://raw.githubusercontent.com/zachariahmagee/ziglings/3e63164721ef763c4f1096856c0d14fe63f6fbfe/exercises/048_methods2.zig
//! Implements the runfiles strategy and discovery as defined in the following design document:
//! https://docs.google.com/document/d/e/2PACX-1vSDIrFnFvEYhKsCMdGdD40wZRBX3m3aZ5HhVj4CtHPmiXKDCxioTUbYsDydjKtFDAzER5eg7OjJWs3V/pub
const std = @import("std");
const builtin = @import("builtin");
const log = std.log.scoped(.runfiles);

// Environment variables and path suffixes used by the discovery protocol.
pub const runfiles_manifest_var_name = "RUNFILES_MANIFEST_FILE";
pub const runfiles_directory_var_name = "RUNFILES_DIR";
pub const runfiles_manifest_suffix = ".runfiles_manifest";
pub const runfiles_directory_suffix = ".runfiles";
pub const repo_mapping_file_name = "_repo_mapping";

/// * Manifest-based: reads the runfiles manifest file to look up runfiles.
/// * Directory-based: appends the runfile's path to the runfiles root.
/// The client is responsible for checking that the resulting path exists.
pub const Strategy = enum {
    manifest,
    directory,
};

/// The path to a runfiles manifest file or a runfiles directory.
pub const Location = union(Strategy) {
    manifest: []const u8,
    directory: []const u8,

    /// Frees the owned path. `allocator` must be the allocator that
    /// produced the path (see discoverRunfiles).
    pub fn deinit(self: *Location, allocator: std.mem.Allocator) void {
        switch (self.*) {
            .manifest => |value| allocator.free(value),
            .directory => |value| allocator.free(value),
        }
    }
};

pub const DiscoverOptions = struct {
    /// Used during runfiles discovery.
    allocator: std.mem.Allocator,
    /// User override for the `RUNFILES_MANIFEST_FILE` variable.
    manifest: ?[]const u8 = null,
    /// User override for the `RUNFILES_DIR` variable.
    directory: ?[]const u8 = null,
    /// User override for `argv[0]`.
    argv0: ?[]const u8 = null,
};

// Zig 0.11 reports `InvalidUtf8` from argument parsing; later versions
// renamed it to `InvalidWtf8`, hence the version switch.
pub const DiscoverError = if (builtin.zig_version.major == 0 and builtin.zig_version.minor == 11) error{
    OutOfMemory,
    InvalidCmdLine,
    InvalidUtf8,
    MissingArg0,
} else error{
    OutOfMemory,
    InvalidCmdLine,
    InvalidWtf8,
    MissingArg0,
};

/// The unified runfiles discovery strategy is to:
/// * check if `RUNFILES_MANIFEST_FILE` or `RUNFILES_DIR` envvars are set, and
///   again initialize a `Runfiles` object accordingly; otherwise
/// * check if the `argv[0] + ".runfiles_manifest"` file or the
///   `argv[0] + ".runfiles"` directory exists (keeping in mind that argv[0]
///   may not include the `".exe"` suffix on Windows), and if so, initialize a
///   manifest- or directory-based `Runfiles` object; otherwise
/// * assume the binary has no runfiles.
///
/// The caller has to free the path contained in the returned location.
pub fn discoverRunfiles(options: DiscoverOptions) DiscoverError!?Location {
    // Explicit user overrides win over everything else.
    if (options.manifest) |value| return .{ .manifest = try options.allocator.dupe(u8, value) };
    if (options.directory) |value| return .{ .directory = try options.allocator.dupe(u8, value) };
    // Next, the environment variables (values are already owned copies).
    if (try getEnvVar(options.allocator, runfiles_manifest_var_name)) |value|
        return .{ .manifest = value };
    if (try getEnvVar(options.allocator, runfiles_directory_var_name)) |value|
        return .{ .directory = value };
    var iter = try std.process.argsWithAllocator(options.allocator);
    defer iter.deinit();
    const argv0 = options.argv0 orelse iter.next() orelse return error.MissingArg0;
    // Probe argv0-derived candidates, reusing one buffer for each attempt:
    // manifest, then manifest with ".exe", then directory, then directory
    // with ".exe" (Windows argv[0] may omit the extension).
    var buffer = std.ArrayList(u8).init(options.allocator);
    defer buffer.deinit();
    buffer.clearRetainingCapacity();
    try buffer.writer().print("{s}{s}", .{ argv0, runfiles_manifest_suffix });
    if (isReadableFile(buffer.items)) return .{ .manifest = try buffer.toOwnedSlice() };
    buffer.clearRetainingCapacity();
    try buffer.writer().print("{s}.exe{s}", .{ argv0, runfiles_manifest_suffix });
    if (isReadableFile(buffer.items)) return .{ .manifest = try buffer.toOwnedSlice() };
    buffer.clearRetainingCapacity();
    try buffer.writer().print("{s}{s}", .{ argv0, runfiles_directory_suffix });
    if (isOpenableDir(buffer.items)) return .{ .directory = try buffer.toOwnedSlice() };
    buffer.clearRetainingCapacity();
    try buffer.writer().print("{s}.exe{s}", .{ argv0, runfiles_directory_suffix });
    if (isOpenableDir(buffer.items)) return .{ .directory = try buffer.toOwnedSlice() };
    // No runfiles found.
    return null;
}

/// Returns an owned copy of the environment variable's value, or null if
/// it is unset. Other failures propagate.
fn getEnvVar(allocator: std.mem.Allocator, key: []const u8) !?[]const u8 {
    return std.process.getEnvVarOwned(allocator, key) catch |e| switch (e) {
        error.EnvironmentVariableNotFound => null,
        else => |e_| return e_,
    };
}

/// True if `file_path` can be opened for reading (any open error counts as "no").
fn isReadableFile(file_path: []const u8) bool {
    var file = std.fs.cwd().openFile(file_path, .{}) catch return false;
    file.close();
    return true;
}

/// True if `dir_path` can be opened as a directory (any open error counts as "no").
fn isOpenableDir(dir_path: []const u8) bool {
    var dir = std.fs.cwd().openDir(dir_path, .{}) catch return false;
    dir.close();
    return true;
}

// Test-only helpers that set/unset process environment variables through
// libc, since Zig's std does not mutate the environment.
const testing = struct {
    const c = @cImport({
        @cInclude("stdlib.h");
    });

    pub fn setenv(name: []const u8, value: []const u8) !void {
        const nameZ = try std.testing.allocator.dupeZ(u8, name);
        defer std.testing.allocator.free(nameZ);
        const valueZ = try std.testing.allocator.dupeZ(u8, value);
        defer std.testing.allocator.free(valueZ);
        if (builtin.os.tag == .windows) {
            if (testing.c._putenv_s(nameZ, valueZ) != 0) return error.SetEnvFailed;
        } else {
            if (testing.c.setenv(nameZ, valueZ, 1) != 0) return error.SetEnvFailed;
        }
    }

    pub fn unsetenv(name: []const u8) !void {
        const nameZ = try std.testing.allocator.dupeZ(u8, name);
        defer std.testing.allocator.free(nameZ);
        if (builtin.os.tag == .windows) {
            // On Windows, assigning an empty value removes the variable.
            if (testing.c._putenv_s(nameZ, "") != 0) return error.UnsetEnvFailed;
        } else {
            if (testing.c.unsetenv(nameZ) != 0) return error.UnsetEnvFailed;
        }
    }
};

test "discover user specified manifest" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.writeFile("test.runfiles_manifest", "");
    const manifest_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.runfiles_manifest");
    defer std.testing.allocator.free(manifest_path);
    // Environment deliberately points at bogus paths: the explicit option
    // must take precedence.
    try testing.setenv(runfiles_manifest_var_name, "MANIFEST_DOES_NOT_EXIST");
    try testing.setenv(runfiles_directory_var_name, "DIRECTORY_DOES_NOT_EXIST");
    var location = try discoverRunfiles(.{
        .allocator = std.testing.allocator,
        .manifest = manifest_path,
    }) orelse return error.TestRunfilesNotFound;
    defer location.deinit(std.testing.allocator);
    try std.testing.expectEqual(Strategy.manifest, @as(Strategy, location));
    try std.testing.expectEqualStrings(manifest_path, location.manifest);
}

test "discover environment specified manifest" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.writeFile("test.runfiles_manifest", "");
    const manifest_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.runfiles_manifest");
    defer std.testing.allocator.free(manifest_path);
    try testing.setenv(runfiles_manifest_var_name, manifest_path);
    try testing.unsetenv(runfiles_directory_var_name);
    var location = try discoverRunfiles(.{
        .allocator = std.testing.allocator,
    }) orelse return error.TestRunfilesNotFound;
    defer location.deinit(std.testing.allocator);
    try std.testing.expectEqual(Strategy.manifest, @as(Strategy, location));
    try std.testing.expectEqualStrings(manifest_path, location.manifest);
}

test "discover user specified directory" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.makeDir("test.runfiles");
    const directory_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.runfiles");
    defer std.testing.allocator.free(directory_path);
    try testing.setenv(runfiles_manifest_var_name, "MANIFEST_DOES_NOT_EXIST");
    try testing.setenv(runfiles_directory_var_name, "DIRECTORY_DOES_NOT_EXIST");
    var location = try discoverRunfiles(.{ .allocator = std.testing.allocator, .directory = directory_path }) orelse return error.TestRunfilesNotFound;
    defer location.deinit(std.testing.allocator);
    try std.testing.expectEqual(Strategy.directory, @as(Strategy, location));
    try std.testing.expectEqualStrings(directory_path, location.directory);
}

test "discover environment specified directory" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.makeDir("test.runfiles");
    const directory_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.runfiles");
    defer std.testing.allocator.free(directory_path);
    try testing.unsetenv(runfiles_manifest_var_name);
    try testing.setenv(runfiles_directory_var_name, directory_path);
    var location = try discoverRunfiles(.{
        .allocator = std.testing.allocator,
    }) orelse return error.TestRunfilesNotFound;
    defer location.deinit(std.testing.allocator);
    try std.testing.expectEqual(Strategy.directory, @as(Strategy, location));
    try std.testing.expectEqualStrings(directory_path, location.directory);
}

test "discover user specified argv0 manifest" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.writeFile("test.runfiles_manifest", "");
    const manifest_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.runfiles_manifest");
    defer std.testing.allocator.free(manifest_path);
    try testing.unsetenv(runfiles_manifest_var_name);
    try testing.unsetenv(runfiles_directory_var_name);
    // argv0 is the manifest path without its suffix.
    const argv0 = manifest_path[0 .. manifest_path.len - ".runfiles_manifest".len];
    var location = try discoverRunfiles(.{
        .allocator = std.testing.allocator,
        .argv0 = argv0,
    }) orelse return error.TestRunfilesNotFound;
    defer location.deinit(std.testing.allocator);
    try std.testing.expectEqual(Strategy.manifest, @as(Strategy, location));
    try std.testing.expectEqualStrings(manifest_path, location.manifest);
}

test "discover user specified argv0 .exe manifest" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.writeFile("test.exe.runfiles_manifest", "");
    const manifest_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.exe.runfiles_manifest");
    defer std.testing.allocator.free(manifest_path);
    try testing.unsetenv(runfiles_manifest_var_name);
    try testing.unsetenv(runfiles_directory_var_name);
    // argv0 without ".exe" must still find the ".exe"-suffixed manifest.
    const argv0 = manifest_path[0 .. manifest_path.len - ".exe.runfiles_manifest".len];
    var location = try discoverRunfiles(.{
        .allocator = std.testing.allocator,
        .argv0 = argv0,
    }) orelse return error.TestRunfilesNotFound;
    defer location.deinit(std.testing.allocator);
    try std.testing.expectEqual(Strategy.manifest, @as(Strategy, location));
    try std.testing.expectEqualStrings(manifest_path, location.manifest);
}

test "discover user specified argv0 directory" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.makeDir("test.runfiles");
    const directory_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.runfiles");
    defer std.testing.allocator.free(directory_path);
    try testing.unsetenv(runfiles_manifest_var_name);
    try testing.unsetenv(runfiles_directory_var_name);
    const argv0 = directory_path[0 .. directory_path.len - ".runfiles".len];
    var location = try discoverRunfiles(.{
        .allocator = std.testing.allocator,
        .argv0 = argv0,
    }) orelse return error.TestRunfilesNotFound;
    defer location.deinit(std.testing.allocator);
    try std.testing.expectEqual(Strategy.directory, @as(Strategy, location));
    try std.testing.expectEqualStrings(directory_path, location.directory);
}

test "discover user specified argv0 .exe directory" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.makeDir("test.exe.runfiles");
    const directory_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.exe.runfiles");
    defer std.testing.allocator.free(directory_path);
    try testing.unsetenv(runfiles_manifest_var_name);
    try testing.unsetenv(runfiles_directory_var_name);
    const argv0 = directory_path[0 .. directory_path.len - ".exe.runfiles".len];
    var location = try discoverRunfiles(.{
        .allocator = std.testing.allocator,
        .argv0 = argv0,
    }) orelse return error.TestRunfilesNotFound;
    defer location.deinit(std.testing.allocator);
    try std.testing.expectEqual(Strategy.directory, @as(Strategy, location));
    try std.testing.expectEqualStrings(directory_path, location.directory);
}

test "discover not found" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const tmp_path = try tmp.dir.realpathAlloc(std.testing.allocator, ".");
    defer std.testing.allocator.free(tmp_path);
    try testing.unsetenv(runfiles_manifest_var_name);
    try testing.unsetenv(runfiles_directory_var_name);
    const argv0 = try std.fmt.allocPrint(std.testing.allocator, "{s}/does-not-exist", .{tmp_path});
    defer std.testing.allocator.free(argv0);
    const result = try discoverRunfiles(.{
        .allocator = std.testing.allocator,
        .argv0 = argv0,
    });
    try std.testing.expectEqual(@as(?Location, null), result);
}

// Verifies the full precedence order by removing one discovery source at a
// time: option manifest > option directory > env manifest > env directory >
// argv0 manifest > argv0 directory > not found.
test "discover priority" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.writeFile("test.runfiles_manifest", "");
    try tmp.dir.makeDir("test.runfiles");
    const manifest_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.runfiles_manifest");
    defer std.testing.allocator.free(manifest_path);
    const directory_path = try tmp.dir.realpathAlloc(std.testing.allocator, "test.runfiles");
    defer std.testing.allocator.free(directory_path);
    const argv0 = manifest_path[0 .. manifest_path.len - ".runfiles_manifest".len];
    {
        // user specified manifest first.
        try testing.setenv(runfiles_manifest_var_name, manifest_path);
        try testing.setenv(runfiles_directory_var_name, directory_path);
        var location = try discoverRunfiles(.{
            .allocator = std.testing.allocator,
            .manifest = manifest_path,
            .directory = directory_path,
            .argv0 = argv0,
        }) orelse return error.TestRunfilesNotFound;
        defer location.deinit(std.testing.allocator);
        try std.testing.expectEqual(Strategy.manifest, @as(Strategy, location));
        try std.testing.expectEqualStrings(manifest_path, location.manifest);
    }
    {
        // user specified directory next.
        try testing.setenv(runfiles_manifest_var_name, manifest_path);
        try testing.setenv(runfiles_directory_var_name, directory_path);
        var location = try discoverRunfiles(.{
            .allocator = std.testing.allocator,
            .directory = directory_path,
            .argv0 = argv0,
        }) orelse return error.TestRunfilesNotFound;
        defer location.deinit(std.testing.allocator);
        try std.testing.expectEqual(Strategy.directory, @as(Strategy, location));
        try std.testing.expectEqualStrings(directory_path, location.directory);
    }
    {
        // environment specified manifest next.
        try testing.setenv(runfiles_manifest_var_name, manifest_path);
        try testing.setenv(runfiles_directory_var_name, directory_path);
        var location = try discoverRunfiles(.{
            .allocator = std.testing.allocator,
            .argv0 = argv0,
        }) orelse return error.TestRunfilesNotFound;
        defer location.deinit(std.testing.allocator);
        try std.testing.expectEqual(Strategy.manifest, @as(Strategy, location));
        try std.testing.expectEqualStrings(manifest_path, location.manifest);
    }
    {
        // environment specified directory next.
        try testing.unsetenv(runfiles_manifest_var_name);
        try testing.setenv(runfiles_directory_var_name, directory_path);
        var location = try discoverRunfiles(.{
            .allocator = std.testing.allocator,
            .argv0 = argv0,
        }) orelse return error.TestRunfilesNotFound;
        defer location.deinit(std.testing.allocator);
        try std.testing.expectEqual(Strategy.directory, @as(Strategy, location));
        try std.testing.expectEqualStrings(directory_path, location.directory);
    }
    {
        // argv0 specified manifest next.
        try testing.unsetenv(runfiles_manifest_var_name);
        try testing.unsetenv(runfiles_directory_var_name);
        var location = try discoverRunfiles(.{
            .allocator = std.testing.allocator,
            .argv0 = argv0,
        }) orelse return error.TestRunfilesNotFound;
        defer location.deinit(std.testing.allocator);
        try std.testing.expectEqual(Strategy.manifest, @as(Strategy, location));
        try std.testing.expectEqualStrings(manifest_path, location.manifest);
    }
    try tmp.dir.deleteFile("test.runfiles_manifest");
    {
        // argv0 specified directory next.
        try testing.unsetenv(runfiles_manifest_var_name);
        try testing.unsetenv(runfiles_directory_var_name);
        var location = try discoverRunfiles(.{
            .allocator = std.testing.allocator,
            .argv0 = argv0,
        }) orelse return error.TestRunfilesNotFound;
        defer location.deinit(std.testing.allocator);
        try std.testing.expectEqual(Strategy.directory, @as(Strategy, location));
        try std.testing.expectEqualStrings(directory_path, location.directory);
    }
    try tmp.dir.deleteDir("test.runfiles");
    {
        // finally runfiles not found.
        try testing.unsetenv(runfiles_manifest_var_name);
        try testing.unsetenv(runfiles_directory_var_name);
        const result = try discoverRunfiles(.{
            .allocator = std.testing.allocator,
            .argv0 = argv0,
        });
        try std.testing.expectEqual(@as(?Location, null), result);
    }
}
https://raw.githubusercontent.com/aherrmann/rules_zig/2c2ec8c2a3bc91e883dc307ead97e4703201e834/zig/runfiles/src/discovery.zig
pub usingnamespace @import("std").zig.c_builtins;
const std = @import("std");

/// One-time s2n initialization hook. Currently only disables s2n's atexit
/// handler; the remaining setup steps are kept below, commented out, as a
/// reference for future wiring.
/// NOTE(review): the allocator parameter is currently unused — presumably it
/// will back the commented-out s2n_mem_set_callbacks wiring; confirm before
/// relying on it.
pub fn boot(allocator: *std.mem.Allocator) void { // FIX: parameter was misspelled "allcoator"
    _ = allocator; // unused until the mem callbacks below are enabled
    // if (booted) return;
    // booted = true;
    // Allocator.allocator = allocator;
    _ = s2n_disable_atexit();
    // _ = s2n_mem_set_callbacks(Allocator.initCallback, Allocator.deinitCallback, Allocator.mallocCallback, Allocator.freeCallback);
    // _ = s2n_init();
    // global_s2n_config = s2n_config_new();
    // _ = s2n_config_disable_x509_verification(global_s2n_config);
}

// pub extern threadlocal var s2n_errno: c_int;
// pub extern fn s2n_errno_location() [*c]c_int;
// pub const S2N_ERR_T_OK: c_int = 0;
// pub const S2N_ERR_T_IO: c_int = 1;
// pub const S2N_ERR_T_CLOSED: c_int = 2;
// pub const S2N_ERR_T_BLOCKED: c_int = 3;
// pub const S2N_ERR_T_ALERT: c_int = 4;
// pub const S2N_ERR_T_PROTO: c_int = 5;
// pub const S2N_ERR_T_INTERNAL: c_int = 6;
// pub const S2N_ERR_T_USAGE: c_int = 7;
// pub const s2n_error_type = c_uint;
// pub extern fn s2n_error_get_type(@"error": c_int) c_int;
// pub const struct_s2n_config = opaque {};
// pub const struct_s2n_connection = opaque {};
// pub extern fn s2n_crypto_disable_init() c_int;
pub extern fn s2n_disable_atexit() c_int;
// pub extern fn s2n_get_openssl_version() c_ulong;
// pub extern fn s2n_init() c_int;
// pub extern fn s2n_cleanup() c_int;
// pub extern fn s2n_config_new() *struct_s2n_config;
// pub extern fn s2n_config_free(config: *struct_s2n_config) c_int;
// pub extern fn s2n_config_free_dhparams(config: *struct_s2n_config) c_int;
// pub extern fn s2n_config_free_cert_chain_and_key(config: *struct_s2n_config) c_int;
// pub const s2n_clock_time_nanoseconds = ?fn (?*c_void, [*c]u64) callconv(.C) c_int;
// pub const s2n_cache_retrieve_callback = ?fn (*struct_s2n_connection, ?*c_void, ?*const c_void, u64, ?*c_void, [*c]u64) callconv(.C) c_int;
// pub const s2n_cache_store_callback = ?fn (*struct_s2n_connection, ?*c_void, u64, ?*const c_void, u64, ?*const c_void, u64) callconv(.C) c_int;
// pub const
s2n_cache_delete_callback = ?fn (*struct_s2n_connection, ?*c_void, ?*const c_void, u64) callconv(.C) c_int; // pub extern fn s2n_config_set_wall_clock(config: *struct_s2n_config, clock_fn: s2n_clock_time_nanoseconds, ctx: ?*c_void) c_int; // pub extern fn s2n_config_set_monotonic_clock(config: *struct_s2n_config, clock_fn: s2n_clock_time_nanoseconds, ctx: ?*c_void) c_int; // pub extern fn s2n_strerror(@"error": c_int, lang: [*c]const u8) [*c]const u8; // pub extern fn s2n_strerror_debug(@"error": c_int, lang: [*c]const u8) [*c]const u8; // pub extern fn s2n_strerror_name(@"error": c_int) [*c]const u8; // pub const struct_s2n_stacktrace = opaque {}; // pub extern fn s2n_stack_traces_enabled() bool; // pub extern fn s2n_stack_traces_enabled_set(newval: bool) c_int; // pub extern fn s2n_calculate_stacktrace() c_int; // // pub extern fn s2n_print_stacktrace(fptr: [*c]FILE) c_int; // pub extern fn s2n_free_stacktrace() c_int; // pub extern fn s2n_get_stacktrace(trace: *struct_s2n_stacktrace) c_int; // pub extern fn s2n_config_set_cache_store_callback(config: *struct_s2n_config, cache_store_callback: s2n_cache_store_callback, data: ?*c_void) c_int; // pub extern fn s2n_config_set_cache_retrieve_callback(config: *struct_s2n_config, cache_retrieve_callback: s2n_cache_retrieve_callback, data: ?*c_void) c_int; // pub extern fn s2n_config_set_cache_delete_callback(config: *struct_s2n_config, cache_delete_callback: s2n_cache_delete_callback, data: ?*c_void) c_int; // pub const s2n_mem_init_callback = ?fn () callconv(.C) c_int; // pub const s2n_mem_cleanup_callback = ?fn () callconv(.C) c_int; // pub const s2n_mem_malloc_callback = ?fn (**c_void, u32, *u32) callconv(.C) c_int; // pub const s2n_mem_free_callback = ?fn (*c_void, u32) callconv(.C) c_int; // pub extern fn s2n_mem_set_callbacks(mem_init_callback: s2n_mem_init_callback, mem_cleanup_callback: s2n_mem_cleanup_callback, mem_malloc_callback: s2n_mem_malloc_callback, mem_free_callback: s2n_mem_free_callback) c_int; // pub 
const s2n_rand_init_callback = ?fn () callconv(.C) c_int; // pub const s2n_rand_cleanup_callback = ?fn () callconv(.C) c_int; // pub const s2n_rand_seed_callback = ?fn (?*c_void, u32) callconv(.C) c_int; // pub const s2n_rand_mix_callback = ?fn (?*c_void, u32) callconv(.C) c_int; // pub extern fn s2n_rand_set_callbacks(rand_init_callback: s2n_rand_init_callback, rand_cleanup_callback: s2n_rand_cleanup_callback, rand_seed_callback: s2n_rand_seed_callback, rand_mix_callback: s2n_rand_mix_callback) c_int; // pub const S2N_EXTENSION_SERVER_NAME: c_int = 0; // pub const S2N_EXTENSION_MAX_FRAG_LEN: c_int = 1; // pub const S2N_EXTENSION_OCSP_STAPLING: c_int = 5; // pub const S2N_EXTENSION_SUPPORTED_GROUPS: c_int = 10; // pub const S2N_EXTENSION_EC_POINT_FORMATS: c_int = 11; // pub const S2N_EXTENSION_SIGNATURE_ALGORITHMS: c_int = 13; // pub const S2N_EXTENSION_ALPN: c_int = 16; // pub const S2N_EXTENSION_CERTIFICATE_TRANSPARENCY: c_int = 18; // pub const S2N_EXTENSION_RENEGOTIATION_INFO: c_int = 65281; // pub const s2n_tls_extension_type = c_uint; // pub const S2N_TLS_MAX_FRAG_LEN_512: c_int = 1; // pub const S2N_TLS_MAX_FRAG_LEN_1024: c_int = 2; // pub const S2N_TLS_MAX_FRAG_LEN_2048: c_int = 3; // pub const S2N_TLS_MAX_FRAG_LEN_4096: c_int = 4; // pub const s2n_max_frag_len = c_uint; // pub const struct_s2n_cert = opaque {}; // pub const struct_s2n_cert_chain_and_key = opaque {}; // pub const struct_s2n_pkey = opaque {}; // pub const s2n_cert_public_key = struct_s2n_pkey; // pub const s2n_cert_private_key = struct_s2n_pkey; // pub extern fn s2n_cert_chain_and_key_new() *struct_s2n_cert_chain_and_key; // pub extern fn s2n_cert_chain_and_key_load_pem(chain_and_key: *struct_s2n_cert_chain_and_key, chain_pem: [*c]const u8, private_key_pem: [*c]const u8) c_int; // pub extern fn s2n_cert_chain_and_key_load_pem_bytes(chain_and_key: *struct_s2n_cert_chain_and_key, chain_pem: [*c]u8, chain_pem_len: u32, private_key_pem: [*c]u8, private_key_pem_len: u32) c_int; // pub extern fn 
s2n_cert_chain_and_key_load_public_pem_bytes(chain_and_key: *struct_s2n_cert_chain_and_key, chain_pem: [*c]u8, chain_pem_len: u32) c_int; // pub extern fn s2n_cert_chain_and_key_free(cert_and_key: *struct_s2n_cert_chain_and_key) c_int; // pub extern fn s2n_cert_chain_and_key_set_ctx(cert_and_key: *struct_s2n_cert_chain_and_key, ctx: ?*c_void) c_int; // pub extern fn s2n_cert_chain_and_key_get_ctx(cert_and_key: *struct_s2n_cert_chain_and_key) ?*c_void; // pub extern fn s2n_cert_chain_and_key_get_private_key(cert_and_key: *struct_s2n_cert_chain_and_key) ?*s2n_cert_private_key; // pub const s2n_cert_tiebreak_callback = ?fn (*struct_s2n_cert_chain_and_key, *struct_s2n_cert_chain_and_key, [*c]u8, u32) callconv(.C) *struct_s2n_cert_chain_and_key; // pub extern fn s2n_config_set_cert_tiebreak_callback(config: *struct_s2n_config, cert_tiebreak_cb: s2n_cert_tiebreak_callback) c_int; // pub extern fn s2n_config_add_cert_chain_and_key(config: *struct_s2n_config, cert_chain_pem: [*c]const u8, private_key_pem: [*c]const u8) c_int; // pub extern fn s2n_config_add_cert_chain_and_key_to_store(config: *struct_s2n_config, cert_key_pair: *struct_s2n_cert_chain_and_key) c_int; // pub extern fn s2n_config_set_cert_chain_and_key_defaults(config: *struct_s2n_config, cert_key_pairs: [*c]*struct_s2n_cert_chain_and_key, num_cert_key_pairs: u32) c_int; // pub extern fn s2n_config_set_verification_ca_location(config: *struct_s2n_config, ca_pem_filename: [*c]const u8, ca_dir: [*c]const u8) c_int; // pub extern fn s2n_config_add_pem_to_trust_store(config: *struct_s2n_config, pem: [*c]const u8) c_int; // pub extern fn s2n_config_wipe_trust_store(config: *struct_s2n_config) c_int; // pub const s2n_verify_host_fn = ?fn ([*c]const u8, usize, ?*c_void) callconv(.C) u8; // pub extern fn s2n_config_set_verify_host_callback(config: *struct_s2n_config, s2n_verify_host_fn, data: ?*c_void) c_int; // pub extern fn s2n_config_set_check_stapled_ocsp_response(config: *struct_s2n_config, check_ocsp: u8) c_int; 
// pub extern fn s2n_config_disable_x509_verification(config: *struct_s2n_config) c_int; // pub extern fn s2n_config_set_max_cert_chain_depth(config: *struct_s2n_config, max_depth: u16) c_int; // pub extern fn s2n_config_add_dhparams(config: *struct_s2n_config, dhparams_pem: [*c]const u8) c_int; // pub extern fn s2n_config_set_cipher_preferences(config: *struct_s2n_config, version: [*c]const u8) c_int; // pub extern fn s2n_config_append_protocol_preference(config: *struct_s2n_config, protocol: [*c]const u8, protocol_len: u8) c_int; // pub extern fn s2n_config_set_protocol_preferences(config: *struct_s2n_config, protocols: [*c]const [*c]const u8, protocol_count: c_int) c_int; // pub const S2N_STATUS_REQUEST_NONE: c_int = 0; // pub const S2N_STATUS_REQUEST_OCSP: c_int = 1; // pub const s2n_status_request_type = c_uint; // pub extern fn s2n_config_set_status_request_type(config: *struct_s2n_config, @"type": s2n_status_request_type) c_int; // pub const S2N_CT_SUPPORT_NONE: c_int = 0; // pub const S2N_CT_SUPPORT_REQUEST: c_int = 1; // pub const s2n_ct_support_level = c_uint; // pub extern fn s2n_config_set_ct_support_level(config: *struct_s2n_config, level: s2n_ct_support_level) c_int; // pub const S2N_ALERT_FAIL_ON_WARNINGS: c_int = 0; // pub const S2N_ALERT_IGNORE_WARNINGS: c_int = 1; // pub const s2n_alert_behavior = c_uint; // pub extern fn s2n_config_set_alert_behavior(config: *struct_s2n_config, alert_behavior: s2n_alert_behavior) c_int; // pub extern fn s2n_config_set_extension_data(config: *struct_s2n_config, @"type": s2n_tls_extension_type, data: [*c]const u8, length: u32) c_int; // pub extern fn s2n_config_send_max_fragment_length(config: *struct_s2n_config, mfl_code: s2n_max_frag_len) c_int; // pub extern fn s2n_config_accept_max_fragment_length(config: *struct_s2n_config) c_int; // pub extern fn s2n_config_set_session_state_lifetime(config: *struct_s2n_config, lifetime_in_secs: u64) c_int; // pub extern fn s2n_config_set_session_tickets_onoff(config: 
*struct_s2n_config, enabled: u8) c_int; // pub extern fn s2n_config_set_session_cache_onoff(config: *struct_s2n_config, enabled: u8) c_int; // pub extern fn s2n_config_set_ticket_encrypt_decrypt_key_lifetime(config: *struct_s2n_config, lifetime_in_secs: u64) c_int; // pub extern fn s2n_config_set_ticket_decrypt_key_lifetime(config: *struct_s2n_config, lifetime_in_secs: u64) c_int; // pub extern fn s2n_config_add_ticket_crypto_key(config: *struct_s2n_config, name: [*c]const u8, name_len: u32, key: [*c]u8, key_len: u32, intro_time_in_seconds_from_epoch: u64) c_int; // pub const S2N_SERVER: c_int = 0; // pub const S2N_CLIENT: c_int = 1; // pub const s2n_mode = c_uint; // pub extern fn s2n_connection_new(mode: s2n_mode) *struct_s2n_connection; // pub extern fn s2n_connection_set_config(conn: *struct_s2n_connection, config: *struct_s2n_config) c_int; // pub extern fn s2n_connection_set_ctx(conn: *struct_s2n_connection, ctx: ?*c_void) c_int; // pub extern fn s2n_connection_get_ctx(conn: *struct_s2n_connection) ?*c_void; // pub const s2n_client_hello_fn = fn (*struct_s2n_connection, ?*c_void) callconv(.C) c_int; // pub const S2N_CLIENT_HELLO_CB_BLOCKING: c_int = 0; // pub const S2N_CLIENT_HELLO_CB_NONBLOCKING: c_int = 1; // pub const s2n_client_hello_cb_mode = c_uint; // pub extern fn s2n_config_set_client_hello_cb(config: *struct_s2n_config, client_hello_callback: ?s2n_client_hello_fn, ctx: ?*c_void) c_int; // pub extern fn s2n_config_set_client_hello_cb_mode(config: *struct_s2n_config, cb_mode: s2n_client_hello_cb_mode) c_int; // pub extern fn s2n_client_hello_cb_done(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_server_name_extension_used(conn: *struct_s2n_connection) c_int; // pub const struct_s2n_client_hello = opaque {}; // pub extern fn s2n_connection_get_client_hello(conn: *struct_s2n_connection) *struct_s2n_client_hello; // pub extern fn s2n_client_hello_get_raw_message_length(ch: *struct_s2n_client_hello) isize; // pub extern fn 
s2n_client_hello_get_raw_message(ch: *struct_s2n_client_hello, out: [*c]u8, max_length: u32) isize; // pub extern fn s2n_client_hello_get_cipher_suites_length(ch: *struct_s2n_client_hello) isize; // pub extern fn s2n_client_hello_get_cipher_suites(ch: *struct_s2n_client_hello, out: [*c]u8, max_length: u32) isize; // pub extern fn s2n_client_hello_get_extensions_length(ch: *struct_s2n_client_hello) isize; // pub extern fn s2n_client_hello_get_extensions(ch: *struct_s2n_client_hello, out: [*c]u8, max_length: u32) isize; // pub extern fn s2n_client_hello_get_extension_length(ch: *struct_s2n_client_hello, extension_type: s2n_tls_extension_type) isize; // pub extern fn s2n_client_hello_get_extension_by_id(ch: *struct_s2n_client_hello, extension_type: s2n_tls_extension_type, out: [*c]u8, max_length: u32) isize; // pub extern fn s2n_client_hello_get_session_id_length(ch: *struct_s2n_client_hello, out_length: [*c]u32) c_int; // pub extern fn s2n_client_hello_get_session_id(ch: *struct_s2n_client_hello, out: [*c]u8, out_length: [*c]u32, max_length: u32) c_int; // pub extern fn s2n_connection_set_fd(conn: *struct_s2n_connection, fd: c_int) c_int; // pub extern fn s2n_connection_set_read_fd(conn: *struct_s2n_connection, readfd: c_int) c_int; // pub extern fn s2n_connection_set_write_fd(conn: *struct_s2n_connection, writefd: c_int) c_int; // pub extern fn s2n_connection_get_read_fd(conn: *struct_s2n_connection, readfd: [*c]c_int) c_int; // pub extern fn s2n_connection_get_write_fd(conn: *struct_s2n_connection, writefd: [*c]c_int) c_int; // pub extern fn s2n_connection_use_corked_io(conn: *struct_s2n_connection) c_int; // pub const s2n_recv_fn = fn (?*c_void, [*c]u8, u32) callconv(.C) c_int; // pub const s2n_send_fn = fn (?*c_void, [*c]const u8, u32) callconv(.C) c_int; // pub extern fn s2n_connection_set_recv_ctx(conn: *struct_s2n_connection, ctx: ?*c_void) c_int; // pub extern fn s2n_connection_set_send_ctx(conn: *struct_s2n_connection, ctx: ?*c_void) c_int; // pub extern fn 
s2n_connection_set_recv_cb(conn: *struct_s2n_connection, recv: ?s2n_recv_fn) c_int; // pub extern fn s2n_connection_set_send_cb(conn: *struct_s2n_connection, send: ?s2n_send_fn) c_int; // pub extern fn s2n_connection_prefer_throughput(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_prefer_low_latency(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_set_dynamic_record_threshold(conn: *struct_s2n_connection, resize_threshold: u32, timeout_threshold: u16) c_int; // pub extern fn s2n_connection_set_verify_host_callback(config: *struct_s2n_connection, host_fn: s2n_verify_host_fn, data: ?*c_void) c_int; // pub const S2N_BUILT_IN_BLINDING: c_int = 0; // pub const S2N_SELF_SERVICE_BLINDING: c_int = 1; // pub const s2n_blinding = c_uint; // pub extern fn s2n_connection_set_blinding(conn: *struct_s2n_connection, blinding: s2n_blinding) c_int; // pub extern fn s2n_connection_get_delay(conn: *struct_s2n_connection) u64; // pub extern fn s2n_connection_set_cipher_preferences(conn: *struct_s2n_connection, version: [*c]const u8) c_int; // pub extern fn s2n_connection_append_protocol_preference(conn: *struct_s2n_connection, protocol: [*c]const u8, protocol_len: u8) c_int; // pub extern fn s2n_connection_set_protocol_preferences(conn: *struct_s2n_connection, protocols: [*c]const [*c]const u8, protocol_count: c_int) c_int; // pub extern fn s2n_set_server_name(conn: *struct_s2n_connection, server_name: [*c]const u8) c_int; // pub extern fn s2n_get_server_name(conn: *struct_s2n_connection) [*c]const u8; // pub extern fn s2n_get_application_protocol(conn: *struct_s2n_connection) [*c]const u8; // pub extern fn s2n_connection_get_ocsp_response(conn: *struct_s2n_connection, length: [*c]u32) [*c]const u8; // pub extern fn s2n_connection_get_sct_list(conn: *struct_s2n_connection, length: [*c]u32) [*c]const u8; // pub const S2N_NOT_BLOCKED: c_int = 0; // pub const S2N_BLOCKED_ON_READ: c_int = 1; // pub const S2N_BLOCKED_ON_WRITE: c_int = 2; // pub 
const S2N_BLOCKED_ON_APPLICATION_INPUT: c_int = 3; // pub const S2N_BLOCKED_ON_EARLY_DATA: c_int = 4; // pub const s2n_blocked_status = c_uint; // pub extern fn s2n_negotiate(conn: *struct_s2n_connection, blocked: [*c]s2n_blocked_status) c_int; // pub extern fn s2n_send(conn: *struct_s2n_connection, buf: *const c_void, size: isize, blocked: [*c]s2n_blocked_status) isize; // // pub extern fn s2n_sendv(conn: *struct_s2n_connection, bufs: [*c]const struct_iovec, count: isize, blocked: [*c]s2n_blocked_status) isize; // // pub extern fn s2n_sendv_with_offset(conn: *struct_s2n_connection, bufs: [*c]const struct_iovec, count: isize, offs: isize, blocked: [*c]s2n_blocked_status) isize; // pub extern fn s2n_recv(conn: *struct_s2n_connection, buf: *c_void, size: isize, blocked: [*c]s2n_blocked_status) isize; // pub extern fn s2n_peek(conn: *struct_s2n_connection) u32; // pub extern fn s2n_connection_free_handshake(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_release_buffers(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_wipe(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_free(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_shutdown(conn: *struct_s2n_connection, blocked: [*c]s2n_blocked_status) c_int; // pub const S2N_CERT_AUTH_NONE: c_int = 0; // pub const S2N_CERT_AUTH_REQUIRED: c_int = 1; // pub const S2N_CERT_AUTH_OPTIONAL: c_int = 2; // pub const s2n_cert_auth_type = c_uint; // pub extern fn s2n_config_get_client_auth_type(config: *struct_s2n_config, client_auth_type: [*c]s2n_cert_auth_type) c_int; // pub extern fn s2n_config_set_client_auth_type(config: *struct_s2n_config, client_auth_type: s2n_cert_auth_type) c_int; // pub extern fn s2n_connection_get_client_auth_type(conn: *struct_s2n_connection, client_auth_type: [*c]s2n_cert_auth_type) c_int; // pub extern fn s2n_connection_set_client_auth_type(conn: *struct_s2n_connection, client_auth_type: s2n_cert_auth_type) c_int; // pub extern fn 
s2n_connection_get_client_cert_chain(conn: *struct_s2n_connection, der_cert_chain_out: [*c][*c]u8, cert_chain_len: [*c]u32) c_int; // pub extern fn s2n_config_set_initial_ticket_count(config: *struct_s2n_config, num: u8) c_int; // pub extern fn s2n_connection_add_new_tickets_to_send(conn: *struct_s2n_connection, num: u8) c_int; // pub extern fn s2n_connection_get_tickets_sent(conn: *struct_s2n_connection, num: [*c]u16) c_int; // pub extern fn s2n_connection_set_server_keying_material_lifetime(conn: *struct_s2n_connection, lifetime_in_secs: u32) c_int; // pub const struct_s2n_session_ticket = opaque {}; // pub const s2n_session_ticket_fn = ?fn (*struct_s2n_connection, ?*c_void, *struct_s2n_session_ticket) callconv(.C) c_int; // pub extern fn s2n_config_set_session_ticket_cb(config: *struct_s2n_config, callback: s2n_session_ticket_fn, ctx: ?*c_void) c_int; // pub extern fn s2n_session_ticket_get_data_len(ticket: *struct_s2n_session_ticket, data_len: [*c]usize) c_int; // pub extern fn s2n_session_ticket_get_data(ticket: *struct_s2n_session_ticket, max_data_len: usize, data: [*c]u8) c_int; // pub extern fn s2n_session_ticket_get_lifetime(ticket: *struct_s2n_session_ticket, session_lifetime: [*c]u32) c_int; // pub extern fn s2n_connection_set_session(conn: *struct_s2n_connection, session: [*c]const u8, length: usize) c_int; // pub extern fn s2n_connection_get_session(conn: *struct_s2n_connection, session: [*c]u8, max_length: usize) c_int; // pub extern fn s2n_connection_get_session_ticket_lifetime_hint(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_get_session_length(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_get_session_id_length(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_get_session_id(conn: *struct_s2n_connection, session_id: [*c]u8, max_length: usize) c_int; // pub extern fn s2n_connection_is_session_resumed(conn: *struct_s2n_connection) c_int; // pub extern fn 
s2n_connection_is_ocsp_stapled(conn: *struct_s2n_connection) c_int; // pub const S2N_TLS_SIGNATURE_ANONYMOUS: c_int = 0; // pub const S2N_TLS_SIGNATURE_RSA: c_int = 1; // pub const S2N_TLS_SIGNATURE_ECDSA: c_int = 3; // pub const S2N_TLS_SIGNATURE_RSA_PSS_RSAE: c_int = 224; // pub const S2N_TLS_SIGNATURE_RSA_PSS_PSS: c_int = 225; // pub const s2n_tls_signature_algorithm = c_uint; // pub const S2N_TLS_HASH_NONE: c_int = 0; // pub const S2N_TLS_HASH_MD5: c_int = 1; // pub const S2N_TLS_HASH_SHA1: c_int = 2; // pub const S2N_TLS_HASH_SHA224: c_int = 3; // pub const S2N_TLS_HASH_SHA256: c_int = 4; // pub const S2N_TLS_HASH_SHA384: c_int = 5; // pub const S2N_TLS_HASH_SHA512: c_int = 6; // pub const S2N_TLS_HASH_MD5_SHA1: c_int = 224; // pub const s2n_tls_hash_algorithm = c_uint; // pub extern fn s2n_connection_get_selected_signature_algorithm(conn: *struct_s2n_connection, chosen_alg: [*c]s2n_tls_signature_algorithm) c_int; // pub extern fn s2n_connection_get_selected_digest_algorithm(conn: *struct_s2n_connection, chosen_alg: [*c]s2n_tls_hash_algorithm) c_int; // pub extern fn s2n_connection_get_selected_client_cert_signature_algorithm(conn: *struct_s2n_connection, chosen_alg: [*c]s2n_tls_signature_algorithm) c_int; // pub extern fn s2n_connection_get_selected_client_cert_digest_algorithm(conn: *struct_s2n_connection, chosen_alg: [*c]s2n_tls_hash_algorithm) c_int; // pub extern fn s2n_connection_get_selected_cert(conn: *struct_s2n_connection) *struct_s2n_cert_chain_and_key; // pub extern fn s2n_cert_chain_get_length(chain_and_key: ?*const struct_s2n_cert_chain_and_key, cert_length: [*c]u32) c_int; // pub extern fn s2n_cert_chain_get_cert(chain_and_key: ?*const struct_s2n_cert_chain_and_key, out_cert: [*c]*struct_s2n_cert, cert_idx: u32) c_int; // pub extern fn s2n_cert_get_der(cert: ?*const struct_s2n_cert, out_cert_der: [*c][*c]const u8, cert_length: [*c]u32) c_int; // pub extern fn s2n_connection_get_peer_cert_chain(conn: *const struct_s2n_connection, cert_chain: 
*struct_s2n_cert_chain_and_key) c_int; // pub extern fn s2n_cert_get_x509_extension_value_length(cert: *struct_s2n_cert, oid: [*c]const u8, ext_value_len: [*c]u32) c_int; // pub extern fn s2n_cert_get_x509_extension_value(cert: *struct_s2n_cert, oid: [*c]const u8, ext_value: [*c]u8, ext_value_len: [*c]u32, critical: [*c]bool) c_int; // pub extern fn s2n_cert_get_utf8_string_from_extension_data_length(extension_data: [*c]const u8, extension_len: u32, utf8_str_len: [*c]u32) c_int; // pub extern fn s2n_cert_get_utf8_string_from_extension_data(extension_data: [*c]const u8, extension_len: u32, out_data: [*c]u8, out_len: [*c]u32) c_int; // pub const S2N_PSK_HMAC_SHA256: c_int = 0; // pub const S2N_PSK_HMAC_SHA384: c_int = 1; // pub const s2n_psk_hmac = c_uint; // pub const struct_s2n_psk = opaque {}; // pub extern fn s2n_external_psk_new() *struct_s2n_psk; // pub extern fn s2n_psk_free(psk: [*c]*struct_s2n_psk) c_int; // pub extern fn s2n_psk_set_identity(psk: *struct_s2n_psk, identity: [*c]const u8, identity_size: u16) c_int; // pub extern fn s2n_psk_set_secret(psk: *struct_s2n_psk, secret: [*c]const u8, secret_size: u16) c_int; // pub extern fn s2n_psk_set_hmac(psk: *struct_s2n_psk, hmac: s2n_psk_hmac) c_int; // pub extern fn s2n_connection_append_psk(conn: *struct_s2n_connection, psk: *struct_s2n_psk) c_int; // pub const S2N_PSK_MODE_RESUMPTION: c_int = 0; // pub const S2N_PSK_MODE_EXTERNAL: c_int = 1; // pub const s2n_psk_mode = c_uint; // pub extern fn s2n_config_set_psk_mode(config: *struct_s2n_config, mode: s2n_psk_mode) c_int; // pub extern fn s2n_connection_set_psk_mode(conn: *struct_s2n_connection, mode: s2n_psk_mode) c_int; // pub extern fn s2n_connection_get_negotiated_psk_identity_length(conn: *struct_s2n_connection, identity_length: [*c]u16) c_int; // pub extern fn s2n_connection_get_negotiated_psk_identity(conn: *struct_s2n_connection, identity: [*c]u8, max_identity_length: u16) c_int; // pub const struct_s2n_offered_psk = opaque {}; // pub extern fn 
s2n_offered_psk_new() *struct_s2n_offered_psk; // pub extern fn s2n_offered_psk_free(psk: [*c]*struct_s2n_offered_psk) c_int; // pub extern fn s2n_offered_psk_get_identity(psk: *struct_s2n_offered_psk, identity: [*c][*c]u8, size: [*c]u16) c_int; // pub const struct_s2n_offered_psk_list = opaque {}; // pub extern fn s2n_offered_psk_list_has_next(psk_list: *struct_s2n_offered_psk_list) bool; // pub extern fn s2n_offered_psk_list_next(psk_list: *struct_s2n_offered_psk_list, psk: *struct_s2n_offered_psk) c_int; // pub extern fn s2n_offered_psk_list_reread(psk_list: *struct_s2n_offered_psk_list) c_int; // pub extern fn s2n_offered_psk_list_choose_psk(psk_list: *struct_s2n_offered_psk_list, psk: *struct_s2n_offered_psk) c_int; // pub const s2n_psk_selection_callback = ?fn (*struct_s2n_connection, ?*c_void, *struct_s2n_offered_psk_list) callconv(.C) c_int; // pub extern fn s2n_config_set_psk_selection_callback(config: *struct_s2n_config, cb: s2n_psk_selection_callback, context: ?*c_void) c_int; // pub extern fn s2n_connection_get_wire_bytes_in(conn: *struct_s2n_connection) u64; // pub extern fn s2n_connection_get_wire_bytes_out(conn: *struct_s2n_connection) u64; // pub extern fn s2n_connection_get_client_protocol_version(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_get_server_protocol_version(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_get_actual_protocol_version(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_get_client_hello_version(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_client_cert_used(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_get_cipher(conn: *struct_s2n_connection) [*c]const u8; // pub extern fn s2n_connection_get_cipher_iana_value(conn: *struct_s2n_connection, first: [*c]u8, second: [*c]u8) c_int; // pub extern fn s2n_connection_is_valid_for_cipher_preferences(conn: *struct_s2n_connection, version: [*c]const u8) c_int; // pub extern fn 
s2n_connection_get_curve(conn: *struct_s2n_connection) [*c]const u8; // pub extern fn s2n_connection_get_kem_name(conn: *struct_s2n_connection) [*c]const u8; // pub extern fn s2n_connection_get_kem_group_name(conn: *struct_s2n_connection) [*c]const u8; // pub extern fn s2n_connection_get_alert(conn: *struct_s2n_connection) c_int; // pub extern fn s2n_connection_get_handshake_type_name(conn: *struct_s2n_connection) [*c]const u8; // pub extern fn s2n_connection_get_last_message_name(conn: *struct_s2n_connection) [*c]const u8; // pub const struct_s2n_async_pkey_op = opaque {}; // pub const S2N_ASYNC_PKEY_VALIDATION_FAST: c_int = 0; // pub const S2N_ASYNC_PKEY_VALIDATION_STRICT: c_int = 1; // pub const s2n_async_pkey_validation_mode = c_uint; // pub const S2N_ASYNC_DECRYPT: c_int = 0; // pub const S2N_ASYNC_SIGN: c_int = 1; // pub const s2n_async_pkey_op_type = c_uint; // pub const s2n_async_pkey_fn = ?fn (*struct_s2n_connection, *struct_s2n_async_pkey_op) callconv(.C) c_int; // pub extern fn s2n_config_set_async_pkey_callback(config: *struct_s2n_config, @"fn": s2n_async_pkey_fn) c_int; // pub extern fn s2n_async_pkey_op_perform(op: *struct_s2n_async_pkey_op, key: ?*s2n_cert_private_key) c_int; // pub extern fn s2n_async_pkey_op_apply(op: *struct_s2n_async_pkey_op, conn: *struct_s2n_connection) c_int; // pub extern fn s2n_async_pkey_op_free(op: *struct_s2n_async_pkey_op) c_int; // pub extern fn s2n_config_set_async_pkey_validation_mode(config: *struct_s2n_config, mode: s2n_async_pkey_validation_mode) c_int; // pub extern fn s2n_async_pkey_op_get_op_type(op: *struct_s2n_async_pkey_op, @"type": [*c]s2n_async_pkey_op_type) c_int; // pub extern fn s2n_async_pkey_op_get_input_size(op: *struct_s2n_async_pkey_op, data_len: [*c]u32) c_int; // pub extern fn s2n_async_pkey_op_get_input(op: *struct_s2n_async_pkey_op, data: [*c]u8, data_len: u32) c_int; // pub extern fn s2n_async_pkey_op_set_output(op: *struct_s2n_async_pkey_op, data: [*c]const u8, data_len: u32) c_int; // pub 
const s2n_key_log_fn = ?fn (?*c_void, *struct_s2n_connection, [*c]u8, usize) callconv(.C) c_int; // pub extern fn s2n_config_set_key_log_cb(config: *struct_s2n_config, callback: s2n_key_log_fn, ctx: ?*c_void) c_int; // pub extern fn s2n_config_enable_cert_req_dss_legacy_compat(config: *struct_s2n_config) c_int; // pub extern fn s2n_config_set_server_max_early_data_size(config: *struct_s2n_config, max_early_data_size: u32) c_int; // pub extern fn s2n_connection_set_server_max_early_data_size(conn: *struct_s2n_connection, max_early_data_size: u32) c_int; // pub extern fn s2n_connection_set_server_early_data_context(conn: *struct_s2n_connection, context: [*c]const u8, context_size: u16) c_int; // pub extern fn s2n_psk_configure_early_data(psk: *struct_s2n_psk, max_early_data_size: u32, cipher_suite_first_byte: u8, cipher_suite_second_byte: u8) c_int; // pub extern fn s2n_psk_set_application_protocol(psk: *struct_s2n_psk, application_protocol: [*c]const u8, size: u8) c_int; // pub extern fn s2n_psk_set_early_data_context(psk: *struct_s2n_psk, context: [*c]const u8, size: u16) c_int; // pub const S2N_EARLY_DATA_STATUS_OK: c_int = 0; // pub const S2N_EARLY_DATA_STATUS_NOT_REQUESTED: c_int = 1; // pub const S2N_EARLY_DATA_STATUS_REJECTED: c_int = 2; // pub const S2N_EARLY_DATA_STATUS_END: c_int = 3; // pub const s2n_early_data_status_t = c_uint; // pub extern fn s2n_connection_get_early_data_status(conn: *struct_s2n_connection, status: [*c]s2n_early_data_status_t) c_int; // pub extern fn s2n_connection_get_remaining_early_data_size(conn: *struct_s2n_connection, allowed_early_data_size: [*c]u32) c_int; // pub extern fn s2n_connection_get_max_early_data_size(conn: *struct_s2n_connection, max_early_data_size: [*c]u32) c_int; // pub extern fn s2n_send_early_data(conn: *struct_s2n_connection, data: [*c]const u8, data_len: isize, data_sent: [*c]isize, blocked: [*c]s2n_blocked_status) c_int; // pub extern fn s2n_recv_early_data(conn: *struct_s2n_connection, data: [*c]u8, 
max_data_len: isize, data_received: [*c]isize, blocked: [*c]s2n_blocked_status) c_int; // pub const struct_s2n_offered_early_data = opaque {}; // pub const s2n_early_data_cb = ?fn (*struct_s2n_connection, *struct_s2n_offered_early_data) callconv(.C) c_int; // pub extern fn s2n_config_set_early_data_cb(config: *struct_s2n_config, cb: s2n_early_data_cb) c_int; // pub extern fn s2n_offered_early_data_get_context_length(early_data: *struct_s2n_offered_early_data, context_len: [*c]u16) c_int; // pub extern fn s2n_offered_early_data_get_context(early_data: *struct_s2n_offered_early_data, context: [*c]u8, max_len: u16) c_int; // pub extern fn s2n_offered_early_data_reject(early_data: *struct_s2n_offered_early_data) c_int; // pub extern fn s2n_offered_early_data_accept(early_data: *struct_s2n_offered_early_data) c_int; // pub const S2N_SUCCESS = @as(c_int, 0); // pub const S2N_FAILURE = -@as(c_int, 1); // pub const S2N_CALLBACK_BLOCKED = -@as(c_int, 2); // pub const S2N_MINIMUM_SUPPORTED_TLS_RECORD_MAJOR_VERSION = @as(c_int, 2); // pub const S2N_MAXIMUM_SUPPORTED_TLS_RECORD_MAJOR_VERSION = @as(c_int, 3); // pub const S2N_SSLv2 = @as(c_int, 20); // pub const S2N_SSLv3 = @as(c_int, 30); // pub const S2N_TLS10 = @as(c_int, 31); // pub const S2N_TLS11 = @as(c_int, 32); // pub const S2N_TLS12 = @as(c_int, 33); // pub const S2N_TLS13 = @as(c_int, 34); // pub const S2N_UNKNOWN_PROTOCOL_VERSION = @as(c_int, 0); // pub const s2n_config = struct_s2n_config; // pub const s2n_connection = struct_s2n_connection; // pub const s2n_stacktrace = struct_s2n_stacktrace; // pub const s2n_cert = struct_s2n_cert; // pub const s2n_cert_chain_and_key = struct_s2n_cert_chain_and_key; // pub const s2n_pkey = struct_s2n_pkey; // pub const s2n_client_hello = struct_s2n_client_hello; // pub const s2n_session_ticket = struct_s2n_session_ticket; // pub const s2n_psk = struct_s2n_psk; // pub const s2n_offered_psk = struct_s2n_offered_psk; // pub const s2n_offered_psk_list = struct_s2n_offered_psk_list; 
// pub const s2n_async_pkey_op = struct_s2n_async_pkey_op; // pub const s2n_offered_early_data = struct_s2n_offered_early_data; // var booted = false; // pub var global_s2n_config: *s2n_config = undefined; // const unexpectedErrno = std.os.unexpectedErrno; // const S2NError = error{ Closed, Blocked, Alert, Protocol, Internal, Usage }; // pub inline fn s2nErrorNo(rc: c_int) S2NError!std.os.system.E { // switch (s2n_error_get_type(rc)) { // S2N_ERR_T_OK => return .SUCCESS, // S2N_ERR_T_IO => return std.os.errno(rc), // S2N_ERR_T_CLOSED => return error.Closed, // S2N_ERR_T_BLOCKED => return error.Blocked, // S2N_ERR_T_ALERT => return error.Alert, // S2N_ERR_T_PROTO => return error.Protocol, // S2N_ERR_T_INTERNAL => return error.Internal, // S2N_ERR_T_USAGE => return error.Usage, // else => unreachable, // } // } // pub const Connection = struct { // conn: *s2n_connection = undefined, // fd: std.os.socket_t, // node: *Pool.List.Node, // pub const Pool = struct { // pub const List = std.SinglyLinkedList(*s2n_connection); // pub var list = List{}; // pub fn get() *Pool.List.Node { // if (list.first) |first| { // return first; // } else { // var node = Allocator.allocator.create(Pool.List.Node) catch unreachable; // node.* = Pool.List.Node{ .data = s2n_connection_new(S2N_CLIENT) }; // return node; // } // } // pub fn put(conn: *Pool.List.Node) void { // _ = s2n_connection_wipe(conn.data); // list.prepend(conn); // } // }; // // var pool = std.SinglyLinkedList(); // // var pool_used: std.atomic.Atomic(u32) = std.atomic.Atomic(u32).init(0); // pub fn init(fd: std.os.socket_t) Connection { // return Connection{ // .fd = fd, // .conn = undefined, // .node = undefined, // }; // } // const errno = s2nErrorNo; // pub fn start(this: *Connection) !void { // this.node = Pool.get(); // this.conn = this.node.data; // _ = s2n_connection_set_config(this.conn, global_s2n_config); // _ = s2n_connection_set_fd(this.conn, @intCast(c_int, this.fd)); // _ = 
s2n_connection_prefer_low_latency(this.conn); // const rc = s2n_negotiate(this.conn, &blocked_status); // switch (try s2nErrorNo(rc)) { // .SUCCESS => return, // .BADF => unreachable, // always a race condition // .FAULT => unreachable, // .INVAL => unreachable, // .NOTCONN => unreachable, // .NOTSOCK => unreachable, // .INTR => return error.Interrupted, // .AGAIN => return error.WouldBlock, // .NOMEM => return error.SystemResources, // .CONNREFUSED => return error.ConnectionRefused, // .CONNRESET => return error.ConnectionResetByPeer, // else => |err| return unexpectedErrno(err), // } // } // pub fn close(this: *Connection) !void { // _ = s2n_shutdown(this.conn, &blocked_status); // Pool.put(this.node); // std.os.closeSocket(this.fd); // } // pub const Writer = std.io.Writer(*Connection, WriteError, write); // pub const Reader = std.io.Reader(*Connection, ReadError, read); // pub fn writer(this: *Connection) Writer { // return Writer{ .context = this }; // } // pub fn reader(this: *Connection) Reader { // return Reader{ .context = this }; // } // pub const ReadError = error{ // WouldBlock, // SystemResources, // ConnectionRefused, // ConnectionResetByPeer, // Unexpected, // Interrupted, // } || S2NError; // pub fn read(this: *Connection, buf: []u8) ReadError!usize { // const rc = s2n_recv(this.conn, buf.ptr, @intCast(isize, buf.len), &blocked_status); // switch (try errno(@intCast(c_int, rc))) { // .SUCCESS => return @intCast(usize, rc), // .BADF => unreachable, // always a race condition // .FAULT => unreachable, // .INVAL => unreachable, // .NOTCONN => unreachable, // .NOTSOCK => unreachable, // .INTR => return error.Interrupted, // .AGAIN => return error.WouldBlock, // .NOMEM => return error.SystemResources, // .CONNREFUSED => return error.ConnectionRefused, // .CONNRESET => return error.ConnectionResetByPeer, // else => |err| return unexpectedErrno(err), // } // } // var blocked_status: s2n_blocked_status = 0; // pub const WriteError = error{ // AccessDenied, 
// AddressFamilyNotSupported, // BrokenPipe, // ConnectionResetByPeer, // FastOpenAlreadyInProgress, // FileNotFound, // MessageTooBig, // NameTooLong, // NetworkSubsystemFailed, // NetworkUnreachable, // NotDir, // SocketNotConnected, // SymLinkLoop, // SystemResources, // WouldBlock, // Unexpected, // } || S2NError; // pub fn write(this: *Connection, buf: []const u8) WriteError!usize { // const rc = s2n_send(this.conn, buf.ptr, @intCast(isize, buf.len), &blocked_status); // // std.os.sendto( // switch (try errno(@intCast(c_int, rc))) { // .SUCCESS => return @intCast(usize, rc), // .ACCES => return error.AccessDenied, // .AGAIN => return error.WouldBlock, // .ALREADY => return error.FastOpenAlreadyInProgress, // .BADF => unreachable, // always a race condition // .CONNRESET => return error.ConnectionResetByPeer, // .DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set. // .FAULT => unreachable, // An invalid user space address was specified for an argument. // .INTR => unreachable, // .INVAL => unreachable, // Invalid argument passed. // .ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified // .MSGSIZE => return error.MessageTooBig, // .NOBUFS => return error.SystemResources, // .NOMEM => return error.SystemResources, // .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket. // .OPNOTSUPP => unreachable, // Some bit in the flags argument is inappropriate for the socket type. 
// .PIPE => return error.BrokenPipe, // .AFNOSUPPORT => return error.AddressFamilyNotSupported, // .LOOP => return error.SymLinkLoop, // .NAMETOOLONG => return error.NameTooLong, // .NOENT => return error.FileNotFound, // .NOTDIR => return error.NotDir, // .HOSTUNREACH => return error.NetworkUnreachable, // .NETUNREACH => return error.NetworkUnreachable, // .NOTCONN => return error.SocketNotConnected, // .NETDOWN => return error.NetworkSubsystemFailed, // else => |err| return std.os.unexpectedErrno(err), // } // } // }; // pub const Allocator = struct { // pub var allocator: *std.mem.Allocator = undefined; // pub fn initCallback() callconv(.C) c_int { // return S2N_SUCCESS; // } // pub fn deinitCallback() callconv(.C) c_int { // return S2N_SUCCESS; // } // pub fn mallocCallback(ptr: **c_void, requested: u32, allocated: *u32) callconv(.C) c_int { // const bytes = allocator.allocAdvanced(u8, null, requested, .at_least) catch return S2N_FAILURE; // allocated.* = @intCast(u32, bytes.len); // ptr.* = bytes.ptr; // return S2N_SUCCESS; // } // pub fn freeCallback(ptr: *c_void, size: u32) callconv(.C) c_int { // var slice_ptr = @ptrCast([*]u8, ptr); // var slice = slice_ptr[0..size]; // allocator.free(slice); // return S2N_SUCCESS; // } // };
// Source: https://raw.githubusercontent.com/Jarred-Sumner/zig-ld-bug/3239daea25ed79e4fca288d07a4ca71ca18e5ce7/s2n.zig
// SPDX-License-Identifier: MIT // Copyright (c) 2015-2021 Zig Contributors // This file is part of [zig](https://ziglang.org/), which is MIT licensed. // The MIT license requires this copyright notice to be included in all copies // and substantial portions of the software. const std = @import("../std.zig"); const assert = std.debug.assert; const Allocator = std.mem.Allocator; const ast = std.zig.ast; const Node = ast.Node; const Tree = ast.Tree; const AstError = ast.Error; const TokenIndex = ast.TokenIndex; const Token = std.zig.Token; pub const Error = error{ParseError} || Allocator.Error; /// Result should be freed with tree.deinit() when there are /// no more references to any of the tokens or nodes. pub fn parse(gpa: *Allocator, source: []const u8) Allocator.Error!Tree { var tokens = ast.TokenList{}; defer tokens.deinit(gpa); // Empirically, the zig std lib has an 8:1 ratio of source bytes to token count. const estimated_token_count = source.len / 8; try tokens.ensureCapacity(gpa, estimated_token_count); var tokenizer = std.zig.Tokenizer.init(source); while (true) { const token = tokenizer.next(); try tokens.append(gpa, .{ .tag = token.tag, .start = @intCast(u32, token.loc.start), }); if (token.tag == .eof) break; } var parser: Parser = .{ .source = source, .gpa = gpa, .token_tags = tokens.items(.tag), .token_starts = tokens.items(.start), .errors = .{}, .nodes = .{}, .extra_data = .{}, .tok_i = 0, }; defer parser.errors.deinit(gpa); defer parser.nodes.deinit(gpa); defer parser.extra_data.deinit(gpa); // Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes. // Make sure at least 1 so we can use appendAssumeCapacity on the root node below. const estimated_node_count = (tokens.len + 2) / 2; try parser.nodes.ensureCapacity(gpa, estimated_node_count); // Root node must be index 0. 
// Root <- skip ContainerMembers eof parser.nodes.appendAssumeCapacity(.{ .tag = .root, .main_token = 0, .data = undefined, }); const root_members = try parser.parseContainerMembers(); const root_decls = try root_members.toSpan(&parser); if (parser.token_tags[parser.tok_i] != .eof) { try parser.warnExpected(.eof); } parser.nodes.items(.data)[0] = .{ .lhs = root_decls.start, .rhs = root_decls.end, }; // TODO experiment with compacting the MultiArrayList slices here return Tree{ .source = source, .tokens = tokens.toOwnedSlice(), .nodes = parser.nodes.toOwnedSlice(), .extra_data = parser.extra_data.toOwnedSlice(gpa), .errors = parser.errors.toOwnedSlice(gpa), }; } const null_node: Node.Index = 0; /// Represents in-progress parsing, will be converted to an ast.Tree after completion. const Parser = struct { gpa: *Allocator, source: []const u8, token_tags: []const Token.Tag, token_starts: []const ast.ByteOffset, tok_i: TokenIndex, errors: std.ArrayListUnmanaged(AstError), nodes: ast.NodeList, extra_data: std.ArrayListUnmanaged(Node.Index), const SmallSpan = union(enum) { zero_or_one: Node.Index, multi: []Node.Index, fn deinit(self: SmallSpan, gpa: *Allocator) void { switch (self) { .zero_or_one => {}, .multi => |list| gpa.free(list), } } }; const Members = struct { len: usize, lhs: Node.Index, rhs: Node.Index, trailing: bool, fn toSpan(self: Members, p: *Parser) !Node.SubRange { if (self.len <= 2) { const nodes = [2]Node.Index{ self.lhs, self.rhs }; return p.listToSpan(nodes[0..self.len]); } else { return Node.SubRange{ .start = self.lhs, .end = self.rhs }; } } }; fn listToSpan(p: *Parser, list: []const Node.Index) !Node.SubRange { try p.extra_data.appendSlice(p.gpa, list); return Node.SubRange{ .start = @intCast(Node.Index, p.extra_data.items.len - list.len), .end = @intCast(Node.Index, p.extra_data.items.len), }; } fn addNode(p: *Parser, elem: ast.NodeList.Elem) Allocator.Error!Node.Index { const result = @intCast(Node.Index, p.nodes.len); try p.nodes.append(p.gpa, 
elem); return result; } fn setNode(p: *Parser, i: usize, elem: ast.NodeList.Elem) Node.Index { p.nodes.set(i, elem); return @intCast(Node.Index, i); } fn reserveNode(p: *Parser) !usize { try p.nodes.resize(p.gpa, p.nodes.len + 1); return p.nodes.len - 1; } fn addExtra(p: *Parser, extra: anytype) Allocator.Error!Node.Index { const fields = std.meta.fields(@TypeOf(extra)); try p.extra_data.ensureCapacity(p.gpa, p.extra_data.items.len + fields.len); const result = @intCast(u32, p.extra_data.items.len); inline for (fields) |field| { comptime assert(field.field_type == Node.Index); p.extra_data.appendAssumeCapacity(@field(extra, field.name)); } return result; } fn warn(p: *Parser, tag: ast.Error.Tag) error{OutOfMemory}!void { @setCold(true); try p.warnMsg(.{ .tag = tag, .token = p.tok_i }); } fn warnExpected(p: *Parser, expected_token: Token.Tag) error{OutOfMemory}!void { @setCold(true); try p.warnMsg(.{ .tag = .expected_token, .token = p.tok_i, .extra = .{ .expected_tag = expected_token }, }); } fn warnMsg(p: *Parser, msg: ast.Error) error{OutOfMemory}!void { @setCold(true); try p.errors.append(p.gpa, msg); } fn fail(p: *Parser, tag: ast.Error.Tag) error{ ParseError, OutOfMemory } { @setCold(true); return p.failMsg(.{ .tag = tag, .token = p.tok_i }); } fn failExpected(p: *Parser, expected_token: Token.Tag) error{ ParseError, OutOfMemory } { @setCold(true); return p.failMsg(.{ .tag = .expected_token, .token = p.tok_i, .extra = .{ .expected_tag = expected_token }, }); } fn failMsg(p: *Parser, msg: ast.Error) error{ ParseError, OutOfMemory } { @setCold(true); try p.warnMsg(msg); return error.ParseError; } /// ContainerMembers /// <- TestDecl ContainerMembers /// / TopLevelComptime ContainerMembers /// / KEYWORD_pub? 
TopLevelDecl ContainerMembers /// / ContainerField COMMA ContainerMembers /// / ContainerField /// / /// TopLevelComptime <- KEYWORD_comptime BlockExpr fn parseContainerMembers(p: *Parser) !Members { var list = std.ArrayList(Node.Index).init(p.gpa); defer list.deinit(); var field_state: union(enum) { /// No fields have been seen. none, /// Currently parsing fields. seen, /// Saw fields and then a declaration after them. /// Payload is first token of previous declaration. end: Node.Index, /// There was a declaration between fields, don't report more errors. err, } = .none; // Skip container doc comments. while (p.eatToken(.container_doc_comment)) |_| {} var trailing = false; while (true) { const doc_comment = try p.eatDocComments(); switch (p.token_tags[p.tok_i]) { .keyword_test => { const test_decl_node = try p.expectTestDeclRecoverable(); if (test_decl_node != 0) { if (field_state == .seen) { field_state = .{ .end = test_decl_node }; } try list.append(test_decl_node); } trailing = false; }, .keyword_comptime => switch (p.token_tags[p.tok_i + 1]) { .identifier => { p.tok_i += 1; const container_field = try p.expectContainerFieldRecoverable(); if (container_field != 0) { switch (field_state) { .none => field_state = .seen, .err, .seen => {}, .end => |node| { try p.warnMsg(.{ .tag = .decl_between_fields, .token = p.nodes.items(.main_token)[node], }); // Continue parsing; error will be reported later. field_state = .err; }, } try list.append(container_field); switch (p.token_tags[p.tok_i]) { .comma => { p.tok_i += 1; trailing = true; continue; }, .r_brace, .eof => { trailing = false; break; }, else => {}, } // There is not allowed to be a decl after a field with no comma. // Report error but recover parser. 
try p.warnExpected(.comma); p.findNextContainerMember(); } }, .l_brace => { const comptime_token = p.nextToken(); const block = p.parseBlock() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ParseError => blk: { p.findNextContainerMember(); break :blk null_node; }, }; if (block != 0) { const comptime_node = try p.addNode(.{ .tag = .@"comptime", .main_token = comptime_token, .data = .{ .lhs = block, .rhs = undefined, }, }); if (field_state == .seen) { field_state = .{ .end = comptime_node }; } try list.append(comptime_node); } trailing = false; }, else => { p.tok_i += 1; try p.warn(.expected_block_or_field); }, }, .keyword_pub => { p.tok_i += 1; const top_level_decl = try p.expectTopLevelDeclRecoverable(); if (top_level_decl != 0) { if (field_state == .seen) { field_state = .{ .end = top_level_decl }; } try list.append(top_level_decl); } trailing = p.token_tags[p.tok_i - 1] == .semicolon; }, .keyword_usingnamespace => { const node = try p.expectUsingNamespaceRecoverable(); if (node != 0) { if (field_state == .seen) { field_state = .{ .end = node }; } try list.append(node); } trailing = p.token_tags[p.tok_i - 1] == .semicolon; }, .keyword_const, .keyword_var, .keyword_threadlocal, .keyword_export, .keyword_extern, .keyword_inline, .keyword_noinline, .keyword_fn, => { const top_level_decl = try p.expectTopLevelDeclRecoverable(); if (top_level_decl != 0) { if (field_state == .seen) { field_state = .{ .end = top_level_decl }; } try list.append(top_level_decl); } trailing = p.token_tags[p.tok_i - 1] == .semicolon; }, .identifier => { const container_field = try p.expectContainerFieldRecoverable(); if (container_field != 0) { switch (field_state) { .none => field_state = .seen, .err, .seen => {}, .end => |node| { try p.warnMsg(.{ .tag = .decl_between_fields, .token = p.nodes.items(.main_token)[node], }); // Continue parsing; error will be reported later. 
field_state = .err; }, } try list.append(container_field); switch (p.token_tags[p.tok_i]) { .comma => { p.tok_i += 1; trailing = true; continue; }, .r_brace, .eof => { trailing = false; break; }, else => {}, } // There is not allowed to be a decl after a field with no comma. // Report error but recover parser. try p.warnExpected(.comma); p.findNextContainerMember(); } }, .eof, .r_brace => { if (doc_comment) |tok| { try p.warnMsg(.{ .tag = .unattached_doc_comment, .token = tok, }); } break; }, else => { try p.warn(.expected_container_members); // This was likely not supposed to end yet; try to find the next declaration. p.findNextContainerMember(); }, } } switch (list.items.len) { 0 => return Members{ .len = 0, .lhs = 0, .rhs = 0, .trailing = trailing, }, 1 => return Members{ .len = 1, .lhs = list.items[0], .rhs = 0, .trailing = trailing, }, 2 => return Members{ .len = 2, .lhs = list.items[0], .rhs = list.items[1], .trailing = trailing, }, else => { const span = try p.listToSpan(list.items); return Members{ .len = list.items.len, .lhs = span.start, .rhs = span.end, .trailing = trailing, }; }, } } /// Attempts to find next container member by searching for certain tokens fn findNextContainerMember(p: *Parser) void { var level: u32 = 0; while (true) { const tok = p.nextToken(); switch (p.token_tags[tok]) { // Any of these can start a new top level declaration. 
.keyword_test, .keyword_comptime, .keyword_pub, .keyword_export, .keyword_extern, .keyword_inline, .keyword_noinline, .keyword_usingnamespace, .keyword_threadlocal, .keyword_const, .keyword_var, .keyword_fn, => { if (level == 0) { p.tok_i -= 1; return; } }, .identifier => { if (p.token_tags[tok + 1] == .comma and level == 0) { p.tok_i -= 1; return; } }, .comma, .semicolon => { // this decl was likely meant to end here if (level == 0) { return; } }, .l_paren, .l_bracket, .l_brace => level += 1, .r_paren, .r_bracket => { if (level != 0) level -= 1; }, .r_brace => { if (level == 0) { // end of container, exit p.tok_i -= 1; return; } level -= 1; }, .eof => { p.tok_i -= 1; return; }, else => {}, } } } /// Attempts to find the next statement by searching for a semicolon fn findNextStmt(p: *Parser) void { var level: u32 = 0; while (true) { const tok = p.nextToken(); switch (p.token_tags[tok]) { .l_brace => level += 1, .r_brace => { if (level == 0) { p.tok_i -= 1; return; } level -= 1; }, .semicolon => { if (level == 0) { return; } }, .eof => { p.tok_i -= 1; return; }, else => {}, } } } /// TestDecl <- KEYWORD_test STRINGLITERALSINGLE? Block fn expectTestDecl(p: *Parser) !Node.Index { const test_token = p.assertToken(.keyword_test); const name_token = p.eatToken(.string_literal); const block_node = try p.parseBlock(); if (block_node == 0) return p.fail(.expected_block); return p.addNode(.{ .tag = .test_decl, .main_token = test_token, .data = .{ .lhs = name_token orelse 0, .rhs = block_node, }, }); } fn expectTestDeclRecoverable(p: *Parser) error{OutOfMemory}!Node.Index { return p.expectTestDecl() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ParseError => { p.findNextContainerMember(); return null_node; }, }; } /// TopLevelDecl /// <- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / (KEYWORD_inline / KEYWORD_noinline))? FnProto (SEMICOLON / Block) /// / (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? 
VarDecl /// / KEYWORD_usingnamespace Expr SEMICOLON fn expectTopLevelDecl(p: *Parser) !Node.Index { const extern_export_inline_token = p.nextToken(); var expect_fn: bool = false; var expect_var_or_fn: bool = false; switch (p.token_tags[extern_export_inline_token]) { .keyword_extern => { _ = p.eatToken(.string_literal); expect_var_or_fn = true; }, .keyword_export => expect_var_or_fn = true, .keyword_inline, .keyword_noinline => expect_fn = true, else => p.tok_i -= 1, } const fn_proto = try p.parseFnProto(); if (fn_proto != 0) { switch (p.token_tags[p.tok_i]) { .semicolon => { p.tok_i += 1; return fn_proto; }, .l_brace => { const fn_decl_index = try p.reserveNode(); const body_block = try p.parseBlock(); assert(body_block != 0); return p.setNode(fn_decl_index, .{ .tag = .fn_decl, .main_token = p.nodes.items(.main_token)[fn_proto], .data = .{ .lhs = fn_proto, .rhs = body_block, }, }); }, else => { // Since parseBlock only return error.ParseError on // a missing '}' we can assume this function was // supposed to end here. 
try p.warn(.expected_semi_or_lbrace); return null_node; }, } } if (expect_fn) { try p.warn(.expected_fn); return error.ParseError; } const thread_local_token = p.eatToken(.keyword_threadlocal); const var_decl = try p.parseVarDecl(); if (var_decl != 0) { const semicolon_token = try p.expectToken(.semicolon); return var_decl; } if (thread_local_token != null) { return p.fail(.expected_var_decl); } if (expect_var_or_fn) { return p.fail(.expected_var_decl_or_fn); } if (p.token_tags[p.tok_i] != .keyword_usingnamespace) { return p.fail(.expected_pub_item); } return p.expectUsingNamespace(); } fn expectTopLevelDeclRecoverable(p: *Parser) error{OutOfMemory}!Node.Index { return p.expectTopLevelDecl() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ParseError => { p.findNextContainerMember(); return null_node; }, }; } fn expectUsingNamespace(p: *Parser) !Node.Index { const usingnamespace_token = p.assertToken(.keyword_usingnamespace); const expr = try p.expectExpr(); const semicolon_token = try p.expectToken(.semicolon); return p.addNode(.{ .tag = .@"usingnamespace", .main_token = usingnamespace_token, .data = .{ .lhs = expr, .rhs = undefined, }, }); } fn expectUsingNamespaceRecoverable(p: *Parser) error{OutOfMemory}!Node.Index { return p.expectUsingNamespace() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ParseError => { p.findNextContainerMember(); return null_node; }, }; } /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? CallConv? EXCLAMATIONMARK? (Keyword_anytype / TypeExpr) fn parseFnProto(p: *Parser) !Node.Index { const fn_token = p.eatToken(.keyword_fn) orelse return null_node; // We want the fn proto node to be before its children in the array. 
const fn_proto_index = try p.reserveNode(); _ = p.eatToken(.identifier); const params = try p.parseParamDeclList(); defer params.deinit(p.gpa); const align_expr = try p.parseByteAlign(); const section_expr = try p.parseLinkSection(); const callconv_expr = try p.parseCallconv(); const bang_token = p.eatToken(.bang); const return_type_expr = try p.parseTypeExpr(); if (return_type_expr == 0) { // most likely the user forgot to specify the return type. // Mark return type as invalid and try to continue. try p.warn(.expected_return_type); } if (align_expr == 0 and section_expr == 0 and callconv_expr == 0) { switch (params) { .zero_or_one => |param| return p.setNode(fn_proto_index, .{ .tag = .fn_proto_simple, .main_token = fn_token, .data = .{ .lhs = param, .rhs = return_type_expr, }, }), .multi => |list| { const span = try p.listToSpan(list); return p.setNode(fn_proto_index, .{ .tag = .fn_proto_multi, .main_token = fn_token, .data = .{ .lhs = try p.addExtra(Node.SubRange{ .start = span.start, .end = span.end, }), .rhs = return_type_expr, }, }); }, } } switch (params) { .zero_or_one => |param| return p.setNode(fn_proto_index, .{ .tag = .fn_proto_one, .main_token = fn_token, .data = .{ .lhs = try p.addExtra(Node.FnProtoOne{ .param = param, .align_expr = align_expr, .section_expr = section_expr, .callconv_expr = callconv_expr, }), .rhs = return_type_expr, }, }), .multi => |list| { const span = try p.listToSpan(list); return p.setNode(fn_proto_index, .{ .tag = .fn_proto, .main_token = fn_token, .data = .{ .lhs = try p.addExtra(Node.FnProto{ .params_start = span.start, .params_end = span.end, .align_expr = align_expr, .section_expr = section_expr, .callconv_expr = callconv_expr, }), .rhs = return_type_expr, }, }); }, } } /// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? LinkSection? (EQUAL Expr)? 
SEMICOLON fn parseVarDecl(p: *Parser) !Node.Index { const mut_token = p.eatToken(.keyword_const) orelse p.eatToken(.keyword_var) orelse return null_node; _ = try p.expectToken(.identifier); const type_node: Node.Index = if (p.eatToken(.colon) == null) 0 else try p.expectTypeExpr(); const align_node = try p.parseByteAlign(); const section_node = try p.parseLinkSection(); const init_node: Node.Index = if (p.eatToken(.equal) == null) 0 else try p.expectExpr(); if (section_node == 0) { if (align_node == 0) { return p.addNode(.{ .tag = .simple_var_decl, .main_token = mut_token, .data = .{ .lhs = type_node, .rhs = init_node, }, }); } else if (type_node == 0) { return p.addNode(.{ .tag = .aligned_var_decl, .main_token = mut_token, .data = .{ .lhs = align_node, .rhs = init_node, }, }); } else { return p.addNode(.{ .tag = .local_var_decl, .main_token = mut_token, .data = .{ .lhs = try p.addExtra(Node.LocalVarDecl{ .type_node = type_node, .align_node = align_node, }), .rhs = init_node, }, }); } } else { return p.addNode(.{ .tag = .global_var_decl, .main_token = mut_token, .data = .{ .lhs = try p.addExtra(Node.GlobalVarDecl{ .type_node = type_node, .align_node = align_node, .section_node = section_node, }), .rhs = init_node, }, }); } } /// ContainerField <- KEYWORD_comptime? IDENTIFIER (COLON TypeExpr ByteAlign?)? (EQUAL Expr)? 
fn expectContainerField(p: *Parser) !Node.Index { const comptime_token = p.eatToken(.keyword_comptime); const name_token = p.assertToken(.identifier); var align_expr: Node.Index = 0; var type_expr: Node.Index = 0; if (p.eatToken(.colon)) |_| { if (p.eatToken(.keyword_anytype)) |anytype_tok| { type_expr = try p.addNode(.{ .tag = .@"anytype", .main_token = anytype_tok, .data = .{ .lhs = undefined, .rhs = undefined, }, }); } else { type_expr = try p.expectTypeExpr(); align_expr = try p.parseByteAlign(); } } const value_expr: Node.Index = if (p.eatToken(.equal) == null) 0 else try p.expectExpr(); if (align_expr == 0) { return p.addNode(.{ .tag = .container_field_init, .main_token = name_token, .data = .{ .lhs = type_expr, .rhs = value_expr, }, }); } else if (value_expr == 0) { return p.addNode(.{ .tag = .container_field_align, .main_token = name_token, .data = .{ .lhs = type_expr, .rhs = align_expr, }, }); } else { return p.addNode(.{ .tag = .container_field, .main_token = name_token, .data = .{ .lhs = type_expr, .rhs = try p.addExtra(Node.ContainerField{ .value_expr = value_expr, .align_expr = align_expr, }), }, }); } } fn expectContainerFieldRecoverable(p: *Parser) error{OutOfMemory}!Node.Index { return p.expectContainerField() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ParseError => { p.findNextContainerMember(); return null_node; }, }; } /// Statement /// <- KEYWORD_comptime? VarDecl /// / KEYWORD_comptime BlockExprStatement /// / KEYWORD_nosuspend BlockExprStatement /// / KEYWORD_suspend (SEMICOLON / BlockExprStatement) /// / KEYWORD_defer BlockExprStatement /// / KEYWORD_errdefer Payload? 
BlockExprStatement /// / IfStatement /// / LabeledStatement /// / SwitchExpr /// / AssignExpr SEMICOLON fn parseStatement(p: *Parser) Error!Node.Index { const comptime_token = p.eatToken(.keyword_comptime); const var_decl = try p.parseVarDecl(); if (var_decl != 0) { _ = try p.expectTokenRecoverable(.semicolon); return var_decl; } if (comptime_token) |token| { return p.addNode(.{ .tag = .@"comptime", .main_token = token, .data = .{ .lhs = try p.expectBlockExprStatement(), .rhs = undefined, }, }); } switch (p.token_tags[p.tok_i]) { .keyword_nosuspend => { return p.addNode(.{ .tag = .@"nosuspend", .main_token = p.nextToken(), .data = .{ .lhs = try p.expectBlockExprStatement(), .rhs = undefined, }, }); }, .keyword_suspend => { const token = p.nextToken(); const block_expr: Node.Index = if (p.eatToken(.semicolon) != null) 0 else try p.expectBlockExprStatement(); return p.addNode(.{ .tag = .@"suspend", .main_token = token, .data = .{ .lhs = block_expr, .rhs = undefined, }, }); }, .keyword_defer => return p.addNode(.{ .tag = .@"defer", .main_token = p.nextToken(), .data = .{ .lhs = undefined, .rhs = try p.expectBlockExprStatement(), }, }), .keyword_errdefer => return p.addNode(.{ .tag = .@"errdefer", .main_token = p.nextToken(), .data = .{ .lhs = try p.parsePayload(), .rhs = try p.expectBlockExprStatement(), }, }), .keyword_switch => return p.expectSwitchExpr(), .keyword_if => return p.expectIfStatement(), else => {}, } const labeled_statement = try p.parseLabeledStatement(); if (labeled_statement != 0) return labeled_statement; const assign_expr = try p.parseAssignExpr(); if (assign_expr != 0) { _ = try p.expectTokenRecoverable(.semicolon); return assign_expr; } return null_node; } fn expectStatement(p: *Parser) !Node.Index { const statement = try p.parseStatement(); if (statement == 0) { return p.fail(.expected_statement); } return statement; } /// If a parse error occurs, reports an error, but then finds the next statement /// and returns that one instead. 
If a parse error occurs but there is no following /// statement, returns 0. fn expectStatementRecoverable(p: *Parser) Error!Node.Index { while (true) { return p.expectStatement() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ParseError => { p.findNextStmt(); // Try to skip to the next statement. switch (p.token_tags[p.tok_i]) { .r_brace => return null_node, .eof => return error.ParseError, else => continue, } }, }; } } /// IfStatement /// <- IfPrefix BlockExpr ( KEYWORD_else Payload? Statement )? /// / IfPrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement ) fn expectIfStatement(p: *Parser) !Node.Index { const if_token = p.assertToken(.keyword_if); _ = try p.expectToken(.l_paren); const condition = try p.expectExpr(); _ = try p.expectToken(.r_paren); const then_payload = try p.parsePtrPayload(); // TODO propose to change the syntax so that semicolons are always required // inside if statements, even if there is an `else`. var else_required = false; const then_expr = blk: { const block_expr = try p.parseBlockExpr(); if (block_expr != 0) break :blk block_expr; const assign_expr = try p.parseAssignExpr(); if (assign_expr == 0) { return p.fail(.expected_block_or_assignment); } if (p.eatToken(.semicolon)) |_| { return p.addNode(.{ .tag = .if_simple, .main_token = if_token, .data = .{ .lhs = condition, .rhs = assign_expr, }, }); } else_required = true; break :blk assign_expr; }; const else_token = p.eatToken(.keyword_else) orelse { if (else_required) { try p.warn(.expected_semi_or_else); } return p.addNode(.{ .tag = .if_simple, .main_token = if_token, .data = .{ .lhs = condition, .rhs = then_expr, }, }); }; const else_payload = try p.parsePayload(); const else_expr = try p.expectStatement(); return p.addNode(.{ .tag = .@"if", .main_token = if_token, .data = .{ .lhs = condition, .rhs = try p.addExtra(Node.If{ .then_expr = then_expr, .else_expr = else_expr, }), }, }); } /// LabeledStatement <- BlockLabel? 
(Block / LoopStatement) fn parseLabeledStatement(p: *Parser) !Node.Index { const label_token = p.parseBlockLabel(); const block = try p.parseBlock(); if (block != 0) return block; const loop_stmt = try p.parseLoopStatement(); if (loop_stmt != 0) return loop_stmt; if (label_token != 0) { return p.fail(.expected_labelable); } return null_node; } /// LoopStatement <- KEYWORD_inline? (ForStatement / WhileStatement) fn parseLoopStatement(p: *Parser) !Node.Index { const inline_token = p.eatToken(.keyword_inline); const for_statement = try p.parseForStatement(); if (for_statement != 0) return for_statement; const while_statement = try p.parseWhileStatement(); if (while_statement != 0) return while_statement; if (inline_token == null) return null_node; // If we've seen "inline", there should have been a "for" or "while" return p.fail(.expected_inlinable); } /// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload /// ForStatement /// <- ForPrefix BlockExpr ( KEYWORD_else Statement )? /// / ForPrefix AssignExpr ( SEMICOLON / KEYWORD_else Statement ) fn parseForStatement(p: *Parser) !Node.Index { const for_token = p.eatToken(.keyword_for) orelse return null_node; _ = try p.expectToken(.l_paren); const array_expr = try p.expectExpr(); _ = try p.expectToken(.r_paren); const found_payload = try p.parsePtrIndexPayload(); if (found_payload == 0) try p.warn(.expected_loop_payload); // TODO propose to change the syntax so that semicolons are always required // inside while statements, even if there is an `else`. 
var else_required = false; const then_expr = blk: { const block_expr = try p.parseBlockExpr(); if (block_expr != 0) break :blk block_expr; const assign_expr = try p.parseAssignExpr(); if (assign_expr == 0) { return p.fail(.expected_block_or_assignment); } if (p.eatToken(.semicolon)) |_| { return p.addNode(.{ .tag = .for_simple, .main_token = for_token, .data = .{ .lhs = array_expr, .rhs = assign_expr, }, }); } else_required = true; break :blk assign_expr; }; const else_token = p.eatToken(.keyword_else) orelse { if (else_required) { try p.warn(.expected_semi_or_else); } return p.addNode(.{ .tag = .for_simple, .main_token = for_token, .data = .{ .lhs = array_expr, .rhs = then_expr, }, }); }; return p.addNode(.{ .tag = .@"for", .main_token = for_token, .data = .{ .lhs = array_expr, .rhs = try p.addExtra(Node.If{ .then_expr = then_expr, .else_expr = try p.expectStatement(), }), }, }); } /// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr? /// WhileStatement /// <- WhilePrefix BlockExpr ( KEYWORD_else Payload? Statement )? /// / WhilePrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement ) fn parseWhileStatement(p: *Parser) !Node.Index { const while_token = p.eatToken(.keyword_while) orelse return null_node; _ = try p.expectToken(.l_paren); const condition = try p.expectExpr(); _ = try p.expectToken(.r_paren); const then_payload = try p.parsePtrPayload(); const cont_expr = try p.parseWhileContinueExpr(); // TODO propose to change the syntax so that semicolons are always required // inside while statements, even if there is an `else`. 
var else_required = false; const then_expr = blk: { const block_expr = try p.parseBlockExpr(); if (block_expr != 0) break :blk block_expr; const assign_expr = try p.parseAssignExpr(); if (assign_expr == 0) { return p.fail(.expected_block_or_assignment); } if (p.eatToken(.semicolon)) |_| { if (cont_expr == 0) { return p.addNode(.{ .tag = .while_simple, .main_token = while_token, .data = .{ .lhs = condition, .rhs = assign_expr, }, }); } else { return p.addNode(.{ .tag = .while_cont, .main_token = while_token, .data = .{ .lhs = condition, .rhs = try p.addExtra(Node.WhileCont{ .cont_expr = cont_expr, .then_expr = assign_expr, }), }, }); } } else_required = true; break :blk assign_expr; }; const else_token = p.eatToken(.keyword_else) orelse { if (else_required) { try p.warn(.expected_semi_or_else); } if (cont_expr == 0) { return p.addNode(.{ .tag = .while_simple, .main_token = while_token, .data = .{ .lhs = condition, .rhs = then_expr, }, }); } else { return p.addNode(.{ .tag = .while_cont, .main_token = while_token, .data = .{ .lhs = condition, .rhs = try p.addExtra(Node.WhileCont{ .cont_expr = cont_expr, .then_expr = then_expr, }), }, }); } }; const else_payload = try p.parsePayload(); const else_expr = try p.expectStatement(); return p.addNode(.{ .tag = .@"while", .main_token = while_token, .data = .{ .lhs = condition, .rhs = try p.addExtra(Node.While{ .cont_expr = cont_expr, .then_expr = then_expr, .else_expr = else_expr, }), }, }); } /// BlockExprStatement /// <- BlockExpr /// / AssignExpr SEMICOLON fn parseBlockExprStatement(p: *Parser) !Node.Index { const block_expr = try p.parseBlockExpr(); if (block_expr != 0) { return block_expr; } const assign_expr = try p.parseAssignExpr(); if (assign_expr != 0) { _ = try p.expectTokenRecoverable(.semicolon); return assign_expr; } return null_node; } fn expectBlockExprStatement(p: *Parser) !Node.Index { const node = try p.parseBlockExprStatement(); if (node == 0) { return p.fail(.expected_block_or_expr); } return node; } /// 
BlockExpr <- BlockLabel? Block fn parseBlockExpr(p: *Parser) Error!Node.Index { switch (p.token_tags[p.tok_i]) { .identifier => { if (p.token_tags[p.tok_i + 1] == .colon and p.token_tags[p.tok_i + 2] == .l_brace) { p.tok_i += 2; return p.parseBlock(); } else { return null_node; } }, .l_brace => return p.parseBlock(), else => return null_node, } } /// AssignExpr <- Expr (AssignOp Expr)? /// AssignOp /// <- ASTERISKEQUAL /// / SLASHEQUAL /// / PERCENTEQUAL /// / PLUSEQUAL /// / MINUSEQUAL /// / LARROW2EQUAL /// / RARROW2EQUAL /// / AMPERSANDEQUAL /// / CARETEQUAL /// / PIPEEQUAL /// / ASTERISKPERCENTEQUAL /// / PLUSPERCENTEQUAL /// / MINUSPERCENTEQUAL /// / EQUAL fn parseAssignExpr(p: *Parser) !Node.Index { const expr = try p.parseExpr(); if (expr == 0) return null_node; const tag: Node.Tag = switch (p.token_tags[p.tok_i]) { .asterisk_equal => .assign_mul, .slash_equal => .assign_div, .percent_equal => .assign_mod, .plus_equal => .assign_add, .minus_equal => .assign_sub, .angle_bracket_angle_bracket_left_equal => .assign_bit_shift_left, .angle_bracket_angle_bracket_right_equal => .assign_bit_shift_right, .ampersand_equal => .assign_bit_and, .caret_equal => .assign_bit_xor, .pipe_equal => .assign_bit_or, .asterisk_percent_equal => .assign_mul_wrap, .plus_percent_equal => .assign_add_wrap, .minus_percent_equal => .assign_sub_wrap, .equal => .assign, else => return expr, }; return p.addNode(.{ .tag = tag, .main_token = p.nextToken(), .data = .{ .lhs = expr, .rhs = try p.expectExpr(), }, }); } fn expectAssignExpr(p: *Parser) !Node.Index { const expr = try p.parseAssignExpr(); if (expr == 0) { return p.fail(.expected_expr_or_assignment); } return expr; } /// Expr <- BoolOrExpr fn parseExpr(p: *Parser) Error!Node.Index { return p.parseBoolOrExpr(); } fn expectExpr(p: *Parser) Error!Node.Index { const node = try p.parseExpr(); if (node == 0) { return p.fail(.expected_expr); } else { return node; } } /// BoolOrExpr <- BoolAndExpr (KEYWORD_or BoolAndExpr)* fn 
parseBoolOrExpr(p: *Parser) Error!Node.Index { var res = try p.parseBoolAndExpr(); if (res == 0) return null_node; while (true) { switch (p.token_tags[p.tok_i]) { .keyword_or => { const or_token = p.nextToken(); const rhs = try p.parseBoolAndExpr(); if (rhs == 0) { return p.fail(.invalid_token); } res = try p.addNode(.{ .tag = .bool_or, .main_token = or_token, .data = .{ .lhs = res, .rhs = rhs, }, }); }, else => return res, } } } /// BoolAndExpr <- CompareExpr (KEYWORD_and CompareExpr)* fn parseBoolAndExpr(p: *Parser) !Node.Index { var res = try p.parseCompareExpr(); if (res == 0) return null_node; while (true) { switch (p.token_tags[p.tok_i]) { .keyword_and => { const and_token = p.nextToken(); const rhs = try p.parseCompareExpr(); if (rhs == 0) { return p.fail(.invalid_token); } res = try p.addNode(.{ .tag = .bool_and, .main_token = and_token, .data = .{ .lhs = res, .rhs = rhs, }, }); }, .invalid_ampersands => { try p.warn(.invalid_and); p.tok_i += 1; return p.parseCompareExpr(); }, else => return res, } } } /// CompareExpr <- BitwiseExpr (CompareOp BitwiseExpr)? /// CompareOp /// <- EQUALEQUAL /// / EXCLAMATIONMARKEQUAL /// / LARROW /// / RARROW /// / LARROWEQUAL /// / RARROWEQUAL fn parseCompareExpr(p: *Parser) !Node.Index { const expr = try p.parseBitwiseExpr(); if (expr == 0) return null_node; const tag: Node.Tag = switch (p.token_tags[p.tok_i]) { .equal_equal => .equal_equal, .bang_equal => .bang_equal, .angle_bracket_left => .less_than, .angle_bracket_right => .greater_than, .angle_bracket_left_equal => .less_or_equal, .angle_bracket_right_equal => .greater_or_equal, else => return expr, }; return p.addNode(.{ .tag = tag, .main_token = p.nextToken(), .data = .{ .lhs = expr, .rhs = try p.expectBitwiseExpr(), }, }); } /// BitwiseExpr <- BitShiftExpr (BitwiseOp BitShiftExpr)* /// BitwiseOp /// <- AMPERSAND /// / CARET /// / PIPE /// / KEYWORD_orelse /// / KEYWORD_catch Payload? 
fn parseBitwiseExpr(p: *Parser) !Node.Index {
    var res = try p.parseBitShiftExpr();
    if (res == 0) return null_node;

    while (true) {
        const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
            .ampersand => .bit_and,
            .caret => .bit_xor,
            .pipe => .bit_or,
            .keyword_orelse => .@"orelse",
            .keyword_catch => {
                // `catch` may carry a `|payload|`, so it is built inline here
                // instead of falling through to the shared tag path below.
                const catch_token = p.nextToken();
                _ = try p.parsePayload();
                const rhs = try p.parseBitShiftExpr();
                if (rhs == 0) {
                    return p.fail(.invalid_token);
                }
                res = try p.addNode(.{
                    .tag = .@"catch",
                    .main_token = catch_token,
                    .data = .{
                        .lhs = res,
                        .rhs = rhs,
                    },
                });
                continue;
            },
            else => return res,
        };
        res = try p.addNode(.{
            .tag = tag,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = res,
                .rhs = try p.expectBitShiftExpr(),
            },
        });
    }
}

/// Like parseBitwiseExpr, but emits an error instead of returning null_node.
fn expectBitwiseExpr(p: *Parser) Error!Node.Index {
    const node = try p.parseBitwiseExpr();
    if (node == 0) {
        return p.fail(.invalid_token);
    } else {
        return node;
    }
}

/// BitShiftExpr <- AdditionExpr (BitShiftOp AdditionExpr)*
/// BitShiftOp
///     <- LARROW2
///      / RARROW2
fn parseBitShiftExpr(p: *Parser) Error!Node.Index {
    var res = try p.parseAdditionExpr();
    if (res == 0) return null_node;

    while (true) {
        const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
            .angle_bracket_angle_bracket_left => .bit_shift_left,
            .angle_bracket_angle_bracket_right => .bit_shift_right,
            else => return res,
        };
        res = try p.addNode(.{
            .tag = tag,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = res,
                .rhs = try p.expectAdditionExpr(),
            },
        });
    }
}

/// Like parseBitShiftExpr, but emits an error instead of returning null_node.
fn expectBitShiftExpr(p: *Parser) Error!Node.Index {
    const node = try p.parseBitShiftExpr();
    if (node == 0) {
        return p.fail(.invalid_token);
    } else {
        return node;
    }
}

/// AdditionExpr <- MultiplyExpr (AdditionOp MultiplyExpr)*
/// AdditionOp
///     <- PLUS
///      / MINUS
///      / PLUS2
///      / PLUSPERCENT
///      / MINUSPERCENT
fn parseAdditionExpr(p: *Parser) Error!Node.Index {
    var res = try p.parseMultiplyExpr();
    if (res == 0) return null_node;

    while (true) {
        const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
            .plus => .add,
            .minus => .sub,
            .plus_plus => .array_cat,
            .plus_percent => .add_wrap,
            .minus_percent => .sub_wrap,
            else => return res,
        };
        res = try p.addNode(.{
            .tag = tag,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = res,
                .rhs = try p.expectMultiplyExpr(),
            },
        });
    }
}

/// Like parseAdditionExpr, but emits an error instead of returning null_node.
fn expectAdditionExpr(p: *Parser) Error!Node.Index {
    const node = try p.parseAdditionExpr();
    if (node == 0) {
        return p.fail(.invalid_token);
    }
    return node;
}

/// MultiplyExpr <- PrefixExpr (MultiplyOp PrefixExpr)*
/// MultiplyOp
///     <- PIPE2
///      / ASTERISK
///      / SLASH
///      / PERCENT
///      / ASTERISK2
///      / ASTERISKPERCENT
fn parseMultiplyExpr(p: *Parser) Error!Node.Index {
    var res = try p.parsePrefixExpr();
    if (res == 0) return null_node;

    while (true) {
        const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
            .pipe_pipe => .merge_error_sets,
            .asterisk => .mul,
            .slash => .div,
            .percent => .mod,
            .asterisk_asterisk => .array_mult,
            .asterisk_percent => .mul_wrap,
            else => return res,
        };
        res = try p.addNode(.{
            .tag = tag,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = res,
                .rhs = try p.expectPrefixExpr(),
            },
        });
    }
}

/// Like parseMultiplyExpr, but emits an error instead of returning null_node.
fn expectMultiplyExpr(p: *Parser) Error!Node.Index {
    const node = try p.parseMultiplyExpr();
    if (node == 0) {
        return p.fail(.invalid_token);
    }
    return node;
}

/// PrefixExpr <- PrefixOp* PrimaryExpr
/// PrefixOp
///     <- EXCLAMATIONMARK
///      / MINUS
///      / TILDE
///      / MINUSPERCENT
///      / AMPERSAND
///      / KEYWORD_try
///      / KEYWORD_await
fn parsePrefixExpr(p: *Parser) Error!Node.Index {
    const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
        .bang => .bool_not,
        .minus => .negation,
        .tilde => .bit_not,
        .minus_percent => .negation_wrap,
        .ampersand => .address_of,
        .keyword_try => .@"try",
        .keyword_await => .@"await",
        else => return p.parsePrimaryExpr(),
    };
    // Prefix operators chain right-recursively (e.g. `!-x`).
    return p.addNode(.{
        .tag = tag,
        .main_token = p.nextToken(),
        .data = .{
            .lhs = try p.expectPrefixExpr(),
            .rhs = undefined,
        },
    });
}

/// Like parsePrefixExpr, but emits an error instead of returning null_node.
fn expectPrefixExpr(p: *Parser) Error!Node.Index {
    const node = try p.parsePrefixExpr();
    if (node == 0) {
        return p.fail(.expected_prefix_expr);
    }
    return node;
}

///
/// TypeExpr <- PrefixTypeOp* ErrorUnionExpr
/// PrefixTypeOp
///     <- QUESTIONMARK
///      / KEYWORD_anyframe MINUSRARROW
///      / SliceTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
///      / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
///      / ArrayTypeStart
/// SliceTypeStart <- LBRACKET (COLON Expr)? RBRACKET
/// PtrTypeStart
///     <- ASTERISK
///      / ASTERISK2
///      / LBRACKET ASTERISK (LETTERC / COLON Expr)? RBRACKET
/// ArrayTypeStart <- LBRACKET Expr (COLON Expr)? RBRACKET
fn parseTypeExpr(p: *Parser) Error!Node.Index {
    switch (p.token_tags[p.tok_i]) {
        .question_mark => return p.addNode(.{
            .tag = .optional_type,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = try p.expectTypeExpr(),
                .rhs = undefined,
            },
        }),
        .keyword_anyframe => switch (p.token_tags[p.tok_i + 1]) {
            .arrow => return p.addNode(.{
                .tag = .anyframe_type,
                .main_token = p.nextToken(),
                .data = .{
                    .lhs = p.nextToken(),
                    .rhs = try p.expectTypeExpr(),
                },
            }),
            // Bare `anyframe` is handled as a primary type expression.
            else => return p.parseErrorUnionExpr(),
        },
        .asterisk => {
            // Single-item pointer: `*T`, with optional align/bit-range modifiers.
            const asterisk = p.nextToken();
            const mods = try p.parsePtrModifiers();
            const elem_type = try p.expectTypeExpr();
            if (mods.bit_range_start == 0) {
                return p.addNode(.{
                    .tag = .ptr_type_aligned,
                    .main_token = asterisk,
                    .data = .{
                        .lhs = mods.align_node,
                        .rhs = elem_type,
                    },
                });
            } else {
                return p.addNode(.{
                    .tag = .ptr_type_bit_range,
                    .main_token = asterisk,
                    .data = .{
                        .lhs = try p.addExtra(Node.PtrTypeBitRange{
                            .sentinel = 0,
                            .align_node = mods.align_node,
                            .bit_range_start = mods.bit_range_start,
                            .bit_range_end = mods.bit_range_end,
                        }),
                        .rhs = elem_type,
                    },
                });
            }
        },
        .asterisk_asterisk => {
            // `**T` is a single token; build the inner pointer node first,
            // then wrap it in an outer pointer sharing the same main token.
            const asterisk = p.nextToken();
            const mods = try p.parsePtrModifiers();
            const elem_type = try p.expectTypeExpr();
            const inner: Node.Index = inner: {
                if (mods.bit_range_start == 0) {
                    break :inner try p.addNode(.{
                        .tag = .ptr_type_aligned,
                        .main_token = asterisk,
                        .data = .{
                            .lhs = mods.align_node,
                            .rhs = elem_type,
                        },
                    });
                } else {
                    break :inner try p.addNode(.{
                        .tag = .ptr_type_bit_range,
                        .main_token = asterisk,
                        .data = .{
                            .lhs = try p.addExtra(Node.PtrTypeBitRange{
                                .sentinel = 0,
                                .align_node = mods.align_node,
                                .bit_range_start = mods.bit_range_start,
                                .bit_range_end = mods.bit_range_end,
                            }),
                            .rhs = elem_type,
                        },
                    });
                }
            };
            return p.addNode(.{
                .tag = .ptr_type_aligned,
                .main_token = asterisk,
                .data = .{
                    .lhs = 0,
                    .rhs = inner,
                },
            });
        },
        .l_bracket => switch (p.token_tags[p.tok_i + 1]) {
            .asterisk => {
                // Many-item pointer: `[*]T`, `[*c]T`, or `[*:sentinel]T`.
                // NOTE(review): `lbracket` appears unused in this branch —
                // confirm against the targeted compiler's unused-local rules.
                const lbracket = p.nextToken();
                const asterisk = p.nextToken();
                var sentinel: Node.Index = 0;
                prefix: {
                    if (p.eatToken(.identifier)) |ident| {
                        // Distinguish `[*c]` (C pointer) from `[*` followed by
                        // some other identifier by peeking at the raw source.
                        const token_slice = p.source[p.token_starts[ident]..][0..2];
                        if (!std.mem.eql(u8, token_slice, "c]")) {
                            p.tok_i -= 1;
                        } else {
                            break :prefix;
                        }
                    }
                    if (p.eatToken(.colon)) |_| {
                        sentinel = try p.expectExpr();
                    }
                }
                _ = try p.expectToken(.r_bracket);
                const mods = try p.parsePtrModifiers();
                const elem_type = try p.expectTypeExpr();
                if (mods.bit_range_start == 0) {
                    // Pick the smallest node form that still encodes
                    // the sentinel/align combination present.
                    if (sentinel == 0) {
                        return p.addNode(.{
                            .tag = .ptr_type_aligned,
                            .main_token = asterisk,
                            .data = .{
                                .lhs = mods.align_node,
                                .rhs = elem_type,
                            },
                        });
                    } else if (mods.align_node == 0) {
                        return p.addNode(.{
                            .tag = .ptr_type_sentinel,
                            .main_token = asterisk,
                            .data = .{
                                .lhs = sentinel,
                                .rhs = elem_type,
                            },
                        });
                    } else {
                        return p.addNode(.{
                            .tag = .ptr_type,
                            .main_token = asterisk,
                            .data = .{
                                .lhs = try p.addExtra(Node.PtrType{
                                    .sentinel = sentinel,
                                    .align_node = mods.align_node,
                                }),
                                .rhs = elem_type,
                            },
                        });
                    }
                } else {
                    return p.addNode(.{
                        .tag = .ptr_type_bit_range,
                        .main_token = asterisk,
                        .data = .{
                            .lhs = try p.addExtra(Node.PtrTypeBitRange{
                                .sentinel = sentinel,
                                .align_node = mods.align_node,
                                .bit_range_start = mods.bit_range_start,
                                .bit_range_end = mods.bit_range_end,
                            }),
                            .rhs = elem_type,
                        },
                    });
                }
            },
            else => {
                // Slice (`[]T`, `[:s]T`) or array (`[N]T`, `[N:s]T`) type.
                const lbracket = p.nextToken();
                const len_expr = try p.parseExpr();
                const sentinel: Node.Index = if (p.eatToken(.colon)) |_|
                    try p.expectExpr()
                else
                    0;
                _ = try p.expectToken(.r_bracket);
                if (len_expr == 0) {
                    // No length expression: this is a slice type.
                    const mods = try p.parsePtrModifiers();
                    const elem_type = try p.expectTypeExpr();
                    if (mods.bit_range_start != 0) {
                        // Bit ranges are only meaningful on pointers.
                        try p.warnMsg(.{
                            .tag = .invalid_bit_range,
                            .token = p.nodes.items(.main_token)[mods.bit_range_start],
                        });
                    }
                    if (sentinel == 0) {
                        return p.addNode(.{
                            .tag = .ptr_type_aligned,
                            .main_token = lbracket,
                            .data = .{
                                .lhs = mods.align_node,
                                .rhs = elem_type,
                            },
                        });
                    } else if (mods.align_node == 0) {
                        return p.addNode(.{
                            .tag = .ptr_type_sentinel,
                            .main_token = lbracket,
                            .data = .{
                                .lhs = sentinel,
                                .rhs = elem_type,
                            },
                        });
                    } else {
                        return p.addNode(.{
                            .tag = .ptr_type,
                            .main_token = lbracket,
                            .data = .{
                                .lhs = try p.addExtra(Node.PtrType{
                                    .sentinel = sentinel,
                                    .align_node = mods.align_node,
                                }),
                                .rhs = elem_type,
                            },
                        });
                    }
                } else {
                    // A length expression makes it an array type; pointer
                    // modifiers are not allowed on the array child type.
                    switch (p.token_tags[p.tok_i]) {
                        .keyword_align,
                        .keyword_const,
                        .keyword_volatile,
                        .keyword_allowzero,
                        => return p.fail(.ptr_mod_on_array_child_type),
                        else => {},
                    }
                    const elem_type = try p.expectTypeExpr();
                    if (sentinel == 0) {
                        return p.addNode(.{
                            .tag = .array_type,
                            .main_token = lbracket,
                            .data = .{
                                .lhs = len_expr,
                                .rhs = elem_type,
                            },
                        });
                    } else {
                        return p.addNode(.{
                            .tag = .array_type_sentinel,
                            .main_token = lbracket,
                            .data = .{
                                .lhs = len_expr,
                                .rhs = try p.addExtra(.{
                                    .elem_type = elem_type,
                                    .sentinel = sentinel,
                                }),
                            },
                        });
                    }
                }
            },
        },
        else => return p.parseErrorUnionExpr(),
    }
}

/// Like parseTypeExpr, but emits an error instead of returning null_node.
fn expectTypeExpr(p: *Parser) Error!Node.Index {
    const node = try p.parseTypeExpr();
    if (node == 0) {
        return p.fail(.expected_type_expr);
    }
    return node;
}

/// PrimaryExpr
///     <- AsmExpr
///      / IfExpr
///      / KEYWORD_break BreakLabel? Expr?
///      / KEYWORD_comptime Expr
///      / KEYWORD_nosuspend Expr
///      / KEYWORD_continue BreakLabel?
///      / KEYWORD_resume Expr
///      / KEYWORD_return Expr?
///      / BlockLabel?
/// LoopExpr
///      / Block
///      / CurlySuffixExpr
fn parsePrimaryExpr(p: *Parser) !Node.Index {
    switch (p.token_tags[p.tok_i]) {
        .keyword_asm => return p.expectAsmExpr(),
        .keyword_if => return p.parseIfExpr(),
        .keyword_break => {
            p.tok_i += 1;
            return p.addNode(.{
                .tag = .@"break",
                .main_token = p.tok_i - 1,
                .data = .{
                    .lhs = try p.parseBreakLabel(),
                    .rhs = try p.parseExpr(),
                },
            });
        },
        .keyword_continue => {
            p.tok_i += 1;
            return p.addNode(.{
                .tag = .@"continue",
                .main_token = p.tok_i - 1,
                .data = .{
                    .lhs = try p.parseBreakLabel(),
                    .rhs = undefined,
                },
            });
        },
        .keyword_comptime => {
            p.tok_i += 1;
            return p.addNode(.{
                .tag = .@"comptime",
                .main_token = p.tok_i - 1,
                .data = .{
                    .lhs = try p.expectExpr(),
                    .rhs = undefined,
                },
            });
        },
        .keyword_nosuspend => {
            p.tok_i += 1;
            return p.addNode(.{
                .tag = .@"nosuspend",
                .main_token = p.tok_i - 1,
                .data = .{
                    .lhs = try p.expectExpr(),
                    .rhs = undefined,
                },
            });
        },
        .keyword_resume => {
            p.tok_i += 1;
            return p.addNode(.{
                .tag = .@"resume",
                .main_token = p.tok_i - 1,
                .data = .{
                    .lhs = try p.expectExpr(),
                    .rhs = undefined,
                },
            });
        },
        .keyword_return => {
            p.tok_i += 1;
            return p.addNode(.{
                .tag = .@"return",
                .main_token = p.tok_i - 1,
                .data = .{
                    .lhs = try p.parseExpr(),
                    .rhs = undefined,
                },
            });
        },
        .identifier => {
            // Possibly a labeled loop or labeled block: `label: while/for/{`.
            if (p.token_tags[p.tok_i + 1] == .colon) {
                switch (p.token_tags[p.tok_i + 2]) {
                    .keyword_inline => {
                        p.tok_i += 3;
                        switch (p.token_tags[p.tok_i]) {
                            .keyword_for => return p.parseForExpr(),
                            .keyword_while => return p.parseWhileExpr(),
                            else => return p.fail(.expected_inlinable),
                        }
                    },
                    .keyword_for => {
                        p.tok_i += 2;
                        return p.parseForExpr();
                    },
                    .keyword_while => {
                        p.tok_i += 2;
                        return p.parseWhileExpr();
                    },
                    .l_brace => {
                        p.tok_i += 2;
                        return p.parseBlock();
                    },
                    else => return p.parseCurlySuffixExpr(),
                }
            } else {
                return p.parseCurlySuffixExpr();
            }
        },
        .keyword_inline => {
            p.tok_i += 1;
            switch (p.token_tags[p.tok_i]) {
                .keyword_for => return p.parseForExpr(),
                .keyword_while => return p.parseWhileExpr(),
                else => return p.fail(.expected_inlinable),
            }
        },
        .keyword_for => return p.parseForExpr(),
        .keyword_while => return p.parseWhileExpr(),
        .l_brace => return p.parseBlock(),
        else => return p.parseCurlySuffixExpr(),
    }
}

/// IfExpr <- IfPrefix Expr (KEYWORD_else Payload? Expr)?
fn parseIfExpr(p: *Parser) !Node.Index {
    return p.parseIf(parseExpr);
}

/// Block <- LBRACE Statement* RBRACE
///
/// Blocks with up to two statements use the compact block_two forms;
/// larger blocks spill their statement list into extra_data.
fn parseBlock(p: *Parser) !Node.Index {
    const lbrace = p.eatToken(.l_brace) orelse return null_node;
    if (p.eatToken(.r_brace)) |_| {
        return p.addNode(.{
            .tag = .block_two,
            .main_token = lbrace,
            .data = .{
                .lhs = 0,
                .rhs = 0,
            },
        });
    }
    const stmt_one = try p.expectStatementRecoverable();
    if (p.eatToken(.r_brace)) |_| {
        // tok_i - 2 is the token just before the consumed r_brace; a
        // trailing semicolon selects the *_semicolon node variant.
        const semicolon = p.token_tags[p.tok_i - 2] == .semicolon;
        return p.addNode(.{
            .tag = if (semicolon) .block_two_semicolon else .block_two,
            .main_token = lbrace,
            .data = .{
                .lhs = stmt_one,
                .rhs = 0,
            },
        });
    }
    const stmt_two = try p.expectStatementRecoverable();
    if (p.eatToken(.r_brace)) |_| {
        const semicolon = p.token_tags[p.tok_i - 2] == .semicolon;
        return p.addNode(.{
            .tag = if (semicolon) .block_two_semicolon else .block_two,
            .main_token = lbrace,
            .data = .{
                .lhs = stmt_one,
                .rhs = stmt_two,
            },
        });
    }
    var statements = std.ArrayList(Node.Index).init(p.gpa);
    defer statements.deinit();
    try statements.appendSlice(&.{ stmt_one, stmt_two });
    while (true) {
        const statement = try p.expectStatementRecoverable();
        if (statement == 0) break;
        try statements.append(statement);
        if (p.token_tags[p.tok_i] == .r_brace) break;
    }
    _ = try p.expectToken(.r_brace);
    const semicolon = p.token_tags[p.tok_i - 2] == .semicolon;
    const statements_span = try p.listToSpan(statements.items);
    return p.addNode(.{
        .tag = if (semicolon) .block_semicolon else .block,
        .main_token = lbrace,
        .data = .{
            .lhs = statements_span.start,
            .rhs = statements_span.end,
        },
    });
}

/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
/// ForExpr <- ForPrefix Expr (KEYWORD_else Expr)?
fn parseForExpr(p: *Parser) !Node.Index {
    const for_token = p.eatToken(.keyword_for) orelse return null_node;
    _ = try p.expectToken(.l_paren);
    const array_expr = try p.expectExpr();
    _ = try p.expectToken(.r_paren);
    // A `for` without a `|payload|` is an error, but parsing continues.
    const found_payload = try p.parsePtrIndexPayload();
    if (found_payload == 0) try p.warn(.expected_loop_payload);

    const then_expr = try p.expectExpr();
    // NOTE(review): `else_token` is unused after the orelse — confirm
    // against the targeted compiler's unused-local rules.
    const else_token = p.eatToken(.keyword_else) orelse {
        return p.addNode(.{
            .tag = .for_simple,
            .main_token = for_token,
            .data = .{
                .lhs = array_expr,
                .rhs = then_expr,
            },
        });
    };
    const else_expr = try p.expectExpr();
    return p.addNode(.{
        .tag = .@"for",
        .main_token = for_token,
        .data = .{
            .lhs = array_expr,
            .rhs = try p.addExtra(Node.If{
                .then_expr = then_expr,
                .else_expr = else_expr,
            }),
        },
    });
}

/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
/// WhileExpr <- WhilePrefix Expr (KEYWORD_else Payload? Expr)?
fn parseWhileExpr(p: *Parser) !Node.Index {
    const while_token = p.eatToken(.keyword_while) orelse return null_node;
    _ = try p.expectToken(.l_paren);
    const condition = try p.expectExpr();
    _ = try p.expectToken(.r_paren);
    const then_payload = try p.parsePtrPayload();
    const cont_expr = try p.parseWhileContinueExpr();

    const then_expr = try p.expectExpr();
    const else_token = p.eatToken(.keyword_else) orelse {
        // Without `else`, pick the node form based on whether a
        // `: (continue expr)` clause was present.
        if (cont_expr == 0) {
            return p.addNode(.{
                .tag = .while_simple,
                .main_token = while_token,
                .data = .{
                    .lhs = condition,
                    .rhs = then_expr,
                },
            });
        } else {
            return p.addNode(.{
                .tag = .while_cont,
                .main_token = while_token,
                .data = .{
                    .lhs = condition,
                    .rhs = try p.addExtra(Node.WhileCont{
                        .cont_expr = cont_expr,
                        .then_expr = then_expr,
                    }),
                },
            });
        }
    };
    const else_payload = try p.parsePayload();
    const else_expr = try p.expectExpr();
    return p.addNode(.{
        .tag = .@"while",
        .main_token = while_token,
        .data = .{
            .lhs = condition,
            .rhs = try p.addExtra(Node.While{
                .cont_expr = cont_expr,
                .then_expr = then_expr,
                .else_expr = else_expr,
            }),
        },
    });
}

/// CurlySuffixExpr <- TypeExpr
/// InitList?
/// InitList
///     <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
///      / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
///      / LBRACE RBRACE
fn parseCurlySuffixExpr(p: *Parser) !Node.Index {
    const lhs = try p.parseTypeExpr();
    if (lhs == 0) return null_node;
    const lbrace = p.eatToken(.l_brace) orelse return lhs;

    // If there are 0 or 1 items, we can use ArrayInitOne/StructInitOne;
    // otherwise we use the full ArrayInit/StructInit.
    if (p.eatToken(.r_brace)) |_| {
        return p.addNode(.{
            .tag = .struct_init_one,
            .main_token = lbrace,
            .data = .{
                .lhs = lhs,
                .rhs = 0,
            },
        });
    }
    // A leading `.field = ...` selects the struct-init path; otherwise the
    // list is treated as an array initializer.
    const field_init = try p.parseFieldInit();
    if (field_init != 0) {
        const comma_one = p.eatToken(.comma);
        if (p.eatToken(.r_brace)) |_| {
            return p.addNode(.{
                .tag = if (comma_one != null) .struct_init_one_comma else .struct_init_one,
                .main_token = lbrace,
                .data = .{
                    .lhs = lhs,
                    .rhs = field_init,
                },
            });
        }
        var init_list = std.ArrayList(Node.Index).init(p.gpa);
        defer init_list.deinit();
        try init_list.append(field_init);
        while (true) {
            const next = try p.expectFieldInit();
            try init_list.append(next);
            switch (p.token_tags[p.nextToken()]) {
                .comma => {
                    if (p.eatToken(.r_brace)) |_| break;
                    continue;
                },
                .r_brace => break,
                .colon, .r_paren, .r_bracket => {
                    p.tok_i -= 1;
                    return p.failExpected(.r_brace);
                },
                else => {
                    // This is likely just a missing comma;
                    // give an error but continue parsing this list.
                    p.tok_i -= 1;
                    try p.warnExpected(.comma);
                },
            }
        }
        const span = try p.listToSpan(init_list.items);
        return p.addNode(.{
            // tok_i - 2 is the token before the consumed r_brace: a comma
            // there means the list had a trailing comma.
            .tag = if (p.token_tags[p.tok_i - 2] == .comma) .struct_init_comma else .struct_init,
            .main_token = lbrace,
            .data = .{
                .lhs = lhs,
                .rhs = try p.addExtra(Node.SubRange{
                    .start = span.start,
                    .end = span.end,
                }),
            },
        });
    }
    const elem_init = try p.expectExpr();
    const comma_one = p.eatToken(.comma);
    if (p.eatToken(.r_brace)) |_| {
        return p.addNode(.{
            .tag = if (comma_one != null) .array_init_one_comma else .array_init_one,
            .main_token = lbrace,
            .data = .{
                .lhs = lhs,
                .rhs = elem_init,
            },
        });
    }
    if (comma_one == null) {
        try p.warnExpected(.comma);
    }
    var init_list = std.ArrayList(Node.Index).init(p.gpa);
    defer init_list.deinit();
    try init_list.append(elem_init);
    var trailing_comma = true;
    var next = try p.parseExpr();
    while (next != 0) : (next = try p.parseExpr()) {
        try init_list.append(next);
        if (p.eatToken(.comma) == null) {
            trailing_comma = false;
            break;
        }
    }
    _ = try p.expectToken(.r_brace);
    const span = try p.listToSpan(init_list.items);
    return p.addNode(.{
        .tag = if (trailing_comma) .array_init_comma else .array_init,
        .main_token = lbrace,
        .data = .{
            .lhs = lhs,
            .rhs = try p.addExtra(Node.SubRange{
                .start = span.start,
                .end = span.end,
            }),
        },
    });
}

/// ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)?
fn parseErrorUnionExpr(p: *Parser) !Node.Index {
    const suffix_expr = try p.parseSuffixExpr();
    if (suffix_expr == 0) return null_node;
    const bang = p.eatToken(.bang) orelse return suffix_expr;
    return p.addNode(.{
        .tag = .error_union,
        .main_token = bang,
        .data = .{
            .lhs = suffix_expr,
            .rhs = try p.expectTypeExpr(),
        },
    });
}

/// SuffixExpr
///     <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
///      / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
/// FnCallArguments <- LPAREN ExprList RPAREN
/// ExprList <- (Expr COMMA)* Expr?
fn parseSuffixExpr(p: *Parser) !Node.Index {
    if (p.eatToken(.keyword_async)) |async_token| {
        // `async` requires a function call: parse the callee (with any
        // suffix operators), then insist on an argument list.
        var res = try p.expectPrimaryTypeExpr();
        while (true) {
            const node = try p.parseSuffixOp(res);
            if (node == 0) break;
            res = node;
        }
        const lparen = p.nextToken();
        if (p.token_tags[lparen] != .l_paren) {
            p.tok_i -= 1;
            try p.warn(.expected_param_list);
            return res;
        }
        if (p.eatToken(.r_paren)) |_| {
            return p.addNode(.{
                .tag = .async_call_one,
                .main_token = lparen,
                .data = .{
                    .lhs = res,
                    .rhs = 0,
                },
            });
        }
        const param_one = try p.expectExpr();
        const comma_one = p.eatToken(.comma);
        if (p.eatToken(.r_paren)) |_| {
            return p.addNode(.{
                .tag = if (comma_one == null) .async_call_one else .async_call_one_comma,
                .main_token = lparen,
                .data = .{
                    .lhs = res,
                    .rhs = param_one,
                },
            });
        }
        if (comma_one == null) {
            try p.warnExpected(.comma);
        }
        var param_list = std.ArrayList(Node.Index).init(p.gpa);
        defer param_list.deinit();
        try param_list.append(param_one);
        while (true) {
            const next = try p.expectExpr();
            try param_list.append(next);
            switch (p.token_tags[p.nextToken()]) {
                .comma => {
                    if (p.eatToken(.r_paren)) |_| {
                        const span = try p.listToSpan(param_list.items);
                        return p.addNode(.{
                            .tag = .async_call_comma,
                            .main_token = lparen,
                            .data = .{
                                .lhs = res,
                                .rhs = try p.addExtra(Node.SubRange{
                                    .start = span.start,
                                    .end = span.end,
                                }),
                            },
                        });
                    } else {
                        continue;
                    }
                },
                .r_paren => {
                    const span = try p.listToSpan(param_list.items);
                    return p.addNode(.{
                        .tag = .async_call,
                        .main_token = lparen,
                        .data = .{
                            .lhs = res,
                            .rhs = try p.addExtra(Node.SubRange{
                                .start = span.start,
                                .end = span.end,
                            }),
                        },
                    });
                },
                .colon, .r_brace, .r_bracket => {
                    p.tok_i -= 1;
                    return p.failExpected(.r_paren);
                },
                else => {
                    // Likely a missing comma; warn and continue the list.
                    p.tok_i -= 1;
                    try p.warnExpected(.comma);
                },
            }
        }
    }
    // Non-async path: primary expression followed by any number of suffix
    // operators (field access, indexing, deref, ...) and call argument lists.
    var res = try p.parsePrimaryTypeExpr();
    if (res == 0) return res;
    while (true) {
        const suffix_op = try p.parseSuffixOp(res);
        if (suffix_op != 0) {
            res = suffix_op;
            continue;
        }
        res = res: {
            const lparen = p.eatToken(.l_paren) orelse return res;
            if (p.eatToken(.r_paren)) |_| {
                break :res try p.addNode(.{
                    .tag = .call_one,
                    .main_token = lparen,
                    .data = .{
                        .lhs = res,
                        .rhs = 0,
                    },
                });
            }
            const param_one = try p.expectExpr();
            const comma_one = p.eatToken(.comma);
            if (p.eatToken(.r_paren)) |_| {
                break :res try p.addNode(.{
                    .tag = if (comma_one == null) .call_one else .call_one_comma,
                    .main_token = lparen,
                    .data = .{
                        .lhs = res,
                        .rhs = param_one,
                    },
                });
            }
            if (comma_one == null) {
                try p.warnExpected(.comma);
            }
            var param_list = std.ArrayList(Node.Index).init(p.gpa);
            defer param_list.deinit();
            try param_list.append(param_one);
            while (true) {
                const next = try p.expectExpr();
                try param_list.append(next);
                switch (p.token_tags[p.nextToken()]) {
                    .comma => {
                        if (p.eatToken(.r_paren)) |_| {
                            const span = try p.listToSpan(param_list.items);
                            break :res try p.addNode(.{
                                .tag = .call_comma,
                                .main_token = lparen,
                                .data = .{
                                    .lhs = res,
                                    .rhs = try p.addExtra(Node.SubRange{
                                        .start = span.start,
                                        .end = span.end,
                                    }),
                                },
                            });
                        } else {
                            continue;
                        }
                    },
                    .r_paren => {
                        const span = try p.listToSpan(param_list.items);
                        break :res try p.addNode(.{
                            .tag = .call,
                            .main_token = lparen,
                            .data = .{
                                .lhs = res,
                                .rhs = try p.addExtra(Node.SubRange{
                                    .start = span.start,
                                    .end = span.end,
                                }),
                            },
                        });
                    },
                    .colon, .r_brace, .r_bracket => {
                        p.tok_i -= 1;
                        return p.failExpected(.r_paren);
                    },
                    else => {
                        // Likely a missing comma; warn and continue the list.
                        p.tok_i -= 1;
                        try p.warnExpected(.comma);
                    },
                }
            }
        };
    }
}

/// PrimaryTypeExpr
///     <- BUILTINIDENTIFIER FnCallArguments
///      / CHAR_LITERAL
///      / ContainerDecl
///      / DOT IDENTIFIER
///      / DOT InitList
///      / ErrorSetDecl
///      / FLOAT
///      / FnProto
///      / GroupedExpr
///      / LabeledTypeExpr
///      / IDENTIFIER
///      / IfTypeExpr
///      / INTEGER
///      / KEYWORD_comptime TypeExpr
///      / KEYWORD_error DOT IDENTIFIER
///      / KEYWORD_false
///      / KEYWORD_null
///      / KEYWORD_anyframe
///      / KEYWORD_true
///      / KEYWORD_undefined
///      / KEYWORD_unreachable
///      / STRINGLITERAL
///      / SwitchExpr
/// ContainerDecl <- (KEYWORD_extern / KEYWORD_packed)?
/// ContainerDeclAuto
/// ContainerDeclAuto <- ContainerDeclType LBRACE ContainerMembers RBRACE
/// InitList
///     <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
///      / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
///      / LBRACE RBRACE
/// ErrorSetDecl <- KEYWORD_error LBRACE IdentifierList RBRACE
/// GroupedExpr <- LPAREN Expr RPAREN
/// IfTypeExpr <- IfPrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
/// LabeledTypeExpr
///     <- BlockLabel Block
///      / BlockLabel? LoopTypeExpr
/// LoopTypeExpr <- KEYWORD_inline? (ForTypeExpr / WhileTypeExpr)
fn parsePrimaryTypeExpr(p: *Parser) !Node.Index {
    switch (p.token_tags[p.tok_i]) {
        .char_literal => return p.addNode(.{
            .tag = .char_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .integer_literal => return p.addNode(.{
            .tag = .integer_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .float_literal => return p.addNode(.{
            .tag = .float_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .keyword_false => return p.addNode(.{
            .tag = .false_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .keyword_true => return p.addNode(.{
            .tag = .true_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .keyword_null => return p.addNode(.{
            .tag = .null_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .keyword_undefined => return p.addNode(.{
            .tag = .undefined_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .keyword_unreachable => return p.addNode(.{
            .tag = .unreachable_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .keyword_anyframe => return p.addNode(.{
            .tag = .anyframe_literal,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = undefined,
                .rhs = undefined,
            },
        }),
        .string_literal => {
            const main_token = p.nextToken();
            return p.addNode(.{
                .tag = .string_literal,
                .main_token = main_token,
                .data = .{
                    .lhs = undefined,
                    .rhs = undefined,
                },
            });
        },
        .builtin => return p.parseBuiltinCall(),
        .keyword_fn => return p.parseFnProto(),
        .keyword_if => return p.parseIf(parseTypeExpr),
        .keyword_switch => return p.expectSwitchExpr(),
        .keyword_extern,
        .keyword_packed,
        => {
            p.tok_i += 1;
            return p.parseContainerDeclAuto();
        },
        .keyword_struct,
        .keyword_opaque,
        .keyword_enum,
        .keyword_union,
        => return p.parseContainerDeclAuto(),
        .keyword_comptime => return p.addNode(.{
            .tag = .@"comptime",
            .main_token = p.nextToken(),
            .data = .{
                .lhs = try p.expectTypeExpr(),
                .rhs = undefined,
            },
        }),
        .multiline_string_literal_line => {
            // Consume the whole run of `\\` lines; lhs/rhs record the
            // first and last line tokens of the literal.
            const first_line = p.nextToken();
            while (p.token_tags[p.tok_i] == .multiline_string_literal_line) {
                p.tok_i += 1;
            }
            return p.addNode(.{
                .tag = .multiline_string_literal,
                .main_token = first_line,
                .data = .{
                    .lhs = first_line,
                    .rhs = p.tok_i - 1,
                },
            });
        },
        .identifier => switch (p.token_tags[p.tok_i + 1]) {
            // Possibly a labeled loop or labeled block.
            .colon => switch (p.token_tags[p.tok_i + 2]) {
                .keyword_inline => {
                    p.tok_i += 3;
                    switch (p.token_tags[p.tok_i]) {
                        .keyword_for => return p.parseForTypeExpr(),
                        .keyword_while => return p.parseWhileTypeExpr(),
                        else => return p.fail(.expected_inlinable),
                    }
                },
                .keyword_for => {
                    p.tok_i += 2;
                    return p.parseForTypeExpr();
                },
                .keyword_while => {
                    p.tok_i += 2;
                    return p.parseWhileTypeExpr();
                },
                .l_brace => {
                    p.tok_i += 2;
                    return p.parseBlock();
                },
                else => return p.addNode(.{
                    .tag = .identifier,
                    .main_token = p.nextToken(),
                    .data = .{
                        .lhs = undefined,
                        .rhs = undefined,
                    },
                }),
            },
            else => return p.addNode(.{
                .tag = .identifier,
                .main_token = p.nextToken(),
                .data = .{
                    .lhs = undefined,
                    .rhs = undefined,
                },
            }),
        },
        .keyword_inline => {
            p.tok_i += 1;
            switch (p.token_tags[p.tok_i]) {
                .keyword_for => return p.parseForTypeExpr(),
                .keyword_while => return p.parseWhileTypeExpr(),
                else => return p.fail(.expected_inlinable),
            }
        },
        .keyword_for => return p.parseForTypeExpr(),
        .keyword_while => return p.parseWhileTypeExpr(),
        .period => switch (p.token_tags[p.tok_i + 1]) {
            .identifier => return p.addNode(.{
                // Field initializers are evaluated in order here: .data
                // consumes the dot token before .main_token consumes the
                // identifier.
                .tag = .enum_literal,
                .data = .{
                    .lhs = p.nextToken(), // dot
                    .rhs = undefined,
                },
                .main_token = p.nextToken(), // identifier
            }),
            .l_brace => {
                const lbrace = p.tok_i + 1;
                p.tok_i = lbrace + 1;

                // If there are 0, 1, or 2 items, we can use ArrayInitDotTwo/StructInitDotTwo;
                // otherwise we use the full ArrayInitDot/StructInitDot.
                if (p.eatToken(.r_brace)) |_| {
                    return p.addNode(.{
                        .tag = .struct_init_dot_two,
                        .main_token = lbrace,
                        .data = .{
                            .lhs = 0,
                            .rhs = 0,
                        },
                    });
                }
                const field_init_one = try p.parseFieldInit();
                if (field_init_one != 0) {
                    const comma_one = p.eatToken(.comma);
                    if (p.eatToken(.r_brace)) |_| {
                        return p.addNode(.{
                            .tag = if (comma_one != null) .struct_init_dot_two_comma else .struct_init_dot_two,
                            .main_token = lbrace,
                            .data = .{
                                .lhs = field_init_one,
                                .rhs = 0,
                            },
                        });
                    }
                    if (comma_one == null) {
                        try p.warnExpected(.comma);
                    }
                    const field_init_two = try p.expectFieldInit();
                    const comma_two = p.eatToken(.comma);
                    if (p.eatToken(.r_brace)) |_| {
                        return p.addNode(.{
                            .tag = if (comma_two != null) .struct_init_dot_two_comma else .struct_init_dot_two,
                            .main_token = lbrace,
                            .data = .{
                                .lhs = field_init_one,
                                .rhs = field_init_two,
                            },
                        });
                    }
                    if (comma_two == null) {
                        try p.warnExpected(.comma);
                    }
                    var init_list = std.ArrayList(Node.Index).init(p.gpa);
                    defer init_list.deinit();
                    try init_list.appendSlice(&.{ field_init_one, field_init_two });
                    while (true) {
                        const next = try p.expectFieldInit();
                        assert(next != 0);
                        try init_list.append(next);
                        switch (p.token_tags[p.nextToken()]) {
                            .comma => {
                                if (p.eatToken(.r_brace)) |_| break;
                                continue;
                            },
                            .r_brace => break,
                            .colon, .r_paren, .r_bracket => {
                                p.tok_i -= 1;
                                return p.failExpected(.r_brace);
                            },
                            else => {
                                // Likely a missing comma; warn and continue.
                                p.tok_i -= 1;
                                try p.warnExpected(.comma);
                            },
                        }
                    }
                    const span = try p.listToSpan(init_list.items);
                    const trailing_comma = p.token_tags[p.tok_i - 2] == .comma;
                    return p.addNode(.{
                        .tag = if (trailing_comma) .struct_init_dot_comma else .struct_init_dot,
                        .main_token = lbrace,
                        .data = .{
                            .lhs = span.start,
                            .rhs = span.end,
                        },
                    });
                }
                const elem_init_one = try p.expectExpr();
                const comma_one = p.eatToken(.comma);
                if (p.eatToken(.r_brace)) |_| {
                    return p.addNode(.{
                        .tag = if (comma_one != null) .array_init_dot_two_comma else .array_init_dot_two,
                        .main_token = lbrace,
                        .data = .{
                            .lhs = elem_init_one,
                            .rhs = 0,
                        },
                    });
                }
                if (comma_one == null) {
                    try p.warnExpected(.comma);
                }
                const elem_init_two = try p.expectExpr();
                const comma_two = p.eatToken(.comma);
                if (p.eatToken(.r_brace)) |_| {
                    return p.addNode(.{
                        .tag = if (comma_two != null) .array_init_dot_two_comma else .array_init_dot_two,
                        .main_token = lbrace,
                        .data = .{
                            .lhs = elem_init_one,
                            .rhs = elem_init_two,
                        },
                    });
                }
                if (comma_two == null) {
                    try p.warnExpected(.comma);
                }
                var init_list = std.ArrayList(Node.Index).init(p.gpa);
                defer init_list.deinit();
                try init_list.appendSlice(&.{ elem_init_one, elem_init_two });
                while (true) {
                    const next = try p.expectExpr();
                    if (next == 0) break;
                    try init_list.append(next);
                    switch (p.token_tags[p.nextToken()]) {
                        .comma => {
                            if (p.eatToken(.r_brace)) |_| break;
                            continue;
                        },
                        .r_brace => break,
                        .colon, .r_paren, .r_bracket => {
                            p.tok_i -= 1;
                            return p.failExpected(.r_brace);
                        },
                        else => {
                            // Likely a missing comma; warn and continue.
                            p.tok_i -= 1;
                            try p.warnExpected(.comma);
                        },
                    }
                }
                const span = try p.listToSpan(init_list.items);
                return p.addNode(.{
                    .tag = if (p.token_tags[p.tok_i - 2] == .comma) .array_init_dot_comma else .array_init_dot,
                    .main_token = lbrace,
                    .data = .{
                        .lhs = span.start,
                        .rhs = span.end,
                    },
                });
            },
            else => return null_node,
        },
        .keyword_error => switch (p.token_tags[p.tok_i + 1]) {
            .l_brace => {
                // Error set declaration: `error { A, B, ... }`.
                const error_token = p.tok_i;
                p.tok_i += 2;
                if (p.eatToken(.r_brace)) |rbrace| {
                    return p.addNode(.{
                        .tag = .error_set_decl,
                        .main_token = error_token,
                        .data = .{
                            .lhs = undefined,
                            .rhs = rbrace,
                        },
                    });
                }
                while (true) {
                    // NOTE(review): `doc_comment` and `identifier` are unused
                    // here — confirm against the targeted compiler's
                    // unused-local rules.
                    const doc_comment = try p.eatDocComments();
                    const identifier = try p.expectToken(.identifier);
                    switch (p.token_tags[p.nextToken()]) {
                        .comma => {
                            if (p.eatToken(.r_brace)) |_| break;
                            continue;
                        },
                        .r_brace => break,
                        .colon, .r_paren, .r_bracket => {
                            p.tok_i -= 1;
                            return p.failExpected(.r_brace);
                        },
                        else => {
                            // This is likely just a missing comma;
                            // give an error but continue parsing this list.
                            p.tok_i -= 1;
                            try p.warnExpected(.comma);
                        },
                    }
                }
                return p.addNode(.{
                    .tag = .error_set_decl,
                    .main_token = error_token,
                    .data = .{
                        .lhs = undefined,
                        .rhs = p.tok_i - 1, // rbrace
                    },
                });
            },
            else => {
                // Error value: `error.Name`.
                const main_token = p.nextToken();
                const period = p.eatToken(.period);
                if (period == null) try p.warnExpected(.period);
                const identifier = p.eatToken(.identifier);
                if (identifier == null) try p.warnExpected(.identifier);
                return p.addNode(.{
                    .tag = .error_value,
                    .main_token = main_token,
                    .data = .{
                        .lhs = period orelse 0,
                        .rhs = identifier orelse 0,
                    },
                });
            },
        },
        .l_paren => return p.addNode(.{
            .tag = .grouped_expression,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = try p.expectExpr(),
                .rhs = try p.expectToken(.r_paren),
            },
        }),
        else => return null_node,
    }
}

/// Like parsePrimaryTypeExpr, but emits an error instead of returning null_node.
fn expectPrimaryTypeExpr(p: *Parser) !Node.Index {
    const node = try p.parsePrimaryTypeExpr();
    if (node == 0) {
        return p.fail(.expected_primary_type_expr);
    }
    return node;
}

/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
/// ForTypeExpr <- ForPrefix TypeExpr (KEYWORD_else TypeExpr)?
fn parseForTypeExpr(p: *Parser) !Node.Index { const for_token = p.eatToken(.keyword_for) orelse return null_node; _ = try p.expectToken(.l_paren); const array_expr = try p.expectExpr(); _ = try p.expectToken(.r_paren); const found_payload = try p.parsePtrIndexPayload(); if (found_payload == 0) try p.warn(.expected_loop_payload); const then_expr = try p.expectExpr(); const else_token = p.eatToken(.keyword_else) orelse { return p.addNode(.{ .tag = .for_simple, .main_token = for_token, .data = .{ .lhs = array_expr, .rhs = then_expr, }, }); }; const else_expr = try p.expectTypeExpr(); return p.addNode(.{ .tag = .@"for", .main_token = for_token, .data = .{ .lhs = array_expr, .rhs = try p.addExtra(Node.If{ .then_expr = then_expr, .else_expr = else_expr, }), }, }); } /// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr? /// WhileTypeExpr <- WhilePrefix TypeExpr (KEYWORD_else Payload? TypeExpr)? fn parseWhileTypeExpr(p: *Parser) !Node.Index { const while_token = p.eatToken(.keyword_while) orelse return null_node; _ = try p.expectToken(.l_paren); const condition = try p.expectExpr(); _ = try p.expectToken(.r_paren); const then_payload = try p.parsePtrPayload(); const cont_expr = try p.parseWhileContinueExpr(); const then_expr = try p.expectTypeExpr(); const else_token = p.eatToken(.keyword_else) orelse { if (cont_expr == 0) { return p.addNode(.{ .tag = .while_simple, .main_token = while_token, .data = .{ .lhs = condition, .rhs = then_expr, }, }); } else { return p.addNode(.{ .tag = .while_cont, .main_token = while_token, .data = .{ .lhs = condition, .rhs = try p.addExtra(Node.WhileCont{ .cont_expr = cont_expr, .then_expr = then_expr, }), }, }); } }; const else_payload = try p.parsePayload(); const else_expr = try p.expectTypeExpr(); return p.addNode(.{ .tag = .@"while", .main_token = while_token, .data = .{ .lhs = condition, .rhs = try p.addExtra(Node.While{ .cont_expr = cont_expr, .then_expr = then_expr, .else_expr = else_expr, }), }, }); } /// 
SwitchExpr <- KEYWORD_switch LPAREN Expr RPAREN LBRACE SwitchProngList RBRACE fn expectSwitchExpr(p: *Parser) !Node.Index { const switch_token = p.assertToken(.keyword_switch); _ = try p.expectToken(.l_paren); const expr_node = try p.expectExpr(); _ = try p.expectToken(.r_paren); _ = try p.expectToken(.l_brace); const cases = try p.parseSwitchProngList(); const trailing_comma = p.token_tags[p.tok_i - 1] == .comma; _ = try p.expectToken(.r_brace); return p.addNode(.{ .tag = if (trailing_comma) .switch_comma else .@"switch", .main_token = switch_token, .data = .{ .lhs = expr_node, .rhs = try p.addExtra(Node.SubRange{ .start = cases.start, .end = cases.end, }), }, }); } /// AsmExpr <- KEYWORD_asm KEYWORD_volatile? LPAREN Expr AsmOutput? RPAREN /// AsmOutput <- COLON AsmOutputList AsmInput? /// AsmInput <- COLON AsmInputList AsmClobbers? /// AsmClobbers <- COLON StringList /// StringList <- (STRINGLITERAL COMMA)* STRINGLITERAL? /// AsmOutputList <- (AsmOutputItem COMMA)* AsmOutputItem? /// AsmInputList <- (AsmInputItem COMMA)* AsmInputItem? fn expectAsmExpr(p: *Parser) !Node.Index { const asm_token = p.assertToken(.keyword_asm); _ = p.eatToken(.keyword_volatile); _ = try p.expectToken(.l_paren); const template = try p.expectExpr(); if (p.eatToken(.r_paren)) |rparen| { return p.addNode(.{ .tag = .asm_simple, .main_token = asm_token, .data = .{ .lhs = template, .rhs = rparen, }, }); } _ = try p.expectToken(.colon); var list = std.ArrayList(Node.Index).init(p.gpa); defer list.deinit(); while (true) { const output_item = try p.parseAsmOutputItem(); if (output_item == 0) break; try list.append(output_item); switch (p.token_tags[p.tok_i]) { .comma => p.tok_i += 1, .colon, .r_paren, .r_brace, .r_bracket => break, // All possible delimiters. else => { // This is likely just a missing comma; // give an error but continue parsing this list. 
try p.warnExpected(.comma); }, } } if (p.eatToken(.colon)) |_| { while (true) { const input_item = try p.parseAsmInputItem(); if (input_item == 0) break; try list.append(input_item); switch (p.token_tags[p.tok_i]) { .comma => p.tok_i += 1, .colon, .r_paren, .r_brace, .r_bracket => break, // All possible delimiters. else => { // This is likely just a missing comma; // give an error but continue parsing this list. try p.warnExpected(.comma); }, } } if (p.eatToken(.colon)) |_| { while (p.eatToken(.string_literal)) |_| { switch (p.token_tags[p.tok_i]) { .comma => p.tok_i += 1, .colon, .r_paren, .r_brace, .r_bracket => break, else => { // This is likely just a missing comma; // give an error but continue parsing this list. try p.warnExpected(.comma); }, } } } } const rparen = try p.expectToken(.r_paren); const span = try p.listToSpan(list.items); return p.addNode(.{ .tag = .@"asm", .main_token = asm_token, .data = .{ .lhs = template, .rhs = try p.addExtra(Node.Asm{ .items_start = span.start, .items_end = span.end, .rparen = rparen, }), }, }); } /// AsmOutputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN (MINUSRARROW TypeExpr / IDENTIFIER) RPAREN fn parseAsmOutputItem(p: *Parser) !Node.Index { _ = p.eatToken(.l_bracket) orelse return null_node; const identifier = try p.expectToken(.identifier); _ = try p.expectToken(.r_bracket); _ = try p.expectToken(.string_literal); _ = try p.expectToken(.l_paren); const type_expr: Node.Index = blk: { if (p.eatToken(.arrow)) |_| { break :blk try p.expectTypeExpr(); } else { _ = try p.expectToken(.identifier); break :blk null_node; } }; const rparen = try p.expectToken(.r_paren); return p.addNode(.{ .tag = .asm_output, .main_token = identifier, .data = .{ .lhs = type_expr, .rhs = rparen, }, }); } /// AsmInputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN Expr RPAREN fn parseAsmInputItem(p: *Parser) !Node.Index { _ = p.eatToken(.l_bracket) orelse return null_node; const identifier = try p.expectToken(.identifier); _ 
= try p.expectToken(.r_bracket); _ = try p.expectToken(.string_literal); _ = try p.expectToken(.l_paren); const expr = try p.expectExpr(); const rparen = try p.expectToken(.r_paren); return p.addNode(.{ .tag = .asm_input, .main_token = identifier, .data = .{ .lhs = expr, .rhs = rparen, }, }); } /// BreakLabel <- COLON IDENTIFIER fn parseBreakLabel(p: *Parser) !TokenIndex { _ = p.eatToken(.colon) orelse return @as(TokenIndex, 0); return p.expectToken(.identifier); } /// BlockLabel <- IDENTIFIER COLON fn parseBlockLabel(p: *Parser) TokenIndex { if (p.token_tags[p.tok_i] == .identifier and p.token_tags[p.tok_i + 1] == .colon) { const identifier = p.tok_i; p.tok_i += 2; return identifier; } return 0; } /// FieldInit <- DOT IDENTIFIER EQUAL Expr fn parseFieldInit(p: *Parser) !Node.Index { if (p.token_tags[p.tok_i + 0] == .period and p.token_tags[p.tok_i + 1] == .identifier and p.token_tags[p.tok_i + 2] == .equal) { p.tok_i += 3; return p.expectExpr(); } else { return null_node; } } fn expectFieldInit(p: *Parser) !Node.Index { _ = try p.expectToken(.period); _ = try p.expectToken(.identifier); _ = try p.expectToken(.equal); return p.expectExpr(); } /// WhileContinueExpr <- COLON LPAREN AssignExpr RPAREN fn parseWhileContinueExpr(p: *Parser) !Node.Index { _ = p.eatToken(.colon) orelse return null_node; _ = try p.expectToken(.l_paren); const node = try p.parseAssignExpr(); if (node == 0) return p.fail(.expected_expr_or_assignment); _ = try p.expectToken(.r_paren); return node; } /// LinkSection <- KEYWORD_linksection LPAREN Expr RPAREN fn parseLinkSection(p: *Parser) !Node.Index { _ = p.eatToken(.keyword_linksection) orelse return null_node; _ = try p.expectToken(.l_paren); const expr_node = try p.expectExpr(); _ = try p.expectToken(.r_paren); return expr_node; } /// CallConv <- KEYWORD_callconv LPAREN Expr RPAREN fn parseCallconv(p: *Parser) !Node.Index { _ = p.eatToken(.keyword_callconv) orelse return null_node; _ = try p.expectToken(.l_paren); const expr_node = try 
p.expectExpr(); _ = try p.expectToken(.r_paren); return expr_node; } /// ParamDecl /// <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType /// / DOT3 /// ParamType /// <- Keyword_anytype /// / TypeExpr /// This function can return null nodes and then still return nodes afterwards, /// such as in the case of anytype and `...`. Caller must look for rparen to find /// out when there are no more param decls left. fn expectParamDecl(p: *Parser) !Node.Index { _ = try p.eatDocComments(); switch (p.token_tags[p.tok_i]) { .keyword_noalias, .keyword_comptime => p.tok_i += 1, .ellipsis3 => { p.tok_i += 1; return null_node; }, else => {}, } if (p.token_tags[p.tok_i] == .identifier and p.token_tags[p.tok_i + 1] == .colon) { p.tok_i += 2; } switch (p.token_tags[p.tok_i]) { .keyword_anytype => { p.tok_i += 1; return null_node; }, else => return p.expectTypeExpr(), } } /// Payload <- PIPE IDENTIFIER PIPE fn parsePayload(p: *Parser) !TokenIndex { _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0); const identifier = try p.expectToken(.identifier); _ = try p.expectToken(.pipe); return identifier; } /// PtrPayload <- PIPE ASTERISK? IDENTIFIER PIPE fn parsePtrPayload(p: *Parser) !TokenIndex { _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0); _ = p.eatToken(.asterisk); const identifier = try p.expectToken(.identifier); _ = try p.expectToken(.pipe); return identifier; } /// PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE /// Returns the first identifier token, if any. fn parsePtrIndexPayload(p: *Parser) !TokenIndex { _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0); _ = p.eatToken(.asterisk); const identifier = try p.expectToken(.identifier); if (p.eatToken(.comma) != null) { _ = try p.expectToken(.identifier); } _ = try p.expectToken(.pipe); return identifier; } /// SwitchProng <- SwitchCase EQUALRARROW PtrPayload? AssignExpr /// SwitchCase /// <- SwitchItem (COMMA SwitchItem)* COMMA? 
/// / KEYWORD_else
// Parses one prong of a switch expression. Returns null_node (0) when the
// upcoming tokens do not start a prong. Three shapes are produced:
//   - `else => ...`            -> .switch_case_one with lhs == 0
//   - single item `x => ...`   -> .switch_case_one with lhs = item
//   - item list `a, b => ...`  -> .switch_case with lhs = SubRange extra data
// In every shape the optional `|*payload|` capture is consumed but its token
// is discarded (`_ =`); presumably later stages re-locate it from the arrow
// token — TODO confirm against the AST consumers.
fn parseSwitchProng(p: *Parser) !Node.Index {
    if (p.eatToken(.keyword_else)) |_| {
        const arrow_token = try p.expectToken(.equal_angle_bracket_right);
        _ = try p.parsePtrPayload();
        return p.addNode(.{
            .tag = .switch_case_one,
            .main_token = arrow_token,
            // lhs == 0 encodes the `else` prong (no case item).
            .data = .{
                .lhs = 0,
                .rhs = try p.expectAssignExpr(),
            },
        });
    }
    const first_item = try p.parseSwitchItem();
    if (first_item == 0) return null_node;
    // Fast path: exactly one item, no comma before the arrow.
    if (p.eatToken(.equal_angle_bracket_right)) |arrow_token| {
        _ = try p.parsePtrPayload();
        return p.addNode(.{
            .tag = .switch_case_one,
            .main_token = arrow_token,
            .data = .{
                .lhs = first_item,
                .rhs = try p.expectAssignExpr(),
            },
        });
    }
    // General path: collect a comma-separated item list, then the arrow.
    var list = std.ArrayList(Node.Index).init(p.gpa);
    defer list.deinit();
    try list.append(first_item);
    while (p.eatToken(.comma)) |_| {
        const next_item = try p.parseSwitchItem();
        // A trailing comma is allowed: the item after it may be absent.
        if (next_item == 0) break;
        try list.append(next_item);
    }
    const span = try p.listToSpan(list.items);
    const arrow_token = try p.expectToken(.equal_angle_bracket_right);
    _ = try p.parsePtrPayload();
    return p.addNode(.{
        .tag = .switch_case,
        .main_token = arrow_token,
        .data = .{
            // Item list is stored out-of-line as a SubRange in extra data.
            .lhs = try p.addExtra(Node.SubRange{
                .start = span.start,
                .end = span.end,
            }),
            .rhs = try p.expectAssignExpr(),
        },
    });
}
/// SwitchItem <- Expr (DOT3 Expr)?
// Parses a single switch case item: either a plain expression or an
// inclusive range `a...b` (.switch_range node). Returns null_node when no
// expression starts here.
fn parseSwitchItem(p: *Parser) !Node.Index {
    const expr = try p.parseExpr();
    if (expr == 0) return null_node;
    if (p.eatToken(.ellipsis3)) |token| {
        return p.addNode(.{
            .tag = .switch_range,
            .main_token = token,
            .data = .{
                .lhs = expr,
                .rhs = try p.expectExpr(),
            },
        });
    }
    return expr;
}

// Accumulated qualifiers parsed from a pointer type prefix. A value of 0
// (null_node) in any field means the corresponding clause was absent.
const PtrModifiers = struct {
    align_node: Node.Index,
    bit_range_start: Node.Index,
    bit_range_end: Node.Index,
};

// Consumes any run of pointer qualifiers (`align(...)`, `const`, `volatile`,
// `allowzero`) in any order. Duplicate qualifiers produce a warning but
// parsing continues; only the first `align` expression is kept... actually
// note: a repeated `align(...)` overwrites result.align_node after warning.
fn parsePtrModifiers(p: *Parser) !PtrModifiers {
    var result: PtrModifiers = .{
        .align_node = 0,
        .bit_range_start = 0,
        .bit_range_end = 0,
    };
    var saw_const = false;
    var saw_volatile = false;
    var saw_allowzero = false;
    while (true) {
        switch (p.token_tags[p.tok_i]) {
            .keyword_align => {
                if (result.align_node != 0) {
                    try p.warn(.extra_align_qualifier);
                }
                p.tok_i += 1;
                _ = try p.expectToken(.l_paren);
                result.align_node = try p.expectExpr();
                // Optional bit-range form: align(expr:start:end).
                if (p.eatToken(.colon)) |_| {
                    result.bit_range_start = try p.expectExpr();
                    _ = try p.expectToken(.colon);
                    result.bit_range_end = try p.expectExpr();
                }
                _ = try p.expectToken(.r_paren);
            },
            .keyword_const => {
                if (saw_const) {
                    try p.warn(.extra_const_qualifier);
                }
                p.tok_i += 1;
                saw_const = true;
            },
            .keyword_volatile => {
                if (saw_volatile) {
                    try p.warn(.extra_volatile_qualifier);
                }
                p.tok_i += 1;
                saw_volatile = true;
            },
            .keyword_allowzero => {
                if (saw_allowzero) {
                    try p.warn(.extra_allowzero_qualifier);
                }
                p.tok_i += 1;
                saw_allowzero = true;
            },
            // First non-qualifier token ends the run.
            else => return result,
        }
    }
}

/// SuffixOp
///  <- LBRACKET Expr (DOT2 (Expr? (COLON Expr)?)?)? RBRACKET
///   / DOT IDENTIFIER
///   / DOTASTERISK
///   / DOTQUESTIONMARK
// Parses one suffix operator applied to `lhs`: indexing `a[i]`, slicing
// `a[i..]` / `a[i..j]` / `a[i..j :s]`, field access `a.b`, pointer deref
// `a.*`, or optional unwrap `a.?`. Returns null_node when the current token
// does not start a suffix op. Does not consume anything in that case except
// in the `.period`+unknown recovery branch, which eats the period and warns.
fn parseSuffixOp(p: *Parser, lhs: Node.Index) !Node.Index {
    switch (p.token_tags[p.tok_i]) {
        .l_bracket => {
            const lbracket = p.nextToken();
            const index_expr = try p.expectExpr();

            if (p.eatToken(.ellipsis2)) |_| {
                // Slice end is optional: `a[i..]` leaves end_expr == 0.
                const end_expr = try p.parseExpr();
                if (p.eatToken(.colon)) |_| {
                    // Sentinel-terminated slice: a[i..j :sentinel].
                    const sentinel = try p.parseExpr();
                    _ = try p.expectToken(.r_bracket);
                    return p.addNode(.{
                        .tag = .slice_sentinel,
                        .main_token = lbracket,
                        .data = .{
                            .lhs = lhs,
                            .rhs = try p.addExtra(Node.SliceSentinel{
                                .start = index_expr,
                                .end = end_expr,
                                .sentinel = sentinel,
                            }),
                        },
                    });
                }
                _ = try p.expectToken(.r_bracket);
                if (end_expr == 0) {
                    return p.addNode(.{
                        .tag = .slice_open,
                        .main_token = lbracket,
                        .data = .{
                            .lhs = lhs,
                            .rhs = index_expr,
                        },
                    });
                }
                return p.addNode(.{
                    .tag = .slice,
                    .main_token = lbracket,
                    .data = .{
                        .lhs = lhs,
                        .rhs = try p.addExtra(Node.Slice{
                            .start = index_expr,
                            .end = end_expr,
                        }),
                    },
                });
            }
            // No `..` seen: plain array/pointer indexing.
            _ = try p.expectToken(.r_bracket);
            return p.addNode(.{
                .tag = .array_access,
                .main_token = lbracket,
                .data = .{
                    .lhs = lhs,
                    .rhs = index_expr,
                },
            });
        },
        .period_asterisk => return p.addNode(.{
            .tag = .deref,
            .main_token = p.nextToken(),
            .data = .{
                .lhs = lhs,
                .rhs = undefined,
            },
        }),
        .invalid_periodasterisks => {
            // `a.**` style typo: warn, but still build a deref node so
            // parsing can continue.
            try p.warn(.asterisk_after_ptr_deref);
            return p.addNode(.{
                .tag = .deref,
                .main_token = p.nextToken(),
                .data = .{
                    .lhs = lhs,
                    .rhs = undefined,
                },
            });
        },
        .period => switch (p.token_tags[p.tok_i + 1]) {
            .identifier => return p.addNode(.{
                .tag = .field_access,
                .main_token = p.nextToken(),
                .data = .{
                    .lhs = lhs,
                    // Second nextToken() consumes the identifier token.
                    .rhs = p.nextToken(),
                },
            }),
            .question_mark => return p.addNode(.{
                .tag = .unwrap_optional,
                .main_token = p.nextToken(),
                .data = .{
                    .lhs = lhs,
                    .rhs = p.nextToken(),
                },
            }),
            else => {
                // Recovery: consume the period, warn, and report "no suffix op".
                p.tok_i += 1;
                try p.warn(.expected_suffix_op);
                return null_node;
            },
        },
        else => return null_node,
    }
}

/// Caller must have already verified the first token.
/// ContainerDeclType /// <- KEYWORD_struct /// / KEYWORD_enum (LPAREN Expr RPAREN)? /// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)? /// / KEYWORD_opaque fn parseContainerDeclAuto(p: *Parser) !Node.Index { const main_token = p.nextToken(); const arg_expr = switch (p.token_tags[main_token]) { .keyword_struct, .keyword_opaque => null_node, .keyword_enum => blk: { if (p.eatToken(.l_paren)) |_| { const expr = try p.expectExpr(); _ = try p.expectToken(.r_paren); break :blk expr; } else { break :blk null_node; } }, .keyword_union => blk: { if (p.eatToken(.l_paren)) |_| { if (p.eatToken(.keyword_enum)) |_| { if (p.eatToken(.l_paren)) |_| { const enum_tag_expr = try p.expectExpr(); _ = try p.expectToken(.r_paren); _ = try p.expectToken(.r_paren); _ = try p.expectToken(.l_brace); const members = try p.parseContainerMembers(); const members_span = try members.toSpan(p); _ = try p.expectToken(.r_brace); return p.addNode(.{ .tag = switch (members.trailing) { true => .tagged_union_enum_tag_trailing, false => .tagged_union_enum_tag, }, .main_token = main_token, .data = .{ .lhs = enum_tag_expr, .rhs = try p.addExtra(members_span), }, }); } else { _ = try p.expectToken(.r_paren); _ = try p.expectToken(.l_brace); const members = try p.parseContainerMembers(); _ = try p.expectToken(.r_brace); if (members.len <= 2) { return p.addNode(.{ .tag = switch (members.trailing) { true => .tagged_union_two_trailing, false => .tagged_union_two, }, .main_token = main_token, .data = .{ .lhs = members.lhs, .rhs = members.rhs, }, }); } else { const span = try members.toSpan(p); return p.addNode(.{ .tag = switch (members.trailing) { true => .tagged_union_trailing, false => .tagged_union, }, .main_token = main_token, .data = .{ .lhs = span.start, .rhs = span.end, }, }); } } } else { const expr = try p.expectExpr(); _ = try p.expectToken(.r_paren); break :blk expr; } } else { break :blk null_node; } }, else => { p.tok_i -= 1; return p.fail(.expected_container); }, }; _ 
= try p.expectToken(.l_brace); const members = try p.parseContainerMembers(); _ = try p.expectToken(.r_brace); if (arg_expr == 0) { if (members.len <= 2) { return p.addNode(.{ .tag = switch (members.trailing) { true => .container_decl_two_trailing, false => .container_decl_two, }, .main_token = main_token, .data = .{ .lhs = members.lhs, .rhs = members.rhs, }, }); } else { const span = try members.toSpan(p); return p.addNode(.{ .tag = switch (members.trailing) { true => .container_decl_trailing, false => .container_decl, }, .main_token = main_token, .data = .{ .lhs = span.start, .rhs = span.end, }, }); } } else { const span = try members.toSpan(p); return p.addNode(.{ .tag = switch (members.trailing) { true => .container_decl_arg_trailing, false => .container_decl_arg, }, .main_token = main_token, .data = .{ .lhs = arg_expr, .rhs = try p.addExtra(Node.SubRange{ .start = span.start, .end = span.end, }), }, }); } } /// Holds temporary data until we are ready to construct the full ContainerDecl AST node. /// ByteAlign <- KEYWORD_align LPAREN Expr RPAREN fn parseByteAlign(p: *Parser) !Node.Index { _ = p.eatToken(.keyword_align) orelse return null_node; _ = try p.expectToken(.l_paren); const expr = try p.expectExpr(); _ = try p.expectToken(.r_paren); return expr; } /// SwitchProngList <- (SwitchProng COMMA)* SwitchProng? fn parseSwitchProngList(p: *Parser) !Node.SubRange { return ListParseFn(parseSwitchProng)(p); } /// ParamDeclList <- (ParamDecl COMMA)* ParamDecl? 
fn parseParamDeclList(p: *Parser) !SmallSpan { _ = try p.expectToken(.l_paren); if (p.eatToken(.r_paren)) |_| { return SmallSpan{ .zero_or_one = 0 }; } const param_one = while (true) { const param = try p.expectParamDecl(); if (param != 0) break param; switch (p.token_tags[p.nextToken()]) { .comma => { if (p.eatToken(.r_paren)) |_| { return SmallSpan{ .zero_or_one = 0 }; } }, .r_paren => return SmallSpan{ .zero_or_one = 0 }, else => { // This is likely just a missing comma; // give an error but continue parsing this list. p.tok_i -= 1; try p.warnExpected(.comma); }, } } else unreachable; const param_two = while (true) { switch (p.token_tags[p.nextToken()]) { .comma => {}, .r_paren => return SmallSpan{ .zero_or_one = param_one }, .colon, .r_brace, .r_bracket => { p.tok_i -= 1; return p.failExpected(.r_paren); }, else => { // This is likely just a missing comma; // give an error but continue parsing this list. p.tok_i -= 1; try p.warnExpected(.comma); }, } if (p.eatToken(.r_paren)) |_| { return SmallSpan{ .zero_or_one = param_one }; } const param = try p.expectParamDecl(); if (param != 0) break param; } else unreachable; var list = std.ArrayList(Node.Index).init(p.gpa); defer list.deinit(); try list.appendSlice(&.{ param_one, param_two }); while (true) { switch (p.token_tags[p.nextToken()]) { .comma => {}, .r_paren => return SmallSpan{ .multi = list.toOwnedSlice() }, .colon, .r_brace, .r_bracket => { p.tok_i -= 1; return p.failExpected(.r_paren); }, else => { // This is likely just a missing comma; // give an error but continue parsing this list. 
p.tok_i -= 1; try p.warnExpected(.comma); }, } if (p.eatToken(.r_paren)) |_| { return SmallSpan{ .multi = list.toOwnedSlice() }; } const param = try p.expectParamDecl(); if (param != 0) try list.append(param); } } const NodeParseFn = fn (p: *Parser) Error!Node.Index; fn ListParseFn(comptime nodeParseFn: anytype) (fn (p: *Parser) Error!Node.SubRange) { return struct { pub fn parse(p: *Parser) Error!Node.SubRange { var list = std.ArrayList(Node.Index).init(p.gpa); defer list.deinit(); while (true) { const item = try nodeParseFn(p); if (item == 0) break; try list.append(item); switch (p.token_tags[p.tok_i]) { .comma => p.tok_i += 1, // all possible delimiters .colon, .r_paren, .r_brace, .r_bracket => break, else => { // This is likely just a missing comma; // give an error but continue parsing this list. try p.warnExpected(.comma); }, } } return p.listToSpan(list.items); } }.parse; } /// FnCallArguments <- LPAREN ExprList RPAREN /// ExprList <- (Expr COMMA)* Expr? fn parseBuiltinCall(p: *Parser) !Node.Index { const builtin_token = p.assertToken(.builtin); if (p.token_tags[p.nextToken()] != .l_paren) { p.tok_i -= 1; try p.warn(.expected_param_list); // Pretend this was an identifier so we can continue parsing. return p.addNode(.{ .tag = .identifier, .main_token = builtin_token, .data = .{ .lhs = undefined, .rhs = undefined, }, }); } if (p.eatToken(.r_paren)) |_| { return p.addNode(.{ .tag = .builtin_call_two, .main_token = builtin_token, .data = .{ .lhs = 0, .rhs = 0, }, }); } const param_one = try p.expectExpr(); switch (p.token_tags[p.nextToken()]) { .comma => { if (p.eatToken(.r_paren)) |_| { return p.addNode(.{ .tag = .builtin_call_two_comma, .main_token = builtin_token, .data = .{ .lhs = param_one, .rhs = 0, }, }); } }, .r_paren => return p.addNode(.{ .tag = .builtin_call_two, .main_token = builtin_token, .data = .{ .lhs = param_one, .rhs = 0, }, }), else => { // This is likely just a missing comma; // give an error but continue parsing this list. 
p.tok_i -= 1; try p.warnExpected(.comma); }, } const param_two = try p.expectExpr(); switch (p.token_tags[p.nextToken()]) { .comma => { if (p.eatToken(.r_paren)) |_| { return p.addNode(.{ .tag = .builtin_call_two_comma, .main_token = builtin_token, .data = .{ .lhs = param_one, .rhs = param_two, }, }); } }, .r_paren => return p.addNode(.{ .tag = .builtin_call_two, .main_token = builtin_token, .data = .{ .lhs = param_one, .rhs = param_two, }, }), else => { // This is likely just a missing comma; // give an error but continue parsing this list. p.tok_i -= 1; try p.warnExpected(.comma); }, } var list = std.ArrayList(Node.Index).init(p.gpa); defer list.deinit(); try list.appendSlice(&.{ param_one, param_two }); while (true) { const param = try p.expectExpr(); try list.append(param); switch (p.token_tags[p.nextToken()]) { .comma => { if (p.eatToken(.r_paren)) |_| { const params = try p.listToSpan(list.items); return p.addNode(.{ .tag = .builtin_call_comma, .main_token = builtin_token, .data = .{ .lhs = params.start, .rhs = params.end, }, }); } continue; }, .r_paren => { const params = try p.listToSpan(list.items); return p.addNode(.{ .tag = .builtin_call, .main_token = builtin_token, .data = .{ .lhs = params.start, .rhs = params.end, }, }); }, else => { // This is likely just a missing comma; // give an error but continue parsing this list. 
p.tok_i -= 1; try p.warnExpected(.comma); }, } } } // string literal or multiline string literal fn parseStringLiteral(p: *Parser) !Node.Index { switch (p.token_tags[p.tok_i]) { .string_literal => { const main_token = p.nextToken(); return p.addNode(.{ .tag = .string_literal, .main_token = main_token, .data = .{ .lhs = undefined, .rhs = undefined, }, }); }, .multiline_string_literal_line => { const first_line = p.nextToken(); while (p.token_tags[p.tok_i] == .multiline_string_literal_line) { p.tok_i += 1; } return p.addNode(.{ .tag = .multiline_string_literal, .main_token = first_line, .data = .{ .lhs = first_line, .rhs = p.tok_i - 1, }, }); }, else => return null_node, } } fn expectStringLiteral(p: *Parser) !Node.Index { const node = try p.parseStringLiteral(); if (node == 0) { return p.fail(.expected_string_literal); } return node; } fn expectIntegerLiteral(p: *Parser) !Node.Index { return p.addNode(.{ .tag = .integer_literal, .main_token = try p.expectToken(.integer_literal), .data = .{ .lhs = undefined, .rhs = undefined, }, }); } /// KEYWORD_if LPAREN Expr RPAREN PtrPayload? Body (KEYWORD_else Payload? Body)? 
// Shared driver for `if` expressions/statements: the caller supplies the
// body parser (expression vs. block form) via bodyParseFn. Produces
// .if_simple when there is no `else`, otherwise .@"if" with a Node.If in
// extra data. The payload tokens are consumed but discarded here
// (then_payload/else_payload are unused); presumably later passes recover
// them from token positions — TODO confirm.
fn parseIf(p: *Parser, bodyParseFn: NodeParseFn) !Node.Index {
    const if_token = p.eatToken(.keyword_if) orelse return null_node;
    _ = try p.expectToken(.l_paren);
    const condition = try p.expectExpr();
    _ = try p.expectToken(.r_paren);
    const then_payload = try p.parsePtrPayload();

    const then_expr = try bodyParseFn(p);
    if (then_expr == 0) return p.fail(.invalid_token);

    const else_token = p.eatToken(.keyword_else) orelse return p.addNode(.{
        .tag = .if_simple,
        .main_token = if_token,
        .data = .{
            .lhs = condition,
            .rhs = then_expr,
        },
    });
    const else_payload = try p.parsePayload();
    const else_expr = try bodyParseFn(p);
    if (else_expr == 0) return p.fail(.invalid_token);

    return p.addNode(.{
        .tag = .@"if",
        .main_token = if_token,
        .data = .{
            .lhs = condition,
            .rhs = try p.addExtra(Node.If{
                .then_expr = then_expr,
                .else_expr = else_expr,
            }),
        },
    });
}

/// Skips over doc comment tokens. Returns the first one, if any.
// A doc comment on the same line as a preceding token is diagnosed
// (.same_line_doc_comment) and excluded from the returned run.
fn eatDocComments(p: *Parser) !?TokenIndex {
    if (p.eatToken(.doc_comment)) |tok| {
        var first_line = tok;
        if (tok > 0 and tokensOnSameLine(p, tok - 1, tok)) {
            try p.warnMsg(.{
                .tag = .same_line_doc_comment,
                .token = tok,
            });
            // Drop the offending same-line comment; the run (if any)
            // restarts at the next doc comment token.
            first_line = p.eatToken(.doc_comment) orelse return null;
        }
        while (p.eatToken(.doc_comment)) |_| {}
        return first_line;
    }
    return null;
}

// True when no newline appears in the source between the starts of the two
// tokens. Note: only checks up to token2's *start*, which is sufficient for
// the doc-comment check above.
fn tokensOnSameLine(p: *Parser, token1: TokenIndex, token2: TokenIndex) bool {
    return std.mem.indexOfScalar(u8, p.source[p.token_starts[token1]..p.token_starts[token2]], '\n') == null;
}

// Consumes and returns the current token only if it matches `tag`.
fn eatToken(p: *Parser, tag: Token.Tag) ?TokenIndex {
    return if (p.token_tags[p.tok_i] == tag) p.nextToken() else null;
}

// Consumes the current token, asserting (debug-only invariant) that it has
// the expected tag. Use only when the caller has already peeked it.
fn assertToken(p: *Parser, tag: Token.Tag) TokenIndex {
    const token = p.nextToken();
    assert(p.token_tags[token] == tag);
    return token;
}

// Consumes the current token or fails with .expected_token. On failure the
// cursor is rewound so error recovery sees the unexpected token again.
fn expectToken(p: *Parser, tag: Token.Tag) Error!TokenIndex {
    const token = p.nextToken();
    if (p.token_tags[token] != tag) {
        p.tok_i -= 1; // Go back so that we can recover properly.
        return p.failMsg(.{
            .tag = .expected_token,
            .token = token,
            .extra = .{ .expected_tag = tag },
        });
    }
    return token;
}

// Like expectToken, but on mismatch emits a warning (not a hard failure),
// leaves the cursor in place, and returns null so the caller can continue.
fn expectTokenRecoverable(p: *Parser, tag: Token.Tag) !?TokenIndex {
    if (p.token_tags[p.tok_i] != tag) {
        try p.warnExpected(tag);
        return null;
    } else {
        return p.nextToken();
    }
}

// Returns the current token index and advances the cursor by one.
fn nextToken(p: *Parser) TokenIndex {
    const result = p.tok_i;
    p.tok_i += 1;
    return result;
}
};

test {
    _ = @import("parser_test.zig");
}
https://raw.githubusercontent.com/creationix/zig-toolset/9ad208cd93d1f05eb772deff4af24f58eb42386f/zig-linux-x86_64-0.8.0-dev.1860+1fada3746/lib/std/zig/parse.zig
const std = @import("std"); const build_options = @import("build_options"); const builtin = @import("builtin"); const assert = std.debug.assert; const codegen = @import("../../codegen.zig"); const leb128 = std.leb; const link = @import("../../link.zig"); const log = std.log.scoped(.codegen); const math = std.math; const mem = std.mem; const trace = @import("../../tracy.zig").trace; const Air = @import("../../Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("../../Compilation.zig"); const DebugInfoOutput = codegen.DebugInfoOutput; const DW = std.dwarf; const ErrorMsg = Module.ErrorMsg; const FnResult = codegen.FnResult; const GenerateSymbolError = codegen.GenerateSymbolError; const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Mir = @import("Mir.zig"); const Module = @import("../../Module.zig"); const Target = std.Target; const Type = @import("../../type.zig").Type; const TypedValue = @import("../../TypedValue.zig"); const Value = @import("../../value.zig").Value; const bits = @import("bits.zig"); const abi = @import("abi.zig"); const errUnionPayloadOffset = codegen.errUnionPayloadOffset; const errUnionErrorOffset = codegen.errUnionErrorOffset; const Condition = bits.Condition; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; const Register = bits.Register; const gp = abi.RegisterClass.gp; const sse = abi.RegisterClass.sse; const InnerError = error{ OutOfMemory, CodegenFail, OutOfRegisters, }; gpa: Allocator, air: Air, liveness: Liveness, bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, mod_fn: *const Module.Fn, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: MCValue, fn_type: Type, arg_index: u32, src_loc: Module.SrcLoc, stack_align: u32, eflags_inst: ?Air.Inst.Index = null, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, /// MIR extra data mir_extra: std.ArrayListUnmanaged(u32) = .{}, /// Byte offset 
end_di_line: u32,
end_di_column: u32,

/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},

/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
/// of the table of mappings from instructions to `MCValue` from within the branch.
/// This way we can modify the `MCValue` for an instruction in different ways
/// within different branches. Special consideration is needed when a branch
/// joins with its parent, to make sure all instructions have the same MCValue
/// across each runtime branch upon joining.
branch_stack: *std.ArrayList(Branch),

// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},

register_manager: RegisterManager = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},

/// Offset from the stack base, representing the end of the stack frame.
max_end_stack: u32 = 0,
/// Represents the current end stack offset. If there is no existing slot
/// to place a new stack allocation, it goes here, and then bumps `max_end_stack`.
next_stack_offset: u32 = 0,

/// Debug field, used to find bugs in the compiler.
air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init,

/// For mir debug info, maps a mir index to a air index
mir_to_air_map: if (builtin.mode == .Debug) std.AutoHashMap(Mir.Inst.Index, Air.Inst.Index) else void,

// In safe builds this is a counter; otherwise it is `void` (zero cost).
const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};

/// Where the result of an AIR instruction currently lives during codegen.
pub const MCValue = union(enum) {
    /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
    /// TODO Look into deleting this tag and using `dead` instead, since every use
    /// of MCValue.none should be instead looking at the type and noticing it is 0 bits.
    none,
    /// Control flow will not allow this value to be observed.
    unreach,
    /// No more references to this value remain.
    dead,
    /// The value is undefined.
    undef,
    /// A pointer-sized integer that fits in a register.
    /// If the type is a pointer, this is the pointer address in virtual address space.
    immediate: u64,
    /// The value is in a GP register.
    register: Register,
    /// The value is a tuple { wrapped, overflow } where wrapped value is stored in the GP register.
    register_overflow: struct { reg: Register, eflags: Condition },
    /// The value is in memory at a hard-coded address.
    /// If the type is a pointer, it means the pointer address is at this memory location.
    memory: u64,
    /// The value is in memory but requires a linker relocation fixup.
    linker_load: codegen.LinkerLoad,
    /// The value is one of the stack variables.
    /// If the type is a pointer, it means the pointer address is in the stack at this offset.
    stack_offset: i32,
    /// The value is a pointer to one of the stack variables (payload is stack offset).
    ptr_stack_offset: i32,
    /// The value resides in the EFLAGS register.
    eflags: Condition,

    /// True for any variant that refers to a memory location.
    fn isMemory(mcv: MCValue) bool {
        return switch (mcv) {
            .memory,
            .stack_offset,
            .ptr_stack_offset,
            .linker_load,
            => true,
            else => false,
        };
    }

    fn isImmediate(mcv: MCValue) bool {
        return switch (mcv) {
            .immediate => true,
            else => false,
        };
    }

    fn isRegister(mcv: MCValue) bool {
        return switch (mcv) {
            .register => true,
            else => false,
        };
    }
};

/// One overlay of instruction -> MCValue mappings; see `branch_stack`.
const Branch = struct {
    inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},

    fn deinit(self: *Branch, gpa: Allocator) void {
        self.inst_table.deinit(gpa);
        self.* = undefined;
    }

    const FormatContext = struct {
        insts: []const Air.Inst.Index,
        mcvs: []const MCValue,
    };

    // Formatter implementation used by `fmtDebug` for log output.
    fn fmt(
        ctx: FormatContext,
        comptime unused_format_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) @TypeOf(writer).Error!void {
        _ = options;
        comptime assert(unused_format_string.len == 0);
        try writer.writeAll("Branch {\n");
        for (ctx.insts) |inst, i| {
            const mcv = ctx.mcvs[i];
            try writer.print(" %{d} => {}\n", .{ inst, mcv });
        }
        try writer.writeAll("}");
    }

    // Deliberately a compile error: forces callers to go through `fmtDebug`.
    fn format(branch: Branch, comptime unused_format_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        _ = branch;
        _ = unused_format_string;
        _ = options;
        _ = writer;
        @compileError("do not format Branch directly; use ty.fmtDebug()");
    }

    fn fmtDebug(self: @This()) std.fmt.Formatter(fmt) {
        return .{ .data = .{
            .insts = self.inst_table.keys(),
            .mcvs = self.inst_table.values(),
        } };
    }
};

const StackAllocation = struct {
    inst: Air.Inst.Index,
    /// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*)
    size: u32,
};

/// Per-`block` instruction state; see `blocks`.
const BlockData = struct {
    relocs: std.ArrayListUnmanaged(Mir.Inst.Index),
    /// The first break instruction encounters `null` here and chooses a
    /// machine code value for the block result, populating this field.
    /// Following break instructions encounter that value and use it for
    /// the location to store their block results.
mcv: MCValue,
};

/// Drives `Liveness.BigTomb` for instructions with more than `bpi - 1` operands.
const BigTomb = struct {
    function: *Self,
    inst: Air.Inst.Index,
    lbt: Liveness.BigTomb,

    // Process one operand's death bit.
    fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
        const dies = bt.lbt.feed();
        const op_index = Air.refToIndex(op_ref) orelse return;
        if (!dies) return;
        bt.function.processDeath(op_index);
    }

    // Record the result MCValue (if the instruction is used) and finish bookkeeping.
    fn finishAir(bt: *BigTomb, result: MCValue) void {
        const is_used = !bt.function.liveness.isUnused(bt.inst);
        if (is_used) {
            log.debug(" (saving %{d} => {})", .{ bt.inst, result });
            const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1];
            branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result);
        }
        bt.function.finishAirBookkeeping();
    }
};

const Self = @This();

/// Entry point: generates machine code for one function.
/// On a reported codegen error, returns `FnResult{ .fail = ... }` rather than
/// an error, so the caller can attach the message to the Decl.
pub fn generate(
    bin_file: *link.File,
    src_loc: Module.SrcLoc,
    module_fn: *Module.Fn,
    air: Air,
    liveness: Liveness,
    code: *std.ArrayList(u8),
    debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult {
    if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
        @panic("Attempted to compile for architecture that was disabled by build configuration");
    }

    const mod = bin_file.options.module.?;
    const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
    assert(fn_owner_decl.has_tv);
    const fn_type = fn_owner_decl.ty;

    // The outermost branch holds the function-level instruction table; it must
    // be the only one left when codegen finishes.
    var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
    defer {
        assert(branch_stack.items.len == 1);
        branch_stack.items[0].deinit(bin_file.allocator);
        branch_stack.deinit();
    }
    try branch_stack.append(.{});

    var function = Self{
        .gpa = bin_file.allocator,
        .air = air,
        .liveness = liveness,
        .target = &bin_file.options.target,
        .bin_file = bin_file,
        .debug_output = debug_output,
        .mod_fn = module_fn,
        .err_msg = null,
        .args = undefined, // populated after `resolveCallingConventionValues`
        .ret_mcv = undefined, // populated after `resolveCallingConventionValues`
        .fn_type = fn_type,
        .arg_index = 0,
        .branch_stack = &branch_stack,
        .src_loc = src_loc,
        .stack_align = undefined,
        .end_di_line = module_fn.rbrace_line,
        .end_di_column = module_fn.rbrace_column,
        .mir_to_air_map = if (builtin.mode == .Debug)
            std.AutoHashMap(Mir.Inst.Index, Air.Inst.Index).init(bin_file.allocator)
        else {},
    };
    defer function.stack.deinit(bin_file.allocator);
    defer function.blocks.deinit(bin_file.allocator);
    defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
    defer function.mir_instructions.deinit(bin_file.allocator);
    defer function.mir_extra.deinit(bin_file.allocator);
    defer if (builtin.mode == .Debug) function.mir_to_air_map.deinit();

    var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
        error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
        error.OutOfRegisters => return FnResult{
            .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
        },
        else => |e| return e,
    };
    defer call_info.deinit(&function);

    function.args = call_info.args;
    function.ret_mcv = call_info.return_value;
    function.stack_align = call_info.stack_align;
    function.max_end_stack = call_info.stack_byte_count;

    function.gen() catch |err| switch (err) {
        error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
        error.OutOfRegisters => return FnResult{
            .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
        },
        else => |e| return e,
    };

    var mir = Mir{
        .instructions = function.mir_instructions.toOwnedSlice(),
        .extra = try function.mir_extra.toOwnedSlice(bin_file.allocator),
    };
    defer mir.deinit(bin_file.allocator);

    var emit = Emit{
        .mir = mir,
        .bin_file = bin_file,
        .debug_output = debug_output,
        .target = &bin_file.options.target,
        .src_loc = src_loc,
        .code = code,
        .prev_di_pc = 0,
        .prev_di_line = module_fn.lbrace_line,
        .prev_di_column = module_fn.lbrace_column,
    };
    defer emit.deinit();

    emit.lowerMir() catch |err| switch (err) {
        error.EmitFail => return FnResult{ .fail = emit.err_msg.?
},
        else => |e| return e,
    };

    if (function.err_msg) |em| {
        return FnResult{ .fail = em };
    } else {
        return FnResult{ .appended = {} };
    }
}

/// Appends one MIR instruction and returns its index into `mir_instructions`.
/// Only fails on OOM; capacity is reserved before appending.
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
    const gpa = self.gpa;
    try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
    // Fix: this is an index into `mir_instructions`, i.e. a `Mir.Inst.Index`
    // (the declared return type) — previously cast to `Air.Inst.Index`, which
    // conflated the AIR and MIR index namespaces.
    const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
    self.mir_instructions.appendAssumeCapacity(inst);
    return result_index;
}

/// Serializes `extra` (a struct of u32/i32 fields) into `mir_extra`,
/// returning the starting index of the payload.
pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len);
    return self.addExtraAssumeCapacity(extra);
}

/// Same as `addExtra` but asserts capacity was already reserved.
pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    const result = @intCast(u32, self.mir_extra.items.len);
    inline for (fields) |field| {
        self.mir_extra.appendAssumeCapacity(switch (field.type) {
            u32 => @field(extra, field.name),
            i32 => @bitCast(u32, @field(extra, field.name)),
            else => @compileError("bad field type"),
        });
    }
    return result;
}

/// Emits the function prologue, lowers the body, and emits the epilogue.
/// Several prologue/epilogue instructions are emitted as `nop` placeholders
/// and backpatched once the final stack frame size is known.
fn gen(self: *Self) InnerError!void {
    const cc = self.fn_type.fnCallingConvention();
    if (cc != .Naked) {
        _ = try self.addInst(.{
            .tag = .push,
            .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
            .data = undefined, // unused for push reg,
        });
        _ = try self.addInst(.{
            .tag = .mov,
            .ops = Mir.Inst.Ops.encode(.{
                .reg1 = .rbp,
                .reg2 = .rsp,
            }),
            .data = undefined,
        });
        // We want to subtract the aligned stack frame size from rsp here, but we don't
        // yet know how big it will be, so we leave room for a 4-byte stack size.
        // TODO During semantic analysis, check if there are no function calls. If there
        // are none, here we can omit the part where we subtract and then add rsp.
        const backpatch_stack_sub = try self.addInst(.{
            .tag = .nop,
            .ops = undefined,
            .data = undefined,
        });

        if (self.ret_mcv == .stack_offset) {
            // The address where to store the return value for the caller is in a
            // register which the callee is free to clobber.
            // Therefore, we purposely spill it to stack immediately.
            const stack_offset = mem.alignForwardGeneric(u32, self.next_stack_offset + 8, 8);
            self.next_stack_offset = stack_offset;
            self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);

            const ret_reg = abi.getCAbiIntParamRegs(self.target.*)[0];
            try self.genSetStack(Type.usize, @intCast(i32, stack_offset), MCValue{ .register = ret_reg }, .{});
            self.ret_mcv = MCValue{ .stack_offset = @intCast(i32, stack_offset) };
            log.debug("gen: spilling {s} to stack at offset {}", .{ @tagName(ret_reg), stack_offset });
        }

        _ = try self.addInst(.{
            .tag = .dbg_prologue_end,
            .ops = undefined,
            .data = undefined,
        });

        // Push the callee-preserved regs that were actually in use.
        // Placeholder: backpatched to `push_regs` below once the set of
        // allocated registers is known.
        const backpatch_push_callee_preserved_regs = try self.addInst(.{
            .tag = .nop,
            .ops = undefined,
            .data = undefined,
        });

        try self.genBody(self.air.getMainBody());

        // TODO can single exitlude jump reloc be elided? What if it is not at the end of the code?
        // Example:
        // pub fn main() void {
        //     maybeErr() catch return;
        //     unreachable;
        // }
        // Eliding the reloc will cause a miscompilation in this case.
        for (self.exitlude_jump_relocs.items) |jmp_reloc| {
            self.mir_instructions.items(.data)[jmp_reloc].inst = @intCast(u32, self.mir_instructions.len);
        }

        // Create list of registers to save in the prologue.
        // TODO handle register classes
        var reg_list = Mir.RegisterList{};
        const callee_preserved_regs = abi.getCalleePreservedRegs(self.target.*);
        for (callee_preserved_regs) |reg| {
            if (self.register_manager.isRegAllocated(reg)) {
                reg_list.push(callee_preserved_regs, reg);
            }
        }
        const saved_regs_stack_space: u32 = reg_list.count() * 8;

        // Pop saved callee-preserved regs. Placeholder: backpatched to `pop_regs` below.
        const backpatch_pop_callee_preserved_regs = try self.addInst(.{
            .tag = .nop,
            .ops = undefined,
            .data = undefined,
        });

        _ = try self.addInst(.{
            .tag = .dbg_epilogue_begin,
            .ops = undefined,
            .data = undefined,
        });

        // Maybe add rsp, x if required. This is backpatched later.
        const backpatch_stack_add = try self.addInst(.{
            .tag = .nop,
            .ops = undefined,
            .data = undefined,
        });

        _ = try self.addInst(.{
            .tag = .pop,
            .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
            .data = undefined,
        });

        _ = try self.addInst(.{
            .tag = .ret,
            .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
            .data = undefined,
        });

        // Adjust the stack
        if (self.max_end_stack > math.maxInt(i32)) {
            return self.failSymbol("too much stack used in call parameters", .{});
        }

        const aligned_stack_end = @intCast(
            u32,
            mem.alignForward(self.max_end_stack + saved_regs_stack_space, self.stack_align),
        );
        if (aligned_stack_end > 0) {
            // Backpatch the frame-size sub/add placeholders now that the
            // final, aligned frame size is known.
            self.mir_instructions.set(backpatch_stack_sub, .{
                .tag = .sub,
                .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
                .data = .{ .imm = aligned_stack_end },
            });
            self.mir_instructions.set(backpatch_stack_add, .{
                .tag = .add,
                .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
                .data = .{ .imm = aligned_stack_end },
            });

            const save_reg_list = try self.addExtra(Mir.SaveRegisterList{
                .register_list = reg_list.asInt(),
                .stack_end = aligned_stack_end,
            });
            self.mir_instructions.set(backpatch_push_callee_preserved_regs, .{
                .tag = .push_regs,
                .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
                .data = .{ .payload = save_reg_list },
            });
            self.mir_instructions.set(backpatch_pop_callee_preserved_regs, .{
                .tag = .pop_regs,
                .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
                .data = .{ .payload = save_reg_list },
            });
        }
    } else {
        // Naked functions: no prologue/epilogue, just the body between the
        // debug-info markers.
        _ = try self.addInst(.{
            .tag = .dbg_prologue_end,
            .ops = undefined,
            .data = undefined,
        });

        try self.genBody(self.air.getMainBody());

        _ = try self.addInst(.{
            .tag = .dbg_epilogue_begin,
            .ops = undefined,
            .data = undefined,
        });
    }

    // Drop them off at the rbrace.
    const payload = try self.addExtra(Mir.DbgLineColumn{
        .line = self.end_di_line,
        .column = self.end_di_column,
    });
    _ = try self.addInst(.{
        .tag = .dbg_line,
        .ops = undefined,
        .data = .{ .payload = payload },
    });
}

/// Lowers a list of AIR instructions by dispatching each to its `air*` handler.
/// Also performs per-instruction liveness/bookkeeping setup.
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
    const air_tags = self.air.instructions.items(.tag);

    for (body) |inst| {
        const old_air_bookkeeping = self.air_bookkeeping;
        try self.ensureProcessDeathCapacity(Liveness.bpi);
        if (builtin.mode == .Debug) {
            try self.mir_to_air_map.put(@intCast(u32, self.mir_instructions.len), inst);
        }

        switch (air_tags[inst]) {
            // zig fmt: off
            .add             => try self.airBinOp(inst, .add),
            .addwrap         => try self.airBinOp(inst, .addwrap),
            .sub             => try self.airBinOp(inst, .sub),
            .subwrap         => try self.airBinOp(inst, .subwrap),
            .bool_and        => try self.airBinOp(inst, .bool_and),
            .bool_or         => try self.airBinOp(inst, .bool_or),
            .bit_and         => try self.airBinOp(inst, .bit_and),
            .bit_or          => try self.airBinOp(inst, .bit_or),
            .xor             => try self.airBinOp(inst, .xor),

            .ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
            .ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),

            .shr, .shr_exact => try self.airShlShrBinOp(inst),
            .shl, .shl_exact => try self.airShlShrBinOp(inst),

            .mul             => try self.airMulDivBinOp(inst),
            .mulwrap         => try self.airMulDivBinOp(inst),
            .rem             => try self.airMulDivBinOp(inst),
            .mod             => try self.airMulDivBinOp(inst),

            .add_sat         => try self.airAddSat(inst),
            .sub_sat         => try self.airSubSat(inst),
            .mul_sat         => try self.airMulSat(inst),
            .shl_sat         => try self.airShlSat(inst),
            .min             => try self.airMin(inst),
            .max             => try self.airMax(inst),
            .slice           => try self.airSlice(inst),

            .sqrt,
            .sin,
            .cos,
            .tan,
            .exp,
            .exp2,
            .log,
            .log2,
            .log10,
            .fabs,
            .floor,
            .ceil,
            .round,
            .trunc_float,
            .neg,
            => try self.airUnaryMath(inst),

            .add_with_overflow => try self.airAddSubShlWithOverflow(inst),
            .sub_with_overflow => try self.airAddSubShlWithOverflow(inst),
            .mul_with_overflow => try self.airMulWithOverflow(inst),
            .shl_with_overflow => try self.airAddSubShlWithOverflow(inst),

            .div_float, .div_trunc, .div_floor, .div_exact => try self.airMulDivBinOp(inst),

            .cmp_lt  => try self.airCmp(inst, .lt),
            .cmp_lte => try self.airCmp(inst, .lte),
            .cmp_eq  => try self.airCmp(inst, .eq),
            .cmp_gte => try self.airCmp(inst, .gte),
            .cmp_gt  => try self.airCmp(inst, .gt),
            .cmp_neq => try self.airCmp(inst, .neq),

            .cmp_vector => try self.airCmpVector(inst),
            .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),

            .alloc           => try self.airAlloc(inst),
            .ret_ptr         => try self.airRetPtr(inst),
            .arg             => try self.airArg(inst),
            .assembly        => try self.airAsm(inst),
            .bitcast         => try self.airBitCast(inst),
            .block           => try self.airBlock(inst),
            .br              => try self.airBr(inst),
            .breakpoint      => try self.airBreakpoint(),
            .ret_addr        => try self.airRetAddr(inst),
            .frame_addr      => try self.airFrameAddress(inst),
            .fence           => try self.airFence(),
            .cond_br         => try self.airCondBr(inst),
            .dbg_stmt        => try self.airDbgStmt(inst),
            .fptrunc         => try self.airFptrunc(inst),
            .fpext           => try self.airFpext(inst),
            .intcast         => try self.airIntCast(inst),
            .trunc           => try self.airTrunc(inst),
            .bool_to_int     => try self.airBoolToInt(inst),
            .is_non_null     => try self.airIsNonNull(inst),
            .is_non_null_ptr => try self.airIsNonNullPtr(inst),
            .is_null         => try self.airIsNull(inst),
            .is_null_ptr     => try self.airIsNullPtr(inst),
            .is_non_err      => try self.airIsNonErr(inst),
            .is_non_err_ptr  => try self.airIsNonErrPtr(inst),
            .is_err          => try self.airIsErr(inst),
            .is_err_ptr      => try self.airIsErrPtr(inst),
            .load            => try self.airLoad(inst),
            .loop            => try self.airLoop(inst),
            .not             => try self.airNot(inst),
            .ptrtoint        => try self.airPtrToInt(inst),
            .ret             => try self.airRet(inst),
            .ret_load        => try self.airRetLoad(inst),
            .store           => try self.airStore(inst),
            .struct_field_ptr=> try self.airStructFieldPtr(inst),
            .struct_field_val=> try self.airStructFieldVal(inst),
            .array_to_slice  => try self.airArrayToSlice(inst),
            .int_to_float    => try self.airIntToFloat(inst),
            .float_to_int    => try self.airFloatToInt(inst),

            .cmpxchg_strong  => try self.airCmpxchg(inst),
            .cmpxchg_weak    => try self.airCmpxchg(inst),
            .atomic_rmw      => try self.airAtomicRmw(inst),
            .atomic_load     => try self.airAtomicLoad(inst),
            .memcpy          => try self.airMemcpy(inst),
            .memset          => try self.airMemset(inst),
            .set_union_tag   => try self.airSetUnionTag(inst),
            .get_union_tag   => try self.airGetUnionTag(inst),
            .clz             => try self.airClz(inst),
            .ctz             => try self.airCtz(inst),
            .popcount        => try self.airPopcount(inst),
            .byte_swap       => try self.airByteSwap(inst),
            .bit_reverse     => try self.airBitReverse(inst),
            .tag_name        => try self.airTagName(inst),
            .error_name      => try self.airErrorName(inst),
            .splat           => try self.airSplat(inst),
            .select          => try self.airSelect(inst),
            .shuffle         => try self.airShuffle(inst),
            .reduce          => try self.airReduce(inst),
            .aggregate_init  => try self.airAggregateInit(inst),
            .union_init      => try self.airUnionInit(inst),
            .prefetch        => try self.airPrefetch(inst),
            .mul_add         => try self.airMulAdd(inst),
            .addrspace_cast  => return self.fail("TODO implement addrspace_cast", .{}),

            .@"try"          => try self.airTry(inst),
            .try_ptr         => try self.airTryPtr(inst),

            .dbg_var_ptr,
            .dbg_var_val,
            => try self.airDbgVar(inst),

            .dbg_inline_begin,
            .dbg_inline_end,
            => try self.airDbgInline(inst),

            .dbg_block_begin,
            .dbg_block_end,
            => try self.airDbgBlock(inst),

            .call              => try self.airCall(inst, .auto),
            .call_always_tail  => try self.airCall(inst, .always_tail),
            .call_never_tail   => try self.airCall(inst, .never_tail),
            .call_never_inline => try self.airCall(inst, .never_inline),

            .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
            .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
            .atomic_store_release   => try self.airAtomicStore(inst, .Release),
            .atomic_store_seq_cst   => try self.airAtomicStore(inst, .SeqCst),

            .struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
            .struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
            .struct_field_ptr_index_2 => try self.airStructFieldPtrIndex(inst, 2),
            .struct_field_ptr_index_3 => try self.airStructFieldPtrIndex(inst, 3),

            .field_parent_ptr => try self.airFieldParentPtr(inst),

            .switch_br       => try self.airSwitch(inst),
            .slice_ptr       => try self.airSlicePtr(inst),
            .slice_len       => try self.airSliceLen(inst),

            .ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst),
            .ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst),

            .array_elem_val      => try self.airArrayElemVal(inst),
            .slice_elem_val      => try self.airSliceElemVal(inst),
            .slice_elem_ptr      => try self.airSliceElemPtr(inst),
            .ptr_elem_val        => try self.airPtrElemVal(inst),
            .ptr_elem_ptr        => try self.airPtrElemPtr(inst),

            .constant => unreachable, // excluded from function bodies
            .const_ty => unreachable, // excluded from function bodies
            .unreach  => self.finishAirBookkeeping(),

            .optional_payload           => try self.airOptionalPayload(inst),
            .optional_payload_ptr       => try self.airOptionalPayloadPtr(inst),
            .optional_payload_ptr_set   => try self.airOptionalPayloadPtrSet(inst),
            .unwrap_errunion_err        => try self.airUnwrapErrErr(inst),
            .unwrap_errunion_payload    => try self.airUnwrapErrPayload(inst),
            .unwrap_errunion_err_ptr    => try self.airUnwrapErrErrPtr(inst),
            .unwrap_errunion_payload_ptr=> try self.airUnwrapErrPayloadPtr(inst),
            .errunion_payload_ptr_set   => try self.airErrUnionPayloadPtrSet(inst),
            .err_return_trace           => try self.airErrReturnTrace(inst),
            .set_err_return_trace       => try self.airSetErrReturnTrace(inst),
            .save_err_return_trace_index=> try self.airSaveErrReturnTraceIndex(inst),

            .wrap_optional         => try self.airWrapOptional(inst),
            .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
            .wrap_errunion_err     => try self.airWrapErrUnionErr(inst),

            .add_optimized,
            .addwrap_optimized,
            .sub_optimized,
            .subwrap_optimized,
            .mul_optimized,
            .mulwrap_optimized,
            .div_float_optimized,
            .div_trunc_optimized,
            .div_floor_optimized,
            .div_exact_optimized,
            .rem_optimized,
            .mod_optimized,
            .neg_optimized,
            .cmp_lt_optimized,
            .cmp_lte_optimized,
            .cmp_eq_optimized,
            .cmp_gte_optimized,
            .cmp_gt_optimized,
            .cmp_neq_optimized,
            .cmp_vector_optimized,
            .reduce_optimized,
            .float_to_int_optimized,
            => return self.fail("TODO implement optimized float mode", .{}),

            .is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
            .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
            .vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),

            .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
            .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
            .c_va_end => return self.fail("TODO implement c_va_end", .{}),
            .c_va_start => return self.fail("TODO implement c_va_start", .{}),

            .wasm_memory_size => unreachable,
            .wasm_memory_grow => unreachable,
            // zig fmt: on
        }

        assert(!self.register_manager.lockedRegsExist());

        if (std.debug.runtime_safety) {
            if (self.air_bookkeeping < old_air_bookkeeping + 1) {
                std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[inst] });
            }
        }
    }
}

/// Asserts there is already capacity to insert into top branch inst_table.
/// Marks `inst` as dead in the current branch and releases any register
/// or EFLAGS state its previous MCValue held.
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
    const air_tags = self.air.instructions.items(.tag);
    if (air_tags[inst] == .constant) return; // Constants are immortal.
    log.debug("%{d} => {}", .{ inst, MCValue{ .dead = {} } });
    // When editing this function, note that the logic must synchronize with `reuseOperand`.
    const prev_value = self.getResolvedInstValue(inst);
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    branch.inst_table.putAssumeCapacity(inst, .dead);
    switch (prev_value) {
        .register => |reg| {
            self.register_manager.freeReg(reg.to64());
        },
        .register_overflow => |ro| {
            self.register_manager.freeReg(ro.reg.to64());
            self.eflags_inst = null;
        },
        .eflags => {
            self.eflags_inst = null;
        },
        else => {}, // TODO process stack allocation death
    }
}

/// Called when there are no operands, and the instruction is always unreferenced.
fn finishAirBookkeeping(self: *Self) void {
    if (std.debug.runtime_safety) {
        self.air_bookkeeping += 1;
    }
}

/// Standard epilogue for an `air*` handler: processes operand deaths from the
/// tomb bits, records the result MCValue if the instruction is used, and bumps
/// the bookkeeping counter.
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
    var tomb_bits = self.liveness.getTombBits(inst);
    for (operands) |op| {
        // Each low bit of tomb_bits corresponds to one operand, in order.
        const dies = @truncate(u1, tomb_bits) != 0;
        tomb_bits >>= 1;
        if (!dies) continue;
        const op_int = @enumToInt(op);
        // Refs below typed_value_map.len are interned constants, not instructions.
        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
        self.processDeath(op_index);
    }
    // The remaining bit is the instruction's own "unused" flag.
    const is_used = @truncate(u1, tomb_bits) == 0;
    if (is_used) {
        log.debug("%{d} => {}", .{ inst, result });
        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
        branch.inst_table.putAssumeCapacityNoClobber(inst, result);
        // In some cases (such as bitcast), an operand
        // may be the same MCValue as the result. If
        // that operand died and was a register, it
        // was freed by processDeath. We have to
        // "re-allocate" the register.
        switch (result) {
            .register => |reg| {
                if (self.register_manager.isRegFree(reg)) {
                    self.register_manager.getRegAssumeFree(reg, inst);
                }
            },
            .register_overflow => |ro| {
                if (self.register_manager.isRegFree(ro.reg)) {
                    self.register_manager.getRegAssumeFree(ro.reg, inst);
                }
            },
            else => {},
        }
    }
    self.finishAirBookkeeping();
}

/// Reserves room in the current branch's inst_table so later
/// `putAssumeCapacity` calls (e.g. in `processDeath`) cannot fail.
fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
    const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table;
    try table.ensureUnusedCapacity(self.gpa, additional_count);
}

/// Allocates `abi_size` bytes in the stack frame with `abi_align` alignment,
/// recording the slot in `self.stack`. Returns the stack offset.
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
    if (abi_align > self.stack_align)
        self.stack_align = abi_align;
    // TODO find a free slot instead of always appending
    const offset = mem.alignForwardGeneric(u32, self.next_stack_offset + abi_size, abi_align);
    self.next_stack_offset = offset;
    self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
    try self.stack.putNoClobber(self.gpa, offset, .{
        .inst = inst,
        .size = abi_size,
    });
    return offset;
}

/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
    const ptr_ty = self.air.typeOfIndex(inst);
    const elem_ty = ptr_ty.elemType();

    if (!elem_ty.hasRuntimeBitsIgnoreComptime()) {
        // Zero-bit payload: still hand out a unique pointer-sized slot.
        return self.allocMem(inst, @sizeOf(usize), @alignOf(usize));
    }

    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
        const mod = self.bin_file.options.module.?;
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
    };
    // TODO swap this for inst.ty.ptrAlign
    const abi_align = ptr_ty.ptrAlignment(self.target.*);
    return self.allocMem(inst, abi_size, abi_align);
}

/// Allocates a location for the result of `inst`: a register when `reg_ok`
/// and the value fits, otherwise a stack slot.
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
    const elem_ty = self.air.typeOfIndex(inst);
    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
        const mod = self.bin_file.options.module.?;
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
    };
    const abi_align = elem_ty.abiAlignment(self.target.*);
    if (abi_align > self.stack_align)
        self.stack_align = abi_align;

    if (reg_ok) {
        switch (elem_ty.zigTypeTag()) {
            .Vector => return self.fail("TODO allocRegOrMem for Vector type", .{}),
            .Float => {
                if (intrinsicsAllowed(self.target.*, elem_ty)) {
                    const ptr_bytes: u64 = 32;
                    if (abi_size <= ptr_bytes) {
                        if (self.register_manager.tryAllocReg(inst, sse)) |reg| {
                            return MCValue{ .register = registerAlias(reg, abi_size) };
                        }
                    }
                }
                return self.fail("TODO allocRegOrMem for Float type without SSE/AVX support", .{});
            },
            else => {
                // Make sure the type can fit in a register before we try to allocate one.
                const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                if (abi_size <= ptr_bytes) {
                    if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
                        return MCValue{ .register = registerAlias(reg, abi_size) };
                    }
                }
            },
        }
    }

    // No register available (or not allowed): fall back to a stack slot.
    const stack_offset = try self.allocMem(inst, abi_size, abi_align);
    return MCValue{ .stack_offset = @intCast(i32, stack_offset) };
}

/// Snapshot of allocation/register state, used to rewind after speculative
/// codegen of a runtime branch (see `captureState`/`revertState`).
const State = struct {
    next_stack_offset: u32,
    registers: abi.RegisterManager.TrackedRegisters,
    free_registers: abi.RegisterManager.RegisterBitSet,
    eflags_inst: ?Air.Inst.Index,
    stack: std.AutoHashMapUnmanaged(u32, StackAllocation),

    fn deinit(state: *State, gpa: Allocator) void {
        state.stack.deinit(gpa);
    }
};

/// Captures the current allocation state; pairs with `revertState`.
fn captureState(self: *Self) !State {
    return State{
        .next_stack_offset = self.next_stack_offset,
        .registers = self.register_manager.registers,
        .free_registers = self.register_manager.free_registers,
        .eflags_inst = self.eflags_inst,
        .stack = try self.stack.clone(self.gpa),
    };
}

/// Restores a state captured by `captureState`, taking ownership of
/// `state.stack` (the current map is freed).
fn revertState(self: *Self, state: State) void {
    self.register_manager.registers = state.registers;
    self.eflags_inst = state.eflags_inst;
    self.stack.deinit(self.gpa);
    self.stack = state.stack;
    self.next_stack_offset = state.next_stack_offset;
    self.register_manager.free_registers = state.free_registers;
}

/// Moves the value of `inst` out of `reg` into a fresh stack slot and
/// re-points the current branch's table at it.
pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
    const stack_mcv = try self.allocRegOrMem(inst, false);
    log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
    const reg_mcv = self.getResolvedInstValue(inst);
    switch (reg_mcv) {
        .register => |other| {
            assert(reg.to64() == other.to64());
        },
        .register_overflow => |ro| {
            assert(reg.to64() == ro.reg.to64());
        },
        else => {},
    }
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    try branch.inst_table.put(self.gpa, inst, stack_mcv);
    try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv, .{});
}

/// If some instruction's result lives in EFLAGS, relocates it so that
/// upcoming instructions are free to clobber the flags.
pub fn spillEflagsIfOccupied(self: *Self) !void {
    if (self.eflags_inst) |inst_to_save| {
        const mcv = self.getResolvedInstValue(inst_to_save);
        const new_mcv = switch (mcv) {
            .register_overflow => try self.allocRegOrMem(inst_to_save, false),
            .eflags => try self.allocRegOrMem(inst_to_save, true),
            else => unreachable,
        };

        try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
        log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });

        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
        try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);

        self.eflags_inst = null;

        // TODO consolidate with register manager and spillInstruction
        // this call should really belong in the register manager!
        switch (mcv) {
            .register_overflow => |ro| self.register_manager.freeReg(ro.reg),
            else => {},
        }
    }
}

/// Forces the given registers to be free, spilling their current contents
/// to the stack if necessary.
pub fn spillRegisters(self: *Self, comptime count: comptime_int, registers: [count]Register) !void {
    for (registers) |reg| {
        try self.register_manager.getReg(reg, null);
    }
}

/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
    const reg_class: RegisterManager.RegisterBitSet = switch (ty.zigTypeTag()) {
        .Float => blk: {
            if (intrinsicsAllowed(self.target.*, ty)) break :blk sse;
            return self.fail("TODO copy {} to register", .{ty.fmtDebug()});
        },
        else => gp,
    };
    const reg: Register = try self.register_manager.allocReg(null, reg_class);
    try self.genSetReg(ty, reg, mcv);
    return reg;
}

/// Allocates a new register and copies `mcv` into it.
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
/// WARNING make sure that the allocated register matches the returned MCValue from an instruction!
fn copyToRegisterWithInstTracking(self: *Self, reg_owner: Air.Inst.Index, ty: Type, mcv: MCValue) !MCValue {
    // Same register-class selection as `copyToTmpRegister`, but the register is
    // recorded as owned by `reg_owner` in the register table.
    const reg_class: RegisterManager.RegisterBitSet = switch (ty.zigTypeTag()) {
        .Float => blk: {
            if (intrinsicsAllowed(self.target.*, ty)) break :blk sse;
            return self.fail("TODO copy {} to register", .{ty.fmtDebug()});
        },
        else => gp,
    };
    const reg: Register = try self.register_manager.allocReg(reg_owner, reg_class);
    try self.genSetReg(ty, reg, mcv);
    return MCValue{ .register = reg };
}

/// Lowers AIR `alloc`: reserves a stack slot and yields a pointer to it.
fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
    const stack_offset = try self.allocMemPtr(inst);
    return self.finishAir(inst, .{ .ptr_stack_offset = @intCast(i32, stack_offset) }, .{ .none, .none, .none });
}

/// Lowers AIR `ret_ptr`: same stack-slot-pointer strategy as `airAlloc`.
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
    const stack_offset = try self.allocMemPtr(inst);
    return self.finishAir(inst, .{ .ptr_stack_offset = @intCast(i32, stack_offset) }, .{ .none, .none, .none });
}

fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    _ = ty_op;
    return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch});
    // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    _ = ty_op;
    return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch});
    // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Lowers AIR `intcast`. Same-width casts are a no-op; widening/narrowing
/// within 8 bytes zeroes a fresh register and copies the operand into it.
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });

    const operand_ty = self.air.typeOf(ty_op.operand);
    const operand = try self.resolveInst(ty_op.operand);
    const info_a = operand_ty.intInfo(self.target.*);
    const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*);

    const operand_abi_size = operand_ty.abiSize(self.target.*);
    const dest_ty = self.air.typeOfIndex(inst);
    const dest_abi_size = dest_ty.abiSize(self.target.*);
    const dst_mcv: MCValue = blk: {
        if (info_a.bits == info_b.bits) {
            break :blk operand;
        }
        if (operand_abi_size > 8 or dest_abi_size > 8) {
            return self.fail("TODO implement intCast for abi sizes larger than 8", .{});
        }

        const operand_lock: ?RegisterLock = switch (operand) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

        // Zero the destination first so the upper bits are defined, then copy
        // the (possibly narrower) operand in.
        const reg = try self.register_manager.allocReg(inst, gp);
        try self.genSetReg(dest_ty, reg, .{ .immediate = 0 });
        try self.genSetReg(operand_ty, reg, operand);
        break :blk MCValue{ .register = reg };
    };
    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

/// Lowers AIR `trunc`. Reuses the operand register when possible; masks off
/// the now-dead top bits for non-power-of-two destination widths.
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });

    const src_ty = self.air.typeOf(ty_op.operand);
    const dst_ty = self.air.typeOfIndex(inst);
    const operand = try self.resolveInst(ty_op.operand);

    const src_ty_size = src_ty.abiSize(self.target.*);
    const dst_ty_size = dst_ty.abiSize(self.target.*);

    if (src_ty_size > 8 or dst_ty_size > 8) {
        return self.fail("TODO implement trunc for abi sizes larger than 8", .{});
    }

    const operand_lock: ?RegisterLock = switch (operand) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

    const reg: Register = blk: {
        if (operand.isRegister()) {
            if (self.reuseOperand(inst, ty_op.operand, 0, operand)) {
                break :blk operand.register.to64();
            }
        }
        const mcv = try self.copyToRegisterWithInstTracking(inst, src_ty, operand);
        break :blk mcv.register.to64();
    };

    // when truncating a `u16` to `u5`, for example, those top 3 bits in the result
    // have to be removed. this only happens if the dst is not a power-of-two size.
    const dst_bit_size = dst_ty.bitSize(self.target.*);
    if (!math.isPowerOfTwo(dst_bit_size) or dst_bit_size < 8) {
        try self.truncateRegister(dst_ty, reg);
    }

    return self.finishAir(inst, .{ .register = reg }, .{ ty_op.operand, .none, .none });
}

/// Lowers AIR `bool_to_int`: the bit pattern is already correct, so the
/// operand is simply forwarded.
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand;
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// Lowers AIR `not`. An operand living in EFLAGS is handled by negating the
/// condition code; otherwise the value is XORed with an all-ones mask.
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    const operand_ty = self.air.typeOf(ty_op.operand);
    const operand = try self.resolveInst(ty_op.operand);
    const result: MCValue = result: {
        switch (operand) {
            .dead => unreachable,
            .unreach => unreachable,
            .eflags => |cc| {
                break :result MCValue{ .eflags = cc.negate() };
            },
            else => {},
        }

        const operand_lock: ?RegisterLock = switch (operand) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

        const dst_mcv: MCValue = blk: {
            if (self.reuseOperand(inst, ty_op.operand, 0, operand) and operand.isRegister()) {
                break :blk operand;
            }
            break :blk try self.copyToRegisterWithInstTracking(inst, operand_ty, operand);
        };
        const dst_mcv_lock: ?RegisterLock = switch (dst_mcv) {
            .register => |reg| self.register_manager.lockReg(reg),
            else => null,
        };
        defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock);

        const mask = ~@as(u64, 0);
        try self.genBinOpMir(.xor, operand_ty, dst_mcv, .{ .immediate = mask });
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Lowers AIR `min` for integers via `cmp` + conditional move (below/less
/// depending on signedness).
fn airMin(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    const ty = self.air.typeOfIndex(inst);
    if (ty.zigTypeTag() != .Int) {
        return self.fail("TODO implement min for type {}", .{ty.fmtDebug()});
    }
    const signedness = ty.intInfo(self.target.*).signedness;
    const result: MCValue = result: {
        // TODO improve by checking if any operand can be reused.
        // TODO audit register allocation
        const lhs = try self.resolveInst(bin_op.lhs);
        const lhs_lock: ?RegisterLock = switch (lhs) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

        const lhs_reg = try self.copyToTmpRegister(ty, lhs);
        const lhs_reg_lock = self.register_manager.lockRegAssumeUnused(lhs_reg);
        defer self.register_manager.unlockReg(lhs_reg_lock);

        const rhs_mcv = try self.limitImmediateType(bin_op.rhs, i32);
        const rhs_lock: ?RegisterLock = switch (rhs_mcv) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

        try self.genBinOpMir(.cmp, ty, .{ .register = lhs_reg }, rhs_mcv);

        // Start with rhs in the destination; cmov lhs over it when lhs < rhs.
        const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ty, rhs_mcv);
        const cc: Condition = switch (signedness) {
            .unsigned => .b,
            .signed => .l,
        };
        _ = try self.addInst(.{
            .tag = .cond_mov,
            .ops = Mir.Inst.Ops.encode(.{
                .reg1 = dst_mcv.register,
                .reg2 = lhs_reg,
            }),
            .data = .{ .cc = cc },
        });

        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Lowers AIR `max`: currently unimplemented (TODO failure).
fn airMax(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement max for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Lowers AIR `slice`: builds a (ptr, len) pair in a 16-byte stack slot,
/// ptr at `stack_offset` and len 8 bytes below it.
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    const ptr = try self.resolveInst(bin_op.lhs);
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    const len = try self.resolveInst(bin_op.rhs);
    const len_ty = self.air.typeOf(bin_op.rhs);

    const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16));
    try self.genSetStack(ptr_ty, stack_offset, ptr, .{});
    try self.genSetStack(len_ty, stack_offset - 8, len, .{});
    const result = MCValue{ .stack_offset = stack_offset };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Generic lowering for two-operand AIR instructions; dispatches to `genBinOp`.
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    const result = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Lowers pointer add/sub; operands come from the `ty_pl` extra payload.
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    const result = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Lowers mul/div-family binops. Spills rax/rdx first since the x86 mul/div
/// instructions implicitly use them; must spill BEFORE resolving operands.
fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    const tag = self.air.instructions.items(.tag)[inst];
    const ty = self.air.typeOfIndex(inst);

    try self.spillRegisters(2, .{ .rax, .rdx });

    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);

    const result = try self.genMulDivBinOp(tag, inst, ty, lhs, rhs);
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Lowers `add_with_overflow` / `sub_with_overflow` / `shl_with_overflow`.
/// For power-of-two widths >= 8 bits the overflow flag stays in EFLAGS
/// (`register_overflow`); otherwise the result plus a truncation-compare
/// overflow bit is written to a stack-allocated tuple.
fn airAddSubShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const tag = self.air.instructions.items(.tag)[inst];
    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
    const result = if (self.liveness.isUnused(inst)) .dead else result: {
        const ty = self.air.typeOf(bin_op.lhs);
        const abi_size = ty.abiSize(self.target.*);
        switch (ty.zigTypeTag()) {
            .Vector => return self.fail("TODO implement add/sub/shl with overflow for Vector type", .{}),
            .Int => {
                if (abi_size > 8) {
                    return self.fail("TODO implement add/sub/shl with overflow for Ints larger than 64bits", .{});
                }

                try self.spillEflagsIfOccupied();

                // Variable shifts implicitly use CL.
                if (tag == .shl_with_overflow) {
                    try self.spillRegisters(1, .{.rcx});
                }

                const partial: MCValue = switch (tag) {
                    .add_with_overflow => try self.genBinOp(null, .add, bin_op.lhs, bin_op.rhs),
                    .sub_with_overflow => try self.genBinOp(null, .sub, bin_op.lhs, bin_op.rhs),
                    .shl_with_overflow => blk: {
                        const lhs = try self.resolveInst(bin_op.lhs);
                        const rhs = try self.resolveInst(bin_op.rhs);
                        const shift_ty = self.air.typeOf(bin_op.rhs);
                        break :blk try self.genShiftBinOp(.shl, null, lhs, rhs, ty, shift_ty);
                    },
                    else => unreachable,
                };

                const int_info = ty.intInfo(self.target.*);

                if (math.isPowerOfTwo(int_info.bits) and int_info.bits >= 8) {
                    self.eflags_inst = inst;

                    const cc: Condition = switch (int_info.signedness) {
                        .unsigned => .c,
                        .signed => .o,
                    };
                    break :result MCValue{ .register_overflow = .{
                        .reg = partial.register,
                        .eflags = cc,
                    } };
                }

                self.eflags_inst = null;

                const tuple_ty = self.air.typeOfIndex(inst);
                const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
                const tuple_align = tuple_ty.abiAlignment(self.target.*);
                const overflow_bit_offset = @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*));
                const stack_offset = @intCast(i32, try self.allocMem(inst, tuple_size, tuple_align));

                try self.genSetStackTruncatedOverflowCompare(ty, stack_offset, overflow_bit_offset, partial.register);

                break :result MCValue{ .stack_offset = stack_offset };
            },
            else => unreachable,
        }
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Stores the truncated result of an overflowing op plus its overflow bit to a
/// stack tuple. The overflow bit is the hardware carry/overflow flag OR'd with
/// a "value changed when truncated to `ty`" comparison.
fn genSetStackTruncatedOverflowCompare(
    self: *Self,
    ty: Type,
    stack_offset: i32,
    overflow_bit_offset: i32,
    reg: Register,
) !void {
    const reg_lock = self.register_manager.lockReg(reg);
    defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);

    const int_info = ty.intInfo(self.target.*);
    const extended_ty = switch (int_info.signedness) {
        .signed => Type.isize,
        .unsigned => ty,
    };

    const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp);
    const temp_regs_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs);
    defer for (temp_regs_locks) |rreg| {
        self.register_manager.unlockReg(rreg);
    };

    // Capture the hardware overflow/carry flag from the preceding op.
    const overflow_reg = temp_regs[0];
    const cc: Condition = switch (int_info.signedness) {
        .signed => .o,
        .unsigned => .c,
    };
    _ = try self.addInst(.{
        .tag = .cond_set_byte,
        .ops = Mir.Inst.Ops.encode(.{
            .reg1 = overflow_reg.to8(),
        }),
        .data = .{ .cc = cc },
    });

    // Truncate a copy and compare with the original to detect overflow past
    // the (possibly non-power-of-two) bit width.
    const scratch_reg = temp_regs[1];
    try self.genSetReg(extended_ty, scratch_reg, .{ .register = reg });
    try self.truncateRegister(ty, scratch_reg);
    try self.genBinOpMir(
        .cmp,
        extended_ty,
        .{ .register = reg },
        .{ .register = scratch_reg },
    );

    const eq_reg = temp_regs[2];
    _ = try self.addInst(.{
        .tag = .cond_set_byte,
        .ops = Mir.Inst.Ops.encode(.{ .reg1 = eq_reg.to8() }),
        .data = .{ .cc = .ne },
    });

    try self.genBinOpMir(
        .@"or",
        Type.u8,
        .{ .register = overflow_reg },
        .{ .register = eq_reg },
    );

    try self.genSetStack(ty, stack_offset, .{ .register = scratch_reg }, .{});
    try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{
        .register = overflow_reg.to8(),
    }, .{});
}

/// Lowers `mul_with_overflow`. Power-of-two widths >= 8 bits keep the overflow
/// flag in EFLAGS; odd widths compute a widened product and derive the
/// overflow bit via `genSetStackTruncatedOverflowCompare`.
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    const ty = self.air.typeOf(bin_op.lhs);
    const abi_size = ty.abiSize(self.target.*);
    const result: MCValue = result: {
        switch (ty.zigTypeTag()) {
            .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}),
            .Int => {
                if (abi_size > 8) {
                    return self.fail("TODO implement mul_with_overflow for Ints larger than 64bits", .{});
                }

                const int_info = ty.intInfo(self.target.*);

                if (math.isPowerOfTwo(int_info.bits) and int_info.bits >= 8) {
                    try self.spillEflagsIfOccupied();
                    self.eflags_inst = inst;

                    try self.spillRegisters(2, .{ .rax, .rdx });

                    const lhs = try self.resolveInst(bin_op.lhs);
                    const rhs = try self.resolveInst(bin_op.rhs);

                    const partial = try self.genMulDivBinOp(.mul, null, ty, lhs, rhs);
                    const cc: Condition = switch (int_info.signedness) {
                        .unsigned => .c,
                        .signed => .o,
                    };
                    break :result MCValue{ .register_overflow = .{
                        .reg = partial.register,
                        .eflags = cc,
                    } };
                }

                try self.spillEflagsIfOccupied();
                self.eflags_inst = null;

                const dst_reg: Register = dst_reg: {
                    switch (int_info.signedness) {
                        .signed => {
                            const lhs = try self.resolveInst(bin_op.lhs);
                            const rhs = try self.resolveInst(bin_op.rhs);

                            const rhs_lock: ?RegisterLock = switch (rhs) {
                                .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                                else => null,
                            };
                            defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

                            const dst_reg: Register = blk: {
                                if (lhs.isRegister()) break :blk lhs.register;
                                break :blk try self.copyToTmpRegister(ty, lhs);
                            };
                            const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
                            defer self.register_manager.unlockReg(dst_reg_lock);

                            const rhs_mcv: MCValue = blk: {
                                if (rhs.isRegister() or rhs.isMemory()) break :blk rhs;
                                break :blk MCValue{ .register = try self.copyToTmpRegister(ty, rhs) };
                            };
                            const rhs_mcv_lock: ?RegisterLock = switch (rhs_mcv) {
                                .register => |reg| self.register_manager.lockReg(reg),
                                else => null,
                            };
                            defer if (rhs_mcv_lock) |lock| self.register_manager.unlockReg(lock);

                            // Widened signed multiply (two-operand imul).
                            try self.genIntMulComplexOpMir(Type.isize, .{ .register = dst_reg }, rhs_mcv);

                            break :dst_reg dst_reg;
                        },
                        .unsigned => {
                            try self.spillRegisters(2, .{ .rax, .rdx });

                            const lhs = try self.resolveInst(bin_op.lhs);
                            const rhs = try self.resolveInst(bin_op.rhs);

                            const dst_mcv = try self.genMulDivBinOp(.mul, null, ty, lhs, rhs);
                            break :dst_reg dst_mcv.register;
                        },
                    }
                };

                const tuple_ty = self.air.typeOfIndex(inst);
                const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
                const tuple_align = tuple_ty.abiAlignment(self.target.*);
                const overflow_bit_offset = @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*));
                const stack_offset = @intCast(i32, try self.allocMem(inst, tuple_size, tuple_align));

                try self.genSetStackTruncatedOverflowCompare(ty, stack_offset, overflow_bit_offset, dst_reg);

                break :result MCValue{ .stack_offset = stack_offset };
            },
            else => unreachable,
        }
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Generates signed or unsigned integer multiplication/division.
/// Clobbers .rax and .rdx registers.
/// Quotient is saved in .rax and remainder in .rdx.
fn genIntMulDivOpMir(
    self: *Self,
    tag: Mir.Inst.Tag,
    ty: Type,
    signedness: std.builtin.Signedness,
    lhs: MCValue,
    rhs: MCValue,
) !void {
    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
    if (abi_size > 8) {
        return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{});
    }

    // Dividend must be in rax; skip the move if it already is.
    lhs: {
        switch (lhs) {
            .register => |reg| if (reg.to64() == .rax) break :lhs,
            else => {},
        }

        try self.genSetReg(ty, .rax, lhs);
    }

    // Prepare rdx: sign-extend rax into rdx (cqo family) for signed,
    // zero it for unsigned.
    switch (signedness) {
        .signed => {
            _ = try self.addInst(.{
                .tag = .cwd,
                .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
                .data = undefined,
            });
        },
        .unsigned => {
            _ = try self.addInst(.{
                .tag = .xor,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = .rdx,
                    .reg2 = .rdx,
                }),
                .data = undefined,
            });
        },
    }

    const factor = switch (rhs) {
        .register => rhs,
        .stack_offset => rhs,
        else => blk: {
            const reg = try self.copyToTmpRegister(ty, rhs);
            break :blk MCValue{ .register = reg };
        },
    };

    switch (factor) {
        .register => |reg| {
            _ = try self.addInst(.{
                .tag = tag,
                .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
                .data = undefined,
            });
        },
        .stack_offset => |off| {
            _ = try self.addInst(.{
                .tag = tag,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg2 = .rbp,
                    // flags encode the operand size for the memory form.
                    .flags = switch (abi_size) {
                        1 => 0b00,
                        2 => 0b01,
                        4 => 0b10,
                        8 => 0b11,
                        else => unreachable,
                    },
                }),
                .data = .{ .imm = @bitCast(u32, -off) },
            });
        },
        else => unreachable,
    }
}

/// Always returns a register.
/// Clobbers .rax and .rdx registers.
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue { const signedness = ty.intInfo(self.target.*).signedness; const dividend: Register = switch (lhs) { .register => |reg| reg, else => try self.copyToTmpRegister(ty, lhs), }; const dividend_lock = self.register_manager.lockReg(dividend); defer if (dividend_lock) |lock| self.register_manager.unlockReg(lock); const divisor: Register = switch (rhs) { .register => |reg| reg, else => try self.copyToTmpRegister(ty, rhs), }; const divisor_lock = self.register_manager.lockReg(divisor); defer if (divisor_lock) |lock| self.register_manager.unlockReg(lock); try self.genIntMulDivOpMir(switch (signedness) { .signed => .idiv, .unsigned => .div, }, Type.isize, signedness, .{ .register = dividend }, .{ .register = divisor }); _ = try self.addInst(.{ .tag = .xor, .ops = Mir.Inst.Ops.encode(.{ .reg1 = divisor.to64(), .reg2 = dividend.to64(), }), .data = undefined, }); _ = try self.addInst(.{ .tag = .sar, .ops = Mir.Inst.Ops.encode(.{ .reg1 = divisor.to64(), .flags = 0b10, }), .data = .{ .imm = 63 }, }); _ = try self.addInst(.{ .tag = .@"test", .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rdx, .reg2 = .rdx, }), .data = undefined, }); _ = try self.addInst(.{ .tag = .cond_mov, .ops = Mir.Inst.Ops.encode(.{ .reg1 = divisor.to64(), .reg2 = .rdx, }), .data = .{ .cc = .e }, }); try self.genBinOpMir(.add, Type.isize, .{ .register = divisor }, .{ .register = .rax }); return MCValue{ .register = divisor }; } fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } try self.spillRegisters(1, .{.rcx}); const tag = self.air.instructions.items(.tag)[inst]; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); const result = 
try self.genShiftBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } const payload_ty = self.air.typeOfIndex(inst); const optional_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; if (optional_ty.isPtrLikeOptional()) { if (self.reuseOperand(inst, ty_op.operand, 0, operand)) { break :result operand; } break :result try self.copyToRegisterWithInstTracking(inst, payload_ty, operand); } const offset = optional_ty.abiSize(self.target.*) - payload_ty.abiSize(self.target.*); switch (operand) { .stack_offset => |off| { break :result MCValue{ .stack_offset = off - @intCast(i32, offset) }; }, .register => { // TODO reuse the operand const result = try self.copyToRegisterWithInstTracking(inst, optional_ty, operand); const shift = @intCast(u8, offset * @sizeOf(usize)); try self.genShiftBinOpMir(.shr, optional_ty, result.register, .{ .immediate = @intCast(u8, shift) }); break :result result; }, else => return self.fail("TODO implement optional_payload when operand is {}", .{operand}), } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = 
if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } const err_union_ty = self.air.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(); const payload_ty = err_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { if (err_ty.errorSetIsEmpty()) { break :result MCValue{ .immediate = 0 }; } if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { break :result operand; } const err_off = errUnionErrorOffset(payload_ty, self.target.*); switch (operand) { .stack_offset => |off| { const offset = off - @intCast(i32, err_off); break :result MCValue{ .stack_offset = offset }; }, .register => |reg| { // TODO reuse operand const lock = self.register_manager.lockRegAssumeUnused(reg); defer self.register_manager.unlockReg(lock); const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); if (err_off > 0) { const shift = @intCast(u6, err_off * 8); try self.genShiftBinOpMir(.shr, err_union_ty, result.register, .{ .immediate = shift }); } else { try self.truncateRegister(Type.anyerror, result.register); } break :result result; }, else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), } }; return self.finishAir(inst, result, 
.{ ty_op.operand, .none, .none }); } fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } const err_union_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, operand); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn genUnwrapErrorUnionPayloadMir( self: *Self, maybe_inst: ?Air.Inst.Index, err_union_ty: Type, err_union: MCValue, ) !MCValue { const payload_ty = err_union_ty.errorUnionPayload(); const result: MCValue = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { break :result MCValue.none; } const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); switch (err_union) { .stack_offset => |off| { const offset = off - @intCast(i32, payload_off); break :result MCValue{ .stack_offset = offset }; }, .register => |reg| { // TODO reuse operand const lock = self.register_manager.lockRegAssumeUnused(reg); defer self.register_manager.unlockReg(lock); const result_reg: Register = if (maybe_inst) |inst| (try self.copyToRegisterWithInstTracking(inst, err_union_ty, err_union)).register else try self.copyToTmpRegister(err_union_ty, err_union); if (payload_off > 0) { const shift = @intCast(u6, payload_off * 8); try self.genShiftBinOpMir(.shr, err_union_ty, result_reg, .{ .immediate = shift }); } else { try self.truncateRegister(payload_ty, result_reg); } break :result MCValue{ .register = result_reg }; }, else => return self.fail("TODO implement genUnwrapErrorUnionPayloadMir for {}", .{err_union}), } }; return result; } // *(E!T) -> E fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return 
self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } // *(E!T) -> *T fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airErrReturnTrace for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ .none, .none, .none }); } fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void { _ = inst; return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch}); } fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { _ = inst; return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch}); } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } const payload_ty = self.air.typeOf(ty_op.operand); const result: MCValue = result: { if (!payload_ty.hasRuntimeBits()) { break :result MCValue{ .immediate = 1 }; } const optional_ty = self.air.typeOfIndex(inst); 
const operand = try self.resolveInst(ty_op.operand); const operand_lock: ?RegisterLock = switch (operand) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); if (optional_ty.isPtrLikeOptional()) { // TODO should we check if we can reuse the operand? if (self.reuseOperand(inst, ty_op.operand, 0, operand)) { break :result operand; } break :result try self.copyToRegisterWithInstTracking(inst, payload_ty, operand); } const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*)); const optional_abi_align = optional_ty.abiAlignment(self.target.*); const payload_abi_size = @intCast(u32, payload_ty.abiSize(self.target.*)); const offset = optional_abi_size - payload_abi_size; const stack_offset = @intCast(i32, try self.allocMem(inst, optional_abi_size, optional_abi_align)); try self.genSetStack(Type.bool, stack_offset, .{ .immediate = 1 }, .{}); try self.genSetStack(payload_ty, stack_offset - @intCast(i32, offset), operand, .{}); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } const error_union_ty = self.air.getRefType(ty_op.ty); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { break :result operand; } const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); const 
err_off = errUnionErrorOffset(payload_ty, self.target.*); try self.genSetStack(payload_ty, stack_offset - @intCast(i32, payload_off), operand, .{}); try self.genSetStack(Type.anyerror, stack_offset - @intCast(i32, err_off), .{ .immediate = 0 }, .{}); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } const error_union_ty = self.air.getRefType(ty_op.ty); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { break :result operand; } const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); const err_off = errUnionErrorOffset(payload_ty, self.target.*); try self.genSetStack(Type.anyerror, stack_offset - @intCast(i32, err_off), operand, .{}); try self.genSetStack(payload_ty, stack_offset - @intCast(i32, payload_off), .undef, .{}); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const dst_mcv: MCValue = blk: { switch (operand) { .stack_offset => |off| { break :blk MCValue{ .stack_offset = off }; }, else => return self.fail("TODO implement slice_ptr for {}", 
.{operand}), } }; break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const dst_mcv: MCValue = blk: { switch (operand) { .stack_offset => |off| { break :blk MCValue{ .stack_offset = off - 8 }; }, else => return self.fail("TODO implement slice_len for {}", .{operand}), } }; break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register { const reg: Register = blk: { switch (index) { .immediate => |imm| { // Optimisation: if index MCValue is an immediate, we can multiply in `comptime` // and set the register directly to the scaled offset as an immediate. 
                const reg = try self.register_manager.allocReg(null, gp);
                try self.genSetReg(index_ty, reg, .{ .immediate = imm * elem_size });
                break :blk reg;
            },
            else => {
                // Runtime index: copy to a scratch register, then multiply in place.
                const reg = try self.copyToTmpRegister(index_ty, index);
                try self.genIntMulComplexOpMir(index_ty, .{ .register = reg }, .{ .immediate = elem_size });
                break :blk reg;
            },
        }
    };
    return reg;
}

/// Computes the address of `slice[index]` into a register:
/// loads the slice's ptr field into a scratch register, then adds the scaled
/// element offset. Currently only stack-resident slices are supported.
fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
    const slice_ty = self.air.typeOf(lhs);
    const slice_mcv = try self.resolveInst(lhs);
    // Lock any register operands so intermediate allocations can't clobber them.
    const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
    const elem_ty = slice_ty.childType();
    const elem_size = elem_ty.abiSize(self.target.*);
    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
    const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
    const index_ty = self.air.typeOf(rhs);
    const index_mcv = try self.resolveInst(rhs);
    const index_mcv_lock: ?RegisterLock = switch (index_mcv) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (index_mcv_lock) |lock| self.register_manager.unlockReg(lock);
    const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_size);
    const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
    defer self.register_manager.unlockReg(offset_reg_lock);
    const addr_reg = try self.register_manager.allocReg(null, gp);
    switch (slice_mcv) {
        .stack_offset => |off| {
            // mov reg, [rbp - 8]
            _ = try self.addInst(.{
                .tag = .mov,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = addr_reg.to64(),
                    .reg2 = .rbp,
                    .flags = 0b01,
                }),
                .data = .{ .imm = @bitCast(u32, -@intCast(i32, off)) },
            });
        },
        else => return self.fail("TODO implement slice_elem_ptr when slice is {}", .{slice_mcv}),
    }
    // TODO we could allocate register here, but need to expect addr register and potentially
    // offset register.
    // addr_reg += offset_reg; the sum is the element address.
    try self.genBinOpMir(.add, slice_ptr_field_type, .{ .register = addr_reg }, .{
        .register = offset_reg,
    });
    return MCValue{ .register = addr_reg.to64() };
}

/// slice[index] as a value: compute the element pointer, then load through it
/// into a newly allocated register or stack slot.
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
    const is_volatile = false; // TODO
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else result: {
        const slice_ty = self.air.typeOf(bin_op.lhs);
        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
        const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
        const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
        const dst_mcv = try self.allocRegOrMem(inst, false);
        try self.load(dst_mcv, elem_ptr, slice_ptr_field_type);
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// &slice[index]: thin wrapper over genSliceElemPtr.
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
    const result: MCValue = if (self.liveness.isUnused(inst))
        .dead
    else
        try self.genSliceElemPtr(extra.lhs, extra.rhs);
    return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}

/// array[index] as a value: materializes the array's base address in a register,
/// adds the scaled index, and loads the element.
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    const array_ty = self.air.typeOf(bin_op.lhs);
    const array = try self.resolveInst(bin_op.lhs);
    const array_lock: ?RegisterLock = switch (array) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (array_lock) |lock| self.register_manager.unlockReg(lock);
    const elem_ty = array_ty.childType();
    const elem_abi_size = elem_ty.abiSize(self.target.*);
    const index_ty = self.air.typeOf(bin_op.rhs);
    const index = try self.resolveInst(bin_op.rhs);
    const index_lock: ?RegisterLock = switch (index) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
    const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
    const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
    defer self.register_manager.unlockReg(offset_reg_lock);
    const addr_reg = try self.register_manager.allocReg(null, gp);
    // Get the array's base address into addr_reg, depending on where it lives.
    switch (array) {
        .register => {
            // Register-resident array: spill it to a fresh stack slot first so it
            // has an address, then take that address.
            const off = @intCast(i32, try self.allocMem(
                inst,
                @intCast(u32, array_ty.abiSize(self.target.*)),
                array_ty.abiAlignment(self.target.*),
            ));
            try self.genSetStack(array_ty, off, array, .{});
            // lea reg, [rbp]
            _ = try self.addInst(.{
                .tag = .lea,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = addr_reg.to64(),
                    .reg2 = .rbp,
                }),
                .data = .{ .imm = @bitCast(u32, -off) },
            });
        },
        .stack_offset => |off| {
            // lea reg, [rbp]
            _ = try self.addInst(.{
                .tag = .lea,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = addr_reg.to64(),
                    .reg2 = .rbp,
                }),
                .data = .{ .imm = @bitCast(u32, -off) },
            });
        },
        .memory, .linker_load => {
            try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array);
        },
        else => return self.fail("TODO implement array_elem_val when array is {}", .{array}),
    }
    // TODO we could allocate register here, but need to expect addr register and potentially
    // offset register.
    // addr_reg += offset_reg, then load the element into the destination.
    const dst_mcv = try self.allocRegOrMem(inst, false);
    try self.genBinOpMir(.add, Type.usize, .{ .register = addr_reg }, .{ .register = offset_reg });
    try self.load(dst_mcv, .{ .register = addr_reg.to64() }, array_ty);
    return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// ptr[index] as a value: same address computation as `airPtrElemPtr`, plus a
/// final dereferencing `mov`. Elements wider than 8 bytes are not yet handled.
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
    const is_volatile = false; // TODO
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (!is_volatile and self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    // this is identical to the `airPtrElemPtr` codegen expect here an
    // additional `mov` is needed at the end to get the actual value
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    const ptr = try self.resolveInst(bin_op.lhs);
    const ptr_lock: ?RegisterLock = switch (ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
    const elem_ty = ptr_ty.elemType2();
    const elem_abi_size = elem_ty.abiSize(self.target.*);
    const index_ty = self.air.typeOf(bin_op.rhs);
    const index = try self.resolveInst(bin_op.rhs);
    const index_lock: ?RegisterLock = switch (index) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
    const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
    const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
    defer self.register_manager.unlockReg(offset_reg_lock);
    const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
    try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
    const result: MCValue = result: {
        if (elem_abi_size > 8) {
            return self.fail("TODO copy value with size {} from pointer", .{elem_abi_size});
        } else {
            // mov dst_mcv, [dst_mcv]
            _ = try self.addInst(.{
                .tag = .mov,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 =
                    registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)),
                    .reg2 = dst_mcv.register,
                    .flags = 0b01,
                }),
                .data = .{ .imm = 0 },
            });
            break :result .{ .register = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)) };
        }
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// &ptr[index]: copies the pointer into a tracked register and adds the
/// scaled element offset in place.
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
    }
    const ptr_ty = self.air.typeOf(extra.lhs);
    const ptr = try self.resolveInst(extra.lhs);
    const ptr_lock: ?RegisterLock = switch (ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
    const elem_ty = ptr_ty.elemType2();
    const elem_abi_size = elem_ty.abiSize(self.target.*);
    const index_ty = self.air.typeOf(extra.rhs);
    const index = try self.resolveInst(extra.rhs);
    const index_lock: ?RegisterLock = switch (index) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
    const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
    const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
    defer self.register_manager.unlockReg(offset_reg_lock);
    const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
    try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
    return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
}

/// Stores a new tag value through a pointer to a tagged union. When the tag is
/// laid out after the payload (tag_align < payload_align), the pointer is first
/// advanced past the payload.
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    const union_ty = ptr_ty.childType();
    const tag_ty = self.air.typeOf(bin_op.rhs);
    const layout =
    union_ty.unionGetLayout(self.target.*);
    if (layout.tag_size == 0) {
        // Untagged (or zero-bit tag) union: nothing to store.
        return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    const ptr = try self.resolveInst(bin_op.lhs);
    const ptr_lock: ?RegisterLock = switch (ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
    const tag = try self.resolveInst(bin_op.rhs);
    const tag_lock: ?RegisterLock = switch (tag) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (tag_lock) |lock| self.register_manager.unlockReg(lock);
    // If the tag is stored after the payload, advance the pointer by payload_size.
    const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align < layout.payload_align) blk: {
        // TODO reusing the operand
        const reg = try self.copyToTmpRegister(ptr_ty, ptr);
        try self.genBinOpMir(.add, ptr_ty, .{ .register = reg }, .{ .immediate = layout.payload_size });
        break :blk MCValue{ .register = reg };
    } else ptr;
    try self.store(adjusted_ptr, tag, ptr_ty, tag_ty);
    return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Reads the tag out of a tagged union value, accounting for whether the tag
/// precedes or follows the payload in the union's layout.
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    const tag_ty = self.air.typeOfIndex(inst);
    const union_ty = self.air.typeOf(ty_op.operand);
    const layout = union_ty.unionGetLayout(self.target.*);
    if (layout.tag_size == 0) {
        return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
    }
    // TODO reusing the operand
    const operand = try self.resolveInst(ty_op.operand);
    const operand_lock: ?RegisterLock = switch (operand) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
    const tag_abi_size = tag_ty.abiSize(self.target.*);
    const dst_mcv: MCValue = blk: {
        switch (operand) {
            .stack_offset => |off| {
                if
                (tag_abi_size <= 8) {
                    // Stack-resident union: the tag lives either at the start or after
                    // the payload, depending on relative alignment.
                    const offset: i32 = if (layout.tag_align < layout.payload_align)
                        @intCast(i32, layout.payload_size)
                    else
                        0;
                    break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{
                        .stack_offset = off - offset,
                    });
                }
                return self.fail("TODO implement get_union_tag for ABI larger than 8 bytes and operand {}", .{operand});
            },
            .register => {
                // Register-resident union: shift the tag bits down into place.
                // NOTE(review): `payload_size * @sizeOf(usize)` is presumably meant as
                // bytes-to-bits (i.e. * 8); the two agree on x86_64 — confirm intent.
                const shift: u6 = if (layout.tag_align < layout.payload_align)
                    @intCast(u6, layout.payload_size * @sizeOf(usize))
                else
                    0;
                const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand);
                try self.genShiftBinOpMir(.shr, Type.usize, result.register, .{ .immediate = shift });
                break :blk MCValue{
                    .register = registerAlias(result.register, @intCast(u32, layout.tag_size)),
                };
            },
            else => return self.fail("TODO implement get_union_tag for {}", .{operand}),
        }
    };
    return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}

/// Not yet implemented: count leading zeroes.
fn airClz(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if (self.liveness.isUnused(inst))
        .dead
    else
        return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Not yet implemented: count trailing zeroes.
fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if (self.liveness.isUnused(inst))
        .dead
    else
        return self.fail("TODO implement airCtz for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Not yet implemented: population count.
fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if (self.liveness.isUnused(inst))
        .dead
    else
        return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Not yet implemented: byte swap.
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op =
    self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if (self.liveness.isUnused(inst))
        .dead
    else
        return self.fail("TODO implement airByteSwap for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Not yet implemented: bit reverse.
fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if (self.liveness.isUnused(inst))
        .dead
    else
        return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Not yet implemented: unary math ops (sin, cos, sqrt, ...).
fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst))
        .dead
    else
        return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// Attempts to reuse a dying operand's location (register or stack slot) as the
/// result of `inst`. Returns true on success, in which case ownership of the
/// location transfers to `inst` and the operand's death is consumed so the
/// generic death-processing code won't free it.
fn reuseOperand(
    self: *Self,
    inst: Air.Inst.Index,
    operand: Air.Inst.Ref,
    op_index: Liveness.OperandInt,
    mcv: MCValue,
) bool {
    // Only operands that die at this instruction may be reused.
    if (!self.liveness.operandDies(inst, op_index)) return false;
    switch (mcv) {
        .register => |reg| {
            // If it's in the registers table, need to associate the register with the
            // new instruction.
            if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
                if (!self.register_manager.isRegFree(reg)) {
                    self.register_manager.registers[index] = inst;
                }
            }
            log.debug("%{d} => {} (reused)", .{ inst, reg });
        },
        .stack_offset => |off| {
            log.debug("%{d} => stack offset {d} (reused)", .{ inst, off });
        },
        else => return false,
    }
    // Prevent the operand deaths processing code from deallocating it.
    self.liveness.clearOperandDeath(inst, op_index);
    // That makes us responsible for doing the rest of the stuff that processDeath would have done.
    // Record the operand as dead in the current branch's table ourselves.
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead);
    return true;
}

/// Loads the value pointed to by `ptr` into `dst_mcv`. Dispatches on where the
/// pointer itself lives; indirect cases first materialize the pointer in a
/// scratch register and recurse.
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
    const elem_ty = ptr_ty.elemType();
    const abi_size = elem_ty.abiSize(self.target.*);
    switch (ptr) {
        .none => unreachable,
        .undef => unreachable,
        .unreach => unreachable,
        .dead => unreachable,
        .eflags => unreachable,
        .register_overflow => unreachable,
        .immediate => |imm| {
            // Pointer is a known absolute address.
            try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm });
        },
        .stack_offset => {
            // Pointer value itself is spilled: load it into a register first.
            const reg = try self.copyToTmpRegister(ptr_ty, ptr);
            try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
        },
        .ptr_stack_offset => |off| {
            // Pointer to a stack slot: the pointee is the slot itself.
            try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off });
        },
        .register => |reg| {
            const reg_lock = self.register_manager.lockReg(reg);
            defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
            switch (dst_mcv) {
                .dead => unreachable,
                .undef => unreachable,
                .eflags => unreachable,
                .register => |dst_reg| {
                    // mov dst_reg, [reg]
                    _ = try self.addInst(.{
                        .tag = .mov,
                        .ops = Mir.Inst.Ops.encode(.{
                            .reg1 = registerAlias(dst_reg, @intCast(u32, abi_size)),
                            .reg2 = reg,
                            .flags = 0b01,
                        }),
                        .data = .{ .imm = 0 },
                    });
                },
                .stack_offset => |off| {
                    if (abi_size <= 8) {
                        // Small value: bounce through a scratch register.
                        const tmp_reg = try self.register_manager.allocReg(null, gp);
                        try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
                        return self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg }, .{});
                    }
                    // Large value: emit an inline memcpy.
                    try self.genInlineMemcpy(dst_mcv, ptr, .{ .immediate = abi_size }, .{});
                },
                else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}),
            }
        },
        .memory, .linker_load => {
            const reg = try self.copyToTmpRegister(ptr_ty, ptr);
            try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
        },
    }
}

/// AIR load: dereference the operand pointer into a new location, reusing the
/// operand's location when possible.
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const elem_ty = self.air.typeOfIndex(inst);
    const result: MCValue =
    result: {
        if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result MCValue.none;
        const ptr = try self.resolveInst(ty_op.operand);
        const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
        // Volatile loads must be emitted even when the result is unused.
        if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead;
        const dst_mcv: MCValue = blk: {
            if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
                // The MCValue that holds the pointer can be re-used as the value.
                break :blk ptr;
            } else {
                break :blk try self.allocRegOrMem(inst, true);
            }
        };
        try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Materializes a `.memory` (absolute address) or `.linker_load` (relocated
/// symbol) pointer into `reg`. Linker loads emit a PIC `lea` with a relocation
/// against the owning function's atom.
fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue) InnerError!void {
    switch (ptr) {
        .linker_load => |load_struct| {
            const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
            const mod = self.bin_file.options.module.?;
            const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
            // Symbol index comes from the MachO or COFF atom, whichever backend is active.
            const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
                fn_owner_decl.link.macho.sym_index
            else
                fn_owner_decl.link.coff.sym_index;
            const flags: u2 = switch (load_struct.type) {
                .got => 0b00,
                .direct => 0b01,
                .import => 0b10,
            };
            _ = try self.addInst(.{
                .tag = .lea_pic,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = registerAlias(reg, abi_size),
                    .flags = flags,
                }),
                .data = .{
                    .relocation = .{
                        .atom_index = atom_index,
                        .sym_index = load_struct.sym_index,
                    },
                },
            });
        },
        .memory => |addr| {
            // TODO: in case the address fits in an imm32 we can use [ds:imm32]
            // instead of wasting an instruction copying the address to a register
            try self.genSetReg(ptr_ty, reg, .{ .immediate = addr });
        },
        else => unreachable,
    }
}

/// Stores `value` through the pointer `ptr`. Dispatches on where the pointer
/// lives, then on where the value lives; indirect cases recurse after
/// materializing one side in a register.
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
    const abi_size = value_ty.abiSize(self.target.*);
    switch (ptr) {
        .none => unreachable,
        .undef => unreachable,
        .unreach => unreachable,
        .dead => unreachable,
        .eflags => unreachable,
        .register_overflow
        => unreachable,
        .immediate => |imm| {
            // Pointer is a known absolute address.
            try self.setRegOrMem(value_ty, .{ .memory = imm }, value);
        },
        .stack_offset => {
            // Pointer value itself is spilled: load it into a register first.
            const reg = try self.copyToTmpRegister(ptr_ty, ptr);
            try self.store(.{ .register = reg }, value, ptr_ty, value_ty);
        },
        .ptr_stack_offset => |off| {
            // Pointer to a stack slot: store directly into the slot.
            try self.genSetStack(value_ty, off, value, .{});
        },
        .register => |reg| {
            const reg_lock = self.register_manager.lockReg(reg);
            defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
            switch (value) {
                .none => unreachable,
                .undef => unreachable,
                .dead => unreachable,
                .unreach => unreachable,
                .eflags => unreachable,
                .immediate => |imm| {
                    switch (abi_size) {
                        1, 2, 4 => {
                            // TODO this is wasteful!
                            // introduce new MIR tag specifically for mov [reg + 0], imm
                            const payload = try self.addExtra(Mir.ImmPair{
                                .dest_off = 0,
                                .operand = @truncate(u32, imm),
                            });
                            _ = try self.addInst(.{
                                .tag = .mov_mem_imm,
                                .ops = Mir.Inst.Ops.encode(.{
                                    .reg1 = reg.to64(),
                                    // flags select the operand width.
                                    .flags = switch (abi_size) {
                                        1 => 0b00,
                                        2 => 0b01,
                                        4 => 0b10,
                                        else => unreachable,
                                    },
                                }),
                                .data = .{ .payload = payload },
                            });
                        },
                        8 => {
                            // TODO: optimization: if the imm is only using the lower
                            // 4 bytes and can be sign extended we can use a normal mov
                            // with indirect addressing (mov [reg64], imm32).

                            // movabs does not support indirect register addressing
                            // so we need an extra register and an extra mov.
                            const tmp_reg = try self.copyToTmpRegister(value_ty, value);
                            return self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
                        },
                        else => {
                            return self.fail("TODO implement set pointee with immediate of ABI size {d}", .{abi_size});
                        },
                    }
                },
                .register => |src_reg| {
                    try self.genInlineMemcpyRegisterRegister(value_ty, reg, src_reg, 0);
                },
                .linker_load,
                .memory,
                .stack_offset,
                => {
                    if (abi_size <= 8) {
                        const tmp_reg = try self.copyToTmpRegister(value_ty, value);
                        return self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
                    }
                    // Wide value: memcpy with the destination based at the pointer register.
                    try self.genInlineMemcpy(.{ .stack_offset = 0 }, value, .{ .immediate = abi_size }, .{
                        .source_stack_base = .rbp,
                        .dest_stack_base = reg.to64(),
                    });
                },
                else => |other| {
                    return self.fail("TODO implement set pointee with {}", .{other});
                },
            }
        },
        .linker_load, .memory => {
            const value_lock: ?RegisterLock = switch (value) {
                .register => |reg| self.register_manager.lockReg(reg),
                else => null,
            };
            defer if (value_lock) |lock| self.register_manager.unlockReg(lock);
            const addr_reg = try self.register_manager.allocReg(null, gp);
            const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
            defer self.register_manager.unlockReg(addr_reg_lock);
            try self.loadMemPtrIntoRegister(addr_reg, ptr_ty, ptr);
            // to get the actual address of the value we want to modify we have to go through the GOT
            // mov reg, [reg]
            _ = try self.addInst(.{
                .tag = .mov,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = addr_reg.to64(),
                    .reg2 = addr_reg.to64(),
                    .flags = 0b01,
                }),
                .data = .{ .imm = 0 },
            });
            const new_ptr = MCValue{ .register = addr_reg.to64() };
            switch (value) {
                .immediate => |imm| {
                    if (abi_size > 8) {
                        return self.fail("TODO saving imm to memory for abi_size {}", .{abi_size});
                    }
                    const payload = try self.addExtra(Mir.ImmPair{
                        .dest_off = 0,
                        // TODO check if this logic is correct
                        .operand = @truncate(u32, imm),
                    });
                    const flags: u2 = switch (abi_size) {
                        1 => 0b00,
                        2 => 0b01,
                        4 => 0b10,
                        8 => 0b11,
                        else => unreachable,
                    };
                    if (flags == 0b11) {
                        // mov [mem], imm32 sign-extends; verify the 64-bit immediate
                        // survives that extension.
                        const top_bits: u32 =
                        @intCast(u32, imm >> 32);
                        const can_extend = if (value_ty.isUnsignedInt())
                            (top_bits == 0) and (imm & 0x8000_0000) == 0
                        else
                            top_bits == 0xffff_ffff;
                        if (!can_extend) {
                            return self.fail("TODO imm64 would get incorrectly sign extended", .{});
                        }
                    }
                    _ = try self.addInst(.{
                        .tag = .mov_mem_imm,
                        .ops = Mir.Inst.Ops.encode(.{
                            .reg1 = addr_reg.to64(),
                            .flags = flags,
                        }),
                        .data = .{ .payload = payload },
                    });
                },
                .register => {
                    return self.store(new_ptr, value, ptr_ty, value_ty);
                },
                .linker_load, .memory => {
                    if (abi_size <= 8) {
                        // Small value: load it through its own address into a scratch
                        // register, then store via the register path.
                        const tmp_reg = try self.register_manager.allocReg(null, gp);
                        const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
                        defer self.register_manager.unlockReg(tmp_reg_lock);
                        try self.loadMemPtrIntoRegister(tmp_reg, value_ty, value);
                        _ = try self.addInst(.{
                            .tag = .mov,
                            .ops = Mir.Inst.Ops.encode(.{
                                .reg1 = tmp_reg,
                                .reg2 = tmp_reg,
                                .flags = 0b01,
                            }),
                            .data = .{ .imm = 0 },
                        });
                        return self.store(new_ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
                    }
                    try self.genInlineMemcpy(new_ptr, value, .{ .immediate = abi_size }, .{});
                },
                .stack_offset => {
                    if (abi_size <= 8) {
                        const tmp_reg = try self.copyToTmpRegister(value_ty, value);
                        return self.store(new_ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
                    }
                    try self.genInlineMemcpy(new_ptr, value, .{ .immediate = abi_size }, .{});
                },
                else => return self.fail("TODO implement storing {} to MCValue.memory", .{value}),
            }
        },
    }
}

/// AIR store: resolve pointer and value operands and delegate to `store`.
fn airStore(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr = try self.resolveInst(bin_op.lhs);
    const ptr_ty = self.air.typeOf(bin_op.lhs);
    const value = try self.resolveInst(bin_op.rhs);
    const value_ty = self.air.typeOf(bin_op.rhs);
    try self.store(ptr, value, ptr_ty, value_ty);
    return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// AIR struct_field_ptr: pointer to a named field, field index from extra data.
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra =
    self.air.extraData(Air.StructField, ty_pl.payload).data;
    const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index);
    return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}

/// AIR struct_field_ptr_index_N: same as airStructFieldPtr but with the field
/// index baked into the instruction tag.
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result = try self.structFieldPtr(inst, ty_op.operand, index);
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Computes a pointer to field `index` of the struct pointed to by `operand`:
/// the struct pointer plus the field's ABI offset.
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
    if (self.liveness.isUnused(inst)) {
        return MCValue.dead;
    }
    const mcv = try self.resolveInst(operand);
    const ptr_ty = self.air.typeOf(operand);
    const struct_ty = ptr_ty.childType();
    const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
    const dst_mcv: MCValue = result: {
        switch (mcv) {
            .stack_offset => {
                // Spilled pointer: copy it into a tracked register, then add the offset.
                const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
                    .immediate = struct_field_offset,
                });
                const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
                defer self.register_manager.unlockReg(offset_reg_lock);
                const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, mcv);
                try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
                break :result dst_mcv;
            },
            .ptr_stack_offset => |off| {
                // Pointer to a stack slot: fold the field offset into the slot offset
                // at compile time; no code emitted.
                const ptr_stack_offset = off - @intCast(i32, struct_field_offset);
                break :result MCValue{ .ptr_stack_offset = ptr_stack_offset };
            },
            .register => |reg| {
                const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
                defer self.register_manager.unlockReg(reg_lock);
                const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
                    .immediate = struct_field_offset,
                });
                const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
                defer self.register_manager.unlockReg(offset_reg_lock);
                // Reuse the operand register when the operand dies here.
                const can_reuse_operand = self.reuseOperand(inst, operand, 0, mcv);
                const result_reg: Register = blk: {
                    if
                    (can_reuse_operand) {
                        break :blk reg;
                    } else {
                        const result_reg = try self.register_manager.allocReg(inst, gp);
                        try self.genSetReg(ptr_ty, result_reg, mcv);
                        break :blk result_reg;
                    }
                };
                const result_reg_lock = self.register_manager.lockReg(result_reg);
                defer if (result_reg_lock) |lock| self.register_manager.unlockReg(lock);
                try self.genBinOpMir(.add, ptr_ty, .{ .register = result_reg }, .{ .register = offset_reg });
                break :result MCValue{ .register = result_reg };
            },
            else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
        }
    };
    return dst_mcv;
}

/// AIR struct_field_val: extracts field `index` of a struct value. Stack-resident
/// structs just adjust the offset; register-resident structs shift and mask the
/// field bits out; register_overflow yields either the wrapped result or the
/// overflow flag.
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
    const operand = extra.struct_operand;
    const index = extra.field_index;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ extra.struct_operand, .none, .none });
    }
    const mcv = try self.resolveInst(operand);
    const struct_ty = self.air.typeOf(operand);
    const struct_field_offset = struct_ty.structFieldOffset(index, self.target.*);
    const struct_field_ty = struct_ty.structFieldType(index);
    const result: MCValue = result: {
        switch (mcv) {
            .stack_offset => |off| {
                // Field shares the struct's stack slot at an adjusted offset.
                const stack_offset = off - @intCast(i32, struct_field_offset);
                break :result MCValue{ .stack_offset = stack_offset };
            },
            .register => |reg| {
                const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
                defer self.register_manager.unlockReg(reg_lock);
                const dst_mcv: MCValue = blk: {
                    if (self.reuseOperand(inst, operand, 0, mcv)) {
                        break :blk mcv;
                    } else {
                        const dst_mcv = try self.copyToRegisterWithInstTracking(inst, Type.usize, .{
                            .register = reg.to64(),
                        });
                        break :blk dst_mcv;
                    }
                };
                const dst_mcv_lock: ?RegisterLock = switch (dst_mcv) {
                    .register => |a_reg| self.register_manager.lockReg(a_reg),
                    else => null,
                };
                defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock);
                // Shift by struct_field_offset.
                // NOTE(review): `struct_field_offset * @sizeOf(usize)` is presumably
                // bytes-to-bits (i.e. * 8); the two agree on x86_64 — confirm intent.
                const shift = @intCast(u8, struct_field_offset * @sizeOf(usize));
                try self.genShiftBinOpMir(.shr, Type.usize, dst_mcv.register, .{ .immediate = shift });
                // Mask with reg.size() - struct_field_size
                const max_reg_bit_width = Register.rax.size();
                const mask_shift = @intCast(u6, (max_reg_bit_width - struct_field_ty.bitSize(self.target.*)));
                const mask = (~@as(u64, 0)) >> mask_shift;
                const tmp_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = mask });
                try self.genBinOpMir(.@"and", Type.usize, dst_mcv, .{ .register = tmp_reg });
                const signedness: std.builtin.Signedness = blk: {
                    if (struct_field_ty.zigTypeTag() != .Int) break :blk .unsigned;
                    break :blk struct_field_ty.intInfo(self.target.*).signedness;
                };
                const field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
                // Sign-extend small signed fields back up to the full register.
                if (signedness == .signed and field_size < 8) {
                    _ = try self.addInst(.{
                        .tag = .mov_sign_extend,
                        .ops = Mir.Inst.Ops.encode(.{
                            .reg1 = dst_mcv.register,
                            .reg2 = registerAlias(dst_mcv.register, field_size),
                        }),
                        .data = undefined,
                    });
                }
                break :result dst_mcv;
            },
            .register_overflow => |ro| {
                switch (index) {
                    0 => {
                        // Get wrapped value for overflow operation.
                        break :result MCValue{ .register = ro.reg };
                    },
                    1 => {
                        // Get overflow bit.
                        // Materialize the saved condition flags as a 0/1 byte (setcc).
                        const reg_lock = self.register_manager.lockRegAssumeUnused(ro.reg);
                        defer self.register_manager.unlockReg(reg_lock);
                        const dst_reg = try self.register_manager.allocReg(inst, gp);
                        _ = try self.addInst(.{
                            .tag = .cond_set_byte,
                            .ops = Mir.Inst.Ops.encode(.{
                                .reg1 = dst_reg.to8(),
                            }),
                            .data = .{ .cc = ro.eflags },
                        });
                        break :result MCValue{ .register = dst_reg.to8() };
                    },
                    else => unreachable,
                }
            },
            else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}),
        }
    };
    return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}

/// Not yet implemented: @fieldParentPtr.
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if (self.liveness.isUnused(inst))
        .dead
    else
        return self.fail("TODO implement airFieldParentPtr for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Emits a single MIR shift of `reg` by `shift`.
/// Immediate shifts of 0 emit nothing; shifts of 1 and other immediates use
/// dedicated encodings; otherwise the count is moved into cl (hence the clobber).
/// Clobbers .rcx for non-immediate shift value.
fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shift: MCValue) !void {
    assert(reg.to64() != .rcx);
    switch (tag) {
        .sal, .sar, .shl, .shr => {},
        else => unreachable,
    }
    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
    blk: {
        switch (shift) {
            .immediate => |imm| switch (imm) {
                0 => return,
                1 => {
                    // shift-by-one form (no immediate operand).
                    _ = try self.addInst(.{
                        .tag = tag,
                        .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }),
                        .data = undefined,
                    });
                    return;
                },
                else => {
                    // shift by imm8.
                    _ = try self.addInst(.{
                        .tag = tag,
                        .ops = Mir.Inst.Ops.encode(.{
                            .reg1 = registerAlias(reg, abi_size),
                            .flags = 0b10,
                        }),
                        .data = .{ .imm = @intCast(u8, imm) },
                    });
                    return;
                },
            },
            .register => |shift_reg| {
                // Count already in rcx: skip the setup below.
                if (shift_reg == .rcx) break :blk;
            },
            else => {},
        }
        // Move the runtime count into cl.
        assert(self.register_manager.isRegFree(.rcx));
        try self.register_manager.getReg(.rcx, null);
        try self.genSetReg(Type.u8, .rcx, shift);
    }
    // shift reg, cl
    _ = try self.addInst(.{
        .tag = tag,
        .ops = Mir.Inst.Ops.encode(.{
            .reg1 = registerAlias(reg, abi_size),
            .flags = 0b01,
        }),
        .data = undefined,
    });
}
/// Emits a shift operation (`shl`/`shr` and their exact variants).
/// Result is always a register.
/// Clobbers .rcx for non-immediate rhs, therefore care is needed to spill .rcx upfront.
/// Asserts .rcx is free.
fn genShiftBinOp(
    self: *Self,
    tag: Air.Inst.Tag,
    maybe_inst: ?Air.Inst.Index,
    lhs: MCValue,
    rhs: MCValue,
    lhs_ty: Type,
    rhs_ty: Type,
) !MCValue {
    // Only integer shifts of at most 8 bytes are supported for now.
    if (lhs_ty.zigTypeTag() == .Vector or lhs_ty.zigTypeTag() == .Float) {
        return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
    }
    if (lhs_ty.abiSize(self.target.*) > 8) {
        return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
    }

    // The shift amount must be a single byte (x86 takes the count in CL).
    assert(rhs_ty.abiSize(self.target.*) == 1);

    // Keep operand registers alive across the allocations/copies below.
    const lhs_lock: ?RegisterLock = switch (lhs) {
        .register => |reg| self.register_manager.lockReg(reg),
        else => null,
    };
    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

    const rhs_lock: ?RegisterLock = switch (rhs) {
        .register => |reg| self.register_manager.lockReg(reg),
        else => null,
    };
    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

    // Reserve .rcx so a register shift amount can be moved into CL.
    assert(self.register_manager.isRegFree(.rcx));
    try self.register_manager.getReg(.rcx, null);
    const rcx_lock = self.register_manager.lockRegAssumeUnused(.rcx);
    defer self.register_manager.unlockReg(rcx_lock);

    // Choose the destination: reuse lhs's register when the operand dies here,
    // otherwise copy lhs into a fresh register.
    const dst: MCValue = blk: {
        if (maybe_inst) |inst| {
            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
            // TODO dst can also be a memory location
            if (self.reuseOperand(inst, bin_op.lhs, 0, lhs) and lhs.isRegister()) {
                break :blk lhs;
            }
            break :blk try self.copyToRegisterWithInstTracking(inst, lhs_ty, lhs);
        }
        break :blk MCValue{ .register = try self.copyToTmpRegister(lhs_ty, lhs) };
    };

    // Arithmetic (sal/sar) vs logical (shl/shr) form depends on signedness.
    const signedness = lhs_ty.intInfo(self.target.*).signedness;
    switch (tag) {
        .shl => try self.genShiftBinOpMir(switch (signedness) {
            .signed => .sal,
            .unsigned => .shl,
        }, lhs_ty, dst.register, rhs),
        .shl_exact => try self.genShiftBinOpMir(.shl, lhs_ty, dst.register, rhs),
        .shr,
        .shr_exact,
        => try self.genShiftBinOpMir(switch (signedness) {
            .signed => .sar,
            .unsigned => .shr,
        }, lhs_ty, dst.register,
rhs),
        else => unreachable,
    }

    return dst;
}

/// Emits an integer multiply/divide/remainder using the x86 one-operand
/// mul/imul/div/idiv family, plus inline sequences for `mod` and `div_floor`.
/// Result is always a register.
/// Clobbers .rax and .rdx therefore care is needed to spill .rax and .rdx upfront.
/// Asserts .rax and .rdx are free.
fn genMulDivBinOp(
    self: *Self,
    tag: Air.Inst.Tag,
    maybe_inst: ?Air.Inst.Index,
    ty: Type,
    lhs: MCValue,
    rhs: MCValue,
) !MCValue {
    if (ty.zigTypeTag() == .Vector or ty.zigTypeTag() == .Float) {
        return self.fail("TODO implement genBinOp for {}", .{ty.fmtDebug()});
    }
    if (ty.abiSize(self.target.*) > 8) {
        return self.fail("TODO implement genBinOp for {}", .{ty.fmtDebug()});
    }
    if (tag == .div_float) {
        return self.fail("TODO implement genMulDivBinOp for div_float", .{});
    }

    assert(self.register_manager.isRegFree(.rax));
    assert(self.register_manager.isRegFree(.rdx));

    const reg_locks = self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx });
    defer for (reg_locks) |reg| {
        self.register_manager.unlockReg(reg);
    };

    const int_info = ty.intInfo(self.target.*);
    const signedness = int_info.signedness;
    switch (tag) {
        .mul,
        .mulwrap,
        .rem,
        .div_trunc,
        .div_exact,
        => {
            // mul/imul put the (low) product in rax; div/idiv put the quotient
            // in rax and the remainder in rdx — track the result accordingly.
            const track_inst_rax: ?Air.Inst.Index = switch (tag) {
                .mul, .mulwrap, .div_exact, .div_trunc => maybe_inst,
                else => null,
            };
            const track_inst_rdx: ?Air.Inst.Index = switch (tag) {
                .rem => maybe_inst,
                else => null,
            };
            try self.register_manager.getReg(.rax, track_inst_rax);
            try self.register_manager.getReg(.rdx, track_inst_rdx);

            const mir_tag: Mir.Inst.Tag = switch (signedness) {
                .signed => switch (tag) {
                    .mul, .mulwrap => Mir.Inst.Tag.imul,
                    .div_trunc, .div_exact, .rem => Mir.Inst.Tag.idiv,
                    else => unreachable,
                },
                .unsigned => switch (tag) {
                    .mul, .mulwrap => Mir.Inst.Tag.mul,
                    .div_trunc, .div_exact, .rem => Mir.Inst.Tag.div,
                    else => unreachable,
                },
            };

            // Fix: pass the operand's actual signedness (was hard-coded
            // `.signed`). Unsigned div requires rdx to be zeroed before the
            // instruction, not sign-extended from rax via cdq/cqo; the `.mod`
            // and `.div_floor` unsigned paths below already pass `.unsigned`.
            try self.genIntMulDivOpMir(mir_tag, ty, signedness, lhs, rhs);

            switch (signedness) {
                .signed => switch (tag) {
                    .mul, .mulwrap, .div_trunc, .div_exact => return MCValue{ .register = .rax },
                    .rem => return MCValue{ .register = .rdx },
                    else => unreachable,
                },
                .unsigned => switch (tag) {
                    .mul, .mulwrap, .div_trunc, .div_exact => return MCValue{
                        .register = registerAlias(.rax, @intCast(u32, ty.abiSize(self.target.*))),
                    },
                    .rem => return MCValue{
                        .register = registerAlias(.rdx, @intCast(u32, ty.abiSize(self.target.*))),
                    },
                    else => unreachable,
                },
            }
        },
        .mod => {
            try self.register_manager.getReg(.rax, null);
            try self.register_manager.getReg(.rdx, if (signedness == .unsigned) maybe_inst else null);

            switch (signedness) {
                .signed => {
                    // a mod b == a - @divFloor(a, b) * b
                    const div_floor = try self.genInlineIntDivFloor(ty, lhs, rhs);
                    try self.genIntMulComplexOpMir(ty, div_floor, rhs);
                    const div_floor_lock = self.register_manager.lockReg(div_floor.register);
                    defer if (div_floor_lock) |lock| self.register_manager.unlockReg(lock);

                    const result: MCValue = if (maybe_inst) |inst|
                        try self.copyToRegisterWithInstTracking(inst, ty, lhs)
                    else
                        MCValue{ .register = try self.copyToTmpRegister(ty, lhs) };
                    try self.genBinOpMir(.sub, ty, result, div_floor);

                    return result;
                },
                .unsigned => {
                    // For unsigned operands mod == rem: take div's remainder in rdx.
                    try self.genIntMulDivOpMir(.div, ty, .unsigned, lhs, rhs);
                    return MCValue{ .register = registerAlias(.rdx, @intCast(u32, ty.abiSize(self.target.*))) };
                },
            }
        },
        .div_floor => {
            try self.register_manager.getReg(.rax, if (signedness == .unsigned) maybe_inst else null);
            try self.register_manager.getReg(.rdx, null);

            const lhs_lock: ?RegisterLock = switch (lhs) {
                .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                else => null,
            };
            defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

            // Signed div_floor is an inline sequence needing rhs in a register;
            // unsigned floor division equals truncating division, so rhs can
            // be used as-is.
            const actual_rhs: MCValue = blk: {
                switch (signedness) {
                    .signed => {
                        const rhs_lock: ?RegisterLock = switch (rhs) {
                            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                            else => null,
                        };
                        defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

                        if (maybe_inst) |inst| {
                            break :blk try self.copyToRegisterWithInstTracking(inst, ty, rhs);
                        }
                        break :blk MCValue{ .register = try self.copyToTmpRegister(ty, rhs) };
                    },
                    .unsigned => break :blk rhs,
                }
            };
            const rhs_lock: ?RegisterLock = switch (actual_rhs) {
                .register => |reg| self.register_manager.lockReg(reg),
                else => null,
            };
            defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

            const result: MCValue = result: {
                switch (signedness) {
                    .signed => break :result try self.genInlineIntDivFloor(ty, lhs, actual_rhs),
                    .unsigned => {
                        try self.genIntMulDivOpMir(.div, ty, .unsigned, lhs, actual_rhs);
                        break :result MCValue{
                            .register = registerAlias(.rax, @intCast(u32, ty.abiSize(self.target.*))),
                        };
                    },
                }
            };
            return result;
        },
        else => unreachable,
    }
}

/// Emits a generic two-operand binary operation (add/sub/ptr arith/bitwise).
/// Result is always a register.
fn genBinOp(
    self: *Self,
    maybe_inst: ?Air.Inst.Index,
    tag: Air.Inst.Tag,
    lhs_air: Air.Inst.Ref,
    rhs_air: Air.Inst.Ref,
) !MCValue {
    const lhs = try self.resolveInst(lhs_air);
    const rhs = try self.resolveInst(rhs_air);
    const lhs_ty = self.air.typeOf(lhs_air);
    const rhs_ty = self.air.typeOf(rhs_air);

    if (lhs_ty.zigTypeTag() == .Vector) {
        return self.fail("TODO implement genBinOp for {}", .{lhs_ty.fmtDebug()});
    }
    if (lhs_ty.abiSize(self.target.*) > 8) {
        return self.fail("TODO implement genBinOp for {}", .{lhs_ty.fmtDebug()});
    }

    // Commutative operations may reuse rhs's register as the destination
    // (operands flipped).
    const is_commutative: bool = switch (tag) {
        .add,
        .addwrap,
        .bool_or,
        .bit_or,
        .bool_and,
        .bit_and,
        .xor,
        => true,
        else => false,
    };

    const lhs_lock: ?RegisterLock = switch (lhs) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);

    const rhs_lock: ?RegisterLock = switch (rhs) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);

    var flipped: bool = false;
    const dst_mcv: MCValue = blk: {
        if (maybe_inst) |inst| {
            if (self.reuseOperand(inst, lhs_air, 0, lhs) and lhs.isRegister()) {
                break :blk lhs;
            }
            if (is_commutative and self.reuseOperand(inst, rhs_air, 1, rhs) and rhs.isRegister()) {
                flipped = true;
                break :blk rhs;
            }
            break :blk try self.copyToRegisterWithInstTracking(inst, lhs_ty, lhs);
        }
        break :blk MCValue{ .register = try
self.copyToTmpRegister(lhs_ty, lhs) }; }; const dst_mcv_lock: ?RegisterLock = switch (dst_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, }; defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock); const src_mcv: MCValue = blk: { const mcv = if (flipped) lhs else rhs; if (mcv.isRegister() or mcv.isMemory()) break :blk mcv; break :blk MCValue{ .register = try self.copyToTmpRegister(rhs_ty, mcv) }; }; const src_mcv_lock: ?RegisterLock = switch (src_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, }; defer if (src_mcv_lock) |lock| self.register_manager.unlockReg(lock); switch (tag) { .add, .addwrap, => try self.genBinOpMir(.add, lhs_ty, dst_mcv, src_mcv), .sub, .subwrap, => try self.genBinOpMir(.sub, lhs_ty, dst_mcv, src_mcv), .ptr_add, .ptr_sub, => { const mir_tag: Mir.Inst.Tag = switch (tag) { .ptr_add => .add, .ptr_sub => .sub, else => unreachable, }; const elem_size = lhs_ty.elemType2().abiSize(self.target.*); try self.genIntMulComplexOpMir(rhs_ty, src_mcv, .{ .immediate = elem_size }); try self.genBinOpMir(mir_tag, lhs_ty, dst_mcv, src_mcv); }, .bool_or, .bit_or, => try self.genBinOpMir(.@"or", lhs_ty, dst_mcv, src_mcv), .bool_and, .bit_and, => try self.genBinOpMir(.@"and", lhs_ty, dst_mcv, src_mcv), .xor => try self.genBinOpMir(.xor, lhs_ty, dst_mcv, src_mcv), else => unreachable, } return dst_mcv; } fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); switch (dst_mcv) { .none => unreachable, .undef => unreachable, .dead, .unreach, .immediate => unreachable, .eflags => unreachable, .register_overflow => unreachable, .register => |dst_reg| { switch (src_mcv) { .none => unreachable, .undef => unreachable, .dead, .unreach => unreachable, .register_overflow => unreachable, .ptr_stack_offset => { const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) 
|lock| self.register_manager.unlockReg(lock); const reg = try self.copyToTmpRegister(dst_ty, src_mcv); return self.genBinOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg }); }, .register => |src_reg| switch (dst_ty.zigTypeTag()) { .Float => { if (intrinsicsAllowed(self.target.*, dst_ty)) { const actual_tag: Mir.Inst.Tag = switch (dst_ty.tag()) { .f32 => switch (mir_tag) { .add => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.add_f32_avx else Mir.Inst.Tag.add_f32_sse, .cmp => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.cmp_f32_avx else Mir.Inst.Tag.cmp_f32_sse, else => return self.fail("TODO genBinOpMir for f32 register-register with MIR tag {}", .{mir_tag}), }, .f64 => switch (mir_tag) { .add => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.add_f64_avx else Mir.Inst.Tag.add_f64_sse, .cmp => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.cmp_f64_avx else Mir.Inst.Tag.cmp_f64_sse, else => return self.fail("TODO genBinOpMir for f64 register-register with MIR tag {}", .{mir_tag}), }, else => return self.fail("TODO genBinOpMir for float register-register and type {}", .{dst_ty.fmtDebug()}), }; _ = try self.addInst(.{ .tag = actual_tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = dst_reg.to128(), .reg2 = src_reg.to128(), }), .data = undefined, }); return; } return self.fail("TODO genBinOpMir for float register-register and no intrinsics", .{}); }, else => { _ = try self.addInst(.{ .tag = mir_tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size), .reg2 = registerAlias(src_reg, abi_size), }), .data = undefined, }); }, }, .immediate => |imm| { _ = try self.addInst(.{ .tag = mir_tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size) }), .data = .{ .imm = @truncate(u32, imm) }, }); }, .memory, .linker_load, .eflags, => { assert(abi_size <= 8); const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock); const reg = try self.copyToTmpRegister(dst_ty, src_mcv); 
return self.genBinOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg }); }, .stack_offset => |off| { if (off > math.maxInt(i32)) { return self.fail("stack offset too large", .{}); } _ = try self.addInst(.{ .tag = mir_tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size), .reg2 = .rbp, .flags = 0b01, }), .data = .{ .imm = @bitCast(u32, -off) }, }); }, } }, .ptr_stack_offset, .stack_offset => |off| { if (off > math.maxInt(i32)) { return self.fail("stack offset too large", .{}); } if (abi_size > 8) { return self.fail("TODO implement {} for stack dst with large ABI", .{mir_tag}); } switch (src_mcv) { .none => unreachable, .undef => unreachable, .dead, .unreach => unreachable, .register_overflow => unreachable, .register => |src_reg| { _ = try self.addInst(.{ .tag = mir_tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp, .reg2 = registerAlias(src_reg, abi_size), .flags = 0b10, }), .data = .{ .imm = @bitCast(u32, -off) }, }); }, .immediate => |imm| { const tag: Mir.Inst.Tag = switch (mir_tag) { .add => .add_mem_imm, .@"or" => .or_mem_imm, .@"and" => .and_mem_imm, .sub => .sub_mem_imm, .xor => .xor_mem_imm, .cmp => .cmp_mem_imm, else => unreachable, }; const flags: u2 = switch (abi_size) { 1 => 0b00, 2 => 0b01, 4 => 0b10, 8 => 0b11, else => unreachable, }; const payload = try self.addExtra(Mir.ImmPair{ .dest_off = @bitCast(u32, -off), .operand = @truncate(u32, imm), }); _ = try self.addInst(.{ .tag = tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp, .flags = flags, }), .data = .{ .payload = payload }, }); }, .memory, .stack_offset, .ptr_stack_offset, => { return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{}); }, .linker_load => { return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{}); }, .eflags => { return self.fail("TODO implement x86 ADD/SUB/CMP source eflags", .{}); }, } }, .memory => { return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{}); }, .linker_load => { return 
self.fail("TODO implement x86 ADD/SUB/CMP destination symbol at index", .{}); }, } } /// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. /// Does not support byte-size operands. fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); switch (dst_mcv) { .none => unreachable, .undef => unreachable, .dead, .unreach, .immediate => unreachable, .eflags => unreachable, .ptr_stack_offset => unreachable, .register_overflow => unreachable, .register => |dst_reg| { switch (src_mcv) { .none => unreachable, .undef => try self.genSetReg(dst_ty, dst_reg, .undef), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, .register_overflow => unreachable, .register => |src_reg| { // register, register _ = try self.addInst(.{ .tag = .imul_complex, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size), .reg2 = registerAlias(src_reg, abi_size), }), .data = undefined, }); }, .immediate => |imm| { // TODO take into account the type's ABI size when selecting the register alias // register, immediate if (math.minInt(i32) <= imm and imm <= math.maxInt(i32)) { _ = try self.addInst(.{ .tag = .imul_complex, .ops = Mir.Inst.Ops.encode(.{ .reg1 = dst_reg.to32(), .reg2 = dst_reg.to32(), .flags = 0b10, }), .data = .{ .imm = @truncate(u32, imm) }, }); } else { // TODO verify we don't spill and assign to the same register as dst_mcv const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv); return self.genIntMulComplexOpMir(dst_ty, dst_mcv, MCValue{ .register = src_reg }); } }, .stack_offset => |off| { _ = try self.addInst(.{ .tag = .imul_complex, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size), .reg2 = .rbp, .flags = 0b01, }), .data = .{ .imm = @bitCast(u32, -off) }, }); }, .memory => { return self.fail("TODO implement x86 multiply source memory", .{}); }, .linker_load => { 
return self.fail("TODO implement x86 multiply source symbol at index in linker", .{}); }, .eflags => { return self.fail("TODO implement x86 multiply source eflags", .{}); }, } }, .stack_offset => |off| { switch (src_mcv) { .none => unreachable, .undef => return self.genSetStack(dst_ty, off, .undef, .{}), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, .register_overflow => unreachable, .register => |src_reg| { // copy dst to a register const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv); // multiply into dst_reg // register, register _ = try self.addInst(.{ .tag = .imul_complex, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size), .reg2 = registerAlias(src_reg, abi_size), }), .data = undefined, }); // copy dst_reg back out return self.genSetStack(dst_ty, off, .{ .register = dst_reg }, .{}); }, .immediate => { // copy dst to a register const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv); const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_reg_lock); try self.genIntMulComplexOpMir(dst_ty, .{ .register = dst_reg }, src_mcv); return self.genSetStack(dst_ty, off, .{ .register = dst_reg }, .{}); }, .memory, .stack_offset => { return self.fail("TODO implement x86 multiply source memory", .{}); }, .linker_load => { return self.fail("TODO implement x86 multiply source symbol at index in linker", .{}); }, .eflags => { return self.fail("TODO implement x86 multiply source eflags", .{}); }, } }, .memory => { return self.fail("TODO implement x86 multiply destination memory", .{}); }, .linker_load => { return self.fail("TODO implement x86 multiply destination symbol at index in linker", .{}); }, } } fn airArg(self: *Self, inst: Air.Inst.Index) !void { const arg_index = self.arg_index; self.arg_index += 1; const ty = self.air.typeOfIndex(inst); const mcv = self.args[arg_index]; const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index); if 
(self.liveness.isUnused(inst)) return self.finishAirBookkeeping();

    // Materialize the ABI-provided argument location as an MCValue.
    const dst_mcv: MCValue = switch (mcv) {
        .register => |reg| blk: {
            self.register_manager.getRegAssumeFree(reg.to64(), inst);
            break :blk MCValue{ .register = reg };
        },
        .stack_offset => |off| blk: {
            // Caller-pushed args live above our frame; the +16 presumably
            // skips the saved rbp + return address — TODO confirm against
            // the frame layout set up in the prologue.
            const offset = @intCast(i32, self.max_end_stack) - off + 16;
            break :blk MCValue{ .stack_offset = -offset };
        },
        else => return self.fail("TODO implement arg for {}", .{mcv}),
    };

    try self.genArgDbgInfo(ty, name, dst_mcv);
    return self.finishAir(inst, dst_mcv, .{ .none, .none, .none });
}

/// Emits debug info describing where a function parameter lives: a register
/// or an rbp-relative stack slot. No-op for .plan9 and .none outputs.
fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
    switch (self.debug_output) {
        .dwarf => |dw| {
            const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) {
                .register => |reg| .{ .register = reg.dwarfLocOp() },
                .stack_offset => |off| .{
                    .stack = .{
                        // TODO handle -fomit-frame-pointer
                        .fp_register = Register.rbp.dwarfLocOpDeref(),
                        .offset = -off,
                    },
                },
                else => unreachable, // not a valid function parameter
            };
            try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc);
        },
        .plan9 => {},
        .none => {},
    }
}

/// Emits debug info for a local variable (`dbg_var_ptr` / `dbg_var_val`),
/// mapping its MCValue to a DWARF location description. Locations that are
/// not yet supported are logged and emitted as `.nop`. No-op for .plan9 and
/// .none outputs.
fn genVarDbgInfo(
    self: Self,
    tag: Air.Inst.Tag,
    ty: Type,
    mcv: MCValue,
    name: [:0]const u8,
) !void {
    // dbg_var_ptr describes a pointer to the variable; dbg_var_val the value.
    const is_ptr = switch (tag) {
        .dbg_var_ptr => true,
        .dbg_var_val => false,
        else => unreachable,
    };
    switch (self.debug_output) {
        .dwarf => |dw| {
            const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) {
                .register => |reg| .{ .register = reg.dwarfLocOp() },
                .ptr_stack_offset,
                .stack_offset,
                => |off| .{ .stack = .{
                    .fp_register = Register.rbp.dwarfLocOpDeref(),
                    .offset = -off,
                } },
                .memory => |address| .{ .memory = address },
                .linker_load => |linker_load| .{ .linker_load = linker_load },
                .immediate => |x| .{ .immediate = x },
                .undef => .undef,
                .none => .none,
                else => blk: {
                    log.debug("TODO generate debug info for {}", .{mcv});
                    break :blk .nop;
                },
            };
            try dw.genVarDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, is_ptr, loc);
        },
        .plan9 => {},
        .none => {},
    }
}
/// Lowers a breakpoint instruction by emitting an interrupt (int3-style trap).
fn airBreakpoint(self: *Self) !void {
    _ = try self.addInst(.{
        .tag = .interrupt,
        .ops = Mir.Inst.Ops.encode(.{}),
        .data = undefined,
    });
    return self.finishAirBookkeeping();
}

/// Lowers @returnAddress. Currently unimplemented unless the result is unused.
fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for x86_64", .{});
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

/// Lowers @frameAddress. Currently unimplemented unless the result is unused.
fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for x86_64", .{});
    return self.finishAir(inst, result, .{ .none, .none, .none });
}

/// Lowers @fence. Currently unimplemented for this backend.
fn airFence(self: *Self) !void {
    return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch});
    //return self.finishAirBookkeeping();
}

/// Lowers a function call: resolves the callee's calling convention, spills
/// eflags and caller-preserved registers, sets up arguments (registers and
/// stack), adjusts the stack, then emits the call (dispatching on the active
/// linker backend) and recovers the return value.
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
    if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{});
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const callee = pl_op.operand;
    const extra = self.air.extraData(Air.Call, pl_op.payload);
    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
    const ty = self.air.typeOf(callee);

    // The callee may be a function or a pointer to one.
    const fn_ty = switch (ty.zigTypeTag()) {
        .Fn => ty,
        .Pointer => ty.childType(),
        else => unreachable,
    };

    var info = try self.resolveCallingConventionValues(fn_ty);
    defer info.deinit(self);

    // The call clobbers eflags and all caller-preserved registers.
    try self.spillEflagsIfOccupied();

    for (abi.getCallerPreservedRegs(self.target.*)) |reg| {
        try self.register_manager.getReg(reg, null);
    }

    // If the return value is passed on the stack, allocate space for it and
    // pass its address in the first C ABI integer parameter register.
    const ret_reg_lock: ?RegisterLock = blk: {
        if (info.return_value == .stack_offset) {
            const ret_ty = fn_ty.fnReturnType();
            const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
            const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
            const stack_offset = @intCast(i32, try self.allocMem(inst, ret_abi_size, ret_abi_align));
log.debug("airCall: return value on stack at offset {}", .{stack_offset}); const ret_reg = abi.getCAbiIntParamRegs(self.target.*)[0]; try self.register_manager.getReg(ret_reg, null); try self.genSetReg(Type.usize, ret_reg, .{ .ptr_stack_offset = stack_offset }); const ret_reg_lock = self.register_manager.lockRegAssumeUnused(ret_reg); info.return_value.stack_offset = stack_offset; break :blk ret_reg_lock; } break :blk null; }; defer if (ret_reg_lock) |lock| self.register_manager.unlockReg(lock); for (args) |arg, arg_i| { const mc_arg = info.args[arg_i]; const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. switch (mc_arg) { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => |off| { // TODO rewrite using `genSetStack` try self.genSetStackArg(arg_ty, off, arg_mcv); }, .ptr_stack_offset => { return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, .undef => unreachable, .immediate => unreachable, .unreach => unreachable, .dead => unreachable, .memory => unreachable, .linker_load => unreachable, .eflags => unreachable, .register_overflow => unreachable, } } if (info.stack_byte_count > 0) { // Adjust the stack _ = try self.addInst(.{ .tag = .sub, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }), .data = .{ .imm = info.stack_byte_count }, }); } // Due to incremental compilation, how function calls are generated depends // on linking. 
const mod = self.bin_file.options.module.?; if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const fn_owner_decl = mod.declPtr(func.owner_decl); const got_addr = blk: { const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes); }; _ = try self.addInst(.{ .tag = .call, .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }), .data = .{ .imm = @truncate(u32, got_addr) }, }); } else if (func_value.castTag(.extern_fn)) |_| { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); } } else { assert(ty.zigTypeTag() == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(Type.initTag(.usize), .rax, mcv); _ = try self.addInst(.{ .tag = .call, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .flags = 0b01, }), .data = undefined, }); } } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const fn_owner_decl = mod.declPtr(func.owner_decl); try self.genSetReg(Type.initTag(.usize), .rax, .{ .linker_load = .{ .type = .got, .sym_index = fn_owner_decl.link.coff.sym_index, }, }); _ = try self.addInst(.{ .tag = .call, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .flags = 0b01, }), .data = undefined, }); } else if (func_value.castTag(.extern_fn)) |func_payload| { const extern_fn = func_payload.data; const decl_name = mod.declPtr(extern_fn.owner_decl).name; if (extern_fn.lib_name) |lib_name| { log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{ decl_name, lib_name, }); } const sym_index = try 
coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0)); try self.genSetReg(Type.initTag(.usize), .rax, .{ .linker_load = .{ .type = .import, .sym_index = sym_index, }, }); _ = try self.addInst(.{ .tag = .call, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .flags = 0b01, }), .data = undefined, }); } else { return self.fail("TODO implement calling bitcasted functions", .{}); } } else { assert(ty.zigTypeTag() == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(Type.initTag(.usize), .rax, mcv); _ = try self.addInst(.{ .tag = .call, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .flags = 0b01, }), .data = undefined, }); } } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const fn_owner_decl = mod.declPtr(func.owner_decl); const sym_index = fn_owner_decl.link.macho.sym_index; try self.genSetReg(Type.initTag(.usize), .rax, .{ .linker_load = .{ .type = .got, .sym_index = sym_index, }, }); // callq *%rax _ = try self.addInst(.{ .tag = .call, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .flags = 0b01, }), .data = undefined, }); } else if (func_value.castTag(.extern_fn)) |func_payload| { const extern_fn = func_payload.data; const decl_name = mod.declPtr(extern_fn.owner_decl).name; if (extern_fn.lib_name) |lib_name| { log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{ decl_name, lib_name, }); } const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0)); _ = try self.addInst(.{ .tag = .call_extern, .ops = undefined, .data = .{ .relocation = .{ .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index, .sym_index = sym_index, }, }, }); } else { return self.fail("TODO implement calling bitcasted functions", .{}); } } else { assert(ty.zigTypeTag() == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(Type.initTag(.usize), .rax, mcv); _ = try self.addInst(.{ 
.tag = .call, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .flags = 0b01, }), .data = undefined, }); } } else if (self.bin_file.cast(link.File.Plan9)) |p9| { if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { try p9.seeDecl(func_payload.data.owner_decl); const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = p9.bases.data; const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; _ = try self.addInst(.{ .tag = .call, .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }), .data = .{ .imm = @intCast(u32, fn_got_addr) }, }); } else return self.fail("TODO implement calling extern fn on plan9", .{}); } else { assert(ty.zigTypeTag() == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(Type.initTag(.usize), .rax, mcv); _ = try self.addInst(.{ .tag = .call, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .flags = 0b01, }), .data = undefined, }); } } else unreachable; if (info.stack_byte_count > 0) { // Readjust the stack _ = try self.addInst(.{ .tag = .add, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }), .data = .{ .imm = info.stack_byte_count }, }); } const result: MCValue = result: { switch (info.return_value) { .register => { // Save function return value in a new register break :result try self.copyToRegisterWithInstTracking( inst, self.air.typeOfIndex(inst), info.return_value, ); }, else => {}, } break :result info.return_value; }; if (args.len <= Liveness.bpi - 2) { var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); buf[0] = callee; std.mem.copy(Air.Inst.Ref, buf[1..], args); return self.finishAir(inst, result, buf); } var bt = try self.iterateBigTomb(inst, 1 + args.len); bt.feed(callee); for (args) |arg| { bt.feed(arg); } return bt.finishAir(result); } fn airRet(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; 
const operand = try self.resolveInst(un_op); const ret_ty = self.fn_type.fnReturnType(); switch (self.ret_mcv) { .immediate => { assert(ret_ty.isError()); }, .stack_offset => { const reg = try self.copyToTmpRegister(Type.usize, self.ret_mcv); const reg_lock = self.register_manager.lockRegAssumeUnused(reg); defer self.register_manager.unlockReg(reg_lock); try self.genSetStack(ret_ty, 0, operand, .{ .source_stack_base = .rbp, .dest_stack_base = reg, }); }, else => { try self.setRegOrMem(ret_ty, self.ret_mcv, operand); }, } // TODO when implementing defer, this will need to jump to the appropriate defer expression. // TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction // which is available if the jump is 127 bytes or less forward. const jmp_reloc = try self.addInst(.{ .tag = .jmp, .ops = Mir.Inst.Ops.encode(.{}), .data = .{ .inst = undefined }, }); try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc); return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.air.typeOf(un_op); const elem_ty = ptr_ty.elemType(); switch (self.ret_mcv) { .immediate => { assert(elem_ty.isError()); }, .stack_offset => { const reg = try self.copyToTmpRegister(Type.usize, self.ret_mcv); const reg_lock = self.register_manager.lockRegAssumeUnused(reg); defer self.register_manager.unlockReg(reg_lock); try self.genInlineMemcpy(.{ .stack_offset = 0 }, ptr, .{ .immediate = elem_ty.abiSize(self.target.*) }, .{ .source_stack_base = .rbp, .dest_stack_base = reg, }); }, else => { try self.load(self.ret_mcv, ptr, ptr_ty); try self.setRegOrMem(elem_ty, self.ret_mcv, self.ret_mcv); }, } // TODO when implementing defer, this will need to jump to the appropriate defer expression. 
// TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction // which is available if the jump is 127 bytes or less forward. const jmp_reloc = try self.addInst(.{ .tag = .jmp, .ops = Mir.Inst.Ops.encode(.{}), .data = .{ .inst = undefined }, }); try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc); return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } const ty = self.air.typeOf(bin_op.lhs); const signedness: std.builtin.Signedness = blk: { // For non-int types, we treat the values as unsigned if (ty.zigTypeTag() != .Int) break :blk .unsigned; // Otherwise, we take the signedness of the actual int break :blk ty.intInfo(self.target.*).signedness; }; try self.spillEflagsIfOccupied(); self.eflags_inst = inst; const result: MCValue = result: { // There are 2 operands, destination and source. // Either one, but not both, can be a memory operand. // Source operand can be an immediate, 8 bits or 32 bits. // TODO look into reusing the operand const lhs = try self.resolveInst(bin_op.lhs); const lhs_lock: ?RegisterLock = switch (lhs) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, }; defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); const dst_reg = try self.copyToTmpRegister(ty, lhs); const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_reg_lock); const dst_mcv = MCValue{ .register = dst_reg }; const rhs_ty = self.air.typeOf(bin_op.rhs); // This instruction supports only signed 32-bit immediates at most. 
        const src_mcv: MCValue = blk: {
            switch (rhs_ty.zigTypeTag()) {
                .Float => {
                    // Float compares cannot use immediates; force the rhs
                    // into a register.
                    const rhs = try self.resolveInst(bin_op.rhs);
                    const rhs_lock: ?RegisterLock = switch (rhs) {
                        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                        else => null,
                    };
                    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
                    const src_reg = try self.copyToTmpRegister(rhs_ty, rhs);
                    break :blk MCValue{ .register = src_reg };
                },
                // Non-floats may stay immediates if they fit in i32 (cmp's limit).
                else => break :blk try self.limitImmediateType(bin_op.rhs, i32),
            }
        };
        const src_lock: ?RegisterLock = switch (src_mcv) {
            .register => |reg| self.register_manager.lockReg(reg),
            else => null,
        };
        defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
        try self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv);
        // Map the Zig compare operator onto a condition code matching the
        // signedness of the comparison.
        break :result switch (signedness) {
            .signed => MCValue{ .eflags = Condition.fromCompareOperatorSigned(op) },
            .unsigned => MCValue{ .eflags = Condition.fromCompareOperatorUnsigned(op) },
        };
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

/// Vector comparisons are not implemented for this backend yet.
fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
    _ = inst;
    return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch});
}

/// `cmp_lt_errors_len` is not implemented for this backend yet.
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    _ = operand;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airCmpLtErrorsLen for {}", .{self.target.cpu.arch});
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// Lowers AIR `try`: branch to `body` on error, otherwise fall through to
/// unwrapping the error union payload.
fn airTry(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Try, pl_op.payload);
    const body = self.air.extra[extra.end..][0..extra.data.body_len];
    const err_union_ty = self.air.typeOf(pl_op.operand);
    const err_union = try self.resolveInst(pl_op.operand);
    const result = try self.genTry(inst, err_union, body, err_union_ty, false);
    return
    self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
}

/// Like `airTry` but the operand is a pointer to the error union.
fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
    const body = self.air.extra[extra.end..][0..extra.data.body_len];
    const err_union_ty = self.air.typeOf(extra.data.ptr).childType();
    const err_union_ptr = try self.resolveInst(extra.data.ptr);
    const result = try self.genTry(inst, err_union_ptr, body, err_union_ty, true);
    return self.finishAir(inst, result, .{ extra.data.ptr, .none, .none });
}

/// Shared lowering for `try`/`try_ptr`: emit an is-error test, a conditional
/// jump over the error `body`, then unwrap the payload on the non-error path.
/// `operand_is_ptr` is not supported yet.
fn genTry(
    self: *Self,
    inst: Air.Inst.Index,
    err_union: MCValue,
    body: []const Air.Inst.Index,
    err_union_ty: Type,
    operand_is_ptr: bool,
) !MCValue {
    if (operand_is_ptr) {
        return self.fail("TODO genTry for pointers", .{});
    }
    const is_err_mcv = try self.isErr(null, err_union_ty, err_union);
    // Jump taken when NOT an error, skipping the error body.
    const reloc = try self.genCondBrMir(Type.anyerror, is_err_mcv);
    try self.genBody(body);
    try self.performReloc(reloc);
    const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, err_union);
    return result;
}

/// Emits a `dbg_line` MIR instruction carrying the source line/column.
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
    const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
    const payload = try self.addExtra(Mir.DbgLineColumn{
        .line = dbg_stmt.line,
        .column = dbg_stmt.column,
    });
    _ = try self.addInst(.{
        .tag = .dbg_line,
        .ops = undefined,
        .data = .{ .payload = payload },
    });
    return self.finishAirBookkeeping();
}

/// Debug info for inlined function changes is not emitted yet.
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
    // TODO emit debug info for function change
    _ = function;
    return self.finishAir(inst, .dead, .{ .none, .none, .none });
}

/// Debug info for lexical blocks is not emitted yet.
fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {
    // TODO emit debug info lexical block
    return self.finishAir(inst, .dead, .{ .none, .none, .none });
}

/// Emits variable debug info (name, type, location) for a dbg_var instruction.
fn airDbgVar(self: *Self, inst: Air.Inst.Index)
!void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const operand = pl_op.operand;
    const ty = self.air.typeOf(operand);
    const mcv = try self.resolveInst(operand);
    log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), mcv });
    const name = self.air.nullTerminatedString(pl_op.payload);
    const tag = self.air.instructions.items(.tag)[inst];
    try self.genVarDbgInfo(tag, ty, mcv, name);
    return self.finishAir(inst, .dead, .{ operand, .none, .none });
}

/// Emits a conditional jump that is taken when `mcv` is FALSE (i.e. the jump
/// skips the "then" branch). Returns the MIR index of the jump so the target
/// can be patched later via `performReloc`.
fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
    const abi_size = ty.abiSize(self.target.*);
    switch (mcv) {
        .eflags => |cc| {
            return self.addInst(.{
                .tag = .cond_jmp,
                .ops = Mir.Inst.Ops.encode(.{}),
                .data = .{ .inst_cc = .{
                    .inst = undefined,
                    // Here we map the opposites since the jump is to the false branch.
                    .cc = cc.negate(),
                }, },
            });
        },
        .register => |reg| {
            // `test reg, 1` + `je`: jump when the low bit is zero (false).
            try self.spillEflagsIfOccupied();
            _ = try self.addInst(.{
                .tag = .@"test",
                .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
                .data = .{ .imm = 1 },
            });
            return self.addInst(.{
                .tag = .cond_jmp,
                .ops = Mir.Inst.Ops.encode(.{}),
                .data = .{ .inst_cc = .{
                    .inst = undefined,
                    .cc = .e,
                } },
            });
        },
        .immediate,
        .stack_offset,
        => {
            // Normalize small conditions into a register and recurse.
            try self.spillEflagsIfOccupied();
            if (abi_size <= 8) {
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genCondBrMir(ty, .{ .register = reg });
            }
            return self.fail("TODO implement condbr when condition is {} with abi larger than 8 bytes", .{mcv});
        },
        else => return self.fail("TODO implement condbr when condition is {s}", .{@tagName(mcv)}),
    }
}

/// Lowers AIR `cond_br`: generates both branches, reverting register/stack
/// allocation state between them, then reconciles the two branches' MCValue
/// tables via `canonicaliseBranches`.
fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const cond = try self.resolveInst(pl_op.operand);
    const cond_ty = self.air.typeOf(pl_op.operand);
    const extra = self.air.extraData(Air.CondBr, pl_op.payload);
    const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
    const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
    const liveness_condbr =
    self.liveness.getCondBr(inst);
    const reloc = try self.genCondBrMir(cond_ty, cond);

    // If the condition dies here in this condbr instruction, process
    // that death now instead of later as this has an effect on
    // whether it needs to be spilled in the branches
    if (self.liveness.operandDies(inst, 0)) {
        const op_int = @enumToInt(pl_op.operand);
        if (op_int >= Air.Inst.Ref.typed_value_map.len) {
            const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
            self.processDeath(op_index);
        }
    }

    // Capture the state of register and stack allocation state so that we can revert to it.
    const saved_state = try self.captureState();

    try self.branch_stack.append(.{});
    errdefer {
        _ = self.branch_stack.pop();
    }
    try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
    for (liveness_condbr.then_deaths) |operand| {
        self.processDeath(operand);
    }
    try self.genBody(then_body);

    // Revert to the previous register and stack allocation state.
    var then_branch = self.branch_stack.pop();
    defer then_branch.deinit(self.gpa);
    self.revertState(saved_state);

    // Patch the false-branch jump to land here, at the start of the else body.
    try self.performReloc(reloc);

    try self.branch_stack.append(.{});
    errdefer {
        _ = self.branch_stack.pop();
    }
    try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
    for (liveness_condbr.else_deaths) |operand| {
        self.processDeath(operand);
    }
    try self.genBody(else_body);
    var else_branch = self.branch_stack.pop();
    defer else_branch.deinit(self.gpa);

    // At this point, each branch will possibly have conflicting values for where
    // each instruction is stored. They agree, however, on which instructions are alive/dead.
    // We use the first ("then") branch as canonical, and here emit
    // instructions into the second ("else") branch to make it conform.
    // We continue to respect the data structure semantic guarantees of the else_branch so
    // that we can use all the code emitting abstractions. This is why at the bottom we
    // assert that parent_branch.free_registers equals the saved_then_branch.free_registers
    // rather than assigning it.
    log.debug("airCondBr: %{d}", .{inst});
    log.debug("Upper branches:", .{});
    for (self.branch_stack.items) |bs| {
        log.debug("{}", .{bs.fmtDebug()});
    }
    log.debug("Then branch: {}", .{then_branch.fmtDebug()});
    log.debug("Else branch: {}", .{else_branch.fmtDebug()});

    const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    try self.canonicaliseBranches(parent_branch, &then_branch, &else_branch);

    // We already took care of pl_op.operand earlier, so we're going
    // to pass .none here
    return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}

/// Emits a compare of `operand` against null and returns `.eflags = .e`
/// (equal ⇒ is null). For non-pointer-like optionals with a runtime payload,
/// only the bool "has value" flag is compared.
fn isNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
    try self.spillEflagsIfOccupied();
    self.eflags_inst = inst;

    const cmp_ty: Type = if (!ty.isPtrLikeOptional()) blk: {
        var buf: Type.Payload.ElemType = undefined;
        const payload_ty = ty.optionalChild(&buf);
        break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime()) Type.bool else ty;
    } else ty;

    try self.genBinOpMir(.cmp, cmp_ty, operand, MCValue{ .immediate = 0 });

    return MCValue{ .eflags = .e };
}

/// Same test as `isNull` with the condition code negated (not-equal ⇒ non-null).
fn isNonNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
    const is_null_res = try self.isNull(inst, ty, operand);
    assert(is_null_res.eflags == .e);
    return MCValue{ .eflags = is_null_res.eflags.negate() };
}

/// Emits a compare of the error code of an error union against 0.
/// Returns `.immediate = 0` when the error set is empty (statically never an
/// error), otherwise `.eflags = .a` (unsigned above 0 ⇒ is an error).
/// `maybe_inst` is null when called outside AIR bookkeeping (e.g. from genTry).
fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
    const err_type = ty.errorUnionSet();

    if (err_type.errorSetIsEmpty()) {
        return MCValue{ .immediate = 0 }; // always false
    }

    try self.spillEflagsIfOccupied();
    if (maybe_inst) |inst| {
        self.eflags_inst = inst;
    }

    const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*);
    switch (operand) {
        .stack_offset => |off| {
            const offset = off - @intCast(i32, err_off);
            try self.genBinOpMir(.cmp, Type.anyerror, .{ .stack_offset = offset }, .{ .immediate = 0 });
        },
        .register => |reg| {
            const maybe_lock = self.register_manager.lockReg(reg);
            defer if (maybe_lock) |lock| self.register_manager.unlockReg(lock);
            const tmp_reg = try self.copyToTmpRegister(ty, operand);
            if (err_off > 0) {
                // Shift the error code down to the low bits before comparing.
                const shift = @intCast(u6, err_off * 8);
                try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{ .immediate = shift });
            } else {
                try self.truncateRegister(Type.anyerror, tmp_reg);
            }
            try self.genBinOpMir(.cmp, Type.anyerror, .{ .register = tmp_reg }, .{ .immediate = 0 });
        },
        else => return self.fail("TODO implement isErr for {}", .{operand}),
    }

    return MCValue{ .eflags = .a };
}

/// Negation of `isErr`: `.eflags` result is negated; a statically-empty error
/// set yields `.immediate = 1` (always "not an error").
fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
    const is_err_res = try self.isErr(inst, ty, operand);
    switch (is_err_res) {
        .eflags => |cc| {
            assert(cc == .a);
            return MCValue{ .eflags = cc.negate() };
        },
        .immediate => |imm| {
            assert(imm == 0);
            return MCValue{ .immediate = 1 };
        },
        else => unreachable,
    }
}

/// AIR `is_null` wrapper around `isNull`.
fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand = try self.resolveInst(un_op);
        const ty = self.air.typeOf(un_op);
        break :result try self.isNull(inst, ty, operand);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// AIR `is_null_ptr`: loads the optional through the pointer operand, then
/// performs the null test on the loaded value.
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;

    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ un_op, .none, .none });
    }

    const operand_ptr = try self.resolveInst(un_op);
    const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const operand: MCValue = blk: {
        if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
            // The MCValue that holds the pointer can be re-used as the value.
            break :blk operand_ptr;
        } else {
            break :blk try self.allocRegOrMem(inst, true);
        }
    };
    const ptr_ty = self.air.typeOf(un_op);
    try self.load(operand, operand_ptr, ptr_ty);
    const result = try self.isNull(inst, ptr_ty.elemType(), operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// AIR `is_non_null` wrapper around `isNonNull`.
fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand = try self.resolveInst(un_op);
        const ty = self.air.typeOf(un_op);
        break :result try self.isNonNull(inst, ty, operand);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// AIR `is_non_null_ptr`: loads the optional through the pointer operand,
/// then performs the non-null test on the loaded value.
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;

    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ un_op, .none, .none });
    }

    const operand_ptr = try self.resolveInst(un_op);
    const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const operand: MCValue = blk: {
        if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
            // The MCValue that holds the pointer can be re-used as the value.
            break :blk operand_ptr;
        } else {
            break :blk try self.allocRegOrMem(inst, true);
        }
    };
    const ptr_ty = self.air.typeOf(un_op);
    try self.load(operand, operand_ptr, ptr_ty);
    const result = try self.isNonNull(inst, ptr_ty.elemType(), operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// AIR `is_err` wrapper around `isErr`.
fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand = try self.resolveInst(un_op);
        const ty = self.air.typeOf(un_op);
        break :result try self.isErr(inst, ty, operand);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// AIR `is_err_ptr`: loads the error union through the pointer operand, then
/// performs the error test on the loaded value.
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;

    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ un_op, .none, .none });
    }

    const operand_ptr = try self.resolveInst(un_op);
    const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const operand: MCValue = blk: {
        if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
            // The MCValue that holds the pointer can be re-used as the value.
            break :blk operand_ptr;
        } else {
            break :blk try self.allocRegOrMem(inst, true);
        }
    };
    const ptr_ty = self.air.typeOf(un_op);
    try self.load(operand, operand_ptr, ptr_ty);
    const result = try self.isErr(inst, ptr_ty.elemType(), operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// AIR `is_non_err` wrapper around `isNonErr`.
fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand = try self.resolveInst(un_op);
        const ty = self.air.typeOf(un_op);
        break :result try self.isNonErr(inst, ty, operand);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// AIR `is_non_err_ptr`: loads the error union through the pointer operand,
/// then performs the non-error test on the loaded value.
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;

    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ un_op, .none, .none });
    }

    const operand_ptr = try self.resolveInst(un_op);
    const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const operand: MCValue = blk: {
        if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
            // The MCValue that holds the pointer can be re-used as the value.
            break :blk operand_ptr;
        } else {
            break :blk try self.allocRegOrMem(inst, true);
        }
    };
    const ptr_ty = self.air.typeOf(un_op);
    try self.load(operand, operand_ptr, ptr_ty);
    const result = try self.isNonErr(inst, ptr_ty.elemType(), operand);
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// Lowers AIR `loop`: records the current MIR index as the jump target,
/// generates the body, and emits an unconditional back-edge jump.
fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
    // A loop is a setup to be able to jump back to the beginning.
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; const jmp_target = @intCast(u32, self.mir_instructions.len); try self.genBody(body); _ = try self.addInst(.{ .tag = .jmp, .ops = Mir.Inst.Ops.encode(.{}), .data = .{ .inst = jmp_target }, }); return self.finishAirBookkeeping(); } fn airBlock(self: *Self, inst: Air.Inst.Index) !void { try self.blocks.putNoClobber(self.gpa, inst, .{ // A block is a setup to be able to jump to the end. .relocs = .{}, // It also acts as a receptacle for break operands. // Here we use `MCValue.none` to represent a null value so that the first // break instruction will choose a MCValue for the block result and overwrite // this field. Following break instructions will use that MCValue to put their // block results. .mcv = MCValue{ .none = {} }, }); defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; try self.genBody(body); for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc); const result = self.blocks.getPtr(inst).?.mcv; return self.finishAir(inst, result, .{ .none, .none, .none }); } fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u32 { const abi_size = @intCast(u32, ty.abiSize(self.target.*)); switch (condition) { .none => unreachable, .undef => unreachable, .dead, .unreach => unreachable, .eflags => unreachable, .register => |cond_reg| { try self.spillEflagsIfOccupied(); const cond_reg_lock = self.register_manager.lockReg(cond_reg); defer if (cond_reg_lock) |lock| self.register_manager.unlockReg(lock); switch (case) { .none => unreachable, .undef => unreachable, .dead, .unreach => unreachable, .immediate => |imm| { _ = try self.addInst(.{ .tag = .xor, 
.ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(cond_reg, abi_size) }), .data = .{ .imm = @intCast(u32, imm) }, }); }, .register => |reg| { _ = try self.addInst(.{ .tag = .xor, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(cond_reg, abi_size), .reg2 = registerAlias(reg, abi_size), }), .data = undefined, }); }, .stack_offset => { if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, case); return self.genCondSwitchMir(ty, condition, .{ .register = reg }); } return self.fail("TODO implement switch mir when case is stack offset with abi larger than 8 bytes", .{}); }, else => { return self.fail("TODO implement switch mir when case is {}", .{case}); }, } _ = try self.addInst(.{ .tag = .@"test", .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(cond_reg, abi_size), .reg2 = registerAlias(cond_reg, abi_size), }), .data = undefined, }); return self.addInst(.{ .tag = .cond_jmp, .ops = Mir.Inst.Ops.encode(.{}), .data = .{ .inst_cc = .{ .inst = undefined, .cc = .ne, } }, }); }, .stack_offset => { try self.spillEflagsIfOccupied(); if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, condition); const reg_lock = self.register_manager.lockRegAssumeUnused(reg); defer self.register_manager.unlockReg(reg_lock); return self.genCondSwitchMir(ty, .{ .register = reg }, case); } return self.fail("TODO implement switch mir when condition is stack offset with abi larger than 8 bytes", .{}); }, else => { return self.fail("TODO implemenent switch mir when condition is {}", .{condition}); }, } } fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const condition = try self.resolveInst(pl_op.operand); const condition_ty = self.air.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); var extra_index: usize = switch_br.end; var case_i: u32 = 0; const liveness = try self.liveness.getSwitchBr( self.gpa, inst, switch_br.data.cases_len + 1, ); defer 
    self.gpa.free(liveness.deaths);

    // If the condition dies here in this switch instruction, process
    // that death now instead of later as this has an effect on
    // whether it needs to be spilled in the branches
    if (self.liveness.operandDies(inst, 0)) {
        const op_int = @enumToInt(pl_op.operand);
        if (op_int >= Air.Inst.Ref.typed_value_map.len) {
            const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
            self.processDeath(op_index);
        }
    }

    // Collects one popped Branch per prong (and else body) so they can all be
    // canonicalised together after code generation.
    var branch_stack = std.ArrayList(Branch).init(self.gpa);
    defer {
        for (branch_stack.items) |*bs| {
            bs.deinit(self.gpa);
        }
        branch_stack.deinit();
    }
    try branch_stack.ensureTotalCapacityPrecise(switch_br.data.cases_len + 1);

    while (case_i < switch_br.data.cases_len) : (case_i += 1) {
        const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
        const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
        const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
        extra_index = case.end + items.len + case_body.len;

        // One skip-jump per item; all are patched to the end of this prong.
        var relocs = try self.gpa.alloc(u32, items.len);
        defer self.gpa.free(relocs);

        for (items) |item, item_i| {
            const item_mcv = try self.resolveInst(item);
            relocs[item_i] = try self.genCondSwitchMir(condition_ty, condition, item_mcv);
        }

        // Capture the state of register and stack allocation state so that we can revert to it.
        const saved_state = try self.captureState();

        try self.branch_stack.append(.{});
        errdefer {
            _ = self.branch_stack.pop();
        }
        try self.ensureProcessDeathCapacity(liveness.deaths[case_i].len);
        for (liveness.deaths[case_i]) |operand| {
            self.processDeath(operand);
        }
        try self.genBody(case_body);

        branch_stack.appendAssumeCapacity(self.branch_stack.pop());

        // Revert to the previous register and stack allocation state.
        self.revertState(saved_state);

        for (relocs) |reloc| {
            try self.performReloc(reloc);
        }
    }

    if (switch_br.data.else_body_len > 0) {
        const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];

        // Capture the state of register and stack allocation state so that we can revert to it.
        const saved_state = try self.captureState();

        try self.branch_stack.append(.{});
        errdefer {
            _ = self.branch_stack.pop();
        }
        // Else-body deaths are stored in the last slot of liveness.deaths.
        const else_deaths = liveness.deaths.len - 1;
        try self.ensureProcessDeathCapacity(liveness.deaths[else_deaths].len);
        for (liveness.deaths[else_deaths]) |operand| {
            self.processDeath(operand);
        }
        try self.genBody(else_body);

        branch_stack.appendAssumeCapacity(self.branch_stack.pop());

        // Revert to the previous register and stack allocation state.
        self.revertState(saved_state);
    }

    // Consolidate returned MCValues between prongs and else branch like we do
    // in airCondBr.
    log.debug("airSwitch: %{d}", .{inst});
    log.debug("Upper branches:", .{});
    for (self.branch_stack.items) |bs| {
        log.debug("{}", .{bs.fmtDebug()});
    }
    for (branch_stack.items) |bs, i| {
        log.debug("Case-{d} branch: {}", .{ i, bs.fmtDebug() });
    }
    // TODO: can we reduce the complexity of this algorithm?
    const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    // Fold the prong branches pairwise, right to left, into a canonical form.
    var i: usize = branch_stack.items.len;
    while (i > 1) : (i -= 1) {
        const canon_branch = &branch_stack.items[i - 2];
        const target_branch = &branch_stack.items[i - 1];
        try self.canonicaliseBranches(parent_branch, canon_branch, target_branch);
    }

    // We already took care of pl_op.operand earlier, so we're going
    // to pass .none here
    return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}

/// Reconciles the MCValue tables of two sibling branches (e.g. then/else of a
/// condbr, or two switch prongs): `canon_branch` is treated as canonical and
/// instructions are emitted so `target_branch`'s values end up in the same
/// locations; results are merged upward into `parent_branch`'s table.
fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Branch, target_branch: *Branch) !void {
    try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, target_branch.inst_table.count());

    const target_slice = target_branch.inst_table.entries.slice();
    const target_keys = target_slice.items(.key);
    const target_values = target_slice.items(.value);

    for (target_keys) |target_key, target_idx| {
        const target_value = target_values[target_idx];
        const canon_mcv = if (canon_branch.inst_table.fetchSwapRemove(target_key)) |canon_entry| blk: {
            // The instruction's MCValue is overridden in both branches.
            parent_branch.inst_table.putAssumeCapacity(target_key, canon_entry.value);
            if (target_value == .dead) {
                assert(canon_entry.value == .dead);
                continue;
            }
            break :blk canon_entry.value;
        } else blk: {
            if (target_value == .dead) continue;
            // The instruction is only overridden in the else branch.
            // Walk outward through enclosing branches to find its MCValue.
            var i: usize = self.branch_stack.items.len - 1;
            while (true) {
                i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead?
                if (self.branch_stack.items[i].inst_table.get(target_key)) |mcv| {
                    assert(mcv != .dead);
                    break :blk mcv;
                }
            }
        };
        log.debug("consolidating target_entry {d} {}=>{}", .{ target_key, target_value, canon_mcv });
        // TODO make sure the destination stack offset / register does not already have something
        // going on there.
        try self.setRegOrMem(self.air.typeOfIndex(target_key), canon_mcv, target_value);
        // TODO track the new register / stack allocation
    }

    try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, canon_branch.inst_table.count());

    const canon_slice = canon_branch.inst_table.entries.slice();
    const canon_keys = canon_slice.items(.key);
    const canon_values = canon_slice.items(.value);

    for (canon_keys) |canon_key, canon_idx| {
        const canon_value = canon_values[canon_idx];
        // We already deleted the items from this table that matched the target_branch.
        // So these are all instructions that are only overridden in the canon branch.
        parent_branch.inst_table.putAssumeCapacity(canon_key, canon_value);
        log.debug("canon_value = {}", .{canon_value});
        if (canon_value == .dead) continue;
        // Find the instruction's MCValue in an enclosing branch.
        const parent_mcv = blk: {
            var i: usize = self.branch_stack.items.len - 1;
            while (true) {
                i -= 1;
                if (self.branch_stack.items[i].inst_table.get(canon_key)) |mcv| {
                    assert(mcv != .dead);
                    break :blk mcv;
                }
            }
        };
        log.debug("consolidating canon_entry {d} {}=>{}", .{ canon_key, parent_mcv, canon_value });
        // TODO make sure the destination stack offset / register does not already have something
        // going on there.
        try self.setRegOrMem(self.air.typeOfIndex(canon_key), parent_mcv, canon_value);
        // TODO track the new register / stack allocation
    }
}

/// Patches a previously-emitted jump or conditional jump at MIR index `reloc`
/// so its target is the next instruction to be emitted.
fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
    const next_inst = @intCast(u32, self.mir_instructions.len);
    switch (self.mir_instructions.items(.tag)[reloc]) {
        .cond_jmp => {
            self.mir_instructions.items(.data)[reloc].inst_cc.inst = next_inst;
        },
        .jmp => {
            self.mir_instructions.items(.data)[reloc].inst = next_inst;
        },
        else => unreachable,
    }
}

/// Lowers AIR `br`: delegates to `br` with the target block and operand.
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
    const branch = self.air.instructions.items(.data)[inst].br;
    try self.br(branch.block_inst, branch.operand);
    return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
}

/// Stores the break operand into the block's result location (choosing one on
/// the first break) and emits the jump out of the block via `brVoid`.
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
    const block_data = self.blocks.getPtr(block).?;

    if (self.air.typeOf(operand).hasRuntimeBits()) {
        const operand_mcv = try self.resolveInst(operand);
        const block_mcv = block_data.mcv;
        if (block_mcv == .none) {
            // First break: this operand's location becomes the block result
            // location, unless it is transient (eflags/immediate/ptr offset),
            // in which case a stable register/stack slot is allocated.
            block_data.mcv = switch (operand_mcv) {
                .none, .dead, .unreach => unreachable,
                .register, .stack_offset, .memory => operand_mcv,
                .eflags, .immediate, .ptr_stack_offset => blk: {
                    const new_mcv = try self.allocRegOrMem(block, true);
                    try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
                    break :blk new_mcv;
                },
                else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
            };
        } else {
            // Subsequent breaks copy into the already-chosen result location.
            try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
        }
    }
    return self.brVoid(block);
}

/// Emits the unconditional jump out of `block` and records it in the block's
/// reloc list to be patched when the block ends.
fn brVoid(self: *Self, block: Air.Inst.Index) !void {
    const block_data = self.blocks.getPtr(block).?;

    // Emit a jump with a relocation. It will be patched up after the block ends.
    try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);

    // Leave the jump offset undefined
    const jmp_reloc = try self.addInst(.{
        .tag = .jmp,
        .ops = Mir.Inst.Ops.encode(.{}),
        .data = .{ .inst = undefined },
    });
    block_data.relocs.appendAssumeCapacity(jmp_reloc);
}

/// Lowers AIR `asm`: parses output/input/clobber constraints from `extra`,
/// loads inputs into the requested registers, and translates a small subset
/// of x86 assembly (syscall/push/pop) into MIR instructions.
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Asm, ty_pl.payload);
    // Flags word: top bit = volatile, low 31 bits = clobber count.
    const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
    const clobbers_len = @truncate(u31, extra.data.flags);
    var extra_i: usize = extra.end;
    const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
    extra_i += outputs.len;
    const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
    extra_i += inputs.len;

    const dead = !is_volatile and self.liveness.isUnused(inst);
    const result: MCValue = if (dead) .dead else result: {
        if (outputs.len > 1) {
            return self.fail("TODO implement codegen for asm with more than 1 output", .{});
        }

        const output_constraint: ?[]const u8 = for (outputs) |output| {
            if (output != .none) {
                return self.fail("TODO implement codegen for non-expr asm", .{});
            }
            const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
            const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
            const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
            // This equation accounts for the fact that even if we have exactly 4 bytes
            // for the string, we still use the next u32 for the null terminator.
            extra_i += (constraint.len + name.len + (2 + 3)) / 4;

            break constraint;
        } else null;

        for (inputs) |input| {
            const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
            const constraint = std.mem.sliceTo(input_bytes, 0);
            const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
            // This equation accounts for the fact that even if we have exactly 4 bytes
            // for the string, we still use the next u32 for the null terminator.
            extra_i += (constraint.len + name.len + (2 + 3)) / 4;

            // Only register constraints of the form "{regname}" are supported.
            if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
                return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
            }
            const reg_name = constraint[1 .. constraint.len - 1];
            const reg = parseRegName(reg_name) orelse
                return self.fail("unrecognized register: '{s}'", .{reg_name});

            const arg_mcv = try self.resolveInst(input);
            try self.register_manager.getReg(reg, null);
            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
        }

        {
            var clobber_i: u32 = 0;
            while (clobber_i < clobbers_len) : (clobber_i += 1) {
                const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
                // This equation accounts for the fact that even if we have exactly 4 bytes
                // for the string, we still use the next u32 for the null terminator.
                extra_i += clobber.len / 4 + 1;

                // TODO honor these
            }
        }

        const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];

        {
            // Minimal line-by-line translator: only syscall, push and pop
            // (with $imm or %%reg operands) are recognized.
            var iter = std.mem.tokenize(u8, asm_source, "\n\r");
            while (iter.next()) |ins| {
                if (mem.eql(u8, ins, "syscall")) {
                    _ = try self.addInst(.{
                        .tag = .syscall,
                        .ops = undefined,
                        .data = undefined,
                    });
                } else if (mem.indexOf(u8, ins, "push")) |_| {
                    const arg = ins[4..];
                    if (mem.indexOf(u8, arg, "$")) |l| {
                        const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch {
                            return self.fail("TODO implement more inline asm int parsing", .{});
                        };
                        _ = try self.addInst(.{
                            .tag = .push,
                            .ops = Mir.Inst.Ops.encode(.{ .flags = 0b10 }),
                            .data = .{ .imm = n },
                        });
                    } else if (mem.indexOf(u8, arg, "%%")) |l| {
                        const reg_name = ins[4 + l + 2 ..];
                        const reg = parseRegName(reg_name) orelse
                            return self.fail("unrecognized register: '{s}'", .{reg_name});
                        _ = try self.addInst(.{
                            .tag = .push,
                            .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
                            .data = undefined,
                        });
                    } else return self.fail("TODO more push operands", .{});
                } else if (mem.indexOf(u8, ins, "pop")) |_| {
                    const arg = ins[3..];
                    if (mem.indexOf(u8, arg, "%%")) |l| {
                        const reg_name = ins[3 + l + 2 ..];
                        const reg = parseRegName(reg_name) orelse
                            return self.fail("unrecognized register: '{s}'", .{reg_name});
                        _ = try self.addInst(.{
                            .tag = .pop,
                            .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
                            .data = undefined,
                        });
                    } else return self.fail("TODO more pop operands", .{});
                } else {
                    return self.fail("TODO implement support for more x86 assembly instructions", .{});
                }
            }
        }

        if (output_constraint) |output| {
            // Only "={regname}" output constraints are supported.
            if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
                return self.fail("unrecognized asm output constraint: '{s}'", .{output});
            }
            const reg_name = output[2 ..
            output.len - 1];
            const reg = parseRegName(reg_name) orelse
                return self.fail("unrecognized register: '{s}'", .{reg_name});
            break :result MCValue{ .register = reg };
        } else {
            break :result MCValue{ .none = {} };
        }
    };

    // Fast path: when all operands fit in the small liveness buffer, finish
    // directly; otherwise fall through to big-tomb iteration.
    simple: {
        var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
        var buf_index: usize = 0;
        for (outputs) |output| {
            if (output == .none) continue;
            if (buf_index >= buf.len) break :simple;
            buf[buf_index] = output;
            buf_index += 1;
        }
        if (buf_index + inputs.len > buf.len) break :simple;
        std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
        return self.finishAir(inst, result, buf);
    }
    var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
    for (outputs) |output| {
        if (output == .none) continue;
        bt.feed(output);
    }
    for (inputs) |input| {
        bt.feed(input);
    }
    return bt.finishAir(result);
}

/// Prepares a BigTomb iterator for instructions with more operands than the
/// inline liveness buffer can hold.
fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
    try self.ensureProcessDeathCapacity(operand_count + 1);
    return BigTomb{
        .function = self,
        .inst = inst,
        .lbt = self.liveness.iterateBigTomb(inst),
    };
}

/// Sets the value without any modifications to register allocation metadata or stack allocation metadata.
fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
    // Dispatch on the destination location kind; `loc` must be a writable
    // location (an immediate destination is a caller bug, hence unreachable).
    switch (loc) {
        .none => return,
        .immediate => unreachable,
        .register => |reg| return self.genSetReg(ty, reg, val),
        .stack_offset => |off| return self.genSetStack(ty, off, val, .{}),
        .memory => {
            return self.fail("TODO implement setRegOrMem for memory", .{});
        },
        else => {
            return self.fail("TODO implement setRegOrMem for {}", .{loc}),
        },
    }
}

/// Stores `mcv` into an outgoing argument slot at `stack_offset`, addressed
/// relative to rsp (note the emitted instructions use `.rsp` / a
/// `.dest_stack_base = .rsp` override, unlike `genSetStack` which defaults
/// to rbp). `stack_offset` is a positive value that is negated to form the
/// two's-complement displacement.
fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerError!void {
    const abi_size = ty.abiSize(self.target.*);
    switch (mcv) {
        .dead => unreachable,
        .unreach, .none => return,
        .undef => {
            if (abi_size <= 8) {
                // Small undef: materialize the 0xaa pattern in a register, then recurse.
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
            }
            // Large undef: fill the slot with the debug 0xaa byte pattern.
            try self.genInlineMemset(
                .{ .stack_offset = stack_offset },
                .{ .immediate = 0xaa },
                .{ .immediate = abi_size },
                .{ .dest_stack_base = .rsp },
            );
        },
        .register_overflow => return self.fail("TODO genSetStackArg for register with overflow bit", .{}),
        .eflags => {
            // Materialize the condition flag into a register first, then recurse.
            const reg = try self.copyToTmpRegister(ty, mcv);
            return self.genSetStackArg(ty, stack_offset, .{ .register = reg });
        },
        .immediate => |imm| {
            switch (abi_size) {
                1, 2, 4 => {
                    // We have a positive stack offset value but we want a twos complement negative
                    // offset from rbp, which is at the top of the stack frame.
                    // mov [rbp+offset], immediate
                    const payload = try self.addExtra(Mir.ImmPair{
                        .dest_off = @bitCast(u32, -stack_offset),
                        .operand = @truncate(u32, imm),
                    });
                    _ = try self.addInst(.{
                        .tag = .mov_mem_imm,
                        .ops = Mir.Inst.Ops.encode(.{
                            .reg1 = .rsp,
                            // flags encode the operand width: byte/word/dword.
                            .flags = switch (abi_size) {
                                1 => 0b00,
                                2 => 0b01,
                                4 => 0b10,
                                else => unreachable,
                            },
                        }),
                        .data = .{ .payload = payload },
                    });
                },
                8 => {
                    // 64-bit immediates cannot be stored directly; go through a register.
                    const reg = try self.copyToTmpRegister(ty, mcv);
                    return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
                },
                else => return self.fail("TODO implement inputs on stack for {} with abi size > 8", .{mcv}),
            }
        },
        .memory, .linker_load => {
            if (abi_size <= 8) {
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
            }
            // Larger than a register: emit an inline byte-copy loop into the arg slot.
            try self.genInlineMemcpy(.{ .stack_offset = stack_offset }, mcv, .{ .immediate = abi_size }, .{
                .source_stack_base = .rbp,
                .dest_stack_base = .rsp,
            });
        },
        .register => |reg| {
            switch (ty.zigTypeTag()) {
                .Float => {
                    if (intrinsicsAllowed(self.target.*, ty)) {
                        // Pick the SSE or AVX float-move variant for f32/f64.
                        const tag: Mir.Inst.Tag = switch (ty.tag()) {
                            .f32 => if (hasAvxSupport(self.target.*))
                                Mir.Inst.Tag.mov_f32_avx
                            else
                                Mir.Inst.Tag.mov_f32_sse,
                            .f64 => if (hasAvxSupport(self.target.*))
                                Mir.Inst.Tag.mov_f64_avx
                            else
                                Mir.Inst.Tag.mov_f64_sse,
                            else => return self.fail("TODO genSetStackArg for register for type {}", .{ty.fmtDebug()}),
                        };
                        _ = try self.addInst(.{
                            .tag = tag,
                            .ops = Mir.Inst.Ops.encode(.{
                                .reg1 = switch (ty.tag()) {
                                    .f32 => .esp,
                                    .f64 => .rsp,
                                    else => unreachable,
                                },
                                .reg2 = reg.to128(),
                                .flags = 0b01,
                            }),
                            .data = .{ .imm = @bitCast(u32, -stack_offset) },
                        });
                        return;
                    }
                    return self.fail("TODO genSetStackArg for register with no intrinsics", .{});
                },
                else => {
                    // mov [rsp - stack_offset], reg (size-aliased to abi_size)
                    _ = try self.addInst(.{
                        .tag = .mov,
                        .ops = Mir.Inst.Ops.encode(.{
                            .reg1 = .rsp,
                            .reg2 = registerAlias(reg, @intCast(u32, abi_size)),
                            .flags = 0b10,
                        }),
                        .data = .{ .imm = @bitCast(u32, -stack_offset) },
                    });
                },
            }
        },
        .ptr_stack_offset => {
            const reg = try self.copyToTmpRegister(ty, mcv);
            return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
        },
        .stack_offset => {
            if (abi_size <= 8) {
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
            }
            try self.genInlineMemcpy(.{ .stack_offset = stack_offset }, mcv, .{ .immediate = abi_size }, .{
                .source_stack_base = .rbp,
                .dest_stack_base = .rsp,
            });
        },
    }
}

/// Stores `mcv` into the stack slot at `stack_offset`, addressed from
/// `opts.dest_stack_base` (rbp by default). The positive `stack_offset` is
/// negated into a two's-complement displacement below the base register.
fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: InlineMemcpyOpts) InnerError!void {
    const abi_size = ty.abiSize(self.target.*);
    switch (mcv) {
        .dead => unreachable,
        .unreach, .none => return, // Nothing to do.
        .undef => {
            if (!self.wantSafety())
                return; // The already existing value will do just fine.
            // TODO Upgrade this to a memset call when we have that available.
            switch (ty.abiSize(self.target.*)) {
                1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }, opts),
                2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }, opts),
                4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }, opts),
                8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }, opts),
                else => |x| return self.genInlineMemset(
                    .{ .stack_offset = stack_offset },
                    .{ .immediate = 0xaa },
                    .{ .immediate = x },
                    opts,
                ),
            }
        },
        .register_overflow => |ro| {
            // Spill the (wrapped value, overflow bit) pair as two stores:
            // field 0 from the register, field 1 via a setcc into a temp register.
            const reg_lock = self.register_manager.lockReg(ro.reg);
            defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);

            const wrapped_ty = ty.structFieldType(0);
            try self.genSetStack(wrapped_ty, stack_offset, .{ .register = ro.reg }, .{});

            const overflow_bit_ty = ty.structFieldType(1);
            const overflow_bit_offset = ty.structFieldOffset(1, self.target.*);
            const tmp_reg = try self.register_manager.allocReg(null, gp);
            _ = try self.addInst(.{
                .tag = .cond_set_byte,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = tmp_reg.to8(),
                }),
                .data = .{ .cc = ro.eflags },
            });
            return self.genSetStack(
                overflow_bit_ty,
                stack_offset - @intCast(i32, overflow_bit_offset),
                .{ .register = tmp_reg.to8() },
                .{},
            );
        },
        .eflags => {
            const reg = try self.copyToTmpRegister(ty, mcv);
            return self.genSetStack(ty, stack_offset, .{ .register = reg }, opts);
        },
        .immediate => |x_big| {
            const base_reg = opts.dest_stack_base orelse .rbp;
            switch (abi_size) {
                0 => {
                    // Zero-sized store only occurs for error types (payload carries the value).
                    assert(ty.isError());
                    const payload = try self.addExtra(Mir.ImmPair{
                        .dest_off = @bitCast(u32, -stack_offset),
                        .operand = @truncate(u32, x_big),
                    });
                    _ = try self.addInst(.{
                        .tag = .mov_mem_imm,
                        .ops = Mir.Inst.Ops.encode(.{
                            .reg1 = base_reg,
                            .flags = 0b00,
                        }),
                        .data = .{ .payload = payload },
                    });
                },
                1, 2, 4 => {
                    const payload = try self.addExtra(Mir.ImmPair{
                        .dest_off = @bitCast(u32, -stack_offset),
                        .operand = @truncate(u32, x_big),
                    });
                    _ = try self.addInst(.{
                        .tag = .mov_mem_imm,
                        .ops = Mir.Inst.Ops.encode(.{
                            .reg1 = base_reg,
                            // flags encode the operand width: byte/word/dword.
                            .flags = switch (abi_size) {
                                1 => 0b00,
                                2 => 0b01,
                                4 => 0b10,
                                else => unreachable,
                            },
                        }),
                        .data = .{ .payload = payload },
                    });
                },
                8 => {
                    // 64 bit write to memory would take two mov's anyways so we
                    // instead just use two 32 bit writes to avoid register allocation
                    {
                        // High dword at stack_offset - 4 (offsets grow downward from base).
                        const payload = try self.addExtra(Mir.ImmPair{
                            .dest_off = @bitCast(u32, -stack_offset + 4),
                            .operand = @truncate(u32, x_big >> 32),
                        });
                        _ = try self.addInst(.{
                            .tag = .mov_mem_imm,
                            .ops = Mir.Inst.Ops.encode(.{
                                .reg1 = base_reg,
                                .flags = 0b10,
                            }),
                            .data = .{ .payload = payload },
                        });
                    }
                    {
                        // Low dword at stack_offset.
                        const payload = try self.addExtra(Mir.ImmPair{
                            .dest_off = @bitCast(u32, -stack_offset),
                            .operand = @truncate(u32, x_big),
                        });
                        _ = try self.addInst(.{
                            .tag = .mov_mem_imm,
                            .ops = Mir.Inst.Ops.encode(.{
                                .reg1 = base_reg,
                                .flags = 0b10,
                            }),
                            .data = .{ .payload = payload },
                        });
                    }
                },
                else => {
                    return self.fail("TODO implement set abi_size=large stack variable with immediate", .{});
                },
            }
        },
        .register => |reg| {
            // NOTE(review): `stack_offset` is i32, so this comparison with
            // maxInt(i32) can never be true — likely intended for a wider type.
            if (stack_offset > math.maxInt(i32)) {
                return self.fail("stack offset too large", .{});
            }
            const base_reg = opts.dest_stack_base orelse .rbp;
            switch (ty.zigTypeTag()) {
                .Float => {
                    if (intrinsicsAllowed(self.target.*, ty)) {
                        // Pick the SSE or AVX float-move variant for f32/f64.
                        const tag: Mir.Inst.Tag = switch (ty.tag()) {
                            .f32 => if (hasAvxSupport(self.target.*))
                                Mir.Inst.Tag.mov_f32_avx
                            else
                                Mir.Inst.Tag.mov_f32_sse,
                            .f64 => if (hasAvxSupport(self.target.*))
                                Mir.Inst.Tag.mov_f64_avx
                            else
                                Mir.Inst.Tag.mov_f64_sse,
                            else => return self.fail("TODO genSetStack for register for type {}", .{ty.fmtDebug()}),
                        };
                        _ = try self.addInst(.{
                            .tag = tag,
                            .ops = Mir.Inst.Ops.encode(.{
                                .reg1 = switch (ty.tag()) {
                                    .f32 => base_reg.to32(),
                                    .f64 => base_reg.to64(),
                                    else => unreachable,
                                },
                                .reg2 = reg.to128(),
                                .flags = 0b01,
                            }),
                            .data = .{ .imm = @bitCast(u32, -stack_offset) },
                        });
                        return;
                    }
                    return self.fail("TODO genSetStack for register for type float with no intrinsics", .{});
                },
                else => {
                    try self.genInlineMemcpyRegisterRegister(ty, base_reg, reg, stack_offset);
                },
            }
        },
        .memory, .linker_load => {
            if (abi_size <= 8) {
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }, opts);
            }
            try self.genInlineMemcpy(.{ .stack_offset = stack_offset }, mcv, .{ .immediate = abi_size }, opts);
        },
        .ptr_stack_offset => {
            const reg = try self.copyToTmpRegister(ty, mcv);
            return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }, opts);
        },
        .stack_offset => |off| {
            if (stack_offset == off) {
                // Copy stack variable to itself; nothing to do.
                return;
            }
            if (abi_size <= 8) {
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }, opts);
            }
            try self.genInlineMemcpy(.{ .stack_offset = stack_offset }, mcv, .{ .immediate = abi_size }, opts);
        },
    }
}

/// Like `genInlineMemcpy` but copies value from a register to an address via dereferencing
/// of destination register.
/// Boils down to MOV r/m64, r64.
fn genInlineMemcpyRegisterRegister(
    self: *Self,
    ty: Type,
    dst_reg: Register,
    src_reg: Register,
    offset: i32,
) InnerError!void {
    // Destination must be a full 64-bit register (it holds the address base).
    assert(dst_reg.size() == 64);

    const dst_reg_lock = self.register_manager.lockReg(dst_reg);
    defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);

    const src_reg_lock = self.register_manager.lockReg(src_reg);
    defer if (src_reg_lock) |lock| self.register_manager.unlockReg(lock);

    const abi_size = @intCast(u32, ty.abiSize(self.target.*));

    if (!math.isPowerOfTwo(abi_size)) {
        // Odd-sized value: store it piecewise in descending power-of-two chunks,
        // shifting the copy right after each chunk to expose the next bytes.
        // NOTE(review): `@intCast(u3, remainder)` bounds this path to sizes < 8
        // on the first iteration — non-power-of-two sizes >= 8 would trip the
        // cast. TODO confirm callers never pass such sizes here.
        const tmp_reg = try self.copyToTmpRegister(ty, .{ .register = src_reg });

        var next_offset = offset;
        var remainder = abi_size;
        while (remainder > 0) {
            const nearest_power_of_two = @as(u6, 1) << math.log2_int(u3, @intCast(u3, remainder));

            _ = try self.addInst(.{
                .tag = .mov,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = dst_reg,
                    .reg2 = registerAlias(tmp_reg, nearest_power_of_two),
                    .flags = 0b10,
                }),
                .data = .{ .imm = @bitCast(u32, -next_offset) },
            });

            if (nearest_power_of_two > 1) {
                // Shift the remaining bytes of the value down into the low bits.
                try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{
                    .immediate = nearest_power_of_two * 8,
                });
            }

            remainder -= nearest_power_of_two;
            next_offset -= nearest_power_of_two;
        }
    } else {
        // Power-of-two size: a single size-aliased MOV suffices.
        _ = try self.addInst(.{
            .tag = .mov,
            .ops = Mir.Inst.Ops.encode(.{
                .reg1 = dst_reg,
                .reg2 = registerAlias(src_reg, @intCast(u32, abi_size)),
                .flags = 0b10,
            }),
            .data = .{ .imm = @bitCast(u32, -offset) },
        });
    }
}

/// Optional overrides for the base registers used to resolve stack-relative
/// source/destination operands in the inline memcpy/memset helpers
/// (default is rbp when null).
const InlineMemcpyOpts = struct {
    source_stack_base: ?Register = null,
    dest_stack_base: ?Register = null,
};

/// Emits an inline byte-at-a-time copy loop of `len` bytes from `src_ptr`
/// to `dst_ptr`. Allocates five scratch registers (dst addr, src addr,
/// index, count, byte temp) and clobbers eflags via CMP/ADD/SUB.
fn genInlineMemcpy(
    self: *Self,
    dst_ptr: MCValue,
    src_ptr: MCValue,
    len: MCValue,
    opts: InlineMemcpyOpts,
) InnerError!void {
    // Keep caller-specified base registers from being grabbed as scratch.
    const ssbase_lock: ?RegisterLock = if (opts.source_stack_base) |reg|
        self.register_manager.lockReg(reg)
    else
        null;
    defer if (ssbase_lock) |reg| self.register_manager.unlockReg(reg);

    const dsbase_lock: ?RegisterLock = if (opts.dest_stack_base) |reg|
        self.register_manager.lockReg(reg)
    else
        null;
    defer if (dsbase_lock) |lock| self.register_manager.unlockReg(lock);

    const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
    const dst_addr_reg = regs[0];
    const src_addr_reg = regs[1];
    const index_reg = regs[2].to64();
    const count_reg = regs[3].to64();
    const tmp_reg = regs[4].to8();

    // Materialize the destination address into dst_addr_reg.
    switch (dst_ptr) {
        .memory, .linker_load => {
            try self.loadMemPtrIntoRegister(dst_addr_reg, Type.usize, dst_ptr);
        },
        .ptr_stack_offset, .stack_offset => |off| {
            _ = try self.addInst(.{
                .tag = .lea,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = dst_addr_reg.to64(),
                    .reg2 = opts.dest_stack_base orelse .rbp,
                }),
                .data = .{ .imm = @bitCast(u32, -off) },
            });
        },
        .register => |reg| {
            _ = try self.addInst(.{
                .tag = .mov,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = registerAlias(dst_addr_reg, @divExact(reg.size(), 8)),
                    .reg2 = reg,
                }),
                .data = undefined,
            });
        },
        else => {
            return self.fail("TODO implement memcpy for setting stack when dest is {}", .{dst_ptr});
        },
    }

    // Materialize the source address into src_addr_reg.
    switch (src_ptr) {
        .memory, .linker_load => {
            try self.loadMemPtrIntoRegister(src_addr_reg, Type.usize, src_ptr);
        },
        .ptr_stack_offset, .stack_offset => |off| {
            _ = try self.addInst(.{
                .tag = .lea,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = src_addr_reg.to64(),
                    .reg2 = opts.source_stack_base orelse .rbp,
                }),
                .data = .{ .imm = @bitCast(u32, -off) },
            });
        },
        .register => |reg| {
            _ = try self.addInst(.{
                .tag = .mov,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = registerAlias(src_addr_reg, @divExact(reg.size(), 8)),
                    .reg2 = reg,
                }),
                .data = undefined,
            });
        },
        else => {
            return self.fail("TODO implement memcpy for setting stack when src is {}", .{src_ptr});
        },
    }

    try self.genSetReg(Type.usize, count_reg, len);

    // mov index_reg, 0
    _ = try self.addInst(.{
        .tag = .mov,
        .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
        .data = .{ .imm = 0 },
    });

    // loop:
    // cmp count, 0
    const loop_start = try self.addInst(.{
        .tag = .cmp,
        .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
        .data = .{ .imm = 0 },
    });

    // je end
    const loop_reloc = try self.addInst(.{
        .tag = .cond_jmp,
        .ops = Mir.Inst.Ops.encode(.{}),
        .data = .{ .inst_cc = .{
            .inst = undefined, // patched below via performReloc
            .cc = .e,
        } },
    });

    // mov tmp, [addr + index_reg]
    _ = try self.addInst(.{
        .tag = .mov_scale_src,
        .ops = Mir.Inst.Ops.encode(.{
            .reg1 = tmp_reg.to8(),
            .reg2 = src_addr_reg,
        }),
        .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDisp.encode(index_reg, 0)) },
    });

    // mov [stack_offset + index_reg], tmp
    _ = try self.addInst(.{
        .tag = .mov_scale_dst,
        .ops = Mir.Inst.Ops.encode(.{
            .reg1 = dst_addr_reg,
            .reg2 = tmp_reg.to8(),
        }),
        .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDisp.encode(index_reg, 0)) },
    });

    // add index_reg, 1
    _ = try self.addInst(.{
        .tag = .add,
        .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
        .data = .{ .imm = 1 },
    });

    // sub count, 1
    _ = try self.addInst(.{
        .tag = .sub,
        .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
        .data = .{ .imm = 1 },
    });

    // jmp loop
    _ = try self.addInst(.{
        .tag = .jmp,
        .ops = Mir.Inst.Ops.encode(.{}),
        .data = .{ .inst = loop_start },
    });

    // end:
    try self.performReloc(loop_reloc);
}

/// Emits an inline byte-at-a-time fill loop writing `value` (currently only
/// an immediate byte) to `len` bytes at `dst_ptr`. Iterates the index from
/// len-1 down to 0, terminating when it wraps to -1. Clobbers eflags.
fn genInlineMemset(
    self: *Self,
    dst_ptr: MCValue,
    value: MCValue,
    len: MCValue,
    opts: InlineMemcpyOpts,
) InnerError!void {
    // Keep caller-specified base registers from being grabbed as scratch.
    const ssbase_lock: ?RegisterLock = if (opts.source_stack_base) |reg|
        self.register_manager.lockReg(reg)
    else
        null;
    defer if (ssbase_lock) |reg| self.register_manager.unlockReg(reg);

    const dsbase_lock: ?RegisterLock = if (opts.dest_stack_base) |reg|
        self.register_manager.lockReg(reg)
    else
        null;
    defer if (dsbase_lock) |lock| self.register_manager.unlockReg(lock);

    const regs = try self.register_manager.allocRegs(2, .{ null, null }, gp);
    const addr_reg = regs[0];
    const index_reg = regs[1].to64();

    // Materialize the destination address into addr_reg.
    switch (dst_ptr) {
        .memory, .linker_load => {
            try self.loadMemPtrIntoRegister(addr_reg, Type.usize, dst_ptr);
        },
        .ptr_stack_offset, .stack_offset => |off| {
            _ = try self.addInst(.{
                .tag = .lea,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = addr_reg.to64(),
                    .reg2 = opts.dest_stack_base orelse .rbp,
                }),
                .data = .{ .imm = @bitCast(u32, -off) },
            });
        },
        .register => |reg| {
            _ = try self.addInst(.{
                .tag = .mov,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = registerAlias(addr_reg, @divExact(reg.size(), 8)),
                    .reg2 = reg,
                }),
                .data = undefined,
            });
        },
        else => {
            return self.fail("TODO implement memcpy for setting stack when dest is {}", .{dst_ptr});
        },
    }

    // index = len - 1; count down to -1.
    try self.genSetReg(Type.usize, index_reg, len);
    try self.genBinOpMir(.sub, Type.usize, .{ .register = index_reg }, .{ .immediate = 1 });

    // loop:
    // cmp index_reg, -1
    const loop_start = try self.addInst(.{
        .tag = .cmp,
        .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
        .data = .{ .imm = @bitCast(u32, @as(i32, -1)) },
    });

    // je end
    const loop_reloc = try self.addInst(.{
        .tag = .cond_jmp,
        .ops = Mir.Inst.Ops.encode(.{}),
        .data = .{ .inst_cc = .{
            .inst = undefined, // patched below via performReloc
            .cc = .e,
        } },
    });

    switch (value) {
        .immediate => |x| {
            if (x > math.maxInt(i32)) {
                return self.fail("TODO inline memset for value immediate larger than 32bits", .{});
            }
            // mov byte ptr [rbp + index_reg + stack_offset], imm
            _ = try self.addInst(.{
                .tag = .mov_mem_index_imm,
                .ops = Mir.Inst.Ops.encode(.{ .reg1 = addr_reg }),
                .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDispImm.encode(index_reg, 0, @truncate(u32, x))) },
            });
        },
        else => return self.fail("TODO inline memset for value of type {}", .{value}),
    }

    // sub index_reg, 1
    _ = try self.addInst(.{
        .tag = .sub,
        .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
        .data = .{ .imm = 1 },
    });

    // jmp loop
    _ = try self.addInst(.{
        .tag = .jmp,
        .ops = Mir.Inst.Ops.encode(.{}),
        .data = .{ .inst = loop_start },
    });

    // end:
    try self.performReloc(loop_reloc);
}

/// Loads `mcv` into register `reg`, choosing the cheapest encoding for the
/// value's current location (immediate, register, memory, stack, eflags, ...).
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
    switch (mcv) {
        .dead => unreachable,
        .register_overflow => unreachable,
        .ptr_stack_offset => |off| {
            if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
                return self.fail("stack offset too large", .{});
            }
            // lea reg, [rbp - off] — take the address of the stack slot.
            _ = try self.addInst(.{
                .tag = .lea,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = registerAlias(reg, abi_size),
.reg2 = .rbp, }), .data = .{ .imm = @bitCast(u32, -off) }, }); }, .unreach, .none => return, // Nothing to do. .undef => { if (!self.wantSafety()) return; // The already existing value will do just fine. // Write the debug undefined value. switch (registerAlias(reg, abi_size).size()) { 8 => return self.genSetReg(ty, reg, .{ .immediate = 0xaa }), 16 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaa }), 32 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }), 64 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), else => unreachable, } }, .eflags => |cc| { _ = try self.addInst(.{ .tag = .cond_set_byte, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to8(), }), .data = .{ .cc = cc }, }); }, .immediate => |x| { // 32-bit moves zero-extend to 64-bit, so xoring the 32-bit // register is the fastest way to zero a register. if (x == 0) { _ = try self.addInst(.{ .tag = .xor, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to32(), .reg2 = reg.to32(), }), .data = undefined, }); return; } if (x <= math.maxInt(i32)) { // Next best case: if we set the lower four bytes, the upper four will be zeroed. _ = try self.addInst(.{ .tag = .mov, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }), .data = .{ .imm = @truncate(u32, x) }, }); return; } // Worst case: we need to load the 64-bit register with the IMM. GNU's assemblers calls // this `movabs`, though this is officially just a different variant of the plain `mov` // instruction. // // This encoding is, in fact, the *same* as the one used for 32-bit loads. The only // difference is that we set REX.W before the instruction, which extends the load to // 64-bit and uses the full bit-width of the register. const payload = try self.addExtra(Mir.Imm64.encode(x)); _ = try self.addInst(.{ .tag = .movabs, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64() }), .data = .{ .payload = payload }, }); }, .register => |src_reg| { // If the registers are the same, nothing to do. 
if (src_reg.id() == reg.id()) return; switch (ty.zigTypeTag()) { .Int => switch (ty.intInfo(self.target.*).signedness) { .signed => { if (abi_size <= 4) { _ = try self.addInst(.{ .tag = .mov_sign_extend, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64(), .reg2 = registerAlias(src_reg, abi_size), }), .data = undefined, }); return; } }, .unsigned => { if (abi_size <= 2) { _ = try self.addInst(.{ .tag = .mov_zero_extend, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64(), .reg2 = registerAlias(src_reg, abi_size), }), .data = undefined, }); return; } }, }, .Float => { if (intrinsicsAllowed(self.target.*, ty)) { const tag: Mir.Inst.Tag = switch (ty.tag()) { .f32 => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.mov_f32_avx else Mir.Inst.Tag.mov_f32_sse, .f64 => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.mov_f64_avx else Mir.Inst.Tag.mov_f64_sse, else => return self.fail("TODO genSetReg from register for {}", .{ty.fmtDebug()}), }; _ = try self.addInst(.{ .tag = tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to128(), .reg2 = src_reg.to128(), .flags = 0b10, }), .data = undefined, }); return; } return self.fail("TODO genSetReg from register for float with no intrinsics", .{}); }, else => {}, } _ = try self.addInst(.{ .tag = .mov, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size), .reg2 = registerAlias(src_reg, abi_size), }), .data = undefined, }); }, .linker_load => { switch (ty.zigTypeTag()) { .Float => { const base_reg = try self.register_manager.allocReg(null, gp); try self.loadMemPtrIntoRegister(base_reg, Type.usize, mcv); if (intrinsicsAllowed(self.target.*, ty)) { const tag: Mir.Inst.Tag = switch (ty.tag()) { .f32 => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.mov_f32_avx else Mir.Inst.Tag.mov_f32_sse, .f64 => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.mov_f64_avx else Mir.Inst.Tag.mov_f64_sse, else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}), }; _ = try self.addInst(.{ .tag = tag, .ops = Mir.Inst.Ops.encode(.{ 
.reg1 = reg.to128(), .reg2 = switch (ty.tag()) { .f32 => base_reg.to32(), .f64 => base_reg.to64(), else => unreachable, }, }), .data = .{ .imm = 0 }, }); return; } return self.fail("TODO genSetReg from memory for float with no intrinsics", .{}); }, else => { try self.loadMemPtrIntoRegister(reg, Type.usize, mcv); _ = try self.addInst(.{ .tag = .mov, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size), .reg2 = reg.to64(), .flags = 0b01, }), .data = .{ .imm = 0 }, }); }, } }, .memory => |x| switch (ty.zigTypeTag()) { .Float => { const base_reg = try self.register_manager.allocReg(null, gp); try self.loadMemPtrIntoRegister(base_reg, Type.usize, mcv); if (intrinsicsAllowed(self.target.*, ty)) { const tag: Mir.Inst.Tag = switch (ty.tag()) { .f32 => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.mov_f32_avx else Mir.Inst.Tag.mov_f32_sse, .f64 => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.mov_f64_avx else Mir.Inst.Tag.mov_f64_sse, else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}), }; _ = try self.addInst(.{ .tag = tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to128(), .reg2 = switch (ty.tag()) { .f32 => base_reg.to32(), .f64 => base_reg.to64(), else => unreachable, }, }), .data = .{ .imm = 0 }, }); return; } return self.fail("TODO genSetReg from memory for float with no intrinsics", .{}); }, else => { if (x <= math.maxInt(i32)) { // mov reg, [ds:imm32] _ = try self.addInst(.{ .tag = .mov, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size), .flags = 0b01, }), .data = .{ .imm = @truncate(u32, x) }, }); } else { // If this is RAX, we can use a direct load. // Otherwise, we need to load the address, then indirectly load the value. 
if (reg.id() == 0) { // movabs rax, ds:moffs64 const payload = try self.addExtra(Mir.Imm64.encode(x)); _ = try self.addInst(.{ .tag = .movabs, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rax, .flags = 0b01, // imm64 will become moffs64 }), .data = .{ .payload = payload }, }); } else { // Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue. try self.genSetReg(ty, reg, MCValue{ .immediate = x }); // mov reg, [reg + 0x0] _ = try self.addInst(.{ .tag = .mov, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size), .reg2 = reg.to64(), .flags = 0b01, }), .data = .{ .imm = 0 }, }); } } }, }, .stack_offset => |off| { if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) { return self.fail("stack offset too large", .{}); } switch (ty.zigTypeTag()) { .Int => switch (ty.intInfo(self.target.*).signedness) { .signed => { if (abi_size <= 4) { const flags: u2 = switch (abi_size) { 1 => 0b01, 2 => 0b10, 4 => 0b11, else => unreachable, }; _ = try self.addInst(.{ .tag = .mov_sign_extend, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64(), .reg2 = .rbp, .flags = flags, }), .data = .{ .imm = @bitCast(u32, -off) }, }); return; } }, .unsigned => { if (abi_size <= 2) { const flags: u2 = switch (abi_size) { 1 => 0b01, 2 => 0b10, else => unreachable, }; _ = try self.addInst(.{ .tag = .mov_zero_extend, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64(), .reg2 = .rbp, .flags = flags, }), .data = .{ .imm = @bitCast(u32, -off) }, }); return; } }, }, .Float => { if (intrinsicsAllowed(self.target.*, ty)) { const tag: Mir.Inst.Tag = switch (ty.tag()) { .f32 => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.mov_f32_avx else Mir.Inst.Tag.mov_f32_sse, .f64 => if (hasAvxSupport(self.target.*)) Mir.Inst.Tag.mov_f64_avx else Mir.Inst.Tag.mov_f64_sse, else => return self.fail("TODO genSetReg from stack offset for {}", .{ty.fmtDebug()}), }; _ = try self.addInst(.{ .tag = tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to128(), .reg2 = switch (ty.tag()) { 
.f32 => .ebp, .f64 => .rbp, else => unreachable, }, }), .data = .{ .imm = @bitCast(u32, -off) }, }); return; } return self.fail("TODO genSetReg from stack offset for float with no intrinsics", .{}); }, else => {}, } _ = try self.addInst(.{ .tag = .mov, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size), .reg2 = .rbp, .flags = 0b01, }), .data = .{ .imm = @bitCast(u32, -off) }, }); }, } } fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result = try self.resolveInst(un_op); return self.finishAir(inst, result, .{ un_op, .none, .none }); } fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = try self.resolveInst(ty_op.operand); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ptr_ty = self.air.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(); const array_len = array_ty.arrayLen(); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16)); try self.genSetStack(ptr_ty, stack_offset, ptr, .{}); try self.genSetStack(Type.initTag(.u64), stack_offset - 8, .{ .immediate = array_len }, .{}); break :blk .{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airIntToFloat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { 
// (body of airFloatToInt, declared at the end of the previous line-block)
// Strategy: spill the float operand to a stack slot if needed, load it onto the
// x87 stack with FLD, then store it truncated with FISTTP into the destination
// stack slot. NOTE(review): fisttp is the SSE3 truncating store — presumably
// feature availability is guaranteed elsewhere; confirm.
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
const src_ty = self.air.typeOf(ty_op.operand);
const dst_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
// move float src to ST(0)
// Ensure the source lives in a stack slot so FLD can address it via rbp.
const stack_offset = switch (operand) {
    .stack_offset, .ptr_stack_offset => |offset| offset,
    else => blk: {
        const offset = @intCast(i32, try self.allocMem(
            inst,
            @intCast(u32, src_ty.abiSize(self.target.*)),
            src_ty.abiAlignment(self.target.*),
        ));
        try self.genSetStack(src_ty, offset, operand, .{});
        break :blk offset;
    },
};
_ = try self.addInst(.{
    .tag = .fld,
    .ops = Mir.Inst.Ops.encode(.{
        .reg1 = .rbp,
        // flags select the FLD operand width (m32fp vs m64fp).
        .flags = switch (src_ty.abiSize(self.target.*)) {
            4 => 0b01,
            8 => 0b10,
            else => |size| return self.fail("TODO load ST(0) with abiSize={}", .{size}),
        },
    }),
    .data = .{ .imm = @bitCast(u32, -stack_offset) },
});
// convert
// NOTE(review): assumes allocRegOrMem(inst, false) always yields a .stack_offset
// (the `false` forbids a register result) — .stack_offset is accessed directly below.
const stack_dst = try self.allocRegOrMem(inst, false);
_ = try self.addInst(.{
    .tag = .fisttp,
    .ops = Mir.Inst.Ops.encode(.{
        .reg1 = .rbp,
        // flags select the FISTTP destination width (m16int/m32int/m64int).
        .flags = switch (dst_ty.abiSize(self.target.*)) {
            1...2 => 0b00,
            3...4 => 0b01,
            5...8 => 0b10,
            else => |size| return self.fail("TODO convert float with abiSize={}", .{size}),
        },
    }),
    .data = .{ .imm = @bitCast(u32, -stack_dst.stack_offset) },
});
return self.finishAir(inst, stack_dst, .{ ty_op.operand, .none, .none });
}

/// Not implemented for x86_64 yet.
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Block, ty_pl.payload);
    _ = extra;
    return self.fail("TODO implement x86 airCmpxchg", .{});
    // return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
}

/// Not implemented for x86_64 yet.
/// FIXME(review): error message says "airAtomicRaw" — typo for "airAtomicRmw".
fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
    _ = inst;
    return self.fail("TODO implement x86 airAtomicRaw", .{});
}

/// Not implemented for x86_64 yet.
fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
    _ = inst;
    return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch});
}

/// Not implemented for x86_64 yet.
fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
    _ = inst;
    _ = order;
    return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
}

/// Lowers @memset: locks any register-resident operands so the inline-memset
/// codegen cannot clobber them, then emits the inline loop.
fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;

    const dst_ptr = try self.resolveInst(pl_op.operand);
    const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const src_val = try self.resolveInst(extra.lhs);
    const src_val_lock: ?RegisterLock = switch (src_val) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);

    const len = try self.resolveInst(extra.rhs);
    const len_lock: ?RegisterLock = switch (len) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (len_lock) |lock| self.register_manager.unlockReg(lock);

    try self.genInlineMemset(dst_ptr, src_val, len, .{});

    return self.finishAir(inst, .none, .{ pl_op.operand, .none, .none });
}

/// Lowers @memcpy. Same register-locking discipline as airMemset; additionally
/// dereferences linker-resolved / absolute source pointers into a register first.
fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;

    const dst_ptr = try self.resolveInst(pl_op.operand);
    const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const src_ty = self.air.typeOf(extra.lhs);
    const src_ptr = try self.resolveInst(extra.lhs);
    const src_ptr_lock: ?RegisterLock = switch (src_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer
// (continuation of airMemcpy: the `defer` keyword ends the previous line-block)
if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);

const len = try self.resolveInst(extra.rhs);
const len_lock: ?RegisterLock = switch (len) {
    .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
    else => null,
};
defer if (len_lock) |lock| self.register_manager.unlockReg(lock);

// TODO Is this the only condition for pointer dereference for memcpy?
// For linker-load/absolute-memory sources, load the pointer into a register and
// dereference it once so genInlineMemcpy works with a plain register pointer.
const src: MCValue = blk: {
    switch (src_ptr) {
        .linker_load, .memory => {
            const reg = try self.register_manager.allocReg(null, gp);
            try self.loadMemPtrIntoRegister(reg, src_ty, src_ptr);
            _ = try self.addInst(.{
                .tag = .mov,
                .ops = Mir.Inst.Ops.encode(.{
                    .reg1 = reg,
                    .reg2 = reg,
                    .flags = 0b01,
                }),
                .data = .{ .imm = 0 },
            });
            break :blk MCValue{ .register = reg };
        },
        else => break :blk src_ptr,
    }
};
const src_lock: ?RegisterLock = switch (src) {
    .register => |reg| self.register_manager.lockReg(reg),
    else => null,
};
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

try self.genInlineMemcpy(dst_ptr, src, len, .{});

return self.finishAir(inst, .none, .{ pl_op.operand, .none, .none });
}

/// Not implemented yet; only fails when the result is actually used.
fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
        _ = operand;
        return self.fail("TODO implement airTagName for x86_64", .{});
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

/// Not implemented yet; only fails when the result is actually used.
fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand = try self.resolveInst(un_op);
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
        _ = operand;
        return self.fail("TODO implement airErrorName for x86_64", .{});
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}

fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if
// (continuation of airSplat; vector ops below are all unimplemented stubs that
// only fail when their result is actually used)
(self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSplat for x86_64", .{});
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Not implemented yet.
fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSelect for x86_64", .{});
    return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}

/// Not implemented yet.
fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for x86_64", .{});
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

/// Not implemented yet.
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
    const reduce = self.air.instructions.items(.data)[inst].reduce;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airReduce for x86_64", .{});
    return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
}

/// Lowers struct/array literal initialization by allocating a stack slot and
/// storing each runtime field/element at its ABI offset (offsets grow downward
/// from the slot's base, hence `stack_offset - elem_off`).
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
    const result_ty = self.air.typeOfIndex(inst);
    const len = @intCast(usize, result_ty.arrayLen());
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    // The element operands are stored as raw u32 extra data; reinterpret as refs.
    const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
    const abi_size = @intCast(u32, result_ty.abiSize(self.target.*));
    const abi_align = result_ty.abiAlignment(self.target.*);
    const result: MCValue = res: {
        if (self.liveness.isUnused(inst)) break :res MCValue.dead;
        switch (result_ty.zigTypeTag()) {
            .Struct => {
                const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
                for (elements) |elem, elem_i| {
                    if (result_ty.structFieldValueComptime(elem_i) != null) continue; // comptime elem
                    const
// (continuation of airAggregateInit's .Struct arm; `const` ends the previous line-block)
elem_ty = result_ty.structFieldType(elem_i);
                    const elem_off = result_ty.structFieldOffset(elem_i, self.target.*);
                    const elem_mcv = try self.resolveInst(elem);
                    try self.genSetStack(elem_ty, stack_offset - @intCast(i32, elem_off), elem_mcv, .{});
                }
                break :res MCValue{ .stack_offset = stack_offset };
            },
            .Array => {
                const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
                const elem_ty = result_ty.childType();
                const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
                for (elements) |elem, elem_i| {
                    const elem_mcv = try self.resolveInst(elem);
                    const elem_off = @intCast(i32, elem_size * elem_i);
                    try self.genSetStack(elem_ty, stack_offset - elem_off, elem_mcv, .{});
                }
                break :res MCValue{ .stack_offset = stack_offset };
            },
            .Vector => return self.fail("TODO implement aggregate_init for vectors", .{}),
            else => unreachable,
        }
    };

    // Liveness bookkeeping: the fast path fits all operands in one tomb entry;
    // otherwise fall back to the big-tomb iterator.
    if (elements.len <= Liveness.bpi - 1) {
        var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
        std.mem.copy(Air.Inst.Ref, &buf, elements);
        return self.finishAir(inst, result, buf);
    }
    var bt = try self.iterateBigTomb(inst, elements.len);
    for (elements) |elem| {
        bt.feed(elem);
    }
    return bt.finishAir(result);
}

/// Not implemented yet.
/// FIXME(review): the error message says "airAggregateInit" — copy-paste; it
/// should name airUnionInit.
fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
    const result: MCValue = res: {
        if (self.liveness.isUnused(inst)) break :res MCValue.dead;
        return self.fail("TODO implement airAggregateInit for x86_64", .{});
    };
    return self.finishAir(inst, result, .{ extra.init, .none, .none });
}

/// Prefetch is a pure hint; lowered as a no-op.
fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
    const prefetch = self.air.instructions.items(.data)[inst].prefetch;
    return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none });
}

fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const result:
// (continuation of airMulAdd — unimplemented stub)
MCValue = if (self.liveness.isUnused(inst)) .dead else {
        return self.fail("TODO implement airMulAdd for x86_64", .{});
    };
    return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
}

/// Maps an AIR ref to its machine value: interned typed values and `constant`
/// instructions are materialized via genTypedValue (constants memoized in the
/// outermost branch table); everything else is looked up in the branch stack.
pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
    // First section of indexes correspond to a set number of constant values.
    const ref_int = @enumToInt(inst);
    if (ref_int < Air.Inst.Ref.typed_value_map.len) {
        const tv = Air.Inst.Ref.typed_value_map[ref_int];
        if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
            return MCValue{ .none = {} };
        }
        return self.genTypedValue(tv);
    }

    // If the type has no codegen bits, no need to store it.
    const inst_ty = self.air.typeOf(inst);
    if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
        return MCValue{ .none = {} };

    const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
    switch (self.air.instructions.items(.tag)[inst_index]) {
        .constant => {
            // Constants have static lifetimes, so they are always memoized in the outer most table.
            const branch = &self.branch_stack.items[0];
            const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
            if (!gop.found_existing) {
                const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
                gop.value_ptr.* = try self.genTypedValue(.{
                    .ty = inst_ty,
                    .val = self.air.values[ty_pl.payload],
                });
            }
            return gop.value_ptr.*;
        },
        .const_ty => unreachable,
        else => return self.getResolvedInstValue(inst_index),
    }
}

/// Walks the branch stack from innermost to outermost looking for the
/// instruction's tracked MCValue. Asserts the value was recorded somewhere
/// (the loop underflows, panicking in safe builds, if it never was) and that
/// it is not `.dead`.
fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
    // Treat each stack item as a "layer" on top of the previous one.
    var i: usize = self.branch_stack.items.len;
    while (true) {
        i -= 1;
        if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| {
            assert(mcv != .dead);
            return mcv;
        }
    }
}

/// If the MCValue is an immediate, and it does not fit within this type,
/// we put it in a register.
/// A potential opportunity for future optimization here would be keeping track
/// of the fact that the instruction is available both as an immediate
/// and as a register.
fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCValue {
    const mcv = try self.resolveInst(operand);
    const ti = @typeInfo(T).Int;
    switch (mcv) {
        .immediate => |imm| {
            // This immediate is unsigned.
            const U = std.meta.Int(.unsigned, ti.bits - @boolToInt(ti.signedness == .signed));
            // NOTE(review): `>=` also spills imm == maxInt(U), which would fit;
            // conservative but presumably intentional — confirm.
            if (imm >= math.maxInt(U)) {
                return MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.usize), mcv) };
            }
        },
        else => {},
    }
    return mcv;
}

/// Resolves a reference to a Decl into an MCValue, dispatching on the output
/// object format: ELF/Plan9 yield an absolute GOT address, MachO/COFF yield a
/// linker-load to be fixed up later. Marks the Decl alive as a side effect.
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
    log.debug("lowerDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
    const ptr_bytes: u64 = @divExact(ptr_bits, 8);

    // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
    if (tv.ty.zigTypeTag() == .Pointer) blk: {
        if (tv.ty.castPtrToFn()) |_| break :blk;
        if (!tv.ty.elemType2().hasRuntimeBits()) {
            return MCValue.none;
        }
    }

    const module = self.bin_file.options.module.?;
    const decl = module.declPtr(decl_index);
    module.markDeclAlive(decl);
    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
        const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
        return MCValue{ .memory = got_addr };
    } else if (self.bin_file.cast(link.File.MachO)) |_| {
        assert(decl.link.macho.sym_index != 0);
        return MCValue{ .linker_load = .{
            .type = .got,
            .sym_index = decl.link.macho.sym_index,
        } };
    } else if (self.bin_file.cast(link.File.Coff)) |_| {
        assert(decl.link.coff.sym_index != 0);
        return MCValue{ .linker_load = .{
            .type = .got,
            .sym_index = decl.link.coff.sym_index,
        } };
    } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
        try p9.seeDecl(decl_index);
        const got_addr = p9.bases.data +
// (continuation of lowerDeclRef's Plan9 arm: completes the GOT address sum)
decl.link.plan9.got_index.? * ptr_bytes;
        return MCValue{ .memory = got_addr };
    } else {
        return self.fail("TODO codegen non-ELF const Decl pointer", .{});
    }
}

/// Emits an anonymous constant into the output object and returns its location,
/// again dispatching on object format (absolute address vs linker-load).
fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
    log.debug("lowerUnnamedConst: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
    const local_sym_index = self.bin_file.lowerUnnamedConst(tv, self.mod_fn.owner_decl) catch |err| {
        return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
    };
    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
        const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
        return MCValue{ .memory = vaddr };
    } else if (self.bin_file.cast(link.File.MachO)) |_| {
        return MCValue{ .linker_load = .{
            .type = .direct,
            .sym_index = local_sym_index,
        } };
    } else if (self.bin_file.cast(link.File.Coff)) |_| {
        return MCValue{ .linker_load = .{
            .type = .direct,
            .sym_index = local_sym_index,
        } };
    } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
        const got_index = local_sym_index; // the plan9 backend returns the got_index
        const got_addr = p9.bases.data + got_index * ptr_bytes;
        return MCValue{ .memory = got_addr };
    } else {
        return self.fail("TODO lower unnamed const", .{});
    }
}

/// Materializes a comptime-known typed value as an MCValue: immediates for
/// small scalar types, decl refs via lowerDeclRef, and everything else spilled
/// to an unnamed constant in the binary.
fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
    var typed_value = arg_tv;
    // Unwrap `runtime_value` to the underlying value.
    if (typed_value.val.castTag(.runtime_value)) |rt| {
        typed_value.val = rt.data;
    }
    log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmtDebug(), typed_value.val.fmtDebug() });

    if (typed_value.val.isUndef())
        return MCValue{ .undef = {} };

    const ptr_bits = self.target.cpu.arch.ptrBitWidth();

    if (typed_value.val.castTag(.decl_ref)) |payload| {
        return self.lowerDeclRef(typed_value, payload.data);
    }
    if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
        return self.lowerDeclRef(typed_value, payload.data.decl_index);
    }
    const target = self.target.*;

    switch
// (continuation of genTypedValue: dispatch on the value's Zig type; any case
// that falls through without returning lands on lowerUnnamedConst below)
(typed_value.ty.zigTypeTag()) {
        .Void => return MCValue{ .none = {} },
        .Pointer => switch (typed_value.ty.ptrSize()) {
            // Slices (ptr + len) don't fit an immediate; fall through to memory.
            .Slice => {},
            else => {
                switch (typed_value.val.tag()) {
                    .int_u64 => {
                        return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
                    },
                    else => {},
                }
            },
        },
        .Int => {
            const info = typed_value.ty.intInfo(self.target.*);
            if (info.bits <= ptr_bits and info.signedness == .signed) {
                return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) };
            }
            // i.e. bits <= ptr_bits and unsigned; wider ints go to memory.
            if (!(info.bits > ptr_bits or info.signedness == .signed)) {
                return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
            }
        },
        .Bool => {
            return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
        },
        .Optional => {
            if (typed_value.ty.isPtrLikeOptional()) {
                // Pointer-like optionals use 0 as null; non-null recurses on the child.
                if (typed_value.val.isNull())
                    return MCValue{ .immediate = 0 };
                var buf: Type.Payload.ElemType = undefined;
                return self.genTypedValue(.{
                    .ty = typed_value.ty.optionalChild(&buf),
                    .val = typed_value.val,
                });
            } else if (typed_value.ty.abiSize(self.target.*) == 1) {
                // 1-byte optional is just the has-value flag.
                return MCValue{ .immediate = @boolToInt(!typed_value.val.isNull()) };
            }
        },
        .Enum => {
            if (typed_value.val.castTag(.enum_field_index)) |field_index| {
                switch (typed_value.ty.tag()) {
                    .enum_simple => {
                        // Simple enums' tag values are the field indices themselves.
                        return MCValue{ .immediate = field_index.data };
                    },
                    .enum_full, .enum_nonexhaustive => {
                        const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
                        if (enum_full.values.count() != 0) {
                            const tag_val = enum_full.values.keys()[field_index.data];
                            return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
                        } else {
                            return MCValue{ .immediate = field_index.data };
                        }
                    },
                    else => unreachable,
                }
            } else {
                // Not a field index: re-render through the integer tag type.
                var int_tag_buffer: Type.Payload.Bits = undefined;
                const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
                return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
            }
        },
        .ErrorSet => {
            switch (typed_value.val.tag()) {
                .@"error" => {
                    const err_name = typed_value.val.castTag(.@"error").?.data.name;
                    const module = self.bin_file.options.module.?;
                    const global_error_set = module.global_error_set;
                    const error_index = global_error_set.get(err_name).?;
                    return MCValue{ .immediate = error_index };
                },
                else => {
                    // In this case we are rendering an error union which has a 0 bits payload.
                    return MCValue{ .immediate = 0 };
                },
            }
        },
        .ErrorUnion => {
            const error_type = typed_value.ty.errorUnionSet();
            const payload_type = typed_value.ty.errorUnionPayload();
            const is_pl = typed_value.val.errorUnionIsPayload();

            if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
                // We use the error type directly as the type.
                const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
                return self.genTypedValue(.{ .ty = error_type, .val = err_val });
            }
        },

        .ComptimeInt => unreachable,
        .ComptimeFloat => unreachable,
        .Type => unreachable,
        .EnumLiteral => unreachable,
        .NoReturn => unreachable,
        .Undefined => unreachable,
        .Null => unreachable,
        .Opaque => unreachable,

        else => {},
    }

    // Fallback: spill the value into the binary as an unnamed constant.
    return self.lowerUnnamedConst(typed_value);
}

/// Resolved calling-convention locations for one call: where each argument and
/// the return value live, plus the stack space the call sequence needs.
const CallMCValues = struct {
    args: []MCValue,
    return_value: MCValue,
    stack_byte_count: u32,
    stack_align: u32,

    fn deinit(self: *CallMCValues, func: *Self) void {
        func.gpa.free(self.args);
        self.* = undefined;
    }
};

/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
    const cc = fn_ty.fnCallingConvention();
    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
    defer self.gpa.free(param_types);
    fn_ty.fnParamTypes(param_types);
    var result: CallMCValues = .{
        .args = try self.gpa.alloc(MCValue, param_types.len),
        // These undefined values must be populated before returning from this function.
// (continuation of resolveCallingConventionValues' result initializer)
.return_value = undefined,
        .stack_byte_count = undefined,
        .stack_align = undefined,
    };
    errdefer self.gpa.free(result.args);

    const ret_ty = fn_ty.fnReturnType();

    switch (cc) {
        .Naked => {
            // Naked functions take no args and cannot return normally.
            assert(result.args.len == 0);
            result.return_value = .{ .unreach = {} };
            result.stack_byte_count = 0;
            result.stack_align = 1;
            return result;
        },
        .C => {
            // Return values
            if (ret_ty.zigTypeTag() == .NoReturn) {
                result.return_value = .{ .unreach = {} };
            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
                // TODO: is this even possible for C calling convention?
                result.return_value = .{ .none = {} };
            } else {
                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
                if (ret_ty_size == 0) {
                    assert(ret_ty.isError());
                    result.return_value = .{ .immediate = 0 };
                } else if (ret_ty_size <= 8) {
                    // Fits the first C-ABI integer return register (sized alias).
                    const aliased_reg = registerAlias(abi.getCAbiIntReturnRegs(self.target.*)[0], ret_ty_size);
                    result.return_value = .{ .register = aliased_reg };
                } else {
                    // TODO: return argument cell should go first
                    result.return_value = .{ .stack_offset = 0 };
                }
            }

            // Input params
            var next_stack_offset: u32 = switch (result.return_value) {
                .stack_offset => |off| @intCast(u32, off),
                else => 0,
            };

            for (param_types) |ty, i| {
                assert(ty.hasRuntimeBits());
                // Classify per OS ABI: one class per param supported so far.
                const classes: []const abi.Class = switch (self.target.os.tag) {
                    .windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)},
                    else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none),
                };
                if (classes.len > 1) {
                    return self.fail("TODO handle multiple classes per type", .{});
                }
                switch (classes[0]) {
                    .integer => blk: {
                        if (i >= abi.getCAbiIntParamRegs(self.target.*).len) break :blk; // fallthrough
                        result.args[i] = .{ .register = abi.getCAbiIntParamRegs(self.target.*)[i] };
                        continue;
                    },
                    .memory => {}, // fallthrough
                    else => |class| return self.fail("TODO handle calling convention class {s}", .{
                        @tagName(class),
                    }),
                }
                // Stack-passed: bump past the param, aligned to the param's alignment.
                const param_size = @intCast(u32, ty.abiSize(self.target.*));
                const param_align = @intCast(u32, ty.abiAlignment(self.target.*));
                const offset = mem.alignForwardGeneric(u32, next_stack_offset + param_size, param_align);
                result.args[i] = .{ .stack_offset = @intCast(i32, offset) };
                next_stack_offset = offset;
            }

            // Align the stack to 16bytes before allocating shadow stack space (if any).
            const aligned_next_stack_offset = mem.alignForwardGeneric(u32, next_stack_offset, 16);
            const padding = aligned_next_stack_offset - next_stack_offset;
            if (padding > 0) {
                // Shift stack-passed args so the padding sits below them.
                for (result.args) |*arg| {
                    if (arg.isRegister()) continue;
                    arg.stack_offset += @intCast(i32, padding);
                }
            }

            // Windows x64 reserves 32 bytes of shadow space for the callee.
            const shadow_stack_space: u32 = switch (self.target.os.tag) {
                .windows => @intCast(u32, 4 * @sizeOf(u64)),
                else => 0,
            };

            // alignment padding | args ... | shadow stack space (if any) | ret addr | $rbp |
            result.stack_byte_count = aligned_next_stack_offset + shadow_stack_space;
            result.stack_align = 16;
        },
        .Unspecified => {
            // Return values
            if (ret_ty.zigTypeTag() == .NoReturn) {
                result.return_value = .{ .unreach = {} };
            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
                result.return_value = .{ .none = {} };
            } else {
                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
                if (ret_ty_size == 0) {
                    assert(ret_ty.isError());
                    result.return_value = .{ .immediate = 0 };
                } else if (ret_ty_size <= 8) {
                    const aliased_reg = registerAlias(abi.getCAbiIntReturnRegs(self.target.*)[0], ret_ty_size);
                    result.return_value = .{ .register = aliased_reg };
                } else {
                    // We simply make the return MCValue a stack offset. However, the actual value
                    // for the offset will be populated later. We will also push the stack offset
                    // value into an appropriate register when we resolve the offset.
                    result.return_value = .{ .stack_offset = 0 };
                }
            }

            // Input params
            var next_stack_offset: u32 = switch (result.return_value) {
                .stack_offset => |off| @intCast(u32, off),
                else => 0,
            };

            // Zig's unspecified CC passes every runtime param on the stack.
            for (param_types) |ty, i| {
                if (!ty.hasRuntimeBits()) {
                    result.args[i] = .{ .none = {} };
                    continue;
                }
                const param_size = @intCast(u32, ty.abiSize(self.target.*));
                const param_align = @intCast(u32, ty.abiAlignment(self.target.*));
                const offset = mem.alignForwardGeneric(u32, next_stack_offset + param_size, param_align);
                result.args[i] = .{ .stack_offset = @intCast(i32, offset) };
                next_stack_offset = offset;
            }

            result.stack_align = 16;
            // TODO fix this so that the 16byte alignment padding is at the current value of $rsp, and push
            // the args onto the stack so that there is no padding between the first argument and
            // the standard preamble.
            // alignment padding | args ... | ret addr | $rbp |
            result.stack_byte_count = mem.alignForwardGeneric(u32, next_stack_offset, result.stack_align);
        },
        else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}),
    }

    return result;
}

/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`.
fn wantSafety(self: *Self) bool {
    return switch (self.bin_file.options.optimize_mode) {
        .Debug => true,
        .ReleaseSafe => true,
        .ReleaseFast => false,
        .ReleaseSmall => false,
    };
}

/// Records a codegen error message on `self` and returns error.CodegenFail.
/// Asserts only one error is ever recorded per codegen run.
fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
    @setCold(true);
    assert(self.err_msg == null);
    self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args);
    return error.CodegenFail;
}

/// NOTE(review): currently byte-identical to `fail` — presumably kept separate
/// for symbol-resolution call sites; consider merging or differentiating.
fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError {
    @setCold(true);
    assert(self.err_msg == null);
    self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args);
    return error.CodegenFail;
}

/// Parses a register name, preferring an arch-specific parser when `Register`
/// provides one, otherwise falling back to plain enum-name lookup.
fn parseRegName(name: []const u8) ?Register {
    if (@hasDecl(Register, "parseRegName")) {
        return Register.parseRegName(name);
    }
    return std.meta.stringToEnum(Register, name);
}

/// Returns register wide enough to hold at least `size_bytes`.
fn registerAlias(reg: Register, size_bytes: u32) Register {
    if (size_bytes == 0) {
        unreachable; // should be comptime-known
    } else if (size_bytes <= 1) {
        return reg.to8();
    } else if (size_bytes <= 2) {
        return reg.to16();
    } else if (size_bytes <= 4) {
        return reg.to32();
    } else if (size_bytes <= 8) {
        return reg.to64();
    } else if (size_bytes <= 16) {
        return reg.to128();
    } else if (size_bytes <= 32) {
        return reg.to256();
    } else unreachable;
}

/// Truncates the value in the register in place.
/// Clobbers any remaining bits.
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
    const int_info = ty.intInfo(self.target.*);
    const max_reg_bit_width = Register.rax.size();
    switch (int_info.signedness) {
        .signed => {
            // Sign-extend in place: shift left then arithmetic shift right.
            // NOTE(review): the u6 cast assumes 0 < bits <= 64 — confirm callers
            // never pass 0-bit ints.
            const shift = @intCast(u6, max_reg_bit_width - int_info.bits);
            try self.genShiftBinOpMir(.sal, Type.isize, reg, .{ .immediate = shift });
            try self.genShiftBinOpMir(.sar, Type.isize, reg, .{ .immediate = shift });
        },
        .unsigned => {
            // Zero the high bits with an AND mask; masks wider than an imm32
            // go through a temporary register.
            const shift = @intCast(u6, max_reg_bit_width - int_info.bits);
            const mask = (~@as(u64, 0)) >> shift;
            if (int_info.bits <= 32) {
                try self.genBinOpMir(.@"and", Type.usize, .{ .register = reg }, .{ .immediate = mask });
            } else {
                const tmp_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = mask });
                try self.genBinOpMir(.@"and", Type.usize, .{ .register = reg }, .{ .register = tmp_reg });
            }
        },
    }
}

/// Whether SSE/AVX float intrinsics may be used for this float type on the
/// target. Only f32/f64 are handled; other float widths are currently unreachable.
fn intrinsicsAllowed(target: Target, ty: Type) bool {
    return switch (ty.tag()) {
        .f32,
        .f64,
        => Target.x86.featureSetHasAny(target.cpu.features, .{ .sse2, .avx, .avx2 }),
        else => unreachable, // TODO finish this off
    };
}

/// True when AVX (or AVX2) encodings may be emitted for this target.
fn hasAvxSupport(target: Target) bool {
    return Target.x86.featureSetHasAny(target.cpu.features, .{ .avx, .avx2 });
}
https://raw.githubusercontent.com/mazino3/ziglang/3db8cffa3b383011471f425983a7e98ad8a46aa5/src/arch/x86_64/CodeGen.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const curl = @cImport({
    @cInclude("curl/curl.h");
});

// A parsed repository reference.
// NOTE(review): `undefined` field defaults mean a bare `Repo{}` (as used in
// parseUrl below) holds unreadable memory until every field is assigned.
const Repo = struct {
    user: []const u8 = undefined,
    name: []const u8 = undefined,
    url: []const u8 = undefined,
};

/// Clones every repository URL given on the command line into /tmp.
pub fn main() anyerror!void {
    // var alloc_buf: [10_000_000]u8 = undefined;
    // var fba = std.heap.FixedBufferAllocator.init(&alloc_buf);
    // const alloc = fba.allocator();

    // var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    // defer arena.deinit();
    // const alloc = arena.allocator();

    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const alloc = gpa.allocator();

    const stdout = std.io.getStdOut().writer();

    // Reused curl URL-parsing handle for all args.
    // NOTE(review): never released with curl_url_cleanup — confirm intended.
    const h = curl.curl_url();

    var arg_iter = std.process.args();
    while (try arg_iter.next(alloc)) |url| {
        const repo = parseUrl(alloc, h, url);
        if (repo == null) {
            //not sure why this prints \252\252\252...
            //std.log.warn("can't parse: {s}", .{url});
            // NOTE(review): the \252 (0xAA) garbage is consistent with reading
            // undefined/dangling memory — see the FIXME on parseUrl's return.
            continue;
        } else {
            std.log.debug("{s} {s}", .{ repo.?.user, repo.?.name });
            try gitClone(stdout, alloc, repo.?);
        }
    }
}

test "basic test" {
    try std.testing.expectEqual(10, 3 + 7);
}

/// Builds a Repo from a URL and its path component ("/user/name[/...]").
fn makeRepo(url: []const u8, path: []const u8) Repo {
    var it = std.mem.split(u8, path, "/");
    _ = it.next().?; // emptiness before the slash
    const user = it.next().?;
    const repo = it.next().?;
    // sourcehut uses tildes like unix homedirs, gotta strip that
    return Repo{
        .url = url,
        .user = if (user[0] == '~')
            user[1..]
// (continuation of makeRepo's return expression)
else
            user,
        .name = repo,
    };
}

/// Parses `url` with libcurl's URL API and extracts user/name from its path.
/// Returns null when curl rejects the URL.
/// FIXME(review): returns `&repo`, a pointer to a stack local — the pointee is
/// dead as soon as this function returns, which is undefined behavior for every
/// caller. Should return `?Repo` by value (callers must be updated in step).
fn parseUrl(alloc: Allocator, h: ?*curl.struct_Curl_URL, url: [:0]const u8) ?*Repo {
    var host: ?[*:0]u8 = null; // NOTE(review): fetched below but never used or freed
    var path: ?[*:0]u8 = null; // NOTE(review): presumably should be curl_free'd — confirm
    var repo: Repo = Repo{};
    var uc = curl.curl_url_set(h, curl.CURLUPART_URL, url, 0);
    defer alloc.free(url);
    if (uc == 0) {
        _ = curl.curl_url_get(h, curl.CURLUPART_HOST, &host, 1);
        _ = curl.curl_url_get(h, curl.CURLUPART_PATH, &path, 1);
        repo = makeRepo(
            url,
            // the span() converts the slice to a []const u8
            std.mem.span(path.?),
        );
        return &repo;
    } else {
        return null;
    }
}

/// Runs `git clone <repo.url>` in /tmp and prints the child's stdout.
fn gitClone(stdout: std.io.Writer(
    std.fs.File,
    std.os.WriteError,
    std.fs.File.write,
), alloc: Allocator, repo: *Repo) !void {
    // NOTE(review): pointless discard — `repo` is used just below; remove.
    _ = repo;
    const args = &.{
        "git",
        "clone",
        repo.url,
    };

    const result = std.ChildProcess.exec(.{
        .allocator = alloc,
        .argv = args,
        .cwd = "/tmp",
        .env_map = null,
        .max_output_bytes = 10_000_000,
    }) catch |err| {
        // NOTE(review): message promises to show the command but prints nothing
        // after the colon.
        std.log.warn("The following command failed:\n", .{});
        return err;
    };

    // Happened upon this at
    // https://github.com/ziglang/zig/issues/8969#issue-909829436
    defer {
        alloc.free(result.stdout);
        alloc.free(result.stderr);
    }

    try stdout.print("{s}\n", .{result.stdout});
}
https://raw.githubusercontent.com/drewr/zigutils/a3c8b40a546e193ca5a4fd91011f0ed26e174de9/src/main.zig
const std = @import("std"); const web3 = @import("web3.zig"); /// Encodes data into JSON pub const JsonWriter = struct { pub fn writeLiteral(arg: []const u8, writer: anytype) !usize { return writer.write(arg); } pub fn writeString(arg: []const u8, writer: anytype) !usize { try writer.writeByte('"'); _ = try writer.write(arg); try writer.writeByte('"'); return arg.len + 2; } pub fn writeHexString(arg: []const u8, writer: anytype) !usize { const charset = "0123456789abcdef"; _ = try writer.write("\"0x"); for (arg) |b| { try writer.writeByte(charset[b >> 4]); try writer.writeByte(charset[b & 15]); } try writer.writeByte('"'); return 4 + arg.len / 2; } pub fn writeHexInt(arg: anytype, writer: anytype) !usize { var buffer: [78]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); try std.fmt.formatInt(arg, 16, .lower, .{}, fbs.writer()); _ = try writer.write("\"0x"); _ = try writer.write(buffer[0..fbs.pos]); try writer.writeByte('"'); return fbs.pos + 4; } pub fn write(arg: anytype, writer: anytype) !usize { const T = @TypeOf(arg); const TI = @typeInfo(T); if (TI == .Struct or TI == .Union) { if (@hasDecl(T, "toJson")) { return arg.toJson(writer); } } switch (TI) { .Int, .ComptimeInt => { return writeHexInt(arg, writer); }, .Float, .ComptimeFloat => { var buffer: [78]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); try std.fmt.formatFloatDecimal(arg, .{}, fbs.writer()); return writer.write(buffer[0..fbs.pos]); }, .Null => { return writer.write("null"); }, .Optional => { if (arg == null) { return write(null, writer); } else { return write(arg.?, writer); } }, .Bool => { if (arg) { return writer.write("true"); } else { return writer.write("false"); } }, .Void, .NoReturn => { return; }, .Array => |arr| { const slice: []const arr.child = arg[0..]; return write(slice, writer); }, .Struct => |struct_t| { if (struct_t.is_tuple) { try writer.writeByte('['); var total_size: usize = 2; var first = comptime true; inline for (struct_t.fields) |field| { if 
(@typeInfo(field.type) != .Optional or @field(arg, field.name) != null) { if (!first) { try writer.writeByte(','); total_size += 1; } first = false; total_size += try write(&@field(arg, field.name), writer); } } try writer.writeByte(']'); return total_size; } else { try writer.writeByte('{'); var total_size: usize = 2; var first = comptime true; inline for (struct_t.fields) |field| { const val_ptr = &@field(arg, field.name); if (@typeInfo(field.type) != .Optional or val_ptr.* != null) { if (!first) { try writer.writeByte(','); total_size += 1; } first = false; try writer.writeByte('"'); const json_def = getJsonDef(T, field.name); total_size += try writer.write(json_def.field_name); _ = try writer.write("\":"); total_size += try write(val_ptr, writer); total_size += 3; } } try writer.writeByte('}'); return total_size; } }, .Pointer => |ptr| { switch (ptr.size) { .One => { return write(arg.*, writer); }, .Slice => { // if (ptr.child == u8) { // const charset = "0123456789abcdef"; // _ = try writer.write("\"0x"); // for (arg) |b| { // try writer.writeByte(charset[b >> 4]); // try writer.writeByte(charset[b & 15]); // } // try writer.writeByte('"'); // return 4 + arg.len / 2; // } else { const len = arg.len; try writer.writeByte('['); var total_size: usize = 2; for (0..len) |i| { total_size += try write(arg[i], writer); if (i != len - 1) { try writer.writeByte(','); total_size += 1; } } try writer.writeByte(']'); return total_size; // } }, .Many, .C => { if (ptr.sentinel == null) { @compileError("Cannot serialize many pointer with no sentinel"); } const slice = std.mem.span(arg); return write(slice, writer); }, } }, else => { @compileLog("Type serialization not implemented: ", T); @compileError("Cannot serialize value"); }, } return 0; } }; /// Reads JSON from a buffer and decodes it into desired format pub const JsonReader = struct { const Field = struct { key: []const u8, value: []const u8, }; fn parseInt(buffer: *[]const u8, comptime T: type) !T { var _buffer = 
buffer.ptr; defer { buffer.len = buffer.len - (@intFromPtr(_buffer) - @intFromPtr(buffer.ptr)); buffer.ptr = _buffer; } const end = _buffer + buffer.len; var state: enum { start, zero, hex, decimal } = .start; var int_type: enum { decimal, hex } = undefined; var string = false; while (_buffer != end) : (_buffer += 1) { switch (state) { .start => { switch (_buffer[0]) { '"' => { string = true; }, '0' => { state = .zero; }, '1'...'9', '-' => { state = .decimal; int_type = .decimal; }, else => return error.UnexpectedCharacter, } }, .zero => { switch (_buffer[0]) { '"' => { _buffer += 1; break; }, 'x' => { state = .hex; int_type = .hex; }, '1'...'9' => { state = .decimal; }, else => { break; }, } }, .decimal => { switch (_buffer[0]) { '"' => { _buffer += 1; break; }, '0'...'9' => {}, else => { break; }, } }, .hex => { switch (_buffer[0]) { '"' => { _buffer += 1; break; }, '0'...'9', 'a'...'f', 'A'...'F' => {}, else => { break; }, } }, } } var size: usize = @intFromPtr(_buffer) - @intFromPtr(buffer.ptr); var slice = buffer.*[0..size]; if (string) { slice = slice[1 .. slice.len - 1]; } switch (int_type) { .hex => { return try std.fmt.parseInt(T, slice[2..], 16); }, .decimal => { return try std.fmt.parseInt(T, slice, 10); }, } } fn parseFloat(buffer: *[]const u8, comptime T: type) !T { var _buffer = buffer.ptr; defer { buffer.len = buffer.len - (@intFromPtr(_buffer) - @intFromPtr(buffer.ptr)); buffer.ptr = _buffer; } const end = _buffer + buffer.len; var state: enum { start, int, frac } = .start; while (_buffer != end) : (_buffer += 1) { switch (state) { .start => { switch (_buffer[0]) { '0'...'9', '-' => { state = .int; }, else => return error.UnexpectedCharacter, } }, .int => { switch (_buffer[0]) { '.' 
=> { state = .frac; }, '0'...'9' => {}, else => { break; }, } }, .frac => { switch (_buffer[0]) { '0'...'9' => {}, else => { break; }, } }, } } var size: usize = @intFromPtr(_buffer) - @intFromPtr(buffer.ptr); var slice = buffer.*[0..size]; return try std.fmt.parseFloat(T, slice); } fn parseString(buffer: *[]const u8) ![]const u8 { var _buffer = buffer.*; defer { buffer.* = _buffer; } var end: usize = 1; while (_buffer.len - end != 0 and _buffer[end] != '"') { end += 1; } if (_buffer.len - end == 0) { return error.EndOfBuffer; } _buffer = _buffer[end + 1 ..]; return buffer.*[1..end]; } fn parseStruct(allocator: std.mem.Allocator, buffer: *[]const u8, comptime T: type) !T { var _buffer = buffer.*; defer { buffer.* = _buffer; } if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] != '{') { return error.ExpectedObject; } _buffer = _buffer[1..]; var result: T = undefined; const TI = @typeInfo(T); var field_exists: [TI.Struct.fields.len]bool = .{false} ** TI.Struct.fields.len; while (_buffer.len > 0) { skipWhitespace(&_buffer); var key = try parseString(&_buffer); skipWhitespace(&_buffer); if (_buffer[0] != ':') { return error.ExpectedColon; } _buffer = _buffer[1..]; skipWhitespace(&_buffer); var exists = false; inline for (TI.Struct.fields, 0..) |field, i| { const json_def = getJsonDef(T, field.name); if (std.mem.eql(u8, json_def.field_name, key)) { @field(result, field.name) = try parse(allocator, &_buffer, field.type); field_exists[i] = true; exists = true; break; } } if (!exists) { try parseAny(allocator, &_buffer); } skipWhitespace(&_buffer); if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] == '}') { break; } if (_buffer[0] != ',') { return error.ExpectedComma; } _buffer = _buffer[1..]; } if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] != '}') { return error.ExpectedObjectClose; } inline for (TI.Struct.fields, 0..) 
|field, i| { if (!field_exists[i]) { const FTI = @typeInfo(field.type); if (field.default_value) |default| { const default_value = @as(*field.type, @constCast(@alignCast(@ptrCast(default)))).*; @field(result, field.name) = default_value; } else { if (FTI != .Optional) { return error.MissingRequiredField; } else { @field(result, field.name) = null; } } } } _buffer = _buffer[1..]; return result; } fn parseTuple(allocator: std.mem.Allocator, buffer: *[]const u8, comptime T: type) !T { const TI = @typeInfo(T); std.debug.assert(TI == .Struct and TI.Struct.is_tuple); var _buffer = buffer.*; defer { buffer.* = _buffer; } if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] != '{') { return error.ExpectedObject; } _buffer = _buffer[1..]; var result: T = undefined; const field_count = TI.Struct.fields.len; inline for (TI.Struct.fields, 0..) |field, i| { skipWhitespace(&_buffer); @field(result, field.name) = try parse(allocator, &_buffer, field.type); skipWhitespace(&_buffer); if (_buffer.len == 0) { return error.EndOfBuffer; } if (i == field_count - 1) { break; } if (_buffer[0] == '}') { return error.MissingRequiredField; } if (_buffer[0] != ',') { return error.ExpectedComma; } _buffer = _buffer[1..]; } if (_buffer[0] != '}') { return error.ExpectedObjectClose; } _buffer = _buffer[1..]; return result; } fn parseSlice(allocator: std.mem.Allocator, buffer: *[]const u8, comptime T: type) ![]T { var _buffer = buffer.*; defer { buffer.* = _buffer; } if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] != '[') { return error.ExpectedArray; } _buffer = _buffer[1..]; skipWhitespace(&_buffer); if (_buffer[0] == ']') { _buffer = _buffer[1..]; return allocator.alloc(T, 0); } var result = std.ArrayList(T).init(allocator); while (_buffer.len > 0) { skipWhitespace(&_buffer); const val = try parse(allocator, &_buffer, T); try result.append(val); skipWhitespace(&_buffer); if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] == ']') { break; } if 
(_buffer[0] != ',') { return error.ExpectedComma; } _buffer = _buffer[1..]; } if (_buffer[0] != ']') { return error.ExpectedArrayClose; } _buffer = _buffer[1..]; return result.toOwnedSlice(); } fn parseArray(allocator: std.mem.Allocator, buffer: *[]const u8, comptime T: type, comptime L: comptime_int) ![L]T { var _buffer = buffer.*; defer { buffer.* = _buffer; } if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] != '[') { return error.ExpectedArray; } _buffer = _buffer[1..]; var result: [L]T = undefined; for (0..L) |i| { skipWhitespace(&_buffer); result[i] = try parse(allocator, &_buffer, T); skipWhitespace(&_buffer); if (i == L - 1) { break; } if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] != ',') { return error.ExpectedComma; } _buffer = _buffer[1..]; } if (_buffer[0] != ']') { return error.ExpectedArrayClose; } _buffer = _buffer[1..]; return result; } fn parseAnyArray(allocator: std.mem.Allocator, buffer: *[]const u8) !void { var _buffer = buffer.*; defer { buffer.* = _buffer; } if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] != '[') { return error.ExpectedArray; } _buffer = _buffer[1..]; skipWhitespace(&_buffer); if (_buffer[0] == ']') { _buffer = _buffer[1..]; return; } while (_buffer.len > 0) { skipWhitespace(&_buffer); try parseAny(allocator, &_buffer); skipWhitespace(&_buffer); if (_buffer.len == 0) { return error.EndOfBuffer; } if (_buffer[0] == ']') { break; } if (_buffer[0] != ',') { return error.ExpectedComma; } _buffer = _buffer[1..]; } if (_buffer[0] != ']') { return error.ExpectedArrayClose; } _buffer = _buffer[1..]; } /// Skips any type of json value at the cursor (allocator not used) fn parseAny(allocator: std.mem.Allocator, buffer: *[]const u8) anyerror!void { var _buffer = buffer.*; defer { buffer.* = _buffer; } switch (_buffer[0]) { '"' => { _ = try parseString(&_buffer); }, '0'...'9' => { _buffer = _buffer[1..]; while (_buffer.len > 0 and (_buffer[0] == '.' 
or std.ascii.isDigit(_buffer[0]))) { _buffer = _buffer[1..]; } }, '{' => { _ = try parseStruct(allocator, &_buffer, struct {}); }, '[' => { try parseAnyArray(allocator, &_buffer); }, 'f', 't' => { if (buffer.len >= "true".len and std.mem.eql(u8, buffer.*[0.."true".len], "true")) { _buffer = _buffer["true".len..]; } else if (buffer.len >= "false".len and std.mem.eql(u8, buffer.*[0.."false".len], "false")) { _buffer = _buffer["false".len..]; } else { return error.ExpectedValue; } }, else => return error.ExpectedValue, } } /// Parses JSON in a supplied buffer into type T /// The supplied buffer pointer is moved to the end of the parsed value, make a copy to /// avoid losing track of allocation /// Caller is responsible for any memory allocated during parsing which may include /// deeply nested pointers, an arena allocator can be used to ensure all allocated memory /// is freed correctly pub fn parse(allocator: std.mem.Allocator, buffer: *[]const u8, comptime T: type) !T { const TI = @typeInfo(T); skipWhitespace(buffer); if (buffer.len == 0) { return error.EndOfBuffer; } if (TI == .Struct or TI == .Union) { if (@hasDecl(T, "fromJson")) { return T.fromJson(allocator, buffer); } else { if (@hasDecl(T, "fromString")) { if (buffer.*[0] == '"') { const str = try parseString(buffer); return T.fromString(str); } } else if (@hasDecl(T, "fromStringAlloc")) { if (buffer.*[0] == '"') { const str = try parseString(buffer); return try T.fromStringAlloc(allocator, str); } } else if (TI == .Union) { @compileError("Union requries a fromString or fromStringAlloc"); } } } switch (TI) { .Int => { return parseInt(buffer, T); }, .Float => { return parseFloat(buffer, T); }, .Bool => { if (buffer.len >= "true".len and std.mem.eql(u8, buffer.*[0.."true".len], "true")) { buffer.* = buffer.*["true".len..]; return true; } else if (buffer.len >= "false".len and std.mem.eql(u8, buffer.*[0.."false".len], "false")) { buffer.* = buffer.*["false".len..]; return false; } return error.InvalidBoolean; }, 
.Struct => |struct_t| { if (!struct_t.is_tuple) { return try parseStruct(allocator, buffer, T); } else { return try parseTuple(allocator, buffer, T); } }, .Union => { return error.ParserError; }, .Pointer => |ptr| { switch (ptr.size) { .One => { if (@hasDecl(ptr.child, "fromStringAlloc")) { if (buffer.*[0] == '"') { const str = try parseString(buffer); return try ptr.child.fromStringAlloc(allocator, str); } } else { var val = try allocator.create(ptr.child); val.* = try parse(allocator, buffer, ptr.child); return val; } unreachable; }, .Many, .C, .Slice => { if (ptr.child == u8 and buffer.*[0] == '"') { const str = try parseString(buffer); const val = try allocator.alloc(u8, str.len); @memcpy(val, str); return val; } return try parseSlice(allocator, buffer, ptr.child); }, } }, .Array => |arr| { return try parseArray(allocator, buffer, arr.child, arr.len); }, .Optional => |opt| { if (buffer.len >= "null".len and std.mem.eql(u8, buffer.*[0.."null".len], "null")) { buffer.* = buffer.*["null".len..]; return null; } return try parse(allocator, buffer, opt.child); }, .Enum => |enum_t| { if (buffer.*[0] == '"') { const str = try parseString(buffer); inline for (enum_t.fields) |field| { if (std.mem.eql(u8, field.name, str)) { return @enumFromInt(field.value); } } } unreachable; }, else => { @compileError("Cannot deserialize type " ++ @typeName(T)); }, } } fn skipWhitespace(buffer: *[]const u8) void { while (buffer.*.len > 0 and std.ascii.isWhitespace(buffer.*[0])) { buffer.* = buffer.*[1..]; } } }; pub const JsonDef = struct { field_name: []const u8, }; fn getJsonDef(comptime T: type, comptime field_name: []const u8) JsonDef { const TI = @typeInfo(T); std.debug.assert(TI == .Struct); if (@hasDecl(T, "json_def")) { const json_defs = T.json_def; if (@hasField(@TypeOf(json_defs), field_name)) { return @field(T.json_def, field_name); } } return JsonDef{ .field_name = field_name, }; } test "writing" { const assert = std.debug.assert; // Struct { const val: struct { hello: u32, 
world: u32, } = .{ .hello = 0x20, .world = 0x40, }; var buf: [32]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); const size = try JsonWriter.write(val, fbs.writer()); assert(std.mem.eql(u8, buf[0..size], "{\"hello\":\"0x20\",\"world\":\"0x40\"}")); } } test "reading" { const assert = std.debug.assert; const allocator = std.testing.allocator; // Struct { const Struct = struct { hello: u32, world: u32, }; const buf = "{\"hello\":\"0x20\",\"world\":\"0x40\"}"; var ptr: []const u8 = buf[0..]; var result = try JsonReader.parse(allocator, &ptr, Struct); assert(result.hello == 0x20); assert(result.world == 0x40); } // Struct w/ optional { const Struct = struct { hello: u32, world: ?u32 = 0x60, }; const buf = "{\"hello\":\"0x20\"}"; var ptr: []const u8 = buf[0..]; var result = try JsonReader.parse(allocator, &ptr, Struct); assert(result.hello == 0x20); assert(result.world == 0x60); } // Invalid number { const buf = "{"; var ptr: []const u8 = buf[0..]; var result = JsonReader.parse(allocator, &ptr, u32); try std.testing.expectError(error.UnexpectedCharacter, result); } // Unexpected EOF { const Struct = struct { hello: u32, world: u32, }; const buf = "{\"hello"; var ptr: []const u8 = buf[0..]; var result = JsonReader.parse(allocator, &ptr, Struct); try std.testing.expectError(error.EndOfBuffer, result); } // Missing field { const Struct = struct { hello: u32, world: u32, }; const buf = "{\"hello\":\"0x20\"}"; var ptr: []const u8 = buf[0..]; var result = JsonReader.parse(allocator, &ptr, Struct); try std.testing.expectError(error.MissingRequiredField, result); } } test "json def" { const json_def = getJsonDef(web3.abi.AbiEntry, "state_mutability"); std.debug.assert(std.mem.eql(u8, json_def.field_name, "stateMutability")); }
https://raw.githubusercontent.com/kanewallmann/web3.zig/918ff527956939a8f6fe9323aa4fe50777aeecc4/src/json.zig
const std = @import("std"); const zang = @import("zang"); const f = @import("zang-12tet"); const common = @import("common.zig"); const c = @import("common/c.zig"); const util = @import("common/util.zig"); pub const AUDIO_FORMAT: zang.AudioFormat = .signed16_lsb; pub const AUDIO_SAMPLE_RATE = 48000; pub const AUDIO_BUFFER_SIZE = 1024; pub const DESCRIPTION = \\example_song \\ \\Plays a canned melody (Bach's Toccata and Fugue in D \\Minor). \\ \\Press spacebar to restart the song. ; const a4 = 440.0; const NOTE_DURATION = 0.15; // everything comes out of the text file in this format const MyNoteParams = struct { freq: f32, note_on: bool, }; const Pedal = struct { const Module = @import("modules.zig").PMOscInstrument; fn initModule() Module { return Module.init(0.4); } fn makeParams(sample_rate: f32, src: MyNoteParams) Module.Params { return .{ .sample_rate = sample_rate, .freq = src.freq * 0.5, .note_on = src.note_on, }; } const polyphony = 3; const num_columns = 2; }; const RegularOrgan = struct { const Module = @import("modules.zig").NiceInstrument; fn initModule() Module { return Module.init(0.25); } fn makeParams(sample_rate: f32, src: MyNoteParams) Module.Params { return .{ .sample_rate = sample_rate, .freq = src.freq, .note_on = src.note_on, }; } const polyphony = 10; const num_columns = 8; }; const WeirdOrgan = struct { const Module = @import("modules.zig").NiceInstrument; fn initModule() Module { return Module.init(0.1); } fn makeParams(sample_rate: f32, src: MyNoteParams) Module.Params { return .{ .sample_rate = sample_rate, .freq = src.freq, .note_on = src.note_on, }; } const polyphony = 4; const num_columns = 2; }; // note: i would prefer for this to be an array of types (so i don't have to // give meaningless names to the fields), but it's also being used as an // instance type. you can't do that with an array of types. and without // reification features i can't generate a struct procedurally. 
(it needs to be // a struct because the elements/fields have different types) const Voices = struct { voice0: Voice(Pedal), voice1: Voice(RegularOrgan), voice2: Voice(WeirdOrgan), }; // this parallels the Voices struct. these values are not necessarily the same // as the polyphony amount. they're just a parsing detail const COLUMNS_PER_VOICE = [@typeInfo(Voices).Struct.fields.len]usize{ Pedal.num_columns, RegularOrgan.num_columns, WeirdOrgan.num_columns, }; const TOTAL_COLUMNS = blk: { var sum: usize = 0; for (COLUMNS_PER_VOICE) |v| sum += v; break :blk sum; }; const NUM_INSTRUMENTS = COLUMNS_PER_VOICE.len; // note we can't put params straight into Module.Params because that requires // sample_rate which is only known at runtime var all_notes_arr: [NUM_INSTRUMENTS][20000]zang.Notes(MyNoteParams).SongEvent = undefined; var all_notes: [NUM_INSTRUMENTS][]zang.Notes(MyNoteParams).SongEvent = undefined; fn makeSongNote( t: f32, id: usize, freq: f32, note_on: bool, ) zang.Notes(MyNoteParams).SongEvent { return .{ .t = t, .note_id = id, .params = .{ .freq = freq, .note_on = note_on, }, }; } const Parser = @import("common/songparse1.zig").Parser(TOTAL_COLUMNS); // NOTE about polyphony: if you retrigger a note at the same frequency, it will // be played in a separate voice (won't cut off the previous note). polyphony // doesn't look at note frequency at all, just the on/off events. fn doParse(parser: *Parser) !void { const LastNote = struct { freq: f32, id: usize, }; var column_last_note = [1]?LastNote{null} ** TOTAL_COLUMNS; var instrument_num_notes = [1]usize{0} ** NUM_INSTRUMENTS; var next_id: usize = 1; var t: f32 = 0; var rate: f32 = 1.0; var tempo: f32 = 1.0; while (try parser.parseToken()) |token| { if (token.isWord("start")) { t = 0.0; var i: usize = 0; while (i < NUM_INSTRUMENTS) : (i += 1) { instrument_num_notes[i] = 0; } // TODO what about column_last_note? 
} else if (token.isWord("rate")) { rate = try parser.requireNumber(); } else if (token.isWord("tempo")) { tempo = try parser.requireNumber(); } else { switch (token) { .notes => |notes| { const old_instrument_num_notes = instrument_num_notes; for (notes) |note, col| { const instrument_index = blk: { var first_column: usize = 0; for (COLUMNS_PER_VOICE) |num_columns, track_index| { if (col < first_column + num_columns) { break :blk track_index; } first_column += num_columns; } unreachable; }; var note_ptr = &all_notes_arr[instrument_index][instrument_num_notes[instrument_index]]; switch (note) { .idle => {}, .freq => |freq| { // note-off of previous note in this column // (if present) if (column_last_note[col]) |last_note| { note_ptr.* = makeSongNote( t, last_note.id, last_note.freq, false, ); instrument_num_notes[instrument_index] += 1; note_ptr = &all_notes_arr[instrument_index][instrument_num_notes[instrument_index]]; } // note-on event for the new frequency note_ptr.* = makeSongNote(t, next_id, freq, true); instrument_num_notes[instrument_index] += 1; column_last_note[col] = LastNote{ .id = next_id, .freq = freq, }; next_id += 1; }, .off => { if (column_last_note[col]) |last_note| { note_ptr.* = makeSongNote( t, last_note.id, last_note.freq, false, ); instrument_num_notes[instrument_index] += 1; column_last_note[col] = null; } }, } } t += NOTE_DURATION / (rate * tempo); // sort the events at this time frame by note id. 
this // puts note-offs before note-ons var i: usize = 0; while (i < NUM_INSTRUMENTS) : (i += 1) { const start = old_instrument_num_notes[i]; const end = instrument_num_notes[i]; std.sort.sort( zang.Notes(MyNoteParams).SongEvent, all_notes_arr[i][start..end], struct { fn compare( a: zang.Notes(MyNoteParams).SongEvent, b: zang.Notes(MyNoteParams).SongEvent, ) bool { return a.note_id < b.note_id; } }.compare, ); } }, else => return error.BadToken, } } } // now for each of the 3 instruments, we have chronological list of all // note on and off events (with a lot of overlapping). the notes need to // be identified by their frequency, which kind of sucks. i should // probably change the parser above to assign them unique IDs. var i: usize = 0; while (i < NUM_INSTRUMENTS) : (i += 1) { all_notes[i] = all_notes_arr[i][0..instrument_num_notes[i]]; } //i = 0; while (i < NUM_INSTRUMENTS) : (i += 1) { // if (i == 1) { // std.debug.warn("instrument {}:\n", .{ i }); // for (all_notes[i]) |note| { // std.debug.warn("t={} id={} freq={} note_on={}\n", .{ // note.t, // note.id, // note.params.freq, // note.params.note_on, // }); // } // } //} } fn parse() void { var buffer: [150000]u8 = undefined; const contents = util.readFile(buffer[0..]) catch { std.debug.warn("failed to read file\n", .{}); return; }; var parser = Parser{ .a4 = a4, .contents = contents, .index = 0, .line_index = 0, }; doParse(&parser) catch { std.debug.warn("parse failed on line {}\n", .{parser.line_index + 1}); }; } // polyphonic instrument, encapsulating note tracking (uses `all_notes` global) fn Voice(comptime T: type) type { return struct { pub const num_outputs = T.Module.num_outputs; pub const num_temps = T.Module.num_temps; const SubVoice = struct { module: T.Module, trigger: zang.Trigger(MyNoteParams), }; tracker: zang.Notes(MyNoteParams).NoteTracker, dispatcher: zang.Notes(MyNoteParams).PolyphonyDispatcher(T.polyphony), sub_voices: [T.polyphony]SubVoice, fn init(track_index: usize) @This() { var self: 
@This() = .{ .tracker = zang.Notes(MyNoteParams).NoteTracker.init(all_notes[track_index]), .dispatcher = zang.Notes(MyNoteParams).PolyphonyDispatcher(T.polyphony).init(), .sub_voices = undefined, }; var i: usize = 0; while (i < T.polyphony) : (i += 1) { self.sub_voices[i] = .{ .module = T.initModule(), .trigger = zang.Trigger(MyNoteParams).init(), }; } return self; } fn reset(self: *@This()) void { self.tracker.reset(); self.dispatcher.reset(); for (self.sub_voices) |*sub_voice| { sub_voice.trigger.reset(); } } fn paint( self: *@This(), span: zang.Span, outputs: [num_outputs][]f32, temps: [num_temps][]f32, ) void { const iap = self.tracker.consume( AUDIO_SAMPLE_RATE, span.end - span.start, ); const poly_iap = self.dispatcher.dispatch(iap); for (self.sub_voices) |*sub_voice, i| { var ctr = sub_voice.trigger.counter(span, poly_iap[i]); while (sub_voice.trigger.next(&ctr)) |result| { sub_voice.module.paint( result.span, outputs, temps, result.note_id_changed, T.makeParams(AUDIO_SAMPLE_RATE, result.params), ); } } } }; } pub const MainModule = struct { pub const num_outputs = 1; pub const num_temps = blk: { comptime var n: usize = 0; inline for (@typeInfo(Voices).Struct.fields) |field| { n = std.math.max(n, @field(field.field_type, "num_temps")); } break :blk n; }; pub const output_audio = common.AudioOut{ .mono = 0 }; pub const output_visualize = 0; voices: Voices, pub fn init() MainModule { parse(); var mod: MainModule = .{ .voices = undefined, }; inline for (@typeInfo(Voices).Struct.fields) |field, track_index| { @field(mod.voices, field.name) = field.field_type.init(track_index); } return mod; } pub fn paint( self: *MainModule, span: zang.Span, outputs: [num_outputs][]f32, temps: [num_temps][]f32, ) void { inline for (@typeInfo(Voices).Struct.fields) |field| { const VoiceType = field.field_type; @field(self.voices, field.name).paint( span, outputs, util.subarray(temps, VoiceType.num_temps), ); } } pub fn keyEvent(self: *MainModule, key: i32, down: bool, 
impulse_frame: usize) bool { if (down and key == c.SDLK_SPACE) { inline for (@typeInfo(Voices).Struct.fields) |field| { @field(self.voices, field.name).reset(); } } return false; } };
https://raw.githubusercontent.com/justjosias/zang/13e78fd332c30be704863eb96e27a2a39df1bacd/examples/example_song.zig
const std = @import("std"); const print = std.debug.print; const file = @embedFile("entry.txt"); const literalCount = struct { codeCount : u64, dataCount : u64, encodedCount : u64, // for the second problem prevWasSlash : bool, prevWasHexa : bool, fn increaseAll(self : *literalCount) void { self.codeCount += 1; self.dataCount += 1; self.encodedCount += 1; } }; pub fn main() !void { var count = literalCount{ .codeCount = 0, .dataCount = 0, .encodedCount = 0, .prevWasSlash = false, .prevWasHexa = false, }; var iter = std.mem.tokenizeScalar(u8, file, '\n'); while(iter.next()) |line| { count.encodedCount += 2; var i : usize = 0; while(i < line.len) { if(count.prevWasSlash) { if(line[i] == 'x') { count.prevWasHexa = true; } else { if(line[i] == '\\' or line[i] == '"') { count.encodedCount += 1; } count.dataCount += 1; count.codeCount += 2; count.encodedCount += 2; } count.prevWasSlash = false; } else if(count.prevWasHexa) { i += 1; count.prevWasHexa = false; count.dataCount += 1; count.codeCount += 4; count.encodedCount += 4; } else { if(line[i] == '\\') { count.prevWasSlash = true; count.encodedCount += 1; } else if(line[i] == '"') { count.codeCount += 1; count.encodedCount += 2; } else { count.increaseAll(); } } i += 1; } } print("code count: {}\n", .{count.codeCount}); print("data count: {}\n", .{count.dataCount}); print("encoded count: {}\n", .{count.encodedCount}); print("subtraction first problem: {}\n", .{count.codeCount - count.dataCount}); print("subtraction second problem: {}\n", .{count.encodedCount - count.codeCount}); }
https://raw.githubusercontent.com/luanjaardim/AdventOfCode/54cb89278baa46cc0d2e800c49bb4e6796c88cab/zig_2015/day8/main.zig
const TreeSitterHighlightStep = @This(); const std = @import("std"); const Step = std.build.Step; const RunStep = std.build.RunStep; const FileSource = std.build.FileSource; pub const Options = struct { treesitter_dir: []const u8, }; step: *Step, output: FileSource, pub fn create(b: *std.Build, opts: Options) *TreeSitterHighlightStep { const self = b.allocator.create(TreeSitterHighlightStep) catch @panic("OOM"); var run_step = RunStep.create( b, b.fmt("build tree-sitter-highlight", .{}), ); run_step.cwd = b.fmt("{s}/highlight", .{opts.treesitter_dir}); run_step.addArgs(&.{ "cargo", "build", "--release", "--lib" }); const output_str = b.fmt("{s}/target/release/libtree_sitter_highlight.a", .{opts.treesitter_dir}); self.* = .{ .step = &run_step.step, .output = .{ .path = output_str }, }; return self; }
https://raw.githubusercontent.com/zackradisic/tether/94eb9e28d6534fd54524ee34bf29063075c6c463/editor/build/tree_sitter_highlight_step.zig
const std = @import("std"); fn main() void { std.debug.print("Hello, world!\n", .{}); }
https://raw.githubusercontent.com/chungjung-d/zig-sneak-peek/ef2d201248ef3b1392529d28308748f6a84a4d7c/zinglings/001_hello.zig
const std = @import("std");
const testing = std.testing;
const debug = std.debug.print;

const daya = @import("daya");
const main = @import("main.zig");

pub const OutputFormat = enum { dot, png, svg };

/// Fully parsed and validated command line arguments.
pub const AppArgs = struct {
    input_file: []const u8,
    output_file: []const u8,
    output_format: OutputFormat = .png,
};

/// Prints usage information to stderr.
/// `full = false` prints only the short banner used on argument errors;
/// `full = true` prints the complete help text.
pub fn printHelp(full: bool) void {
    debug(
        \\{0s} v{1s} - Quick graphing utility
        \\
        \\Usage: {0s} [arguments] input.daya output.png
        \\
    , .{ main.APP_NAME, main.APP_VERSION});

    if (!full) {
        debug(
            \\
            \\try '{0s} --help' for more information.
            \\
        , .{main.APP_NAME});
        return;
    }

    debug(
        \\
        \\Examples:
        \\ {0s} myapp.daya mynicediagram.png
        \\ {0s} myapp.daya mynicediagram.svg
        \\ {0s} myapp.daya mynicediagram.dot
        \\
        \\Arguments
        \\ -h, --help Show this help and exit
        \\ --version Show version and exit
        // \\ -v, --verbose Verbose output
        \\
        \\https://github.com/michaelo/daya
        \\
    , .{main.APP_NAME});
}

/// True when `arg` equals the `full` spelling or the optional `short` one.
fn argIs(arg: []const u8, full: []const u8, short: ?[]const u8) bool {
    return std.mem.eql(u8, arg, full) or std.mem.eql(u8, arg, short orelse "321NOSUCHTHING123");
}

/// If `arg` has the form `key=value` and `key` matches `full`/`short`,
/// returns the value part; otherwise null.
fn argHasValue(arg: []const u8, full: []const u8, short: ?[]const u8) ?[]const u8 {
    const eq_pos = std.mem.indexOf(u8, arg, "=") orelse return null;
    const key = arg[0..eq_pos];
    if (argIs(key, full, short)) {
        return arg[eq_pos + 1 ..];
    } else return null;
}

/// Returns the lowercased extension of `file`, written into `scrap`.
/// Errors: NoExtFound when `file` has no '.', ExtTooLong when the
/// extension does not fit in `scrap` (lowerString would otherwise hit its
/// output-length assert and panic on user-supplied input).
fn getLowercaseFileext(file: []const u8, scrap: []u8) ![]u8 {
    const last_dot = std.mem.lastIndexOf(u8, file, ".") orelse return error.NoExtFound;
    const ext = file[last_dot + 1 ..];
    if (ext.len > scrap.len) return error.ExtTooLong;
    return std.ascii.lowerString(scrap, ext);
}

/// Parses the (program-name-less) argument slice into AppArgs.
/// Returns error.OkExit for --help/--version (caller exits cleanly), and
/// descriptive errors for missing/invalid arguments after printing a hint.
pub fn parseArgs(args: []const []const u8) !AppArgs {
    if (args.len < 1) {
        debug("ERROR: No arguments provided\n", .{});
        printHelp(false);
        return error.NoArguments;
    }

    var scrap: [64]u8 = undefined;

    var maybe_input_file: ?[]const u8 = null;
    var maybe_output_file: ?[]const u8 = null;
    var maybe_output_format: ?OutputFormat = null;

    for (args) |arg| {
        // Flags
        if (argIs(arg, "--help", "-h")) {
            printHelp(true);
            return error.OkExit;
        }

        if (argIs(arg, "--version", null)) {
            debug("{0s} v{1s} (libdaya v{2s})\n", .{ main.APP_NAME, main.APP_VERSION, daya.LIB_VERSION });
            return error.OkExit;
        }

        if (arg[0] == '-') {
            debug("ERROR: Unsupported argument '{s}'\n", .{arg});
            printHelp(false);
            return error.InvalidArgument;
        }

        // Positional arguments are classified by file extension:
        // ".daya" is the input, any OutputFormat extension is the output.
        const ext = getLowercaseFileext(arg, scrap[0..]) catch {
            debug("WARNING: Could not read file-extension of argument '{s}' (ignoring)\n", .{arg});
            continue;
        };

        if (std.mem.eql(u8, ext, "daya")) {
            maybe_input_file = arg[0..];
            continue;
        }

        if (std.meta.stringToEnum(OutputFormat, ext)) |format| {
            maybe_output_file = arg[0..];
            maybe_output_format = format;
            continue;
        }

        debug("WARNING: Unhandled argument: '{s}'\n", .{arg});
    }

    // Validate parsed args
    const input_file = maybe_input_file orelse {
        debug("ERROR: Missing input file\n", .{});
        return error.NoInputFile;
    };

    const output_file = maybe_output_file orelse {
        debug("ERROR: Missing output file\n", .{});
        return error.NoOutputFile;
    };

    const output_format = maybe_output_format orelse {
        debug("ERROR: Unknown output format\n", .{});
        return error.NoOutputFormat;
    };

    // Donaroo
    return AppArgs{
        .input_file = input_file,
        .output_file = output_file,
        .output_format = output_format,
    };
}

test "ArgParse" {
    try testing.expectError(error.OkExit, parseArgs(&.{"--help"}));
    try testing.expectError(error.OkExit, parseArgs(&.{"-h"}));
    try testing.expectError(error.OkExit, parseArgs(&.{"--version"}));
}
https://raw.githubusercontent.com/michaelo/daya/74a91f70d11e0cdb7237d59dd87c61543be4c62e/compiler/src/argparse.zig
// Minimal Win32 "Hello, World!" without the C runtime: WinMainCRTStartup is
// the raw entry point used when no CRT startup code is linked in.
const win32 = @import("win32").everything;

/// Raw process entry point (no CRT). Writes "Hello, World!" to the process
/// standard output handle and exits with code 0; exits with 255 on failure.
pub export fn WinMainCRTStartup() callconv(@import("std").os.windows.WINAPI) noreturn {
    const hStdOut = win32.GetStdHandle(win32.STD_OUTPUT_HANDLE);
    if (hStdOut == win32.INVALID_HANDLE_VALUE) {
        //std.debug.warn("Error: GetStdHandle failed with {}\n", .{GetLastError()});
        win32.ExitProcess(255);
    }
    writeAll(hStdOut, "Hello, World!") catch win32.ExitProcess(255); // fail
    win32.ExitProcess(0);
}

/// Writes all of `buffer` to `hFile`, looping to handle partial writes and
/// clamping each request to the u32 byte count WriteFile accepts.
fn writeAll(hFile: win32.HANDLE, buffer: []const u8) !void {
    var written: usize = 0;
    while (written < buffer.len) {
        // Clamp the remaining length to what one WriteFile call can take (u32).
        const next_write = @as(u32, @intCast(0xFFFFFFFF & (buffer.len - written)));
        var last_written: u32 = undefined;
        // Fix: WriteFile returns a BOOL that is *nonzero* on success — it is not
        // guaranteed to be exactly 1, so only a zero return indicates failure.
        // The previous `1 != ...` test would misreport success values other than 1.
        if (0 == win32.WriteFile(hFile, buffer.ptr + written, next_write, &last_written, null)) {
            // TODO: derive a more specific error from GetLastError
            return error.WriteFileFailed;
        }
        written += last_written;
    }
}
https://raw.githubusercontent.com/marlersoft/zigwin32gen/a7ea975694193ef077c34aa3bca5ec3fbdba4712/examples/helloworld.zig
//! Process-wide helpers and build-time constants for bun: version strings,
//! clean-exit/panic/crash helpers, and the BunInfo diagnostic record.
const std = @import("std");
const Environment = @import("./env.zig");
const Output = @import("output.zig");
const use_mimalloc = @import("root").bun.use_mimalloc;
const StringTypes = @import("./string_types.zig");
const Mimalloc = @import("root").bun.Mimalloc;
const bun = @import("root").bun;

// Build id is embedded from the ./build-id file at compile time.
pub const build_id = std.fmt.parseInt(u64, std.mem.trim(u8, @embedFile("./build-id"), "\n \r\t"), 10) catch unreachable;

// On wasm a std.SemanticVersion is used; elsewhere the project's own Version type.
pub const version: if (Environment.isWasm) std.SemanticVersion else @import("./install/semver.zig").Version = .{
    .major = 1,
    .minor = 0,
    .patch = build_id,
};
const version_string = std.fmt.comptimePrint("{d}.{d}.{d}", .{ version.major, version.minor, version.patch });

// Version string with a "_debug" suffix in debug builds.
pub const package_json_version = if (Environment.isDebug)
    version_string ++ "_debug"
else
    version_string;

// Version plus an abbreviated (max 8 chars) git sha, when one is available.
pub const package_json_version_with_sha = if (Environment.git_sha.len == 0)
    package_json_version
else if (Environment.isDebug)
    std.fmt.comptimePrint("{s}_debug ({s})", .{ version_string, Environment.git_sha[0..@min(Environment.git_sha.len, 8)] })
else
    std.fmt.comptimePrint("{s} ({s})", .{ version_string, Environment.git_sha[0..@min(Environment.git_sha.len, 8)] });

// Semver-style "+revision" form, with a channel marker for debug/canary/test builds.
pub const package_json_version_with_revision = if (Environment.git_sha.len == 0)
    package_json_version
else if (Environment.isDebug)
    std.fmt.comptimePrint(version_string ++ "-debug+{s}", .{Environment.git_sha})
else if (Environment.is_canary)
    std.fmt.comptimePrint(version_string ++ "-canary+{s}", .{Environment.git_sha})
else if (Environment.isTest)
    std.fmt.comptimePrint(version_string ++ "-test+{s}", .{Environment.git_sha})
else
    std.fmt.comptimePrint(version_string ++ "+{s}", .{Environment.git_sha});

// Node.js-style platform name for the compile target.
pub const os_name = if (Environment.isWindows)
    "win32"
else if (Environment.isMac)
    "darwin"
else if (Environment.isLinux)
    "linux"
else if (Environment.isWasm)
    "wasm"
else
    "unknown";

// Node.js-style architecture name for the compile target.
pub const arch_name = if (Environment.isX64)
    "x64"
else if (Environment.isX86)
    "x86"
else if (Environment.isAarch64)
    "arm64"
else
    "unknown";

/// Returns the recorded process start time, or 0 in test builds.
pub inline fn getStartTime() i128 {
    if (Environment.isTest) return 0;
    return @import("root").bun.start_time;
}

/// Sets the current thread's name (Linux via prctl, macOS via pthread).
/// No-op on other platforms; failures are ignored.
pub fn setThreadName(name: StringTypes.stringZ) void {
    if (Environment.isLinux) {
        _ = std.os.prctl(.SET_NAME, .{@intFromPtr(name.ptr)}) catch 0;
    } else if (Environment.isMac) {
        _ = std.c.pthread_setname_np(name);
    }
}

/// Flushes stdout and stderr and exits with the given code.
pub fn exit(code: u8) noreturn {
    Output.flush();
    std.os.exit(code);
}

pub const AllocatorConfiguration = struct {
    verbose: bool = false,
    long_running: bool = false,
};

/// Asks mimalloc to collect free memory; no-op when mimalloc is disabled.
pub inline fn mimalloc_cleanup(force: bool) void {
    if (comptime use_mimalloc) {
        Mimalloc.mi_collect(force);
    }
}
pub const versions = @import("./generated_versions_list.zig");

// Enabling huge pages slows down bun by 8x or so
// Keeping this code for:
// 1. documentation that an attempt was made
// 2. if I want to configure allocator later
pub inline fn configureAllocator(_: AllocatorConfiguration) void {
    // if (comptime !use_mimalloc) return;
    // const Mimalloc = @import("./allocators/mimalloc.zig");
    // Mimalloc.mi_option_set_enabled(Mimalloc.mi_option_verbose, config.verbose);
    // Mimalloc.mi_option_set_enabled(Mimalloc.mi_option_large_os_pages, config.long_running);
    // if (!config.long_running) Mimalloc.mi_option_set(Mimalloc.mi_option_reset_delay, 0);
}

/// Formats and prints the message to stderr, flushes output, then panics.
pub fn panic(comptime fmt: string, args: anytype) noreturn {
    @setCold(true);
    if (comptime Environment.isWasm) {
        Output.printErrorln(fmt, args);
        Output.flush();
        @panic(fmt);
    } else {
        Output.prettyErrorln(fmt, args);
        Output.flush();
        std.debug.panic(fmt, args);
    }
}

// std.debug.assert but happens at runtime
pub fn invariant(condition: bool, comptime fmt: string, args: anytype) void {
    if (!condition) {
        _invariant(fmt, args);
    }
}

// Cold failure path for `invariant`: report the message, then panic (wasm)
// or exit with code 1 (native).
inline fn _invariant(comptime fmt: string, args: anytype) noreturn {
    @setCold(true);
    if (comptime Environment.isWasm) {
        Output.printErrorln(fmt, args);
        Output.flush();
        @panic(fmt);
    } else {
        Output.prettyErrorln(fmt, args);
        Global.exit(1);
    }
}

/// Panics with a fixed "not implemented" message.
pub fn notimpl() noreturn {
    @setCold(true);
    Global.panic("Not implemented yet!!!!!", .{});
}

// Make sure we always print any leftover
pub fn crash() noreturn {
    @setCold(true);
    Global.exit(1);
}

const Global = @This();
const string = @import("root").bun.string;

/// Snapshot of version/platform/framework info, serializable to a JSON AST.
pub const BunInfo = struct {
    bun_version: string,
    platform: Analytics.GenerateHeader.GeneratePlatform.Platform = undefined,
    framework: string = "",
    framework_version: string = "",

    const Analytics = @import("./analytics/analytics_thread.zig");
    const JSON = bun.JSON;
    const JSAst = bun.JSAst;

    /// Builds a BunInfo from the given bundler's configuration and converts it
    /// to a JSON AST expression using `allocator`.
    pub fn generate(comptime Bundler: type, bundler: Bundler, allocator: std.mem.Allocator) !JSAst.Expr {
        var info = BunInfo{
            .bun_version = Global.package_json_version,
            .platform = Analytics.GenerateHeader.GeneratePlatform.forOS(),
        };

        if (bundler.options.framework) |framework| {
            info.framework = framework.package;
            info.framework_version = framework.version;
        }

        return try JSON.toAST(allocator, BunInfo, info);
    }
};

// HTTP User-Agent string, e.g. "Bun/1.0.123".
pub const user_agent = "Bun/" ++ Global.package_json_version;

// Exported so native/C++ code can reference the same user agent string.
pub export const Bun__userAgent: [*:0]const u8 = Global.user_agent;

comptime {
    _ = Bun__userAgent;
}
https://raw.githubusercontent.com/beingofexistence13/multiversal-lang/dd769e3fc6182c23ef43ed4479614f43f29738c9/javascript/bun/src/__global.zig
//! {
//! "stdout": "hello",
//! "stderr": "world"
//! }
// NOTE: the JSON in the module doc comment above declares the output the test
// runner expects on each channel; it must stay in sync with the writes below.
const testsuite = @import("testsuite");

// Freestanding entry point: emit "hello" on stdout and "world" on stderr,
// then terminate with exit code 0.
export fn _start() callconv(.C) noreturn {
    testsuite.write(.stdout, "hello");
    testsuite.write(.stderr, "world");
    testsuite.exit(0);
}
https://raw.githubusercontent.com/ZigEmbeddedGroup/aviron/6f1b3e957561f8e80e059055ab9aa63a53a7e813/testsuite/lib/write-chan.zig
//! This file contains thin wrappers around OS-specific APIs, with these
//! specific goals in mind:
//! * Convert "errno"-style error codes into Zig errors.
//! * When null-terminated byte buffers are required, provide APIs which accept
//!   slices as well as APIs which accept null-terminated byte buffers. Same goes
//!   for UTF-16LE encoding.
//! * Where operating systems share APIs, e.g. POSIX, these thin wrappers provide
//!   cross platform abstracting.
//! * When there exists a corresponding libc function and linking libc, the libc
//!   implementation is used. Exceptions are made for known buggy areas of libc.
//!   On Linux libc can be side-stepped by using `std.os.linux` directly.
//! * For Windows, this file represents the API that libc would provide for
//!   Windows. For thin wrappers around Windows-specific APIs, see `std.os.windows`.
//! Note: The Zig standard library does not support POSIX thread cancellation, and
//! in general EINTR is handled by trying again.

const root = @import("root");
const std = @import("std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const elf = std.elf;
const dl = @import("dynamic_library.zig");
const MAX_PATH_BYTES = std.fs.MAX_PATH_BYTES;

const is_windows = builtin.os.tag == .windows;

// Per-OS namespaces; the BSD-family and Darwin targets all route through libc.
pub const darwin = std.c;
pub const dragonfly = std.c;
pub const freebsd = std.c;
pub const haiku = std.c;
pub const netbsd = std.c;
pub const openbsd = std.c;
pub const solaris = std.c;
pub const linux = @import("os/linux.zig");
pub const plan9 = @import("os/plan9.zig");
pub const uefi = @import("os/uefi.zig");
pub const wasi = @import("os/wasi.zig");
pub const windows = @import("os/windows.zig");

comptime {
    assert(@import("std") == std); // std lib tests require --zig-lib-dir
}

test {
    _ = linux;
    _ = uefi;
    _ = wasi;
    _ = windows;

    _ = @import("os/test.zig");
}

/// Applications can override the `system` API layer in their root source file.
/// Otherwise, when linking libc, this is the C API.
/// When not linking libc, it is the OS-specific system interface.
pub const system = if (@hasDecl(root, "os") and root.os != @This())
    root.os.system
else if (builtin.link_libc or is_windows)
    std.c
else switch (builtin.os.tag) {
    .linux => linux,
    .wasi => wasi,
    .uefi => uefi,
    else => struct {},
};

// Re-exports: alias the selected `system` layer's types and constants so users
// can write e.g. `std.os.fd_t` without caring which backend was chosen.
pub const AF = system.AF;
pub const AF_SUN = system.AF_SUN;
pub const ARCH = system.ARCH;
pub const AT = system.AT;
pub const AT_SUN = system.AT_SUN;
pub const CLOCK = system.CLOCK;
pub const CPU_COUNT = system.CPU_COUNT;
pub const CTL = system.CTL;
pub const DT = system.DT;
pub const E = system.E;
pub const Elf_Symndx = system.Elf_Symndx;
pub const F = system.F;
pub const FD_CLOEXEC = system.FD_CLOEXEC;
pub const Flock = system.Flock;
pub const HOST_NAME_MAX = system.HOST_NAME_MAX;
pub const IFNAMESIZE = system.IFNAMESIZE;
pub const IOV_MAX = system.IOV_MAX;
pub const IPPROTO = system.IPPROTO;
pub const KERN = system.KERN;
pub const Kevent = system.Kevent;
pub const LOCK = system.LOCK;
pub const MADV = system.MADV;
pub const MAP = system.MAP;
pub const MAX_ADDR_LEN = system.MAX_ADDR_LEN;
pub const MMAP2_UNIT = system.MMAP2_UNIT;
pub const MSG = system.MSG;
pub const NAME_MAX = system.NAME_MAX;
pub const O = system.O;
pub const PATH_MAX = system.PATH_MAX;
pub const POLL = system.POLL;
pub const POSIX_FADV = system.POSIX_FADV;
pub const PR = system.PR;
pub const PROT = system.PROT;
pub const REG = system.REG;
pub const RIGHT = system.RIGHT;
pub const RLIM = system.RLIM;
pub const RR = system.RR;
pub const S = system.S;
pub const SA = system.SA;
pub const SC = system.SC;
pub const _SC = system._SC;
pub const SEEK = system.SEEK;
pub const SHUT = system.SHUT;
pub const SIG = system.SIG;
pub const SIOCGIFINDEX = system.SIOCGIFINDEX;
pub const SO = system.SO;
pub const SOCK = system.SOCK;
pub const SOL = system.SOL;
pub const STDERR_FILENO = system.STDERR_FILENO;
pub const STDIN_FILENO = system.STDIN_FILENO;
pub const STDOUT_FILENO = system.STDOUT_FILENO;
pub const SYS = system.SYS;
pub const Sigaction = system.Sigaction;
pub const Stat = system.Stat;
pub const TCSA = system.TCSA;
pub const TCP = system.TCP;
pub const VDSO = system.VDSO;
pub const W = system.W;
pub const addrinfo = system.addrinfo;
pub const blkcnt_t = system.blkcnt_t;
pub const blksize_t = system.blksize_t;
pub const clock_t = system.clock_t;
pub const cpu_set_t = system.cpu_set_t;
pub const dev_t = system.dev_t;
pub const dl_phdr_info = system.dl_phdr_info;
pub const empty_sigset = system.empty_sigset;
pub const fd_t = system.fd_t;
pub const fdflags_t = system.fdflags_t;
pub const fdstat_t = system.fdstat_t;
pub const gid_t = system.gid_t;
pub const ifreq = system.ifreq;
pub const ino_t = system.ino_t;
pub const lookupflags_t = system.lookupflags_t;
pub const mcontext_t = system.mcontext_t;
pub const mode_t = system.mode_t;
pub const msghdr = system.msghdr;
pub const msghdr_const = system.msghdr_const;
pub const nfds_t = system.nfds_t;
pub const nlink_t = system.nlink_t;
pub const off_t = system.off_t;
pub const oflags_t = system.oflags_t;
pub const pid_t = system.pid_t;
pub const pollfd = system.pollfd;
pub const port_t = system.port_t;
pub const port_event = system.port_event;
pub const port_notify = system.port_notify;
pub const file_obj = system.file_obj;
pub const rights_t = system.rights_t;
pub const rlim_t = system.rlim_t;
pub const rlimit = system.rlimit;
pub const rlimit_resource = system.rlimit_resource;
pub const rusage = system.rusage;
pub const sa_family_t = system.sa_family_t;
pub const siginfo_t = system.siginfo_t;
pub const sigset_t = system.sigset_t;
pub const sockaddr = system.sockaddr;
pub const socklen_t = system.socklen_t;
pub const stack_t = system.stack_t;
pub const termios = system.termios;
pub const time_t = system.time_t;
pub const timespec = system.timespec;
pub const timestamp_t = system.timestamp_t;
pub const timeval = system.timeval;
pub const timezone = system.timezone;
pub const ucontext_t = system.ucontext_t;
pub const uid_t = system.uid_t;
pub const user_desc = system.user_desc;
pub const utsname = system.utsname;

pub const F_OK = system.F_OK;
pub const R_OK = system.R_OK;
pub const W_OK = system.W_OK;
pub const X_OK = system.X_OK;

/// Scatter/gather element for readv-style calls (mirrors POSIX struct iovec).
pub const iovec = extern struct {
    iov_base: [*]u8,
    iov_len: usize,
};

/// Read-only variant of `iovec` for writev-style calls.
pub const iovec_const = extern struct {
    iov_base: [*]const u8,
    iov_len: usize,
};

/// syslog severity levels.
pub const LOG = struct {
    /// system is unusable
    pub const EMERG = 0;
    /// action must be taken immediately
    pub const ALERT = 1;
    /// critical conditions
    pub const CRIT = 2;
    /// error conditions
    pub const ERR = 3;
    /// warning conditions
    pub const WARNING = 4;
    /// normal but significant condition
    pub const NOTICE = 5;
    /// informational
    pub const INFO = 6;
    /// debug-level messages
    pub const DEBUG = 7;
};

// On Windows sockets are a distinct handle type; elsewhere they are plain fds.
pub const socket_t = if (builtin.os.tag == .windows) windows.ws2_32.SOCKET else fd_t;

/// See also `getenv`. Populated by startup code before main().
/// TODO this is a footgun because the value will be undefined when using `zig build-lib`.
/// https://github.com/ziglang/zig/issues/4524
pub var environ: [][*:0]u8 = undefined;

/// Populated by startup code before main().
/// Not available on Windows. See `std.process.args`
/// for obtaining the process arguments.
pub var argv: [][*:0]u8 = undefined;

/// To obtain errno, call this function with the return value of the
/// system function call. For some systems this will obtain the value directly
/// from the return code; for others it will use a thread-local errno variable.
/// Therefore, this function only returns a well-defined value when it is called
/// directly after the system function call which one wants to learn the errno
/// value of.
pub const errno = system.getErrno;

/// Closes the file descriptor.
/// This function is not capable of returning any indication of failure. An
/// application which wants to ensure writes have succeeded before closing
/// must call `fsync` before `close`.
/// Note: The Zig standard library does not support POSIX thread cancellation.
pub fn close(fd: fd_t) void {
    if (builtin.os.tag == .windows) {
        return windows.CloseHandle(fd);
    }
    if (builtin.os.tag == .wasi) {
        _ = wasi.fd_close(fd);
        return;
    }
    if (comptime builtin.target.isDarwin()) {
        // This avoids the EINTR problem.
        switch (darwin.getErrno(darwin.@"close$NOCANCEL"(fd))) {
            .BADF => unreachable, // Always a race condition.
            else => return,
        }
    }
    switch (errno(system.close(fd))) {
        .BADF => unreachable, // Always a race condition.
        .INTR => return, // This is still a success. See https://github.com/ziglang/zig/issues/2425
        else => return,
    }
}

pub const FChmodError = error{
    AccessDenied,
    InputOutput,
    SymLinkLoop,
    FileNotFound,
    SystemResources,
    ReadOnlyFileSystem,
} || UnexpectedError;

/// Changes the mode of the file referred to by the file descriptor.
/// The process must have the correct privileges in order to do this
/// successfully, or must have the effective user ID matching the owner
/// of the file.
pub fn fchmod(fd: fd_t, mode: mode_t) FChmodError!void {
    if (builtin.os.tag == .windows or builtin.os.tag == .wasi)
        @compileError("Unsupported OS");

    // Retry on EINTR; all other errno values either map to a Zig error or are
    // programmer errors (unreachable).
    while (true) {
        const res = system.fchmod(fd, mode);

        switch (system.getErrno(res)) {
            .SUCCESS => return,
            .INTR => continue,
            .BADF => unreachable, // Can be reached if the fd refers to a directory opened without `OpenDirOptions{ .iterate = true }`
            .FAULT => unreachable,
            .INVAL => unreachable,
            .ACCES => return error.AccessDenied,
            .IO => return error.InputOutput,
            .LOOP => return error.SymLinkLoop,
            .NOENT => return error.FileNotFound,
            .NOMEM => return error.SystemResources,
            .NOTDIR => return error.FileNotFound,
            .PERM => return error.AccessDenied,
            .ROFS => return error.ReadOnlyFileSystem,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const FChownError = error{
    AccessDenied,
    InputOutput,
    SymLinkLoop,
    FileNotFound,
    SystemResources,
    ReadOnlyFileSystem,
} || UnexpectedError;

/// Changes the owner and group of the file referred to by the file descriptor.
/// The process must have the correct privileges in order to do this
/// successfully. The group may be changed by the owner of the directory to
/// any group of which the owner is a member. If the owner or group is
/// specified as `null`, the ID is not changed.
pub fn fchown(fd: fd_t, owner: ?uid_t, group: ?gid_t) FChownError!void {
    if (builtin.os.tag == .windows or builtin.os.tag == .wasi)
        @compileError("Unsupported OS");

    while (true) {
        // A null owner/group becomes the all-ones value (u32 -1), which the
        // syscall interprets as "leave unchanged".
        const res = system.fchown(fd, owner orelse @as(u32, 0) -% 1, group orelse @as(u32, 0) -% 1);

        switch (system.getErrno(res)) {
            .SUCCESS => return,
            .INTR => continue,
            .BADF => unreachable, // Can be reached if the fd refers to a directory opened without `OpenDirOptions{ .iterate = true }`
            .FAULT => unreachable,
            .INVAL => unreachable,
            .ACCES => return error.AccessDenied,
            .IO => return error.InputOutput,
            .LOOP => return error.SymLinkLoop,
            .NOENT => return error.FileNotFound,
            .NOMEM => return error.SystemResources,
            .NOTDIR => return error.FileNotFound,
            .PERM => return error.AccessDenied,
            .ROFS => return error.ReadOnlyFileSystem,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const GetRandomError = OpenError;

/// Obtain a series of random bytes. These bytes can be used to seed user-space
/// random number generators or for cryptographic purposes.
/// When linking against libc, this calls the
/// appropriate OS-specific library call. Otherwise it uses the zig standard
/// library implementation.
pub fn getrandom(buffer: []u8) GetRandomError!void {
    if (builtin.os.tag == .windows) {
        return windows.RtlGenRandom(buffer);
    }
    if (builtin.os.tag == .linux or builtin.os.tag == .freebsd) {
        var buf = buffer;
        // Use the libc getrandom() wrapper except on Linux with glibc < 2.25,
        // where it is missing and the raw syscall is used instead.
        const use_c = builtin.os.tag != .linux or
            std.c.versionCheck(std.builtin.Version{ .major = 2, .minor = 25, .patch = 0 }).ok;

        // getrandom may return fewer bytes than requested; loop until filled.
        while (buf.len != 0) {
            const res = if (use_c) blk: {
                const rc = std.c.getrandom(buf.ptr, buf.len, 0);
                break :blk .{
                    .num_read = @bitCast(usize, rc),
                    .err = std.c.getErrno(rc),
                };
            } else blk: {
                const rc = linux.getrandom(buf.ptr, buf.len, 0);
                break :blk .{
                    .num_read = rc,
                    .err = linux.getErrno(rc),
                };
            };

            switch (res.err) {
                .SUCCESS => buf = buf[res.num_read..],
                .INVAL => unreachable,
                .FAULT => unreachable,
                .INTR => continue,
                .NOSYS => return getRandomBytesDevURandom(buf),
                else => return unexpectedErrno(res.err),
            }
        }
        return;
    }
    switch (builtin.os.tag) {
        .netbsd, .openbsd, .macos, .ios, .tvos, .watchos => {
            system.arc4random_buf(buffer.ptr, buffer.len);
            return;
        },
        .wasi => switch (wasi.random_get(buffer.ptr, buffer.len)) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),
        },
        else => return getRandomBytesDevURandom(buffer),
    }
}

// Fallback for targets (or kernels) without a getrandom syscall: read the
// requested bytes from /dev/urandom, verifying it really is a character device.
fn getRandomBytesDevURandom(buf: []u8) !void {
    const fd = try openZ("/dev/urandom", O.RDONLY | O.CLOEXEC, 0);
    defer close(fd);

    const st = try fstat(fd);
    if (!S.ISCHR(st.mode)) {
        return error.NoDevice;
    }

    const file = std.fs.File{
        .handle = fd,
        .capable_io_mode = .blocking,
        .intended_io_mode = .blocking,
    };
    const stream = file.reader();
    stream.readNoEof(buf) catch return error.Unexpected;
}

/// Causes abnormal process termination.
/// If linking against libc, this calls the abort() libc function. Otherwise
/// it raises SIGABRT followed by SIGKILL and finally exits with code 127.
pub fn abort() noreturn {
    @setCold(true);
    // MSVCRT abort() sometimes opens a popup window which is undesirable, so
    // even when linking libc on Windows we use our own abort implementation.
    // See https://github.com/ziglang/zig/issues/2071 for more details.
    if (builtin.os.tag == .windows) {
        if (builtin.mode == .Debug) {
            @breakpoint();
        }
        windows.kernel32.ExitProcess(3);
    }
    if (!builtin.link_libc and builtin.os.tag == .linux) {
        raise(SIG.ABRT) catch {};

        // TODO the rest of the implementation of abort() from musl libc here

        raise(SIG.KILL) catch {};
        exit(127);
    }
    if (builtin.os.tag == .uefi) {
        exit(0); // TODO choose appropriate exit code
    }
    if (builtin.os.tag == .wasi) {
        @breakpoint();
        exit(1);
    }

    system.abort();
}

pub const RaiseError = UnexpectedError;

/// Sends the signal `sig` to the calling thread (libc raise(), or on Linux
/// without libc, tkill() on the current thread with application signals
/// temporarily blocked).
pub fn raise(sig: u8) RaiseError!void {
    if (builtin.link_libc) {
        switch (errno(system.raise(sig))) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),
        }
    }

    if (builtin.os.tag == .linux) {
        var set: sigset_t = undefined;
        // block application signals
        _ = linux.sigprocmask(SIG.BLOCK, &linux.app_mask, &set);

        const tid = linux.gettid();
        const rc = linux.tkill(tid, sig);

        // restore signal mask
        _ = linux.sigprocmask(SIG.SETMASK, &set, null);

        switch (errno(rc)) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),
        }
    }

    @compileError("std.os.raise unimplemented for this target");
}

pub const KillError = error{PermissionDenied} || UnexpectedError;

/// Sends the signal `sig` to the process `pid`.
pub fn kill(pid: pid_t, sig: u8) KillError!void {
    switch (errno(system.kill(pid, sig))) {
        .SUCCESS => return,
        .INVAL => unreachable, // invalid signal
        .PERM => return error.PermissionDenied,
        .SRCH => unreachable, // always a race condition
        else => |err| return unexpectedErrno(err),
    }
}

/// Exits the program cleanly with the specified status code.
pub fn exit(status: u8) noreturn {
    if (builtin.link_libc) {
        system.exit(status);
    }
    if (builtin.os.tag == .windows) {
        windows.kernel32.ExitProcess(status);
    }
    if (builtin.os.tag == .wasi) {
        wasi.proc_exit(status);
    }
    if (builtin.os.tag == .linux and !builtin.single_threaded) {
        linux.exit_group(status);
    }
    if (builtin.os.tag == .uefi) {
        // exit() is only available if exitBootServices() has not been called yet.
        // This call to exit should not fail, so we don't care about its return value.
        if (uefi.system_table.boot_services) |bs| {
            _ = bs.exit(uefi.handle, @intToEnum(uefi.Status, status), 0, null);
        }
        // If we can't exit, reboot the system instead.
        uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @intToEnum(uefi.Status, status), 0, null);
    }
    system.exit(status);
}

pub const ReadError = error{
    InputOutput,
    SystemResources,
    IsDir,
    OperationAborted,
    BrokenPipe,
    ConnectionResetByPeer,
    ConnectionTimedOut,
    NotOpenForReading,

    /// This error occurs when no global event loop is configured,
    /// and reading from the file descriptor would block.
    WouldBlock,

    /// In WASI, this error occurs when the file descriptor does
    /// not hold the required rights to read from it.
    AccessDenied,
} || UnexpectedError;

/// Returns the number of bytes that were read, which can be less than
/// buf.len. If 0 bytes were read, that means EOF.
/// If `fd` is opened in non blocking mode, the function will return error.WouldBlock
/// when EAGAIN is received.
///
/// Linux has a limit on how many bytes may be transferred in one `read` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values. This is noted on the `read` man page.
/// The limit on Darwin is `0x7fffffff`, trying to read more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
    if (builtin.os.tag == .windows) {
        return windows.ReadFile(fd, buf, null, std.io.default_mode);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        // WASI only exposes a vectored read; wrap `buf` in a one-element iovec.
        const iovs = [1]iovec{iovec{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
        }};

        var nread: usize = undefined;
        switch (wasi.fd_read(fd, &iovs, iovs.len, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForReading, // Can be a race condition.
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .CONNRESET => return error.ConnectionResetByPeer,
            .TIMEDOUT => return error.ConnectionTimedOut,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    // Prevents EINVAL.
    const max_count = switch (builtin.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => math.maxInt(i32),
        else => math.maxInt(isize),
    };
    const adjusted_len = @minimum(max_count, buf.len);

    // Retry on EINTR.
    while (true) {
        const rc = system.read(fd, buf.ptr, adjusted_len);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // Can be a race condition.
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .CONNRESET => return error.ConnectionResetByPeer,
            .TIMEDOUT => return error.ConnectionTimedOut,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// This operation is non-atomic on the following systems:
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
    if (builtin.os.tag == .windows) {
        // TODO improve this to use ReadFileScatter
        if (iov.len == 0) return @as(usize, 0);
        const first = iov[0];
        return read(fd, first.iov_base[0..first.iov_len]);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var nread: usize = undefined;
        switch (wasi.fd_read(fd, iov.ptr, iov.len, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable, // currently not support in WASI
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    // Clamp the iovec count to what the syscall interface accepts.
    const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31);
    while (true) {
        // TODO handle the case when iov_len is too large and get rid of this @intCast
        const rc = system.readv(fd, iov.ptr, iov_count);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const PReadError = ReadError || error{Unseekable};

/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// Retries when interrupted by a signal.
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// Linux has a limit on how many bytes may be transferred in one `pread` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values. This is noted on the `read` man page.
/// The limit on Darwin is `0x7fffffff`, trying to read more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
    if (builtin.os.tag == .windows) {
        return windows.ReadFile(fd, buf, offset, std.io.default_mode);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        // WASI only exposes a vectored positional read; wrap `buf` in one iovec.
        const iovs = [1]iovec{iovec{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
        }};

        var nread: usize = undefined;
        switch (wasi.fd_pread(fd, &iovs, iovs.len, offset, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForReading, // Can be a race condition.
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .CONNRESET => return error.ConnectionResetByPeer,
            .NXIO => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    // Prevent EINVAL.
    const max_count = switch (builtin.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => math.maxInt(i32),
        else => math.maxInt(isize),
    };
    const adjusted_len = @minimum(max_count, buf.len);

    // On Linux with libc, use the 64-bit-offset symbol explicitly.
    const pread_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.pread64
    else
        system.pread;

    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
    while (true) {
        const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // Can be a race condition.
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .CONNRESET => return error.ConnectionResetByPeer,
            .NXIO => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const TruncateError = error{
    FileTooBig,
    InputOutput,
    FileBusy,

    /// In WASI, this error occurs when the file descriptor does
    /// not hold the required rights to call `ftruncate` on it.
    AccessDenied,
} || UnexpectedError;

/// Sets the length of the file referred to by `fd` to `length`, growing or
/// shrinking it as needed.
pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
    if (builtin.os.tag == .windows) {
        var io_status_block: windows.IO_STATUS_BLOCK = undefined;
        var eof_info = windows.FILE_END_OF_FILE_INFORMATION{
            .EndOfFile = @bitCast(windows.LARGE_INTEGER, length),
        };

        const rc = windows.ntdll.NtSetInformationFile(
            fd,
            &io_status_block,
            &eof_info,
            @sizeOf(windows.FILE_END_OF_FILE_INFORMATION),
            .FileEndOfFileInformation,
        );

        switch (rc) {
            .SUCCESS => return,
            .INVALID_HANDLE => unreachable, // Handle not open for writing
            .ACCESS_DENIED => return error.AccessDenied,
            else => return windows.unexpectedStatus(rc),
        }
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        switch (wasi.fd_filestat_set_size(fd, length)) {
            .SUCCESS => return,
            .INTR => unreachable,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .PERM => return error.AccessDenied,
            .TXTBSY => return error.FileBusy,
            .BADF => unreachable, // Handle not open for writing
            .INVAL => unreachable, // Handle not open for writing
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    while (true) {
        // On Linux with libc, use the 64-bit-offset symbol explicitly.
        const ftruncate_sym = if (builtin.os.tag == .linux and builtin.link_libc)
            system.ftruncate64
        else
            system.ftruncate;

        const ilen = @bitCast(i64, length); // the OS treats this as unsigned
        switch (errno(ftruncate_sym(fd, ilen))) {
            .SUCCESS => return,
            .INTR => continue,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .PERM => return error.AccessDenied,
            .TXTBSY => return error.FileBusy,
            .BADF => unreachable, // Handle not open for writing
            .INVAL => unreachable, // Handle not open for writing
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// Retries when interrupted by a signal.
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// This operation is non-atomic on the following systems:
/// * Darwin
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
    const have_pread_but_not_preadv = switch (builtin.os.tag) {
        .windows, .macos, .ios, .watchos, .tvos, .haiku => true,
        else => false,
    };
    if (have_pread_but_not_preadv) {
        // We could loop here; but proper usage of `preadv` must handle partial reads anyway.
        // So we simply read into the first vector only.
        if (iov.len == 0) return @as(usize, 0);
        const first = iov[0];
        return pread(fd, first.iov_base[0..first.iov_len], offset);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var nread: usize = undefined;
        switch (wasi.fd_pread(fd, iov.ptr, iov.len, offset, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .NXIO => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    // Saturate the vector count at the syscall ABI limit; partial reads are allowed anyway.
    const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31);

    const preadv_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.preadv64
    else
        system.preadv;

    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
    while (true) {
        const rc = preadv_sym(fd, iov.ptr, iov_count, ioffset);
        switch (errno(rc)) {
            .SUCCESS => return @bitCast(usize, rc),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .NXIO => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const WriteError = error{
    DiskQuota,
    FileTooBig,
    InputOutput,
    NoSpaceLeft,

    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to write to it.
    AccessDenied,
    BrokenPipe,
    SystemResources,
    OperationAborted,
    NotOpenForWriting,

    /// This error occurs when no global event loop is configured,
    /// and reading from the file descriptor would block.
    WouldBlock,

    /// Connection reset by peer.
    ConnectionResetByPeer,
} || UnexpectedError;

/// Write to a file descriptor.
/// Retries when interrupted by a signal.
/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
///
/// Note that a successful write() may transfer fewer than count bytes. Such partial writes can
/// occur for various reasons; for example, because there was insufficient space on the disk
/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
/// similar was interrupted by a signal handler after it had transferred some, but before it had
/// transferred all of the requested bytes. In the event of a partial write, the caller can make
/// another write() call to transfer the remaining bytes. The subsequent call will either
/// transfer further bytes or may result in an error (e.g., if the disk is now full).
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// Linux has a limit on how many bytes may be transferred in one `write` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values. This is noted on the `write` man page.
/// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
    if (builtin.os.tag == .windows) {
        return windows.WriteFile(fd, bytes, null, std.io.default_mode);
    }

    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        const ciovs = [_]iovec_const{iovec_const{
            .iov_base = bytes.ptr,
            .iov_len = bytes.len,
        }};
        var nwritten: usize = undefined;
        switch (wasi.fd_write(fd, &ciovs, ciovs.len, &nwritten)) {
            .SUCCESS => return nwritten,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForWriting, // can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    // Cap the transfer size at the per-OS maximum for a single write to prevent EINVAL.
    const max_count = switch (builtin.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => math.maxInt(i32),
        else => math.maxInt(isize),
    };
    const adjusted_len = @minimum(max_count, bytes.len);

    while (true) {
        const rc = system.write(fd, bytes.ptr, adjusted_len);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForWriting, // can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .CONNRESET => return error.ConnectionResetByPeer,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Write multiple buffers to a file descriptor.
/// Retries when interrupted by a signal.
/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
///
/// Note that a successful write() may transfer fewer bytes than supplied. Such partial writes can
/// occur for various reasons; for example, because there was insufficient space on the disk
/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
/// similar was interrupted by a signal handler after it had transferred some, but before it had
/// transferred all of the requested bytes. In the event of a partial write, the caller can make
/// another write() call to transfer the remaining bytes. The subsequent call will either
/// transfer further bytes or may result in an error (e.g., if the disk is now full).
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
    if (builtin.os.tag == .windows) {
        // TODO improve this to use WriteFileScatter
        if (iov.len == 0) return @as(usize, 0);
        const first = iov[0];
        return write(fd, first.iov_base[0..first.iov_len]);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var nwritten: usize = undefined;
        switch (wasi.fd_write(fd, iov.ptr, iov.len, &nwritten)) {
            .SUCCESS => return nwritten,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForWriting, // can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    // Clamp the vector count at IOV_MAX; callers must handle partial writes anyway.
    const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len);
    while (true) {
        const rc = system.writev(fd, iov.ptr, iov_count);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForWriting, // Can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .CONNRESET => return error.ConnectionResetByPeer,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const PWriteError = WriteError || error{Unseekable};

/// Write to a file descriptor, with a position offset.
/// Retries when interrupted by a signal.
/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
///
/// Note that a successful write() may transfer fewer bytes than supplied. Such partial writes can
/// occur for various reasons; for example, because there was insufficient space on the disk
/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
/// similar was interrupted by a signal handler after it had transferred some, but before it had
/// transferred all of the requested bytes. In the event of a partial write, the caller can make
/// another write() call to transfer the remaining bytes. The subsequent call will either
/// transfer further bytes or may result in an error (e.g., if the disk is now full).
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// Linux has a limit on how many bytes may be transferred in one `pwrite` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values. This is noted on the `write` man page.
/// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
    if (builtin.os.tag == .windows) {
        return windows.WriteFile(fd, bytes, offset, std.io.default_mode);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        const ciovs = [1]iovec_const{iovec_const{
            .iov_base = bytes.ptr,
            .iov_len = bytes.len,
        }};

        var nwritten: usize = undefined;
        switch (wasi.fd_pwrite(fd, &ciovs, ciovs.len, offset, &nwritten)) {
            .SUCCESS => return nwritten,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForWriting, // can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .NXIO => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    // Prevent EINVAL.
    const max_count = switch (builtin.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => math.maxInt(i32),
        else => math.maxInt(isize),
    };
    const adjusted_len = @minimum(max_count, bytes.len);

    const pwrite_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.pwrite64
    else
        system.pwrite;

    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
    while (true) {
        const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForWriting, // Can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .NXIO => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Write multiple buffers to a file descriptor, with a position offset.
/// Retries when interrupted by a signal.
/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
///
/// Note that a successful write() may transfer fewer than count bytes. Such partial writes can
/// occur for various reasons; for example, because there was insufficient space on the disk
/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
/// similar was interrupted by a signal handler after it had transferred some, but before it had
/// transferred all of the requested bytes. In the event of a partial write, the caller can make
/// another write() call to transfer the remaining bytes. The subsequent call will either
/// transfer further bytes or may result in an error (e.g., if the disk is now full).
///
/// If `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
///
/// The following systems do not have this syscall, and will return partial writes if more than one
/// vector is provided:
/// * Darwin
/// * Windows
///
/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usize {
    const have_pwrite_but_not_pwritev = switch (builtin.os.tag) {
        .windows, .macos, .ios, .watchos, .tvos, .haiku => true,
        else => false,
    };

    if (have_pwrite_but_not_pwritev) {
        // We could loop here; but proper usage of `pwritev` must handle partial writes anyway.
        // So we simply write the first vector only.
if (iov.len == 0) return @as(usize, 0); const first = iov[0]; return pwrite(fd, first.iov_base[0..first.iov_len], offset); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var nwritten: usize = undefined; switch (wasi.fd_pwrite(fd, iov.ptr, iov.len, offset, &nwritten)) { .SUCCESS => return nwritten, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const pwritev_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.pwritev64 else system.pwritev; const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len); const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned while (true) { const rc = pwritev_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { .SUCCESS => return @intCast(usize, rc), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. 
.DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, else => |err| return unexpectedErrno(err), } } } pub const OpenError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to open a new resource relative to it. AccessDenied, SymLinkLoop, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, NoDevice, FileNotFound, /// The path exceeded `MAX_PATH_BYTES` bytes. NameTooLong, /// Insufficient kernel memory was available, or /// the named file is a FIFO and per-user hard limit on /// memory allocation for pipes has been reached. SystemResources, /// The file is too large to be opened. This error is unreachable /// for 64-bit targets, as well as when opening directories. FileTooBig, /// The path refers to directory but the `O.DIRECTORY` flag was not provided. IsDir, /// A new path cannot be created because the device has no room for the new file. /// This error is only reachable when the `O.CREAT` flag is provided. NoSpaceLeft, /// A component used as a directory in the path was not, in fact, a directory, or /// `O.DIRECTORY` was specified and the path was not a directory. NotDir, /// The path already exists and the `O.CREAT` and `O.EXCL` flags were provided. PathAlreadyExists, DeviceBusy, /// The underlying filesystem does not support file locks FileLocksNotSupported, BadPathName, InvalidUtf8, /// One of these three things: /// * pathname refers to an executable image which is currently being /// executed and write access was requested. /// * pathname refers to a file that is currently in use as a swap /// file, and the O_TRUNC flag was specified. 
/// * pathname refers to a file that is currently being read by the /// kernel (e.g., for module/firmware loading), and write access was /// requested. FileBusy, WouldBlock, } || UnexpectedError; /// Open and possibly create a file. Keeps trying if it gets interrupted. /// See also `openZ`. pub fn open(file_path: []const u8, flags: u32, perm: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return openW(file_path_w.span(), flags, perm); } const file_path_c = try toPosixPath(file_path); return openZ(&file_path_c, flags, perm); } /// Open and possibly create a file. Keeps trying if it gets interrupted. /// See also `open`. pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return openW(file_path_w.span(), flags, perm); } const open_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.open64 else system.open; while (true) { const rc = open_sym(file_path, flags, perm); switch (errno(rc)) { .SUCCESS => return @intCast(fd_t, rc), .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, else => |err| return unexpectedErrno(err), } } } fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions { const w = windows; var access_mask: w.ULONG = w.READ_CONTROL | 
w.FILE_WRITE_ATTRIBUTES | w.SYNCHRONIZE; if (flags & O.RDWR != 0) { access_mask |= w.GENERIC_READ | w.GENERIC_WRITE; } else if (flags & O.WRONLY != 0) { access_mask |= w.GENERIC_WRITE; } else { access_mask |= w.GENERIC_READ | w.GENERIC_WRITE; } const filter: windows.OpenFileOptions.Filter = if (flags & O.DIRECTORY != 0) .dir_only else .file_only; const follow_symlinks: bool = flags & O.NOFOLLOW == 0; const creation: w.ULONG = blk: { if (flags & O.CREAT != 0) { if (flags & O.EXCL != 0) { break :blk w.FILE_CREATE; } } break :blk w.FILE_OPEN; }; return .{ .access_mask = access_mask, .io_mode = .blocking, .creation = creation, .filter = filter, .follow_symlinks = follow_symlinks, }; } /// Windows-only. The path parameter is /// [WTF-16](https://simonsapin.github.io/wtf-8/#potentially-ill-formed-utf-16) encoded. /// Translates the POSIX open API call to a Windows API call. /// TODO currently, this function does not handle all flag combinations /// or makes use of perm argument. pub fn openW(file_path_w: []const u16, flags: u32, perm: mode_t) OpenError!fd_t { _ = perm; var options = openOptionsFromFlags(flags); options.dir = std.fs.cwd().fd; return windows.OpenFile(file_path_w, options) catch |err| switch (err) { error.WouldBlock => unreachable, error.PipeBusy => unreachable, else => |e| return e, }; } /// Open and possibly create a file. Keeps trying if it gets interrupted. /// `file_path` is relative to the open directory handle `dir_fd`. /// See also `openatZ`. pub fn openat(dir_fd: fd_t, file_path: []const u8, flags: u32, mode: mode_t) OpenError!fd_t { if (builtin.os.tag == .wasi) { @compileError("use openatWasi instead"); } if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return openatW(dir_fd, file_path_w.span(), flags, mode); } const file_path_c = try toPosixPath(file_path); return openatZ(dir_fd, &file_path_c, flags, mode); } /// Open and possibly create a file in WASI. 
pub fn openatWasi(dir_fd: fd_t, file_path: []const u8, lookup_flags: lookupflags_t, oflags: oflags_t, fdflags: fdflags_t, base: rights_t, inheriting: rights_t) OpenError!fd_t { while (true) { var fd: fd_t = undefined; switch (wasi.path_open(dir_fd, lookup_flags, file_path.ptr, file_path.len, oflags, base, inheriting, fdflags, &fd)) { .SUCCESS => return fd, .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } } /// Open and possibly create a file. Keeps trying if it gets interrupted. /// `file_path` is relative to the open directory handle `dir_fd`. /// See also `openat`. 
pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return openatW(dir_fd, file_path_w.span(), flags, mode); } const openat_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.openat64 else system.openat; while (true) { const rc = openat_sym(dir_fd, file_path, flags, mode); switch (errno(rc)) { .SUCCESS => return @intCast(fd_t, rc), .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .BADF => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, .OPNOTSUPP => return error.FileLocksNotSupported, .AGAIN => return error.WouldBlock, .TXTBSY => return error.FileBusy, else => |err| return unexpectedErrno(err), } } } /// Windows-only. Similar to `openat` but with pathname argument null-terminated /// WTF16 encoded. /// TODO currently, this function does not handle all flag combinations /// or makes use of perm argument. 
pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t) OpenError!fd_t { _ = mode; var options = openOptionsFromFlags(flags); options.dir = dir_fd; return windows.OpenFile(file_path_w, options) catch |err| switch (err) { error.WouldBlock => unreachable, error.PipeBusy => unreachable, else => |e| return e, }; } pub fn dup(old_fd: fd_t) !fd_t { const rc = system.dup(old_fd); return switch (errno(rc)) { .SUCCESS => return @intCast(fd_t, rc), .MFILE => error.ProcessFdQuotaExceeded, .BADF => unreachable, // invalid file descriptor else => |err| return unexpectedErrno(err), }; } pub fn dup2(old_fd: fd_t, new_fd: fd_t) !void { while (true) { switch (errno(system.dup2(old_fd, new_fd))) { .SUCCESS => return, .BUSY, .INTR => continue, .MFILE => return error.ProcessFdQuotaExceeded, .INVAL => unreachable, // invalid parameters passed to dup2 .BADF => unreachable, // invalid file descriptor else => |err| return unexpectedErrno(err), } } } pub const ExecveError = error{ SystemResources, AccessDenied, InvalidExe, FileSystem, IsDir, FileNotFound, NotDir, FileBusy, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, NameTooLong, } || UnexpectedError; /// Like `execve` except the parameters are null-terminated, /// matching the syscall API on all targets. This removes the need for an allocator. /// This function ignores PATH environment variable. See `execvpeZ` for that. 
pub fn execveZ( path: [*:0]const u8, child_argv: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8, ) ExecveError { switch (errno(system.execve(path, child_argv, envp))) { .SUCCESS => unreachable, .FAULT => unreachable, .@"2BIG" => return error.SystemResources, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .INVAL => return error.InvalidExe, .NOEXEC => return error.InvalidExe, .IO => return error.FileSystem, .LOOP => return error.FileSystem, .ISDIR => return error.IsDir, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .TXTBSY => return error.FileBusy, else => |err| switch (builtin.os.tag) { .macos, .ios, .tvos, .watchos => switch (err) { .BADEXEC => return error.InvalidExe, .BADARCH => return error.InvalidExe, else => return unexpectedErrno(err), }, .linux, .solaris => switch (err) { .LIBBAD => return error.InvalidExe, else => return unexpectedErrno(err), }, else => return unexpectedErrno(err), }, } } pub const Arg0Expand = enum { expand, no_expand, }; /// Like `execvpeZ` except if `arg0_expand` is `.expand`, then `argv` is mutable, /// and `argv[0]` is expanded to be the same absolute path that is passed to the execve syscall. /// If this function returns with an error, `argv[0]` will be restored to the value it was when it was passed in. 
pub fn execvpeZ_expandArg0( comptime arg0_expand: Arg0Expand, file: [*:0]const u8, child_argv: switch (arg0_expand) { .expand => [*:null]?[*:0]const u8, .no_expand => [*:null]const ?[*:0]const u8, }, envp: [*:null]const ?[*:0]const u8, ) ExecveError { const file_slice = mem.sliceTo(file, 0); if (mem.indexOfScalar(u8, file_slice, '/') != null) return execveZ(file, child_argv, envp); const PATH = getenvZ("PATH") orelse "/usr/local/bin:/bin/:/usr/bin"; // Use of MAX_PATH_BYTES here is valid as the path_buf will be passed // directly to the operating system in execveZ. var path_buf: [MAX_PATH_BYTES]u8 = undefined; var it = mem.tokenize(u8, PATH, ":"); var seen_eacces = false; var err: ExecveError = undefined; // In case of expanding arg0 we must put it back if we return with an error. const prev_arg0 = child_argv[0]; defer switch (arg0_expand) { .expand => child_argv[0] = prev_arg0, .no_expand => {}, }; while (it.next()) |search_path| { if (path_buf.len < search_path.len + file_slice.len + 1) return error.NameTooLong; mem.copy(u8, &path_buf, search_path); path_buf[search_path.len] = '/'; mem.copy(u8, path_buf[search_path.len + 1 ..], file_slice); const path_len = search_path.len + file_slice.len + 1; path_buf[path_len] = 0; const full_path = path_buf[0..path_len :0].ptr; switch (arg0_expand) { .expand => child_argv[0] = full_path, .no_expand => {}, } err = execveZ(full_path, child_argv, envp); switch (err) { error.AccessDenied => seen_eacces = true, error.FileNotFound, error.NotDir => {}, else => |e| return e, } } if (seen_eacces) return error.AccessDenied; return err; } /// Like `execvpe` except the parameters are null-terminated, /// matching the syscall API on all targets. This removes the need for an allocator. /// This function also uses the PATH environment variable to get the full path to the executable. /// If `file` is an absolute path, this is the same as `execveZ`. 
pub fn execvpeZ(
    file: [*:0]const u8,
    argv_ptr: [*:null]const ?[*:0]const u8,
    envp: [*:null]const ?[*:0]const u8,
) ExecveError {
    return execvpeZ_expandArg0(.no_expand, file, argv_ptr, envp);
}

/// Get an environment variable.
/// See also `getenvZ`.
pub fn getenv(key: []const u8) ?[]const u8 {
    if (builtin.link_libc) {
        // Fast path: null-terminate the key on the stack and use libc's getenv.
        var small_key_buf: [64]u8 = undefined;
        if (key.len < small_key_buf.len) {
            mem.copy(u8, &small_key_buf, key);
            small_key_buf[key.len] = 0;
            const key0 = small_key_buf[0..key.len :0];
            return getenvZ(key0);
        }
        // Search the entire `environ` because we don't have a null terminated pointer.
        var ptr = std.c.environ;
        while (ptr.*) |line| : (ptr += 1) {
            var line_i: usize = 0;
            while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {}
            const this_key = line[0..line_i];

            if (!mem.eql(u8, this_key, key)) continue;

            var end_i: usize = line_i;
            while (line[end_i] != 0) : (end_i += 1) {}
            const value = line[line_i + 1 .. end_i];

            return value;
        }
        return null;
    }
    if (builtin.os.tag == .windows) {
        @compileError("std.os.getenv is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.os.getenvW for Windows-specific API.");
    }
    // TODO see https://github.com/ziglang/zig/issues/4524
    for (environ) |ptr| {
        var line_i: usize = 0;
        while (ptr[line_i] != 0 and ptr[line_i] != '=') : (line_i += 1) {}
        const this_key = ptr[0..line_i];
        if (!mem.eql(u8, key, this_key)) continue;

        var end_i: usize = line_i;
        while (ptr[end_i] != 0) : (end_i += 1) {}
        const this_value = ptr[line_i + 1 .. end_i];

        return this_value;
    }
    return null;
}

/// Get an environment variable with a null-terminated name.
/// See also `getenv`.
pub fn getenvZ(key: [*:0]const u8) ?[]const u8 {
    if (builtin.link_libc) {
        const value = system.getenv(key) orelse return null;
        return mem.sliceTo(value, 0);
    }
    if (builtin.os.tag == .windows) {
        @compileError("std.os.getenvZ is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.os.getenvW for Windows-specific API.");
    }
    return getenv(mem.sliceTo(key, 0));
}

/// Windows-only. Get an environment variable with a null-terminated, WTF-16 encoded name.
/// See also `getenv`.
/// This function first attempts a case-sensitive lookup. If no match is found, and `key`
/// is ASCII, then it attempts a second case-insensitive lookup.
pub fn getenvW(key: [*:0]const u16) ?[:0]const u16 {
    if (builtin.os.tag != .windows) {
        @compileError("std.os.getenvW is a Windows-only API");
    }
    const key_slice = mem.sliceTo(key, 0);
    const ptr = windows.peb().ProcessParameters.Environment;
    var ascii_match: ?[:0]const u16 = null;
    var i: usize = 0;
    while (ptr[i] != 0) {
        const key_start = i;

        while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {}
        const this_key = ptr[key_start..i];

        if (ptr[i] == '=') i += 1;

        const value_start = i;
        while (ptr[i] != 0) : (i += 1) {}
        const this_value = ptr[value_start..i :0];

        if (mem.eql(u16, key_slice, this_key)) return this_value;

        ascii_check: {
            if (ascii_match != null) break :ascii_check;
            if (key_slice.len != this_key.len) break :ascii_check;
            for (key_slice) |a_c, key_index| {
                const a = math.cast(u8, a_c) catch break :ascii_check;
                const b = math.cast(u8, this_key[key_index]) catch break :ascii_check;
                if (std.ascii.toLower(a) != std.ascii.toLower(b)) break :ascii_check;
            }
            ascii_match = this_value;
        }

        i += 1; // skip over null byte
    }
    return ascii_match;
}

pub const GetCwdError = error{
    NameTooLong,
    CurrentWorkingDirectoryUnlinked,
} || UnexpectedError;

/// The result is a slice of out_buffer, indexed from 0.
pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 {
    if (builtin.os.tag == .windows) {
        return windows.GetCurrentDirectory(out_buffer);
    }
    if (builtin.os.tag == .wasi) {
        @compileError("WASI doesn't have a concept of cwd(); use std.fs.wasi.PreopenList to get available Dir handles instead");
    }

    const err = if (builtin.link_libc) blk: {
        // libc getcwd returns a pointer on success and null on failure,
        // with the reason in errno; normalize both paths to an `E` value.
        const c_err = if (std.c.getcwd(out_buffer.ptr, out_buffer.len)) |_| 0 else std.c._errno().*;
        break :blk @intToEnum(E, c_err);
    } else blk: {
        break :blk errno(system.getcwd(out_buffer.ptr, out_buffer.len));
    };
    switch (err) {
        // On success the kernel/libc wrote a null-terminated path into out_buffer.
        .SUCCESS => return mem.sliceTo(std.meta.assumeSentinel(out_buffer.ptr, 0), 0),
        .FAULT => unreachable,
        .INVAL => unreachable,
        .NOENT => return error.CurrentWorkingDirectoryUnlinked,
        .RANGE => return error.NameTooLong,
        else => return unexpectedErrno(err),
    }
}

pub const SymLinkError = error{
    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to create a new symbolic link relative to it.
    AccessDenied,
    DiskQuota,
    PathAlreadyExists,
    FileSystem,
    SymLinkLoop,
    FileNotFound,
    SystemResources,
    NoSpaceLeft,
    ReadOnlyFileSystem,
    NotDir,
    NameTooLong,
    InvalidUtf8,
    BadPathName,
} || UnexpectedError;

/// Creates a symbolic link named `sym_link_path` which contains the string `target_path`.
/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
/// one; the latter case is known as a dangling link.
/// If `sym_link_path` exists, it will not be overwritten.
/// See also `symlinkZ`.
pub fn symlink(target_path: []const u8, sym_link_path: []const u8) SymLinkError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("symlink is not supported in WASI; use symlinkat instead");
    }
    if (builtin.os.tag == .windows) {
        @compileError("symlink is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
    }
    // Copy both paths into null-terminated stack buffers for the syscall.
    const target_path_c = try toPosixPath(target_path);
    const sym_link_path_c = try toPosixPath(sym_link_path);
    return symlinkZ(&target_path_c, &sym_link_path_c);
}

/// This is the same as `symlink` except the parameters are null-terminated pointers.
/// See also `symlink`.
pub fn symlinkZ(target_path: [*:0]const u8, sym_link_path: [*:0]const u8) SymLinkError!void {
    if (builtin.os.tag == .windows) {
        @compileError("symlink is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
    }
    switch (errno(system.symlink(target_path, sym_link_path))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

/// Similar to `symlink`, however, creates a symbolic link named `sym_link_path` which contains the string
/// `target_path` **relative** to `newdirfd` directory handle.
/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
/// one; the latter case is known as a dangling link.
/// If `sym_link_path` exists, it will not be overwritten.
/// See also `symlinkatWasi`, `symlinkatZ` and `symlinkatW`.
pub fn symlinkat(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return symlinkatWasi(target_path, newdirfd, sym_link_path);
    }
    if (builtin.os.tag == .windows) {
        @compileError("symlinkat is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
    }
    const target_path_c = try toPosixPath(target_path);
    const sym_link_path_c = try toPosixPath(sym_link_path);
    return symlinkatZ(&target_path_c, newdirfd, &sym_link_path_c);
}

/// WASI-only. The same as `symlinkat` but targeting WASI.
/// See also `symlinkat`.
pub fn symlinkatWasi(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void {
    switch (wasi.path_symlink(target_path.ptr, target_path.len, newdirfd, sym_link_path.ptr, sym_link_path.len)) {
        .SUCCESS => {},
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .ROFS => return error.ReadOnlyFileSystem,
        // WASI-specific: the descriptor lacks the rights for this operation.
        .NOTCAPABLE => return error.AccessDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// The same as `symlinkat` except the parameters are null-terminated pointers.
/// See also `symlinkat`.
pub fn symlinkatZ(target_path: [*:0]const u8, newdirfd: fd_t, sym_link_path: [*:0]const u8) SymLinkError!void {
    if (builtin.os.tag == .windows) {
        @compileError("symlinkat is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
    }
    switch (errno(system.symlinkat(target_path, newdirfd, sym_link_path))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

pub const LinkError = UnexpectedError || error{
    AccessDenied,
    DiskQuota,
    PathAlreadyExists,
    FileSystem,
    SymLinkLoop,
    LinkQuotaExceeded,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NoSpaceLeft,
    ReadOnlyFileSystem,
    NotSameFileSystem,
};

/// Same as `link` except the parameters are null-terminated pointers.
pub fn linkZ(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) LinkError!void {
    switch (errno(system.link(oldpath, newpath, flags))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .FAULT => unreachable,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .PERM => return error.AccessDenied,
        .ROFS => return error.ReadOnlyFileSystem,
        .XDEV => return error.NotSameFileSystem,
        .INVAL => unreachable,
        else => |err| return unexpectedErrno(err),
    }
}

/// Creates a new hard link named `newpath` for the file at `oldpath`.
/// `flags` is passed through to the underlying syscall unchanged.
pub fn link(oldpath: []const u8, newpath: []const u8, flags: i32) LinkError!void {
    const old = try toPosixPath(oldpath);
    const new = try toPosixPath(newpath);
    return try linkZ(&old, &new, flags);
}

pub const LinkatError = LinkError || error{NotDir};

/// Same as `linkat` except the path parameters are null-terminated pointers.
pub fn linkatZ(
    olddir: fd_t,
    oldpath: [*:0]const u8,
    newdir: fd_t,
    newpath: [*:0]const u8,
    flags: i32,
) LinkatError!void {
    switch (errno(system.linkat(olddir, oldpath, newdir, newpath, flags))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .FAULT => unreachable,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .NOTDIR => return error.NotDir,
        .PERM => return error.AccessDenied,
        .ROFS => return error.ReadOnlyFileSystem,
        .XDEV => return error.NotSameFileSystem,
        .INVAL => unreachable,
        else => |err| return unexpectedErrno(err),
    }
}

/// Creates a new hard link for the file at `oldpath` (relative to `olddir`)
/// named `newpath` (relative to `newdir`).
pub fn linkat(
    olddir: fd_t,
    oldpath: []const u8,
    newdir: fd_t,
    newpath: []const u8,
    flags: i32,
) LinkatError!void {
    const old = try toPosixPath(oldpath);
    const new = try toPosixPath(newpath);
    return try linkatZ(olddir, &old, newdir, &new, flags);
}

pub const UnlinkError = error{
    FileNotFound,

    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to unlink a resource by path relative to it.
    AccessDenied,
    FileBusy,
    FileSystem,
    IsDir,
    SymLinkLoop,
    NameTooLong,
    NotDir,
    SystemResources,
    ReadOnlyFileSystem,

    /// On Windows, file paths must be valid Unicode.
    InvalidUtf8,

    /// On Windows, file paths cannot contain these characters:
    /// '/', '*', '?', '"', '<', '>', '|'
    BadPathName,
} || UnexpectedError;

/// Delete a name and possibly the file it refers to.
/// See also `unlinkZ`.
pub fn unlink(file_path: []const u8) UnlinkError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("unlink is not supported in WASI; use unlinkat instead");
    } else if (builtin.os.tag == .windows) {
        const file_path_w = try windows.sliceToPrefixedFileW(file_path);
        return unlinkW(file_path_w.span());
    } else {
        const file_path_c = try toPosixPath(file_path);
        return unlinkZ(&file_path_c);
    }
}

/// Same as `unlink` except the parameter is a null terminated UTF8-encoded string.
pub fn unlinkZ(file_path: [*:0]const u8) UnlinkError!void {
    if (builtin.os.tag == .windows) {
        const file_path_w = try windows.cStrToPrefixedFileW(file_path);
        return unlinkW(file_path_w.span());
    }
    switch (errno(system.unlink(file_path))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .IO => return error.FileSystem,
        .ISDIR => return error.IsDir,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `unlink` except the parameter is null-terminated, WTF16 encoded.
pub fn unlinkW(file_path_w: []const u16) UnlinkError!void {
    return windows.DeleteFile(file_path_w, .{ .dir = std.fs.cwd().fd });
}

pub const UnlinkatError = UnlinkError || error{
    /// When passing `AT.REMOVEDIR`, this error occurs when the named directory is not empty.
    DirNotEmpty,
};

/// Delete a file name and possibly the file it refers to, based on an open directory handle.
/// Asserts that the path parameter has no null bytes.
pub fn unlinkat(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void {
    if (builtin.os.tag == .windows) {
        const file_path_w = try windows.sliceToPrefixedFileW(file_path);
        return unlinkatW(dirfd, file_path_w.span(), flags);
    } else if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return unlinkatWasi(dirfd, file_path, flags);
    } else {
        const file_path_c = try toPosixPath(file_path);
        return unlinkatZ(dirfd, &file_path_c, flags);
    }
}

/// WASI-only. Same as `unlinkat` but targeting WASI.
/// See also `unlinkat`.
pub fn unlinkatWasi(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void {
    // WASI has separate syscalls for removing directories versus files.
    const remove_dir = (flags & AT.REMOVEDIR) != 0;
    const res = if (remove_dir)
        wasi.path_remove_directory(dirfd, file_path.ptr, file_path.len)
    else
        wasi.path_unlink_file(dirfd, file_path.ptr, file_path.len);
    switch (res) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .FAULT => unreachable,
        .IO => return error.FileSystem,
        .ISDIR => return error.IsDir,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .ROFS => return error.ReadOnlyFileSystem,
        .NOTEMPTY => return error.DirNotEmpty,
        .NOTCAPABLE => return error.AccessDenied,

        .INVAL => unreachable, // invalid flags, or pathname has . as last component
        .BADF => unreachable, // always a race condition

        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `unlinkat` but `file_path` is a null-terminated string.
pub fn unlinkatZ(dirfd: fd_t, file_path_c: [*:0]const u8, flags: u32) UnlinkatError!void {
    if (builtin.os.tag == .windows) {
        const file_path_w = try windows.cStrToPrefixedFileW(file_path_c);
        return unlinkatW(dirfd, file_path_w.span(), flags);
    }
    switch (errno(system.unlinkat(dirfd, file_path_c, flags))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .FAULT => unreachable,
        .IO => return error.FileSystem,
        .ISDIR => return error.IsDir,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .ROFS => return error.ReadOnlyFileSystem,
        // Some systems report a non-empty directory as EXIST, others as NOTEMPTY.
        .EXIST => return error.DirNotEmpty,
        .NOTEMPTY => return error.DirNotEmpty,

        .INVAL => unreachable, // invalid flags, or pathname has . as last component
        .BADF => unreachable, // always a race condition

        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `unlinkat` but `sub_path_w` is UTF16LE, NT prefixed. Windows only.
pub fn unlinkatW(dirfd: fd_t, sub_path_w: []const u16, flags: u32) UnlinkatError!void {
    const remove_dir = (flags & AT.REMOVEDIR) != 0;
    return windows.DeleteFile(sub_path_w, .{ .dir = dirfd, .remove_dir = remove_dir });
}

pub const RenameError = error{
    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to rename a resource by path relative to it.
    AccessDenied,
    FileBusy,
    DiskQuota,
    IsDir,
    SymLinkLoop,
    LinkQuotaExceeded,
    NameTooLong,
    FileNotFound,
    NotDir,
    SystemResources,
    NoSpaceLeft,
    PathAlreadyExists,
    ReadOnlyFileSystem,
    RenameAcrossMountPoints,
    InvalidUtf8,
    BadPathName,
    NoDevice,
    SharingViolation,
    PipeBusy,
} || UnexpectedError;

/// Change the name or location of a file.
pub fn rename(old_path: []const u8, new_path: []const u8) RenameError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("rename is not supported in WASI; use renameat instead");
    } else if (builtin.os.tag == .windows) {
        const old_path_w = try windows.sliceToPrefixedFileW(old_path);
        const new_path_w = try windows.sliceToPrefixedFileW(new_path);
        return renameW(old_path_w.span().ptr, new_path_w.span().ptr);
    } else {
        const old_path_c = try toPosixPath(old_path);
        const new_path_c = try toPosixPath(new_path);
        return renameZ(&old_path_c, &new_path_c);
    }
}

/// Same as `rename` except the parameters are null-terminated byte arrays.
pub fn renameZ(old_path: [*:0]const u8, new_path: [*:0]const u8) RenameError!void {
    if (builtin.os.tag == .windows) {
        const old_path_w = try windows.cStrToPrefixedFileW(old_path);
        const new_path_w = try windows.cStrToPrefixedFileW(new_path);
        return renameW(old_path_w.span().ptr, new_path_w.span().ptr);
    }
    switch (errno(system.rename(old_path, new_path))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .DQUOT => return error.DiskQuota,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ISDIR => return error.IsDir,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        // A non-empty destination directory is reported as EXIST or NOTEMPTY
        // depending on the platform.
        .EXIST => return error.PathAlreadyExists,
        .NOTEMPTY => return error.PathAlreadyExists,
        .ROFS => return error.ReadOnlyFileSystem,
        .XDEV => return error.RenameAcrossMountPoints,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `rename` except the parameters are null-terminated UTF16LE encoded byte arrays.
/// Assumes target is Windows.
pub fn renameW(old_path: [*:0]const u16, new_path: [*:0]const u16) RenameError!void {
    // Replace any existing destination and do not return until the
    // rename has actually been flushed to disk.
    const flags = windows.MOVEFILE_REPLACE_EXISTING | windows.MOVEFILE_WRITE_THROUGH;
    return windows.MoveFileExW(old_path, new_path, flags);
}

/// Change the name or location of a file based on an open directory handle.
pub fn renameat(
    old_dir_fd: fd_t,
    old_path: []const u8,
    new_dir_fd: fd_t,
    new_path: []const u8,
) RenameError!void {
    if (builtin.os.tag == .windows) {
        const old_path_w = try windows.sliceToPrefixedFileW(old_path);
        const new_path_w = try windows.sliceToPrefixedFileW(new_path);
        // windows.TRUE: replace the destination if it already exists,
        // matching POSIX rename semantics.
        return renameatW(old_dir_fd, old_path_w.span(), new_dir_fd, new_path_w.span(), windows.TRUE);
    } else if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return renameatWasi(old_dir_fd, old_path, new_dir_fd, new_path);
    } else {
        const old_path_c = try toPosixPath(old_path);
        const new_path_c = try toPosixPath(new_path);
        return renameatZ(old_dir_fd, &old_path_c, new_dir_fd, &new_path_c);
    }
}

/// WASI-only. Same as `renameat` except targeting WASI.
/// See also `renameat`.
pub fn renameatWasi(old_dir_fd: fd_t, old_path: []const u8, new_dir_fd: fd_t, new_path: []const u8) RenameError!void {
    switch (wasi.path_rename(old_dir_fd, old_path.ptr, old_path.len, new_dir_fd, new_path.ptr, new_path.len)) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .DQUOT => return error.DiskQuota,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ISDIR => return error.IsDir,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .EXIST => return error.PathAlreadyExists,
        .NOTEMPTY => return error.PathAlreadyExists,
        .ROFS => return error.ReadOnlyFileSystem,
        .XDEV => return error.RenameAcrossMountPoints,
        // WASI-specific: the descriptor lacks the rights for this operation.
        .NOTCAPABLE => return error.AccessDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `renameat` except the parameters are null-terminated byte arrays.
pub fn renameatZ(
    old_dir_fd: fd_t,
    old_path: [*:0]const u8,
    new_dir_fd: fd_t,
    new_path: [*:0]const u8,
) RenameError!void {
    if (builtin.os.tag == .windows) {
        const old_path_w = try windows.cStrToPrefixedFileW(old_path);
        const new_path_w = try windows.cStrToPrefixedFileW(new_path);
        // windows.TRUE: replace the destination if it exists, matching POSIX semantics.
        return renameatW(old_dir_fd, old_path_w.span(), new_dir_fd, new_path_w.span(), windows.TRUE);
    }
    switch (errno(system.renameat(old_dir_fd, old_path, new_dir_fd, new_path))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .DQUOT => return error.DiskQuota,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ISDIR => return error.IsDir,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        // A non-empty destination directory is reported as EXIST or NOTEMPTY
        // depending on the platform.
        .EXIST => return error.PathAlreadyExists,
        .NOTEMPTY => return error.PathAlreadyExists,
        .ROFS => return error.ReadOnlyFileSystem,
        .XDEV => return error.RenameAcrossMountPoints,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `renameat` but Windows-only and the path parameters are
/// [WTF-16](https://simonsapin.github.io/wtf-8/#potentially-ill-formed-utf-16) encoded.
pub fn renameatW(
    old_dir_fd: fd_t,
    old_path_w: []const u16,
    new_dir_fd: fd_t,
    new_path_w: []const u16,
    ReplaceIfExists: windows.BOOLEAN,
) RenameError!void {
    const src_fd = windows.OpenFile(old_path_w, .{
        .dir = old_dir_fd,
        .access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE,
        .creation = windows.FILE_OPEN,
        .io_mode = .blocking,
        .filter = .any, // This function is supposed to rename both files and directories.
    }) catch |err| switch (err) {
        error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`.
        else => |e| return e,
    };
    defer windows.CloseHandle(src_fd);

    // FILE_RENAME_INFORMATION has a trailing inline FileName array; size the
    // buffer for the largest possible path and build the struct in place.
    const struct_buf_len = @sizeOf(windows.FILE_RENAME_INFORMATION) + (MAX_PATH_BYTES - 1);
    var rename_info_buf: [struct_buf_len]u8 align(@alignOf(windows.FILE_RENAME_INFORMATION)) = undefined;
    const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION) - 1 + new_path_w.len * 2;
    if (struct_len > struct_buf_len) return error.NameTooLong;

    const rename_info = @ptrCast(*windows.FILE_RENAME_INFORMATION, &rename_info_buf);

    rename_info.* = .{
        .ReplaceIfExists = ReplaceIfExists,
        // An absolute destination path must not be resolved relative to new_dir_fd.
        .RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(new_path_w)) null else new_dir_fd,
        .FileNameLength = @intCast(u32, new_path_w.len * 2), // already checked error.NameTooLong
        .FileName = undefined,
    };
    std.mem.copy(u16, @as([*]u16, &rename_info.FileName)[0..new_path_w.len], new_path_w);

    var io_status_block: windows.IO_STATUS_BLOCK = undefined;

    const rc = windows.ntdll.NtSetInformationFile(
        src_fd,
        &io_status_block,
        rename_info,
        @intCast(u32, struct_len), // already checked for error.NameTooLong
        .FileRenameInformation,
    );

    switch (rc) {
        .SUCCESS => return,
        .INVALID_HANDLE => unreachable,
        .INVALID_PARAMETER => unreachable,
        .OBJECT_PATH_SYNTAX_BAD => unreachable,
        .ACCESS_DENIED => return error.AccessDenied,
        .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
        .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
        .NOT_SAME_DEVICE => return error.RenameAcrossMountPoints,
        else => return windows.unexpectedStatus(rc),
    }
}

/// Create a directory at the path `sub_dir_path`, relative to the open
/// directory handle `dir_fd`.
/// `mode` is ignored on Windows and WASI.
pub fn mkdirat(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void {
    if (builtin.os.tag == .windows) {
        const sub_dir_path_w = try windows.sliceToPrefixedFileW(sub_dir_path);
        return mkdiratW(dir_fd, sub_dir_path_w.span(), mode);
    } else if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return mkdiratWasi(dir_fd, sub_dir_path, mode);
    } else {
        const sub_dir_path_c = try toPosixPath(sub_dir_path);
        return mkdiratZ(dir_fd, &sub_dir_path_c, mode);
    }
}

/// WASI-only. Same as `mkdirat` but targeting WASI.
pub fn mkdiratWasi(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void {
    _ = mode; // WASI's path_create_directory takes no mode.
    switch (wasi.path_create_directory(dir_fd, sub_dir_path.ptr, sub_dir_path.len)) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .BADF => unreachable,
        .PERM => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .FAULT => unreachable,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .NOTDIR => return error.NotDir,
        .ROFS => return error.ReadOnlyFileSystem,
        // WASI-specific: the descriptor lacks the rights for this operation.
        .NOTCAPABLE => return error.AccessDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `mkdirat` except `sub_dir_path` is null-terminated.
pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: u32) MakeDirError!void {
    if (builtin.os.tag == .windows) {
        const sub_dir_path_w = try windows.cStrToPrefixedFileW(sub_dir_path);
        // Pass the slice; `mkdiratW` takes `[]const u16`, not a pointer
        // (was `.span().ptr`, which does not coerce and broke the Windows path).
        return mkdiratW(dir_fd, sub_dir_path_w.span(), mode);
    }
    switch (errno(system.mkdirat(dir_fd, sub_dir_path, mode))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .BADF => unreachable,
        .PERM => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .FAULT => unreachable,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .NOTDIR => return error.NotDir,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `mkdirat` except `sub_path_w` is WTF16 encoded.
pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!void {
    _ = mode; // Windows directory creation takes no POSIX mode.
    // Creating with FILE_CREATE + dir_only filter is how a directory is made
    // via NtCreateFile; the handle is only needed transiently.
    const sub_dir_handle = windows.OpenFile(sub_path_w, .{
        .dir = dir_fd,
        .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
        .creation = windows.FILE_CREATE,
        .io_mode = .blocking,
        .filter = .dir_only,
    }) catch |err| switch (err) {
        error.IsDir => unreachable,
        error.PipeBusy => unreachable,
        error.WouldBlock => unreachable,
        else => |e| return e,
    };
    windows.CloseHandle(sub_dir_handle);
}

pub const MakeDirError = error{
    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to create a new directory relative to it.
    AccessDenied,
    DiskQuota,
    PathAlreadyExists,
    SymLinkLoop,
    LinkQuotaExceeded,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NoSpaceLeft,
    NotDir,
    ReadOnlyFileSystem,
    InvalidUtf8,
    BadPathName,
    NoDevice,
} || UnexpectedError;

/// Create a directory.
/// `mode` is ignored on Windows.
pub fn mkdir(dir_path: []const u8, mode: u32) MakeDirError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("mkdir is not supported in WASI; use mkdirat instead");
    } else if (builtin.os.tag == .windows) {
        const dir_path_w = try windows.sliceToPrefixedFileW(dir_path);
        return mkdirW(dir_path_w.span(), mode);
    } else {
        const dir_path_c = try toPosixPath(dir_path);
        return mkdirZ(&dir_path_c, mode);
    }
}

/// Same as `mkdir` but the parameter is a null-terminated UTF8-encoded string.
pub fn mkdirZ(dir_path: [*:0]const u8, mode: u32) MakeDirError!void {
    if (builtin.os.tag == .windows) {
        const dir_path_w = try windows.cStrToPrefixedFileW(dir_path);
        return mkdirW(dir_path_w.span(), mode);
    }
    switch (errno(system.mkdir(dir_path, mode))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .FAULT => unreachable,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .NOTDIR => return error.NotDir,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `mkdir` but the parameter is WTF16 encoded.
pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void {
    _ = mode; // Windows directory creation takes no POSIX mode.
    // Creating with FILE_CREATE + dir_only filter is how a directory is made
    // via NtCreateFile; the handle is only needed transiently.
    const sub_dir_handle = windows.OpenFile(dir_path_w, .{
        .dir = std.fs.cwd().fd,
        .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
        .creation = windows.FILE_CREATE,
        .io_mode = .blocking,
        .filter = .dir_only,
    }) catch |err| switch (err) {
        error.IsDir => unreachable,
        error.PipeBusy => unreachable,
        error.WouldBlock => unreachable,
        else => |e| return e,
    };
    windows.CloseHandle(sub_dir_handle);
}

pub const DeleteDirError = error{
    AccessDenied,
    FileBusy,
    SymLinkLoop,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NotDir,
    DirNotEmpty,
    ReadOnlyFileSystem,
    InvalidUtf8,
    BadPathName,
} || UnexpectedError;

/// Deletes an empty directory.
pub fn rmdir(dir_path: []const u8) DeleteDirError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("rmdir is not supported in WASI; use unlinkat instead");
    } else if (builtin.os.tag == .windows) {
        const dir_path_w = try windows.sliceToPrefixedFileW(dir_path);
        return rmdirW(dir_path_w.span());
    } else {
        const dir_path_c = try toPosixPath(dir_path);
        return rmdirZ(&dir_path_c);
    }
}

/// Same as `rmdir` except the parameter is null-terminated.
pub fn rmdirZ(dir_path: [*:0]const u8) DeleteDirError!void {
    if (builtin.os.tag == .windows) {
        const dir_path_w = try windows.cStrToPrefixedFileW(dir_path);
        return rmdirW(dir_path_w.span());
    }
    switch (errno(system.rmdir(dir_path))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .FAULT => unreachable,
        .INVAL => return error.BadPathName,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTDIR => return error.NotDir,
        // A non-empty directory is reported as EXIST or NOTEMPTY
        // depending on the platform.
        .EXIST => return error.DirNotEmpty,
        .NOTEMPTY => return error.DirNotEmpty,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `rmdir` except the parameter is WTF16 encoded.
pub fn rmdirW(dir_path_w: []const u16) DeleteDirError!void {
    return windows.DeleteFile(dir_path_w, .{ .dir = std.fs.cwd().fd, .remove_dir = true }) catch |err| switch (err) {
        error.IsDir => unreachable,
        else => |e| return e,
    };
}

pub const ChangeCurDirError = error{
    AccessDenied,
    FileSystem,
    SymLinkLoop,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NotDir,
    BadPathName,

    /// On Windows, file paths must be valid Unicode.
    InvalidUtf8,
} || UnexpectedError;

/// Changes the current working directory of the calling process.
/// `dir_path` is recommended to be a UTF-8 encoded string.
pub fn chdir(dir_path: []const u8) ChangeCurDirError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("chdir is not supported in WASI");
    } else if (builtin.os.tag == .windows) {
        var utf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined;
        const len = try std.unicode.utf8ToUtf16Le(utf16_dir_path[0..], dir_path);
        if (len > utf16_dir_path.len) return error.NameTooLong;
        return chdirW(utf16_dir_path[0..len]);
    } else {
        const dir_path_c = try toPosixPath(dir_path);
        return chdirZ(&dir_path_c);
    }
}

/// Same as `chdir` except the parameter is null-terminated.
pub fn chdirZ(dir_path: [*:0]const u8) ChangeCurDirError!void {
    if (builtin.os.tag == .windows) {
        // `utf8ToUtf16Le` takes a slice; measure the null-terminated string
        // first (passing the raw `[*:0]` pointer does not coerce and broke
        // the Windows path).
        const dir_path_span = mem.sliceTo(dir_path, 0);
        var utf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined;
        const len = try std.unicode.utf8ToUtf16Le(utf16_dir_path[0..], dir_path_span);
        if (len > utf16_dir_path.len) return error.NameTooLong;
        return chdirW(utf16_dir_path[0..len]);
    }
    switch (errno(system.chdir(dir_path))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .FAULT => unreachable,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTDIR => return error.NotDir,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `chdir` except the parameter is WTF16 encoded.
pub fn chdirW(dir_path: []const u16) ChangeCurDirError!void {
    windows.SetCurrentDirectory(dir_path) catch |err| switch (err) {
        error.NoDevice => return error.FileSystem,
        else => |e| return e,
    };
}

pub const FchdirError = error{
    AccessDenied,
    NotDir,
    FileSystem,
} || UnexpectedError;

/// Changes the current working directory to the directory referred to by
/// the open directory handle `dirfd`. Retries on EINTR.
pub fn fchdir(dirfd: fd_t) FchdirError!void {
    while (true) {
        switch (errno(system.fchdir(dirfd))) {
            .SUCCESS => return,
            .ACCES => return error.AccessDenied,
            .BADF => unreachable,
            .NOTDIR => return error.NotDir,
            .INTR => continue, // interrupted by a signal; retry
            .IO => return error.FileSystem,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const ReadLinkError = error{
    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to read value of a symbolic link relative to it.
    AccessDenied,
    FileSystem,
    SymLinkLoop,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NotLink,
    NotDir,
    InvalidUtf8,
    BadPathName,
    /// Windows-only. This error may occur if the opened reparse point is
    /// of unsupported type.
    UnsupportedReparsePointType,
} || UnexpectedError;

/// Read value of a symbolic link.
/// The return value is a slice of `out_buffer` from index 0.
pub fn readlink(file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
    if (builtin.os.tag == .wasi) {
        @compileError("readlink is not supported in WASI; use readlinkat instead");
    } else if (builtin.os.tag == .windows) {
        const file_path_w = try windows.sliceToPrefixedFileW(file_path);
        return readlinkW(file_path_w.span(), out_buffer);
    } else {
        const file_path_c = try toPosixPath(file_path);
        return readlinkZ(&file_path_c, out_buffer);
    }
}

/// Windows-only. Same as `readlink` except `file_path` is WTF16 encoded.
/// See also `readlinkZ`.
pub fn readlinkW(file_path: []const u16, out_buffer: []u8) ReadLinkError![]u8 {
    return windows.ReadLink(std.fs.cwd().fd, file_path, out_buffer);
}

/// Same as `readlink` except `file_path` is null-terminated.
pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 {
    if (builtin.os.tag == .windows) {
        // Use `cStrToPrefixedFileW`, the conversion used by every other
        // null-terminated-path wrapper in this file (was the nonexistent
        // `cStrToWin32PrefixedFileW`).
        const file_path_w = try windows.cStrToPrefixedFileW(file_path);
        return readlinkW(file_path_w.span(), out_buffer);
    }
    const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len);
    switch (errno(rc)) {
        // On success, rc is the number of bytes placed in out_buffer.
        .SUCCESS => return out_buffer[0..@bitCast(usize, rc)],
        .ACCES => return error.AccessDenied,
        .FAULT => unreachable,
        .INVAL => return error.NotLink,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTDIR => return error.NotDir,
        else => |err| return unexpectedErrno(err),
    }
}

/// Similar to `readlink` except reads value of a symbolic link **relative** to `dirfd` directory handle.
/// The return value is a slice of `out_buffer` from index 0.
/// See also `readlinkatWasi`, `readlinkatZ` and `readlinkatW`.
pub fn readlinkat(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return readlinkatWasi(dirfd, file_path, out_buffer);
    }
    if (builtin.os.tag == .windows) {
        const file_path_w = try windows.sliceToPrefixedFileW(file_path);
        return readlinkatW(dirfd, file_path_w.span(), out_buffer);
    }
    const file_path_c = try toPosixPath(file_path);
    return readlinkatZ(dirfd, &file_path_c, out_buffer);
}

/// WASI-only. Same as `readlinkat` but targets WASI.
/// See also `readlinkat`.
/// WASI-only implementation backing `readlinkat` when libc is not linked.
/// On success the result is the slice of `out_buffer` holding the link target.
pub fn readlinkatWasi(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
    var link_len: usize = undefined;
    const rc = wasi.path_readlink(dirfd, file_path.ptr, file_path.len, out_buffer.ptr, out_buffer.len, &link_len);
    switch (rc) {
        .SUCCESS => return out_buffer[0..link_len],
        .ACCES => return error.AccessDenied,
        .FAULT => unreachable,
        .INVAL => return error.NotLink,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTDIR => return error.NotDir,
        // Missing capability rights on the descriptor behave like a permission failure.
        .NOTCAPABLE => return error.AccessDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `readlinkat` except `file_path` is null-terminated, WTF16 encoded.
/// See also `readlinkat`.
pub fn readlinkatW(dirfd: fd_t, file_path: []const u16, out_buffer: []u8) ReadLinkError![]u8 {
    return windows.ReadLink(dirfd, file_path, out_buffer);
}

/// Same as `readlinkat` except `file_path` is null-terminated.
/// See also `readlinkat`.
/// Same as `readlinkat` except `file_path` is null-terminated.
/// See also `readlinkat`.
pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 {
    if (builtin.os.tag == .windows) {
        const file_path_w = try windows.cStrToPrefixedFileW(file_path);
        return readlinkatW(dirfd, file_path_w.span(), out_buffer);
    }
    const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len);
    switch (errno(rc)) {
        // On success, rc is the number of bytes written to out_buffer.
        .SUCCESS => return out_buffer[0..@bitCast(usize, rc)],
        .ACCES => return error.AccessDenied,
        .FAULT => unreachable,
        .INVAL => return error.NotLink,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTDIR => return error.NotDir,
        else => |err| return unexpectedErrno(err),
    }
}

pub const SetEidError = error{
    InvalidUserId,
    PermissionDenied,
} || UnexpectedError;

pub const SetIdError = error{ResourceLimitReached} || SetEidError;

/// Set the real, effective and saved user ID of the calling process.
pub fn setuid(uid: uid_t) SetIdError!void {
    switch (errno(system.setuid(uid))) {
        .SUCCESS => return,
        .AGAIN => return error.ResourceLimitReached,
        .INVAL => return error.InvalidUserId,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Set the effective user ID of the calling process.
pub fn seteuid(uid: uid_t) SetEidError!void {
    switch (errno(system.seteuid(uid))) {
        .SUCCESS => return,
        .INVAL => return error.InvalidUserId,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Set the real and effective user IDs of the calling process.
pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void {
    switch (errno(system.setreuid(ruid, euid))) {
        .SUCCESS => return,
        .AGAIN => return error.ResourceLimitReached,
        .INVAL => return error.InvalidUserId,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Set the real, effective and saved group ID of the calling process.
pub fn setgid(gid: gid_t) SetIdError!void {
    switch (errno(system.setgid(gid))) {
        .SUCCESS => return,
        .AGAIN => return error.ResourceLimitReached,
        .INVAL => return error.InvalidUserId,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Set the effective group ID of the calling process.
/// Fixed: the parameter was previously declared `uid: uid_t`; setegid(2) takes a
/// group ID, so it is now `gid: gid_t` (both are integer aliases, so callers are
/// unaffected — Zig has no named arguments).
pub fn setegid(gid: gid_t) SetEidError!void {
    switch (errno(system.setegid(gid))) {
        .SUCCESS => return,
        .INVAL => return error.InvalidUserId,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Set the real and effective group IDs of the calling process.
pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void {
    switch (errno(system.setregid(rgid, egid))) {
        .SUCCESS => return,
        .AGAIN => return error.ResourceLimitReached,
        .INVAL => return error.InvalidUserId,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Test whether a file descriptor refers to a terminal.
pub fn isatty(handle: fd_t) bool {
    if (builtin.os.tag == .windows) {
        if (isCygwinPty(handle)) return true;
        // GetConsoleMode succeeds only for console handles.
        var out: windows.DWORD = undefined;
        return windows.kernel32.GetConsoleMode(handle, &out) != 0;
    }
    if (builtin.link_libc) {
        return system.isatty(handle) != 0;
    }
    if (builtin.os.tag == .wasi) {
        var statbuf: fdstat_t = undefined;
        const err = system.fd_fdstat_get(handle, &statbuf);
        if (err != 0) {
            // errno = err;
            return false;
        }
        // A tty is a character device that we can't seek or tell on.
        if (statbuf.fs_filetype != .CHARACTER_DEVICE or
            (statbuf.fs_rights_base & (RIGHT.FD_SEEK | RIGHT.FD_TELL)) != 0)
        {
            // errno = ENOTTY;
            return false;
        }
        return true;
    }
    if (builtin.os.tag == .linux) {
        while (true) {
            // Querying the window size is the conventional terminal probe;
            // it fails with ENOTTY on non-terminals.
            var wsz: linux.winsize = undefined;
            const fd = @bitCast(usize, @as(isize, handle));
            const rc = linux.syscall3(.ioctl, fd, linux.T.IOCGWINSZ, @ptrToInt(&wsz));
            switch (linux.getErrno(rc)) {
                .SUCCESS => return true,
                .INTR => continue, // interrupted; retry
                else => return false,
            }
        }
    }
    return system.isatty(handle) != 0;
}

/// Windows-only heuristic: returns true if `handle` is a Cygwin/MSYS
/// pseudo-terminal pipe, recognized by "msys-" or "-pty" in the pipe's name.
/// Always returns false on non-Windows targets.
pub fn isCygwinPty(handle: fd_t) bool {
    if (builtin.os.tag != .windows) return false;

    const size = @sizeOf(windows.FILE_NAME_INFO);
    // Buffer holds a FILE_NAME_INFO header followed by the (UTF-16) file name.
    var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (size + windows.MAX_PATH);

    if (windows.kernel32.GetFileInformationByHandleEx(
        handle,
        windows.FileNameInfo,
        @ptrCast(*anyopaque, &name_info_bytes),
        name_info_bytes.len,
    ) == 0) {
        return false;
    }

    const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
    // FileNameLength is in bytes; the name follows the fixed-size header.
    const name_bytes = name_info_bytes[size .. size + @as(usize, name_info.FileNameLength)];
    const name_wide = mem.bytesAsSlice(u16, name_bytes);
    return mem.indexOf(u16, name_wide, &[_]u16{ 'm', 's', 'y', 's', '-' }) != null or
        mem.indexOf(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null;
}

pub const SocketError = error{
    /// Permission to create a socket of the specified type and/or
    /// protocol is denied.
    PermissionDenied,

    /// The implementation does not support the specified address family.
    AddressFamilyNotSupported,

    /// Unknown protocol, or protocol family not available.
    ProtocolFamilyNotAvailable,

    /// The per-process limit on the number of open file descriptors has been reached.
    ProcessFdQuotaExceeded,

    /// The system-wide limit on the total number of open files has been reached.
    SystemFdQuotaExceeded,

    /// Insufficient memory is available. The socket cannot be created until sufficient
    /// resources are freed.
    SystemResources,

    /// The protocol type or the specified protocol is not supported within this domain.
    ProtocolNotSupported,

    /// The socket type is not supported by the protocol.
    SocketTypeNotSupported,
} || UnexpectedError;

/// Create an endpoint for communication, returning the descriptor/handle.
/// `SOCK.NONBLOCK` and `SOCK.CLOEXEC` in `socket_type` are honored on every
/// target, emulated where the OS lacks native support for them.
pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t {
    if (builtin.os.tag == .windows) {
        // NOTE: windows translates the SOCK.NONBLOCK/SOCK.CLOEXEC flags into
        // windows-analogous operations
        const filtered_sock_type = socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC);
        const flags: u32 = if ((socket_type & SOCK.CLOEXEC) != 0)
            windows.ws2_32.WSA_FLAG_NO_HANDLE_INHERIT
        else
            0;
        const rc = try windows.WSASocketW(
            @bitCast(i32, domain),
            @bitCast(i32, filtered_sock_type),
            @bitCast(i32, protocol),
            null,
            0,
            flags,
        );
        errdefer windows.closesocket(rc) catch unreachable;
        if ((socket_type & SOCK.NONBLOCK) != 0) {
            var mode: c_ulong = 1; // nonblocking
            if (windows.ws2_32.SOCKET_ERROR == windows.ws2_32.ioctlsocket(rc, windows.ws2_32.FIONBIO, &mode)) {
                switch (windows.ws2_32.WSAGetLastError()) {
                    // have not identified any error codes that should be handled yet
                    else => unreachable,
                }
            }
        }
        return rc;
    }

    // Darwin has no accept4-style flag support; strip the flags and apply
    // them with fcntl afterwards.
    const have_sock_flags = comptime !builtin.target.isDarwin();
    const filtered_sock_type = if (!have_sock_flags)
        socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC)
    else
        socket_type;
    const rc = system.socket(domain, filtered_sock_type, protocol);
    switch (errno(rc)) {
        .SUCCESS => {
            const fd = @intCast(fd_t, rc);
            if (!have_sock_flags) {
                try setSockFlags(fd, socket_type);
            }
            return fd;
        },
        .ACCES => return error.PermissionDenied,
        .AFNOSUPPORT => return error.AddressFamilyNotSupported,
        .INVAL => return error.ProtocolFamilyNotAvailable,
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        .NOBUFS => return error.SystemResources,
        .NOMEM => return error.SystemResources,
        .PROTONOSUPPORT => return error.ProtocolNotSupported,
        .PROTOTYPE => return error.SocketTypeNotSupported,
        else => |err| return
        unexpectedErrno(err),
    }
}

pub const ShutdownError = error{
    ConnectionAborted,

    /// Connection was reset by peer, application should close socket as it is no longer usable.
    ConnectionResetByPeer,

    BlockingOperationInProgress,

    /// The network subsystem has failed.
    NetworkSubsystemFailed,

    /// The socket is not connected (connection-oriented sockets only).
    SocketNotConnected,
    SystemResources,
} || UnexpectedError;

pub const ShutdownHow = enum { recv, send, both };

/// Shutdown socket send/receive operations
pub fn shutdown(sock: socket_t, how: ShutdownHow) ShutdownError!void {
    if (builtin.os.tag == .windows) {
        const result = windows.ws2_32.shutdown(sock, switch (how) {
            .recv => windows.ws2_32.SD_RECEIVE,
            .send => windows.ws2_32.SD_SEND,
            .both => windows.ws2_32.SD_BOTH,
        });
        if (0 != result) switch (windows.ws2_32.WSAGetLastError()) {
            .WSAECONNABORTED => return error.ConnectionAborted,
            .WSAECONNRESET => return error.ConnectionResetByPeer,
            .WSAEINPROGRESS => return error.BlockingOperationInProgress,
            .WSAEINVAL => unreachable, // `how` enum guarantees a valid argument
            .WSAENETDOWN => return error.NetworkSubsystemFailed,
            .WSAENOTCONN => return error.SocketNotConnected,
            .WSAENOTSOCK => unreachable,
            .WSANOTINITIALISED => unreachable,
            else => |err| return windows.unexpectedWSAError(err),
        };
    } else {
        const rc = system.shutdown(sock, switch (how) {
            .recv => SHUT.RD,
            .send => SHUT.WR,
            .both => SHUT.RDWR,
        });
        switch (errno(rc)) {
            .SUCCESS => return,
            .BADF => unreachable,
            .INVAL => unreachable,
            .NOTCONN => return error.SocketNotConnected,
            .NOTSOCK => unreachable,
            .NOBUFS => return error.SystemResources,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Close the socket with the platform-appropriate call
/// (closesocket on Windows, close elsewhere).
pub fn closeSocket(sock: socket_t) void {
    if (builtin.os.tag == .windows) {
        windows.closesocket(sock) catch unreachable;
    } else {
        close(sock);
    }
}

pub const BindError = error{
    /// The address is protected, and the user is not the superuser.
    /// For UNIX domain sockets: Search permission is denied on a component
    /// of the path prefix.
    AccessDenied,

    /// The given address is already in use, or in the case of Internet domain sockets,
    /// The port number was specified as zero in the socket
    /// address structure, but, upon attempting to bind to an ephemeral port, it was
    /// determined that all port numbers in the ephemeral port range are currently in
    /// use. See the discussion of /proc/sys/net/ipv4/ip_local_port_range ip(7).
    AddressInUse,

    /// A nonexistent interface was requested or the requested address was not local.
    AddressNotAvailable,

    /// Too many symbolic links were encountered in resolving addr.
    SymLinkLoop,

    /// addr is too long.
    NameTooLong,

    /// A component in the directory prefix of the socket pathname does not exist.
    FileNotFound,

    /// Insufficient kernel memory was available.
    SystemResources,

    /// A component of the path prefix is not a directory.
    NotDir,

    /// The socket inode would reside on a read-only filesystem.
    ReadOnlyFileSystem,

    /// The network subsystem has failed.
    NetworkSubsystemFailed,

    FileDescriptorNotASocket,

    AlreadyBound,
} || UnexpectedError;

/// addr is `*const T` where T is one of the sockaddr
pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) BindError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.bind(sock, addr, len);
        if (rc == windows.ws2_32.SOCKET_ERROR) {
            switch (windows.ws2_32.WSAGetLastError()) {
                .WSANOTINITIALISED => unreachable, // not initialized WSA
                .WSAEACCES => return error.AccessDenied,
                .WSAEADDRINUSE => return error.AddressInUse,
                .WSAEADDRNOTAVAIL => return error.AddressNotAvailable,
                .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                .WSAEFAULT => unreachable, // invalid pointers
                .WSAEINVAL => return error.AlreadyBound,
                .WSAENOBUFS => return error.SystemResources,
                .WSAENETDOWN => return error.NetworkSubsystemFailed,
                else => |err| return windows.unexpectedWSAError(err),
            }
            unreachable;
        }
        return;
    } else {
        const rc = system.bind(sock, addr, len);
        switch (errno(rc)) {
            .SUCCESS => return,
            .ACCES => return error.AccessDenied,
            .ADDRINUSE => return error.AddressInUse,
            .BADF => unreachable, // always a race condition if this error is returned
            .INVAL => unreachable, // invalid parameters
            .NOTSOCK => unreachable, // invalid `sockfd`
            .ADDRNOTAVAIL => return error.AddressNotAvailable,
            .FAULT => unreachable, // invalid `addr` pointer
            .LOOP => return error.SymLinkLoop,
            .NAMETOOLONG => return error.NameTooLong,
            .NOENT => return error.FileNotFound,
            .NOMEM => return error.SystemResources,
            .NOTDIR => return error.NotDir,
            .ROFS => return error.ReadOnlyFileSystem,
            else => |err| return unexpectedErrno(err),
        }
    }
    // Both branches above return from every path.
    unreachable;
}

pub const ListenError = error{
    /// Another socket is already listening on the same port.
    /// For Internet domain sockets, the  socket referred to by sockfd had not previously
    /// been bound to an address and, upon attempting to bind it to an ephemeral port, it
    /// was determined that all port numbers in the ephemeral port range are currently in
    /// use. See the discussion of /proc/sys/net/ipv4/ip_local_port_range in ip(7).
    AddressInUse,

    /// The file descriptor sockfd does not refer to a socket.
    FileDescriptorNotASocket,

    /// The socket is not of a type that supports the listen() operation.
    OperationNotSupported,

    /// The network subsystem has failed.
    NetworkSubsystemFailed,

    /// Ran out of system resources
    /// On Windows it can either run out of socket descriptors or buffer space
    SystemResources,

    /// Already connected
    AlreadyConnected,

    /// Socket has not been bound yet
    SocketNotBound,
} || UnexpectedError;

/// Mark `sock` as a passive socket that will accept incoming connections,
/// with a pending-connection queue of at most `backlog` entries.
pub fn listen(sock: socket_t, backlog: u31) ListenError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.listen(sock, backlog);
        if (rc == windows.ws2_32.SOCKET_ERROR) {
            switch (windows.ws2_32.WSAGetLastError()) {
                .WSANOTINITIALISED => unreachable, // not initialized WSA
                .WSAENETDOWN => return error.NetworkSubsystemFailed,
                .WSAEADDRINUSE => return error.AddressInUse,
                .WSAEISCONN => return error.AlreadyConnected,
                .WSAEINVAL => return error.SocketNotBound,
                .WSAEMFILE, .WSAENOBUFS => return error.SystemResources,
                .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                .WSAEOPNOTSUPP => return error.OperationNotSupported,
                .WSAEINPROGRESS => unreachable,
                else => |err| return windows.unexpectedWSAError(err),
            }
        }
        return;
    } else {
        const rc = system.listen(sock, backlog);
        switch (errno(rc)) {
            .SUCCESS => return,
            .ADDRINUSE => return error.AddressInUse,
            .BADF => unreachable,
            .NOTSOCK => return error.FileDescriptorNotASocket,
            .OPNOTSUPP => return error.OperationNotSupported,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const AcceptError = error{
    ConnectionAborted,

    /// The file descriptor sockfd does not refer to a socket.
    FileDescriptorNotASocket,

    /// The per-process limit on the number of open file descriptors has been reached.
    ProcessFdQuotaExceeded,

    /// The system-wide limit on the total number of open files has been reached.
    SystemFdQuotaExceeded,

    /// Not enough free memory.  This often means that the memory allocation  is  limited
    /// by the socket buffer limits, not by the system memory.
    SystemResources,

    /// Socket is not listening for new connections.
    SocketNotListening,

    ProtocolFailure,

    /// Firewall rules forbid connection.
BlockedByFirewall, /// This error occurs when no global event loop is configured, /// and accepting from the socket would block. WouldBlock, /// An incoming connection was indicated, but was subsequently terminated by the /// remote peer prior to accepting the call. ConnectionResetByPeer, /// The network subsystem has failed. NetworkSubsystemFailed, /// The referenced socket is not a type that supports connection-oriented service. OperationNotSupported, } || UnexpectedError; /// Accept a connection on a socket. /// If `sockfd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. pub fn accept( /// This argument is a socket that has been created with `socket`, bound to a local address /// with `bind`, and is listening for connections after a `listen`. sock: socket_t, /// This argument is a pointer to a sockaddr structure. This structure is filled in with the /// address of the peer socket, as known to the communications layer. The exact format of the /// address returned addr is determined by the socket's address family (see `socket` and the /// respective protocol man pages). addr: ?*sockaddr, /// This argument is a value-result argument: the caller must initialize it to contain the /// size (in bytes) of the structure pointed to by addr; on return it will contain the actual size /// of the peer address. /// /// The returned address is truncated if the buffer provided is too small; in this case, `addr_size` /// will return a value greater than was supplied to the call. addr_size: ?*socklen_t, /// The following values can be bitwise ORed in flags to obtain different behavior: /// * `SOCK.NONBLOCK` - Set the `O.NONBLOCK` file status flag on the open file description (see `open`) /// referred to by the new file descriptor. Using this flag saves extra calls to `fcntl` to achieve /// the same result. /// * `SOCK.CLOEXEC` - Set the close-on-exec (`FD_CLOEXEC`) flag on the new file descriptor. 
See the /// description of the `O.CLOEXEC` flag in `open` for reasons why this may be useful. flags: u32, ) AcceptError!socket_t { const have_accept4 = comptime !(builtin.target.isDarwin() or builtin.os.tag == .windows); assert(0 == (flags & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC))); // Unsupported flag(s) const accepted_sock = while (true) { const rc = if (have_accept4) system.accept4(sock, addr, addr_size, flags) else if (builtin.os.tag == .windows) windows.accept(sock, addr, addr_size) else system.accept(sock, addr, addr_size); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.INVALID_SOCKET) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, // not initialized WSA .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEFAULT => unreachable, .WSAEINVAL => return error.SocketNotListening, .WSAEMFILE => return error.ProcessFdQuotaExceeded, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOBUFS => return error.FileDescriptorNotASocket, .WSAEOPNOTSUPP => return error.OperationNotSupported, .WSAEWOULDBLOCK => return error.WouldBlock, else => |err| return windows.unexpectedWSAError(err), } } else { break rc; } } else { switch (errno(rc)) { .SUCCESS => { break @intCast(socket_t, rc); }, .INTR => continue, .AGAIN => return error.WouldBlock, .BADF => unreachable, // always a race condition .CONNABORTED => return error.ConnectionAborted, .FAULT => unreachable, .INVAL => return error.SocketNotListening, .NOTSOCK => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .OPNOTSUPP => unreachable, .PROTO => return error.ProtocolFailure, .PERM => return error.BlockedByFirewall, else => |err| return unexpectedErrno(err), } } } else unreachable; if (!have_accept4) { try setSockFlags(accepted_sock, flags); } return accepted_sock; } pub const EpollCreateError = error{ /// The per-user limit on the 
number of epoll instances imposed by /// /proc/sys/fs/epoll/max_user_instances was encountered. See epoll(7) for further /// details. /// Or, The per-process limit on the number of open file descriptors has been reached. ProcessFdQuotaExceeded, /// The system-wide limit on the total number of open files has been reached. SystemFdQuotaExceeded, /// There was insufficient memory to create the kernel object. SystemResources, } || UnexpectedError; pub fn epoll_create1(flags: u32) EpollCreateError!i32 { const rc = system.epoll_create1(flags); switch (errno(rc)) { .SUCCESS => return @intCast(i32, rc), else => |err| return unexpectedErrno(err), .INVAL => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, } } pub const EpollCtlError = error{ /// op was EPOLL_CTL_ADD, and the supplied file descriptor fd is already registered /// with this epoll instance. FileDescriptorAlreadyPresentInSet, /// fd refers to an epoll instance and this EPOLL_CTL_ADD operation would result in a /// circular loop of epoll instances monitoring one another. OperationCausesCircularLoop, /// op was EPOLL_CTL_MOD or EPOLL_CTL_DEL, and fd is not registered with this epoll /// instance. FileDescriptorNotRegistered, /// There was insufficient memory to handle the requested op control operation. SystemResources, /// The limit imposed by /proc/sys/fs/epoll/max_user_watches was encountered while /// trying to register (EPOLL_CTL_ADD) a new file descriptor on an epoll instance. /// See epoll(7) for further details. UserResourceLimitReached, /// The target file fd does not support epoll. This error can occur if fd refers to, /// for example, a regular file or a directory. 
    FileDescriptorIncompatibleWithEpoll,
} || UnexpectedError;

/// Add, modify, or remove (`op`) entries in the interest list of the epoll
/// instance referred to by `epfd`.
pub fn epoll_ctl(epfd: i32, op: u32, fd: i32, event: ?*linux.epoll_event) EpollCtlError!void {
    const rc = system.epoll_ctl(epfd, op, fd, event);
    switch (errno(rc)) {
        .SUCCESS => return,
        else => |err| return unexpectedErrno(err),

        .BADF => unreachable, // always a race condition if this happens
        .EXIST => return error.FileDescriptorAlreadyPresentInSet,
        .INVAL => unreachable,
        .LOOP => return error.OperationCausesCircularLoop,
        .NOENT => return error.FileDescriptorNotRegistered,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.UserResourceLimitReached,
        .PERM => return error.FileDescriptorIncompatibleWithEpoll,
    }
}

/// Waits for an I/O event on an epoll file descriptor.
/// Returns the number of file descriptors ready for the requested I/O,
/// or zero if no file descriptor became ready during the requested timeout milliseconds.
pub fn epoll_wait(epfd: i32, events: []linux.epoll_event, timeout: i32) usize {
    while (true) {
        // TODO get rid of the @intCast
        const rc = system.epoll_wait(epfd, events.ptr, @intCast(u32, events.len), timeout);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .INTR => continue, // interrupted; retry
            .BADF => unreachable,
            .FAULT => unreachable,
            .INVAL => unreachable,
            else => unreachable,
        }
    }
}

pub const EventFdError = error{
    SystemResources,
    ProcessFdQuotaExceeded,
    SystemFdQuotaExceeded,
} || UnexpectedError;

/// Create an "eventfd" object usable as an event wait/notify mechanism;
/// `initval` is the counter's initial value.
pub fn eventfd(initval: u32, flags: u32) EventFdError!i32 {
    const rc = system.eventfd(initval, flags);
    switch (errno(rc)) {
        .SUCCESS => return @intCast(i32, rc),
        else => |err| return unexpectedErrno(err),

        .INVAL => unreachable, // invalid parameters
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        .NODEV => return error.SystemResources,
        .NOMEM => return error.SystemResources,
    }
}

pub const GetSockNameError = error{
    /// Insufficient resources were available in the system to perform the operation.
    SystemResources,

    /// The network subsystem has failed.
    NetworkSubsystemFailed,

    /// Socket hasn't been bound yet
    SocketNotBound,

    FileDescriptorNotASocket,
} || UnexpectedError;

/// Retrieve the locally-bound address of `sock` into `addr`/`addrlen`.
pub fn getsockname(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSockNameError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.getsockname(sock, addr, addrlen);
        if (rc == windows.ws2_32.SOCKET_ERROR) {
            switch (windows.ws2_32.WSAGetLastError()) {
                .WSANOTINITIALISED => unreachable,
                .WSAENETDOWN => return error.NetworkSubsystemFailed,
                .WSAEFAULT => unreachable, // addr or addrlen have invalid pointers or addrlen points to an incorrect value
                .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                .WSAEINVAL => return error.SocketNotBound,
                else => |err| return windows.unexpectedWSAError(err),
            }
        }
        return;
    } else {
        const rc = system.getsockname(sock, addr, addrlen);
        switch (errno(rc)) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),

            .BADF => unreachable, // always a race condition
            .FAULT => unreachable,
            .INVAL => unreachable, // invalid parameters
            .NOTSOCK => return error.FileDescriptorNotASocket,
            .NOBUFS => return error.SystemResources,
        }
    }
}

/// Retrieve the address of the peer connected to `sock` into `addr`/`addrlen`.
pub fn getpeername(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSockNameError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.getpeername(sock, addr, addrlen);
        if (rc == windows.ws2_32.SOCKET_ERROR) {
            switch (windows.ws2_32.WSAGetLastError()) {
                .WSANOTINITIALISED => unreachable,
                .WSAENETDOWN => return error.NetworkSubsystemFailed,
                .WSAEFAULT => unreachable, // addr or addrlen have invalid pointers or addrlen points to an incorrect value
                .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                .WSAEINVAL => return error.SocketNotBound,
                else => |err| return windows.unexpectedWSAError(err),
            }
        }
        return;
    } else {
        const rc = system.getpeername(sock, addr, addrlen);
        switch (errno(rc)) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),

            .BADF => unreachable, // always a race condition
            .FAULT =>
            unreachable,
            .INVAL => unreachable, // invalid parameters
            .NOTSOCK => return error.FileDescriptorNotASocket,
            .NOBUFS => return error.SystemResources,
        }
    }
}

pub const ConnectError = error{
    /// For UNIX domain sockets, which are identified by pathname: Write permission is denied on  the  socket
    /// file,  or  search  permission  is  denied  for  one of the directories in the path prefix.
    /// or
    /// The user tried to connect to a broadcast address without having the socket broadcast flag enabled  or
    /// the connection request failed because of a local firewall rule.
    PermissionDenied,

    /// Local address is already in use.
    AddressInUse,

    /// (Internet  domain  sockets)  The  socket  referred  to  by sockfd had not previously been bound to an
    /// address and, upon attempting to bind it to an ephemeral port, it was determined that all port numbers
    /// in    the    ephemeral    port    range    are   currently   in   use.    See   the   discussion   of
    /// /proc/sys/net/ipv4/ip_local_port_range in ip(7).
    AddressNotAvailable,

    /// The passed address didn't have the correct address family in its sa_family field.
    AddressFamilyNotSupported,

    /// Insufficient entries in the routing cache.
    SystemResources,

    /// A connect() on a stream socket found no one listening on the remote address.
    ConnectionRefused,

    /// Network is unreachable.
    NetworkUnreachable,

    /// Timeout  while  attempting  connection.   The server may be too busy to accept new connections.  Note
    /// that for IP sockets the timeout may be very long when syncookies are enabled on the server.
    ConnectionTimedOut,

    /// This error occurs when no global event loop is configured,
    /// and connecting to the socket would block.
    WouldBlock,

    /// The given path for the unix socket does not exist.
    FileNotFound,

    /// Connection was reset by peer before connect could complete.
    ConnectionResetByPeer,

    /// Socket is non-blocking and already has a pending connection in progress.
    ConnectionPending,
} || UnexpectedError;

/// Initiate a connection on a socket.
/// If `sockfd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN or EINPROGRESS is received.
pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) ConnectError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.ws2_32.connect(sock, sock_addr, @intCast(i32, len));
        if (rc == 0) return;
        switch (windows.ws2_32.WSAGetLastError()) {
            .WSAEADDRINUSE => return error.AddressInUse,
            .WSAEADDRNOTAVAIL => return error.AddressNotAvailable,
            .WSAECONNREFUSED => return error.ConnectionRefused,
            .WSAECONNRESET => return error.ConnectionResetByPeer,
            .WSAETIMEDOUT => return error.ConnectionTimedOut,
            .WSAEHOSTUNREACH, // TODO: should we return NetworkUnreachable in this case as well?
            .WSAENETUNREACH,
            => return error.NetworkUnreachable,
            .WSAEFAULT => unreachable,
            .WSAEINVAL => unreachable,
            .WSAEISCONN => unreachable,
            .WSAENOTSOCK => unreachable,
            .WSAEWOULDBLOCK => unreachable,
            .WSAEACCES => unreachable,
            .WSAENOBUFS => return error.SystemResources,
            .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
            else => |err| return windows.unexpectedWSAError(err),
        }
        return;
    }

    // Retries on EINTR; all other results either return or are impossible.
    while (true) {
        switch (errno(system.connect(sock, sock_addr, len))) {
            .SUCCESS => return,
            .ACCES => return error.PermissionDenied,
            .PERM => return error.PermissionDenied,
            .ADDRINUSE => return error.AddressInUse,
            .ADDRNOTAVAIL => return error.AddressNotAvailable,
            .AFNOSUPPORT => return error.AddressFamilyNotSupported,
            .AGAIN, .INPROGRESS => return error.WouldBlock,
            .ALREADY => return error.ConnectionPending,
            .BADF => unreachable, // sockfd is not a valid open file descriptor.
            .CONNREFUSED => return error.ConnectionRefused,
            .CONNRESET => return error.ConnectionResetByPeer,
            .FAULT => unreachable, // The socket structure address is outside the user's address space.
            .INTR => continue,
            .ISCONN => unreachable, // The socket is already connected.
            .NETUNREACH => return error.NetworkUnreachable,
            .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
            .PROTOTYPE => unreachable, // The socket type does not support the requested communications protocol.
            .TIMEDOUT => return error.ConnectionTimedOut,
            .NOENT => return error.FileNotFound, // Returned when socket is AF.UNIX and the given path does not exist.
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Reads the pending error code on `sockfd` via `getsockopt(SOL.SOCKET, SO.ERROR)`
/// and maps it to a `ConnectError`. Returns successfully if no error is pending.
/// NOTE(review): typically used to retrieve the result of a non-blocking
/// `connect` — confirm against callers.
pub fn getsockoptError(sockfd: fd_t) ConnectError!void {
    var err_code: i32 = undefined;
    var size: u32 = @sizeOf(u32);
    const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @ptrCast([*]u8, &err_code), &size);
    assert(size == 4);
    switch (errno(rc)) {
        .SUCCESS => switch (@intToEnum(E, err_code)) {
            .SUCCESS => return,
            .ACCES => return error.PermissionDenied,
            .PERM => return error.PermissionDenied,
            .ADDRINUSE => return error.AddressInUse,
            .ADDRNOTAVAIL => return error.AddressNotAvailable,
            .AFNOSUPPORT => return error.AddressFamilyNotSupported,
            .AGAIN => return error.SystemResources,
            .ALREADY => return error.ConnectionPending,
            .BADF => unreachable, // sockfd is not a valid open file descriptor.
            .CONNREFUSED => return error.ConnectionRefused,
            .FAULT => unreachable, // The socket structure address is outside the user's address space.
            .ISCONN => unreachable, // The socket is already connected.
            .NETUNREACH => return error.NetworkUnreachable,
            .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
            .PROTOTYPE => unreachable, // The socket type does not support the requested communications protocol.
            .TIMEDOUT => return error.ConnectionTimedOut,
            .CONNRESET => return error.ConnectionResetByPeer,
            else => |err| return unexpectedErrno(err),
        },
        .BADF => unreachable, // The argument sockfd is not a valid file descriptor.
        .FAULT => unreachable, // The address pointed to by optval or optlen is not in a valid part of the process address space.
        .INVAL => unreachable,
        .NOPROTOOPT => unreachable, // The option is unknown at the level indicated.
        .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
        else => |err| return unexpectedErrno(err),
    }
}

/// Result of `waitpid`: the pid that changed state and its raw status word.
pub const WaitPidResult = struct {
    pid: pid_t,
    status: u32,
};

/// Waits for the process identified by `pid` to change state.
/// Retries on EINTR. Invalid arguments or a nonexistent child are asserted
/// unreachable (the latter would be a race condition to handle).
pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult {
    // libc's waitpid takes `c_int` for status/flags; the raw syscall uses u32.
    const Status = if (builtin.link_libc) c_int else u32;
    var status: Status = undefined;
    while (true) {
        const rc = system.waitpid(pid, &status, if (builtin.link_libc) @intCast(c_int, flags) else flags);
        switch (errno(rc)) {
            .SUCCESS => return .{
                .pid = @intCast(pid_t, rc),
                .status = @bitCast(u32, status),
            },
            .INTR => continue,
            .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error.
            .INVAL => unreachable, // Invalid flags.
            else => unreachable,
        }
    }
}

pub const FStatError = error{
    SystemResources,

    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to get its filestat information.
    AccessDenied,
} || UnexpectedError;

/// Return information about a file descriptor.
pub fn fstat(fd: fd_t) FStatError!Stat {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var stat: wasi.filestat_t = undefined;
        switch (wasi.fd_filestat_get(fd, &stat)) {
            .SUCCESS => return Stat.fromFilestat(stat),
            .INVAL => unreachable,
            .BADF => unreachable, // Always a race condition.
            .NOMEM => return error.SystemResources,
            .ACCES => return error.AccessDenied,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        @compileError("fstat is not yet implemented on Windows");
    }

    // On Linux with libc, use the 64-bit variant for large-file support.
    const fstat_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.fstat64
    else
        system.fstat;

    var stat = mem.zeroes(Stat);
    switch (errno(fstat_sym(fd, &stat))) {
        .SUCCESS => return stat,
        .INVAL => unreachable,
        .BADF => unreachable, // Always a race condition.
        .NOMEM => return error.SystemResources,
        .ACCES => return error.AccessDenied,
        else => |err| return unexpectedErrno(err),
    }
}

pub const FStatAtError = FStatError || error{ NameTooLong, FileNotFound, SymLinkLoop };

/// Similar to `fstat`, but returns stat of a resource pointed to by `pathname`
/// which is relative to `dirfd` handle.
/// See also `fstatatZ` and `fstatatWasi`.
pub fn fstatat(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!Stat {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return fstatatWasi(dirfd, pathname, flags);
    } else if (builtin.os.tag == .windows) {
        @compileError("fstatat is not yet implemented on Windows");
    } else {
        const pathname_c = try toPosixPath(pathname);
        return fstatatZ(dirfd, &pathname_c, flags);
    }
}

/// WASI-only. Same as `fstatat` but targeting WASI.
/// See also `fstatat`.
pub fn fstatatWasi(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!Stat {
    var stat: wasi.filestat_t = undefined;
    switch (wasi.path_filestat_get(dirfd, flags, pathname.ptr, pathname.len, &stat)) {
        .SUCCESS => return Stat.fromFilestat(stat),
        .INVAL => unreachable,
        .BADF => unreachable, // Always a race condition.
        .NOMEM => return error.SystemResources,
        .ACCES => return error.AccessDenied,
        .FAULT => unreachable,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.FileNotFound,
        .NOTCAPABLE => return error.AccessDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `fstatat` but `pathname` is null-terminated.
/// See also `fstatat`.
pub fn fstatatZ(dirfd: fd_t, pathname: [*:0]const u8, flags: u32) FStatAtError!Stat {
    // On Linux with libc, use the 64-bit variant for large-file support.
    const fstatat_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.fstatat64
    else
        system.fstatat;

    var stat = mem.zeroes(Stat);
    switch (errno(fstatat_sym(dirfd, pathname, &stat, flags))) {
        .SUCCESS => return stat,
        .INVAL => unreachable,
        .BADF => unreachable, // Always a race condition.
        .NOMEM => return error.SystemResources,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .FAULT => unreachable,
        .NAMETOOLONG => return error.NameTooLong,
        .LOOP => return error.SymLinkLoop,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.FileNotFound,
        else => |err| return unexpectedErrno(err),
    }
}

pub const KQueueError = error{
    /// The per-process limit on the number of open file descriptors has been reached.
    ProcessFdQuotaExceeded,

    /// The system-wide limit on the total number of open files has been reached.
    SystemFdQuotaExceeded,
} || UnexpectedError;

/// Creates a new kernel event queue and returns its file descriptor.
pub fn kqueue() KQueueError!i32 {
    const rc = system.kqueue();
    switch (errno(rc)) {
        .SUCCESS => return @intCast(i32, rc),
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        else => |err| return unexpectedErrno(err),
    }
}

pub const KEventError = error{
    /// The process does not have permission to register a filter.
    AccessDenied,

    /// The event could not be found to be modified or deleted.
    EventNotFound,

    /// No memory was available to register the event.
    SystemResources,

    /// The specified process to attach to does not exist.
    ProcessNotFound,

    /// changelist or eventlist had too many items on it.
    /// TODO remove this possibility
    Overflow,
};

/// Registers the events in `changelist` with the kqueue `kq` and/or waits for
/// events, writing up to `eventlist.len` of them into `eventlist`.
/// Returns the number of events placed in `eventlist`. Retries on EINTR.
pub fn kevent(
    kq: i32,
    changelist: []const Kevent,
    eventlist: []Kevent,
    timeout: ?*const timespec,
) KEventError!usize {
    while (true) {
        const rc = system.kevent(
            kq,
            changelist.ptr,
            try math.cast(c_int, changelist.len),
            eventlist.ptr,
            try math.cast(c_int, eventlist.len),
            timeout,
        );
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .ACCES => return error.AccessDenied,
            .FAULT => unreachable,
            .BADF => unreachable, // Always a race condition.
            .INTR => continue,
            .INVAL => unreachable,
            .NOENT => return error.EventNotFound,
            .NOMEM => return error.SystemResources,
            .SRCH => return error.ProcessNotFound,
            else => unreachable,
        }
    }
}

pub const INotifyInitError = error{
    ProcessFdQuotaExceeded,
    SystemFdQuotaExceeded,
    SystemResources,
} || UnexpectedError;

/// initialize an inotify instance
pub fn inotify_init1(flags: u32) INotifyInitError!i32 {
    const rc = system.inotify_init1(flags);
    switch (errno(rc)) {
        .SUCCESS => return @intCast(i32, rc),
        .INVAL => unreachable,
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

pub const INotifyAddWatchError = error{
    AccessDenied,
    NameTooLong,
    FileNotFound,
    SystemResources,
    UserResourceLimitReached,
    NotDir,
} || UnexpectedError;

/// add a watch to an initialized inotify instance
pub fn inotify_add_watch(inotify_fd: i32, pathname: []const u8, mask: u32) INotifyAddWatchError!i32 {
    const pathname_c = try toPosixPath(pathname);
    return inotify_add_watchZ(inotify_fd, &pathname_c, mask);
}

/// Same as `inotify_add_watch` except pathname is null-terminated.
pub fn inotify_add_watchZ(inotify_fd: i32, pathname: [*:0]const u8, mask: u32) INotifyAddWatchError!i32 {
    const rc = system.inotify_add_watch(inotify_fd, pathname, mask);
    switch (errno(rc)) {
        .SUCCESS => return @intCast(i32, rc),
        .ACCES => return error.AccessDenied,
        .BADF => unreachable,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.UserResourceLimitReached,
        .NOTDIR => return error.NotDir,
        else => |err| return unexpectedErrno(err),
    }
}

/// remove an existing watch from an inotify instance
pub fn inotify_rm_watch(inotify_fd: i32, wd: i32) void {
    // Removal cannot fail for valid arguments; everything else is a caller bug.
    switch (errno(system.inotify_rm_watch(inotify_fd, wd))) {
        .SUCCESS => return,
        .BADF => unreachable,
        .INVAL => unreachable,
        else => unreachable,
    }
}

pub const MProtectError = error{
    /// The memory cannot be given the specified access.  This can happen, for example, if you
    /// mmap(2) a file to which you have read-only access, then ask mprotect() to mark it
    /// PROT_WRITE.
    AccessDenied,

    /// Changing  the  protection  of a memory region would result in the total number of map‐
    /// pings with distinct attributes (e.g., read versus read/write protection) exceeding the
    /// allowed maximum.  (For example, making the protection of a range PROT_READ in the mid‐
    /// dle of a region currently protected as PROT_READ|PROT_WRITE would result in three map‐
    /// pings: two read/write mappings at each end and a read-only mapping in the middle.)
    OutOfMemory,
} || UnexpectedError;

/// `memory.len` must be page-aligned.
pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
    assert(mem.isAligned(memory.len, mem.page_size));
    switch (errno(system.mprotect(memory.ptr, memory.len, protection))) {
        .SUCCESS => return,
        .INVAL => unreachable,
        .ACCES => return error.AccessDenied,
        .NOMEM => return error.OutOfMemory,
        else => |err| return unexpectedErrno(err),
    }
}

pub const ForkError = error{SystemResources} || UnexpectedError;

/// Creates a new process by duplicating the calling process.
/// Returns 0 in the child and the child's pid in the parent.
/// NOTE(review): the return-value convention follows fork(2) — confirm
/// `system.fork` preserves it on all supported targets.
pub fn fork() ForkError!pid_t {
    const rc = system.fork();
    switch (errno(rc)) {
        .SUCCESS => return @intCast(pid_t, rc),
        .AGAIN => return error.SystemResources,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

pub const MMapError = error{
    /// The underlying filesystem of the specified file does not support memory mapping.
    MemoryMappingNotSupported,

    /// A file descriptor refers to a non-regular file. Or a file mapping was requested,
    /// but the file descriptor is not open for reading. Or `MAP.SHARED` was requested
    /// and `PROT_WRITE` is set, but the file descriptor is not open in `O.RDWR` mode.
    /// Or `PROT_WRITE` is set, but the file is append-only.
    AccessDenied,

    /// The `prot` argument asks for `PROT_EXEC` but the mapped area belongs to a file on
    /// a filesystem that was mounted no-exec.
    PermissionDenied,
    LockedMemoryLimitExceeded,
    OutOfMemory,
} || UnexpectedError;

/// Map files or devices into memory.
/// `length` does not need to be aligned.
/// Use of a mapped region can result in these signals:
/// * SIGSEGV - Attempted write into a region mapped as read-only.
/// * SIGBUS - Attempted  access to a portion of the buffer that does not correspond to the file
pub fn mmap(
    ptr: ?[*]align(mem.page_size) u8,
    length: usize,
    prot: u32,
    flags: u32,
    fd: fd_t,
    offset: u64,
) MMapError![]align(mem.page_size) u8 {
    // On Linux with libc, use the 64-bit variant for large-file support.
    const mmap_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.mmap64
    else
        system.mmap;

    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned

    const rc = mmap_sym(ptr, length, prot, flags, fd, ioffset);
    // libc signals failure via MAP_FAILED + errno; the raw syscall encodes the
    // error in the return value itself.
    const err = if (builtin.link_libc) blk: {
        if (rc != std.c.MAP.FAILED) return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length];
        break :blk @intToEnum(E, system._errno().*);
    } else blk: {
        const err = errno(rc);
        if (err == .SUCCESS) return @intToPtr([*]align(mem.page_size) u8, rc)[0..length];
        break :blk err;
    };
    switch (err) {
        .SUCCESS => unreachable,
        .TXTBSY => return error.AccessDenied,
        .ACCES => return error.AccessDenied,
        .PERM => return error.PermissionDenied,
        .AGAIN => return error.LockedMemoryLimitExceeded,
        .BADF => unreachable, // Always a race condition.
        .OVERFLOW => unreachable, // The number of pages used for length + offset would overflow.
        .NODEV => return error.MemoryMappingNotSupported,
        .INVAL => unreachable, // Invalid parameters to mmap()
        .NOMEM => return error.OutOfMemory,
        else => return unexpectedErrno(err),
    }
}

/// Deletes the mappings for the specified address range, causing
/// further references to addresses within the range to generate invalid memory references.
/// Note that while POSIX allows unmapping a region in the middle of an existing mapping,
/// Zig's munmap function does not, for two reasons:
/// * It violates the Zig principle that resource deallocation must succeed.
/// * The Windows function, VirtualFree, has this restriction.
pub fn munmap(memory: []align(mem.page_size) const u8) void {
    switch (errno(system.munmap(memory.ptr, memory.len))) {
        .SUCCESS => return,
        .INVAL => unreachable, // Invalid parameters.
        .NOMEM => unreachable, // Attempted to unmap a region in the middle of an existing mapping.
        else => unreachable,
    }
}

pub const AccessError = error{
    PermissionDenied,
    FileNotFound,
    NameTooLong,
    InputOutput,
    SystemResources,
    BadPathName,
    FileBusy,
    SymLinkLoop,
    ReadOnlyFileSystem,

    /// On Windows, file paths must be valid Unicode.
    InvalidUtf8,
} || UnexpectedError;

/// check user's permissions for a file
/// TODO currently this assumes `mode` is `F.OK` on Windows.
pub fn access(path: []const u8, mode: u32) AccessError!void {
    if (builtin.os.tag == .windows) {
        const path_w = try windows.sliceToPrefixedFileW(path);
        _ = try windows.GetFileAttributesW(path_w.span().ptr);
        return;
    }
    const path_c = try toPosixPath(path);
    return accessZ(&path_c, mode);
}

/// Same as `access` except `path` is null-terminated.
pub fn accessZ(path: [*:0]const u8, mode: u32) AccessError!void {
    if (builtin.os.tag == .windows) {
        const path_w = try windows.cStrToPrefixedFileW(path);
        _ = try windows.GetFileAttributesW(path_w.span().ptr);
        return;
    }
    switch (errno(system.access(path, mode))) {
        .SUCCESS => return,
        .ACCES => return error.PermissionDenied,
        .ROFS => return error.ReadOnlyFileSystem,
        .LOOP => return error.SymLinkLoop,
        .TXTBSY => return error.FileBusy,
        .NOTDIR => return error.FileNotFound,
        .NOENT => return error.FileNotFound,
        .NAMETOOLONG => return error.NameTooLong,
        .INVAL => unreachable,
        .FAULT => unreachable,
        .IO => return error.InputOutput,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

/// Call from Windows-specific code if you already have a UTF-16LE encoded, null terminated string.
/// Otherwise use `access` or `accessC`.
/// TODO currently this ignores `mode`.
pub fn accessW(path: [*:0]const u16, mode: u32) windows.GetFileAttributesError!void {
    _ = mode;
    const ret = try windows.GetFileAttributesW(path);
    if (ret != windows.INVALID_FILE_ATTRIBUTES) {
        return;
    }
    switch (windows.kernel32.GetLastError()) {
        .FILE_NOT_FOUND => return error.FileNotFound,
        .PATH_NOT_FOUND => return error.FileNotFound,
        .ACCESS_DENIED => return error.PermissionDenied,
        else => |err| return windows.unexpectedError(err),
    }
}

/// Check user's permissions for a file, based on an open directory handle.
/// TODO currently this ignores `mode` and `flags` on Windows.
pub fn faccessat(dirfd: fd_t, path: []const u8, mode: u32, flags: u32) AccessError!void {
    if (builtin.os.tag == .windows) {
        const path_w = try windows.sliceToPrefixedFileW(path);
        return faccessatW(dirfd, path_w.span().ptr, mode, flags);
    }
    const path_c = try toPosixPath(path);
    return faccessatZ(dirfd, &path_c, mode, flags);
}

/// Same as `faccessat` except the path parameter is null-terminated.
pub fn faccessatZ(dirfd: fd_t, path: [*:0]const u8, mode: u32, flags: u32) AccessError!void {
    if (builtin.os.tag == .windows) {
        const path_w = try windows.cStrToPrefixedFileW(path);
        return faccessatW(dirfd, path_w.span().ptr, mode, flags);
    }
    switch (errno(system.faccessat(dirfd, path, mode, flags))) {
        .SUCCESS => return,
        .ACCES => return error.PermissionDenied,
        .ROFS => return error.ReadOnlyFileSystem,
        .LOOP => return error.SymLinkLoop,
        .TXTBSY => return error.FileBusy,
        .NOTDIR => return error.FileNotFound,
        .NOENT => return error.FileNotFound,
        .NAMETOOLONG => return error.NameTooLong,
        .INVAL => unreachable,
        .FAULT => unreachable,
        .IO => return error.InputOutput,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `faccessat` except asserts the target is Windows and the path parameter
/// is NtDll-prefixed, null-terminated, WTF-16 encoded.
/// TODO currently this ignores `mode` and `flags`
pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32) AccessError!void {
    _ = mode;
    _ = flags;
    // "." and ".." relative to an open directory handle always exist.
    if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
        return;
    }
    if (sub_path_w[0] == '.' and sub_path_w[1] == '.' and sub_path_w[2] == 0) {
        return;
    }

    const path_len_bytes = math.cast(u16, mem.sliceTo(sub_path_w, 0).len * 2) catch |err| switch (err) {
        error.Overflow => return error.NameTooLong,
    };
    var nt_name = windows.UNICODE_STRING{
        .Length = path_len_bytes,
        .MaximumLength = path_len_bytes,
        .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
    };
    var attr = windows.OBJECT_ATTRIBUTES{
        .Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
        .RootDirectory = if (std.fs.path.isAbsoluteWindowsW(sub_path_w)) null else dirfd,
        .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here.
        .ObjectName = &nt_name,
        .SecurityDescriptor = null,
        .SecurityQualityOfService = null,
    };
    var basic_info: windows.FILE_BASIC_INFORMATION = undefined;
    switch (windows.ntdll.NtQueryAttributesFile(&attr, &basic_info)) {
        .SUCCESS => return,
        .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
        .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
        .OBJECT_NAME_INVALID => unreachable,
        .INVALID_PARAMETER => unreachable,
        .ACCESS_DENIED => return error.PermissionDenied,
        .OBJECT_PATH_SYNTAX_BAD => unreachable,
        else => |rc| return windows.unexpectedStatus(rc),
    }
}

pub const PipeError = error{
    SystemFdQuotaExceeded,
    ProcessFdQuotaExceeded,
} || UnexpectedError;

/// Creates a unidirectional data channel that can be used for interprocess communication.
pub fn pipe() PipeError![2]fd_t {
    var fds: [2]fd_t = undefined;
    switch (errno(system.pipe(&fds))) {
        .SUCCESS => return fds,
        .INVAL => unreachable, // Invalid parameters to pipe()
        .FAULT => unreachable, // Invalid fds pointer
        .NFILE => return error.SystemFdQuotaExceeded,
        .MFILE => return error.ProcessFdQuotaExceeded,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `pipe` except `flags` (such as `O.CLOEXEC` / `O.NONBLOCK`) are
/// applied to both ends. When the OS lacks a native `pipe2`, the flags are
/// emulated with `fcntl` after the plain `pipe` call.
pub fn pipe2(flags: u32) PipeError![2]fd_t {
    if (@hasDecl(system, "pipe2")) {
        var fds: [2]fd_t = undefined;
        switch (errno(system.pipe2(&fds, flags))) {
            .SUCCESS => return fds,
            .INVAL => unreachable, // Invalid flags
            .FAULT => unreachable, // Invalid fds pointer
            .NFILE => return error.SystemFdQuotaExceeded,
            .MFILE => return error.ProcessFdQuotaExceeded,
            else => |err| return unexpectedErrno(err),
        }
    }

    var fds: [2]fd_t = try pipe();
    // If applying a flag fails below, close both ends so nothing leaks.
    errdefer {
        close(fds[0]);
        close(fds[1]);
    }

    if (flags == 0)
        return fds;

    // O.CLOEXEC is special, it's a file descriptor flag and must be set using
    // F.SETFD.
    if (flags & O.CLOEXEC != 0) {
        for (fds) |fd| {
            switch (errno(system.fcntl(fd, F.SETFD, @as(u32, FD_CLOEXEC)))) {
                .SUCCESS => {},
                .INVAL => unreachable, // Invalid flags
                .BADF => unreachable, // Always a race condition
                else => |err| return unexpectedErrno(err),
            }
        }
    }

    const new_flags = flags & ~@as(u32, O.CLOEXEC);
    // Set every other flag affecting the file status using F.SETFL.
    if (new_flags != 0) {
        for (fds) |fd| {
            switch (errno(system.fcntl(fd, F.SETFL, new_flags))) {
                .SUCCESS => {},
                .INVAL => unreachable, // Invalid flags
                .BADF => unreachable, // Always a race condition
                else => |err| return unexpectedErrno(err),
            }
        }
    }

    return fds;
}

pub const SysCtlError = error{
    PermissionDenied,
    SystemResources,
    NameTooLong,
    UnknownName,
} || UnexpectedError;

/// Retrieves and/or sets kernel state identified by the MIB `name`.
/// `oldp`/`oldlenp` receive the current value; `newp`/`newlen` supply a new one.
pub fn sysctl(
    name: []const c_int,
    oldp: ?*anyopaque,
    oldlenp: ?*usize,
    newp: ?*anyopaque,
    newlen: usize,
) SysCtlError!void {
    if (builtin.os.tag == .wasi) {
        @panic("unsupported"); // TODO should be compile error, not panic
    }
    if (builtin.os.tag == .haiku) {
        @panic("unsupported"); // TODO should be compile error, not panic
    }

    const name_len = math.cast(c_uint, name.len) catch return error.NameTooLong;
    switch (errno(system.sysctl(name.ptr, name_len, oldp, oldlenp, newp, newlen))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .PERM => return error.PermissionDenied,
        .NOMEM => return error.SystemResources,
        .NOENT => return error.UnknownName,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `sysctl` except the state is identified by a null-terminated
/// string `name` instead of a MIB array.
pub fn sysctlbynameZ(
    name: [*:0]const u8,
    oldp: ?*anyopaque,
    oldlenp: ?*usize,
    newp: ?*anyopaque,
    newlen: usize,
) SysCtlError!void {
    if (builtin.os.tag == .wasi) {
        @panic("unsupported"); // TODO should be compile error, not panic
    }
    if (builtin.os.tag == .haiku) {
        @panic("unsupported"); // TODO should be compile error, not panic
    }

    switch (errno(system.sysctlbyname(name, oldp, oldlenp, newp, newlen))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .PERM => return error.PermissionDenied,
        .NOMEM => return error.SystemResources,
        .NOENT => return error.UnknownName,
        else => |err| return unexpectedErrno(err),
    }
}

/// Fills `tv` with the current time and `tz` with the timezone; either may be null.
pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) void {
    switch (errno(system.gettimeofday(tv, tz))) {
        .SUCCESS => return,
        .INVAL => unreachable,
        else => unreachable,
    }
}

pub const SeekError = error{
    Unseekable,

    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to seek on it.
    AccessDenied,
} || UnexpectedError;

/// Repositions read/write file offset relative to the beginning.
pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
    // 32-bit Linux without libc: llseek is needed for a full 64-bit offset.
    if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
        var result: u64 = undefined;
        switch (errno(system.llseek(fd, offset, &result, SEEK.SET))) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        return windows.SetFilePointerEx_BEGIN(fd, offset);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var new_offset: wasi.filesize_t = undefined;
        switch (wasi.fd_seek(fd, @bitCast(wasi.filedelta_t, offset), .SET, &new_offset)) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.lseek64
    else
        system.lseek;

    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
    switch (errno(lseek_sym(fd, ioffset, SEEK.SET))) {
        .SUCCESS => return,
        .BADF => unreachable, // always a race condition
        .INVAL => return error.Unseekable,
        .OVERFLOW => return error.Unseekable,
        .SPIPE => return error.Unseekable,
        .NXIO => return error.Unseekable,
        else => |err| return unexpectedErrno(err),
    }
}

/// Repositions read/write file offset relative to the current offset.
pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
    // 32-bit Linux without libc: llseek is needed for a full 64-bit offset.
    if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
        var result: u64 = undefined;
        switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.CUR))) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        return windows.SetFilePointerEx_CURRENT(fd, offset);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var new_offset: wasi.filesize_t = undefined;
        switch (wasi.fd_seek(fd, offset, .CUR, &new_offset)) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.lseek64
    else
        system.lseek;

    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
    switch (errno(lseek_sym(fd, ioffset, SEEK.CUR))) {
        .SUCCESS => return,
        .BADF => unreachable, // always a race condition
        .INVAL => return error.Unseekable,
        .OVERFLOW => return error.Unseekable,
        .SPIPE => return error.Unseekable,
        .NXIO => return error.Unseekable,
        else => |err| return unexpectedErrno(err),
    }
}

/// Repositions read/write file offset relative to the end.
pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void {
    // 32-bit Linux without libc: llseek is needed for a full 64-bit offset.
    if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
        var result: u64 = undefined;
        switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.END))) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        return windows.SetFilePointerEx_END(fd, offset);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var new_offset: wasi.filesize_t = undefined;
        switch (wasi.fd_seek(fd, offset, .END, &new_offset)) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.lseek64
    else
        system.lseek;

    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
    switch (errno(lseek_sym(fd, ioffset, SEEK.END))) {
        .SUCCESS => return,
        .BADF => unreachable, // always a race condition
        .INVAL => return error.Unseekable,
        .OVERFLOW => return error.Unseekable,
        .SPIPE => return error.Unseekable,
        .NXIO => return error.Unseekable,
        else => |err| return unexpectedErrno(err),
    }
}

/// Returns the read/write file offset relative to the beginning.
pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 {
    // Implemented as a zero-length relative seek that reports the resulting offset.
    if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
        var result: u64 = undefined;
        switch (errno(system.llseek(fd, 0, &result, SEEK.CUR))) {
            .SUCCESS => return result,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        return windows.SetFilePointerEx_CURRENT_get(fd);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var new_offset: wasi.filesize_t = undefined;
        switch (wasi.fd_seek(fd, 0, .CUR, &new_offset)) {
            .SUCCESS => return new_offset,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.lseek64
    else
        system.lseek;

    const rc = lseek_sym(fd, 0, SEEK.CUR);
    switch (errno(rc)) {
        .SUCCESS => return @bitCast(u64, rc),
        .BADF => unreachable, // always a race condition
        .INVAL => return error.Unseekable,
        .OVERFLOW => return error.Unseekable,
        .SPIPE => return error.Unseekable,
        .NXIO => return error.Unseekable,
        else => |err| return unexpectedErrno(err),
    }
}

pub const FcntlError = error{
    PermissionDenied,
    FileBusy,
    ProcessFdQuotaExceeded,
    Locked,
} || UnexpectedError;

/// Manipulates the file descriptor `fd` according to `cmd`. Retries on EINTR.
/// Returns the (command-dependent) result value of the fcntl call.
pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) FcntlError!usize {
    while (true) {
        const rc = system.fcntl(fd, cmd, arg);
        switch (errno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .INTR => continue,
            .ACCES => return error.Locked,
            .BADF => unreachable,
            .BUSY => return error.FileBusy,
            .INVAL => unreachable, // invalid parameters
            .PERM => return error.PermissionDenied,
            .MFILE => return error.ProcessFdQuotaExceeded,
            .NOTDIR => unreachable, // invalid parameter
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Applies the `SOCK.CLOEXEC` and `SOCK.NONBLOCK` bits of `flags` to `sock`,
/// using `fcntl` on POSIX and `ioctlsocket` (non-blocking only) on Windows.
fn setSockFlags(sock: socket_t, flags: u32) !void {
    if ((flags & SOCK.CLOEXEC) != 0) {
        if (builtin.os.tag == .windows) {
            // TODO: Find out if this is supported for sockets
        } else {
            var fd_flags = fcntl(sock, F.GETFD, 0) catch |err| switch (err) {
                error.FileBusy => unreachable,
                error.Locked => unreachable,
                error.PermissionDenied => unreachable,
                else => |e| return e,
            };
            fd_flags |= FD_CLOEXEC;
            _ = fcntl(sock, F.SETFD, fd_flags) catch |err| switch (err) {
                error.FileBusy => unreachable,
                error.Locked => unreachable,
                error.PermissionDenied => unreachable,
                else => |e| return e,
            };
        }
    }
    if ((flags & SOCK.NONBLOCK) != 0) {
        if (builtin.os.tag == .windows) {
            var mode: c_ulong = 1;
            if (windows.ws2_32.ioctlsocket(sock, windows.ws2_32.FIONBIO, &mode) == windows.ws2_32.SOCKET_ERROR) {
                switch (windows.ws2_32.WSAGetLastError()) {
                    .WSANOTINITIALISED => unreachable,
                    .WSAENETDOWN => return error.NetworkSubsystemFailed,
                    .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                    // TODO: handle more errors
                    else => |err| return windows.unexpectedWSAError(err),
                }
            }
        } else {
            var fl_flags = fcntl(sock, F.GETFL, 0) catch |err| switch (err) {
                error.FileBusy => unreachable,
                error.Locked => unreachable,
                error.PermissionDenied => unreachable,
                else => |e| return e,
            };
            fl_flags |= O.NONBLOCK;
            _ = fcntl(sock, F.SETFL, fl_flags) catch |err| switch (err) {
                error.FileBusy => unreachable,
                error.Locked => unreachable,
                error.PermissionDenied => unreachable,
                else => |e| return e,
            };
        }
    }
}

pub const FlockError = error{
    WouldBlock,

    /// The kernel ran out of memory for allocating file locks
    SystemResources,

    /// The underlying filesystem does not support file locks
    FileLocksNotSupported,
} || UnexpectedError;

/// Depending on the operating system `flock` may or may not interact with
/// `fcntl` locks made by other processes.
pub fn flock(fd: fd_t, operation: i32) FlockError!void { while (true) { const rc = system.flock(fd, operation); switch (errno(rc)) { .SUCCESS => return, .BADF => unreachable, .INTR => continue, .INVAL => unreachable, // invalid parameters .NOLCK => return error.SystemResources, .AGAIN => return error.WouldBlock, // TODO: integrate with async instead of just returning an error .OPNOTSUPP => return error.FileLocksNotSupported, else => |err| return unexpectedErrno(err), } } } pub const RealPathError = error{ FileNotFound, AccessDenied, NameTooLong, NotSupported, NotDir, SymLinkLoop, InputOutput, FileTooBig, IsDir, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, NoDevice, SystemResources, NoSpaceLeft, FileSystem, BadPathName, DeviceBusy, SharingViolation, PipeBusy, /// On Windows, file paths must be valid Unicode. InvalidUtf8, PathAlreadyExists, } || UnexpectedError; /// Return the canonicalized absolute pathname. /// Expands all symbolic links and resolves references to `.`, `..`, and /// extra `/` characters in `pathname`. /// The return value is a slice of `out_buffer`, but not necessarily from the beginning. /// See also `realpathZ` and `realpathW`. pub fn realpath(pathname: []const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { if (builtin.os.tag == .windows) { const pathname_w = try windows.sliceToPrefixedFileW(pathname); return realpathW(pathname_w.span(), out_buffer); } if (builtin.os.tag == .wasi) { @compileError("Use std.fs.wasi.PreopenList to obtain valid Dir handles instead of using absolute paths"); } const pathname_c = try toPosixPath(pathname); return realpathZ(&pathname_c, out_buffer); } /// Same as `realpath` except `pathname` is null-terminated. 
pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { if (builtin.os.tag == .windows) { const pathname_w = try windows.cStrToPrefixedFileW(pathname); return realpathW(pathname_w.span(), out_buffer); } if (!builtin.link_libc) { const flags = if (builtin.os.tag == .linux) O.PATH | O.NONBLOCK | O.CLOEXEC else O.NONBLOCK | O.CLOEXEC; const fd = openZ(pathname, flags, 0) catch |err| switch (err) { error.FileLocksNotSupported => unreachable, error.WouldBlock => unreachable, error.FileBusy => unreachable, // not asking for write permissions else => |e| return e, }; defer close(fd); return getFdPath(fd, out_buffer); } const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@intToEnum(E, std.c._errno().*)) { .SUCCESS => unreachable, .INVAL => unreachable, .BADF => unreachable, .FAULT => unreachable, .ACCES => return error.AccessDenied, .NOENT => return error.FileNotFound, .OPNOTSUPP => return error.NotSupported, .NOTDIR => return error.NotDir, .NAMETOOLONG => return error.NameTooLong, .LOOP => return error.SymLinkLoop, .IO => return error.InputOutput, else => |err| return unexpectedErrno(err), }; return mem.sliceTo(result_path, 0); } /// Same as `realpath` except `pathname` is UTF16LE-encoded. 
pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
    const w = windows;
    const dir = std.fs.cwd().fd;
    const access_mask = w.GENERIC_READ | w.SYNCHRONIZE;
    const share_access = w.FILE_SHARE_READ;
    const creation = w.FILE_OPEN;
    // Open a handle to the path, then resolve the canonical path of the handle.
    const h_file = blk: {
        const res = w.OpenFile(pathname, .{
            .dir = dir,
            .access_mask = access_mask,
            .share_access = share_access,
            .creation = creation,
            .io_mode = .blocking,
        }) catch |err| switch (err) {
            // Retry with the directory-only filter when the path is a directory.
            error.IsDir => break :blk w.OpenFile(pathname, .{
                .dir = dir,
                .access_mask = access_mask,
                .share_access = share_access,
                .creation = creation,
                .io_mode = .blocking,
                .filter = .dir_only,
            }) catch |er| switch (er) {
                error.WouldBlock => unreachable,
                else => |e2| return e2,
            },
            error.WouldBlock => unreachable,
            else => |e| return e,
        };
        break :blk res;
    };
    defer w.CloseHandle(h_file);

    return getFdPath(h_file, out_buffer);
}

/// Return canonical path of handle `fd`.
/// This function is very host-specific and is not universally supported by all hosts.
/// For example, while it generally works on Linux, macOS or Windows, it is unsupported
/// on FreeBSD, or WASI.
pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
    switch (builtin.os.tag) {
        .windows => {
            var wide_buf: [windows.PATH_MAX_WIDE]u16 = undefined;
            const wide_slice = try windows.GetFinalPathNameByHandle(fd, .{}, wide_buf[0..]);

            // Trust that Windows gives us valid UTF-16LE.
            const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable;
            return out_buffer[0..end_index];
        },
        .macos, .ios, .watchos, .tvos => {
            // On macOS, we can use F.GETPATH fcntl command to query the OS for
            // the path to the file descriptor.
            @memset(out_buffer, 0, MAX_PATH_BYTES);
            switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
                .SUCCESS => {},
                .BADF => return error.FileNotFound,
                // TODO man pages for fcntl on macOS don't really tell you what
                // errno values to expect when command is F.GETPATH...
                else => |err| return unexpectedErrno(err),
            }
            // The kernel null-terminates the result inside the zeroed buffer.
            const len = mem.indexOfScalar(u8, out_buffer[0..], @as(u8, 0)) orelse MAX_PATH_BYTES;
            return out_buffer[0..len];
        },
        .linux => {
            // Resolve the /proc/self/fd symlink for this descriptor.
            // Buffer sized for the worst-case (most negative) fd value.
            var procfs_buf: ["/proc/self/fd/-2147483648".len:0]u8 = undefined;
            const proc_path = std.fmt.bufPrint(procfs_buf[0..], "/proc/self/fd/{d}\x00", .{fd}) catch unreachable;

            const target = readlinkZ(std.meta.assumeSentinel(proc_path.ptr, 0), out_buffer) catch |err| {
                switch (err) {
                    error.UnsupportedReparsePointType => unreachable, // Windows only,
                    error.NotLink => unreachable,
                    else => |e| return e,
                }
            };
            return target;
        },
        .solaris => {
            // Same idea as Linux, via /proc/self/path.
            var procfs_buf: ["/proc/self/path/-2147483648".len:0]u8 = undefined;
            const proc_path = std.fmt.bufPrintZ(procfs_buf[0..], "/proc/self/path/{d}", .{fd}) catch unreachable;

            const target = readlinkZ(proc_path, out_buffer) catch |err| switch (err) {
                error.UnsupportedReparsePointType => unreachable,
                error.NotLink => unreachable,
                else => |e| return e,
            };
            return target;
        },
        else => @compileError("querying for canonical path of a handle is unsupported on this host"),
    }
}

/// Spurious wakeups are possible and no precision of timing is guaranteed.
pub fn nanosleep(seconds: u64, nanoseconds: u64) void {
    var req = timespec{
        // Saturate rather than overflow when the requested duration does not
        // fit in the platform timespec fields.
        .tv_sec = math.cast(isize, seconds) catch math.maxInt(isize),
        .tv_nsec = math.cast(isize, nanoseconds) catch math.maxInt(isize),
    };
    var rem: timespec = undefined;
    while (true) {
        switch (errno(system.nanosleep(&req, &rem))) {
            .FAULT => unreachable,
            .INVAL => {
                // Sometimes Darwin returns EINVAL for no reason.
                // We treat it as a spurious wakeup.
                return;
            },
            .INTR => {
                // Interrupted; continue sleeping for the remaining time.
                req = rem;
                continue;
            },
            // This prong handles success as well as unexpected errors.
            else => return,
        }
    }
}

/// Iterates over the program headers of all loaded ELF objects, invoking
/// `callback` once per object. Uses libc's dl_iterate_phdr when linking libc,
/// otherwise walks the process's own ELF image / dynamic link map.
pub fn dl_iterate_phdr(
    context: anytype,
    comptime Error: type,
    comptime callback: fn (info: *dl_phdr_info, size: usize, context: @TypeOf(context)) Error!void,
) Error!void {
    const Context = @TypeOf(context);

    if (builtin.object_format != .elf)
        @compileError("dl_iterate_phdr is not available for this target");

    if (builtin.link_libc) {
        // Bridge the Zig callback through a C-callconv trampoline; errors are
        // smuggled out through the C return value and re-cast below.
        switch (system.dl_iterate_phdr(struct {
            fn callbackC(info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int {
                const context_ptr = @ptrCast(*const Context, @alignCast(@alignOf(*const Context), data));
                callback(info, size, context_ptr.*) catch |err| return @errorToInt(err);
                return 0;
            }
        }.callbackC, @intToPtr(?*anyopaque, @ptrToInt(&context)))) {
            0 => return,
            else => |err| return @errSetCast(Error, @intToError(@intCast(u16, err))), // TODO don't hardcode u16
        }
    }

    const elf_base = std.process.getBaseAddress();
    const ehdr = @intToPtr(*elf.Ehdr, elf_base);
    // Make sure the base address points to an ELF image.
    assert(mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF"));
    const n_phdr = ehdr.e_phnum;
    const phdrs = (@intToPtr([*]elf.Phdr, elf_base + ehdr.e_phoff))[0..n_phdr];

    var it = dl.linkmap_iterator(phdrs) catch unreachable;

    // The executable has no dynamic link segment, create a single entry for
    // the whole ELF image.
    if (it.end()) {
        // Find the base address for the ELF image, if this is a PIE the value
        // is non-zero.
        const base_address = for (phdrs) |*phdr| {
            if (phdr.p_type == elf.PT_PHDR) {
                break @ptrToInt(phdrs.ptr) - phdr.p_vaddr;
                // We could try computing the difference between _DYNAMIC and
                // the p_vaddr of the PT_DYNAMIC section, but using the phdr is
                // good enough (Is it?).
            }
        } else unreachable;

        var info = dl_phdr_info{
            .dlpi_addr = base_address,
            .dlpi_name = "/proc/self/exe",
            .dlpi_phdr = phdrs.ptr,
            .dlpi_phnum = ehdr.e_phnum,
        };

        return callback(&info, @sizeOf(dl_phdr_info), context);
    }

    // Last return value from the callback function.
    while (it.next()) |entry| {
        var dlpi_phdr: [*]elf.Phdr = undefined;
        var dlpi_phnum: u16 = undefined;

        if (entry.l_addr != 0) {
            const elf_header = @intToPtr(*elf.Ehdr, entry.l_addr);
            dlpi_phdr = @intToPtr([*]elf.Phdr, entry.l_addr + elf_header.e_phoff);
            dlpi_phnum = elf_header.e_phnum;
        } else {
            // This is the running ELF image
            dlpi_phdr = @intToPtr([*]elf.Phdr, elf_base + ehdr.e_phoff);
            dlpi_phnum = ehdr.e_phnum;
        }

        var info = dl_phdr_info{
            .dlpi_addr = entry.l_addr,
            .dlpi_name = entry.l_name,
            .dlpi_phdr = dlpi_phdr,
            .dlpi_phnum = dlpi_phnum,
        };

        try callback(&info, @sizeOf(dl_phdr_info), context);
    }
}

pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError;

/// TODO: change this to return the timespec as a return value
/// TODO: look into making clk_id an enum
pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var ts: timestamp_t = undefined;
        switch (system.clock_time_get(@bitCast(u32, clk_id), 1, &ts)) {
            .SUCCESS => {
                // WASI reports a single nanosecond timestamp; split into sec/nsec.
                tp.* = .{
                    .tv_sec = @intCast(i64, ts / std.time.ns_per_s),
                    .tv_nsec = @intCast(isize, ts % std.time.ns_per_s),
                };
            },
            .INVAL => return error.UnsupportedClock,
            else => |err| return unexpectedErrno(err),
        }
        return;
    }
    if (builtin.os.tag == .windows) {
        if (clk_id == CLOCK.REALTIME) {
            var ft: windows.FILETIME = undefined;
            windows.kernel32.GetSystemTimeAsFileTime(&ft);
            // FileTime has a granularity of 100 nanoseconds and uses the NTFS/Windows epoch.
            const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
            const ft_per_s = std.time.ns_per_s / 100;
            tp.* = .{
                .tv_sec = @intCast(i64, ft64 / ft_per_s) + std.time.epoch.windows,
                .tv_nsec = @intCast(c_long, ft64 % ft_per_s) * 100,
            };
            return;
        } else {
            // TODO POSIX implementation of CLOCK.MONOTONIC on Windows.
            return error.UnsupportedClock;
        }
    }

    switch (errno(system.clock_gettime(clk_id, tp))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => return error.UnsupportedClock,
        else => |err| return unexpectedErrno(err),
    }
}

/// Queries the resolution of the given clock into `res`.
pub fn clock_getres(clk_id: i32, res: *timespec) ClockGetTimeError!void {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var ts: timestamp_t = undefined;
        switch (system.clock_res_get(@bitCast(u32, clk_id), &ts)) {
            .SUCCESS => res.* = .{
                .tv_sec = @intCast(i64, ts / std.time.ns_per_s),
                .tv_nsec = @intCast(isize, ts % std.time.ns_per_s),
            },
            .INVAL => return error.UnsupportedClock,
            else => |err| return unexpectedErrno(err),
        }
        return;
    }

    switch (errno(system.clock_getres(clk_id, res))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => return error.UnsupportedClock,
        else => |err| return unexpectedErrno(err),
    }
}

pub const SchedGetAffinityError = error{PermissionDenied} || UnexpectedError;

/// Returns the CPU affinity mask of the process `pid`.
pub fn sched_getaffinity(pid: pid_t) SchedGetAffinityError!cpu_set_t {
    var set: cpu_set_t = undefined;
    switch (errno(system.sched_getaffinity(pid, @sizeOf(cpu_set_t), &set))) {
        .SUCCESS => return set,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .SRCH => unreachable,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Used to convert a slice to a null terminated slice on the stack.
/// TODO https://github.com/ziglang/zig/issues/287
pub fn toPosixPath(file_path: []const u8) ![MAX_PATH_BYTES - 1:0]u8 {
    // An embedded NUL would silently truncate the path at the OS boundary.
    if (std.debug.runtime_safety) assert(std.mem.indexOfScalar(u8, file_path, 0) == null);
    var path_with_null: [MAX_PATH_BYTES - 1:0]u8 = undefined;
    // >= rather than > to make room for the null byte
    if (file_path.len >= MAX_PATH_BYTES) return error.NameTooLong;
    mem.copy(u8, &path_with_null, file_path);
    path_with_null[file_path.len] = 0;
    return path_with_null;
}

/// Whether or not error.Unexpected will print its value and a stack trace.
/// if this happens the fix is to add the error code to the corresponding /// switch expression, possibly introduce a new error in the error set, and /// send a patch to Zig. /// The self-hosted compiler is not fully capable of handle the related code. /// Until then, unexpected error tracing is disabled for the self-hosted compiler. /// TODO remove this once self-hosted is capable enough to handle printing and /// stack trace dumping. pub const unexpected_error_tracing = builtin.zig_backend == .stage1 and builtin.mode == .Debug; pub const UnexpectedError = error{ /// The Operating System returned an undocumented error code. /// This error is in theory not possible, but it would be better /// to handle this error than to invoke undefined behavior. Unexpected, }; /// Call this when you made a syscall or something that sets errno /// and you get an unexpected error. pub fn unexpectedErrno(err: E) UnexpectedError { if (unexpected_error_tracing) { std.debug.print("unexpected errno: {d}\n", .{@enumToInt(err)}); std.debug.dumpCurrentStackTrace(null); } return error.Unexpected; } pub const SigaltstackError = error{ /// The supplied stack size was less than MINSIGSTKSZ. SizeTooSmall, /// Attempted to change the signal stack while it was active. PermissionDenied, } || UnexpectedError; pub fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) SigaltstackError!void { switch (errno(system.sigaltstack(ss, old_ss))) { .SUCCESS => return, .FAULT => unreachable, .INVAL => unreachable, .NOMEM => return error.SizeTooSmall, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } /// Examine and change a signal action. 
pub fn sigaction(sig: u6, act: ?*const Sigaction, oact: ?*Sigaction) void {
    switch (errno(system.sigaction(sig, act, oact))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => unreachable,
        else => unreachable,
    }
}

pub const FutimensError = error{
    /// times is NULL, or both tv_nsec values are UTIME_NOW, and either:
    /// *  the effective user ID of the caller does not match the  owner
    ///    of  the  file,  the  caller does not have write access to the
    ///    file, and the caller is not privileged (Linux: does not  have
    ///    either  the  CAP_FOWNER  or the CAP_DAC_OVERRIDE capability);
    ///    or,
    /// *  the file is marked immutable (see chattr(1)).
    AccessDenied,

    /// The caller attempted to change one or both timestamps to a value
    /// other than the current time, or to change one of the timestamps
    /// to the current time while leaving the other timestamp unchanged,
    /// (i.e., times is not NULL, neither tv_nsec field  is  UTIME_NOW,
    /// and  neither  tv_nsec  field is UTIME_OMIT) and either:
    /// *  the  caller's  effective  user ID does not match the owner of
    ///    file, and the caller is not privileged (Linux: does not  have
    ///    the CAP_FOWNER capability); or,
    /// *  the file is marked append-only or immutable (see chattr(1)).
    PermissionDenied,

    ReadOnlyFileSystem,
} || UnexpectedError;

/// Sets the access and modification times of the file referred to by `fd`.
pub fn futimens(fd: fd_t, times: *const [2]timespec) FutimensError!void {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        // TODO WASI encodes `wasi.fstflags` to signify magic values
        // similar to UTIME_NOW and UTIME_OMIT. Currently, we ignore
        // this here, but we should really handle it somehow.
        const atim = times[0].toTimestamp();
        const mtim = times[1].toTimestamp();
        switch (wasi.fd_filestat_set_times(fd, atim, mtim, wasi.FILESTAT_SET_ATIM | wasi.FILESTAT_SET_MTIM)) {
            .SUCCESS => return,
            .ACCES => return error.AccessDenied,
            .PERM => return error.PermissionDenied,
            .BADF => unreachable, // always a race condition
            .FAULT => unreachable,
            .INVAL => unreachable,
            .ROFS => return error.ReadOnlyFileSystem,
            else => |err| return unexpectedErrno(err),
        }
    }

    switch (errno(system.futimens(fd, times))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.PermissionDenied,
        .BADF => unreachable, // always a race condition
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

pub const GetHostNameError = error{PermissionDenied} || UnexpectedError;

/// Returns the host name as a slice of `name_buffer`.
pub fn gethostname(name_buffer: *[HOST_NAME_MAX]u8) GetHostNameError![]u8 {
    if (builtin.link_libc) {
        switch (errno(system.gethostname(name_buffer, name_buffer.len))) {
            .SUCCESS => return mem.sliceTo(std.meta.assumeSentinel(name_buffer, 0), 0),
            .FAULT => unreachable,
            .NAMETOOLONG => unreachable, // HOST_NAME_MAX prevents this
            .PERM => return error.PermissionDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .linux) {
        // Without libc, read the node name out of uname(2).
        const uts = uname();
        const hostname = mem.sliceTo(std.meta.assumeSentinel(&uts.nodename, 0), 0);
        mem.copy(u8, name_buffer, hostname);
        return name_buffer[0..hostname.len];
    }

    @compileError("TODO implement gethostname for this OS");
}

/// Returns system identification information (uname(2)).
pub fn uname() utsname {
    var uts: utsname = undefined;
    switch (errno(system.uname(&uts))) {
        .SUCCESS => return uts,
        .FAULT => unreachable,
        else => unreachable,
    }
}

/// Constructs a DNS query packet for `dname` into `buf` and returns the
/// number of bytes written.
pub fn res_mkquery(
    op: u4,
    dname: []const u8,
    class: u8,
    ty: u8,
    data: []const u8,
    newrr: ?[*]const u8,
    buf: []u8,
) usize {
    _ = data;
    _ = newrr;
    // This implementation is ported from musl libc.
    // A more idiomatic "ziggy" implementation would be welcome.
    var name = dname;
    // Drop a single trailing dot (fully-qualified form).
    if (mem.endsWith(u8, name, ".")) name.len -= 1;
    assert(name.len <= 253);
    const n = 17 + name.len + @boolToInt(name.len != 0);

    // Construct query template - ID will be filled later
    var q: [280]u8 = undefined;
    @memset(&q, 0, n);
    q[2] = @as(u8, op) * 8 + 1;
    q[5] = 1;
    mem.copy(u8, q[13..], name);
    // Convert the dotted name into DNS length-prefixed labels in place.
    var i: usize = 13;
    var j: usize = undefined;
    while (q[i] != 0) : (i = j + 1) {
        j = i;
        while (q[j] != 0 and q[j] != '.') : (j += 1) {}
        // TODO determine the circumstances for this and whether or
        // not this should be an error.
        if (j - i - 1 > 62) unreachable;
        q[i - 1] = @intCast(u8, j - i);
    }
    q[i + 1] = ty;
    q[i + 3] = class;

    // Make a reasonably unpredictable id
    var ts: timespec = undefined;
    clock_gettime(CLOCK.REALTIME, &ts) catch {};
    const UInt = std.meta.Int(.unsigned, std.meta.bitCount(@TypeOf(ts.tv_nsec)));
    const unsec = @bitCast(UInt, ts.tv_nsec);
    const id = @truncate(u32, unsec + unsec / 65536);
    q[0] = @truncate(u8, id / 256);
    q[1] = @truncate(u8, id);

    mem.copy(u8, buf, q[0..n]);
    return n;
}

pub const SendError = error{
    /// (For UNIX domain sockets, which are identified by pathname) Write permission is denied
    /// on the destination socket file, or search permission is denied for one of the
    /// directories the path prefix. (See path_resolution(7).)
    /// (For UDP sockets) An attempt was made to send to a network/broadcast address as though
    /// it was a unicast address.
    AccessDenied,

    /// The socket is marked nonblocking and the requested operation would block, and
    /// there is no global event loop configured.
    /// It's also possible to get this error under the following condition:
    /// (Internet  domain datagram sockets) The socket referred to by sockfd had not previously
    /// been bound to an address and, upon attempting to bind it to an ephemeral port, it was
    /// determined that all port numbers in the ephemeral port range are currently in use. See
    /// the discussion of /proc/sys/net/ipv4/ip_local_port_range in ip(7).
    WouldBlock,

    /// Another Fast Open is already in progress.
    FastOpenAlreadyInProgress,

    /// Connection reset by peer.
    ConnectionResetByPeer,

    /// The socket type requires that message be sent atomically, and the size of the message
    /// to be sent made this impossible. The message is not transmitted.
    MessageTooBig,

    /// The output queue for a network interface was full. This generally indicates that the
    /// interface has stopped sending, but may be caused by transient congestion. (Normally,
    /// this does not occur in Linux. Packets are just silently dropped when a device queue
    /// overflows.)
    /// This is also caused when there is not enough kernel memory available.
    SystemResources,

    /// The local end has been shut down on a connection oriented socket. In this case, the
    /// process will also receive a SIGPIPE unless MSG.NOSIGNAL is set.
    BrokenPipe,

    FileDescriptorNotASocket,

    /// Network is unreachable.
    NetworkUnreachable,

    /// The local network interface used to reach the destination is down.
    NetworkSubsystemFailed,
} || UnexpectedError;

pub const SendMsgError = SendError || error{
    /// The passed address didn't have the correct address family in its sa_family field.
    AddressFamilyNotSupported,

    /// Returned when socket is AF.UNIX and the given path has a symlink loop.
    SymLinkLoop,

    /// Returned when socket is AF.UNIX and the given path length exceeds `MAX_PATH_BYTES` bytes.
    NameTooLong,

    /// Returned when socket is AF.UNIX and the given path does not point to an existing file.
    FileNotFound,
    NotDir,

    /// The socket is not connected (connection-oriented sockets only).
    SocketNotConnected,
    AddressNotAvailable,
};

pub fn sendmsg(
    /// The file descriptor of the sending socket.
    sockfd: socket_t,
    /// Message header and iovecs
    msg: msghdr_const,
    flags: u32,
) SendMsgError!usize {
    while (true) {
        const rc = system.sendmsg(sockfd, @ptrCast(*const std.x.os.Socket.Message, &msg), @intCast(c_int, flags));
        if (builtin.os.tag == .windows) {
            if (rc == windows.ws2_32.SOCKET_ERROR) {
                switch (windows.ws2_32.WSAGetLastError()) {
                    .WSAEACCES => return error.AccessDenied,
                    .WSAEADDRNOTAVAIL => return error.AddressNotAvailable,
                    .WSAECONNRESET => return error.ConnectionResetByPeer,
                    .WSAEMSGSIZE => return error.MessageTooBig,
                    .WSAENOBUFS => return error.SystemResources,
                    .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                    .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
                    .WSAEDESTADDRREQ => unreachable, // A destination address is required.
                    .WSAEFAULT => unreachable, // The lpBuffers, lpTo, lpOverlapped, lpNumberOfBytesSent, or lpCompletionRoutine parameters are not part of the user address space, or the lpTo parameter is too small.
                    .WSAEHOSTUNREACH => return error.NetworkUnreachable,
                    // TODO: WSAEINPROGRESS, WSAEINTR
                    .WSAEINVAL => unreachable,
                    .WSAENETDOWN => return error.NetworkSubsystemFailed,
                    .WSAENETRESET => return error.ConnectionResetByPeer,
                    .WSAENETUNREACH => return error.NetworkUnreachable,
                    .WSAENOTCONN => return error.SocketNotConnected,
                    .WSAESHUTDOWN => unreachable, // The socket has been shut down; it is not possible to WSASendTo on a socket after shutdown has been invoked with how set to SD_SEND or SD_BOTH.
                    .WSAEWOULDBLOCK => return error.WouldBlock,
                    .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function.
                    else => |err| return windows.unexpectedWSAError(err),
                }
            } else {
                return @intCast(usize, rc);
            }
        } else {
            switch (errno(rc)) {
                .SUCCESS => return @intCast(usize, rc),
                .ACCES => return error.AccessDenied,
                .AGAIN => return error.WouldBlock,
                .ALREADY => return error.FastOpenAlreadyInProgress,
                .BADF => unreachable, // always a race condition
                .CONNRESET => return error.ConnectionResetByPeer,
                .DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set.
                .FAULT => unreachable, // An invalid user space address was specified for an argument.
                .INTR => continue,
                .INVAL => unreachable, // Invalid argument passed.
                .ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified
                .MSGSIZE => return error.MessageTooBig,
                .NOBUFS => return error.SystemResources,
                .NOMEM => return error.SystemResources,
                .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
                .OPNOTSUPP => unreachable, // Some bit in the flags argument is inappropriate for the socket type.
                .PIPE => return error.BrokenPipe,
                .AFNOSUPPORT => return error.AddressFamilyNotSupported,
                .LOOP => return error.SymLinkLoop,
                .NAMETOOLONG => return error.NameTooLong,
                .NOENT => return error.FileNotFound,
                .NOTDIR => return error.NotDir,
                .HOSTUNREACH => return error.NetworkUnreachable,
                .NETUNREACH => return error.NetworkUnreachable,
                .NOTCONN => return error.SocketNotConnected,
                .NETDOWN => return error.NetworkSubsystemFailed,
                else => |err| return unexpectedErrno(err),
            }
        }
    }
}

pub const SendToError = SendMsgError;

/// Transmit a message to another socket.
///
/// The `sendto` call may be used only when the socket is in a connected state (so that the intended
/// recipient is known). The following call
///
///     send(sockfd, buf, len, flags);
///
/// is equivalent to
///
///     sendto(sockfd, buf, len, flags, NULL, 0);
///
/// If  sendto()  is  used on a connection-mode (`SOCK.STREAM`, `SOCK.SEQPACKET`) socket, the arguments
/// `dest_addr` and `addrlen` are asserted to be `null` and `0` respectively, and asserted
/// that the socket was actually connected.
/// Otherwise, the address of the target is given by `dest_addr` with `addrlen` specifying its size.
///
/// If the message is too long to pass atomically through the underlying protocol,
/// `SendError.MessageTooBig` is returned, and the message is not transmitted.
///
/// There is no  indication  of  failure  to  deliver.
///
/// When the message does not fit into the send buffer of the socket, `sendto` normally blocks,
/// unless the socket has been placed in nonblocking I/O mode. In nonblocking mode it would fail
/// with `SendError.WouldBlock`. The `select` call may be used to determine when it is
/// possible to send more data.
pub fn sendto(
    /// The file descriptor of the sending socket.
    sockfd: socket_t,
    /// Message to send.
    buf: []const u8,
    flags: u32,
    dest_addr: ?*const sockaddr,
    addrlen: socklen_t,
) SendToError!usize {
    while (true) {
        const rc = system.sendto(sockfd, buf.ptr, buf.len, flags, dest_addr, addrlen);
        if (builtin.os.tag == .windows) {
            if (rc == windows.ws2_32.SOCKET_ERROR) {
                switch (windows.ws2_32.WSAGetLastError()) {
                    .WSAEACCES => return error.AccessDenied,
                    .WSAEADDRNOTAVAIL => return error.AddressNotAvailable,
                    .WSAECONNRESET => return error.ConnectionResetByPeer,
                    .WSAEMSGSIZE => return error.MessageTooBig,
                    .WSAENOBUFS => return error.SystemResources,
                    .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                    .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
                    .WSAEDESTADDRREQ => unreachable, // A destination address is required.
                    .WSAEFAULT => unreachable, // The lpBuffers, lpTo, lpOverlapped, lpNumberOfBytesSent, or lpCompletionRoutine parameters are not part of the user address space, or the lpTo parameter is too small.
                    .WSAEHOSTUNREACH => return error.NetworkUnreachable,
                    // TODO: WSAEINPROGRESS, WSAEINTR
                    .WSAEINVAL => unreachable,
                    .WSAENETDOWN => return error.NetworkSubsystemFailed,
                    .WSAENETRESET => return error.ConnectionResetByPeer,
                    .WSAENETUNREACH => return error.NetworkUnreachable,
                    .WSAENOTCONN => return error.SocketNotConnected,
                    .WSAESHUTDOWN => unreachable, // The socket has been shut down; it is not possible to WSASendTo on a socket after shutdown has been invoked with how set to SD_SEND or SD_BOTH.
                    .WSAEWOULDBLOCK => return error.WouldBlock,
                    .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function.
                    else => |err| return windows.unexpectedWSAError(err),
                }
            } else {
                return @intCast(usize, rc);
            }
        } else {
            switch (errno(rc)) {
                .SUCCESS => return @intCast(usize, rc),
                .ACCES => return error.AccessDenied,
                .AGAIN => return error.WouldBlock,
                .ALREADY => return error.FastOpenAlreadyInProgress,
                .BADF => unreachable, // always a race condition
                .CONNRESET => return error.ConnectionResetByPeer,
                .DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set.
                .FAULT => unreachable, // An invalid user space address was specified for an argument.
                .INTR => continue,
                .INVAL => unreachable, // Invalid argument passed.
                .ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified
                .MSGSIZE => return error.MessageTooBig,
                .NOBUFS => return error.SystemResources,
                .NOMEM => return error.SystemResources,
                .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
                .OPNOTSUPP => unreachable, // Some bit in the flags argument is inappropriate for the socket type.
                .PIPE => return error.BrokenPipe,
                .AFNOSUPPORT => return error.AddressFamilyNotSupported,
                .LOOP => return error.SymLinkLoop,
                .NAMETOOLONG => return error.NameTooLong,
                .NOENT => return error.FileNotFound,
                .NOTDIR => return error.NotDir,
                .HOSTUNREACH => return error.NetworkUnreachable,
                .NETUNREACH => return error.NetworkUnreachable,
                .NOTCONN => return error.SocketNotConnected,
                .NETDOWN => return error.NetworkSubsystemFailed,
                else => |err| return unexpectedErrno(err),
            }
        }
    }
}

/// Transmit a message to another socket.
///
/// The `send` call may be used only when the socket is in a connected state (so that the intended
/// recipient  is  known). The  only  difference  between `send` and `write` is the presence of
/// flags. With a zero flags argument, `send` is equivalent to `write`. Also, the following
/// call
///
///     send(sockfd, buf, len, flags);
///
/// is equivalent to
///
///     sendto(sockfd, buf, len, flags, NULL, 0);
///
/// There is no  indication  of  failure  to  deliver.
///
/// When the message does not fit into the send buffer of the socket, `send` normally blocks,
/// unless the socket has been placed in nonblocking I/O mode. In nonblocking mode it would fail
/// with `SendError.WouldBlock`. The `select` call may be used to determine when it is
/// possible to send more data.
pub fn send(
    /// The file descriptor of the sending socket.
    sockfd: socket_t,
    buf: []const u8,
    flags: u32,
) SendError!usize {
    return sendto(sockfd, buf, flags, null, 0) catch |err| switch (err) {
        // These errors can only arise when a destination address is supplied;
        // `send` always passes null/0, so they are impossible here.
        error.AddressFamilyNotSupported => unreachable,
        error.SymLinkLoop => unreachable,
        error.NameTooLong => unreachable,
        error.FileNotFound => unreachable,
        error.NotDir => unreachable,
        error.NetworkUnreachable => unreachable,
        error.AddressNotAvailable => unreachable,
        error.SocketNotConnected => unreachable,
        else => |e| return e,
    };
}

pub const SendFileError = PReadError || WriteError || SendError;

/// Returns the total number of bytes spanned by a list of io vectors.
fn count_iovec_bytes(iovs: []const iovec_const) usize {
    var count: usize = 0;
    for (iovs) |iov| {
        count += iov.iov_len;
    }
    return count;
}

/// Transfer data between file descriptors, with optional headers and trailers.
/// Returns the number of bytes written, which can be zero.
///
/// The `sendfile` call copies `in_len` bytes from one file descriptor to another. When possible,
/// this is done within the operating system kernel, which can provide better performance
/// characteristics than transferring data from kernel to user space and back, such as with
/// `read` and `write` calls. When `in_len` is `0`, it means to copy until the end of the input file has been
/// reached. Note, however, that partial writes are still possible in this case.
///
/// `in_fd` must be a file descriptor opened for reading, and `out_fd` must be a file descriptor
/// opened for writing. They may be any kind of file descriptor; however, if `in_fd` is not a regular
/// file system file, it may cause this function to fall back to calling `read` and `write`, in which case
/// atomicity guarantees no longer apply.
///
/// Copying begins reading at `in_offset`. The input file descriptor seek position is ignored and not updated.
/// If the output file descriptor has a seek position, it is updated as bytes are written. When
/// `in_offset` is past the end of the input file, it successfully reads 0 bytes.
/// /// `flags` has different meanings per operating system; refer to the respective man pages. /// /// These systems support atomically sending everything, including headers and trailers: /// * macOS /// * FreeBSD /// /// These systems support in-kernel data copying, but headers and trailers are not sent atomically: /// * Linux /// /// Other systems fall back to calling `read` / `write`. /// /// Linux has a limit on how many bytes may be transferred in one `sendfile` call, which is `0x7ffff000` /// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as /// well as stuffing the errno codes into the last `4096` values. This is noted on the `sendfile` man page. /// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL. /// The corresponding POSIX limit on this is `math.maxInt(isize)`. pub fn sendfile( out_fd: fd_t, in_fd: fd_t, in_offset: u64, in_len: u64, headers: []const iovec_const, trailers: []const iovec_const, flags: u32, ) SendFileError!usize { var header_done = false; var total_written: usize = 0; // Prevents EOVERFLOW. const size_t = std.meta.Int(.unsigned, @typeInfo(usize).Int.bits - 1); const max_count = switch (builtin.os.tag) { .linux => 0x7ffff000, .macos, .ios, .watchos, .tvos => math.maxInt(i32), else => math.maxInt(size_t), }; switch (builtin.os.tag) { .linux => sf: { // sendfile() first appeared in Linux 2.2, glibc 2.1. const call_sf = comptime if (builtin.link_libc) std.c.versionCheck(.{ .major = 2, .minor = 1 }).ok else builtin.os.version_range.linux.range.max.order(.{ .major = 2, .minor = 2 }) != .lt; if (!call_sf) break :sf; if (headers.len != 0) { const amt = try writev(out_fd, headers); total_written += amt; if (amt < count_iovec_bytes(headers)) return total_written; header_done = true; } // Here we match BSD behavior, making a zero count value send as many bytes as possible. 
const adjusted_count_tmp = if (in_len == 0) max_count else @minimum(in_len, @as(size_t, max_count)); // TODO we should not need this cast; improve return type of @minimum const adjusted_count = @intCast(usize, adjusted_count_tmp); const sendfile_sym = if (builtin.link_libc) system.sendfile64 else system.sendfile; while (true) { var offset: off_t = @bitCast(off_t, in_offset); const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count); switch (errno(rc)) { .SUCCESS => { const amt = @bitCast(usize, rc); total_written += amt; if (in_len == 0 and amt == 0) { // We have detected EOF from `in_fd`. break; } else if (amt < in_len) { return total_written; } else { break; } }, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .OVERFLOW => unreachable, // We avoid passing too large of a `count`. .NOTCONN => unreachable, // `out_fd` is an unconnected socket. .INVAL, .NOSYS => { // EINVAL could be any of the following situations: // * Descriptor is not valid or locked // * an mmap(2)-like operation is not available for in_fd // * count is negative // * out_fd has the O.APPEND flag set // Because of the "mmap(2)-like operation" possibility, we fall back to doing read/write // manually, the same as ENOSYS. break :sf; }, .AGAIN => if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .PIPE => return error.BrokenPipe, .NOMEM => return error.SystemResources, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, else => |err| { unexpectedErrno(err) catch {}; break :sf; }, } } if (trailers.len != 0) { total_written += try writev(out_fd, trailers); } return total_written; }, .freebsd => sf: { var hdtr_data: std.c.sf_hdtr = undefined; var hdtr: ?*std.c.sf_hdtr = null; if (headers.len != 0 or trailers.len != 0) { // Here we carefully avoid `@intCast` by returning partial writes when // too many io vectors are provided. 
const hdr_cnt = math.cast(u31, headers.len) catch math.maxInt(u31); if (headers.len > hdr_cnt) return writev(out_fd, headers); const trl_cnt = math.cast(u31, trailers.len) catch math.maxInt(u31); hdtr_data = std.c.sf_hdtr{ .headers = headers.ptr, .hdr_cnt = hdr_cnt, .trailers = trailers.ptr, .trl_cnt = trl_cnt, }; hdtr = &hdtr_data; } const adjusted_count = @minimum(in_len, max_count); while (true) { var sbytes: off_t = undefined; const offset = @bitCast(off_t, in_offset); const err = errno(system.sendfile(in_fd, out_fd, offset, adjusted_count, hdtr, &sbytes, flags)); const amt = @bitCast(usize, sbytes); switch (err) { .SUCCESS => return amt, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .NOTCONN => unreachable, // `out_fd` is an unconnected socket. .INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => { // EINVAL could be any of the following situations: // * The fd argument is not a regular file. // * The s argument is not a SOCK.STREAM type socket. // * The offset argument is negative. // Because of some of these possibilities, we fall back to doing read/write // manually, the same as ENOSYS. 
break :sf; }, .INTR => if (amt != 0) return amt else continue, .AGAIN => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .BUSY => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdReadable(in_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .NOBUFS => return error.SystemResources, .PIPE => return error.BrokenPipe, else => { unexpectedErrno(err) catch {}; if (amt != 0) { return amt; } else { break :sf; } }, } } }, .macos, .ios, .tvos, .watchos => sf: { var hdtr_data: std.c.sf_hdtr = undefined; var hdtr: ?*std.c.sf_hdtr = null; if (headers.len != 0 or trailers.len != 0) { // Here we carefully avoid `@intCast` by returning partial writes when // too many io vectors are provided. const hdr_cnt = math.cast(u31, headers.len) catch math.maxInt(u31); if (headers.len > hdr_cnt) return writev(out_fd, headers); const trl_cnt = math.cast(u31, trailers.len) catch math.maxInt(u31); hdtr_data = std.c.sf_hdtr{ .headers = headers.ptr, .hdr_cnt = hdr_cnt, .trailers = trailers.ptr, .trl_cnt = trl_cnt, }; hdtr = &hdtr_data; } const adjusted_count_temporary = @minimum(in_len, @as(u63, max_count)); // TODO we should not need this int cast; improve the return type of `@minimum` const adjusted_count = @intCast(u63, adjusted_count_temporary); while (true) { var sbytes: off_t = adjusted_count; const signed_offset = @bitCast(i64, in_offset); const err = errno(system.sendfile(in_fd, out_fd, signed_offset, &sbytes, hdtr, flags)); const amt = @bitCast(usize, sbytes); switch (err) { .SUCCESS => return amt, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .INVAL => unreachable, .NOTCONN => unreachable, // `out_fd` is an unconnected socket. 
.OPNOTSUPP, .NOTSOCK, .NOSYS => break :sf, .INTR => if (amt != 0) return amt else continue, .AGAIN => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .PIPE => return error.BrokenPipe, else => { unexpectedErrno(err) catch {}; if (amt != 0) { return amt; } else { break :sf; } }, } } }, else => {}, // fall back to read/write } if (headers.len != 0 and !header_done) { const amt = try writev(out_fd, headers); total_written += amt; if (amt < count_iovec_bytes(headers)) return total_written; } rw: { var buf: [8 * 4096]u8 = undefined; // Here we match BSD behavior, making a zero count value send as many bytes as possible. const adjusted_count_tmp = if (in_len == 0) buf.len else @minimum(buf.len, in_len); // TODO we should not need this cast; improve return type of @minimum const adjusted_count = @intCast(usize, adjusted_count_tmp); const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset); if (amt_read == 0) { if (in_len == 0) { // We have detected EOF from `in_fd`. break :rw; } else { return total_written; } } const amt_written = try write(out_fd, buf[0..amt_read]); total_written += amt_written; if (amt_written < in_len or in_len == 0) return total_written; } if (trailers.len != 0) { total_written += try writev(out_fd, trailers); } return total_written; } pub const CopyFileRangeError = error{ FileTooBig, InputOutput, /// `fd_in` is not open for reading; or `fd_out` is not open for writing; /// or the `O.APPEND` flag is set for `fd_out`. FilesOpenedWithWrongFlags, IsDir, OutOfMemory, NoSpaceLeft, Unseekable, PermissionDenied, FileBusy, } || PReadError || PWriteError || UnexpectedError; var has_copy_file_range_syscall = std.atomic.Atomic(bool).init(true); /// Transfer data between file descriptors at specified offsets. /// Returns the number of bytes written, which can less than requested. 
///
/// The `copy_file_range` call copies `len` bytes from one file descriptor to another. When possible,
/// this is done within the operating system kernel, which can provide better performance
/// characteristics than transferring data from kernel to user space and back, such as with
/// `pread` and `pwrite` calls.
///
/// `fd_in` must be a file descriptor opened for reading, and `fd_out` must be a file descriptor
/// opened for writing. They may be any kind of file descriptor; however, if `fd_in` is not a regular
/// file system file, it may cause this function to fall back to calling `pread` and `pwrite`, in which case
/// atomicity guarantees no longer apply.
///
/// If `fd_in` and `fd_out` are the same, source and target ranges must not overlap.
/// The file descriptor seek positions are ignored and not updated.
/// When `off_in` is past the end of the input file, it successfully reads 0 bytes.
///
/// `flags` has different meanings per operating system; refer to the respective man pages.
///
/// These systems support in-kernel data copying:
/// * Linux 4.5 (cross-filesystem 5.3)
///
/// Other systems fall back to calling `pread` / `pwrite`.
///
/// Maximum offsets on Linux are `math.maxInt(i64)`.
pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize {
    // Decide at compile time whether the copy_file_range syscall / libc wrapper
    // can possibly exist on the target; otherwise go straight to the
    // pread/pwrite fallback below.
    const call_cfr = comptime if (builtin.os.tag == .wasi)
        // WASI-libc doesn't have copy_file_range.
        false
    else if (builtin.link_libc)
        std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok
    else
        builtin.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) orelse true;

    if (call_cfr and has_copy_file_range_syscall.load(.Monotonic)) {
        // The syscall takes signed 64-bit offsets by pointer. It may update
        // them, but the updated values are intentionally discarded here; the
        // caller's seek positions are documented as unchanged.
        var off_in_copy = @bitCast(i64, off_in);
        var off_out_copy = @bitCast(i64, off_out);

        const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
        switch (system.getErrno(rc)) {
            .SUCCESS => return @intCast(usize, rc),
            .BADF => return error.FilesOpenedWithWrongFlags,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOMEM => return error.OutOfMemory,
            .NOSPC => return error.NoSpaceLeft,
            .OVERFLOW => return error.Unseekable,
            .PERM => return error.PermissionDenied,
            .TXTBSY => return error.FileBusy,
            // these may not be regular files, try fallback
            .INVAL => {},
            // support for cross-filesystem copy added in Linux 5.3, use fallback
            .XDEV => {},
            // syscall added in Linux 4.5, use fallback
            .NOSYS => {
                // Cache the absence of the syscall so later calls skip it entirely.
                has_copy_file_range_syscall.store(false, .Monotonic);
            },
            else => |err| return unexpectedErrno(err),
        }
    }

    // Fallback path: copy at most one buffer's worth (32 KiB) per call;
    // callers are expected to loop on the returned byte count.
    var buf: [8 * 4096]u8 = undefined;
    const adjusted_count = @minimum(buf.len, len);
    const amt_read = try pread(fd_in, buf[0..adjusted_count], off_in);
    // TODO without @as the line below fails to compile for wasm32-wasi:
    // error: integer value 0 cannot be coerced to type 'os.PWriteError!usize'
    if (amt_read == 0) return @as(usize, 0);
    return pwrite(fd_out, buf[0..amt_read], off_out);
}

pub const PollError = error{
    /// The network subsystem has failed.
    NetworkSubsystemFailed,

    /// The kernel had no space to allocate file descriptor tables.
    SystemResources,
} || UnexpectedError;

/// Wait for events on a set of file descriptors.
/// Returns the number of `pollfd` entries with returned events.
/// Retries automatically on EINTR.
pub fn poll(fds: []pollfd, timeout: i32) PollError!usize {
    while (true) {
        // More descriptors than `nfds_t` can represent cannot be passed to the kernel.
        const fds_count = math.cast(nfds_t, fds.len) catch return error.SystemResources;
        const rc = system.poll(fds.ptr, fds_count, timeout);
        if (builtin.os.tag == .windows) {
            if (rc == windows.ws2_32.SOCKET_ERROR) {
                switch (windows.ws2_32.WSAGetLastError()) {
                    .WSANOTINITIALISED => unreachable,
                    .WSAENETDOWN => return error.NetworkSubsystemFailed,
                    .WSAENOBUFS => return error.SystemResources,
                    // TODO: handle more errors
                    else => |err| return windows.unexpectedWSAError(err),
                }
            } else {
                return @intCast(usize, rc);
            }
        } else {
            switch (errno(rc)) {
                .SUCCESS => return @intCast(usize, rc),
                .FAULT => unreachable,
                .INTR => continue,
                .INVAL => unreachable,
                .NOMEM => return error.SystemResources,
                else => |err| return unexpectedErrno(err),
            }
        }
        // Both branches above either return or continue.
        unreachable;
    }
}

pub const PPollError = error{
    /// The operation was interrupted by a delivery of a signal before it could complete.
    SignalInterrupt,

    /// The kernel had no space to allocate file descriptor tables.
    SystemResources,
} || UnexpectedError;

/// Like `poll`, but with a nanosecond-precision timeout and an optional signal
/// mask applied for the duration of the call.
/// Unlike `poll`, an interrupting signal surfaces as `error.SignalInterrupt`
/// instead of being retried.
pub fn ppoll(fds: []pollfd, timeout: ?*const timespec, mask: ?*const sigset_t) PPollError!usize {
    // Copy the timeout into a local so a non-const pointer can be handed to the
    // system interface.
    var ts: timespec = undefined;
    var ts_ptr: ?*timespec = null;
    if (timeout) |timeout_ns| {
        ts_ptr = &ts;
        ts = timeout_ns.*;
    }
    const fds_count = math.cast(nfds_t, fds.len) catch return error.SystemResources;
    const rc = system.ppoll(fds.ptr, fds_count, ts_ptr, mask);
    switch (errno(rc)) {
        .SUCCESS => return @intCast(usize, rc),
        .FAULT => unreachable,
        .INTR => return error.SignalInterrupt,
        .INVAL => unreachable,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

pub const RecvFromError = error{
    /// The socket is marked nonblocking and the requested operation would block, and
    /// there is no global event loop configured.
    WouldBlock,

    /// A remote host refused to allow the network connection, typically because it is not
    /// running the requested service.
    ConnectionRefused,

    /// Could not allocate kernel memory.
    SystemResources,

    ConnectionResetByPeer,

    /// The socket has not been bound.
    SocketNotBound,

    /// The UDP message was too big for the buffer and part of it has been discarded
    MessageTooBig,

    /// The network subsystem has failed.
    NetworkSubsystemFailed,

    /// The socket is not connected (connection-oriented sockets only).
    SocketNotConnected,
} || UnexpectedError;

/// Receive a message from a connected socket.
/// Equivalent to `recvfrom` with no interest in the source address.
pub fn recv(sock: socket_t, buf: []u8, flags: u32) RecvFromError!usize {
    return recvfrom(sock, buf, flags, null, null);
}

/// If `sockfd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// Retries automatically on EINTR.
pub fn recvfrom(
    sockfd: socket_t,
    buf: []u8,
    flags: u32,
    src_addr: ?*sockaddr,
    addrlen: ?*socklen_t,
) RecvFromError!usize {
    while (true) {
        const rc = system.recvfrom(sockfd, buf.ptr, buf.len, flags, src_addr, addrlen);
        if (builtin.os.tag == .windows) {
            if (rc == windows.ws2_32.SOCKET_ERROR) {
                switch (windows.ws2_32.WSAGetLastError()) {
                    .WSANOTINITIALISED => unreachable,
                    .WSAECONNRESET => return error.ConnectionResetByPeer,
                    .WSAEINVAL => return error.SocketNotBound,
                    .WSAEMSGSIZE => return error.MessageTooBig,
                    .WSAENETDOWN => return error.NetworkSubsystemFailed,
                    .WSAENOTCONN => return error.SocketNotConnected,
                    .WSAEWOULDBLOCK => return error.WouldBlock,
                    // TODO: handle more errors
                    else => |err| return windows.unexpectedWSAError(err),
                }
            } else {
                return @intCast(usize, rc);
            }
        } else {
            switch (errno(rc)) {
                .SUCCESS => return @intCast(usize, rc),
                .BADF => unreachable, // always a race condition
                .FAULT => unreachable,
                .INVAL => unreachable,
                .NOTCONN => unreachable,
                .NOTSOCK => unreachable,
                .INTR => continue,
                .AGAIN => return error.WouldBlock,
                .NOMEM => return error.SystemResources,
                .CONNREFUSED => return error.ConnectionRefused,
                .CONNRESET => return error.ConnectionResetByPeer,
                else => |err| return unexpectedErrno(err),
            }
        }
    }
}

pub const DnExpandError = error{InvalidDnsPacket};

/// Expand a compressed DNS domain name from `msg` into dotted text form in `exp_dn`.
/// Returns the number of bytes of `comp_dn` consumed by the compressed name.
pub fn dn_expand(
    msg: []const u8,
    comp_dn: []const u8,
    exp_dn: []u8,
) DnExpandError!usize {
    // This implementation is ported from musl libc.
    // A more idiomatic "ziggy" implementation would be welcome.
    var p = comp_dn.ptr;
    // maxInt(usize) acts as a "not yet determined" sentinel for the result.
    var len: usize = std.math.maxInt(usize);
    const end = msg.ptr + msg.len;
    if (p == end or exp_dn.len == 0) return error.InvalidDnsPacket;
    var dest = exp_dn.ptr;
    // Never write more than 254 name bytes; the terminating 0 is written below.
    const dend = dest + @minimum(exp_dn.len, 254);
    // detect reference loop using an iteration counter
    var i: usize = 0;
    while (i < msg.len) : (i += 2) {
        // loop invariants: p<end, dest<dend
        if ((p[0] & 0xc0) != 0) {
            // Top two bits set: a 14-bit compression pointer into `msg`.
            if (p + 1 == end) return error.InvalidDnsPacket;
            var j = ((p[0] & @as(usize, 0x3f)) << 8) | p[1];
            // The consumed length is fixed by the position of the first pointer.
            if (len == std.math.maxInt(usize)) len = @ptrToInt(p) + 2 - @ptrToInt(comp_dn.ptr);
            if (j >= msg.len) return error.InvalidDnsPacket;
            p = msg.ptr + j;
        } else if (p[0] != 0) {
            // A length-prefixed label: separate from the previous label with
            // '.' and copy its bytes, bounds-checking both source and dest.
            if (dest != exp_dn.ptr) {
                dest.* = '.';
                dest += 1;
            }
            var j = p[0];
            p += 1;
            if (j >= @ptrToInt(end) - @ptrToInt(p) or j >= @ptrToInt(dend) - @ptrToInt(dest)) {
                return error.InvalidDnsPacket;
            }
            while (j != 0) {
                j -= 1;
                dest.* = p[0];
                dest += 1;
                p += 1;
            }
        } else {
            // A zero-length label terminates the name.
            dest.* = 0;
            if (len == std.math.maxInt(usize)) len = @ptrToInt(p) + 1 - @ptrToInt(comp_dn.ptr);
            return len;
        }
    }
    return error.InvalidDnsPacket;
}

pub const SchedYieldError = error{
    /// The system is not configured to allow yielding
    SystemCannotYield,
};

/// Relinquish the processor so that another ready thread may run.
pub fn sched_yield() SchedYieldError!void {
    if (builtin.os.tag == .windows) {
        // The return value has to do with how many other threads there are; it is not
        // an error condition on Windows.
        _ = windows.kernel32.SwitchToThread();
        return;
    }
    switch (errno(system.sched_yield())) {
        .SUCCESS => return,
        .NOSYS => return error.SystemCannotYield,
        // Any other failure is treated the same as an unsupported yield.
        else => return error.SystemCannotYield,
    }
}

pub const SetSockOptError = error{
    /// The socket is already connected, and a specified option cannot be set while the socket is connected.
    AlreadyConnected,

    /// The option is not supported by the protocol.
    InvalidProtocolOption,

    /// The send and receive timeout values are too big to fit into the timeout fields in the socket structure.
    TimeoutTooBig,

    /// Insufficient resources are available in the system to complete the call.
    SystemResources,

    // Setting the socket option requires more elevated permissions.
    PermissionDenied,

    NetworkSubsystemFailed,
    FileDescriptorNotASocket,
    SocketNotBound,
} || UnexpectedError;

/// Set a socket's options.
/// `opt` is the raw bytes of the option value for the given `level`/`optname`.
pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.ws2_32.setsockopt(fd, @intCast(i32, level), @intCast(i32, optname), opt.ptr, @intCast(i32, opt.len));
        if (rc == windows.ws2_32.SOCKET_ERROR) {
            switch (windows.ws2_32.WSAGetLastError()) {
                .WSANOTINITIALISED => unreachable,
                .WSAENETDOWN => return error.NetworkSubsystemFailed,
                .WSAEFAULT => unreachable,
                .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                .WSAEINVAL => return error.SocketNotBound,
                else => |err| return windows.unexpectedWSAError(err),
            }
        }
        return;
    } else {
        switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len)))) {
            .SUCCESS => {},
            .BADF => unreachable, // always a race condition
            .NOTSOCK => unreachable, // always a race condition
            .INVAL => unreachable,
            .FAULT => unreachable,
            .DOM => return error.TimeoutTooBig,
            .ISCONN => return error.AlreadyConnected,
            .NOPROTOOPT => return error.InvalidProtocolOption,
            .NOMEM => return error.SystemResources,
            .NOBUFS => return error.SystemResources,
            .PERM => return error.PermissionDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const MemFdCreateError = error{
    SystemFdQuotaExceeded,
    ProcessFdQuotaExceeded,
    OutOfMemory,

    /// memfd_create is available in Linux 3.17 and later. This error is returned
    /// for older kernel versions.
    SystemOutdated,
} || UnexpectedError;

/// Create an anonymous memory-backed file via the `memfd_create` syscall
/// (or the glibc wrapper when available), returning its file descriptor.
/// `name` must be null-terminated.
pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t {
    // memfd_create is available only in glibc versions starting with 2.27.
    const use_c = std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok;
    const sys = if (use_c) std.c else linux;
    const getErrno = if (use_c) std.c.getErrno else linux.getErrno;
    const rc = sys.memfd_create(name, flags);
    switch (getErrno(rc)) {
        .SUCCESS => return @intCast(fd_t, rc),
        .FAULT => unreachable, // name has invalid memory
        .INVAL => unreachable, // name/flags are faulty
        .NFILE => return error.SystemFdQuotaExceeded,
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NOMEM => return error.OutOfMemory,
        .NOSYS => return error.SystemOutdated,
        else => |err| return unexpectedErrno(err),
    }
}

pub const MFD_NAME_PREFIX = "memfd:";
pub const MFD_MAX_NAME_LEN = NAME_MAX - MFD_NAME_PREFIX.len;

/// Copy `name` into a fixed-size null-terminated buffer suitable for `memfd_createZ`.
/// Returns error.NameTooLong when it does not fit (including the null byte).
fn toMemFdPath(name: []const u8) ![MFD_MAX_NAME_LEN:0]u8 {
    var path_with_null: [MFD_MAX_NAME_LEN:0]u8 = undefined;
    // >= rather than > to make room for the null byte
    if (name.len >= MFD_MAX_NAME_LEN) return error.NameTooLong;
    mem.copy(u8, &path_with_null, name);
    path_with_null[name.len] = 0;
    return path_with_null;
}

/// Slice-accepting convenience wrapper around `memfd_createZ`.
pub fn memfd_create(name: []const u8, flags: u32) !fd_t {
    const name_t = try toMemFdPath(name);
    return memfd_createZ(&name_t, flags);
}

/// Get resource usage statistics via `getrusage`.
/// Asserts the call cannot fail; all errno values are treated as impossible
/// for the inputs this function passes.
pub fn getrusage(who: i32) rusage {
    var result: rusage = undefined;
    const rc = system.getrusage(who, &result);
    switch (errno(rc)) {
        .SUCCESS => return result,
        .INVAL => unreachable,
        .FAULT => unreachable,
        else => unreachable,
    }
}

pub const TermiosGetError = error{NotATerminal} || UnexpectedError;

/// Read the terminal attributes of `handle`. Retries automatically on EINTR.
pub fn tcgetattr(handle: fd_t) TermiosGetError!termios {
    while (true) {
        var term: termios = undefined;
        switch (errno(system.tcgetattr(handle, &term))) {
            .SUCCESS => return term,
            .INTR => continue,
            .BADF => unreachable,
            .NOTTY => return error.NotATerminal,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const TermiosSetError = TermiosGetError ||
error{ProcessOrphaned};

/// Apply terminal attributes to `handle` according to `optional_action`.
/// Retries automatically on EINTR.
pub fn tcsetattr(handle: fd_t, optional_action: TCSA, termios_p: termios) TermiosSetError!void {
    while (true) {
        switch (errno(system.tcsetattr(handle, optional_action, &termios_p))) {
            .SUCCESS => return,
            .BADF => unreachable,
            .INTR => continue,
            .INVAL => unreachable,
            .NOTTY => return error.NotATerminal,
            .IO => return error.ProcessOrphaned,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const IoCtl_SIOCGIFINDEX_Error = error{
    FileSystem,
    InterfaceNotFound,
} || UnexpectedError;

/// Perform the SIOCGIFINDEX ioctl on `ifr` (interface index lookup).
/// Retries automatically on EINTR.
pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void {
    while (true) {
        switch (errno(system.ioctl(fd, SIOCGIFINDEX, @ptrToInt(ifr)))) {
            .SUCCESS => return,
            .INVAL => unreachable, // Bad parameters.
            .NOTTY => unreachable,
            .NXIO => unreachable,
            .BADF => unreachable, // Always a race condition.
            .FAULT => unreachable, // Bad pointer parameter.
            .INTR => continue,
            .IO => return error.FileSystem,
            .NODEV => return error.InterfaceNotFound,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Create or update a signalfd file descriptor accepting the signals in `mask`.
/// NOTE(review): per signalfd(2), passing -1 as `fd` creates a new descriptor;
/// this wrapper forwards `fd` unchanged — confirm at call sites.
pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
    const rc = system.signalfd(fd, mask, flags);
    switch (errno(rc)) {
        .SUCCESS => return @intCast(fd_t, rc),
        .BADF, .INVAL => unreachable,
        .NFILE => return error.SystemFdQuotaExceeded,
        .NOMEM => return error.SystemResources,
        .MFILE => return error.ProcessResources,
        .NODEV => return error.InodeMountFail,
        .NOSYS => return error.SystemOutdated,
        else => |err| return unexpectedErrno(err),
    }
}

pub const SyncError = error{
    InputOutput,
    NoSpaceLeft,
    DiskQuota,
    AccessDenied,
} || UnexpectedError;

/// Write all pending file contents and metadata modifications to all filesystems.
pub fn sync() void {
    system.sync();
}

/// Write all pending file contents and metadata modifications to the filesystem which contains the specified file.
pub fn syncfs(fd: fd_t) SyncError!void {
    const rc = system.syncfs(fd);
    switch (errno(rc)) {
        .SUCCESS => return,
        .BADF, .INVAL, .ROFS => unreachable,
        .IO => return error.InputOutput,
        .NOSPC => return error.NoSpaceLeft,
        .DQUOT => return error.DiskQuota,
        else => |err| return unexpectedErrno(err),
    }
}

/// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem.
pub fn fsync(fd: fd_t) SyncError!void {
    if (builtin.os.tag == .windows) {
        // Nonzero means FlushFileBuffers succeeded; otherwise map the Win32 error.
        if (windows.kernel32.FlushFileBuffers(fd) != 0)
            return;
        switch (windows.kernel32.GetLastError()) {
            .SUCCESS => return,
            .INVALID_HANDLE => unreachable,
            .ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time
            .UNEXP_NET_ERR => return error.InputOutput,
            else => return error.InputOutput,
        }
    }
    const rc = system.fsync(fd);
    switch (errno(rc)) {
        .SUCCESS => return,
        .BADF, .INVAL, .ROFS => unreachable,
        .IO => return error.InputOutput,
        .NOSPC => return error.NoSpaceLeft,
        .DQUOT => return error.DiskQuota,
        else => |err| return unexpectedErrno(err),
    }
}

/// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata.
pub fn fdatasync(fd: fd_t) SyncError!void {
    if (builtin.os.tag == .windows) {
        // Windows has no data-only flush; fall back to a full fsync.
        return fsync(fd) catch |err| switch (err) {
            SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced
            else => return err,
        };
    }
    const rc = system.fdatasync(fd);
    switch (errno(rc)) {
        .SUCCESS => return,
        .BADF, .INVAL, .ROFS => unreachable,
        .IO => return error.InputOutput,
        .NOSPC => return error.NoSpaceLeft,
        .DQUOT => return error.DiskQuota,
        else => |err| return unexpectedErrno(err),
    }
}

pub const PrctlError = error{
    /// Can only occur with PR_SET_SECCOMP/SECCOMP_MODE_FILTER or
    /// PR_SET_MM/PR_SET_MM_EXE_FILE
    AccessDenied,
    /// Can only occur with PR_SET_MM/PR_SET_MM_EXE_FILE
    InvalidFileDescriptor,
    InvalidAddress,
    /// Can only occur with PR_SET_SPECULATION_CTRL, PR_MPX_ENABLE_MANAGEMENT,
    /// or PR_MPX_DISABLE_MANAGEMENT
    UnsupportedFeature,
    /// Can only occur with PR_SET_FP_MODE
    OperationNotSupported,
    PermissionDenied,
} || UnexpectedError;

/// Perform a process control operation (Linux `prctl`).
/// `args` is a tuple or struct of up to 4 integer-coercible optional arguments.
pub fn prctl(option: PR, args: anytype) PrctlError!u31 {
    if (@typeInfo(@TypeOf(args)) != .Struct)
        @compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
    if (args.len > 4)
        @compileError("prctl takes a maximum of 4 optional arguments");

    // Unfilled slots remain `undefined` but are still passed to the kernel;
    // this is only safe when `option` does not read them.
    var buf: [4]usize = undefined;
    {
        comptime var i = 0;
        inline while (i < args.len) : (i += 1) buf[i] = args[i];
    }

    const rc = system.prctl(@enumToInt(option), buf[0], buf[1], buf[2], buf[3]);
    switch (errno(rc)) {
        .SUCCESS => return @intCast(u31, rc),
        .ACCES => return error.AccessDenied,
        .BADF => return error.InvalidFileDescriptor,
        .FAULT => return error.InvalidAddress,
        .INVAL => unreachable,
        .NODEV, .NXIO => return error.UnsupportedFeature,
        .OPNOTSUPP => return error.OperationNotSupported,
        .PERM, .BUSY => return error.PermissionDenied,
        .RANGE => unreachable,
        else => |err| return unexpectedErrno(err),
    }
}

pub const GetrlimitError = UnexpectedError;

/// Get the soft and hard limits for `resource`.
pub fn getrlimit(resource: rlimit_resource) GetrlimitError!rlimit {
    // When linking glibc on Linux, call the explicit 64-bit symbol.
    const getrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.getrlimit64
    else
        system.getrlimit;

    var limits: rlimit = undefined;
    switch (errno(getrlimit_sym(resource, &limits))) {
        .SUCCESS => return limits,
        .FAULT => unreachable, // bogus pointer
        .INVAL => unreachable,
        else => |err| return unexpectedErrno(err),
    }
}

pub const SetrlimitError = error{ PermissionDenied, LimitTooBig } || UnexpectedError;

/// Set the soft and hard limits for `resource`.
pub fn setrlimit(resource: rlimit_resource, limits: rlimit) SetrlimitError!void {
    // When linking glibc on Linux, call the explicit 64-bit symbol.
    const setrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.setrlimit64
    else
        system.setrlimit;

    switch (errno(setrlimit_sym(resource, &limits))) {
        .SUCCESS => return,
        .FAULT => unreachable, // bogus pointer
        .INVAL => return error.LimitTooBig, // this could also mean "invalid resource", but that would be unreachable
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

pub const MadviseError = error{
    /// advice is MADV.REMOVE, but the specified address range is not a shared writable mapping.
    AccessDenied,
    /// advice is MADV.HWPOISON, but the caller does not have the CAP_SYS_ADMIN capability.
    PermissionDenied,
    /// A kernel resource was temporarily unavailable.
    SystemResources,
    /// One of the following:
    /// * addr is not page-aligned or length is negative
    /// * advice is not valid
    /// * advice is MADV.DONTNEED or MADV.REMOVE and the specified address range
    ///   includes locked, Huge TLB pages, or VM_PFNMAP pages.
    /// * advice is MADV.MERGEABLE or MADV.UNMERGEABLE, but the kernel was not
    ///   configured with CONFIG_KSM.
    /// * advice is MADV.FREE or MADV.WIPEONFORK but the specified address range
    ///   includes file, Huge TLB, MAP.SHARED, or VM_PFNMAP ranges.
    InvalidSyscall,
    /// (for MADV.WILLNEED) Paging in this area would exceed the process's
    /// maximum resident set size.
    WouldExceedMaximumResidentSetSize,
    /// One of the following:
    /// * (for MADV.WILLNEED) Not enough memory: paging in failed.
    /// * Addresses in the specified range are not currently mapped, or
    ///   are outside the address space of the process.
    OutOfMemory,

    /// The madvise syscall is not available on this version and configuration
    /// of the Linux kernel.
    MadviseUnavailable,

    /// The operating system returned an undocumented error code.
    Unexpected,
};

/// Give advice about use of memory.
/// This syscall is optional and is sometimes configured to be disabled.
pub fn madvise(ptr: [*]align(mem.page_size) u8, length: usize, advice: u32) MadviseError!void {
    switch (errno(system.madvise(ptr, length, advice))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .AGAIN => return error.SystemResources,
        .BADF => unreachable, // The map exists, but the area maps something that isn't a file.
        .INVAL => return error.InvalidSyscall,
        .IO => return error.WouldExceedMaximumResidentSetSize,
        .NOMEM => return error.OutOfMemory,
        .NOSYS => return error.MadviseUnavailable,
        else => |err| return unexpectedErrno(err),
    }
}

pub const PerfEventOpenError = error{
    /// Returned if the perf_event_attr size value is too small (smaller
    /// than PERF_ATTR_SIZE_VER0), too big (larger than the page size),
    /// or larger than the kernel supports and the extra bytes are not
    /// zero. When E2BIG is returned, the perf_event_attr size field is
    /// overwritten by the kernel to be the size of the structure it was
    /// expecting.
    TooBig,
    /// Returned when the requested event requires CAP_SYS_ADMIN permis‐
    /// sions (or a more permissive perf_event paranoid setting). Some
    /// common cases where an unprivileged process may encounter this
    /// error: attaching to a process owned by a different user; moni‐
    /// toring all processes on a given CPU (i.e., specifying the pid
    /// argument as -1); and not setting exclude_kernel when the para‐
    /// noid setting requires it.
    /// Also:
    /// Returned on many (but not all) architectures when an unsupported
    /// exclude_hv, exclude_idle, exclude_user, or exclude_kernel set‐
    /// ting is specified.
    /// It can also happen, as with EACCES, when the requested event re‐
    /// quires CAP_SYS_ADMIN permissions (or a more permissive
    /// perf_event paranoid setting). This includes setting a break‐
    /// point on a kernel address, and (since Linux 3.13) setting a ker‐
    /// nel function-trace tracepoint.
    PermissionDenied,
    /// Returned if another event already has exclusive access to the
    /// PMU.
    DeviceBusy,
    /// Each opened event uses one file descriptor. If a large number
    /// of events are opened, the per-process limit on the number of
    /// open file descriptors will be reached, and no more events can be
    /// created.
    ProcessResources,
    EventRequiresUnsupportedCpuFeature,
    /// Returned if you try to add more breakpoint
    /// events than supported by the hardware.
    TooManyBreakpoints,
    /// Returned if PERF_SAMPLE_STACK_USER is set in sample_type and it
    /// is not supported by hardware.
    SampleStackNotSupported,
    /// Returned if an event requiring a specific hardware feature is
    /// requested but there is no hardware support. This includes re‐
    /// questing low-skid events if not supported, branch tracing if it
    /// is not available, sampling if no PMU interrupt is available, and
    /// branch stacks for software events.
    EventNotSupported,
    /// Returned if PERF_SAMPLE_CALLCHAIN is requested and sam‐
    /// ple_max_stack is larger than the maximum specified in
    /// /proc/sys/kernel/perf_event_max_stack.
    SampleMaxStackOverflow,
    /// Returned if attempting to attach to a process that does not exist.
    ProcessNotFound,
} || UnexpectedError;

/// Open a performance monitoring event (Linux `perf_event_open`).
/// Returns a file descriptor referring to the new event.
pub fn perf_event_open(
    attr: *linux.perf_event_attr,
    pid: pid_t,
    cpu: i32,
    group_fd: fd_t,
    flags: usize,
) PerfEventOpenError!fd_t {
    const rc = system.perf_event_open(attr, pid, cpu, group_fd, flags);
    switch (errno(rc)) {
        .SUCCESS => return @intCast(fd_t, rc),
        .@"2BIG" => return error.TooBig,
        .ACCES => return error.PermissionDenied,
        .BADF => unreachable, // group_fd file descriptor is not valid.
        .BUSY => return error.DeviceBusy,
        .FAULT => unreachable, // Segmentation fault.
        .INVAL => unreachable, // Bad attr settings.
        .INTR => unreachable, // Mixed perf and ftrace handling for a uprobe.
        .MFILE => return error.ProcessResources,
        .NODEV => return error.EventRequiresUnsupportedCpuFeature,
        .NOENT => unreachable, // Invalid type setting.
        .NOSPC => return error.TooManyBreakpoints,
        .NOSYS => return error.SampleStackNotSupported,
        .OPNOTSUPP => return error.EventNotSupported,
        .OVERFLOW => return error.SampleMaxStackOverflow,
        .PERM => return error.PermissionDenied,
        .SRCH => return error.ProcessNotFound,
        else => |err| return unexpectedErrno(err),
    }
}
https://raw.githubusercontent.com/natanalt/zig-x86_16/1b38fc3ef5e539047c76604ffe71b81e246f1a1e/lib/std/os.zig
const std = @import("std");
const c = @import("c_api.zig");
const core = @import("core.zig");
const utils = @import("utils.zig");
const assert = std.debug.assert;
const epnn = utils.ensurePtrNotNull;
const Mat = core.Mat;
const Mats = core.Mats;
const Point = core.Point;

/// Cloning method selector for `seamlessClone`.
pub const SeamlessCloneFlag = enum(u2) {
    /// Fully expressive variant; best suited to inserting objects with complex
    /// outlines into a new background.
    normal_clone = 0,
    /// Mixed seamless cloning based on a loose selection; useful when
    /// color-based selection and alpha masking would be too costly or leave a
    /// halo.
    mixed_clone,
    /// Monochrome transfer: replaces selected features of one object with
    /// alternative features.
    monochrome_transfer,
};

/// Edge-preserving filter selector.
pub const EdgeFilter = enum(u2) {
    /// Recursive filtering.
    recurs_filter = 1,
    /// Normalized convolution filtering.
    normconv_filter = 2,
};

/// Seamlessly mixes two differently colored versions of an image, scaling each
/// channel of `src` by the given multiplier and writing the result to `dst`.
///
/// For further details, please see:
/// https://docs.opencv.org/master/df/da0/group__photo__clone.html#ga6684f35dc669ff6196a7c340dc73b98e
///
pub fn colorChange(src: Mat, mask: Mat, dst: *Mat, red_mul: f32, green_mul: f32, blue_mul: f32) void {
    _ = c.ColorChange(src.ptr, mask.ptr, dst.ptr, red_mul, green_mul, blue_mul);
}

/// Blends the masked region of `src` into `dst` at point `p` using Poisson
/// blending, with the cloning method chosen by `flags`.
///
/// For further details, please see:
/// https://docs.opencv.org/master/df/da0/group__photo__clone.html#ga2bf426e4c93a6b1f21705513dfeca49d
///
pub fn seamlessClone(src: Mat, dst: *Mat, mask: Mat, p: Point, blend: Mat, flags: SeamlessCloneFlag) void {
    const mode = @intFromEnum(flags);
    _ = c.SeamlessClone(src.ptr, dst.ptr, mask.ptr, p.toC(), blend.ptr, mode);
}

/// IlluminationChange modifies locally the apparent illumination of an image.
///
/// For further details, please see:
/// https://docs.opencv.org/master/df/da0/group__photo__clone.html#gac5025767cf2febd8029d474278e886c7
///
pub fn illuminationChange(src: Mat, mask: Mat, dst: *Mat, alpha: f32, beta: f32) void {
    _ = c.IlluminationChange(src.ptr, mask.ptr, dst.*.ptr, alpha, beta);
}

/// TextureFlattening washes out the texture of the selected region, giving its contents a flat aspect.
///
/// For further details, please see:
/// https://docs.opencv.org/master/df/da0/group__photo__clone.html#gad55df6aa53797365fa7cc23959a54004
///
pub fn textureFlattening(src: Mat, mask: Mat, dst: *Mat, low_threshold: f32, high_threshold: f32, kernel_size: i32) void {
    _ = c.TextureFlattening(src.ptr, mask.ptr, dst.*.ptr, low_threshold, high_threshold, kernel_size);
}

/// FastNlMeansDenoisingColoredMulti denoises the selected images.
///
/// Returns an error if the temporary C Mat array cannot be allocated.
///
/// For further details, please see:
/// https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaa501e71f52fb2dc17ff8ca5e7d2d3619
///
pub fn fastNlMeansDenoisingColoredMulti(
    src: []Mat,
    dst: *Mat,
    img_to_denoise_index: i32,
    temporal_window_size: i32,
) !void {
    // The C shim needs a contiguous array of C Mat structs; build it once and
    // release it after the call (the denoised result lives in `dst`).
    const c_mats = try Mat.toCStructs(src);
    defer Mat.deinitCStructs(c_mats);
    _ = c.FastNlMeansDenoisingColoredMulti(c_mats, dst.*.ptr, img_to_denoise_index, temporal_window_size);
}

/// FastNlMeansDenoisingColoredMulti denoises the selected images.
///
/// Parameterized variant: `h` / `h_color` control filter strength for
/// luminance and color components; the window sizes must be odd.
///
/// For further details, please see:
/// https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaa501e71f52fb2dc17ff8ca5e7d2d3619
///
pub fn fastNlMeansDenoisingColoredMultiWithParams(
    src: []Mat,
    dst: *Mat,
    img_to_denoise_index: i32,
    temporal_window_size: i32,
    h: f32,
    h_color: f32,
    template_window_size: i32,
    search_window_size: i32,
) !void {
    // Marshal the Zig Mat slice into the C array the shim expects; freed on exit.
    const c_mats = try Mat.toCStructs(src);
    defer Mat.deinitCStructs(c_mats);
    _ = c.FastNlMeansDenoisingColoredMultiWithParams(
        c_mats,
        dst.*.ptr,
        img_to_denoise_index,
        temporal_window_size,
        h,
        h_color,
        template_window_size,
        search_window_size,
    );
}

/// FastNlMeansDenoising performs image denoising using Non-local Means Denoising algorithm
/// http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/d1/d79/group__photo__denoise.html#ga4c6b0031f56ea3f98f768881279ffe93
///
pub fn fastNlMeansDenoising(src: Mat, dst: *Mat) void {
    _ = c.FastNlMeansDenoising(src.ptr, dst.*.ptr);
}

/// FastNlMeansDenoisingWithParams performs image denoising using Non-local Means Denoising algorithm
/// http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/d1/d79/group__photo__denoise.html#ga4c6b0031f56ea3f98f768881279ffe93
///
pub fn fastNlMeansDenoisingWithParams(
    src: Mat,
    dst: *Mat,
    h: f32,
    template_window_size: i32,
    search_window_size: i32,
) void {
    _ = c.FastNlMeansDenoisingWithParams(
        src.ptr,
        dst.*.ptr,
        h,
        template_window_size,
        search_window_size,
    );
}

/// FastNlMeansDenoisingColored is a modification of fastNlMeansDenoising function for colored images.
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/d1/d79/group__photo__denoise.html#ga21abc1c8b0e15f78cd3eff672cb6c476
///
pub fn fastNlMeansDenoisingColored(src: Mat, dst: *Mat) void {
    _ = c.FastNlMeansDenoisingColored(src.ptr, dst.*.ptr);
}

/// FastNlMeansDenoisingColoredWithParams is a modification of fastNlMeansDenoising function for colored images.
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/d1/d79/group__photo__denoise.html#ga21abc1c8b0e15f78cd3eff672cb6c476
///
pub fn fastNlMeansDenoisingColoredWithParams(
    src: Mat,
    dst: *Mat,
    h: f32,
    h_color: f32,
    template_window_size: i32,
    search_window_size: i32,
) void {
    _ = c.FastNlMeansDenoisingColoredWithParams(
        src.ptr,
        dst.*.ptr,
        h,
        h_color,
        template_window_size,
        search_window_size,
    );
}

/// MergeMertens is a wrapper around the cv::MergeMertens.
pub const MergeMertens = struct {
    // Opaque handle owned by the C side; null only after deinit().
    ptr: c.MergeMertens,

    const Self = @This();

    /// NewMergeMertens returns a new MergeMertens exposure-fusion algorithm
    /// of type MergeMertens with default parameters.
    /// Merging LDR images with the MergeMertens algorithm should result in an HDR image.
    ///
    /// For further details, please see:
    /// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
    /// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html
    /// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga79d59aa3cb3a7c664e59a4b5acc1ccb6
    ///
    pub fn init() !Self {
        const ptr = c.MergeMertens_Create();
        return try initFromC(ptr);
    }

    /// NewMergeMertensWithParams returns a new MergeMertens exposure-fusion algorithm
    /// of type MergeMertens with customized parameters.
    /// Merging LDR images with the MergeMertens algorithm should result in an HDR image.
    ///
    /// For further details, please see:
    /// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
    /// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html
    /// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga79d59aa3cb3a7c664e59a4b5acc1ccb6
    ///
    pub fn initWithParams(contrast_weight: f32, saturation_weight: f32, exposure_weight: f32) !Self {
        const ptr = c.MergeMertens_CreateWithParams(contrast_weight, saturation_weight, exposure_weight);
        return try initFromC(ptr);
    }

    // Wraps a raw C handle, rejecting null.
    fn initFromC(ptr: c.MergeMertens) !Self {
        const nn_ptr = try epnn(ptr);
        return .{ .ptr = nn_ptr };
    }

    ///Close MergeMertens
    pub fn deinit(self: *Self) void {
        assert(self.ptr != null);
        c.MergeMertens_Close(self.ptr);
        self.*.ptr = null;
    }

    /// BalanceWhite computes merge LDR images using the current MergeMertens.
    /// Return a image MAT : 8bits 3 channel image ( RGB 8 bits )
    /// For further details, please see:
    /// https://docs.opencv.org/master/d7/dd6/classcv_1_1MergeMertens.html#a2d2254b2aab722c16954de13a663644d
    ///
    pub fn process(self: *Self, src: []const Mat, dst: *Mat) !void {
        // Marshal the input slice into a C Mat array; freed once the call returns.
        const c_mats: c.struct_Mats = try Mat.toCStructs(src);
        defer Mat.deinitCStructs(c_mats);
        _ = c.MergeMertens_Process(self.ptr, c_mats, dst.*.ptr);
    }
};

/// AlignMTB is a wrapper around the cv::AlignMTB.
pub const AlignMTB = struct {
    // Opaque handle owned by the C side; null only after deinit().
    ptr: c.AlignMTB,

    const Self = @This();

    /// NewAlignMTB returns an AlignMTB for converts images to median threshold bitmaps.
    /// of type AlignMTB converts images to median threshold bitmaps (1 for pixels
    /// brighter than median luminance and 0 otherwise) and than aligns the resulting
    /// bitmaps using bit operations.
    /// For further details, please see:
    /// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
    /// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html
    /// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga2f1fafc885a5d79dbfb3542e08db0244
    ///
    pub fn init() !Self {
        const ptr = c.AlignMTB_Create();
        return try initFromC(ptr);
    }

    /// NewAlignMTBWithParams returns an AlignMTB for converts images to median threshold bitmaps.
    /// of type AlignMTB converts images to median threshold bitmaps (1 for pixels
    /// brighter than median luminance and 0 otherwise) and than aligns the resulting
    /// bitmaps using bit operations.
    /// For further details, please see:
    /// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html
    /// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html
    /// https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga2f1fafc885a5d79dbfb3542e08db0244
    ///
    pub fn initWithParams(max_bits: i32, exclude_range: i32, cut: bool) !Self {
        const ptr = c.AlignMTB_CreateWithParams(max_bits, exclude_range, cut);
        return try initFromC(ptr);
    }

    // Wraps a raw C handle, rejecting null. (Kept pub for backward compatibility.)
    pub fn initFromC(ptr: c.AlignMTB) !Self {
        const nn_ptr = try epnn(ptr);
        return .{ .ptr = nn_ptr };
    }

    ///Close AlignMTB
    pub fn deinit(self: *Self) void {
        assert(self.ptr != null);
        c.AlignMTB_Close(self.ptr);
        self.*.ptr = null;
    }

    /// Process computes an alignment using the current AlignMTB.
    ///
    /// Caller owns the returned Mats (deinit them via the returned collection).
    ///
    /// For further details, please see:
    /// https://docs.opencv.org/master/d7/db6/classcv_1_1AlignMTB.html#a37b3417d844f362d781f34155cbcb201
    ///
    pub fn process(self: Self, src: []const Mat, allocator: std.mem.Allocator) !Mats {
        const c_mats: c.struct_Mats = try Mat.toCStructs(src);
        defer Mat.deinitCStructs(c_mats);
        // Output array is filled by the C side; must stay `var` (mutable address taken).
        var c_dst_mats: c.struct_Mats = undefined;
        _ = c.AlignMTB_Process(self.ptr, c_mats, &c_dst_mats);
        return Mat.toArrayList(c_dst_mats, allocator);
    }
};

/// DetailEnhance filter enhances the details of a particular image
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gae5930dd822c713b36f8529b21ddebd0c
///
pub fn detailEnhance(src: Mat, dst: *Mat, sigma_s: f32, sigma_r: f32) void {
    _ = c.DetailEnhance(src.ptr, dst.*.ptr, sigma_s, sigma_r);
}

/// EdgePreservingFilter filtering is the fundamental operation in image and video processing.
/// Edge-preserving smoothing filters are used in many different applications.
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gafaee2977597029bc8e35da6e67bd31f7
///
pub fn edgePreservingFilter(src: Mat, dst: *Mat, flags: EdgeFilter, sigma_s: f32, sigma_r: f32) void {
    _ = c.EdgePreservingFilter(src.ptr, dst.*.ptr, @intFromEnum(flags), sigma_s, sigma_r);
}

/// EdgePreservingFilterWithKernel is the kernel-based variant of edgePreservingFilter.
/// Edge-preserving smoothing filters are used in many different applications.
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gafaee2977597029bc8e35da6e67bd31f7
///
pub fn edgePreservingFilterWithKernel(src: Mat, dst: *Mat, kernel: Mat) void {
    _ = c.EdgePreservingFilterWithKernel(src.ptr, dst.*.ptr, kernel.ptr);
}

/// PencilSketch pencil-like non-photorealistic line drawing.
///
/// `dst1` receives the grayscale sketch, `dst2` the color sketch.
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gae5930dd822c713b36f8529b21ddebd0c
///
pub fn pencilSketch(src: Mat, dst1: *Mat, dst2: *Mat, sigma_s: f32, sigma_r: f32, shade_factor: f32) void {
    _ = c.PencilSketch(src.ptr, dst1.*.ptr, dst2.*.ptr, sigma_s, sigma_r, shade_factor);
}

/// Stylization aims to produce digital imagery with a wide variety of effects
/// not focused on photorealism. Edge-aware filters are ideal for stylization,
/// as they can abstract regions of low contrast while preserving, or enhancing,
/// high-contrast features.
///
/// For further details, please see:
/// https://docs.opencv.org/4.x/df/dac/group__photo__render.html#gacb0f7324017df153d7b5d095aed53206
///
pub fn stylization(src: Mat, dst: *Mat, sigma_s: f32, sigma_r: f32) void {
    _ = c.Stylization(src.ptr, dst.*.ptr, sigma_s, sigma_r);
}

const testing = std.testing;
const imgcodecs = @import("imgcodecs.zig");

test "photo colorchange" {
    var src = try Mat.initSize(20, 20, .cv8uc3);
    defer src.deinit();
    var dst = try Mat.initSize(20, 20, .cv8uc3);
    defer dst.deinit();
    var mask = try src.clone();
    defer mask.deinit();
    colorChange(src, mask, &dst, 1.5, 1.5, 1.5);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(@as(i32, 20), dst.rows());
    try testing.expectEqual(@as(i32, 20), dst.cols());
}

test "photo seamlessClone" {
    var src = try Mat.initSize(20, 20, .cv8uc3);
    defer src.deinit();
    var dst = try Mat.initSize(20, 20, .cv8uc3);
    defer dst.deinit();
    var mask = try src.clone();
    defer mask.deinit();
    var blend = try Mat.initSize(dst.rows(), dst.cols(), dst.getType());
    defer blend.deinit();
    // Center of dst; never mutated, so bind as const.
    const center = Point.init(@divExact(dst.rows(), 2), @divExact(dst.cols(), 2));
    seamlessClone(src, &dst, mask, center, blend, .normal_clone);
    try testing.expectEqual(false, blend.isEmpty());
    try testing.expectEqual(@as(i32, 20), dst.rows());
    try testing.expectEqual(@as(i32, 20), dst.cols());
    try testing.expectEqual(@as(i32, 20), blend.rows());
    try testing.expectEqual(@as(i32, 20), blend.cols());
}

test "photo illumination change" {
    var src = try Mat.initSize(20, 20, .cv8uc3);
    defer src.deinit();
    var mask = try src.clone();
    defer mask.deinit();
    var dst = try Mat.initSize(20, 20, .cv8uc3);
    defer dst.deinit();
    illuminationChange(src, mask, &dst, 0.2, 0.4);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(@as(i32, 20), dst.rows());
    try testing.expectEqual(@as(i32, 20), dst.cols());
}

test "photo textureFlattening" {
    var src = try Mat.initSize(20, 20, .cv8uc3);
    defer src.deinit();
    var mask = try src.clone();
    defer mask.deinit();
    var dst = try Mat.initSize(20, 20, .cv8uc3);
    defer dst.deinit();
    textureFlattening(src, mask, &dst, 30, 45, 3);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(@as(i32, 20), dst.rows());
    try testing.expectEqual(@as(i32, 20), dst.cols());
}

test "photo fastNlMeansDenoisingColoredMulti" {
    var src: [3]Mat = undefined;
    for (&src) |*s| s.* = try Mat.initSize(20, 20, .cv8uc3);
    defer for (&src) |*s| s.deinit();
    var dst = try Mat.init();
    defer dst.deinit();
    try fastNlMeansDenoisingColoredMulti(src[0..], &dst, 1, 1);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(src[0].rows(), dst.rows());
    try testing.expectEqual(src[0].cols(), dst.cols());
}

test "photo fastNlMeansDenoisingColoredMultiWithParams" {
    var src: [3]Mat = undefined;
    for (&src) |*s| s.* = try Mat.initSize(20, 20, .cv8uc3);
    defer for (&src) |*s| s.deinit();
    var dst = try Mat.init();
    defer dst.deinit();
    try fastNlMeansDenoisingColoredMultiWithParams(src[0..], &dst, 1, 1, 3, 3, 7, 21);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(src[0].rows(), dst.rows());
    try testing.expectEqual(src[0].cols(), dst.cols());
}

test "photo MergeMertens" {
    var src: [3]Mat = undefined;
    for (&src) |*s| s.* = try Mat.initSize(20, 20, .cv32fc3);
    defer for (&src) |*s| s.deinit();
    var dst = try Mat.init();
    defer dst.deinit();
    var mertens = try MergeMertens.init();
    defer mertens.deinit();
    try mertens.process(src[0..], &dst);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(src[0].rows(), dst.rows());
    try testing.expectEqual(src[0].cols(), dst.cols());
}

test "photo AlignMTB" {
    var src: [3]Mat = undefined;
    for (&src) |*s| s.* = try Mat.initSize(20, 20, .cv8uc3);
    defer for (&src) |*s| s.deinit();
    var align_mtb = try AlignMTB.init();
    defer align_mtb.deinit();
    var dst = try align_mtb.process(src[0..], testing.allocator);
    defer dst.deinit();
    try testing.expect(dst.list.items.len > 0);
    const dst0 = dst.list.items[0];
    try testing.expectEqual(false, dst0.isEmpty());
    try testing.expectEqual(src[0].rows(), dst0.rows());
    try testing.expectEqual(src[0].cols(), dst0.cols());
}

test "photo fastNlMeansDenoising" {
    var img = try imgcodecs.imRead("libs/gocv/images/face-detect.jpg", .gray_scale);
    defer img.deinit();
    try testing.expectEqual(false, img.isEmpty());
    var dst = try Mat.init();
    defer dst.deinit();
    fastNlMeansDenoising(img, &dst);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(img.rows(), dst.rows());
    try testing.expectEqual(img.cols(), dst.cols());
}

// Renamed from "photo FastNlMeansDenoisingColoredMultiWithParams": this test
// exercises fastNlMeansDenoisingWithParams, and a correctly named
// ColoredMultiWithParams test already exists above.
test "photo fastNlMeansDenoisingWithParams" {
    var img = try imgcodecs.imRead("libs/gocv/images/face-detect.jpg", .gray_scale);
    defer img.deinit();
    try testing.expectEqual(false, img.isEmpty());
    var dst = try Mat.init();
    defer dst.deinit();
    fastNlMeansDenoisingWithParams(img, &dst, 3, 7, 21);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(img.rows(), dst.rows());
    try testing.expectEqual(img.cols(), dst.cols());
}

test "photo fastNlMeansDenoisingColored" {
    var img = try imgcodecs.imRead("libs/gocv/images/face-detect.jpg", .color);
    defer img.deinit();
    try testing.expectEqual(false, img.isEmpty());
    var dst = try Mat.init();
    defer dst.deinit();
    fastNlMeansDenoisingColored(img, &dst);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(img.rows(), dst.rows());
    try testing.expectEqual(img.cols(), dst.cols());
}

test "photo fastNlMeansDenoisingColoredWithParams" {
    var img = try imgcodecs.imRead("libs/gocv/images/face-detect.jpg", .color);
    defer img.deinit();
    try testing.expectEqual(false, img.isEmpty());
    var dst = try Mat.init();
    defer dst.deinit();
    fastNlMeansDenoisingColoredWithParams(img, &dst, 3, 3, 7, 21);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(img.rows(), dst.rows());
    try testing.expectEqual(img.cols(), dst.cols());
}

test "photo detailEnhance" {
    var src = try Mat.initSize(20, 20, .cv8uc3);
    defer src.deinit();
    var dst = try Mat.init();
    defer dst.deinit();
    detailEnhance(src, &dst, 100, 0.5);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(src.rows(), dst.rows());
    try testing.expectEqual(src.cols(), dst.cols());
}

test "photo edgePreservingFilter" {
    var src = try Mat.initSize(20, 20, .cv8uc3);
    defer src.deinit();
    var dst = try Mat.init();
    defer dst.deinit();
    edgePreservingFilter(src, &dst, .recurs_filter, 100, 0.5);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(src.rows(), dst.rows());
    try testing.expectEqual(src.cols(), dst.cols());
}

test "photo pencilSketch" {
    var src = try Mat.initSize(20, 20, .cv8uc3);
    defer src.deinit();
    var dst1 = try Mat.init();
    defer dst1.deinit();
    var dst2 = try Mat.init();
    defer dst2.deinit();
    pencilSketch(src, &dst1, &dst2, 100, 0.5, 0.5);
    try testing.expectEqual(false, dst1.isEmpty());
    try testing.expectEqual(src.rows(), dst1.rows());
    try testing.expectEqual(src.cols(), dst1.cols());
    try testing.expectEqual(false, dst2.isEmpty());
    try testing.expectEqual(src.rows(), dst2.rows());
    try testing.expectEqual(src.cols(), dst2.cols());
}

test "photo stylization" {
    var src = try Mat.initSize(20, 20, .cv8uc3);
    defer src.deinit();
    var dst = try Mat.init();
    defer dst.deinit();
    stylization(src, &dst, 100, 0.5);
    try testing.expectEqual(false, dst.isEmpty());
    try testing.expectEqual(src.rows(), dst.rows());
    try testing.expectEqual(src.cols(), dst.cols());
}

//* implementation done
//* pub const MergeMertens = ?*anyopaque;
//* pub const AlignMTB = ?*anyopaque;
//* pub extern fn ColorChange(src: Mat, mask: Mat, dst: Mat, red_mul: f32, green_mul: f32, blue_mul: f32) void;
//* pub extern fn SeamlessClone(src: Mat, dst: Mat, mask: Mat, p: Point, blend: Mat, flags: c_int) void;
//* pub extern fn IlluminationChange(src: Mat, mask: Mat, dst: Mat, alpha: f32, beta: f32) void;
//* pub extern fn TextureFlattening(src: Mat, mask: Mat, dst: Mat, low_threshold: f32, high_threshold: f32, kernel_size: c_int) void;
//* pub extern fn FastNlMeansDenoisingColoredMulti(src: struct_Mats, dst: Mat, imgToDenoiseIndex: c_int, temporalWindowSize: c_int) void;
//* pub extern fn FastNlMeansDenoisingColoredMultiWithParams(src: struct_Mats, dst: Mat, imgToDenoiseIndex: c_int, temporalWindowSize: c_int, h: f32, hColor: f32, templateWindowSize: c_int, searchWindowSize: c_int) void;
//* pub extern fn FastNlMeansDenoising(src: Mat, dst: Mat) void;
//* pub extern fn FastNlMeansDenoisingWithParams(src: Mat, dst: Mat, h: f32, templateWindowSize: c_int, searchWindowSize: c_int) void;
//* pub extern fn FastNlMeansDenoisingColored(src: Mat, dst: Mat) void;
//* pub extern fn FastNlMeansDenoisingColoredWithParams(src: Mat, dst: Mat, h: f32, hColor: f32, templateWindowSize: c_int, searchWindowSize: c_int) void;
//* pub extern fn MergeMertens_Create(...) MergeMertens;
//* pub extern fn MergeMertens_CreateWithParams(contrast_weight: f32, saturation_weight: f32, exposure_weight: f32) MergeMertens;
//* pub extern fn MergeMertens_Process(b: MergeMertens, src: struct_Mats, dst: Mat) void;
//* pub extern fn MergeMertens_Close(b: MergeMertens) void;
//* pub extern fn AlignMTB_Create(...) AlignMTB;
//* pub extern fn AlignMTB_CreateWithParams(max_bits: c_int, exclude_range: c_int, cut: bool) AlignMTB;
//* pub extern fn AlignMTB_Process(b: AlignMTB, src: struct_Mats, dst: [*c]struct_Mats) void;
//* pub extern fn AlignMTB_Close(b: AlignMTB) void;
//* pub extern fn DetailEnhance(src: Mat, dst: Mat, sigma_s: f32, sigma_r: f32) void;
//* pub extern fn EdgePreservingFilter(src: Mat, dst: Mat, filter: c_int, sigma_s: f32, sigma_r: f32) void;
//* pub extern fn PencilSketch(src: Mat, dst1: Mat, dst2: Mat, sigma_s: f32, sigma_r: f32, shade_factor: f32) void;
//* pub extern fn Stylization(src: Mat, dst: Mat, sigma_s: f32, sigma_r: f32) void;
// https://raw.githubusercontent.com/zigcv/zigcv/4275af100b552d64266cf4466cd5b6b33e630e2d/src/photo.zig
// wasi_snapshot_preview1 spec available (in witx format) here: // * typenames -- https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/typenames.witx // * module -- https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/wasi_snapshot_preview1.witx const std = @import("std"); const assert = std.debug.assert; comptime { assert(@alignOf(i8) == 1); assert(@alignOf(u8) == 1); assert(@alignOf(i16) == 2); assert(@alignOf(u16) == 2); assert(@alignOf(i32) == 4); assert(@alignOf(u32) == 4); // assert(@alignOf(i64) == 8); // assert(@alignOf(u64) == 8); } pub const iovec_t = std.os.iovec; pub const ciovec_t = std.os.iovec_const; pub extern "wasi_snapshot_preview1" fn args_get(argv: [*][*:0]u8, argv_buf: [*]u8) errno_t; pub extern "wasi_snapshot_preview1" fn args_sizes_get(argc: *usize, argv_buf_size: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn clock_res_get(clock_id: clockid_t, resolution: *timestamp_t) errno_t; pub extern "wasi_snapshot_preview1" fn clock_time_get(clock_id: clockid_t, precision: timestamp_t, timestamp: *timestamp_t) errno_t; pub extern "wasi_snapshot_preview1" fn environ_get(environ: [*][*:0]u8, environ_buf: [*]u8) errno_t; pub extern "wasi_snapshot_preview1" fn environ_sizes_get(environ_count: *usize, environ_buf_size: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_advise(fd: fd_t, offset: filesize_t, len: filesize_t, advice: advice_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_allocate(fd: fd_t, offset: filesize_t, len: filesize_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_close(fd: fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_datasync(fd: fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_pread(fd: fd_t, iovs: [*]const iovec_t, iovs_len: usize, offset: filesize_t, nread: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_pwrite(fd: fd_t, iovs: [*]const ciovec_t, iovs_len: usize, offset: filesize_t, nwritten: *usize) errno_t; pub extern "wasi_snapshot_preview1" 
fn fd_read(fd: fd_t, iovs: [*]const iovec_t, iovs_len: usize, nread: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_readdir(fd: fd_t, buf: [*]u8, buf_len: usize, cookie: dircookie_t, bufused: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_renumber(from: fd_t, to: fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_seek(fd: fd_t, offset: filedelta_t, whence: whence_t, newoffset: *filesize_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_sync(fd: fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_tell(fd: fd_t, newoffset: *filesize_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_write(fd: fd_t, iovs: [*]const ciovec_t, iovs_len: usize, nwritten: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_fdstat_get(fd: fd_t, buf: *fdstat_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_fdstat_set_flags(fd: fd_t, flags: fdflags_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_fdstat_set_rights(fd: fd_t, fs_rights_base: rights_t, fs_rights_inheriting: rights_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_filestat_get(fd: fd_t, buf: *filestat_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_filestat_set_size(fd: fd_t, st_size: filesize_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_filestat_set_times(fd: fd_t, st_atim: timestamp_t, st_mtim: timestamp_t, fstflags: fstflags_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_prestat_get(fd: fd_t, buf: *prestat_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_prestat_dir_name(fd: fd_t, path: [*]u8, path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_create_directory(fd: fd_t, path: [*]const u8, path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_filestat_get(fd: fd_t, flags: lookupflags_t, path: [*]const u8, path_len: usize, buf: *filestat_t) errno_t; pub extern "wasi_snapshot_preview1" fn path_filestat_set_times(fd: fd_t, flags: lookupflags_t, path: [*]const u8, path_len: usize, st_atim: timestamp_t, 
st_mtim: timestamp_t, fstflags: fstflags_t) errno_t; pub extern "wasi_snapshot_preview1" fn path_link(old_fd: fd_t, old_flags: lookupflags_t, old_path: [*]const u8, old_path_len: usize, new_fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_open(dirfd: fd_t, dirflags: lookupflags_t, path: [*]const u8, path_len: usize, oflags: oflags_t, fs_rights_base: rights_t, fs_rights_inheriting: rights_t, fs_flags: fdflags_t, fd: *fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn path_readlink(fd: fd_t, path: [*]const u8, path_len: usize, buf: [*]u8, buf_len: usize, bufused: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_remove_directory(fd: fd_t, path: [*]const u8, path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_rename(old_fd: fd_t, old_path: [*]const u8, old_path_len: usize, new_fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_symlink(old_path: [*]const u8, old_path_len: usize, fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_unlink_file(fd: fd_t, path: [*]const u8, path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn poll_oneoff(in: *const subscription_t, out: *event_t, nsubscriptions: usize, nevents: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn proc_exit(rval: exitcode_t) noreturn; pub extern "wasi_snapshot_preview1" fn random_get(buf: [*]u8, buf_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn sched_yield() errno_t; pub extern "wasi_snapshot_preview1" fn sock_recv(sock: fd_t, ri_data: *const iovec_t, ri_data_len: usize, ri_flags: riflags_t, ro_datalen: *usize, ro_flags: *roflags_t) errno_t; pub extern "wasi_snapshot_preview1" fn sock_send(sock: fd_t, si_data: *const ciovec_t, si_data_len: usize, si_flags: siflags_t, so_datalen: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn sock_shutdown(sock: fd_t, how: sdflags_t) 
errno_t;

/// Get the errno from a syscall return value, or 0 for no error.
pub fn getErrno(r: errno_t) errno_t {
    return r;
}

// Standard stream descriptors.
pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;

pub const mode_t = u32;

// match https://github.com/CraneStation/wasi-libc
pub const time_t = i64;

/// POSIX-style seconds + nanoseconds pair, convertible to and from the
/// WASI nanosecond `timestamp_t`.
pub const timespec = struct {
    tv_sec: time_t,
    tv_nsec: isize,

    /// Split a nanosecond timestamp into whole seconds and the remainder.
    pub fn fromTimestamp(tm: timestamp_t) timespec {
        const secs: timestamp_t = tm / 1_000_000_000;
        const nanos = tm - secs * 1_000_000_000;
        return timespec{
            .tv_sec = @intCast(time_t, secs),
            .tv_nsec = @intCast(isize, nanos),
        };
    }

    /// Recombine seconds and nanoseconds into one nanosecond timestamp.
    pub fn toTimestamp(ts: timespec) timestamp_t {
        return @intCast(timestamp_t, ts.tv_sec * 1_000_000_000) +
            @intCast(timestamp_t, ts.tv_nsec);
    }
};

/// POSIX-flavoured stat structure built from a WASI `filestat_t`.
/// Note that WASI has no `mode` concept, so `mode` is always 0 here.
pub const Stat = struct {
    dev: device_t,
    ino: inode_t,
    mode: mode_t,
    filetype: filetype_t,
    nlink: linkcount_t,
    size: filesize_t,
    atim: timespec,
    mtim: timespec,
    ctim: timespec,

    const Self = @This();

    /// Translate a raw WASI `filestat_t` (nanosecond timestamps) into a `Stat`.
    pub fn fromFilestat(stat: filestat_t) Self {
        return Self{
            .dev = stat.dev,
            .ino = stat.ino,
            .mode = 0,
            .filetype = stat.filetype,
            .nlink = stat.nlink,
            .size = stat.size,
            .atim = stat.atime(),
            .mtim = stat.mtime(),
            .ctim = stat.ctime(),
        };
    }

    pub fn atime(self: Self) timespec {
        return self.atim;
    }

    pub fn mtime(self: Self) timespec {
        return self.mtim;
    }

    pub fn ctime(self: Self) timespec {
        return self.ctim;
    }
};

pub const IOV_MAX = 1024;

pub const AT = struct {
    pub const REMOVEDIR: u32 = 0x4;
    pub const FDCWD: fd_t = -2;
};

// As defined in the wasi_snapshot_preview1 spec file:
// https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/typenames.witx
pub const advice_t = u8;
pub const ADVICE_NORMAL: advice_t = 0;
pub const ADVICE_SEQUENTIAL: advice_t = 1;
pub const ADVICE_RANDOM: advice_t = 2;
pub const ADVICE_WILLNEED: advice_t = 3;
pub const ADVICE_DONTNEED: advice_t = 4;
pub const ADVICE_NOREUSE: advice_t = 5;

pub const clockid_t = u32;
pub const CLOCK = struct {
    pub const REALTIME: clockid_t = 0;
    pub const MONOTONIC: clockid_t = 1;
    pub const PROCESS_CPUTIME_ID: clockid_t = 2;
    pub const THREAD_CPUTIME_ID: clockid_t = 3;
};

pub const device_t = u64;

pub const dircookie_t = u64;
pub const DIRCOOKIE_START: dircookie_t = 0;

pub const dirnamlen_t = u32;

/// Directory entry header as returned by `fd_readdir`; the name bytes follow it.
pub const dirent_t = extern struct {
    d_next: dircookie_t,
    d_ino: inode_t,
    d_namlen: dirnamlen_t,
    d_type: filetype_t,
};

/// WASI error codes. Values match the witx `$errno` enumeration.
pub const errno_t = enum(u16) {
    SUCCESS = 0,
    @"2BIG" = 1,
    ACCES = 2,
    ADDRINUSE = 3,
    ADDRNOTAVAIL = 4,
    AFNOSUPPORT = 5,
    /// This is also the error code used for `WOULDBLOCK`.
    AGAIN = 6,
    ALREADY = 7,
    BADF = 8,
    BADMSG = 9,
    BUSY = 10,
    CANCELED = 11,
    CHILD = 12,
    CONNABORTED = 13,
    CONNREFUSED = 14,
    CONNRESET = 15,
    DEADLK = 16,
    DESTADDRREQ = 17,
    DOM = 18,
    DQUOT = 19,
    EXIST = 20,
    FAULT = 21,
    FBIG = 22,
    HOSTUNREACH = 23,
    IDRM = 24,
    ILSEQ = 25,
    INPROGRESS = 26,
    INTR = 27,
    INVAL = 28,
    IO = 29,
    ISCONN = 30,
    ISDIR = 31,
    LOOP = 32,
    MFILE = 33,
    MLINK = 34,
    MSGSIZE = 35,
    MULTIHOP = 36,
    NAMETOOLONG = 37,
    NETDOWN = 38,
    NETRESET = 39,
    NETUNREACH = 40,
    NFILE = 41,
    NOBUFS = 42,
    NODEV = 43,
    NOENT = 44,
    NOEXEC = 45,
    NOLCK = 46,
    NOLINK = 47,
    NOMEM = 48,
    NOMSG = 49,
    NOPROTOOPT = 50,
    NOSPC = 51,
    NOSYS = 52,
    NOTCONN = 53,
    NOTDIR = 54,
    NOTEMPTY = 55,
    NOTRECOVERABLE = 56,
    NOTSOCK = 57,
    /// This is also the code used for `NOTSUP`.
    OPNOTSUPP = 58,
    NOTTY = 59,
    NXIO = 60,
    OVERFLOW = 61,
    OWNERDEAD = 62,
    PERM = 63,
    PIPE = 64,
    PROTO = 65,
    PROTONOSUPPORT = 66,
    PROTOTYPE = 67,
    RANGE = 68,
    ROFS = 69,
    SPIPE = 70,
    SRCH = 71,
    STALE = 72,
    TIMEDOUT = 73,
    TXTBSY = 74,
    XDEV = 75,
    NOTCAPABLE = 76,
    _,
};
pub const E = errno_t;

/// Event returned by `poll_oneoff`.
pub const event_t = extern struct {
    userdata: userdata_t,
    @"error": errno_t,
    @"type": eventtype_t,
    fd_readwrite: eventfdreadwrite_t,
};

pub const eventfdreadwrite_t = extern struct {
    nbytes: filesize_t,
    flags: eventrwflags_t,
};

pub const eventrwflags_t = u16;
pub const EVENT_FD_READWRITE_HANGUP: eventrwflags_t = 0x0001;

pub const eventtype_t = u8;
pub const EVENTTYPE_CLOCK: eventtype_t = 0;
pub const EVENTTYPE_FD_READ: eventtype_t = 1;
pub const EVENTTYPE_FD_WRITE: eventtype_t = 2;

pub const exitcode_t = u32;

pub const fd_t = i32;

pub const fdflags_t = u16;
pub const FDFLAG = struct {
    pub const APPEND: fdflags_t = 0x0001;
    pub const DSYNC: fdflags_t = 0x0002;
    pub const NONBLOCK: fdflags_t = 0x0004;
    pub const RSYNC: fdflags_t = 0x0008;
    pub const SYNC: fdflags_t = 0x0010;
};

pub const fdstat_t = extern struct {
    fs_filetype: filetype_t,
    fs_flags: fdflags_t,
    fs_rights_base: rights_t,
    fs_rights_inheriting: rights_t,
};

pub const filedelta_t = i64;

pub const filesize_t = u64;

/// Raw WASI file status; timestamps are nanoseconds since the epoch.
pub const filestat_t = extern struct {
    dev: device_t,
    ino: inode_t,
    filetype: filetype_t,
    nlink: linkcount_t,
    size: filesize_t,
    atim: timestamp_t,
    mtim: timestamp_t,
    ctim: timestamp_t,

    pub fn atime(self: filestat_t) timespec {
        return timespec.fromTimestamp(self.atim);
    }

    pub fn mtime(self: filestat_t) timespec {
        return timespec.fromTimestamp(self.mtim);
    }

    pub fn ctime(self: filestat_t) timespec {
        return timespec.fromTimestamp(self.ctim);
    }
};

/// Also known as `FILETYPE`.
pub const filetype_t = enum(u8) {
    UNKNOWN,
    BLOCK_DEVICE,
    CHARACTER_DEVICE,
    DIRECTORY,
    REGULAR_FILE,
    SOCKET_DGRAM,
    SOCKET_STREAM,
    SYMBOLIC_LINK,
    _,
};

pub const fstflags_t = u16;
pub const FILESTAT_SET_ATIM: fstflags_t = 0x0001;
pub const FILESTAT_SET_ATIM_NOW: fstflags_t = 0x0002;
pub const FILESTAT_SET_MTIM: fstflags_t = 0x0004;
pub const FILESTAT_SET_MTIM_NOW: fstflags_t = 0x0008;

pub const inode_t = u64;
pub const ino_t = inode_t;

pub const linkcount_t = u64;

pub const lookupflags_t = u32;
pub const LOOKUP_SYMLINK_FOLLOW: lookupflags_t = 0x00000001;

pub const oflags_t = u16;
pub const O = struct {
    pub const CREAT: oflags_t = 0x0001;
    pub const DIRECTORY: oflags_t = 0x0002;
    pub const EXCL: oflags_t = 0x0004;
    pub const TRUNC: oflags_t = 0x0008;
};

pub const preopentype_t = u8;
pub const PREOPENTYPE_DIR: preopentype_t = 0;

/// Description of a preopened capability, as reported by `fd_prestat_get`.
pub const prestat_t = extern struct {
    pr_type: preopentype_t,
    u: prestat_u_t,
};

pub const prestat_dir_t = extern struct {
    pr_name_len: usize,
};

pub const prestat_u_t = extern union {
    dir: prestat_dir_t,
};

pub const riflags_t = u16;
pub const roflags_t = u16;
pub const SOCK = struct {
    pub const RECV_PEEK: riflags_t = 0x0001;
    pub const RECV_WAITALL: riflags_t = 0x0002;
    pub const RECV_DATA_TRUNCATED: roflags_t = 0x0001;
};

pub const rights_t = u64;
/// Capability bits checked by the WASI runtime on each fd/path operation.
pub const RIGHT = struct {
    pub const FD_DATASYNC: rights_t = 0x0000000000000001;
    pub const FD_READ: rights_t = 0x0000000000000002;
    pub const FD_SEEK: rights_t = 0x0000000000000004;
    pub const FD_FDSTAT_SET_FLAGS: rights_t = 0x0000000000000008;
    pub const FD_SYNC: rights_t = 0x0000000000000010;
    pub const FD_TELL: rights_t = 0x0000000000000020;
    pub const FD_WRITE: rights_t = 0x0000000000000040;
    pub const FD_ADVISE: rights_t = 0x0000000000000080;
    pub const FD_ALLOCATE: rights_t = 0x0000000000000100;
    pub const PATH_CREATE_DIRECTORY: rights_t = 0x0000000000000200;
    pub const PATH_CREATE_FILE: rights_t = 0x0000000000000400;
    pub const PATH_LINK_SOURCE: rights_t = 0x0000000000000800;
    pub const PATH_LINK_TARGET: rights_t = 0x0000000000001000;
    pub const PATH_OPEN: rights_t = 0x0000000000002000;
    pub const FD_READDIR: rights_t = 0x0000000000004000;
    pub const PATH_READLINK: rights_t = 0x0000000000008000;
    pub const PATH_RENAME_SOURCE: rights_t = 0x0000000000010000;
    pub const PATH_RENAME_TARGET: rights_t = 0x0000000000020000;
    pub const PATH_FILESTAT_GET: rights_t = 0x0000000000040000;
    pub const PATH_FILESTAT_SET_SIZE: rights_t = 0x0000000000080000;
    pub const PATH_FILESTAT_SET_TIMES: rights_t = 0x0000000000100000;
    pub const FD_FILESTAT_GET: rights_t = 0x0000000000200000;
    pub const FD_FILESTAT_SET_SIZE: rights_t = 0x0000000000400000;
    pub const FD_FILESTAT_SET_TIMES: rights_t = 0x0000000000800000;
    pub const PATH_SYMLINK: rights_t = 0x0000000001000000;
    pub const PATH_REMOVE_DIRECTORY: rights_t = 0x0000000002000000;
    pub const PATH_UNLINK_FILE: rights_t = 0x0000000004000000;
    pub const POLL_FD_READWRITE: rights_t = 0x0000000008000000;
    pub const SOCK_SHUTDOWN: rights_t = 0x0000000010000000;
    /// Union of every defined right.
    pub const ALL: rights_t = FD_DATASYNC |
        FD_READ |
        FD_SEEK |
        FD_FDSTAT_SET_FLAGS |
        FD_SYNC |
        FD_TELL |
        FD_WRITE |
        FD_ADVISE |
        FD_ALLOCATE |
        PATH_CREATE_DIRECTORY |
        PATH_CREATE_FILE |
        PATH_LINK_SOURCE |
        PATH_LINK_TARGET |
        PATH_OPEN |
        FD_READDIR |
        PATH_READLINK |
        PATH_RENAME_SOURCE |
        PATH_RENAME_TARGET |
        PATH_FILESTAT_GET |
        PATH_FILESTAT_SET_SIZE |
        PATH_FILESTAT_SET_TIMES |
        FD_FILESTAT_GET |
        FD_FILESTAT_SET_SIZE |
        FD_FILESTAT_SET_TIMES |
        PATH_SYMLINK |
        PATH_REMOVE_DIRECTORY |
        PATH_UNLINK_FILE |
        POLL_FD_READWRITE |
        SOCK_SHUTDOWN;
};

pub const sdflags_t = u8;
pub const SHUT = struct {
    pub const RD: sdflags_t = 0x01;
    pub const WR: sdflags_t = 0x02;
};

pub const siflags_t = u16;

pub const signal_t = u8;
pub const SIGNONE: signal_t = 0;
pub const SIGHUP: signal_t = 1;
pub const SIGINT: signal_t = 2;
pub const SIGQUIT: signal_t = 3;
pub const SIGILL: signal_t = 4;
pub const SIGTRAP: signal_t = 5;
pub const SIGABRT: signal_t = 6;
pub const SIGBUS: signal_t = 7;
pub const SIGFPE: signal_t = 8;
pub const SIGKILL: signal_t = 9;
pub const SIGUSR1: signal_t = 10;
pub const SIGSEGV: signal_t = 11;
pub const SIGUSR2: signal_t = 12;
pub const SIGPIPE: signal_t = 13;
pub const SIGALRM: signal_t = 14;
pub const SIGTERM: signal_t = 15;
pub const SIGCHLD: signal_t = 16;
pub const SIGCONT: signal_t = 17;
pub const SIGSTOP: signal_t = 18;
pub const SIGTSTP: signal_t = 19;
pub const SIGTTIN: signal_t = 20;
pub const SIGTTOU: signal_t = 21;
pub const SIGURG: signal_t = 22;
pub const SIGXCPU: signal_t = 23;
pub const SIGXFSZ: signal_t = 24;
pub const SIGVTALRM: signal_t = 25;
pub const SIGPROF: signal_t = 26;
pub const SIGWINCH: signal_t = 27;
pub const SIGPOLL: signal_t = 28;
pub const SIGPWR: signal_t = 29;
pub const SIGSYS: signal_t = 30;

pub const subclockflags_t = u16;
pub const SUBSCRIPTION_CLOCK_ABSTIME: subclockflags_t = 0x0001;

/// Subscription passed to `poll_oneoff`.
pub const subscription_t = extern struct {
    userdata: userdata_t,
    u: subscription_u_t,
};

pub const subscription_clock_t = extern struct {
    id: clockid_t,
    timeout: timestamp_t,
    precision: timestamp_t,
    flags: subclockflags_t,
};

pub const subscription_fd_readwrite_t = extern struct {
    fd: fd_t,
};

pub const subscription_u_t = extern struct {
    tag: eventtype_t,
    u: subscription_u_u_t,
};

pub const subscription_u_u_t = extern union {
    clock: subscription_clock_t,
    fd_read: subscription_fd_readwrite_t,
    fd_write: subscription_fd_readwrite_t,
};

pub const timestamp_t = u64;

pub const userdata_t = u64;

/// Also known as `WHENCE`.
pub const whence_t = enum(u8) { SET, CUR, END };

pub const S = struct {
    pub const IEXEC = @compileError("TODO audit this");
    pub const IFBLK = 0x6000;
    pub const IFCHR = 0x2000;
    pub const IFDIR = 0x4000;
    pub const IFIFO = 0xc000;
    pub const IFLNK = 0xa000;
    pub const IFMT = IFBLK | IFCHR | IFDIR | IFIFO | IFLNK | IFREG | IFSOCK;
    pub const IFREG = 0x8000;
    // WASI has no concept of UNIX domain sockets; this value exists only to
    // line up with the constants other OS layers define.
    pub const IFSOCK = 0x1;
};

pub const LOCK = struct {
    pub const SH = 0x1;
    pub const EX = 0x2;
    pub const NB = 0x4;
    pub const UN = 0x8;
};
https://raw.githubusercontent.com/natanalt/zig-x86_16/1b38fc3ef5e539047c76604ffe71b81e246f1a1e/lib/std/os/wasi.zig
const std = @import("std");
const clamp = std.math.clamp;
const zig_ui = @import("../zig_ui.zig");
const vec2 = zig_ui.vec2;
const vec4 = zig_ui.vec4;
const UI = @import("UI.zig");
const Node = UI.Node;
const Axis = UI.Axis;

/// Run the full layout pipeline (sizes, constraint solving, final positions)
/// over the tree rooted at `root`. Each pass runs once per axis.
pub fn layoutTree(self: *UI, root: *Node) void {
    solveIndependentSizes(self, root);
    solveDownwardDependent(self, root);
    solveUpwardDependent(self, root);
    solveViolations(self, root);
    solveFinalPos(self, root);
}

fn solveIndependentSizes(self: *UI, node: *Node) void {
    layoutRecurseHelperPre(solveIndependentSizesWorkFn, .{ .self = self, .node = node, .axis = .x });
    layoutRecurseHelperPre(solveIndependentSizesWorkFn, .{ .self = self, .node = node, .axis = .y });
}

fn solveDownwardDependent(self: *UI, node: *Node) void {
    // Post-order: children must be sized before a `.children`-sized parent.
    layoutRecurseHelperPost(solveDownwardDependentWorkFn, .{ .self = self, .node = node, .axis = .x });
    layoutRecurseHelperPost(solveDownwardDependentWorkFn, .{ .self = self, .node = node, .axis = .y });
}

fn solveUpwardDependent(self: *UI, node: *Node) void {
    // Pre-order: a `.percent` child needs its parent's size first.
    layoutRecurseHelperPre(solveUpwardDependentWorkFn, .{ .self = self, .node = node, .axis = .x });
    layoutRecurseHelperPre(solveUpwardDependentWorkFn, .{ .self = self, .node = node, .axis = .y });
}

fn solveViolations(self: *UI, node: *Node) void {
    layoutRecurseHelperPre(solveViolationsWorkFn, .{ .self = self, .node = node, .axis = .x });
    layoutRecurseHelperPre(solveViolationsWorkFn, .{ .self = self, .node = node, .axis = .y });
}

fn solveFinalPos(self: *UI, node: *Node) void {
    layoutRecurseHelperPre(solveFinalPosWorkFn, .{ .self = self, .node = node, .axis = .x });
    layoutRecurseHelperPre(solveFinalPosWorkFn, .{ .self = self, .node = node, .axis = .y });
}

fn solveIndependentSizesWorkFn(_: *UI, node: *Node, axis: Axis) void {
    const idx: usize = @intFromEnum(axis);
    switch (node.size[idx]) {
        .pixels => |pixels| node.calc_size[idx] = pixels.value,
        // For `.percent` this text-based value is only an upper bound (the real
        // value is computed later in the upward-dependent pass), but a bound is
        // needed by "downward dependent" parents that have `.percent` children.
        .percent,
        .text,
        => {
            const text_size = node.text_rect.size();
            node.calc_size[idx] = text_size[idx] + 2 * node.inner_padding[idx];
        },
        else => {},
    }
}

/// Sum of every child's size (incl. outer padding) along axis `idx`.
fn sumChildrenSizes(parent: *Node, idx: usize) f32 {
    var total: f32 = 0;
    var child = parent.first;
    while (child) |child_node| : (child = child_node.next) {
        total += child_node.calc_size[idx] + 2 * child_node.outer_padding[idx];
    }
    return total;
}

/// Largest child size (incl. outer padding) along axis `idx`.
fn maxChildrenSizes(parent: *Node, idx: usize) f32 {
    var largest: f32 = 0;
    var child = parent.first;
    while (child) |child_node| : (child = child_node.next) {
        const child_size = switch (child_node.size[idx]) {
            // NOTE(review): the original code branched on whether `idx` is the
            // child's layout axis, but both branches called `sumChildrenSizes`;
            // the collapse below preserves that behavior exactly. The
            // non-layout-axis case possibly intended a max — confirm upstream.
            .percent => sumChildrenSizes(child_node, idx),
            else => child_node.calc_size[idx] + 2 * child_node.outer_padding[idx],
        };
        largest = @max(largest, child_size);
    }
    return largest;
}

fn solveDownwardDependentWorkFn(_: *UI, node: *Node, axis: Axis) void {
    const idx: usize = @intFromEnum(axis);
    switch (node.size[idx]) {
        .children => {
            // Along the layout axis children stack (sum); across it they overlap (max).
            node.calc_size[idx] = if (axis == node.layout_axis)
                sumChildrenSizes(node, idx)
            else
                maxChildrenSizes(node, idx);
            node.calc_size[idx] += 2 * node.inner_padding[idx];
        },
        else => {},
    }
}

fn solveUpwardDependentWorkFn(self: *UI, node: *Node, axis: Axis) void {
    const idx: usize = @intFromEnum(axis);
    switch (node.size[idx]) {
        .percent => |percent| {
            // Percent is relative to the parent's inner (padding-less) size,
            // or to the screen for parentless nodes.
            const parent_size = if (node.parent) |p|
                p.calc_size - vec2{ 2, 2 } * p.inner_padding
            else
                self.screen_size;
            node.calc_size[idx] = parent_size[idx] * percent.value - 2 * node.outer_padding[idx];
        },
        else => {},
    }
}

fn solveViolationsWorkFn(self: *UI, node: *Node, axis: Axis) void {
    if (node.child_count == 0) return;
    const idx: usize = @intFromEnum(axis);
    const is_layout_axis = (axis == node.layout_axis);
    const arena = self.build_arena.allocator();

    const available_size = node.calc_size - vec2{ 2, 2 } * node.inner_padding;

    // Collect sizing information about the non-floating children, splitting
    // them into fully flexible (strictness 0) and partially flexible groups.
    var total_children_size: f32 = 0;
    var max_child_size: f32 = 0;
    var zero_strict_take_budget: f32 = 0;
    var other_children_leeway: f32 = 0;
    var zero_strict_children = std.ArrayList(*Node).initCapacity(arena, node.child_count) catch @panic("too many children");
    var other_children = std.ArrayList(*Node).initCapacity(arena, node.child_count) catch @panic("too many children");
    var child = node.first;
    while (child) |child_node| : (child = child_node.next) {
        const is_floating = switch (axis) {
            .x => child_node.flags.floating_x,
            .y => child_node.flags.floating_y,
        };
        if (is_floating) continue;
        const strictness = child_node.size[idx].getStrictness();
        const child_size = child_node.calc_size[idx] + 2 * child_node.outer_padding[idx];
        total_children_size += child_size;
        max_child_size = @max(max_child_size, child_size);
        if (strictness == 0) {
            zero_strict_take_budget += child_size;
            zero_strict_children.append(child_node) catch unreachable;
        } else {
            other_children_leeway += (1 - strictness);
            other_children.append(child_node) catch unreachable;
        }
    }

    const total_size = if (is_layout_axis) total_children_size else max_child_size;
    var overflow = @max(0, total_size - available_size[idx]);

    // Shrink zero-strictness children as much as we can (to 0 if needed)
    // before touching children with strictness > 0.
    const zero_strict_remove_amount = @min(overflow, zero_strict_take_budget);
    for (zero_strict_children.items) |z_child| {
        const z_child_size = z_child.calc_size[idx];
        if (is_layout_axis) {
            // Take from each child proportionally to its share of the budget.
            const z_child_percent = z_child_size / zero_strict_take_budget;
            z_child.calc_size[idx] -= zero_strict_remove_amount * z_child_percent;
        } else {
            const extra_size = z_child_size - available_size[idx];
            z_child.calc_size[idx] -= @max(0, extra_size);
        }
    }
    overflow -= zero_strict_remove_amount;

    // If overflow remains, shrink the other children proportionally to their
    // leeway (1 - strictness): the least strict child shrinks the most.
    if (overflow > 0) {
        var removed_amount: f32 = 0;
        for (other_children.items) |child_node| {
            const strictness = child_node.size[idx].getStrictness();
            if (strictness == 1) continue;
            const child_size = child_node.calc_size[idx];
            const child_take_budget = child_size * strictness;
            const leeway_percent = (1 - strictness) / other_children_leeway;
            const desired_remove_amount = if (is_layout_axis)
                overflow * leeway_percent
            else
                @max(0, child_size - available_size[idx]);
            const true_remove_amount = @min(child_take_budget, desired_remove_amount);
            child_node.calc_size[idx] -= true_remove_amount;
            removed_amount += true_remove_amount;
        }
        overflow -= removed_amount;
        // If overflow went negative we removed too much somewhere.
        std.debug.assert(overflow >= 0);
    }

    // Constrain scrolling to the children's extent, i.e. never scroll past
    // what actually exists.
    node.scroll_offset[idx] = switch (axis) {
        .x => clamp(node.scroll_offset[idx], -overflow, 0),
        .y => clamp(node.scroll_offset[idx], 0, overflow),
    };
}

fn solveFinalPosWorkFn(self: *UI, node: *Node, axis: Axis) void {
    const idx: usize = @intFromEnum(axis);
    const is_layout_axis = (axis == node.layout_axis);
    const is_scrollable_axis = switch (axis) {
        .x => node.flags.scroll_children_x,
        .y => node.flags.scroll_children_y,
    };

    // Window root nodes have no parent to place them, so position them here.
    if (node.parent == null) {
        const calc_rel_pos = node.rel_pos.calcRelativePos(node.calc_size, self.screen_size);
        node.calc_rel_pos[idx] = calc_rel_pos[idx];
        node.rect.min[idx] = node.calc_rel_pos[idx];
        node.rect.max[idx] = node.calc_rel_pos[idx] + node.calc_size[idx];
        node.clip_rect = node.rect;
    }

    if (node.child_count == 0) return;

    // Layout starts at the top-left (note: y starts at the top edge, i.e. the
    // node's full height minus padding, and grows downward via subtraction).
    var start_rel_pos: f32 = switch (axis) {
        .x => node.inner_padding[0],
        .y => node.calc_size[1] - node.inner_padding[1],
    };
    // When `scroll_children` is enabled the layout starts at an offset.
    if (is_scrollable_axis) start_rel_pos += node.scroll_offset[idx];

    // Position every child.
    var rel_pos: f32 = start_rel_pos;
    var child = node.first;
    while (child) |child_node| : (child = child_node.next) {
        const is_floating = switch (axis) {
            .x => child_node.flags.floating_x,
            .y => child_node.flags.floating_y,
        };
        if (is_floating) {
            // Floating children position themselves relative to the parent.
            const calc_rel_pos = child_node.rel_pos.calcRelativePos(child_node.calc_size, node.calc_size);
            child_node.calc_rel_pos[idx] = calc_rel_pos[idx];
            continue;
        }
        if (is_layout_axis) {
            // Stack children one after the other along the layout axis.
            const advance = child_node.calc_size[idx] + 2 * child_node.outer_padding[idx];
            switch (axis) {
                .x => {
                    child_node.calc_rel_pos[idx] = rel_pos;
                    rel_pos += advance;
                },
                .y => {
                    rel_pos -= advance;
                    child_node.calc_rel_pos[idx] = rel_pos;
                },
            }
        } else {
            // Across the layout axis, place each child according to its alignment.
            child_node.calc_rel_pos[idx] = start_rel_pos;
            const parent_size = node.calc_size[idx] + 2 * node.inner_padding[idx];
            const child_size = child_node.calc_size[idx] + 2 * child_node.outer_padding[idx];
            child_node.calc_rel_pos[idx] += switch (axis) {
                .x => switch (child_node.alignment) {
                    .start => 0,
                    .center => (parent_size - child_size) / 2,
                    .end => parent_size - child_size,
                },
                .y => -switch (child_node.alignment) {
                    .start => child_size,
                    .center => (parent_size / 2) + (child_size / 2),
                    .end => parent_size,
                },
            };
        }
    }

    // Turn the relative positions into final screen-space pixel rects.
    child = node.first;
    while (child) |child_node| : (child = child_node.next) {
        child_node.rect.min[idx] = node.rect.min[idx] + child_node.calc_rel_pos[idx] + child_node.outer_padding[idx];
        child_node.rect.max[idx] = child_node.rect.min[idx] + child_node.calc_size[idx];
        // Propagate clipping down the tree.
        child_node.clip_rect = if (node.flags.clip_children) node.rect else node.clip_rect;
    }
}

const LayoutWorkFn = fn (*UI, *Node, Axis) void;
const LayoutWorkFnArgs = struct { self: *UI, node: *Node, axis: Axis };

/// Pre-order traversal: apply `work_fn` to the node, then to its subtree.
fn layoutRecurseHelperPre(comptime work_fn: LayoutWorkFn, args: LayoutWorkFnArgs) void {
    work_fn(args.self, args.node, args.axis);
    var child = args.node.first;
    while (child) |child_node| : (child = child_node.next) {
        layoutRecurseHelperPre(work_fn, .{ .self = args.self, .node = child_node, .axis = args.axis });
    }
}

/// Post-order traversal: apply `work_fn` to the subtree, then to the node.
fn layoutRecurseHelperPost(comptime work_fn: LayoutWorkFn, args: LayoutWorkFnArgs) void {
    var child = args.node.first;
    while (child) |child_node| : (child = child_node.next) {
        layoutRecurseHelperPost(work_fn, .{ .self = args.self, .node = child_node, .axis = args.axis });
    }
    work_fn(args.self, args.node, args.axis);
}
https://raw.githubusercontent.com/mparadinha/zig-ui/3a1ba02301deac530cbfa2b1db204b424c6d2448/src/layout.zig
const std = @import("std");
const ArrayList = std.ArrayList;
const AutoHashMap = std.AutoHashMap;

const SAMPLE = false;
const FILE_PATH = if (SAMPLE) "src/day16_sample_input.txt" else "src/day16_input.txt";

const Direction = enum {
    North,
    South,
    West,
    East,
};

/// A beam head: a grid position plus the direction it is traveling.
const Walker = struct { x: usize, y: usize, direction: Direction };

/// AoC 2023 day 16 part 1: trace a light beam through a grid of mirrors and
/// splitters, then count how many tiles the beam energizes.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    defer {
        const deinit_status = gpa.deinit();
        if (deinit_status == .leak) @panic("MEMORY LEAK");
    }

    const file = try std.fs.cwd().openFile(FILE_PATH, .{});
    // BUGFIX: the file handle was never closed.
    defer file.close();
    var buf_reader = std.io.bufferedReader(file.reader());
    var in_stream = buf_reader.reader();

    // Read the whole input into a list of owned lines.
    var lines: [][]const u8 = undefined;
    {
        var lines_builder = ArrayList([]const u8).init(allocator);
        var line = ArrayList(u8).init(allocator);
        // BUGFIX: `line` could leak a partial final line at EOF; deinit is a
        // safe no-op after `toOwnedSlice`.
        defer line.deinit();
        const writer = line.writer();
        while (true) {
            in_stream.streamUntilDelimiter(writer, '\n', 1024) catch |err| switch (err) {
                error.EndOfStream => break,
                else => {
                    // BUGFIX: also free the already-appended owned lines, not
                    // just the builder's backing array.
                    for (lines_builder.items) |l| allocator.free(l);
                    lines_builder.deinit();
                    return err;
                },
            };
            try lines_builder.append(try line.toOwnedSlice());
        }
        lines = try lines_builder.toOwnedSlice();
    }
    defer {
        for (lines) |l| {
            allocator.free(l);
        }
        allocator.free(lines);
    }

    const grid_height = lines.len;
    const grid_width = lines[0].len;

    // A runtime-sized 2D bool grid on the heap: one flat allocation plus a
    // slice-of-rows view into it.
    const flat_grid = try allocator.alloc(bool, grid_width * grid_height);
    // BUGFIX: free `flat_grid` even if the row-slice allocation below fails.
    defer allocator.free(flat_grid);
    @memset(flat_grid, false);
    const energized_grid = try allocator.alloc([]bool, grid_height);
    defer allocator.free(energized_grid);
    for (0..grid_height) |y| {
        energized_grid[y] = flat_grid[y * grid_width .. (y + 1) * grid_width];
    }

    // Trace the beam: a work stack of beam heads, starting at the top-left
    // heading east. `step` may produce 0, 1, or 2 follow-up heads.
    var walkers = ArrayList(Walker).init(allocator);
    defer walkers.deinit();
    try walkers.append(Walker{ .x = 0, .y = 0, .direction = .East });
    while (walkers.items.len > 0) {
        const walker = walkers.pop();
        const new_walkers = step(walker, lines, energized_grid);
        for (new_walkers) |new_walker| {
            if (new_walker) |nw| try walkers.append(nw);
        }
    }

    var energized_tiles: u64 = 0;
    for (flat_grid) |tile| {
        if (tile) energized_tiles += 1;
    }

    const stdout = std.io.getStdOut().writer();
    try stdout.print("Energized tiles: {}\n", .{energized_tiles});
}

/// Advance one beam head a single tile, returning up to two follow-up heads
/// (two when a splitter is hit broadside). Marks the current tile energized.
/// Splitters that are already energized terminate the beam — that is the
/// loop-prevention mechanism.
fn step(walker: Walker, lines: [][]const u8, energized_grid: [][]bool) [2]?Walker {
    const tile = lines[walker.y][walker.x];
    // Read the energized flag *before* marking: `|` and `-` use it to detect
    // an already-visited splitter. (The original used a `defer` to get this
    // ordering; explicit sequencing is clearer.)
    const already_energized = energized_grid[walker.y][walker.x];
    const result: [2]?Walker = switch (tile) {
        '.' => .{ moveWalker(walker, walker.direction, lines), null },
        '\\' => switch (walker.direction) {
            .North => .{ moveWalker(walker, .West, lines), null },
            .South => .{ moveWalker(walker, .East, lines), null },
            .West => .{ moveWalker(walker, .North, lines), null },
            .East => .{ moveWalker(walker, .South, lines), null },
        },
        '/' => switch (walker.direction) {
            .North => .{ moveWalker(walker, .East, lines), null },
            .South => .{ moveWalker(walker, .West, lines), null },
            .West => .{ moveWalker(walker, .South, lines), null },
            .East => .{ moveWalker(walker, .North, lines), null },
        },
        '|' => if (already_energized) .{ null, null } else switch (walker.direction) {
            .North => .{ moveWalker(walker, .North, lines), null },
            .South => .{ moveWalker(walker, .South, lines), null },
            .West, .East => .{ moveWalker(walker, .North, lines), moveWalker(walker, .South, lines) },
        },
        '-' => if (already_energized) .{ null, null } else switch (walker.direction) {
            .North, .South => .{ moveWalker(walker, .West, lines), moveWalker(walker, .East, lines) },
            .West => .{ moveWalker(walker, .West, lines), null },
            .East => .{ moveWalker(walker, .East, lines), null },
        },
        else => @panic("invalid input"),
    };
    energized_grid[walker.y][walker.x] = true;
    return result;
}

/// Return `walker` moved one tile in `direction` (with that direction set),
/// or null when the move would leave the grid.
/// BUGFIX: the original built a `new_loc` value from a switch whose arms all
/// `return`ed directly, leaving an unreachable `if (new_loc)` tail; the dead
/// code is removed and the arms return the moved walker directly.
fn moveWalker(walker: Walker, direction: Direction, lines: [][]const u8) ?Walker {
    switch (direction) {
        .North => {
            if (walker.y == 0) return null;
            return .{ .x = walker.x, .y = walker.y - 1, .direction = direction };
        },
        .South => {
            if (walker.y == lines.len - 1) return null;
            return .{ .x = walker.x, .y = walker.y + 1, .direction = direction };
        },
        .West => {
            if (walker.x == 0) return null;
            return .{ .x = walker.x - 1, .y = walker.y, .direction = direction };
        },
        .East => {
            if (walker.x == lines[walker.y].len - 1) return null;
            return .{ .x = walker.x + 1, .y = walker.y, .direction = direction };
        },
    }
}
https://raw.githubusercontent.com/pyrrho/AdventOfCode2023/2122da740b0f9365c3a8b68b7f25205f81805659/src/day16_1.zig
//! Hosts a VNC server that exposes the emulated machine's screen and feeds
//! pointer/keyboard events back into the OS input queue.
const std = @import("std");
const network = @import("network");
const vnc = @import("vnc");
const logger = std.log.scoped(.host_vnc_server);
const ashet = @import("../../../main.zig");

// This file is itself the struct type (file-as-struct).
const VNC_Server = @This();

allocator: std.mem.Allocator,
// Listening TCP socket; accepted clients are served by `connection_handler`.
socket: network.Socket,
screen: ashet.drivers.video.Host_VNC_Output,
input: ashet.drivers.input.Host_VNC_Input,

/// Create the server, bind + listen on `endpoint`, and spawn a detached
/// accept thread. The returned pointer is allocated with `allocator`;
/// there is no corresponding deinit in this file — the server lives for
/// the process lifetime.
pub fn init(
    allocator: std.mem.Allocator,
    endpoint: network.EndPoint,
    width: u16,
    height: u16,
) !*VNC_Server {
    var server_sock = try network.Socket.create(.ipv4, .tcp);
    errdefer server_sock.close();

    try server_sock.enablePortReuse(true);
    try server_sock.bind(endpoint);

    try server_sock.listen();

    logger.info("Host Screen VNC Server available at {!}", .{
        server_sock.getLocalEndPoint(),
    });

    const server = try allocator.create(VNC_Server);
    errdefer allocator.destroy(server);

    server.* = .{
        .allocator = allocator,
        .socket = server_sock,
        .screen = try ashet.drivers.video.Host_VNC_Output.init(width, height),
        .input = ashet.drivers.input.Host_VNC_Input.init(),
    };

    // Accept loop runs for the rest of the process; errors in it are not
    // observed by the caller (the thread is detached).
    const accept_thread = try std.Thread.spawn(.{}, connection_handler, .{server});
    accept_thread.detach();

    return server;
}

/// Accept loop: serves one client at a time, handling VNC protocol events
/// until the client disconnects, then waits for the next client.
/// NOTE(review): `vd.screen.backbuffer` is read here without taking a lock
/// (the lock calls below are commented out) — presumably tolerated as a
/// benign race for display data; confirm before relying on it.
fn connection_handler(vd: *VNC_Server) !void {
    while (true) {
        // Per-connection arena: framebuffer copies below live exactly as
        // long as one client connection.
        var local_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer local_arena.deinit();

        const local_allocator = local_arena.allocator();

        const client = try vd.socket.accept();

        var server = try vnc.Server.open(std.heap.page_allocator, client, .{
            .screen_width = vd.screen.width,
            .screen_height = vd.screen.height,
            .desktop_name = "Ashet OS",
        });
        defer server.close();

        // Two snapshots of the screen: `new_framebuffer` is refreshed on each
        // update request, `old_framebuffer` holds what the client last saw,
        // enabling per-scanline differential updates.
        const new_framebuffer = try local_allocator.dupe(ashet.abi.ColorIndex, vd.screen.backbuffer);
        defer local_allocator.free(new_framebuffer);

        const old_framebuffer = try local_allocator.dupe(ashet.abi.ColorIndex, vd.screen.backbuffer);
        defer local_allocator.free(old_framebuffer);

        std.debug.print("protocol version: {}\n", .{server.protocol_version});
        std.debug.print("shared connection: {}\n", .{server.shared_connection});

        const Point = struct { x: u16, y: u16 };
        // Last reported pointer position/buttons, used to convert absolute
        // VNC pointer state into motion/button-change events.
        var old_mouse: ?Point = null;
        var old_button: u8 = 0;

        // Per-request arena, reset (capacity retained) for every event.
        var request_arena = std.heap.ArenaAllocator.init(local_allocator);
        defer request_arena.deinit();

        while (try server.waitEvent()) |event| {
            _ = request_arena.reset(.retain_capacity);
            const request_allocator = request_arena.allocator();

            switch (event) {
                .set_pixel_format => |pf| {
                    logger.info("change pixel format to {}", .{pf});
                }, // use internal handler

                .framebuffer_update_request => |req| {
                    {
                        // vd.screen.backbuffer_lock.lock();
                        // defer vd.screen.backbuffer_lock.unlock();

                        @memcpy(new_framebuffer, vd.screen.backbuffer);
                    }

                    // logger.info("framebuffer update request: {}", .{in_req});

                    var rectangles = std.ArrayList(vnc.UpdateRectangle).init(request_allocator);
                    defer rectangles.deinit();

                    const incremental_support = true;

                    if (incremental_support and req.incremental) {
                        // Compute differential update: for each scanline in
                        // the requested region, find the first/last changed
                        // pixel and send just that 1-pixel-tall span.
                        var base: usize = req.y * vd.screen.width;

                        var y: usize = 0;
                        while (y < req.height) : (y += 1) {
                            const old_scanline = old_framebuffer[base + req.x ..][0..req.width];
                            const new_scanline = new_framebuffer[base + req.x ..][0..req.width];

                            var first_diff: usize = old_scanline.len;
                            var last_diff: usize = 0;
                            for (old_scanline, new_scanline, 0..) |old, new, index| {
                                if (old != new) {
                                    first_diff = @min(first_diff, index);
                                    last_diff = @max(last_diff, index);
                                }
                            }

                            // first_diff > last_diff means the scanline is unchanged.
                            if (first_diff <= last_diff) {
                                try rectangles.append(try vd.encode_screen_rect(
                                    request_allocator,
                                    .{
                                        .x = @intCast(req.x + first_diff),
                                        .y = @intCast(req.y + y),
                                        .width = @intCast(last_diff - first_diff + 1),
                                        .height = 1,
                                    },
                                    new_framebuffer,
                                    server.pixel_format,
                                ));

                                // logger.debug("sending incremental update on scanline {} from {}...{}", .{
                                //     req.y + y,
                                //     req.x + first_diff,
                                //     last_diff,
                                // });
                            }

                            base += vd.screen.width;
                        }
                    } else {
                        // Simple full screen update:
                        try rectangles.append(try vd.encode_screen_rect(
                            request_allocator,
                            .{
                                .x = req.x,
                                .y = req.y,
                                .width = req.width,
                                .height = req.height,
                            },
                            new_framebuffer,
                            server.pixel_format,
                        ));
                    }

                    // Always answer with at least one rectangle (a 1x1 stub)
                    // so the client's update request is never left pending.
                    if (rectangles.items.len == 0) {
                        try rectangles.append(try vd.encode_screen_rect(
                            request_allocator,
                            .{
                                .x = req.x,
                                .y = req.y,
                                .width = 1,
                                .height = 1,
                            },
                            new_framebuffer,
                            server.pixel_format,
                        ));
                    }

                    // logger.debug("Respond to update request ({},{})+({}x{}) with {} updated rectangles", .{
                    //     req.x, req.y, req.width, req.height,
                    //     rectangles.items.len,
                    // });

                    try server.sendFramebufferUpdate(rectangles.items);

                    // Remember what the client now has, for the next diff.
                    @memcpy(old_framebuffer, new_framebuffer);
                },

                .key_event => |ev| {
                    // Push into the OS input queue under a critical section,
                    // mimicking an IRQ-context event source.
                    var cs = ashet.CriticalSection.enter();
                    defer cs.leave();
                    ashet.input.pushRawEventFromIRQ(.{
                        .keyboard = .{
                            .down = ev.down,
                            // NOTE(review): keysym is truncated to the scancode
                            // width — assumes low bits identify the key; confirm.
                            .scancode = @truncate(@intFromEnum(ev.key)),
                        },
                    });
                },

                .pointer_event => |ptr| {
                    var cs = ashet.CriticalSection.enter();
                    defer cs.leave();

                    // Emit a motion event only when the position changed.
                    if (old_mouse) |prev| {
                        if (prev.x != ptr.x or prev.y != ptr.y) {
                            ashet.input.pushRawEventFromIRQ(.{
                                .mouse_abs_motion = .{
                                    .x = @intCast(ptr.x),
                                    .y = @intCast(ptr.y),
                                },
                            });
                        }
                    }
                    old_mouse = Point{
                        .x = ptr.x,
                        .y = ptr.y,
                    };

                    // Emit one button event per bit that changed in the
                    // 7-bit VNC button mask.
                    if (old_button != ptr.buttons) {
                        for (0..7) |i| {
                            const mask: u8 = @as(u8, 1) << @truncate(i);
                            if ((old_button ^ ptr.buttons) & mask != 0) {
                                ashet.input.pushRawEventFromIRQ(.{
                                    .mouse_button = .{
                                        .button = switch (i) {
                                            0 => .left,
                                            1 => .right,
                                            2 => .middle,
                                            3 => .nav_previous,
                                            4 => .nav_next,
                                            5 => .wheel_down,
                                            6 => .wheel_up,
                                            else => unreachable,
                                        },
                                        .down = (ptr.buttons & mask) != 0,
                                    },
                                });
                            }
                        }
                        old_button = ptr.buttons;
                    }
                },

                else => logger.warn("received unhandled event: {}", .{event}),
            }
        }
    }
}

/// Encode the given sub-rectangle of `framebuffer` as a raw-encoded VNC
/// update rectangle in the client's `pixel_format`. Pixels outside the
/// screen bounds are filled with magenta as an obvious "out of range" marker.
/// The returned rectangle's `data` is owned by `allocator` (here: the
/// per-request arena, so no explicit free is needed).
fn encode_screen_rect(
    vd: VNC_Server,
    allocator: std.mem.Allocator,
    rect: struct { x: u16, y: u16, width: u16, height: u16 },
    framebuffer: []const ashet.abi.ColorIndex,
    pixel_format: vnc.PixelFormat,
) !vnc.UpdateRectangle {
    var fb = std.ArrayList(u8).init(allocator);
    defer fb.deinit();

    var y: usize = 0;
    while (y < rect.height) : (y += 1) {
        var x: usize = 0;
        while (x < rect.width) : (x += 1) {
            const px = x + rect.x;
            const py = y + rect.y;

            const color = if (px < vd.screen.width and py < vd.screen.height) blk: {
                // Palette lookup: the backbuffer stores color indices.
                const offset = py * vd.screen.width + px;
                std.debug.assert(offset < framebuffer.len);

                const index = framebuffer[offset];

                const raw_color = vd.screen.palette[@intFromEnum(index)];

                const rgb = raw_color.toRgb888();

                break :blk vnc.Color{
                    .r = @as(f32, @floatFromInt(rgb.r)) / 255.0,
                    .g = @as(f32, @floatFromInt(rgb.g)) / 255.0,
                    .b = @as(f32, @floatFromInt(rgb.b)) / 255.0,
                };
            } else vnc.Color{ .r = 1.0, .g = 0.0, .b = 1.0 };

            var buf: [8]u8 = undefined;
            const bits = pixel_format.encode(&buf, color);
            try fb.appendSlice(bits);
        }
    }

    return .{
        .x = rect.x,
        .y = rect.y,
        .width = rect.width,
        .height = rect.height,
        .encoding = .raw,
        .data = try fb.toOwnedSlice(),
    };
}
https://raw.githubusercontent.com/Ashet-Technologies/Ashet-OS/4c08e2856e0dd27607b9e379b572a08e53428f68/src/kernel/port/machine/linux_pc/VNC_Server.zig