content
stringlengths 32
91.6k
| path
stringlengths 14
91
| fimified
bool 2
classes |
---|---|---|
from tests.wrapper import MojoTest
from gojo.bytes import reader, buffer
import gojo.io
fn test_read() raises:
    """Tests bytes.Reader.read, then verifies that a negative seek fails."""
    var test = MojoTest("Testing bytes.Reader.read")
    var reader = reader.new_reader("0123456789")
    var dest = List[UInt8](capacity=16)
    _ = reader.read(dest)
    dest.append(0)  # null-terminate so the bytes convert cleanly to String
    test.assert_equal(String(dest), "0123456789")

    # Test negative seek: seeking before the start of the data must error.
    alias NEGATIVE_POSITION_ERROR = "bytes.Reader.seek: negative position"
    var position: Int
    var err: Error
    position, err = reader.seek(-1, io.SEEK_START)
    if not err:
        raise Error("Expected error not raised while testing negative seek.")
    # Re-raise any unexpected error so the real failure is visible.
    if str(err) != NEGATIVE_POSITION_ERROR:
        raise err
    test.assert_equal(str(err), NEGATIVE_POSITION_ERROR)
fn test_read_after_big_seek() raises:
    """Tests that reading after seeking far past the end returns io.EOF."""
    var test = MojoTest("Testing bytes.Reader.read after big seek")
    var reader = reader.new_reader("0123456789")
    # Seek well beyond the 10 bytes of data; the next read should hit EOF.
    _ = reader.seek(123456789, io.SEEK_START)
    var dest = List[UInt8](capacity=16)
    var bytes_read: Int
    var err: Error
    bytes_read, err = reader.read(dest)
    if not err:
        raise Error("Expected error not raised while testing big seek.")
    # Re-raise any error other than EOF so the real failure is visible.
    if str(err) != str(io.EOF):
        raise err
    test.assert_equal(str(err), str(io.EOF))
fn test_read_at() raises:
    """Tests bytes.Reader.read_at at offsets 0 and 1.

    read_at reads starting from an explicit offset rather than the reader's
    current position.
    """
    var test = MojoTest("Testing bytes.Reader.read_at")
    var reader = reader.new_reader("0123456789")

    # Read the full contents starting at offset 0.
    # Discard the return value explicitly (was an unused `pos` local),
    # matching the `_ =` convention used throughout this file.
    var dest = List[UInt8](capacity=16)
    _ = reader.read_at(dest, 0)
    dest.append(0)  # null-terminate for String conversion
    test.assert_equal(String(dest), "0123456789")

    # Read again from offset 1; expect the tail of the data.
    dest = List[UInt8](capacity=16)
    _ = reader.read_at(dest, 1)
    dest.append(0)
    test.assert_equal(String(dest), "123456789")
fn test_seek() raises:
    """Tests bytes.Reader.seek with SEEK_START, SEEK_END and SEEK_CURRENT."""
    var test = MojoTest("Testing bytes.Reader.seek")
    var reader = reader.new_reader("0123456789")

    # Absolute seek to index 5; the remaining data is "56789".
    # Seek results are discarded with `_ =` (was an unused `pos` local),
    # matching the convention used throughout this file.
    _ = reader.seek(5, io.SEEK_START)
    var dest = List[UInt8](capacity=16)
    _ = reader.read(dest)
    dest.append(0)  # null-terminate for String conversion
    test.assert_equal(String(dest), "56789")

    # Test SEEK_END relative seek: 2 bytes back from the end leaves "89".
    _ = reader.seek(-2, io.SEEK_END)
    dest = List[UInt8](capacity=16)
    _ = reader.read(dest)
    dest.append(0)
    test.assert_equal(String(dest), "89")

    # Test SEEK_CURRENT relative seek (should be at the end of the reader, ie [:-4])
    _ = reader.seek(-4, io.SEEK_CURRENT)
    dest = List[UInt8](capacity=16)
    _ = reader.read(dest)
    dest.append(0)
    test.assert_equal(String(dest), "6789")
fn test_read_all() raises:
    """Tests io.read_all draining a bytes.Reader to completion."""
    var test = MojoTest("Testing io.read_all with bytes.Reader")
    var reader = reader.new_reader("0123456789")
    var result = io.read_all(reader)
    # read_all appears to return a (bytes, err) pair; only the bytes are
    # checked here — TODO confirm and check the error as well.
    var bytes = result[0]
    bytes.append(0)  # null-terminate for String conversion
    test.assert_equal(String(bytes), "0123456789")
# fn test_write_to() raises:
# var test = MojoTest("Testing bytes.Reader.write_to")
# # Create a new reader containing the content "0123456789"
# var reader = reader.new_reader("0123456789")
# # Create a new writer containing the content "Hello World"
# var test_string: String = "Hello World"
# var w = buffer.new_buffer(test_string)
# # Write the content of the reader to the writer
# _ = reader.write_to(w)
# # Check if the content of the writer is "Hello World0123456789"
# test.assert_equal(str(w), String("Hello World0123456789"))
fn test_read_and_unread_byte() raises:
    """Tests bytes.Reader.read_byte followed by unread_byte.

    unread_byte must move the read position back by exactly one byte.
    """
    var test = MojoTest("Testing bytes.Reader.read_byte and bytes.Reader.unread_byte")
    var reader = reader.new_reader("0123456789")

    # Read the first byte from the reader.
    var byte: UInt8
    var err: Error
    byte, err = reader.read_byte()
    # Surface an unexpected read failure instead of asserting on a stale byte
    # (the original ignored this error).
    if err:
        raise err
    test.assert_equal(int(byte), 48)  # ord("0") == 48
    var post_read_position = reader.index

    # Unread the first byte from the reader. Read position should be moved back by 1.
    err = reader.unread_byte()
    if err:
        raise err
    test.assert_equal(int(reader.index), int(post_read_position - 1))
fn test_unread_byte_at_beginning() raises:
    """Tests that unread_byte errors when nothing has been read yet."""
    var test = MojoTest("Testing bytes.Reader.unread_byte before reading any bytes")
    var reader = reader.new_reader("0123456789")
    alias AT_BEGINNING_ERROR = "bytes.Reader.unread_byte: at beginning of slice"
    var err = reader.unread_byte()
    # Re-raise any unexpected error so the real failure is visible.
    if str(err) != AT_BEGINNING_ERROR:
        raise err
    test.assert_equal(str(err), AT_BEGINNING_ERROR)
fn main() raises:
    """Runs every bytes.Reader test in sequence."""
    test_read()
    test_read_after_big_seek()
    test_read_at()
    test_read_all()
    test_read_and_unread_byte()
    test_unread_byte_at_beginning()
    test_seek()
    # test_write_to()
| gojo/tests/test_bytes_reader.mojo | false |
<filename>gojo/tests/test_file.mojo
from tests.wrapper import MojoTest
from gojo.io import read_all, FileWrapper
fn test_read() raises:
    """Tests FileWrapper.read against a small fixture file."""
    var test = MojoTest("Testing FileWrapper.read")
    # Fixture tests/data/test.txt is expected to contain "12345".
    var file = FileWrapper("tests/data/test.txt", "r")
    var dest = List[UInt8](capacity=16)
    _ = file.read(dest)
    dest.append(0)  # null-terminate for String conversion
    test.assert_equal(String(dest), "12345")
fn test_read_all() raises:
    """Tests FileWrapper.read_all against a large fixture file.

    The result is cross-checked against the builtin `open(...).read()`.
    """
    var test = MojoTest("Testing FileWrapper.read_all")
    var file = FileWrapper("tests/data/test_big_file.txt", "r")
    var result = file.read_all()
    var bytes = result[0]  # (bytes, err) pair — only bytes checked here
    test.assert_equal(len(bytes), 15358)  # known size of the fixture
    bytes.append(0)  # null-terminate for String conversion
    with open("tests/data/test_big_file.txt", "r") as f:
        var expected = f.read()
        test.assert_equal(String(bytes), expected)
fn test_io_read_all() raises:
    """Tests the free function io.read_all driving a FileWrapper.

    Mirrors test_read_all but goes through the generic reader interface.
    """
    var test = MojoTest("Testing io.read_all with FileWrapper")
    var file = FileWrapper("tests/data/test_big_file.txt", "r")
    var result = read_all(file)
    var bytes = result[0]  # (bytes, err) pair — only bytes checked here
    test.assert_equal(len(bytes), 15358)  # known size of the fixture
    bytes.append(0)  # null-terminate for String conversion
    with open("tests/data/test_big_file.txt", "r") as f:
        var expected = f.read()
        test.assert_equal(String(bytes), expected)
fn test_read_byte() raises:
    """Tests FileWrapper.read_byte: first byte of the fixture is "1" (49)."""
    var test = MojoTest("Testing FileWrapper.read_byte")
    var file = FileWrapper("tests/data/test.txt", "r")
    # read_byte returns a (byte, err) pair; index 0 is the byte.
    test.assert_equal(int(file.read_byte()[0]), 49)
fn test_write() raises:
    """Tests FileWrapper.write, then reads the file back to verify."""
    var test = MojoTest("Testing FileWrapper.write")
    var file = FileWrapper("tests/data/test_write.txt", "w")
    var content = String("12345")
    var bytes_written = file.write(content.as_bytes())
    test.assert_equal(bytes_written[0], 5)  # all 5 bytes should be written
    # Re-open via the builtin open() to confirm the content on disk.
    with open("tests/data/test_write.txt", "r") as f:
        var expected = f.read()
        test.assert_equal(content, expected)
fn main() raises:
    """Runs every FileWrapper test in sequence."""
    test_read()
    test_read_all()
    test_io_read_all()
    test_read_byte()
    test_write()
| gojo/tests/test_file.mojo | false |
from tests.wrapper import MojoTest
from gojo.fmt import sprintf, printf
fn test_sprintf() raises:
    """Tests fmt.sprintf with %s, %d, %f, %t, %x and %X verbs."""
    var test = MojoTest("Testing sprintf")
    # Mixed-type formatting: string, int, float and bool in one format string.
    var s = sprintf(
        "Hello, %s. I am %d years old. More precisely, I am %f years old. It is %t that I like Mojo!",
        String("world"),
        29,
        Float64(29.5),
        True,
    )
    test.assert_equal(
        s,
        "Hello, world. I am 29 years old. More precisely, I am 29.5 years old. It is True that I like Mojo!",
    )

    # Hex formatting: lowercase %x and uppercase %X.
    s = sprintf("This is a number: %d. In base 16: %x. In base 16 upper: %X.", 42, 42, 42)
    test.assert_equal(s, "This is a number: 42. In base 16: 2a. In base 16 upper: 2A.")

    # %s should also accept a raw byte list.
    s = sprintf("Hello %s", String("world").as_bytes())
    test.assert_equal(s, "Hello world")
fn test_printf() raises:
    """Exercises fmt.printf (output goes to stdout; nothing is asserted)."""
    var test = MojoTest("Testing printf")
    printf(
        "Hello, %s. I am %d years old. More precisely, I am %f years old. It is %t that I like Mojo!",
        String("world"),
        29,
        Float64(29.5),
        True,
    )
fn main() raises:
    """Runs the fmt tests (printf is disabled to keep output clean)."""
    test_sprintf()
    # test_printf()
| gojo/tests/test_fmt.mojo | false |
<filename>gojo/tests/test_get_addr.mojo
from gojo.net import Socket, TCPAddr, get_ip_address, listen_tcp, dial_tcp
from gojo.syscall import SocketOptions, ProtocolFamily
fn test_dial() raises:
    """Smoke test: dials example.com:80, sends a GET, prints the response.

    Requires network access; nothing is asserted — failures surface as
    raised errors or early returns.
    """
    # Connect to example.com on port 80 and send a GET request
    var connection = dial_tcp("tcp", TCPAddr(get_ip_address("www.example.com"), 80))
    var bytes_written: Int = 0
    var err = Error()
    bytes_written, err = connection.write(
        String("GET / HTTP/1.1\r\nHost: www.example.com\r\nConnection: close\r\n\r\n").as_bytes_slice()
    )
    if err:
        raise err
    if bytes_written == 0:
        print("No bytes sent to peer.")
        return

    # Read the response from the connection
    var response = List[UInt8](capacity=4096)
    var bytes_read: Int = 0
    bytes_read, err = connection.read(response)
    if err:
        raise err
    if bytes_read == 0:
        print("No bytes received from peer.")
        return
    print(String(response))

    # Cleanup the connection
    err = connection.close()
    if err:
        raise err
fn test_listener() raises:
    """Runs a TCP accept loop on 0.0.0.0:8081, closing each connection.

    NOTE(review): this loop never terminates, so the function blocks
    forever — which is why the call in main() is commented out.
    """
    var listener = listen_tcp("tcp", TCPAddr("0.0.0.0", 8081))
    while True:
        var conn = listener.accept()
        print("Accepted connection from", conn.remote_address())
        var err = conn.close()
        if err:
            raise err
# fn test_stuff() raises:
# # TODO: context manager not working yet
# # with Socket() as socket:
# # socket.bind("0.0.0.0", 8080)
# var socket = Socket(protocol=ProtocolFamily.PF_UNIX)
# socket.bind("0.0.0.0", 8080)
# socket.connect(get_ip_address("www.example.com"), 80)
# print("File number", socket.file_no())
# var local = socket.get_sock_name()
# var remote = socket.get_peer_name()
# print("Local address", str(local), socket.local_address)
# print("Remote address", str(remote), socket.remote_address)
# socket.set_socket_option(SocketOptions.SO_REUSEADDR, 1)
# print("REUSE_ADDR value", socket.get_socket_option(SocketOptions.SO_REUSEADDR))
# var timeout = 30
# # socket.set_timeout(timeout)
# # print(socket.get_timeout())
# socket.shutdown()
# print("closing")
# var err = socket.close()
# print("closed")
# if err:
# print("err returned")
# raise err
# # var option_value = socket.get_sock_opt(SocketOptions.SO_REUSEADDR)
# # print(option_value)
# # socket.connect(self.ip, self.port)
# # socket.send(message)
# # var response = socket.receive() # TODO: call receive until all data is fetched, receive should also just return bytes
# # socket.shutdown()
# # socket.close()
fn main() raises:
    """Runs only the network dial test; the others are manual/blocking."""
    # test_stuff()
    # test_listener()
    test_dial()
| gojo/tests/test_get_addr.mojo | false |
from tests.wrapper import MojoTest
from gojo.syscall import FD
from gojo.io import STDWriter
fn test_writer() raises:
    """Smoke test for STDWriter bound to stdout.

    Writes an empty string, so this only verifies the call does not raise;
    no output or return value is checked.
    """
    var test = MojoTest("Testing STDWriter.write")
    var writer = STDWriter[FD.STDOUT]()
    _ = writer.write_string("")
fn main() raises:
    """Runs the STDWriter test."""
    test_writer()
| gojo/tests/test_std.mojo | false |
<filename>gojo/tests/test_strings_reader.mojo
from tests.wrapper import MojoTest
from gojo.strings import StringBuilder, Reader, new_reader
import gojo.io
fn test_read() raises:
    """Tests strings.Reader.read: full content and byte count."""
    var test = MojoTest("Testing strings.Reader.read")
    var example: String = "Hello, World!"
    var reader = new_reader("Hello, World!")

    # Test reading from the reader.
    var buffer = List[UInt8](capacity=16)
    var bytes_read = reader.read(buffer)  # (count, err) pair
    buffer.append(0)  # null-terminate for String conversion
    test.assert_equal(bytes_read[0], len(example))
    test.assert_equal(String(buffer), "Hello, World!")
fn test_read_slice() raises:
    """Tests strings.Reader.read.

    NOTE(review): this is an exact duplicate of test_read and never
    exercises a slice-based read API despite its name — TODO: port it to
    the intended slice variant.
    """
    var test = MojoTest("Testing strings.Reader.read")
    var example: String = "Hello, World!"
    var reader = new_reader("Hello, World!")

    # Test reading from the reader.
    var buffer = List[UInt8](capacity=16)
    var bytes_read = reader.read(buffer)  # (count, err) pair
    buffer.append(0)  # null-terminate for String conversion
    test.assert_equal(bytes_read[0], len(example))
    test.assert_equal(String(buffer), "Hello, World!")
fn test_read_at() raises:
    """Tests strings.Reader.read_at from offset 7 ("World!")."""
    var test = MojoTest("Testing strings.Reader.read_at")
    var example: String = "Hello, World!"
    var reader = new_reader("Hello, World!")

    # Test reading from the reader.
    var buffer = List[UInt8](capacity=128)
    var bytes_read = reader.read_at(buffer, 7)  # (count, err) pair
    buffer.append(0)  # null-terminate for String conversion
    test.assert_equal(bytes_read[0], len(example[7:]))
    test.assert_equal(String(buffer), "World!")
fn test_seek() raises:
    """Tests strings.Reader.seek with an absolute SEEK_START offset."""
    var test = MojoTest("Testing strings.Reader.seek")
    var reader = new_reader("Hello, World!")

    # Seek to the middle of the reader; seek returns a (position, err) pair.
    var position = reader.seek(5, io.SEEK_START)
    test.assert_equal(int(position[0]), 5)
fn test_read_and_unread_byte() raises:
    """Tests strings.Reader.read_byte and unread_byte round-trip."""
    var test = MojoTest("Testing strings.Reader.read_byte and strings.Reader.unread_byte")
    var example: String = "Hello, World!"
    var reader = new_reader("Hello, World!")

    # Read the first byte from the reader.
    var byte = reader.read_byte()  # (byte, err) pair
    test.assert_equal(int(byte[0]), 72)  # ord("H") == 72

    # Unread the first byte from the reader. Remaining bytes to be read should be the same as the length of the example string.
    _ = reader.unread_byte()
    test.assert_equal(len(reader), len(example))
# fn test_write_to() raises:
# var test = MojoTest("Testing strings.Reader.write_to")
# var example: String = "Hello, World!"
# var reader = new_reader("Hello, World!")
# # Write from the string reader to a StringBuilder.
# var builder = StringBuilder()
# _ = reader.write_to(builder)
# test.assert_equal(str(builder), example)
fn main() raises:
    """Runs every strings.Reader test in sequence."""
    test_read()
    test_read_at()
    test_seek()
    test_read_and_unread_byte()
    # test_write_to()
    test_read_slice()
| gojo/tests/test_strings_reader.mojo | false |
from tests.wrapper import MojoTest
from gojo.strings import StringBuilder
fn test_write_string() raises:
    """Tests StringBuilder.write_string accumulating repeated writes."""
    var test = MojoTest("Testing strings.StringBuilder.write_string")

    # Create a string from the builder by writing strings to it.
    var builder = StringBuilder()
    for _ in range(3):
        _ = builder.write_string("Lorem ipsum dolor sit amet ")
    test.assert_equal(
        str(builder),
        "Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet ",
    )
fn test_big_write():
    """Tests that a write larger than the initial capacity grows the builder.

    NOTE(review): unlike its siblings this fn is not declared `raises` —
    consistent with MojoTest.assert_equal never raising, but worth unifying.
    """
    var test = MojoTest("Testing strings.StringBuilder.write_string with big Write")

    # Capacity of 1 forces the builder to reallocate for this single write.
    var builder = StringBuilder(capacity=1)
    _ = builder.write_string("Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet")
    test.assert_equal(
        str(builder),
        "Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet",
    )
fn test_write() raises:
    """Tests StringBuilder.write with a raw byte slice."""
    var test = MojoTest("Testing strings.StringBuilder.write")

    # Create a string from the builder by writing bytes to it.
    var builder = StringBuilder()
    _ = builder.write(String("Hello").as_bytes_slice())
    test.assert_equal(str(builder), "Hello")
fn test_write_byte() raises:
    """Tests StringBuilder.write_byte with a single byte."""
    var test = MojoTest("Testing strings.StringBuilder.write_byte")

    # Create a string from the builder by writing bytes to it.
    var builder = StringBuilder()
    _ = builder.write_byte(ord("H"))
    test.assert_equal(str(builder), "H")
fn main() raises:
    """Runs every StringBuilder test in sequence."""
    test_write_string()
    test_write()
    test_write_byte()
    test_big_write()
| gojo/tests/test_strings_stringbuilder.mojo | false |
from testing import testing
@value
struct MojoTest:
    """A utility struct for testing.

    Prints the test name as a header on construction and prints (rather
    than raises) assertion failures, so one failing assert does not abort
    the rest of the test run.
    """

    # Name printed as a "# <name>" header when the test starts.
    var test_name: String

    fn __init__(inout self, test_name: String):
        self.test_name = test_name
        print("# " + test_name)

    fn assert_true(self, cond: Bool, message: String = ""):
        """Assert `cond` is True; prints the failure instead of raising."""
        try:
            if message == "":
                testing.assert_true(cond)
            else:
                testing.assert_true(cond, message)
        except e:
            print(e)

    fn assert_false(self, cond: Bool, message: String = ""):
        """Assert `cond` is False; prints the failure instead of raising."""
        try:
            if message == "":
                testing.assert_false(cond)
            else:
                testing.assert_false(cond, message)
        except e:
            print(e)

    fn assert_equal[T: testing.Testable](self, left: T, right: T):
        """Assert `left == right`; prints the failure instead of raising."""
        try:
            testing.assert_equal(left, right)
        except e:
            print(e)
| gojo/tests/wrapper.mojo | false |
<filename>lightbug_http/bench.mojo
import benchmark
from lightbug_http.sys.server import SysServer
from lightbug_http.python.server import PythonServer
from lightbug_http.service import TechEmpowerRouter
from tests.utils import (
TestStruct,
FakeResponder,
new_fake_listener,
FakeServer,
getRequest,
)
fn main():
    """Starts a SysServer with the TechEmpower router on 0.0.0.0:8080.

    Any startup error is printed rather than propagated (main is not
    `raises`).
    """
    try:
        var server = SysServer(tcp_keep_alive=True)
        var handler = TechEmpowerRouter()
        server.listen_and_serve("0.0.0.0:8080", handler)
    except e:
        print("Error starting server: " + e.__str__())
        return
fn lightbug_benchmark_server():
    """Benchmarks one run of the fake in-memory server and prints the report."""
    var server_report = benchmark.run[run_fake_server](max_iters=1)
    print("Server: ")
    server_report.print(benchmark.Unit.ms)
fn lightbug_benchmark_misc() -> None:
    """Compares direct field mutation against copy-and-replace on TestStruct."""
    var direct_set_report = benchmark.run[init_test_and_set_a_direct](max_iters=1)
    var recreating_set_report = benchmark.run[init_test_and_set_a_copy](max_iters=1)
    print("Direct set: ")
    direct_set_report.print(benchmark.Unit.ms)
    print("Recreating set: ")
    recreating_set_report.print(benchmark.Unit.ms)
fn run_fake_server():
    """Serves two canned requests through FakeServer (benchmark workload)."""
    var handler = FakeResponder()
    var listener = new_fake_listener(2, getRequest)
    var server = FakeServer(listener, handler)
    server.serve()
fn init_test_and_set_a_copy() -> None:
    """Benchmark body: set field `a` by creating a modified copy."""
    var test = TestStruct("a", "b")
    _ = test.set_a_copy("c")
fn init_test_and_set_a_direct() -> None:
    """Benchmark body: set field `a` by direct in-place mutation."""
    var test = TestStruct("a", "b")
    _ = test.set_a_direct("c")
| lightbug_http/bench.mojo | false |
from lightbug_http.http import HTTPRequest, encode
from lightbug_http.header import RequestHeader
from lightbug_http.uri import URI
from lightbug_http.sys.client import MojoClient
fn test_request(inout client: MojoClient) raises -> None:
    """Sends a GET to httpbin.org/status/404 and prints response details.

    Requires network access; nothing is asserted — output is inspected
    manually.
    """
    var uri = URI("http://httpbin.org/status/404")
    try:
        uri.parse()
    except e:
        print("error parsing uri: " + e.__str__())
    var request = HTTPRequest(uri)
    var response = client.do(request)

    # print status code
    print("Response:", response.header.status_code())

    # print raw headers
    # print("Headers:", response.header.headers())

    # print parsed headers (only some are parsed for now)
    print("Content-Type:", String(response.header.content_type()))
    print("Content-Length", response.header.content_length())
    print("Server:", String(response.header.server()))
    print("Is connection set to connection-close? ", response.header.connection_close())

    # print body
    print(String(response.get_body_bytes()))
fn main() raises -> None:
    """Creates a MojoClient and runs the manual request smoke test."""
    var client = MojoClient()
    test_request(client)
<filename>lightbug_http/run_tests.mojo
from tests.test_io import test_io
from tests.test_http import test_http
from tests.test_header import test_header
from tests.test_uri import test_uri
# from lightbug_http.test.test_client import test_client
fn main() raises:
    """Runs the lightbug_http test suites in sequence."""
    test_io()
    test_http()
    test_header()
    test_uri()
    # test_client()
| lightbug_http/run_tests.mojo | false |
<filename>lightbug_http/external/libc.mojo
from utils import StaticTuple
from lightbug_http.io.bytes import Bytes
alias IPPROTO_IPV6 = 41
alias IPV6_V6ONLY = 26
alias EPROTONOSUPPORT = 93
# Adapted from https://github.com/gabrieldemarmiesse/mojo-stdlib-extensions/ . Huge thanks to Gabriel!
alias FD_STDIN: c_int = 0
alias FD_STDOUT: c_int = 1
alias FD_STDERR: c_int = 2
alias SUCCESS = 0
alias GRND_NONBLOCK: UInt8 = 1
alias char_UnsafePointer = UnsafePointer[c_char]
# Adapted from https://github.com/crisadamo/mojo-Libc . Huge thanks to Cristian!
# C types
alias c_void = UInt8
alias c_char = UInt8
alias c_schar = Int8
alias c_uchar = UInt8
alias c_short = Int16
alias c_ushort = UInt16
alias c_int = Int32
alias c_uint = UInt32
alias c_long = Int64
alias c_ulong = UInt64
alias c_float = Float32
alias c_double = Float64
# `Int` is known to be machine's width
alias c_size_t = Int
alias c_ssize_t = Int
alias ptrdiff_t = Int64
alias intptr_t = Int64
alias uintptr_t = UInt64
# --- ( error.h Constants )-----------------------------------------------------
alias EPERM = 1
alias ENOENT = 2
alias ESRCH = 3
alias EINTR = 4
alias EIO = 5
alias ENXIO = 6
alias E2BIG = 7
alias ENOEXEC = 8
alias EBADF = 9
alias ECHILD = 10
alias EAGAIN = 11
alias ENOMEM = 12
alias EACCES = 13
alias EFAULT = 14
alias ENOTBLK = 15
alias EBUSY = 16
alias EEXIST = 17
alias EXDEV = 18
alias ENODEV = 19
alias ENOTDIR = 20
alias EISDIR = 21
alias EINVAL = 22
alias ENFILE = 23
alias EMFILE = 24
alias ENOTTY = 25
alias ETXTBSY = 26
alias EFBIG = 27
alias ENOSPC = 28
alias ESPIPE = 29
alias EROFS = 30
alias EMLINK = 31
alias EPIPE = 32
alias EDOM = 33
alias ERANGE = 34
alias EWOULDBLOCK = EAGAIN
fn to_char_ptr(s: String) -> UnsafePointer[c_char]:
    """Copy an ASCII String into a newly allocated, NUL-terminated C string.

    Only ASCII-based strings. The caller owns the returned buffer and is
    responsible for freeing it.
    """
    # Allocate one extra byte for the trailing NUL. The original allocated
    # exactly len(s) bytes, so passing the result to C functions that expect
    # NUL-terminated strings (strlen, inet_pton, ...) read past the buffer.
    var ptr = UnsafePointer[c_char]().alloc(len(s) + 1)
    for i in range(len(s)):
        ptr[i] = ord(s[i])
    ptr[len(s)] = 0  # NUL terminator
    return ptr
fn to_char_ptr(s: Bytes) -> UnsafePointer[c_char]:
    """Copy a byte buffer into a newly allocated, NUL-terminated C string.

    The caller owns the returned buffer and is responsible for freeing it.
    """
    # Allocate one extra byte for a trailing NUL so the buffer is safe to
    # hand to C APIs that expect NUL-terminated strings, mirroring the
    # String overload. Harmless if `s` already ends in a NUL byte.
    var ptr = UnsafePointer[c_char]().alloc(len(s) + 1)
    for i in range(len(s)):
        ptr[i] = int(s[i])
    ptr[len(s)] = 0  # NUL terminator
    return ptr
fn c_charptr_to_string(s: UnsafePointer[c_char]) -> String:
    """Build a String from a NUL-terminated C string (length via strlen)."""
    return String(s.bitcast[UInt8](), strlen(s))
fn cftob(val: c_int) -> Bool:
    """Convert C-like failure (-1) to Bool: True when `val` is positive.

    NOTE(review): this maps 0 to False as well as -1. For syscalls that
    return a file descriptor, fd 0 would be treated as failure — confirm
    call sites before relying on this for fd-returning calls.
    """
    return rebind[Bool](val > 0)
# --- ( Network Related Constants )---------------------------------------------
alias sa_family_t = c_ushort
alias socklen_t = c_uint
alias in_addr_t = c_uint
alias in_port_t = c_ushort
# Address Family Constants
alias AF_UNSPEC = 0
alias AF_UNIX = 1
alias AF_LOCAL = AF_UNIX
alias AF_INET = 2
alias AF_AX25 = 3
alias AF_IPX = 4
alias AF_APPLETALK = 5
alias AF_NETROM = 6
alias AF_BRIDGE = 7
alias AF_ATMPVC = 8
alias AF_X25 = 9
alias AF_INET6 = 10
alias AF_ROSE = 11
alias AF_DECnet = 12
alias AF_NETBEUI = 13
alias AF_SECURITY = 14
alias AF_KEY = 15
alias AF_NETLINK = 16
alias AF_ROUTE = AF_NETLINK
alias AF_PACKET = 17
alias AF_ASH = 18
alias AF_ECONET = 19
alias AF_ATMSVC = 20
alias AF_RDS = 21
alias AF_SNA = 22
alias AF_IRDA = 23
alias AF_PPPOX = 24
alias AF_WANPIPE = 25
alias AF_LLC = 26
alias AF_CAN = 29
alias AF_TIPC = 30
alias AF_BLUETOOTH = 31
alias AF_IUCV = 32
alias AF_RXRPC = 33
alias AF_ISDN = 34
alias AF_PHONET = 35
alias AF_IEEE802154 = 36
alias AF_CAIF = 37
alias AF_ALG = 38
alias AF_NFC = 39
alias AF_VSOCK = 40
alias AF_KCM = 41
alias AF_QIPCRTR = 42
alias AF_MAX = 43
alias PF_UNSPEC = AF_UNSPEC
alias PF_UNIX = AF_UNIX
alias PF_LOCAL = AF_LOCAL
alias PF_INET = AF_INET
alias PF_AX25 = AF_AX25
alias PF_IPX = AF_IPX
alias PF_APPLETALK = AF_APPLETALK
alias PF_NETROM = AF_NETROM
alias PF_BRIDGE = AF_BRIDGE
alias PF_ATMPVC = AF_ATMPVC
alias PF_X25 = AF_X25
alias PF_INET6 = AF_INET6
alias PF_ROSE = AF_ROSE
alias PF_DECnet = AF_DECnet
alias PF_NETBEUI = AF_NETBEUI
alias PF_SECURITY = AF_SECURITY
alias PF_KEY = AF_KEY
alias PF_NETLINK = AF_NETLINK
alias PF_ROUTE = AF_ROUTE
alias PF_PACKET = AF_PACKET
alias PF_ASH = AF_ASH
alias PF_ECONET = AF_ECONET
alias PF_ATMSVC = AF_ATMSVC
alias PF_RDS = AF_RDS
alias PF_SNA = AF_SNA
alias PF_IRDA = AF_IRDA
alias PF_PPPOX = AF_PPPOX
alias PF_WANPIPE = AF_WANPIPE
alias PF_LLC = AF_LLC
alias PF_CAN = AF_CAN
alias PF_TIPC = AF_TIPC
alias PF_BLUETOOTH = AF_BLUETOOTH
alias PF_IUCV = AF_IUCV
alias PF_RXRPC = AF_RXRPC
alias PF_ISDN = AF_ISDN
alias PF_PHONET = AF_PHONET
alias PF_IEEE802154 = AF_IEEE802154
alias PF_CAIF = AF_CAIF
alias PF_ALG = AF_ALG
alias PF_NFC = AF_NFC
alias PF_VSOCK = AF_VSOCK
alias PF_KCM = AF_KCM
alias PF_QIPCRTR = AF_QIPCRTR
alias PF_MAX = AF_MAX
# Socket Type constants
alias SOCK_STREAM = 1
alias SOCK_DGRAM = 2
alias SOCK_RAW = 3
alias SOCK_RDM = 4
alias SOCK_SEQPACKET = 5
alias SOCK_DCCP = 6
alias SOCK_PACKET = 10
alias SOCK_CLOEXEC = O_CLOEXEC
alias SOCK_NONBLOCK = O_NONBLOCK
# Address Information
alias AI_PASSIVE = 1
alias AI_CANONNAME = 2
alias AI_NUMERICHOST = 4
alias AI_V4MAPPED = 8
alias AI_ALL = 16
alias AI_ADDRCONFIG = 32
alias AI_IDN = 64
alias INET_ADDRSTRLEN = 16
alias INET6_ADDRSTRLEN = 46
alias SHUT_RD = 0
alias SHUT_WR = 1
alias SHUT_RDWR = 2
alias SOL_SOCKET = 1
alias SO_DEBUG = 1
alias SO_REUSEADDR = 2
alias SO_TYPE = 3
alias SO_ERROR = 4
alias SO_DONTROUTE = 5
alias SO_BROADCAST = 6
alias SO_SNDBUF = 7
alias SO_RCVBUF = 8
alias SO_KEEPALIVE = 9
alias SO_OOBINLINE = 10
alias SO_NO_CHECK = 11
alias SO_PRIORITY = 12
alias SO_LINGER = 13
alias SO_BSDCOMPAT = 14
alias SO_REUSEPORT = 15
alias SO_PASSCRED = 16
alias SO_PEERCRED = 17
alias SO_RCVLOWAT = 18
alias SO_SNDLOWAT = 19
alias SO_RCVTIMEO = 20
alias SO_SNDTIMEO = 21
alias SO_RCVTIMEO_OLD = 20
alias SO_SNDTIMEO_OLD = 21
alias SO_SECURITY_AUTHENTICATION = 22
alias SO_SECURITY_ENCRYPTION_TRANSPORT = 23
alias SO_SECURITY_ENCRYPTION_NETWORK = 24
alias SO_BINDTODEVICE = 25
alias SO_ATTACH_FILTER = 26
alias SO_DETACH_FILTER = 27
alias SO_GET_FILTER = SO_ATTACH_FILTER
alias SO_PEERNAME = 28
alias SO_TIMESTAMP = 29
alias SO_TIMESTAMP_OLD = 29
alias SO_ACCEPTCONN = 30
alias SO_PEERSEC = 31
alias SO_SNDBUFFORCE = 32
alias SO_RCVBUFFORCE = 33
alias SO_PASSSEC = 34
alias SO_TIMESTAMPNS = 35
alias SO_TIMESTAMPNS_OLD = 35
alias SO_MARK = 36
alias SO_TIMESTAMPING = 37
alias SO_TIMESTAMPING_OLD = 37
alias SO_PROTOCOL = 38
alias SO_DOMAIN = 39
alias SO_RXQ_OVFL = 40
alias SO_WIFI_STATUS = 41
alias SCM_WIFI_STATUS = SO_WIFI_STATUS
alias SO_PEEK_OFF = 42
alias SO_NOFCS = 43
alias SO_LOCK_FILTER = 44
alias SO_SELECT_ERR_QUEUE = 45
alias SO_BUSY_POLL = 46
alias SO_MAX_PACING_RATE = 47
alias SO_BPF_EXTENSIONS = 48
alias SO_INCOMING_CPU = 49
alias SO_ATTACH_BPF = 50
alias SO_DETACH_BPF = SO_DETACH_FILTER
alias SO_ATTACH_REUSEPORT_CBPF = 51
alias SO_ATTACH_REUSEPORT_EBPF = 52
alias SO_CNX_ADVICE = 53
alias SCM_TIMESTAMPING_OPT_STATS = 54
alias SO_MEMINFO = 55
alias SO_INCOMING_NAPI_ID = 56
alias SO_COOKIE = 57
alias SCM_TIMESTAMPING_PKTINFO = 58
alias SO_PEERGROUPS = 59
alias SO_ZEROCOPY = 60
alias SO_TXTIME = 61
alias SCM_TXTIME = SO_TXTIME
alias SO_BINDTOIFINDEX = 62
alias SO_TIMESTAMP_NEW = 63
alias SO_TIMESTAMPNS_NEW = 64
alias SO_TIMESTAMPING_NEW = 65
alias SO_RCVTIMEO_NEW = 66
alias SO_SNDTIMEO_NEW = 67
alias SO_DETACH_REUSEPORT_BPF = 68
# --- ( Network Related Structs )-----------------------------------------------
@value
@register_passable("trivial")
struct in_addr:
    """C `struct in_addr`: an IPv4 address in network byte order."""

    var s_addr: in_addr_t
@value
@register_passable("trivial")
struct in6_addr:
    """C `struct in6_addr`: a 128-bit IPv6 address as 16 raw bytes."""

    var s6_addr: StaticTuple[c_char, 16]
@value
@register_passable("trivial")
struct sockaddr:
    """C generic `struct sockaddr`: family tag plus opaque address bytes."""

    var sa_family: sa_family_t  # address family (AF_*)
    var sa_data: StaticTuple[c_char, 14]  # protocol-specific address data
@value
@register_passable("trivial")
struct sockaddr_in:
    """C `struct sockaddr_in`: an IPv4 socket address."""

    var sin_family: sa_family_t  # address family (AF_INET)
    var sin_port: in_port_t  # port in network byte order
    var sin_addr: in_addr  # IPv4 address
    var sin_zero: StaticTuple[c_char, 8]  # padding to sockaddr size
@value
@register_passable("trivial")
struct sockaddr_in6:
    """C `struct sockaddr_in6`: an IPv6 socket address."""

    var sin6_family: sa_family_t  # address family (AF_INET6)
    var sin6_port: in_port_t  # port in network byte order
    var sin6_flowinfo: c_uint  # IPv6 flow information
    var sin6_addr: in6_addr  # IPv6 address
    var sin6_scope_id: c_uint  # scope zone index
@value
@register_passable("trivial")
struct addrinfo:
    """C `struct addrinfo`, as used with getaddrinfo for name resolution."""

    var ai_flags: c_int  # AI_* flags
    var ai_family: c_int  # AF_* address family
    var ai_socktype: c_int  # SOCK_* socket type
    var ai_protocol: c_int  # protocol number
    var ai_addrlen: socklen_t  # byte length of ai_addr
    var ai_addr: UnsafePointer[sockaddr]  # resolved socket address
    var ai_canonname: UnsafePointer[c_char]  # canonical host name
    # FIXME(cristian): This should be UnsafePointer[addrinfo]
    var ai_next: UnsafePointer[c_void]  # next entry in the linked list

    fn __init__() -> Self:
        """Return a zero-initialized addrinfo with null pointers."""
        return Self(
            0, 0, 0, 0, 0, UnsafePointer[sockaddr](), UnsafePointer[c_char](), UnsafePointer[c_void]()
        )
fn strlen(s: UnsafePointer[c_char]) -> c_size_t:
    """Libc POSIX `strlen` function.

    Reference: https://man7.org/linux/man-pages/man3/strlen.3p.html
    Fn signature: size_t strlen(const char *s).

    Args:
        s: A UnsafePointer to a NUL-terminated C string.

    Returns:
        The length of the string, excluding the terminating NUL.
    """
    return external_call["strlen", c_size_t, UnsafePointer[c_char]](s)
# --- ( Network Related Syscalls & Structs )------------------------------------
fn htonl(hostlong: c_uint) -> c_uint:
    """Libc POSIX `htonl` function: 32-bit host-to-network byte order.

    Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html
    Fn signature: uint32_t htonl(uint32_t hostlong).

    Args:
        hostlong: A 32-bit integer in host byte order.

    Returns:
        The value provided in network byte order.
    """
    return external_call["htonl", c_uint, c_uint](hostlong)
fn htons(hostshort: c_ushort) -> c_ushort:
    """Libc POSIX `htons` function: 16-bit host-to-network byte order.

    Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html
    Fn signature: uint16_t htons(uint16_t hostshort).

    Args:
        hostshort: A 16-bit integer in host byte order.

    Returns:
        The value provided in network byte order.
    """
    return external_call["htons", c_ushort, c_ushort](hostshort)
fn ntohl(netlong: c_uint) -> c_uint:
    """Libc POSIX `ntohl` function: 32-bit network-to-host byte order.

    Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html
    Fn signature: uint32_t ntohl(uint32_t netlong).

    Args:
        netlong: A 32-bit integer in network byte order.

    Returns:
        The value provided in host byte order.
    """
    return external_call["ntohl", c_uint, c_uint](netlong)
fn ntohs(netshort: c_ushort) -> c_ushort:
    """Libc POSIX `ntohs` function: 16-bit network-to-host byte order.

    Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html
    Fn signature: uint16_t ntohs(uint16_t netshort).

    Args:
        netshort: A 16-bit integer in network byte order.

    Returns:
        The value provided in host byte order.
    """
    return external_call["ntohs", c_ushort, c_ushort](netshort)
fn inet_ntop(
    af: c_int, src: UnsafePointer[c_void], dst: UnsafePointer[c_char], size: socklen_t
) -> UnsafePointer[c_char]:
    """Libc POSIX `inet_ntop` function: binary address to text form.

    Reference: https://man7.org/linux/man-pages/man3/inet_ntop.3p.html.
    Fn signature: const char *inet_ntop(int af, const void *restrict src, char *restrict dst, socklen_t size).

    Args:
        af: Address Family, see AF_ aliases.
        src: A UnsafePointer to a binary address.
        dst: A UnsafePointer to a buffer to store the result.
        size: The size of the buffer.

    Returns:
        A UnsafePointer to the buffer containing the result (NULL on error).
    """
    return external_call[
        "inet_ntop",
        UnsafePointer[c_char],  # FnName, RetType
        c_int,
        UnsafePointer[c_void],
        UnsafePointer[c_char],
        socklen_t,  # Args
    ](af, src, dst, size)
fn inet_pton(af: c_int, src: UnsafePointer[c_char], dst: UnsafePointer[c_void]) -> c_int:
    """Libc POSIX `inet_pton` function: text address to binary form.

    Reference: https://man7.org/linux/man-pages/man3/inet_ntop.3p.html
    Fn signature: int inet_pton(int af, const char *restrict src, void *restrict dst).

    Args:
        af: Address Family, see AF_ aliases.
        src: A UnsafePointer to a NUL-terminated string containing the address.
        dst: A UnsafePointer to a buffer to store the result.

    Returns:
        1 on success, 0 if the input is not a valid address, -1 on error.
    """
    return external_call[
        "inet_pton",
        c_int,  # FnName, RetType
        c_int,
        UnsafePointer[c_char],
        UnsafePointer[c_void],  # Args
    ](af, src, dst)
fn inet_addr(cp: UnsafePointer[c_char]) -> in_addr_t:
    """Libc POSIX `inet_addr` function: dotted-decimal text to IPv4 address.

    Reference: https://man7.org/linux/man-pages/man3/inet_addr.3p.html
    Fn signature: in_addr_t inet_addr(const char *cp).

    Args:
        cp: A UnsafePointer to a NUL-terminated string containing the address.

    Returns:
        The address in network byte order (INADDR_NONE on invalid input).
    """
    return external_call["inet_addr", in_addr_t, UnsafePointer[c_char]](cp)
fn inet_ntoa(addr: in_addr) -> UnsafePointer[c_char]:
    """Libc POSIX `inet_ntoa` function: IPv4 address to dotted-decimal text.

    Reference: https://man7.org/linux/man-pages/man3/inet_addr.3p.html
    Fn signature: char *inet_ntoa(struct in_addr in).

    Args:
        addr: An IPv4 address in network byte order.

    Returns:
        A pointer to the address formatted as a dotted-decimal string
        ("a.b.c.d"). Per POSIX the buffer is statically allocated by libc
        and is overwritten by subsequent calls.
    """
    return external_call["inet_ntoa", UnsafePointer[c_char], in_addr](addr)
fn socket(domain: c_int, type: c_int, protocol: c_int) -> c_int:
    """Libc POSIX `socket` function: create a communication endpoint.

    Reference: https://man7.org/linux/man-pages/man3/socket.3p.html
    Fn signature: int socket(int domain, int type, int protocol).

    Args:
        domain: Address Family, see AF_ aliases.
        type: Socket Type, see SOCK_ aliases.
        protocol: The protocol to use (0 selects the default).

    Returns:
        A File Descriptor or -1 in case of failure.
    """
    return external_call[
        "socket", c_int, c_int, c_int, c_int  # FnName, RetType # Args
    ](domain, type, protocol)
fn setsockopt(
    socket: c_int,
    level: c_int,
    option_name: c_int,
    option_value: UnsafePointer[c_void],
    option_len: socklen_t,
) -> c_int:
    """Libc POSIX `setsockopt` function: set a socket option.

    Reference: https://man7.org/linux/man-pages/man3/setsockopt.3p.html
    Fn signature: int setsockopt(int socket, int level, int option_name, const void *option_value, socklen_t option_len).

    Args:
        socket: A File Descriptor.
        level: The protocol level (e.g. SOL_SOCKET).
        option_name: The option to set, see SO_ aliases.
        option_value: A UnsafePointer to the value to set.
        option_len: The size of the value.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "setsockopt",
        c_int,  # FnName, RetType
        c_int,
        c_int,
        c_int,
        UnsafePointer[c_void],
        socklen_t,  # Args
    ](socket, level, option_name, option_value, option_len)
fn getsockname(
    socket: c_int, address: UnsafePointer[sockaddr], address_len: UnsafePointer[socklen_t]
) -> c_int:
    """Libc POSIX `getsockname` function: get the local address of a socket.

    Reference: https://man7.org/linux/man-pages/man3/getsockname.3p.html
    Fn signature: int getsockname(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len).

    Args:
        socket: A File Descriptor.
        address: A UnsafePointer to a buffer to store the local address.
        address_len: A UnsafePointer to the size of the buffer; updated to
            the size of the stored address on return.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "getsockname",
        c_int,  # FnName, RetType
        c_int,
        UnsafePointer[sockaddr],
        UnsafePointer[socklen_t],  # Args
    ](socket, address, address_len)
fn getpeername(
    sockfd: c_int, addr: UnsafePointer[sockaddr], address_len: UnsafePointer[socklen_t]
) -> c_int:
    """Libc POSIX `getpeername` function: get the peer address of a socket.

    Reference: https://man7.org/linux/man-pages/man2/getpeername.2.html
    Fn signature: int getpeername(int socket, struct sockaddr *restrict addr, socklen_t *restrict address_len).

    Args:
        sockfd: A File Descriptor.
        addr: A UnsafePointer to a buffer to store the address of the peer.
        address_len: A UnsafePointer to the size of the buffer; updated to
            the size of the stored address on return.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "getpeername",
        c_int,  # FnName, RetType
        c_int,
        UnsafePointer[sockaddr],
        UnsafePointer[socklen_t],  # Args
    ](sockfd, addr, address_len)
fn bind(socket: c_int, address: UnsafePointer[sockaddr], address_len: socklen_t) -> c_int:
    """Libc POSIX `bind` function.

    Reference: https://man7.org/linux/man-pages/man3/bind.3p.html
    Fn signature: int bind(int socket, const struct sockaddr *address, socklen_t address_len).

    Args:
        socket: A File Descriptor.
        address: A UnsafePointer to the local address to bind to.
        address_len: The size of the address structure.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "bind", c_int, c_int, UnsafePointer[sockaddr], socklen_t # FnName, RetType # Args
    ](socket, address, address_len)
fn listen(socket: c_int, backlog: c_int) -> c_int:
    """Libc POSIX `listen` function.

    Reference: https://man7.org/linux/man-pages/man3/listen.3p.html
    Fn signature: int listen(int socket, int backlog).

    Args:
        socket: A File Descriptor of a bound socket.
        backlog: The maximum length of the queue of pending connections.

    Returns:
        0 on success, -1 on error.
    """
    return external_call["listen", c_int, c_int, c_int](socket, backlog)
fn accept(
    socket: c_int, address: UnsafePointer[sockaddr], address_len: UnsafePointer[socklen_t]
) -> c_int:
    """Libc POSIX `accept` function.

    Reference: https://man7.org/linux/man-pages/man3/accept.3p.html
    Fn signature: int accept(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len).

    Args:
        socket: A listening File Descriptor.
        address: A UnsafePointer to a buffer that receives the address of the peer.
        address_len: A UnsafePointer holding the buffer size on entry; updated
            with the actual address length on return.

    Returns:
        A new connected File Descriptor or -1 in case of failure.
    """
    return external_call[
        "accept",
        c_int, # FnName, RetType
        c_int,
        UnsafePointer[sockaddr],
        UnsafePointer[socklen_t], # Args
    ](socket, address, address_len)
fn connect(socket: c_int, address: UnsafePointer[sockaddr], address_len: socklen_t) -> c_int:
    """Libc POSIX `connect` function.

    Reference: https://man7.org/linux/man-pages/man3/connect.3p.html
    Fn signature: int connect(int socket, const struct sockaddr *address, socklen_t address_len).

    Args:
        socket: A File Descriptor.
        address: A UnsafePointer to the address to connect to.
        address_len: The size of the address structure.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "connect", c_int, c_int, UnsafePointer[sockaddr], socklen_t # FnName, RetType # Args
    ](socket, address, address_len)
# fn recv(
# socket: c_int, buffer: UnsafePointer[c_void], length: c_size_t, flags: c_int
# ) -> c_ssize_t:
# """Libc POSIX `recv` function
# Reference: https://man7.org/linux/man-pages/man3/recv.3p.html
# Fn signature: ssize_t recv(int socket, void *buffer, size_t length, int flags).
# """
# return external_call[
# "recv",
# c_ssize_t, # FnName, RetType
# c_int,
# UnsafePointer[c_void],
# c_size_t,
# c_int, # Args
# ](socket, buffer, length, flags)
fn recv(
    socket: c_int,
    buffer: DTypePointer[DType.uint8],
    length: c_size_t,
    flags: c_int,
) -> c_ssize_t:
    """Libc POSIX `recv` function.

    Reference: https://man7.org/linux/man-pages/man3/recv.3p.html
    Fn signature: ssize_t recv(int socket, void *buffer, size_t length, int flags).

    Args:
        socket: A File Descriptor of a connected socket.
        buffer: Destination buffer for the received bytes.
        length: The capacity of the buffer in bytes.
        flags: Flags to control the behaviour of the call (e.g. MSG_PEEK).

    Returns:
        The number of bytes received, 0 on orderly peer shutdown, or -1 on error.
    """
    return external_call[
        "recv",
        c_ssize_t, # FnName, RetType
        c_int,
        DTypePointer[DType.uint8],
        c_size_t,
        c_int, # Args
    ](socket, buffer, length, flags)
fn send(
    socket: c_int, buffer: UnsafePointer[c_void], length: c_size_t, flags: c_int
) -> c_ssize_t:
    """Libc POSIX `send` function.

    Reference: https://man7.org/linux/man-pages/man3/send.3p.html
    Fn signature: ssize_t send(int socket, const void *buffer, size_t length, int flags).

    Args:
        socket: A File Descriptor of a connected socket.
        buffer: A UnsafePointer to the bytes to send.
        length: The number of bytes to send from the buffer.
        flags: Flags to control the behaviour of the call.

    Returns:
        The number of bytes sent or -1 in case of failure.
    """
    return external_call[
        "send",
        c_ssize_t, # FnName, RetType
        c_int,
        UnsafePointer[c_void],
        c_size_t,
        c_int, # Args
    ](socket, buffer, length, flags)
fn shutdown(socket: c_int, how: c_int) -> c_int:
    """Libc POSIX `shutdown` function.

    Reference: https://man7.org/linux/man-pages/man3/shutdown.3p.html
    Fn signature: int shutdown(int socket, int how).

    Args:
        socket: A File Descriptor.
        how: Which direction(s) to shut down (e.g. SHUT_RD, SHUT_WR, SHUT_RDWR).

    Returns:
        0 on success, -1 on error.
    """
    return external_call["shutdown", c_int, c_int, c_int]( # FnName, RetType # Args
        socket, how
    )
fn getaddrinfo(
    nodename: UnsafePointer[c_char],
    servname: UnsafePointer[c_char],
    hints: UnsafePointer[addrinfo],
    res: UnsafePointer[UnsafePointer[addrinfo]],
) -> c_int:
    """Libc POSIX `getaddrinfo` function.

    Reference: https://man7.org/linux/man-pages/man3/getaddrinfo.3p.html
    Fn signature: int getaddrinfo(const char *restrict nodename, const char *restrict servname, const struct addrinfo *restrict hints, struct addrinfo **restrict res).

    Args:
        nodename: Host name or numeric address string (may be null).
        servname: Service name or port string (may be null).
        hints: A UnsafePointer to an addrinfo constraining the lookup.
        res: Out-parameter receiving the head of the linked result list.

    Returns:
        0 on success, or a non-zero error code (decode with gai_strerror).
    """
    return external_call[
        "getaddrinfo",
        c_int, # FnName, RetType
        UnsafePointer[c_char],
        UnsafePointer[c_char],
        UnsafePointer[addrinfo], # Args
        UnsafePointer[UnsafePointer[addrinfo]], # Args
    ](nodename, servname, hints, res)
fn gai_strerror(ecode: c_int) -> UnsafePointer[c_char]:
    """Libc POSIX `gai_strerror` function.

    Reference: https://man7.org/linux/man-pages/man3/gai_strerror.3p.html
    Fn signature: const char *gai_strerror(int ecode).

    Args:
        ecode: A getaddrinfo error code.

    Returns:
        A UnsafePointer to a static C string describing the error; do not free it.
    """
    return external_call[
        "gai_strerror", UnsafePointer[c_char], c_int # FnName, RetType # Args
    ](ecode)
fn inet_pton(address_family: Int, address: String) -> Int:
    """Convert a textual IP address to its packed binary representation.

    Args:
        address_family: AF_INET or AF_INET6 (selects a 4- vs 16-byte buffer).
        address: The textual IP address, e.g. "127.0.0.1".

    Returns:
        The Int produced from the converted-address pointer (see note below).
    """
    var ip_buf_size = 4
    if address_family == AF_INET6:
        ip_buf_size = 16

    var ip_buf = UnsafePointer[c_void].alloc(ip_buf_size)
    # NOTE(review): conv_status (1 on success, 0/-1 on failure per inet_pton(3))
    # is never checked, and ip_buf is never freed — looks like a leak; confirm.
    var conv_status = inet_pton(
        rebind[c_int](address_family), to_char_ptr(address), ip_buf
    )
    # NOTE(review): int() on a pointer may yield the pointer value rather than
    # the packed address bytes — verify callers receive what they expect.
    return int(ip_buf.bitcast[c_uint]())
# --- ( File Related Syscalls & Structs )---------------------------------------
alias O_NONBLOCK = 16384  # open()/fcntl() flag: non-blocking I/O.
alias O_ACCMODE = 3  # Mask for extracting the access mode from open() flags.
alias O_CLOEXEC = 524288  # open() flag: close the descriptor on exec().
fn close(fildes: c_int) -> c_int:
    """Libc POSIX `close` function.

    Reference: https://man7.org/linux/man-pages/man3/close.3p.html
    Fn signature: int close(int fildes).

    Args:
        fildes: A File Descriptor to close.

    Returns:
        Upon successful completion, 0 shall be returned; otherwise, -1
        shall be returned and errno set to indicate the error.
    """
    return external_call["close", c_int, c_int](fildes)
fn open[*T: AnyType](path: UnsafePointer[c_char], oflag: c_int, *args: *T) -> c_int:
    """Libc POSIX `open` function.

    Reference: https://man7.org/linux/man-pages/man3/open.3p.html
    Fn signature: int open(const char *path, int oflag, ...).

    Args:
        path: A UnsafePointer to a C string containing the path to open.
        oflag: The flags to open the file with.
        args: The optional arguments (e.g. mode when O_CREAT is set).

    Returns:
        A File Descriptor or -1 in case of failure.
    """
    # NOTE(review): the external_call type list declares only (path, oflag)
    # while `args` is passed as a third value — confirm the variadic tail
    # actually reaches the C call as intended.
    return external_call[
        "open", c_int, UnsafePointer[c_char], c_int # FnName, RetType # Args
    ](path, oflag, args)
fn openat[
    *T: AnyType
](fd: c_int, path: UnsafePointer[c_char], oflag: c_int, *args: *T) -> c_int:
    """Libc POSIX `openat` function.

    Reference: https://man7.org/linux/man-pages/man3/open.3p.html
    Fn signature: int openat(int fd, const char *path, int oflag, ...).

    Args:
        fd: A directory File Descriptor relative paths are resolved against.
        path: A UnsafePointer to a C string containing the path to open.
        oflag: The flags to open the file with.
        args: The optional arguments (e.g. mode when O_CREAT is set).

    Returns:
        A File Descriptor or -1 in case of failure.
    """
    # NOTE(review): as with `open`, the declared arg types omit the variadic
    # tail — confirm `args` is forwarded correctly.
    return external_call[
        "openat", c_int, c_int, UnsafePointer[c_char], c_int # FnName, RetType # Args
    ](fd, path, oflag, args)
fn printf[*T: AnyType](format: UnsafePointer[c_char], *args: *T) -> c_int:
    """Libc POSIX `printf` function.

    Reference: https://man7.org/linux/man-pages/man3/fprintf.3p.html
    Fn signature: int printf(const char *restrict format, ...).

    Args:
        format: A UnsafePointer to a C string containing the format.
        args: The optional format arguments.

    Returns:
        The number of bytes written or -1 in case of failure.
    """
    # NOTE(review): only the format pointer is declared in the external_call
    # type list; confirm the variadic `args` are forwarded to C correctly.
    return external_call[
        "printf",
        c_int, # FnName, RetType
        UnsafePointer[c_char], # Args
    ](format, args)
fn sprintf[
    *T: AnyType
](s: UnsafePointer[c_char], format: UnsafePointer[c_char], *args: *T) -> c_int:
    """Libc POSIX `sprintf` function.

    Reference: https://man7.org/linux/man-pages/man3/fprintf.3p.html
    Fn signature: int sprintf(char *restrict s, const char *restrict format, ...).

    Args:
        s: A UnsafePointer to a buffer to store the result (caller-sized;
            sprintf itself performs no bounds checking).
        format: A UnsafePointer to a C string containing the format.
        args: The optional format arguments.

    Returns:
        The number of bytes written or -1 in case of failure.
    """
    # NOTE(review): variadic `args` not present in the declared type list —
    # confirm forwarding, as with `printf`.
    return external_call[
        "sprintf", c_int, UnsafePointer[c_char], UnsafePointer[c_char] # FnName, RetType # Args
    ](s, format, args)
fn read(fildes: c_int, buf: UnsafePointer[c_void], nbyte: c_size_t) -> c_int:
    """Libc POSIX `read` function.

    Reference: https://man7.org/linux/man-pages/man3/read.3p.html
    Fn signature: ssize_t read(int fildes, void *buf, size_t nbyte).

    Args:
        fildes: A File Descriptor.
        buf: A UnsafePointer to a buffer to store the read data.
        nbyte: The number of bytes to read.

    Returns:
        The number of bytes read or -1 in case of failure.
    """
    # NOTE(review): the C call returns ssize_t but this wrapper's return type
    # is c_int — confirm very large reads cannot truncate the result.
    return external_call["read", c_ssize_t, c_int, UnsafePointer[c_void], c_size_t](
        fildes, buf, nbyte
    )
fn write(fildes: c_int, buf: UnsafePointer[c_void], nbyte: c_size_t) -> c_int:
    """Libc POSIX `write` function.

    Reference: https://man7.org/linux/man-pages/man3/write.3p.html
    Fn signature: ssize_t write(int fildes, const void *buf, size_t nbyte).

    Args:
        fildes: A File Descriptor.
        buf: A UnsafePointer to the bytes to write.
        nbyte: The number of bytes to write.

    Returns:
        The number of bytes written or -1 in case of failure.
    """
    # NOTE(review): the C call returns ssize_t but this wrapper's return type
    # is c_int — confirm very large writes cannot truncate the result.
    return external_call["write", c_ssize_t, c_int, UnsafePointer[c_void], c_size_t](
        fildes, buf, nbyte
    )
# --- ( Testing Functions ) ----------------------------------------------------
fn __test_getaddrinfo__():
    """Smoke test: resolve 127.0.0.1 via getaddrinfo and print the status string."""
    var ip_addr = "127.0.0.1"

    var servinfo = UnsafePointer[addrinfo]().alloc(1)
    servinfo[0] = addrinfo()

    var hints = addrinfo()
    hints.ai_family = AF_INET
    hints.ai_socktype = SOCK_STREAM
    hints.ai_flags = AI_PASSIVE

    var status = getaddrinfo(
        to_char_ptr(ip_addr),
        UnsafePointer[UInt8](),  # no service name
        UnsafePointer.address_of(hints),
        UnsafePointer.address_of(servinfo),
    )
    # Decode the status code (0 means success) into a human-readable message.
    var msg_ptr = gai_strerror(c_int(status))
    _ = external_call["printf", c_int, UnsafePointer[c_char], UnsafePointer[c_char]](
        to_char_ptr("gai_strerror: %s"), msg_ptr
    )
    var msg = c_charptr_to_string(msg_ptr)
    # Fixed typo in the printed label ("satus" -> "status"); also removed the
    # unused local `port` the original declared but never read.
    print("getaddrinfo status: " + msg)
# fn __test_socket_client__():
# var ip_addr = "127.0.0.1" # The server's hostname or IP address
# var port = 8080 # The port used by the server
# var address_family = AF_INET
# var ip_buf = UnsafePointer[c_void].alloc(4)
# var conv_status = inet_pton(address_family, to_char_ptr(ip_addr), ip_buf)
# var raw_ip = ip_buf.bitcast[c_uint]()
# print("inet_pton: " + raw_ip.__str__() + " :: status: " + conv_status.__str__())
# var bin_port = htons(UInt16(port))
# print("htons: " + "\n" + bin_port.__str__())
# var ai = sockaddr_in(address_family, bin_port, raw_ip, StaticTuple[c_char, 8]())
# var ai_ptr = UnsafePointer[sockaddr_in].address_of(ai).bitcast[sockaddr]()
# var sockfd = socket(address_family, SOCK_STREAM, 0)
# if sockfd == -1:
# print("Socket creation error")
# print("sockfd: " + "\n" + sockfd.__str__())
# if connect(sockfd, ai_ptr, sizeof[sockaddr_in]()) == -1:
# _ = shutdown(sockfd, SHUT_RDWR)
# print("Connection error")
# return # Ensure to exit if connection fails
# var msg = to_char_ptr("Hello, world Server")
# var bytes_sent = send(sockfd, msg, strlen(msg), 0)
# if bytes_sent == -1:
# print("Failed to send message")
# else:
# print("Message sent")
# var buf_size = 1024
# var buf = UnsafePointer[UInt8]().alloc(buf_size)
# var bytes_recv = recv(sockfd, buf, buf_size, 0)
# if bytes_recv == -1:
# print("Failed to receive message")
# else:
# print("Received Message: ")
# print(String(buf.bitcast[UInt8](), bytes_recv))
# _ = shutdown(sockfd, SHUT_RDWR)
# var close_status = close(sockfd)
# if close_status == -1:
# print("Failed to close socket")
# fn __test_socket_server__() raises:
# var ip_addr = "127.0.0.1"
# var port = 8083
# var address_family = AF_INET
# var ip_buf_size = 4
# if address_family == AF_INET6:
# ip_buf_size = 16
# var ip_buf = UnsafePointer[c_void].alloc(ip_buf_size)
# var conv_status = inet_pton(address_family, to_char_ptr(ip_addr), ip_buf)
# var raw_ip = ip_buf.bitcast[c_uint]()
# print("inet_pton: " + raw_ip.__str__() + " :: status: " + conv_status.__str__())
# var bin_port = htons(UInt16(port))
# print("htons: " + "\n" + bin_port.__str__())
# var ai = sockaddr_in(address_family, bin_port, raw_ip, StaticTuple[c_char, 8]())
# var ai_ptr = UnsafePointer[sockaddr_in].address_of(ai).bitcast[sockaddr]()
# var sockfd = socket(address_family, SOCK_STREAM, 0)
# if sockfd == -1:
# print("Socket creation error")
# print("sockfd: " + "\n" + sockfd.__str__())
# var yes: Int = 1
# if (
# setsockopt(
# sockfd,
# SOL_SOCKET,
# SO_REUSEADDR,
# UnsafePointer[Int].address_of(yes).bitcast[c_void](),
# sizeof[Int](),
# )
# == -1
# ):
# print("set socket options failed")
# if bind(sockfd, ai_ptr, sizeof[sockaddr_in]()) == -1:
# # close(sockfd)
# _ = shutdown(sockfd, SHUT_RDWR)
# print("Binding socket failed. Wait a few seconds and try again?")
# if listen(sockfd, c_int(128)) == -1:
# print("Listen failed.\n on sockfd " + sockfd.__str__())
# print(
# "server: started at "
# + ip_addr
# + ":"
# + port.__str__()
# + " on sockfd "
# + sockfd.__str__()
# + "Waiting for connections..."
# )
# var their_addr_ptr = UnsafePointer[sockaddr].alloc(1)
# var sin_size = socklen_t(sizeof[socklen_t]())
# var new_sockfd = accept(
# sockfd, their_addr_ptr, UnsafePointer[socklen_t].address_of(sin_size)
# )
# if new_sockfd == -1:
# print("Accept failed")
# # close(sockfd)
# _ = shutdown(sockfd, SHUT_RDWR)
# var msg = "Hello, Mojo!"
# if send(new_sockfd, to_char_ptr(msg).bitcast[c_void](), len(msg), 0) == -1:
# print("Failed to send response")
#     print("Message sent successfully")
# _ = shutdown(sockfd, SHUT_RDWR)
# var close_status = close(new_sockfd)
# if close_status == -1:
# print("Failed to close new_sockfd")
| lightbug_http/external/libc.mojo | false |
# From Morrow package https://github.com/mojoto/morrow.mojo/tree/cc6625e16829acc55bcea060dd2ea5d6a4b6c676
# Including like this until better package management is available
# Thresholds used by normalize_timestamp() to distinguish second-, millisecond-
# and microsecond-based epoch timestamps (base value is approximately year 3000
# in epoch seconds).
alias _MAX_TIMESTAMP: Int = 32503737600
alias MAX_TIMESTAMP = _MAX_TIMESTAMP
alias MAX_TIMESTAMP_MS = MAX_TIMESTAMP * 1000
alias MAX_TIMESTAMP_US = MAX_TIMESTAMP * 1_000_000
@always_inline
fn c_gettimeofday() -> CTimeval:
    """Return the current wall-clock time via libc gettimeofday()."""
    var tv = CTimeval()
    var p_tv = Pointer[CTimeval].address_of(tv)
    # Second argument is the obsolete timezone pointer; pass 0 (NULL).
    external_call["gettimeofday", NoneType, Pointer[CTimeval], Int32](p_tv, 0)
    return tv
@always_inline
fn c_gmtime(owned tv_sec: Int) -> CTm:
    """Convert epoch seconds to broken-down UTC time via libc gmtime()."""
    var p_tv_sec = Pointer[Int].address_of(tv_sec)
    # gmtime returns a pointer to static storage; copy the struct out immediately.
    var tm = external_call["gmtime", Pointer[CTm], Pointer[Int]](p_tv_sec).load()
    return tm
@always_inline
fn c_localtime(owned tv_sec: Int) -> CTm:
    """Convert epoch seconds to broken-down local time via libc localtime()."""
    var p_tv_sec = Pointer[Int].address_of(tv_sec)
    # localtime returns a pointer to static storage; copy the struct out immediately.
    var tm = external_call["localtime", Pointer[CTm], Pointer[Int]](p_tv_sec).load()
    return tm
@value
struct TimeZone:
    """A UTC offset in seconds plus an optional display name."""

    var offset: Int  # Offset from UTC in seconds; negative means west of UTC.
    var name: String  # Display name; "None" marks the no-timezone sentinel.

    fn __init__(inout self, offset: Int, name: String = ""):
        self.offset = offset
        self.name = name

    fn __str__(self) -> String:
        return self.name

    fn is_none(self) -> Bool:
        """True when this is the "no timezone information" sentinel."""
        return self.name == "None"

    @staticmethod
    fn none() -> TimeZone:
        """Sentinel value meaning "no timezone information"."""
        return TimeZone(0, "None")

    @staticmethod
    fn local() -> TimeZone:
        """The system's local timezone, taken from C localtime()'s tm_gmtoff."""
        var local_t = c_localtime(0)
        return TimeZone(int(local_t.tm_gmtoff), "local")

    @staticmethod
    fn from_utc(utc_str: String) raises -> TimeZone:
        """Parse strings like "UTC", "Z", "+05:30" or "UTC-0800" into a TimeZone.

        Raises:
            Error: If the string is empty or not a recognized offset format.
        """
        if len(utc_str) == 0:
            raise Error("utc_str is empty")
        if utc_str == "utc" or utc_str == "UTC" or utc_str == "Z":
            return TimeZone(0, "utc")
        # Skip an optional leading "UTC" prefix, then an optional sign.
        var p = 3 if len(utc_str) > 3 and utc_str[0:3] == "UTC" else 0
        var sign = -1 if utc_str[p] == "-" else 1
        if utc_str[p] == "+" or utc_str[p] == "-":
            p += 1
        # Two hour digits are mandatory.
        if (
            len(utc_str) < p + 2
            or not isdigit(ord(utc_str[p]))
            or not isdigit(ord(utc_str[p + 1]))
        ):
            raise Error("utc_str format is invalid")
        var hours: Int = atol(utc_str[p : p + 2])
        p += 2
        # Minutes are optional and may be ":MM" or "MM".
        var minutes: Int
        if len(utc_str) <= p:
            minutes = 0
        elif len(utc_str) == p + 3 and utc_str[p] == ":":
            minutes = atol(utc_str[p + 1 : p + 3])
        elif len(utc_str) == p + 2 and isdigit(ord(utc_str[p])):
            minutes = atol(utc_str[p : p + 2])
        else:
            minutes = 0
            raise Error("utc_str format is invalid")
        var offset: Int = sign * (hours * 3600 + minutes * 60)
        return TimeZone(offset)

    fn format(self) -> String:
        """Format the offset as "+HH:MM" / "-HH:MM"."""
        var sign: String
        var offset_abs: Int
        if self.offset < 0:
            sign = "-"
            offset_abs = -self.offset
        else:
            sign = "+"
            offset_abs = self.offset
        var hh = offset_abs // 3600
        # BUGFIX: the remainder of // 3600 is in SECONDS; divide by 60 to get
        # minutes. Previously a +05:30 offset (19800s) rendered as "+05:1800".
        var mm = offset_abs % 3600 // 60
        return sign + rjust(hh, 2, "0") + ":" + rjust(mm, 2, "0")
@value
@register_passable("trivial")
struct CTm:
    """Mojo mirror of the C `struct tm` broken-down time."""

    var tm_sec: Int32  # Seconds
    var tm_min: Int32  # Minutes
    var tm_hour: Int32  # Hour
    var tm_mday: Int32  # Day of the month
    var tm_mon: Int32  # Month (0-based, as in C)
    var tm_year: Int32  # Year minus 1900
    var tm_wday: Int32  # Day of the week
    var tm_yday: Int32  # Day of the year
    var tm_isdst: Int32  # Daylight savings flag
    var tm_gmtoff: Int64  # localtime zone offset seconds

    fn __init__() -> Self:
        """Zero-initialize every field."""
        return Self {
            tm_sec: 0,
            tm_min: 0,
            tm_hour: 0,
            tm_mday: 0,
            tm_mon: 0,
            tm_year: 0,
            tm_wday: 0,
            tm_yday: 0,
            tm_isdst: 0,
            tm_gmtoff: 0,
        }
@value
struct Morrow:
    """A date-time value (year through microsecond) with an attached TimeZone."""

    var year: Int
    var month: Int
    var day: Int
    var hour: Int
    var minute: Int
    var second: Int
    var microsecond: Int
    var tz: TimeZone

    fn __init__(
        inout self,
        year: Int,
        month: Int,
        day: Int,
        hour: Int = 0,
        minute: Int = 0,
        second: Int = 0,
        microsecond: Int = 0,
        tz: TimeZone = TimeZone.none(),
    ) raises:
        """Construct a Morrow from individual date-time components.

        No range validation is performed on the components here.
        """
        self.year = year
        self.month = month
        self.day = day
        self.hour = hour
        self.minute = minute
        self.second = second
        self.microsecond = microsecond
        self.tz = tz

    fn __str__(self) raises -> String:
        """ISO-8601 representation; see isoformat()."""
        return self.isoformat()

    fn isoformat(
        self, sep: String = "T", timespec: StringLiteral = "auto"
    ) raises -> String:
        """Return the time formatted according to ISO.

        The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
        If self.tz is not the none sentinel, the UTC offset is also attached,
        giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.

        Optional argument sep specifies the separator between date and
        time, default 'T'.

        The optional argument timespec specifies the number of additional
        terms of the time to include. Valid options are 'auto', 'hours',
        'minutes', 'seconds', 'milliseconds' and 'microseconds'; any other
        value raises an (empty) Error.
        """
        var date_str = (
            rjust(self.year, 4, "0")
            + "-"
            + rjust(self.month, 2, "0")
            + "-"
            + rjust(self.day, 2, "0")
        )
        var time_str = String("")
        if timespec == "auto" or timespec == "microseconds":
            time_str = (
                rjust(self.hour, 2, "0")
                + ":"
                + rjust(self.minute, 2, "0")
                + ":"
                + rjust(self.second, 2, "0")
                + "."
                + rjust(self.microsecond, 6, "0")
            )
        elif timespec == "milliseconds":
            time_str = (
                rjust(self.hour, 2, "0")
                + ":"
                + rjust(self.minute, 2, "0")
                + ":"
                + rjust(self.second, 2, "0")
                + "."
                + rjust(self.microsecond // 1000, 3, "0")
            )
        elif timespec == "seconds":
            time_str = (
                rjust(self.hour, 2, "0")
                + ":"
                + rjust(self.minute, 2, "0")
                + ":"
                + rjust(self.second, 2, "0")
            )
        elif timespec == "minutes":
            time_str = rjust(self.hour, 2, "0") + ":" + rjust(self.minute, 2, "0")
        elif timespec == "hours":
            time_str = rjust(self.hour, 2, "0")
        else:
            # Unknown timespec value; the original raises without a message.
            raise Error()
        if self.tz.is_none():
            return sep.join(date_str, time_str)
        else:
            return sep.join(date_str, time_str) + self.tz.format()

    @staticmethod
    fn now() raises -> Self:
        """Current time in the local timezone."""
        var t = c_gettimeofday()
        return Self._fromtimestamp(t, False)

    @staticmethod
    fn utcnow() raises -> Self:
        """Current time in UTC."""
        var t = c_gettimeofday()
        return Self._fromtimestamp(t, True)

    @staticmethod
    fn _fromtimestamp(t: CTimeval, utc: Bool) raises -> Self:
        """Build a Morrow from a CTimeval, in UTC or local time."""
        var tm: CTm
        var tz: TimeZone
        if utc:
            tm = c_gmtime(t.tv_sec)
            tz = TimeZone(0, "UTC")
        else:
            tm = c_localtime(t.tv_sec)
            tz = TimeZone(int(tm.tm_gmtoff), "local")
        # struct tm stores year-1900 and 0-based months; convert to calendar values.
        var result = Self(
            int(tm.tm_year) + 1900,
            int(tm.tm_mon) + 1,
            int(tm.tm_mday),
            int(tm.tm_hour),
            int(tm.tm_min),
            int(tm.tm_sec),
            t.tv_usec,
            tz,
        )
        return result

    @staticmethod
    fn fromtimestamp(timestamp: Float64) raises -> Self:
        """Local-time Morrow from an epoch timestamp in seconds, ms or us."""
        var timestamp_ = normalize_timestamp(timestamp)
        var t = CTimeval(int(timestamp_))
        return Self._fromtimestamp(t, False)

    @staticmethod
    fn utcfromtimestamp(timestamp: Float64) raises -> Self:
        """UTC Morrow from an epoch timestamp in seconds, ms or us."""
        var timestamp_ = normalize_timestamp(timestamp)
        var t = CTimeval(int(timestamp_))
        return Self._fromtimestamp(t, True)
@value
@register_passable("trivial")
struct CTimeval:
    """Mojo mirror of the C `struct timeval`."""

    var tv_sec: Int  # Seconds
    var tv_usec: Int  # Microseconds

    fn __init__(tv_sec: Int = 0, tv_usec: Int = 0) -> Self:
        return Self {tv_sec: tv_sec, tv_usec: tv_usec}
def normalize_timestamp(timestamp: Float64) -> Float64:
    """Normalize millisecond and microsecond timestamps into normal timestamps.

    Values above MAX_TIMESTAMP are treated as milliseconds, and values above
    MAX_TIMESTAMP_MS as microseconds; anything above MAX_TIMESTAMP_US is
    rejected.

    Raises:
        Error: If the timestamp exceeds MAX_TIMESTAMP_US.
    """
    if timestamp > MAX_TIMESTAMP:
        if timestamp < MAX_TIMESTAMP_MS:
            timestamp /= 1000
        elif timestamp < MAX_TIMESTAMP_US:
            timestamp /= 1_000_000
        else:
            # BUGFIX: the message previously read "...600.0is too large." —
            # a space was missing before "is".
            raise Error(
                "The specified timestamp " + timestamp.__str__() + " is too large."
            )
    return timestamp
fn _repeat_string(string: String, n: Int) -> String:
    """Return `string` concatenated with itself `n` times ("" when n <= 0)."""
    var repeated: String = ""
    var remaining = n
    while remaining > 0:
        repeated += string
        remaining -= 1
    return repeated
fn rjust(string: String, width: Int, fillchar: String = " ") -> String:
    """Right-justify `string` in a field of `width`, padding on the left with `fillchar`.

    Strings already at least `width` long are returned unchanged.
    """
    var pad_count = width - len(string)
    var padding = _repeat_string(fillchar, pad_count)
    return padding + string
fn rjust(string: Int, width: Int, fillchar: String = " ") -> String:
    """Right-justify the decimal representation of an Int (see the String overload)."""
    var as_text = string.__str__()
    return rjust(as_text, width, fillchar)
| lightbug_http/external/morrow.mojo | false |
import ..io
from ..builtins import copy, panic
from ..builtins.bytes import UInt8, index_byte
from ..strings import StringBuilder
alias MIN_READ_BUFFER_SIZE = 16  # Smallest internal buffer size.
alias MAX_CONSECUTIVE_EMPTY_READS = 100  # Zero-byte reads tolerated before ERR_NO_PROGRESS.
alias DEFAULT_BUF_SIZE = 8200  # Default capacity of a Reader's internal buffer.

# Error messages mirroring Go's bufio package.
alias ERR_INVALID_UNREAD_BYTE = "bufio: invalid use of unread_byte"
alias ERR_INVALID_UNREAD_RUNE = "bufio: invalid use of unread_rune"
alias ERR_BUFFER_FULL = "bufio: buffer full"
alias ERR_NEGATIVE_COUNT = "bufio: negative count"
alias ERR_NEGATIVE_READ = "bufio: reader returned negative count from Read"
alias ERR_NEGATIVE_WRITE = "bufio: writer returned negative count from write"
# buffered input
struct Reader[R: io.Reader](Sized, io.Reader, io.ByteReader, io.ByteScanner):
    """Implements buffering for an io.Reader object (port of Go's bufio.Reader)."""

    var buf: List[UInt8]  # internal buffer; read/write positions index into its capacity
    var reader: R  # reader provided by the client
    var read_pos: Int  # next position to read from buf
    var write_pos: Int  # next position to write into buf (buf read and write positions)
    var last_byte: Int  # last byte read for unread_byte; -1 means invalid
    var last_rune_size: Int  # size of last rune read for unread_rune; -1 means invalid
    var err: Error  # sticky error from the underlying reader; drained via read_error()
    fn __init__(
        inout self,
        owned reader: R,
        buf: List[UInt8] = List[UInt8](capacity=DEFAULT_BUF_SIZE),
        read_pos: Int = 0,
        write_pos: Int = 0,
        last_byte: Int = -1,
        last_rune_size: Int = -1,
    ):
        """Construct a buffered reader wrapping `reader`.

        The default buffer is empty with DEFAULT_BUF_SIZE capacity; the -1
        sentinels mark unread_byte/unread_rune as currently invalid.
        """
        self.buf = buf
        self.reader = reader^
        self.read_pos = read_pos
        self.write_pos = write_pos
        self.last_byte = last_byte
        self.last_rune_size = last_rune_size
        self.err = Error()
    fn __moveinit__(inout self, owned existing: Self):
        """Move-construct by transferring the buffer, reader and error state."""
        self.buf = existing.buf^
        self.reader = existing.reader^
        self.read_pos = existing.read_pos
        self.write_pos = existing.write_pos
        self.last_byte = existing.last_byte
        self.last_rune_size = existing.last_rune_size
        self.err = existing.err^
    # size returns the size of the underlying buffer in bytes.
    fn __len__(self) -> Int:
        """Returns the length of the internal buffer list (its len, not its capacity)."""
        return len(self.buf)
# reset discards any buffered data, resets all state, and switches
# the buffered reader to read from r.
# Calling reset on the zero value of [Reader] initializes the internal buffer
# to the default size.
# Calling self.reset(b) (that is, resetting a [Reader] to itself) does nothing.
# fn reset[R: io.Reader](self, reader: R):
# # If a Reader r is passed to NewReader, NewReader will return r.
# # Different layers of code may do that, and then later pass r
# # to reset. Avoid infinite recursion in that case.
# if self == reader:
# return
# # if self.buf == nil:
# # self.buf = make(List[UInt8], DEFAULT_BUF_SIZE)
# self.reset(self.buf, r)
    fn reset(inout self, buf: List[UInt8], owned reader: R):
        """Discard any buffered data and state, and switch to reading from `reader` using `buf`."""
        self = Reader[R](
            buf=buf,
            reader=reader^,
            last_byte=-1,
            last_rune_size=-1,
        )
    fn fill(inout self):
        """Reads a new chunk into the buffer.

        On failure or repeated zero-byte reads this sets self.err (including
        io.ERR_NO_PROGRESS) instead of returning an error.
        """
        # Slide existing data to beginning.
        if self.read_pos > 0:
            var current_capacity = self.buf.capacity
            self.buf = self.buf[self.read_pos : self.write_pos]
            self.buf.reserve(current_capacity)
            self.write_pos -= self.read_pos
            self.read_pos = 0

        # Compares to the length of the entire List[UInt8] object, including 0 initialized positions.
        # IE. var b = List[UInt8](capacity=8200), then trying to write at b[8200] and onwards will fail.
        if self.write_pos >= self.buf.capacity:
            panic("bufio.Reader: tried to fill full buffer")

        # Read new data: try a limited number of times.
        var i: Int = MAX_CONSECUTIVE_EMPTY_READS
        while i > 0:
            # TODO: Using temp until slicing can return a Reference
            # NOTE(review): the reader may fill temp with up to DEFAULT_BUF_SIZE
            # bytes; this assumes copy() clamps to the capacity remaining at
            # write_pos — confirm against the copy() implementation.
            var temp = List[UInt8](capacity=DEFAULT_BUF_SIZE)
            var bytes_read: Int
            var err: Error
            bytes_read, err = self.reader.read(temp)
            if bytes_read < 0:
                panic(ERR_NEGATIVE_READ)

            # Count only what actually landed in self.buf, not what the reader produced.
            bytes_read = copy(self.buf, temp, self.write_pos)
            self.write_pos += bytes_read

            if err:
                self.err = err
                return

            if bytes_read > 0:
                return
            i -= 1

        self.err = Error(io.ERR_NO_PROGRESS)
fn read_error(inout self) -> Error:
if not self.err:
return Error()
var err = self.err
self.err = Error()
return err
    fn peek(inout self, number_of_bytes: Int) -> (List[UInt8], Error):
        """Returns the next n bytes without advancing the reader. The bytes stop
        being valid at the next read call. If Peek returns fewer than n bytes, it
        also returns an error explaining why the read is short. The error is
        [ERR_BUFFER_FULL] if number_of_bytes is larger than b's buffer size.

        Calling Peek prevents a [Reader.unread_byte] or [Reader.unread_rune] call from succeeding
        until the next read operation.

        Args:
            number_of_bytes: The number of bytes to peek.

        Returns:
            A copy of the peeked bytes (possibly fewer than requested) and an Error.
        """
        if number_of_bytes < 0:
            return List[UInt8](), Error(ERR_NEGATIVE_COUNT)

        # Peeking invalidates unread_byte/unread_rune.
        self.last_byte = -1
        self.last_rune_size = -1

        # Keep filling until enough bytes are buffered or the buffer is full.
        while self.write_pos - self.read_pos < number_of_bytes and self.write_pos - self.read_pos < self.buf.capacity:
            self.fill() # self.write_pos-self.read_pos < self.buf.capacity => buffer is not full

        if number_of_bytes > self.buf.capacity:
            return self.buf[self.read_pos : self.write_pos], Error(ERR_BUFFER_FULL)

        # 0 <= n <= self.buf.capacity
        var err = Error()
        var available_space = self.write_pos - self.read_pos
        if available_space < number_of_bytes:
            # not enough data in buffer; surface the underlying error if any.
            err = self.read_error()
            if not err:
                err = Error(ERR_BUFFER_FULL)

        return self.buf[self.read_pos : self.read_pos + number_of_bytes], err
fn discard(inout self, number_of_bytes: Int) -> (Int, Error):
"""Discard skips the next n bytes, returning the number of bytes discarded.
If Discard skips fewer than n bytes, it also returns an error.
If 0 <= number_of_bytes <= self.buffered(), Discard is guaranteed to succeed without
reading from the underlying io.Reader.
"""
if number_of_bytes < 0:
return 0, Error(ERR_NEGATIVE_COUNT)
if number_of_bytes == 0:
return 0, Error()
self.last_byte = -1
self.last_rune_size = -1
var remain = number_of_bytes
while True:
var skip = self.buffered()
if skip == 0:
self.fill()
skip = self.buffered()
if skip > remain:
skip = remain
self.read_pos += skip
remain -= skip
if remain == 0:
return number_of_bytes, Error()
    fn read(inout self, inout dest: List[UInt8]) -> (Int, Error):
        """Reads data into dest.

        It returns the number of bytes read into dest. The destination's free
        space is its capacity minus its current length.
        The bytes are taken from at most one read on the underlying [Reader],
        hence the count may be less than the requested amount.
        To read an exact number of bytes, use io.ReadFull.
        If the underlying [Reader] can return a non-zero count with io.EOF,
        then this read method can do so as well; see the [io.Reader] docs."""
        var space_available = dest.capacity - len(dest)
        if space_available == 0:
            if self.buffered() > 0:
                return 0, Error()
            return 0, self.read_error()

        var bytes_read: Int = 0
        if self.read_pos == self.write_pos:
            # NOTE(review): this compares against len(self.buf) while the rest
            # of the struct compares against self.buf.capacity — on a freshly
            # constructed Reader len(buf) is 0, so this branch always wins.
            # Confirm which convention is intended.
            if space_available >= len(self.buf):
                # Large read, empty buffer.
                # Read directly into dest to avoid copy.
                var bytes_read: Int
                var err: Error
                bytes_read, err = self.reader.read(dest)
                self.err = err
                if bytes_read < 0:
                    panic(ERR_NEGATIVE_READ)

                if bytes_read > 0:
                    self.last_byte = int(dest[bytes_read - 1])
                    self.last_rune_size = -1

                return bytes_read, self.read_error()

            # One read.
            # Do not use self.fill, which will loop.
            self.read_pos = 0
            self.write_pos = 0
            var bytes_read: Int
            var err: Error
            bytes_read, err = self.reader.read(self.buf)
            if bytes_read < 0:
                panic(ERR_NEGATIVE_READ)

            if bytes_read == 0:
                return 0, self.read_error()

            self.write_pos += bytes_read

        # copy as much as we can
        # Note: if the slice panics here, it is probably because
        # the underlying reader returned a bad count. See issue 49795.
        bytes_read = copy(dest, self.buf[self.read_pos : self.write_pos])
        self.read_pos += bytes_read
        self.last_byte = int(self.buf[self.read_pos - 1])
        self.last_rune_size = -1
        return bytes_read, Error()
    fn read_byte(inout self) -> (UInt8, Error):
        """Reads and returns a single byte from the internal buffer.

        Returns:
            The byte and an empty Error on success, or (0, error) when no
            byte is available.
        """
        self.last_rune_size = -1
        # Refill until a byte is available or a sticky error is set
        # (fill() sets io.ERR_NO_PROGRESS after repeated empty reads,
        # so this loop terminates).
        while self.read_pos == self.write_pos:
            if self.err:
                return UInt8(0), self.read_error()
            self.fill() # buffer is empty

        var c = self.buf[self.read_pos]
        self.read_pos += 1
        self.last_byte = int(c)
        return c, Error()
    fn unread_byte(inout self) -> Error:
        """Unreads the last byte. Only the most recently read byte can be unread.

        unread_byte returns an error if the most recent method called on the
        [Reader] was not a read operation. Notably, [Reader.peek] and
        [Reader.discard] are not considered read operations.
        """
        # Parses as: last_byte < 0 OR (read_pos == 0 AND write_pos > 0),
        # i.e. nothing was read, or we are at the start of a non-empty buffer.
        if self.last_byte < 0 or self.read_pos == 0 and self.write_pos > 0:
            return Error(ERR_INVALID_UNREAD_BYTE)

        # self.read_pos > 0 or self.write_pos == 0
        if self.read_pos > 0:
            self.read_pos -= 1
        else:
            # self.read_pos == 0 and self.write_pos == 0
            self.write_pos = 1

        # Restore the saved byte so the next read returns it again.
        self.buf[self.read_pos] = self.last_byte
        self.last_byte = -1
        self.last_rune_size = -1
        return Error()
# # read_rune reads a single UTF-8 encoded Unicode character and returns the
# # rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
# # and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
# fn read_rune(inout self) (r rune, size int, err error):
# for self.read_pos+utf8.UTFMax > self.write_pos and !utf8.FullRune(self.buf[self.read_pos:self.write_pos]) and self.err == nil and self.write_pos-self.read_pos < self.buf.capacity:
# self.fill() # self.write_pos-self.read_pos < len(buf) => buffer is not full
# self.last_rune_size = -1
# if self.read_pos == self.write_pos:
# return 0, 0, self.read_poseadErr()
# r, size = rune(self.buf[self.read_pos]), 1
# if r >= utf8.RuneSelf:
# r, size = utf8.DecodeRune(self.buf[self.read_pos:self.write_pos])
# self.read_pos += size
# self.last_byte = int(self.buf[self.read_pos-1])
# self.last_rune_size = size
# return r, size, nil
# # unread_rune unreads the last rune. If the most recent method called on
# # the [Reader] was not a [Reader.read_rune], [Reader.unread_rune] returns an error. (In this
# # regard it is stricter than [Reader.unread_byte], which will unread the last byte
# # from any read operation.)
# fn unread_rune() error:
# if self.last_rune_size < 0 or self.read_pos < self.last_rune_size:
# return ERR_INVALID_UNREAD_RUNE
# self.read_pos -= self.last_rune_size
# self.last_byte = -1
# self.last_rune_size = -1
# return nil
fn buffered(self) -> Int:
"""Returns the number of bytes that can be read from the current buffer.
Returns:
The number of bytes that can be read from the current buffer.
"""
return self.write_pos - self.read_pos
    fn read_slice(inout self, delim: UInt8) -> (List[UInt8], Error):
        """Reads until the first occurrence of delim in the input,
        returning a slice pointing at the bytes in the buffer. It includes the first occurrence of the delimiter.
        The bytes stop being valid at the next read.
        If read_slice encounters an error before finding a delimiter,
        it returns all the data in the buffer and the error itself (often io.EOF).
        read_slice fails with error [ERR_BUFFER_FULL] if the buffer fills without a delim.
        Because the data returned from read_slice will be overwritten
        by the next I/O operation, most clients should use
        [Reader.read_bytes] or read_string instead.
        read_slice returns a non-empty error if and only if the line does not end in delim.

        Args:
            delim: The delimiter to search for.

        Returns:
            The List[UInt8] from the internal buffer and an Error.
        """
        var err = Error()
        var s = 0 # search start index
        var line: List[UInt8] = List[UInt8](capacity=DEFAULT_BUF_SIZE)
        while True:
            # Search buffer (only the not-yet-scanned region past `s`).
            var i = index_byte(self.buf[self.read_pos + s : self.write_pos], delim)
            if i >= 0:
                i += s
                # Include the delimiter itself in the returned slice.
                line = self.buf[self.read_pos : self.read_pos + i + 1]
                self.read_pos += i + 1
                break

            # Pending error?
            if self.err:
                line = self.buf[self.read_pos : self.write_pos]
                self.read_pos = self.write_pos
                err = self.read_error()
                break

            # Buffer full?
            if self.buffered() >= self.buf.capacity:
                self.read_pos = self.write_pos
                line = self.buf
                err = Error(ERR_BUFFER_FULL)
                break

            s = self.write_pos - self.read_pos # do not rescan area we scanned before
            self.fill() # buffer is not full

        # Handle last byte, if any, so unread_byte stays consistent.
        var i = len(line) - 1
        if i >= 0:
            self.last_byte = int(line[i])
            self.last_rune_size = -1

        return line, err
    fn read_line(inout self) raises -> (List[UInt8], Bool):
        """Low-level line-reading primitive. Most callers should use
        [Reader.read_bytes]('\n') or [Reader.read_string]('\n') instead or use a [Scanner].

        read_line tries to return a single line, not including the end-of-line bytes.
        If the line was too long for the buffer then isPrefix is set and the
        beginning of the line is returned. The rest of the line will be returned
        from future calls. isPrefix will be false when returning the last fragment
        of the line. The returned buffer is only valid until the next call to
        read_line. read_line either returns a non-nil line or it returns an error,
        never both.

        The text returned from read_line does not include the line end ("\r\n" or "\n").
        No indication or error is given if the input ends without a final line end.
        Calling [Reader.unread_byte] after read_line will always unread the last byte read
        (possibly a character belonging to the line end) even if that byte is not
        part of the line returned by read_line.

        Returns:
            The line read (without its line ending) and True if the line was
            truncated because the buffer filled before a newline was found.

        Raises:
            Error: If the buffer rewind below would go past the start of the buffer.
        """
        var line: List[UInt8]
        var err: Error
        line, err = self.read_slice(ord("\n"))
        if err and str(err) == ERR_BUFFER_FULL:
            # Handle the case where "\r\n" straddles the buffer.
            if len(line) > 0 and line[len(line) - 1] == ord("\r"):
                # Put the '\r' back on buf and drop it from line.
                # Let the next call to read_line check for "\r\n".
                if self.read_pos == 0:
                    # should be unreachable
                    raise Error("bufio: tried to rewind past start of buffer")
                self.read_pos -= 1
                line = line[: len(line) - 1]
            return line, True

        if len(line) == 0:
            return line, False

        # Strip the trailing "\n", and a preceding "\r" if present.
        if line[len(line) - 1] == ord("\n"):
            var drop = 1
            if len(line) > 1 and line[len(line) - 2] == ord("\r"):
                drop = 2
            line = line[: len(line) - drop]

        return line, False
    fn collect_fragments(inout self, delim: UInt8) -> (List[List[UInt8]], List[UInt8], Int, Error):
        """Reads until the first occurrence of delim in the input. It
        returns (slice of full buffers, remaining bytes before delim, total number
        of bytes in the combined first two elements, error).

        Args:
            delim: The delimiter to search for.

        Returns:
            The list of full internal buffers consumed along the way, the final
            fragment (ending in delim unless an error occurred), the combined
            byte count of both, and the terminating error (if any).
        """
        # Use read_slice to look for delim, accumulating full buffers.
        var err = Error()
        var full_buffers = List[List[UInt8]]()
        var total_len = 0
        var frag = List[UInt8](capacity=8200)
        while True:
            frag, err = self.read_slice(delim)
            if not err:
                # Delimiter found; frag is the last piece.
                break

            var read_slice_error = err
            # Any error other than ERR_BUFFER_FULL (e.g. io.EOF) terminates collection.
            if str(read_slice_error) != ERR_BUFFER_FULL:
                err = read_slice_error
                break

            # Buffer was full without a delimiter: keep a copy of the buffer
            # (frag is only valid until the next read) and continue scanning.
            var buf = List[UInt8](frag)
            full_buffers.append(buf)
            total_len += len(buf)

        total_len += len(frag)
        return full_buffers, frag, total_len, err
    fn read_bytes(inout self, delim: UInt8) -> (List[UInt8], Error):
        """Reads until the first occurrence of delim in the input,
        returning a slice containing the data up to and including the delimiter.
        If read_bytes encounters an error before finding a delimiter,
        it returns the data read before the error and the error itself (often io.EOF).
        read_bytes returns err != nil if and only if the returned data does not end in
        delim.
        For simple uses, a Scanner may be more convenient.

        Args:
            delim: The delimiter to search for.

        Returns:
            The List[UInt8] from the internal buffer.
        """
        var full: List[List[UInt8]]
        var frag: List[UInt8]
        var n: Int
        var err: Error
        full, frag, n, err = self.collect_fragments(delim)

        # Allocate new buffer to hold the full pieces and the fragment.
        var buf = List[UInt8](capacity=n)
        n = 0

        # copy full pieces and fragment in, back to back, reusing n as the write offset.
        for i in range(len(full)):
            var buffer = full[i]
            n += copy(buf, buffer, n)

        _ = copy(buf, frag, n)
        return buf, err
    fn read_string(inout self, delim: UInt8) -> (String, Error):
        """Reads until the first occurrence of delim in the input,
        returning a string containing the data up to and including the delimiter.
        If read_string encounters an error before finding a delimiter,
        it returns the data read before the error and the error itself (often io.EOF).
        read_string returns err != nil if and only if the returned data does not end in
        delim.
        For simple uses, a Scanner may be more convenient.

        Args:
            delim: The delimiter to search for.

        Returns:
            The String from the internal buffer.
        """
        var full: List[List[UInt8]]
        var frag: List[UInt8]
        var n: Int
        var err: Error
        full, frag, n, err = self.collect_fragments(delim)

        # Allocate new buffer to hold the full pieces and the fragment.
        # StringBuilder avoids repeated String concatenation while assembling the result.
        var buf = StringBuilder(capacity=n)

        # copy full pieces and fragment in.
        for i in range(len(full)):
            var buffer = full[i]
            _ = buf.write(Span(buffer))

        _ = buf.write(Span(frag))
        return str(buf), err
# fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int64, Error):
# """Writes the internal buffer to the writer. This may make multiple calls to the [Reader.Read] method of the underlying [Reader].
# If the underlying reader supports the [Reader.WriteTo] method,
# this calls the underlying [Reader.WriteTo] without buffering.
# write_to implements io.WriterTo.
# Args:
# writer: The writer to write to.
# Returns:
# The number of bytes written.
# """
# self.last_byte = -1
# self.last_rune_size = -1
# var bytes_written: Int64
# var err: Error
# bytes_written, err = self.write_buf(writer)
# if err:
# return bytes_written, err
# # internal buffer not full, fill before writing to writer
# if (self.write_pos - self.read_pos) < self.buf.capacity:
# self.fill()
# while self.read_pos < self.write_pos:
# # self.read_pos < self.write_pos => buffer is not empty
# var bw: Int64
# var err: Error
# bw, err = self.write_buf(writer)
# bytes_written += bw
# self.fill() # buffer is empty
# return bytes_written, Error()
# fn write_buf[W: io.Writer](inout self, inout writer: W) -> (Int64, Error):
# """Writes the [Reader]'s buffer to the writer.
# Args:
# writer: The writer to write to.
# Returns:
# The number of bytes written.
# """
# # Nothing to write
# if self.read_pos == self.write_pos:
# return Int64(0), Error()
# # Write the buffer to the writer, if we hit EOF it's fine. That's not a failure condition.
# var bytes_written: Int
# var err: Error
# var buf_to_write = self.buf[self.read_pos : self.write_pos]
# bytes_written, err = writer.write(Span(buf_to_write))
# if err:
# return Int64(bytes_written), err
# if bytes_written < 0:
# panic(ERR_NEGATIVE_WRITE)
# self.read_pos += bytes_written
# return Int64(bytes_written), Error()
# fn new_reader_size[R: io.Reader](owned reader: R, size: Int) -> Reader[R]:
# """Returns a new [Reader] whose buffer has at least the specified
# size. If the argument io.Reader is already a [Reader] with large enough
# size, it returns the underlying [Reader].
# Args:
# reader: The reader to read from.
# size: The size of the buffer.
# Returns:
# The new [Reader].
# """
# # # Is it already a Reader?
# # b, ok := rd.(*Reader)
# # if ok and self.buf.capacity >= size:
# # return b
# var r = Reader(reader ^)
# r.reset(List[UInt8](capacity=max(size, MIN_READ_BUFFER_SIZE)), reader ^)
# return r
# fn new_reader[R: io.Reader](reader: R) -> Reader[R]:
# """Returns a new [Reader] whose buffer has the default size.
# Args:
# reader: The reader to read from.
# Returns:
# The new [Reader].
# """
# return new_reader_size(reader, DEFAULT_BUF_SIZE)
# buffered output
# TODO: Reader and Writer maybe should not take ownership of the underlying reader/writer? Seems okay for now.
struct Writer[W: io.Writer](Sized, io.Writer, io.ByteWriter, io.StringWriter):
    """Implements buffering for an [io.Writer] object.

    If an error occurs writing to a [Writer], no more data will be
    accepted and all subsequent writes, and [Writer.flush], will return the error.
    After all data has been written, the client should call the
    [Writer.flush] method to guarantee all data has been forwarded to
    the underlying [io.Writer]."""

    var buf: List[UInt8]  # Buffered bytes waiting to be flushed.
    var bytes_written: Int  # Number of bytes currently buffered in buf.
    var writer: W  # The underlying writer that buffered data is flushed to.
    var err: Error  # Sticky error; once set, subsequent operations return it.

    fn __init__(
        inout self,
        owned writer: W,
        buf: List[UInt8] = List[UInt8](capacity=DEFAULT_BUF_SIZE),
        bytes_written: Int = 0,
    ):
        self.buf = buf
        self.bytes_written = bytes_written
        self.writer = writer^
        self.err = Error()

    fn __moveinit__(inout self, owned existing: Self):
        self.buf = existing.buf^
        self.bytes_written = existing.bytes_written
        self.writer = existing.writer^
        self.err = existing.err^

    fn __len__(self) -> Int:
        """Returns the size of the underlying buffer in bytes."""
        return len(self.buf)

    fn reset(inout self, owned writer: W):
        """Discards any unflushed buffered data, clears any error, and
        resets b to write its output to w.
        Calling reset on the zero value of [Writer] initializes the internal buffer
        to the default size.
        Calling w.reset(w) (that is, resetting a [Writer] to itself) does nothing.

        Args:
            writer: The writer to write to.
        """
        # # If a Writer w is passed to new_writer, new_writer will return w.
        # # Different layers of code may do that, and then later pass w
        # # to reset. Avoid infinite recursion in that case.
        # if self == writer:
        #     return
        # if self.buf == nil:
        #     self.buf = make(List[UInt8], DEFAULT_BUF_SIZE)
        self.err = Error()
        self.bytes_written = 0
        self.writer = writer^

    fn flush(inout self) -> Error:
        """Writes any buffered data to the underlying [io.Writer].

        Returns:
            An error if the flush failed (the same error is also stored in self.err).
        """
        # Prior to attempting to flush, check if there's a pre-existing error or if there's nothing to flush.
        var err = Error()
        if self.err:
            return self.err
        if self.bytes_written == 0:
            return err

        var bytes_written: Int = 0
        bytes_written, err = self.writer.write(Span(self.buf[0 : self.bytes_written]))

        # If the write was short, set a short write error and try to shift up the remaining bytes.
        if bytes_written < self.bytes_written and not err:
            err = Error(io.ERR_SHORT_WRITE)

        if err:
            if bytes_written > 0 and bytes_written < self.bytes_written:
                # Keep the unflushed tail at the front of the buffer for the next attempt.
                _ = copy(self.buf, self.buf[bytes_written : self.bytes_written])

            self.bytes_written -= bytes_written
            self.err = err
            return err

        # Reset the buffer
        self.buf = List[UInt8](capacity=self.buf.capacity)
        self.bytes_written = 0
        return err

    fn available(self) -> Int:
        """Returns how many bytes are unused in the buffer."""
        return self.buf.capacity - len(self.buf)

    fn available_buffer(self) raises -> List[UInt8]:
        """Returns an empty buffer with self.available() capacity.
        This buffer is intended to be appended to and
        passed to an immediately succeeding [Writer.write] call.
        The buffer is only valid until the next write operation on self.

        Returns:
            An empty buffer with self.available() capacity.
        """
        return self.buf[self.bytes_written :][:0]

    fn buffered(self) -> Int:
        """Returns the number of bytes that have been written into the current buffer.

        Returns:
            The number of bytes that have been written into the current buffer.
        """
        return self.bytes_written

    fn write(inout self, src: Span[UInt8]) -> (Int, Error):
        """Writes the contents of src into the buffer.
        It returns the number of bytes written.
        If nn < len(src), it also returns an error explaining
        why the write is short.

        Args:
            src: The bytes to write.

        Returns:
            The number of bytes written.
        """
        var total_bytes_written: Int = 0
        var src_copy = src
        var err = Error()

        # While src does not fit in the remaining buffer space, either write it
        # straight through (empty buffer) or fill the buffer and flush.
        while len(src_copy) > self.available() and not self.err:
            var bytes_written: Int = 0
            if self.buffered() == 0:
                # Large write, empty buffer.
                # write directly from p to avoid copy.
                bytes_written, err = self.writer.write(src_copy)
                self.err = err
            else:
                bytes_written = copy(self.buf, src_copy, self.bytes_written)
                self.bytes_written += bytes_written
                _ = self.flush()

            total_bytes_written += bytes_written
            src_copy = src_copy[bytes_written : len(src_copy)]

        if self.err:
            return total_bytes_written, self.err

        # The remainder fits in the buffer; stash it for a later flush.
        var n = copy(self.buf, src_copy, self.bytes_written)
        self.bytes_written += n
        total_bytes_written += n
        return total_bytes_written, err

    fn write_byte(inout self, src: UInt8) -> (Int, Error):
        """Writes a single byte to the internal buffer.

        Args:
            src: The byte to write.

        Returns:
            The number of bytes written (1 on success) and an error, if any.
        """
        if self.err:
            return 0, self.err

        # Bug fix: flush only when the buffer is actually full. The previous
        # version called flush() on every write_byte, defeating buffering.
        if self.available() <= 0:
            var err = self.flush()
            if err:
                return 0, self.err

        self.buf.append(src)
        self.bytes_written += 1

        return 1, Error()

    # # WriteRune writes a single Unicode code point, returning
    # # the number of bytes written and any error.
    # fn WriteRune(r rune) (size int, err error):
    #     # Compare as uint32 to correctly handle negative runes.
    #     if uint32(r) < utf8.RuneSelf:
    #         err = self.write_posriteByte(byte(r))
    #         if err != nil:
    #             return 0, err
    #         return 1, nil
    #     if self.err != nil:
    #         return 0, self.err
    #     n := self.available()
    #     if n < utf8.UTFMax:
    #         if self.flush(); self.err != nil:
    #             return 0, self.err
    #         n = self.available()
    #         if n < utf8.UTFMax:
    #             # Can only happen if buffer is silly small.
    #             return self.write_posriteString(string(r))
    #     size = utf8.EncodeRune(self.buf[self.bytes_written:], r)
    #     self.bytes_written += size
    #     return size, nil

    fn write_string(inout self, src: String) -> (Int, Error):
        """Writes a string to the internal buffer.
        It returns the number of bytes written.
        If the count is less than len(s), it also returns an error explaining
        why the write is short.

        Args:
            src: The string to write.

        Returns:
            The number of bytes written.
        """
        return self.write(src.as_bytes_slice())

    fn read_from[R: io.Reader](inout self, inout reader: R) -> (Int64, Error):
        """Implements [io.ReaderFrom]. If the underlying writer
        supports the read_from method, this calls the underlying read_from.
        If there is buffered data and an underlying read_from, this fills
        the buffer and writes it before calling read_from.

        Args:
            reader: The reader to read from.

        Returns:
            The number of bytes read.
        """
        if self.err:
            return Int64(0), self.err

        var bytes_read: Int = 0
        var total_bytes_written: Int64 = 0
        var err = Error()
        while True:
            if self.available() == 0:
                var err = self.flush()
                if err:
                    return total_bytes_written, err

            # Retry empty reads a bounded number of times to guard against a
            # misbehaving reader that returns (0, nil) forever.
            var nr = 0
            while nr < MAX_CONSECUTIVE_EMPTY_READS:
                # TODO: should really be using a slice that returns refs and not a copy.
                # Read into remaining unused space in the buffer. We need to reserve capacity for the slice otherwise read will never hit EOF.
                var sl = self.buf[self.bytes_written : len(self.buf)]
                sl.reserve(self.buf.capacity)
                bytes_read, err = reader.read(sl)
                if bytes_read > 0:
                    bytes_read = copy(self.buf, sl, self.bytes_written)
                if bytes_read != 0 or err:
                    break
                nr += 1

            if nr == MAX_CONSECUTIVE_EMPTY_READS:
                return Int64(bytes_read), Error(io.ERR_NO_PROGRESS)

            self.bytes_written += bytes_read
            total_bytes_written += Int64(bytes_read)
            if err:
                break

        # EOF is the expected termination for read_from, not a failure.
        # Consistency fix: compare against str(io.EOF) like the rest of the file.
        if err and str(err) == str(io.EOF):
            # If we filled the buffer exactly, flush preemptively.
            if self.available() == 0:
                err = self.flush()
            else:
                err = Error()

        # Bug fix: propagate the remaining error instead of discarding it
        # (previously this always returned a fresh empty Error()).
        return total_bytes_written, err
fn new_writer_size[W: io.Writer](owned writer: W, size: Int) -> Writer[W]:
    """Returns a new [Writer] whose buffer has at least the specified
    size. If the argument io.Writer is already a [Writer] with large enough
    size, it returns the underlying [Writer].

    Args:
        writer: The writer that buffered data will be flushed to.
        size: Requested buffer capacity; non-positive values fall back to DEFAULT_BUF_SIZE.

    Returns:
        The new [Writer].
    """
    # Is it already a Writer?
    # b, ok := w.(*Writer)
    # if ok and self.buf.capacity >= size:
    #     return b

    var buf_size = size
    if buf_size <= 0:
        buf_size = DEFAULT_BUF_SIZE

    # Bug fix: allocate with the adjusted buf_size; the original passed `size`,
    # silently ignoring the DEFAULT_BUF_SIZE fallback for size <= 0.
    return Writer[W](
        buf=List[UInt8](capacity=buf_size),
        writer=writer^,
        bytes_written=0,
    )
fn new_writer[W: io.Writer](owned writer: W) -> Writer[W]:
    """Constructs a [Writer] with the default buffer size.
    If the argument io.Writer is already a [Writer] with large enough buffer size,
    it returns the underlying [Writer]."""
    alias default_size = DEFAULT_BUF_SIZE
    return new_writer_size[W](writer^, default_size)
# buffered input and output
struct ReadWriter[R: io.Reader, W: io.Writer]():
    """Bundles a [Reader] and a [Writer] into one value.
    It implements [io.ReadWriter]."""

    var reader: R  # Source of buffered reads.
    var writer: W  # Destination for buffered writes.

    fn __init__(inout self, owned reader: R, owned writer: W):
        # The two fields are independent; take ownership of each in turn.
        self.writer = writer^
        self.reader = reader^
# new_read_writer
fn new_read_writer[R: io.Reader, W: io.Writer](owned reader: R, owned writer: W) -> ReadWriter[R, W]:
    """Builds a [ReadWriter] that forwards reads to `reader` and writes to `writer`."""
    return ReadWriter[R, W](reader^, writer^)
| lightbug_http/external/gojo/bufio/bufio.mojo | false |
import math
from collections import Optional
import ..io
from ..builtins import copy, panic, Error
from ..builtins.bytes import Byte, index_byte
from .bufio import MAX_CONSECUTIVE_EMPTY_READS
alias MAX_INT: Int = 2147483647
struct Scanner[R: io.Reader]():
    """Scanner provides a convenient Interface for reading data such as
    a file of newline-delimited lines of text. Successive calls to
    the [Scanner.Scan] method will step through the 'tokens' of a file, skipping
    the bytes between the tokens. The specification of a token is
    defined by a split function of type [SplitFunction]; the default split
    function breaks the input into lines with line termination stripped. [Scanner.split]
    functions are defined in this package for scanning a file into
    lines, bytes, UTF-8-encoded runes, and space-delimited words. The
    client may instead provide a custom split function.

    Scanning stops unrecoverably at EOF, the first I/O error, or a token too
    large to fit in the [Scanner.buffer]. When a scan stops, the reader may have
    advanced arbitrarily far past the last token. Programs that need more
    control over error handling or large tokens, or must run sequential scans
    on a reader, should use [bufio.Reader] instead."""

    var reader: R  # The reader provided by the client.
    var split: SplitFunction  # The function to split the tokens.
    var max_token_size: Int  # Maximum size of a token; modified by tests.
    var token: List[Byte]  # Last token returned by split.
    var buf: List[Byte]  # buffer used as argument to split.
    var start: Int  # First non-processed byte in buf.
    var end: Int  # End of data in buf.
    var empties: Int  # Count of successive empty tokens.
    var scan_called: Bool  # Scan has been called; buffer is in use.
    var done: Bool  # Scan has finished.
    var err: Error  # Sticky error; once set (and not EOF), scanning stops.

    fn __init__(
        inout self,
        owned reader: R,
        split: SplitFunction = scan_lines,
        max_token_size: Int = MAX_SCAN_TOKEN_SIZE,
        token: List[Byte] = List[Byte](capacity=io.BUFFER_SIZE),
        buf: List[Byte] = List[Byte](capacity=io.BUFFER_SIZE),
        start: Int = 0,
        end: Int = 0,
        empties: Int = 0,
        scan_called: Bool = False,
        done: Bool = False,
    ):
        self.reader = reader^
        self.split = split
        self.max_token_size = max_token_size
        self.token = token
        self.buf = buf
        self.start = start
        self.end = end
        self.empties = empties
        self.scan_called = scan_called
        self.done = done
        self.err = Error()

    fn current_token_as_bytes(self) -> List[Byte]:
        """Returns the most recent token generated by a call to [Scanner.Scan].
        The underlying array may point to data that will be overwritten
        by a subsequent call to Scan. It does no allocation.
        """
        return self.token

    fn current_token(self) -> String:
        """Returns the most recent token generated by a call to [Scanner.Scan]
        as a newly allocated string holding its bytes."""
        return String(self.token)

    fn scan(inout self) raises -> Bool:
        """Advances the [Scanner] to the next token, which will then be
        available through the [Scanner.current_token_as_bytes] or [Scanner.current_token] method.
        It returns False when there are no more tokens, either by reaching the end of the input or an error.
        After Scan returns False, the [Scanner.Err] method will return any error that
        occurred during scanning, except if it was [io.EOF], [Scanner.Err].
        Scan raises an Error if the split function returns too many empty
        tokens without advancing the input. This is a common error mode for
        scanners.
        """
        if self.done:
            return False

        self.scan_called = True
        # Loop until we have a token.
        while True:
            # See if we can get a token with what we already have.
            # If we've run out of data but have an error, give the split function
            # a chance to recover any remaining, possibly empty token.
            if (self.end > self.start) or self.err:
                var advance: Int
                var token = List[Byte](capacity=io.BUFFER_SIZE)
                var err = Error()
                var at_eof = False
                if self.err:
                    # Any stored error (including EOF) means no more data is coming.
                    at_eof = True

                advance, token, err = self.split(self.buf[self.start : self.end], at_eof)
                if err:
                    if str(err) == str(ERR_FINAL_TOKEN):
                        self.token = token
                        self.done = True
                        # When token is not nil, it means the scanning stops
                        # with a trailing token, and thus the return value
                        # should be True to indicate the existence of the token.
                        return len(token) != 0

                    self.set_err(err)
                    return False

                if not self.advance(advance):
                    return False

                self.token = token
                if len(token) != 0:
                    if not self.err or advance > 0:
                        self.empties = 0
                    else:
                        # Returning tokens not advancing input at EOF.
                        self.empties += 1
                        if self.empties > MAX_CONSECUTIVE_EMPTY_READS:
                            panic("bufio.Scan: too many empty tokens without progressing")

                    return True

            # We cannot generate a token with what we are holding.
            # If we've already hit EOF or an I/O error, we are done.
            if self.err:
                # Shut it down.
                self.start = 0
                self.end = 0
                return False

            # Must read more data.
            # First, shift data to beginning of buffer if there's lots of empty space
            # or space is needed.
            if self.start > 0 and (self.end == len(self.buf) or self.start > int(len(self.buf) / 2)):
                _ = copy(self.buf, self.buf[self.start : self.end])
                self.end -= self.start
                self.start = 0

            # Is the buffer full? If so, resize.
            if self.end == len(self.buf):
                # Guarantee no overflow in the multiplication below.
                if len(self.buf) >= self.max_token_size or len(self.buf) > int(MAX_INT / 2):
                    self.set_err(Error(str(ERR_TOO_LONG)))
                    return False

                var new_size = len(self.buf) * 2
                if new_size == 0:
                    new_size = START_BUF_SIZE

                # Make a new List[Byte] buffer and copy the elements in
                new_size = min(new_size, self.max_token_size)
                var new_buf = List[Byte](capacity=new_size)
                _ = copy(new_buf, self.buf[self.start : self.end])
                self.buf = new_buf
                self.end -= self.start
                self.start = 0

            # Finally we can read some input. Make sure we don't get stuck with
            # a misbehaving Reader. Officially we don't need to do this, but let's
            # be extra careful: Scanner is for safe, simple jobs.
            var loop = 0
            while True:
                var bytes_read: Int
                var sl = self.buf[self.end : len(self.buf)]
                var err: Error

                # Catch any reader errors and set the internal error field to that err instead of bubbling it up.
                bytes_read, err = self.reader.read(sl)
                # NOTE(review): sl is a copy of the slice, so the data read into it
                # is copied back into self.buf here — verify copy-vs-reference List semantics.
                _ = copy(self.buf, sl, self.end)
                if bytes_read < 0 or len(self.buf) - self.end < bytes_read:
                    self.set_err(Error(str(ERR_BAD_READ_COUNT)))
                    break

                self.end += bytes_read
                if err:
                    self.set_err(err)
                    break

                if bytes_read > 0:
                    self.empties = 0
                    break

                loop += 1
                if loop > MAX_CONSECUTIVE_EMPTY_READS:
                    self.set_err(Error(io.ERR_NO_PROGRESS))
                    break

    fn set_err(inout self, err: Error):
        """Set the internal error field to the provided error.
        An existing error is only replaced when it is empty or EOF, so the
        first real error encountered is preserved.

        Args:
            err: The error to set.
        """
        if self.err:
            var value = str(self.err)
            if value == "" or value == io.EOF:
                self.err = err
        else:
            self.err = err

    fn advance(inout self, n: Int) -> Bool:
        """Consumes n bytes of the buffer. It reports whether the advance was legal.

        Args:
            n: The number of bytes to advance the buffer by.

        Returns:
            True if the advance was legal, False otherwise.
        """
        if n < 0:
            self.set_err(Error(str(ERR_NEGATIVE_ADVANCE)))
            return False

        if n > self.end - self.start:
            self.set_err(Error(str(ERR_ADVANCE_TOO_FAR)))
            return False

        self.start += n
        return True

    fn buffer(inout self, buf: List[Byte], max: Int) raises:
        """Sets the initial buffer to use when scanning
        and the maximum size of buffer that may be allocated during scanning.
        The maximum token size must be less than the larger of max and cap(buf).
        If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.

        By default, [Scanner.Scan] uses an Internal buffer and sets the
        maximum token size to [MAX_SCAN_TOKEN_SIZE].

        buffer raises an Error if it is called after scanning has started.

        Args:
            buf: The buffer to use when scanning.
            max: The maximum size of buffer that may be allocated during scanning.

        Raises:
            Error: If called after scanning has started.
        """
        if self.scan_called:
            raise Error("buffer called after Scan")

        # NOTE(review): the buf argument is currently ignored (assignment is
        # commented out below), so only max_token_size takes effect — confirm intent.
        # self.buf = buf[0:buf.capacity()]
        self.max_token_size = max

    # # split sets the split function for the [Scanner].
    # # The default split function is [scan_lines].
    # #
    # # split panics if it is called after scanning has started.
    # fn split(inout self, split_function: SplitFunction) raises:
    #     if self.scan_called:
    #         raise Error("split called after Scan")
    #     self.split = split_function
# SplitFunction is the signature of the split function used to tokenize the
# input. The arguments are an initial substring of the remaining unprocessed
# data and a flag, at_eof, that reports whether the [Reader] has no more data
# to give. The return values are the number of bytes to advance the input
# and the next token to return to the user, if any, plus an error, if any.
#
# Scanning stops if the function returns an error, in which case some of
# the input may be discarded. If that error is [ERR_FINAL_TOKEN], scanning
# stops with no error. A non-nil token delivered with [ERR_FINAL_TOKEN]
# will be the last token, and a nil token with [ERR_FINAL_TOKEN]
# immediately stops the scanning.
#
# Otherwise, the [Scanner] advances the input. If the token is not nil,
# the [Scanner] returns it to the user. If the token is nil, the
# Scanner reads more data and continues scanning; if there is no more
# data--if at_eof was True--the [Scanner] returns. If the data does not
# yet hold a complete token, for instance if it has no newline while
# scanning lines, a [SplitFunction] can return (0, nil, nil) to signal the
# [Scanner] to read more data Into the slice and try again with a
# longer slice starting at the same poInt in the input.
#
# The function is never called with an empty data slice unless at_eof
# is True. If at_eof is True, however, data may be non-empty and,
# as always, holds unprocessed text.
alias SplitFunction = fn (data: List[Byte], at_eof: Bool) -> (Int, List[Byte], Error)
# # Errors returned by Scanner.
alias ERR_TOO_LONG = Error("bufio.Scanner: token too long")
alias ERR_NEGATIVE_ADVANCE = Error("bufio.Scanner: SplitFunction returns negative advance count")
alias ERR_ADVANCE_TOO_FAR = Error("bufio.Scanner: SplitFunction returns advance count beyond input")
alias ERR_BAD_READ_COUNT = Error("bufio.Scanner: Read returned impossible count")
# ERR_FINAL_TOKEN is a special sentinel error value. It is Intended to be
# returned by a split function to indicate that the scanning should stop
# with no error. If the token being delivered with this error is not nil,
# the token is the last token.
#
# The value is useful to stop processing early or when it is necessary to
# deliver a final empty token (which is different from a nil token).
# One could achieve the same behavior with a custom error value but
# providing one here is tidier.
# See the emptyFinalToken example for a use of this value.
alias ERR_FINAL_TOKEN = Error("final token")
# MAX_SCAN_TOKEN_SIZE is the maximum size used to buffer a token
# unless the user provides an explicit buffer with [Scanner.buffer].
# The actual maximum token size may be smaller as the buffer
# may need to include, for instance, a newline.
alias MAX_SCAN_TOKEN_SIZE = 64 * 1024
alias START_BUF_SIZE = 8200 # Size of initial allocation for buffer.
fn new_scanner[R: io.Reader](owned reader: R) -> Scanner[R]:
    """Creates a [Scanner] reading from `reader`.
    Tokenization defaults to line splitting via [scan_lines]."""
    return Scanner(reader^)
###### split functions ######
fn scan_bytes(data: List[Byte], at_eof: Bool) -> (Int, List[Byte], Error):
    """Split function for a [Scanner] that emits one token per byte.

    Args:
        data: The data to split.
        at_eof: Whether the data is at the end of the file.

    Returns:
        The advance count, the single-byte token, and an error.
    """
    var exhausted = at_eof and data.capacity == 0
    if exhausted:
        return 0, List[Byte](), Error()

    # Advance by exactly one byte and hand it back as the token.
    return 1, data[0:1], Error()
# var errorRune = List[Byte](string(utf8.RuneError))
# # ScanRunes is a split function for a [Scanner] that returns each
# # UTF-8-encoded rune as a token. The sequence of runes returned is
# # equivalent to that from a range loop over the input as a string, which
# # means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
# # Because of the Scan Interface, this makes it impossible for the client to
# # distinguish correctly encoded replacement runes from encoding errors.
# fn ScanRunes(data List[Byte], at_eof Bool) (advance Int, token List[Byte], err error):
# if at_eof and data.capacity == 0:
# return 0, nil, nil
# # Fast path 1: ASCII.
# if data[0] < utf8.RuneSelf:
# return 1, data[0:1], nil
# # Fast path 2: Correct UTF-8 decode without error.
# _, width := utf8.DecodeRune(data)
# if width > 1:
# # It's a valid encoding. Width cannot be one for a correctly encoded
# # non-ASCII rune.
# return width, data[0:width], nil
# # We know it's an error: we have width==1 and implicitly r==utf8.RuneError.
# # Is the error because there wasn't a full rune to be decoded?
# # FullRune distinguishes correctly between erroneous and incomplete encodings.
# if !at_eof and !utf8.FullRune(data):
# # Incomplete; get more bytes.
# return 0, nil, nil
# # We have a real UTF-8 encoding error. Return a properly encoded error rune
# # but advance only one byte. This matches the behavior of a range loop over
# # an incorrectly encoded string.
# return 1, errorRune, nil
fn drop_carriage_return(data: List[Byte]) -> List[Byte]:
    """Drops a terminal \r from the data.

    Args:
        data: The data to strip.

    Returns:
        The stripped data.
    """
    # NOTE(review): this checks data.capacity rather than a length accessor —
    # presumably upstream slices keep capacity equal to the element count;
    # verify, since List capacity can exceed its length after allocation.
    # In the case of a \r ending without a \n, indexing on -1 doesn't work as it finds a null terminator instead of \r.
    if data.capacity > 0 and data[data.capacity - 1] == ord("\r"):
        return data[0 : data.capacity - 1]

    return data
# TODO: Doing modification of token and err in these split functions, so we don't have to return any memory only types as part of the return tuple.
fn scan_lines(data: List[Byte], at_eof: Bool) -> (Int, List[Byte], Error):
    """Split function for a [Scanner] that returns each line of
    text, stripped of any trailing end-of-line marker. The returned line may
    be empty. The end-of-line marker is one optional carriage return followed
    by one mandatory newline. The last non-empty line of input will be returned even if it has no
    newline.

    Args:
        data: The data to split.
        at_eof: Whether the data is at the end of the file.

    Returns:
        The number of bytes to advance the input.
    """
    if at_eof and data.capacity == 0:
        return 0, List[Byte](), Error()

    var i = index_byte(data, ord("\n"))
    if i >= 0:
        # We have a full newline-terminated line.
        return i + 1, drop_carriage_return(data[0:i]), Error()

    # If we're at EOF, we have a final, non-terminated line. Return it.
    # NOTE(review): the at_eof guard below is commented out, so a partial line
    # is returned even when more data could arrive — confirm this is intentional
    # (Go's scan_lines would instead request more data by returning 0).
    # if at_eof:
    return data.capacity, drop_carriage_return(data), Error()

    # Request more data.
    # return 0
fn is_space(r: UInt8) -> Bool:
    """Reports whether the byte `r` is an ASCII whitespace character."""
    alias ALL_WHITESPACES: String = " \t\n\r\x0b\f"
    # Membership in the whitespace set is the result; no branch needed.
    return chr(int(r)) in ALL_WHITESPACES
# TODO: Handle runes and utf8 decoding. For now, just assuming single byte length.
fn scan_words(data: List[Byte], at_eof: Bool) -> (Int, List[Byte], Error):
    """Split function for a [Scanner] that returns each
    space-separated word of text, with surrounding spaces deleted. It will
    never return an empty string. The definition of space is set by is_space.

    Args:
        data: The data to split.
        at_eof: Whether the data is at the end of the file.

    Returns:
        The number of bytes to advance the input, the word (if one was found),
        and an error.
    """
    # NOTE: runes/UTF-8 are not handled yet; every byte is treated as one
    # character of width 1, per the file-wide TODO.

    # Skip leading spaces.
    # Bug fix: examine data[start] (the old loop always checked data[0], so it
    # could never advance past the first byte).
    var start = 0
    while start < data.capacity:
        if not is_space(data[start]):
            break
        start += 1

    # Scan until space, marking end of word.
    # Bug fix: begin at `start` instead of resetting to 0, so the whitespace
    # just skipped is not included in the returned word.
    var i = start
    while i < data.capacity:
        if is_space(data[i]):
            return i + 1, data[start:i], Error()
        i += 1

    # If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
    if at_eof and data.capacity > start:
        return data.capacity, data[start:], Error()

    # Request more data.
    return start, List[Byte](), Error()
| lightbug_http/external/gojo/bufio/scan.mojo | false |
<filename>lightbug_http/external/gojo/bufio/__init__.mojo
from .bufio import Reader, Writer, ReadWriter
from .scan import Scanner, scan_words, scan_bytes, scan_lines
| lightbug_http/external/gojo/bufio/__init__.mojo | false |
<filename>lightbug_http/external/gojo/builtins/attributes.mojo
fn copy[T: CollectionElement](inout target: List[T], source: List[T], start: Int = 0) -> Int:
    """Copies the contents of source into target at the same index. Returns the number of bytes copied.
    Added a start parameter to specify the index to start copying into.

    Args:
        target: The buffer to copy into.
        source: The buffer to copy from.
        start: The index to start copying into.

    Returns:
        The number of bytes copied.
    """
    var count = 0
    for i in range(len(source)):
        # Fix: the bounds condition was inverted (`>` with the branches
        # swapped), which assigned past the end of the list and appended
        # when an in-range slot existed. Overwrite in place while the
        # destination index is valid; append once we run past the end.
        if i + start < len(target):
            target[i + start] = source[i]
        else:
            target.append(source[i])
        count += 1
    return count
fn copy[T: CollectionElement](inout target: Span[T, True], source: Span[T], start: Int = 0) -> Int:
    """Copies the contents of source into target at the same index. Returns the number of bytes copied.
    Added a start parameter to specify the index to start copying into.

    Args:
        target: The buffer to copy into.
        source: The buffer to copy from.
        start: The index to start copying into.

    Returns:
        The number of bytes copied.
    """
    var count = 0
    for i in range(len(source)):
        # NOTE(review): no bounds check here — this assumes
        # len(target) >= start + len(source); confirm every caller
        # guarantees the destination span is large enough.
        target[i + start] = source[i]
        count += 1
    return count
fn cap[T: CollectionElement](iterable: List[T]) -> Int:
    """Returns the capacity of the List.

    Args:
        iterable: The List to get the capacity of.

    Returns:
        The allocated capacity of the List (how many elements it can hold
        without reallocating), not its current length.
    """
    return iterable.capacity
| lightbug_http/external/gojo/builtins/attributes.mojo | false |
<filename>lightbug_http/external/gojo/builtins/bytes.mojo
alias Byte = UInt8
fn has_prefix(bytes: List[Byte], prefix: List[Byte]) -> Bool:
    """Reports whether the List[Byte] struct begins with prefix.

    Args:
        bytes: The List[Byte] struct to search.
        prefix: The prefix to search for.

    Returns:
        True if the List[Byte] struct begins with prefix; otherwise, False.
    """
    # Short-circuit on length first so the slice and element-wise comparison
    # are only performed when they can possibly succeed.
    if len(bytes) < len(prefix):
        return False
    return equals(bytes[0 : len(prefix)], prefix)
fn has_suffix(bytes: List[Byte], suffix: List[Byte]) -> Bool:
    """Reports whether the List[Byte] struct ends with suffix.

    Args:
        bytes: The List[Byte] struct to search.
        suffix: The suffix to search for.

    Returns:
        True if the List[Byte] struct ends with suffix; otherwise, False.
    """
    # Short-circuit on length first: without this guard the slice start
    # len(bytes) - len(suffix) goes negative when the suffix is longer than
    # the data and wraps around Python-style, comparing the wrong bytes.
    if len(bytes) < len(suffix):
        return False
    return equals(bytes[len(bytes) - len(suffix) : len(bytes)], suffix)
fn index_byte(bytes: List[Byte], delim: Byte) -> Int:
    """Return the index of the first occurrence of the byte delim.

    Args:
        bytes: The List[Byte] struct to search.
        delim: The byte to search for.

    Returns:
        The index of the first occurrence of delim, or -1 if it is absent.
    """
    var position = 0
    var size = len(bytes)
    while position < size:
        if bytes[position] == delim:
            return position
        position += 1
    return -1
fn to_string(bytes: List[Byte]) -> String:
    """Makes a deepcopy of the List[Byte] supplied and converts it to a string. If it's not null terminated, it will append a null byte.

    Args:
        bytes: The List[Byte] struct to convert.

    Returns:
        The string representation of the List[Byte] struct.
    """
    var copy = List[Byte](bytes)
    # Guard the empty case: indexing [-1] on an empty list is out of bounds.
    # An empty input simply becomes a lone null terminator (empty string).
    if len(copy) == 0 or copy[-1] != 0:
        copy.append(0)
    return String(copy)
| lightbug_http/external/gojo/builtins/bytes.mojo | false |
<filename>lightbug_http/external/gojo/builtins/errors.mojo
from sys import exit
fn panic[T: Stringable](message: T, code: Int = 1):
    """Panics the program with the given message and exit code.

    Args:
        message: The message to panic with.
        code: The exit code to panic with.
    """
    # Print the diagnostic before terminating; exit() ends the whole
    # process, so this function never returns.
    print("panic:", message)
    exit(code)
| lightbug_http/external/gojo/builtins/errors.mojo | false |
<filename>lightbug_http/external/gojo/builtins/__init__.mojo
from .bytes import Byte, index_byte, has_suffix, has_prefix, to_string
from .list import equals
from .attributes import cap, copy
from .errors import exit, panic
alias Rune = Int32
| lightbug_http/external/gojo/builtins/__init__.mojo | false |
import ..io
from ..builtins import cap, copy, Byte, panic, index_byte
alias Rune = Int32
# SMALL_BUFFER_SIZE is an initial allocation minimal capacity.
alias SMALL_BUFFER_SIZE: Int = 64
# The ReadOp constants describe the last action performed on
# the buffer, so that unread_rune and unread_byte can check for
# invalid usage. op_read_runeX constants are chosen such that
# converted to Int they correspond to the rune size that was read.
alias ReadOp = Int8
# Don't use iota for these, as the values need to correspond with the
# names and comments, which is easier to see when being explicit.
alias OP_READ: ReadOp = -1 # Any other read operation.
alias OP_INVALID: ReadOp = 0 # Non-read operation.
alias OP_READ_RUNE1: ReadOp = 1 # read rune of size 1.
alias OP_READ_RUNE2: ReadOp = 2 # read rune of size 2.
alias OP_READ_RUNE3: ReadOp = 3 # read rune of size 3.
alias OP_READ_RUNE4: ReadOp = 4 # read rune of size 4.
alias MAX_INT: Int = 2147483647
# MIN_READ is the minimum slice size passed to a read call by
# [Buffer.read_from]. As long as the [Buffer] has at least MIN_READ bytes beyond
# what is required to hold the contents of r, read_from will not grow the
# underlying buffer.
alias MIN_READ: Int = 512
# ERR_TOO_LARGE is passed to panic if memory cannot be allocated to store data in a buffer.
alias ERR_TOO_LARGE = "buffer.Buffer: too large"
alias ERR_NEGATIVE_READ = "buffer.Buffer: reader returned negative count from read"
alias ERR_SHORT_WRITE = "short write"
# TODO: Removed read_from and write_to for now. Until the span arg trait issue is resolved.
# https://github.com/modularml/mojo/issues/2917
@value
struct Buffer(
    Copyable,
    Stringable,
    Sized,
    io.ReadWriter,
    io.StringWriter,
    io.ByteReader,
    io.ByteWriter,
    # WriterTo,
    # ReaderFrom,
):
    """A Buffer is a variable-sized buffer of bytes with [Buffer.read] and [Buffer.write] methods.
    The zero value for Buffer is an empty buffer ready to use.
    """

    # Contents are the bytes buf[off : len(buf)].
    var buf: List[Byte]  # contents are the bytes buf[off : len(buf)]
    # Read at &buf[off], write at &buf[len(buf)].
    var off: Int  # read at &buf[off], write at &buf[len(buf)]
    # Last read operation, so that unread* can work correctly.
    var last_read: ReadOp  # last read operation, so that unread* can work correctly.

    fn __init__(inout self, owned buf: List[Byte]):
        """Constructs a Buffer that takes ownership of `buf`, starting with
        the read offset at 0 and no prior read operation recorded."""
        self.buf = buf
        self.off = 0
        self.last_read = OP_INVALID

    fn bytes(self) -> List[Byte]:
        """Returns a slice of length self.buf.capacity holding the unread portion of the buffer.
        The slice is valid for use only until the next buffer modification (that is,
        only until the next call to a method like [Buffer.read], [Buffer.write], [Buffer.reset], or [Buffer.truncate]).
        The slice aliases the buffer content at least until the next buffer modification,
        so immediate changes to the slice will affect the result of future reads.
        """
        return self.buf[self.off : len(self.buf)]

    # fn available_buffer(self) raises -> List[Byte]:
    #     """Returns an empty buffer with self.Available() capacity.
    #     This buffer is intended to be appended to and
    #     passed to an immediately succeeding [Buffer.write] call.
    #     The buffer is only valid until the next write operation on self.
    #     """
    #     return self.buf[len(self.buf) :]

    fn __str__(self) -> String:
        """Returns the contents of the unread portion of the buffer
        as a string. If the [Buffer] is a nil pointer, it returns "<nil>".
        To build strings more efficiently, see the strings.Builder type.
        Creates a copy of the readable buffer and returns it as a string.
        """
        # String construction expects a null-terminated byte list.
        var valid_bytes = self.buf[self.off : len(self.buf)]
        valid_bytes.append(0)
        return String(valid_bytes)

    fn empty(self) -> Bool:
        """Reports whether the unread portion of the buffer is empty."""
        return len(self.buf) <= self.off

    fn __len__(self) -> Int:
        """Returns the number of bytes of the unread portion of the buffer;
        self.buf.capacity == len(self.List[Byte]())."""
        return len(self.buf) - self.off

    fn cap(self) -> Int:
        """Cap returns the capacity of the buffer's underlying byte slice, that is, the
        total space allocated for the buffer's data."""
        return cap(self.buf)

    fn available(self) -> Int:
        """Returns how many bytes are unused in the buffer."""
        return self.buf.capacity - len(self.buf)

    fn truncate(inout self, position: Int) raises:
        """Discards all but the first n unread bytes from the buffer
        but continues to use the same allocated storage.
        It panics if position is negative or greater than the length of the buffer.

        Args:
            position: The position to truncate the buffer to.
        """
        if position == 0:
            self.reset()
            return
        self.last_read = OP_INVALID
        # NOTE(review): the bound is checked against capacity here but the
        # docstring says length — confirm which is intended.
        if position < 0 or position > self.buf.capacity:
            raise Error("buffer.Buffer: truncation out of range")
        self.buf = self.buf[: self.off + position]

    fn reset(inout self):
        """Resets the buffer to be empty,
        but it retains the underlying storage for use by future writes.
        reset is the same as [buffer.truncate](0)."""
        self.buf = List[Byte](capacity=self.buf.capacity)
        self.off = 0
        self.last_read = OP_INVALID

    fn try_grow_by_reslice(inout self, n: Int) -> (Int, Bool):
        """Inlineable version of grow for the fast-case where the
        internal buffer only needs to be resliced.
        It returns the index where bytes should be written and whether it succeeded."""
        var buffer_already_used = len(self.buf)
        if n <= self.buf.capacity - buffer_already_used:
            # FIXME: It seems like reslicing in go can extend the length of the slice. Doens't work like that for my get slice impl.
            # Instead, just add bytes of len(n) to the end of the buffer for now.
            # self.buf = self.buf[: l + n]
            # NOTE(review): reserve() only grows capacity, not length —
            # confirm downstream copy() appends to cover the new bytes.
            self.buf.reserve(self.buf.capacity + n)
            return buffer_already_used, True
        return 0, False

    fn grow(inout self, n: Int) -> Int:
        """Grows the buffer to guarantee space for n more bytes.
        It returns the index where bytes should be written.
        If the buffer can't grow it will panic with ERR_TOO_LARGE."""
        var write_at: Int = len(self.buf)
        # If buffer is empty, reset to recover space.
        if write_at == 0 and self.off != 0:
            self.reset()
        # Try to grow by means of a reslice.
        var i: Int
        var ok: Bool
        i, ok = self.try_grow_by_reslice(n)
        if ok:
            return i
        # If buffer length is 0 and elements being added is less than small_buffer_size, resize the buffer and write from the beginning.
        if self.buf.capacity == 0 and n <= SMALL_BUFFER_SIZE:
            self.buf.reserve(SMALL_BUFFER_SIZE)
            return 0
        var c = cap(self.buf)
        if Float64(n) <= c / 2 - write_at:
            # We can slide things down instead of allocating a new
            # slice. We only need m+n <= c to slide, but
            # we instead var capacity get twice as large so we
            # don't spend all our time copying.
            _ = copy(self.buf, self.buf[self.off :])
        elif c > MAX_INT - c - n:
            panic(ERR_TOO_LARGE)
        # TODO: Commented out this branch because growing the slice here and then at the end is redundant?
        # else:
        #     # Add self.off to account for self.buf[:self.off] being sliced off the front.
        #     # var sl = self.buf[self.off :]
        #     # self.buf = self.grow_slice(sl, self.off + n)
        # Restore self.off and len(self.buf).
        self.off = 0
        # FIXME: It seems like reslicing in go can extend the length of the slice. Doens't work like that for my get slice impl.
        # Instead, just add bytes of len(n) to the end of the buffer for now.
        # self.buf = self.buf[: m + n]
        self.buf.reserve(self.buf.capacity + n)
        return write_at

    fn Grow(inout self, n: Int):
        """Grows the buffer's capacity, if necessary, to guarantee space for
        another n bytes. After grow(n), at least n bytes can be written to the
        buffer without another allocation.
        If n is negative, grow will panic.
        If the buffer can't grow it will panic with [ERR_TOO_LARGE].
        """
        if n < 0:
            panic("buffer.Buffer.Grow: negative count")
        var m = self.grow(n)
        self.buf = self.buf[:m]

    fn write(inout self, src: Span[Byte]) -> (Int, Error):
        """Appends the contents of p to the buffer, growing the buffer as
        needed. The return value n is the length of p; err is always nil. If the
        buffer becomes too large, write will panic with [ERR_TOO_LARGE].

        Args:
            src: The bytes to write to the buffer.

        Returns:
            The number of bytes written to the buffer.
        """
        self.last_read = OP_INVALID
        var write_at: Int
        var ok: Bool
        write_at, ok = self.try_grow_by_reslice(len(src))
        if not ok:
            write_at = self.grow(len(src))
        var bytes_written = copy(self.buf, src, write_at)
        return bytes_written, Error()

    fn write_string(inout self, src: String) -> (Int, Error):
        """Appends the contents of s to the buffer, growing the buffer as
        needed. The return value n is the length of s; err is always nil. If the
        buffer becomes too large, write_string will panic with [ERR_TOO_LARGE].

        Args:
            src: The bytes to write to the buffer.

        Returns:
            The number of bytes written to the buffer.
        """
        # Delegates to write(); the commented-out lines below were an earlier
        # direct implementation kept for reference.
        # self.last_read = OP_INVALID
        # var write_at: Int
        # var ok: Bool
        # write_at, ok = self.try_grow_by_reslice(len(src))
        # if not ok:
        #     m = self.grow(len(src))
        # var b = self.buf[m:]
        return self.write(src.as_bytes_slice())

    # fn read_from[R: Reader](inout self, inout reader: R) -> (Int64, Error):
    #     """Reads data from r until EOF and appends it to the buffer, growing
    #     the buffer as needed. The return value n is the number of bytes read. Any
    #     error except io.EOF encountered during the read is also returned. If the
    #     buffer becomes too large, read_from will panic with [ERR_TOO_LARGE].
    #     Args:
    #         reader: The reader to read from.
    #     Returns:
    #         The number of bytes read from the reader.
    #     """
    #     self.last_read = OP_INVALID
    #     var total_bytes_read: Int64 = 0
    #     while True:
    #         _ = self.grow(MIN_READ)
    #         var span = Span(self.buf)
    #         var bytes_read: Int
    #         var err: Error
    #         bytes_read, err = reader.read(span)
    #         if bytes_read < 0:
    #             panic(ERR_NEGATIVE_READ)
    #         total_bytes_read += bytes_read
    #         var err_message = str(err)
    #         if err_message != "":
    #             if err_message == io.EOF:
    #                 return total_bytes_read, Error()
    #             return total_bytes_read, err

    fn grow_slice(self, inout b: List[Byte], n: Int) -> List[Byte]:
        """Grows b by n, preserving the original content of self.
        If the allocation fails, it panics with ERR_TOO_LARGE.
        """
        # TODO(http:#golang.org/issue/51462): We should rely on the append-make
        # pattern so that the compiler can call runtime.growslice. For example:
        # return append(b, make(bytes, n)...)
        # This avoids unnecessary zero-ing of the first b.capacity bytes of the
        # allocated slice, but this pattern causes b to escape onto the heap.
        #
        # Instead use the append-make pattern with a nil slice to ensure that
        # we allocate buffers rounded up to the closest size class.
        var c = b.capacity + n  # ensure enough space for n elements
        if c < 2 * cap(b):
            # The growth rate has historically always been 2x. In the future,
            # we could rely purely on append to determine the growth rate.
            c = 2 * cap(b)
        var resized_buffer = List[Byte](capacity=c)
        _ = copy(resized_buffer, b)
        # var b2: List[Byte] = List[Byte]()
        # b2._vector.reserve(c)
        # # var b2 = append(bytes(nil), make(bytes, c)...)
        # _ = copy(b2, b)
        # return b2[:b.capacity]
        # b._vector.reserve(c)
        return resized_buffer[: b.capacity]

    # fn write_to[W: Writer](inout self, inout writer: W) -> (Int64, Error):
    #     """Writes data to w until the buffer is drained or an error occurs.
    #     The return value n is the number of bytes written; it always fits into an
    #     Int, but it is int64 to match the io.WriterTo trait. Any error
    #     encountered during the write is also returned.
    #     Args:
    #         writer: The writer to write to.
    #     Returns:
    #         The number of bytes written to the writer.
    #     """
    #     self.last_read = OP_INVALID
    #     var bytes_to_write = len(self.buf)
    #     var total_bytes_written: Int64 = 0
    #     if bytes_to_write > 0:
    #         # TODO: Replace usage of this intermeidate slice when normal slicing, once slice references work.
    #         var sl = Span(self.buf[self.off : bytes_to_write])
    #         var bytes_written: Int
    #         var err: Error
    #         bytes_written, err = writer.write(sl)
    #         if bytes_written > bytes_to_write:
    #             panic("bytes.Buffer.write_to: invalid write count")
    #         self.off += bytes_written
    #         total_bytes_written = Int64(bytes_written)
    #         var err_message = str(err)
    #         if err_message != "":
    #             return total_bytes_written, err
    #         # all bytes should have been written, by definition of write method in io.Writer
    #         if bytes_written != bytes_to_write:
    #             return total_bytes_written, Error(ERR_SHORT_WRITE)
    #     # Buffer is now empty; reset.
    #     self.reset()
    #     return total_bytes_written, Error()

    fn write_byte(inout self, byte: Byte) -> (Int, Error):
        """Appends the byte c to the buffer, growing the buffer as needed.
        The returned error is always nil, but is included to match [bufio.Writer]'s
        write_byte. If the buffer becomes too large, write_byte will panic with
        [ERR_TOO_LARGE].

        Args:
            byte: The byte to write to the buffer.

        Returns:
            The number of bytes written to the buffer.
        """
        self.last_read = OP_INVALID
        var write_at: Int
        var ok: Bool
        write_at, ok = self.try_grow_by_reslice(1)
        if not ok:
            write_at = self.grow(1)
        _ = copy(self.buf, List[Byte](byte), write_at)
        # NOTE(review): returns the write position, not the count of bytes
        # written (1) — confirm callers expect this.
        return write_at, Error()

    # fn write_rune(inout self, r: Rune) -> Int:
    #     """Appends the UTF-8 encoding of Unicode code point r to the
    #     buffer, returning its length and an error, which is always nil but is
    #     included to match [bufio.Writer]'s write_rune. The buffer is grown as needed;
    #     if it becomes too large, write_rune will panic with [ERR_TOO_LARGE].
    #     """
    #     # Compare as uint32 to correctly handle negative runes.
    #     if UInt32(r) < utf8.RuneSelf:
    #         self.write_byte(Byte(r))
    #         return 1
    #     self.last_read = OP_INVALID
    #     var write_at: Int
    #     var ok: Bool
    #     write_at, ok = self.try_grow_by_reslice(utf8.UTFMax)
    #     if not ok:
    #         write_at = self.grow(utf8.UTFMax)
    #     self.buf = utf8.AppendRune(self.buf[:write_at], r)
    #     return len(self.buf) - write_at

    fn read(inout self, inout dest: List[Byte]) -> (Int, Error):
        """Reads the next len(dest) bytes from the buffer or until the buffer
        is drained. The return value n is the number of bytes read. If the
        buffer has no data to return, err is io.EOF (unless len(dest) is zero);
        otherwise it is nil.

        Args:
            dest: The buffer to read into.

        Returns:
            The number of bytes read from the buffer.
        """
        self.last_read = OP_INVALID
        if self.empty():
            # Buffer is empty, reset to recover space.
            self.reset()
            if dest.capacity == 0:
                return 0, Error()
            return 0, Error(io.EOF)
        var bytes_read = copy(dest, self.buf[self.off : len(self.buf)])
        self.off += bytes_read
        if bytes_read > 0:
            self.last_read = OP_READ
        return bytes_read, Error()

    fn next(inout self, number_of_bytes: Int) raises -> List[Byte]:
        """Returns a slice containing the next n bytes from the buffer,
        advancing the buffer as if the bytes had been returned by [Buffer.read].
        If there are fewer than n bytes in the buffer, next returns the entire buffer.
        The slice is only valid until the next call to a read or write method.

        Args:
            number_of_bytes: The number of bytes to read from the buffer.

        Returns:
            A slice containing the next n bytes from the buffer.
        """
        self.last_read = OP_INVALID
        var m = len(self)
        var bytes_to_read = number_of_bytes
        if bytes_to_read > m:
            # Clamp to the unread length.
            bytes_to_read = m
        var data = self.buf[self.off : self.off + bytes_to_read]
        self.off += bytes_to_read
        if bytes_to_read > 0:
            self.last_read = OP_READ
        return data

    fn read_byte(inout self) -> (Byte, Error):
        """Reads and returns the next byte from the buffer.
        If no byte is available, it returns error io.EOF.
        """
        if self.empty():
            # Buffer is empty, reset to recover space.
            self.reset()
            return Byte(0), Error(io.EOF)
        var byte = self.buf[self.off]
        self.off += 1
        self.last_read = OP_READ
        return byte, Error()

    # read_rune reads and returns the next UTF-8-encoded
    # Unicode code point from the buffer.
    # If no bytes are available, the error returned is io.EOF.
    # If the bytes are an erroneous UTF-8 encoding, it
    # consumes one byte and returns U+FFFD, 1.
    # fn read_rune(self) (r rune, size Int, err error)
    #     if self.empty()
    #         # Buffer is empty, reset to recover space.
    #         self.reset()
    #         return 0, 0, io.EOF
    #
    #     c := self.buf[self.off]
    #     if c < utf8.RuneSelf
    #         self.off+= 1
    #         self.last_read = OP_READ_RUNE1
    #         return rune(c), 1, nil
    #
    #     r, n := utf8.DecodeRune(self.buf[self.off:])
    #     self.off += n
    #     self.last_read = ReadOp(n)
    #     return r, n, nil
    #
    # unread_rune unreads the last rune returned by [Buffer.read_rune].
    # If the most recent read or write operation on the buffer was
    # not a successful [Buffer.read_rune], unread_rune returns an error. (In this regard
    # it is stricter than [Buffer.unread_byte], which will unread the last byte
    # from any read operation.)
    # fn unread_rune(self):
    #     if self.last_read <= OP_INVALID
    #         return errors.New("buffer.Buffer: unread_rune: previous operation was not a successful read_rune")
    #
    #     if self.off >= Int(self.last_read)
    #         self.off -= Int(self.last_read)
    #
    #     self.last_read = OP_INVALID
    #     return nil

    # var err_unread_byte = errors.New("buffer.Buffer: unread_byte: previous operation was not a successful read")

    fn unread_byte(inout self) -> Error:
        """Unreads the last byte returned by the most recent successful
        read operation that read at least one byte. If a write has happened since
        the last read, if the last read returned an error, or if the read read zero
        bytes, unread_byte returns an error.
        """
        if self.last_read == OP_INVALID:
            return Error("buffer.Buffer: unread_byte: previous operation was not a successful read")
        self.last_read = OP_INVALID
        if self.off > 0:
            self.off -= 1
        return Error()

    fn read_bytes(inout self, delim: Byte) -> (List[Byte], Error):
        """Reads until the first occurrence of delim in the input,
        returning a slice containing the data up to and including the delimiter.
        If read_bytes encounters an error before finding a delimiter,
        it returns the data read before the error and the error itself (often io.EOF).
        read_bytes returns err != nil if and only if the returned data does not end in
        delim.

        Args:
            delim: The delimiter to read until.

        Returns:
            A List[Byte] struct containing the data up to and including the delimiter.
        """
        var slice: List[Byte]
        var err: Error
        slice, err = self.read_slice(delim)
        # return a copy of slice. The buffer's backing array may
        # be overwritten by later calls.
        var line = List[Byte](capacity=io.BUFFER_SIZE)
        for i in range(len(slice)):
            line.append(slice[i])
        # NOTE(review): the err from read_slice (io.EOF when the delimiter
        # was not found) is discarded here and a fresh Error() is returned
        # instead — this contradicts the docstring; confirm intent.
        return line, Error()

    fn read_slice(inout self, delim: Byte) -> (List[Byte], Error):
        """Like read_bytes but returns a reference to internal buffer data.

        Args:
            delim: The delimiter to read until.

        Returns:
            A List[Byte] struct containing the data up to and including the delimiter.
        """
        var at_eof = False
        var i = index_byte(self.buf[self.off : len(self.buf)], delim)
        var end = self.off + i + 1
        if i < 0:
            # Delimiter not found: consume everything and signal EOF.
            end = len(self.buf)
            at_eof = True
        var line = self.buf[self.off : end]
        self.off = end
        self.last_read = OP_READ
        if at_eof:
            return line, Error(io.EOF)
        return line, Error()

    fn read_string(inout self, delim: Byte) -> (String, Error):
        """Reads until the first occurrence of delim in the input,
        returning a string containing the data up to and including the delimiter.
        If read_string encounters an error before finding a delimiter,
        it returns the data read before the error and the error itself (often io.EOF).
        read_string returns err != nil if and only if the returned data does not end
        in delim.

        Args:
            delim: The delimiter to read until.

        Returns:
            A string containing the data up to and including the delimiter.
        """
        var slice: List[Byte]
        var err: Error
        slice, err = self.read_slice(delim)
        # Null-terminate before constructing the String.
        slice.append(0)
        return String(slice), err
fn new_buffer() -> Buffer:
    """Creates and initializes a new, empty [Buffer] with a default-sized
    internal byte buffer, ready for reading and writing.

    Returns:
        A new empty [Buffer].
    """
    var backing = List[Byte](capacity=io.BUFFER_SIZE)
    return Buffer(backing^)
fn new_buffer(owned buf: List[Byte]) -> Buffer:
    """Creates and initializes a new [Buffer] that takes ownership of `buf`
    as its initial contents. The caller should not use `buf` after this call.
    Intended to prepare a [Buffer] to read existing data, or to set the
    initial size of the internal buffer for writing (pass a list with the
    desired capacity and zero length).

    Args:
        buf: The bytes to use as the initial contents of the buffer.

    Returns:
        A new [Buffer] initialized with the provided bytes.
    """
    return Buffer(buf^)
fn new_buffer(owned s: String) -> Buffer:
    """Creates and initializes a new [Buffer] whose initial contents are the
    bytes of the string `s`. Intended to prepare a buffer to read an existing
    string.

    Args:
        s: The string to use as the initial contents of the buffer.

    Returns:
        A new [Buffer] initialized with the provided string.
    """
    var contents = List[Byte](s.as_bytes())
    return Buffer(contents^)
| lightbug_http/external/gojo/bytes/buffer.mojo | false |
<filename>lightbug_http/external/gojo/bytes/reader.mojo
from collections.optional import Optional
from ..builtins import cap, copy, Byte, panic
import ..io
@value
struct Reader(
    Copyable,
    Sized,
    io.Reader,
    io.ReaderAt,
    # io.WriterTo,
    io.Seeker,
    io.ByteReader,
    io.ByteScanner,
):
    """A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
    io.ByteScanner, and io.RuneScanner Interfaces by reading from
    a byte slice.
    Unlike a [Buffer], a Reader is read-only and supports seeking.
    The zero value for Reader operates like a Reader of an empty slice.
    """

    # The full backing byte slice; never mutated by reads.
    var buffer: List[Byte]
    var index: Int64  # current reading index
    var prev_rune: Int  # index of previous rune; or < 0

    fn __len__(self) -> Int:
        """len returns the number of bytes of the unread portion of the
        slice."""
        if self.index >= len(self.buffer):
            return 0
        return int(len(self.buffer) - self.index)

    fn size(self) -> Int:
        """Returns the original length of the underlying byte slice.
        Size is the number of bytes available for reading via [Reader.ReadAt].
        The result is unaffected by any method calls except [Reader.Reset]."""
        return len(self.buffer)

    fn read(inout self, inout dest: List[Byte]) -> (Int, Error):
        """Reads from the internal buffer into the dest List[Byte] struct.
        Implements the [io.Reader] Interface.

        Args:
            dest: The destination List[Byte] struct to read into.

        Returns:
            Int: The number of bytes read into dest."""
        if self.index >= len(self.buffer):
            return 0, Error(io.EOF)
        # Any read invalidates a pending unread_rune.
        self.prev_rune = -1
        var unread_bytes = self.buffer[int(self.index) : len(self.buffer)]
        var bytes_read = copy(dest, unread_bytes)
        self.index += bytes_read
        return bytes_read, Error()

    fn read_at(self, inout dest: List[Byte], off: Int64) -> (Int, Error):
        """Reads len(dest) bytes into dest beginning at byte offset off.
        Implements the [io.ReaderAt] Interface.

        Args:
            dest: The destination List[Byte] struct to read into.
            off: The offset to start reading from.

        Returns:
            Int: The number of bytes read into dest.
        """
        # cannot modify state - see io.ReaderAt
        if off < 0:
            return 0, Error("bytes.Reader.read_at: negative offset")
        if off >= Int64(len(self.buffer)):
            return 0, Error(io.EOF)
        var unread_bytes = self.buffer[int(off) : len(self.buffer)]
        var bytes_written = copy(dest, unread_bytes)
        if bytes_written < len(dest):
            # NOTE(review): on a short read this discards the partial count
            # and reports 0 bytes with EOF; Go's bytes.Reader.ReadAt returns
            # the partial count alongside EOF — confirm intent.
            return 0, Error(io.EOF)
        return bytes_written, Error()

    fn read_byte(inout self) -> (Byte, Error):
        """Reads and returns a single byte from the internal buffer. Implements the [io.ByteReader] Interface."""
        self.prev_rune = -1
        if self.index >= len(self.buffer):
            return UInt8(0), Error(io.EOF)
        var byte = self.buffer[int(self.index)]
        self.index += 1
        return byte, Error()

    fn unread_byte(inout self) -> Error:
        """Unreads the last byte read by moving the read position back by one.
        Complements [Reader.read_byte] in implementing the [io.ByteScanner] Interface.
        """
        if self.index <= 0:
            return Error("bytes.Reader.unread_byte: at beginning of slice")
        self.prev_rune = -1
        self.index -= 1
        return Error()

    # # read_rune implements the [io.RuneReader] Interface.
    # fn read_rune(self) (ch rune, size Int, err error):
    #     if self.index >= Int64(len(self.buffer)):
    #         self.prev_rune = -1
    #         return 0, 0, io.EOF
    #     self.prev_rune = Int(self.index)
    #     if c := self.buffer[self.index]; c < utf8.RuneSelf:
    #         self.index+= 1
    #         return rune(c), 1, nil
    #     ch, size = utf8.DecodeRune(self.buffer[self.index:])
    #     self.index += Int64(size)
    #     return

    # # unread_rune complements [Reader.read_rune] in implementing the [io.RuneScanner] Interface.
    # fn unread_rune(self) error:
    #     if self.index <= 0:
    #         return errors.New("bytes.Reader.unread_rune: at beginning of slice")
    #     if self.prev_rune < 0:
    #         return errors.New("bytes.Reader.unread_rune: previous operation was not read_rune")
    #     self.index = Int64(self.prev_rune)
    #     self.prev_rune = -1
    #     return nil

    fn seek(inout self, offset: Int64, whence: Int) -> (Int64, Error):
        """Moves the read position to the specified offset from the specified whence.
        Implements the [io.Seeker] Interface.

        Args:
            offset: The offset to move to.
            whence: The reference point for offset.

        Returns:
            The new position in which the next read will start from.
        """
        self.prev_rune = -1
        var position: Int64 = 0
        if whence == io.SEEK_START:
            position = offset
        elif whence == io.SEEK_CURRENT:
            position = self.index + offset
        elif whence == io.SEEK_END:
            position = len(self.buffer) + offset
        else:
            return Int64(0), Error("bytes.Reader.seek: invalid whence")
        if position < 0:
            return Int64(0), Error("bytes.Reader.seek: negative position")
        # Seeking past the end is allowed; subsequent reads return EOF.
        self.index = position
        return position, Error()

    # fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int64, Error):
    #     """Writes data to w until the buffer is drained or an error occurs.
    #     implements the [io.WriterTo] Interface.
    #     Args:
    #         writer: The writer to write to.
    #     """
    #     self.prev_rune = -1
    #     if self.index >= len(self.buffer):
    #         return Int64(0), Error()
    #     var bytes = Span(self.buffer[int(self.index) : len(self.buffer)])
    #     var write_count: Int
    #     var err: Error
    #     write_count, err = writer.write(bytes)
    #     if write_count > len(bytes):
    #         panic("bytes.Reader.write_to: invalid Write count")
    #     self.index += write_count
    #     if write_count != len(bytes):
    #         return Int64(write_count), Error(io.ERR_SHORT_WRITE)
    #     return Int64(write_count), Error()

    fn reset(inout self, buffer: List[Byte]):
        """Resets the [Reader.Reader] to be reading from b.

        Args:
            buffer: The new buffer to read from.
        """
        self.buffer = buffer
        self.index = 0
        self.prev_rune = -1
fn new_reader(buffer: List[Byte]) -> Reader:
    """Constructs a [Reader] positioned at the start of `buffer`.

    Args:
        buffer: The new buffer to read from.

    Returns:
        A [Reader] with its read index at 0 and no previous rune recorded.
    """
    return Reader(buffer, 0, -1)
fn new_reader(buffer: String) -> Reader:
    """Constructs a [Reader] over the bytes of the string `buffer`,
    positioned at the start.

    Args:
        buffer: The new buffer to read from.

    Returns:
        A [Reader] with its read index at 0 and no previous rune recorded.
    """
    return Reader(buffer.as_bytes(), 0, -1)
| lightbug_http/external/gojo/bytes/reader.mojo | false |
from .buffer import Buffer, new_buffer
from .reader import Reader, new_reader
| lightbug_http/external/gojo/bytes/__init__.mojo | false |
<filename>lightbug_http/external/gojo/fmt/fmt.mojo
"""Formatting options
General
%v the value in a default format
when printing structs, the plus flag (%+v) adds field names
Boolean
%t the word true or false
Integer
%d base 10
%q a single-quoted character literal.
%x base 16, with lower-case letters for a-f
%X base 16, with upper-case letters for A-F
Floating-point and complex constituents:
%f decimal point but no exponent, e.g. 123.456
String and slice of bytes (treated equivalently with these verbs):
%s the uninterpreted bytes of the string or slice
%q a double-quoted string
TODO:
- Add support for more formatting options
- Switch to buffered writing to avoid multiple string concatenations
- Add support for width and precision formatting options
- Handle escaping for String's %q
"""
from utils.variant import Variant
from math import floor
from ..builtins import Byte
alias Args = Variant[String, Int, Float64, Bool, List[Byte]]
fn replace_first(s: String, old: String, new: String) -> String:
    """Replace the first occurrence of a substring in a string.

    Args:
        s: The original string.
        old: The substring to be replaced.
        new: The new substring.

    Returns:
        The string with the first occurrence of the old substring replaced by the new one.
    """
    var index = s.find(old)
    # Guard clause: nothing to replace when the substring is absent.
    if index == -1:
        return s
    # Stitch together prefix + replacement + remainder.
    return s[:index] + new + s[index + len(old) :]
fn find_first_verb(s: String, verbs: List[String]) -> String:
    """Returns the verb from `verbs` that occurs earliest in `s`.

    Args:
        s: The original string.
        verbs: The list of verbs to search for.

    Returns:
        The earliest-occurring verb, or "" when none of them occur.
    """
    var best_index = -1
    var best_verb: String = ""
    for candidate in verbs:
        var position = s.find(candidate[])
        if position == -1:
            continue
        # Keep whichever verb appears first in the string.
        if best_index == -1 or position < best_index:
            best_index = position
            best_verb = candidate[]
    return best_verb
alias BASE10_TO_BASE16 = List[String]("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f")
fn convert_base10_to_base16(value: Int) -> String:
    """Converts a non-negative base 10 number to base 16.

    Args:
        value: Base 10 number.

    Returns:
        Base 16 number as a String (lower-case digits, no prefix).
    """
    # Bug fix: the previous float-division loop used `while result > 1`,
    # which dropped the leading digit whenever an intermediate quotient hit
    # exactly 1 (e.g. 16 -> "0" instead of "10", 17 -> "1" instead of "11")
    # and returned "" for 0. Integer arithmetic avoids both problems and
    # any float rounding.
    if value == 0:
        return "0"
    var remaining = value
    var base16: String = ""
    while remaining > 0:
        # Prepend the least significant hex digit each pass.
        base16 = BASE10_TO_BASE16[remaining % 16] + base16
        remaining = remaining // 16
    return base16
fn format_string(format: String, arg: String) -> String:
    """Replaces the first string verb (%s or %q) in `format` with `arg`.

    %q wraps the argument in double quotes; %s inserts it as-is.

    Args:
        format: The format string.
        arg: The string to substitute.

    Returns:
        The formatted string, or `format` unchanged when it contains
        neither verb.
    """
    var verb = find_first_verb(format, List[String]("%s", "%q"))
    if verb == "":
        # No string verb present; leave the format untouched.
        return format
    var arg_to_place = arg
    if verb == "%q":
        arg_to_place = '"' + arg + '"'
    # Bug fix: the old code always replaced the literal "%s", ignoring both
    # the detected verb and the quoted argument, so %q never worked.
    return replace_first(format, verb, arg_to_place)
fn format_bytes(format: String, arg: List[Byte]) -> String:
    """Replaces the first string verb in `format` with `arg` decoded as text.

    Args:
        format: The format string.
        arg: The bytes to substitute; a null terminator is appended when
            missing so the buffer converts to a valid String.

    Returns:
        The formatted string.
    """
    var argument = arg
    # Bug fix: indexing argument[-1] on an empty list is out of bounds.
    # An empty buffer still needs the terminator to decode as "".
    if len(argument) == 0 or argument[-1] != 0:
        argument.append(0)
    return format_string(format, argument)
fn format_integer(format: String, arg: Int) -> String:
    """Replaces the first integer verb (%x, %X, %d, %q) in `format` with `arg`.

    %x/%X render hexadecimal (lower/upper case), %q single-quotes the
    decimal value, and %d inserts it in plain base 10.
    """
    var verb = find_first_verb(format, List[String]("%x", "%X", "%d", "%q"))
    var formatted: String
    if verb == "%x":
        formatted = str(convert_base10_to_base16(arg)).lower()
    elif verb == "%X":
        formatted = str(convert_base10_to_base16(arg)).upper()
    elif verb == "%q":
        formatted = "'" + str(arg) + "'"
    else:
        # %d (or no verb found): plain decimal rendering.
        formatted = str(arg)
    return replace_first(format, verb, formatted)
fn format_float(format: String, arg: Float64) -> String:
    """Replaces the first %f verb in `format` with the decimal form of `arg`."""
    var rendered = str(arg)
    return replace_first(format, String("%f"), rendered)
fn format_boolean(format: String, arg: Bool) -> String:
    """Replaces the first %t verb in `format` with "True" or "False"."""
    var value: String = "True" if arg else "False"
    return replace_first(format, String("%t"), value)
# If the number of arguments does not match the number of format specifiers
alias BadArgCount = "(BAD ARG COUNT)"
fn sprintf(formatting: String, *args: Args) -> String:
    """Formats `formatting`, substituting each verb with the matching argument.

    Returns BadArgCount when the number of verbs does not match the number
    of arguments. Literal "%%" pairs are not counted as verbs.
    """
    var result = formatting
    # Each "%%" contributes two "%" characters that are not verbs.
    var escaped_percents = formatting.count("%%") * 2
    var verb_count = formatting.count("%") - escaped_percents
    if verb_count != len(args):
        return BadArgCount

    for i in range(len(args)):
        var arg = args[i]
        if arg.isa[String]():
            result = format_string(result, arg[String])
        elif arg.isa[List[Byte]]():
            result = format_bytes(result, arg[List[Byte]])
        elif arg.isa[Int]():
            result = format_integer(result, arg[Int])
        elif arg.isa[Float64]():
            result = format_float(result, arg[Float64])
        elif arg.isa[Bool]():
            result = format_boolean(result, arg[Bool])
    return result
# TODO: temporary until we have arg packing.
fn sprintf_str(formatting: String, args: List[String]) raises -> String:
    """String-only variant of sprintf (temporary until arg packing lands).

    Raises:
        Error: When the number of "%" verbs and arguments differ.
    """
    var result = formatting
    var verb_count = formatting.count("%")
    if verb_count > len(args):
        raise Error("Not enough arguments for format string")
    if verb_count < len(args):
        raise Error("Too many arguments for format string")

    for arg in args:
        result = format_string(result, arg[])
    return result
fn printf(formatting: String, *args: Args) raises:
    """Formats `formatting` with `args` and prints the result.

    Args:
        formatting: The format string (see module docstring for verbs).
        args: The values substituted for the verbs, in order.

    Raises:
        Error: If the number of verbs and arguments differ, or an argument
            has an unsupported variant type.
    """
    var text = formatting
    # Each "%%" is an escaped literal percent, not a verb.
    var raw_percent_count = formatting.count("%%") * 2
    var formatter_count = formatting.count("%") - raw_percent_count
    if formatter_count > len(args):
        raise Error("Not enough arguments for format string")
    elif formatter_count < len(args):
        raise Error("Too many arguments for format string")

    for i in range(len(args)):
        var argument = args[i]
        if argument.isa[String]():
            text = format_string(text, argument[String])
        elif argument.isa[List[Byte]]():
            text = format_bytes(text, argument[List[Byte]])
        elif argument.isa[Int]():
            text = format_integer(text, argument[Int])
        elif argument.isa[Float64]():
            text = format_float(text, argument[Float64])
        elif argument.isa[Bool]():
            text = format_boolean(text, argument[Bool])
        else:
            # Bug fix: the old message was garbled ("Unknown for argument #").
            raise Error("Unknown type for argument #" + str(i))
    print(text)
| lightbug_http/external/gojo/fmt/fmt.mojo | false |
from .fmt import sprintf, printf, sprintf_str
| lightbug_http/external/gojo/fmt/__init__.mojo | false |
<filename>lightbug_http/external/gojo/io/io.mojo
from collections.optional import Optional
from ..builtins import cap, copy, Byte, panic
from .traits import ERR_UNEXPECTED_EOF
alias BUFFER_SIZE = 8200
fn write_string[W: Writer](inout writer: W, string: String) -> (Int, Error):
    """Writes the contents of `string` to `writer` as a byte slice.

    [Writer.write] is called exactly once.

    Args:
        writer: The writer to write to.
        string: The string to write.

    Returns:
        The number of bytes written and an error, if any.
    """
    var data = string.as_bytes_slice()
    return writer.write(data)
fn write_string[W: StringWriter](inout writer: W, string: String) -> (Int, Error):
    """Writes `string` to `writer` via [StringWriter.write_string].

    This overload is chosen when the writer implements [StringWriter],
    avoiding the byte-slice conversion of the [Writer] overload.

    Args:
        writer: The writer to write to.
        string: The string to write.

    Returns:
        The number of bytes written and an error, if any."""
    return writer.write_string(string)
fn read_at_least[R: Reader](inout reader: R, inout dest: List[Byte], min: Int) -> (Int, Error):
    """Reads from `reader` into `dest` until it has read at least `min` bytes.

    It returns the number of bytes copied and an error if fewer bytes were
    read. The error is EOF only if no bytes were read. If an EOF happens
    after reading fewer than min bytes, read_at_least returns
    [ERR_UNEXPECTED_EOF]. If min is greater than the length of dest,
    read_at_least returns [ERR_SHORT_BUFFER]. On return, n >= min if and
    only if the error is empty. If the reader returns an error having read
    at least min bytes, the error is dropped.

    Args:
        reader: The reader to read from.
        dest: The buffer to read into.
        min: The minimum number of bytes to read.

    Returns:
        The number of bytes read."""
    var error = Error()
    # NOTE(review): `io.ERR_SHORT_BUFFER` relies on the package name `io`
    # resolving from inside this module; only ERR_UNEXPECTED_EOF is imported
    # explicitly above — confirm this resolves as intended.
    if len(dest) < min:
        return 0, Error(io.ERR_SHORT_BUFFER)

    # Keep reading until `min` bytes were accumulated or a read fails.
    var total_bytes_read: Int = 0
    while total_bytes_read < min and not error:
        var bytes_read: Int
        bytes_read, error = reader.read(dest)
        total_bytes_read += bytes_read

    if total_bytes_read >= min:
        # Goal reached: any error from the final read is deliberately dropped.
        error = Error()
    elif total_bytes_read > 0 and str(error):
        # Partial read followed by an error (typically EOF) is reported as
        # "unexpected EOF", mirroring Go's io.ReadAtLeast.
        error = Error(ERR_UNEXPECTED_EOF)
    return total_bytes_read, error
fn read_full[R: Reader](inout reader: R, inout dest: List[Byte]) -> (Int, Error):
    """Reads exactly len(dest) bytes from `reader` into `dest`.

    It returns the number of bytes copied and an error if fewer bytes were
    read. The error is EOF only if no bytes were read. If an EOF happens
    after reading some but not all the bytes, read_full returns
    [ERR_UNEXPECTED_EOF]. On return, n == len(dest) if and only if the
    error is empty. If the reader returns an error having read at least
    len(dest) bytes, the error is dropped.
    """
    var required = len(dest)
    return read_at_least(reader, dest, required)
# fn copy_n[W: Writer, R: Reader](dst: W, src: R, n: Int64) raises -> Int64:
# """Copies n bytes (or until an error) from src to dst.
# It returns the number of bytes copied and the earliest
# error encountered while copying.
# On return, written == n if and only if err == nil.
# If dst implements [ReaderFrom], the copy is implemented using it.
# """
# var written = copy(dst, LimitReader(src, n))
# if written == n:
# return n
# if written < n:
# # src stopped early; must have been EOF.
# raise Error(ERR_UNEXPECTED_EOF)
# return written
# fn copy[W: Writer, R: Reader](dst: W, src: R, n: Int64) -> Int64:
# """copy copies from src to dst until either EOF is reached
# on src or an error occurs. It returns the number of bytes
# copied and the first error encountered while copying, if any.
# A successful copy returns err == nil, not err == EOF.
# Because copy is defined to read from src until EOF, it does
# not treat an EOF from Read as an error to be reported.
# If src implements [WriterTo],
# the copy is implemented by calling src.WriteTo(dst).
# Otherwise, if dst implements [ReaderFrom],
# the copy is implemented by calling dst.ReadFrom(src).
# """
# return copy_buffer(dst, src, nil)
# # CopyBuffer is identical to copy except that it stages through the
# # provided buffer (if one is required) rather than allocating a
# # temporary one. If buf is nil, one is allocated; otherwise if it has
# # zero length, CopyBuffer panics.
# #
# # If either src implements [WriterTo] or dst implements [ReaderFrom],
# # buf will not be used to perform the copy.
# fn CopyBuffer(dst Writer, src Reader, buf bytes) (written int64, err error) {
# if buf != nil and len(buf) == 0 {
# panic("empty buffer in CopyBuffer")
# }
# return copy_buffer(dst, src, buf)
# }
# fn copy_buffer[W: Writer, R: Reader](dst: W, src: R, buf: Span[Byte]) raises -> Int64:
# """Actual implementation of copy and CopyBuffer.
# if buf is nil, one is allocated.
# """
# var nr: Int
# nr = src.read(buf)
# while True:
# if nr > 0:
# var nw: Int
# nw = dst.write(get_slice(buf, 0, nr))
# if nw < 0 or nr < nw:
# nw = 0
# var written = Int64(nw)
# if nr != nw:
# raise Error(ERR_SHORT_WRITE)
# return written
# fn copy_buffer[W: Writer, R: ReaderWriteTo](dst: W, src: R, buf: Span[Byte]) -> Int64:
# return src.write_to(dst)
# fn copy_buffer[W: WriterReadFrom, R: Reader](dst: W, src: R, buf: Span[Byte]) -> Int64:
# return dst.read_from(src)
# # LimitReader returns a Reader that reads from r
# # but stops with EOF after n bytes.
# # The underlying implementation is a *LimitedReader.
# fn LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n} }
# # A LimitedReader reads from R but limits the amount of
# # data returned to just N bytes. Each call to Read
# # updates N to reflect the new amount remaining.
# # Read returns EOF when N <= 0 or when the underlying R returns EOF.
# struct LimitedReader():
# var R: Reader # underlying reader
# N int64 # max bytes remaining
# fn (l *LimitedReader) Read(p bytes) (n Int, err error) {
# if l.N <= 0 {
# return 0, EOF
# }
# if int64(len(p)) > l.N {
# p = p[0:l.N]
# }
# n, err = l.R.Read(p)
# l.N -= int64(n)
# return
# }
# # NewSectionReader returns a [SectionReader] that reads from r
# # starting at offset off and stops with EOF after n bytes.
# fn NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {
# var remaining int64
# const maxint64 = 1<<63 - 1
# if off <= maxint64-n {
# remaining = n + off
# } else {
# # Overflow, with no way to return error.
# # Assume we can read up to an offset of 1<<63 - 1.
# remaining = maxint64
# }
# return &SectionReader{r, off, off, remaining, n}
# }
# # SectionReader implements Read, Seek, and ReadAt on a section
# # of an underlying [ReaderAt].
# type SectionReader struct {
# r ReaderAt # constant after creation
# base int64 # constant after creation
# off int64
# limit int64 # constant after creation
# n int64 # constant after creation
# }
# fn (s *SectionReader) Read(p bytes) (n Int, err error) {
# if s.off >= s.limit {
# return 0, EOF
# }
# if max := s.limit - s.off; int64(len(p)) > max {
# p = p[0:max]
# }
# n, err = s.r.ReadAt(p, s.off)
# s.off += int64(n)
# return
# }
# alias errWhence = "Seek: invalid whence"
# alias errOffset = "Seek: invalid offset"
# fn (s *SectionReader) Seek(offset int64, whence Int) (int64, error) {
# switch whence {
# default:
# return 0, errWhence
# case SEEK_START:
# offset += s.base
# case SEEK_CURRENT:
# offset += s.off
# case SEEK_END:
# offset += s.limit
# }
# if offset < s.base {
# return 0, errOffset
# }
# s.off = offset
# return offset - s.base, nil
# }
# fn (s *SectionReader) ReadAt(p bytes, off int64) (n Int, err error) {
# if off < 0 or off >= s.capacity {
# return 0, EOF
# }
# off += s.base
# if max := s.limit - off; int64(len(p)) > max {
# p = p[0:max]
# n, err = s.r.ReadAt(p, off)
# if err == nil {
# err = EOF
# }
# return n, err
# }
# return s.r.ReadAt(p, off)
# }
# # Size returns the size of the section in bytes.
# fn (s *SectionReader) Size() int64 { return s.limit - s.base }
# # Outer returns the underlying [ReaderAt] and offsets for the section.
# #
# # The returned values are the same that were passed to [NewSectionReader]
# # when the [SectionReader] was created.
# fn (s *SectionReader) Outer() (r ReaderAt, off int64, n int64) {
# return s.r, s.base, s.n
# }
# # An OffsetWriter maps writes at offset base to offset base+off in the underlying writer.
# type OffsetWriter struct {
# w WriterAt
# base int64 # the original offset
# off int64 # the current offset
# }
# # NewOffsetWriter returns an [OffsetWriter] that writes to w
# # starting at offset off.
# fn NewOffsetWriter(w WriterAt, off int64) *OffsetWriter {
# return &OffsetWriter{w, off, off}
# }
# fn (o *OffsetWriter) Write(p bytes) (n Int, err error) {
# n, err = o.w.WriteAt(p, o.off)
# o.off += int64(n)
# return
# }
# fn (o *OffsetWriter) WriteAt(p bytes, off int64) (n Int, err error) {
# if off < 0 {
# return 0, errOffset
# }
# off += o.base
# return o.w.WriteAt(p, off)
# }
# fn (o *OffsetWriter) Seek(offset int64, whence Int) (int64, error) {
# switch whence {
# default:
# return 0, errWhence
# case SEEK_START:
# offset += o.base
# case SEEK_CURRENT:
# offset += o.off
# }
# if offset < o.base {
# return 0, errOffset
# }
# o.off = offset
# return offset - o.base, nil
# }
# # TeeReader returns a [Reader] that writes to w what it reads from r.
# # All reads from r performed through it are matched with
# # corresponding writes to w. There is no internal buffering -
# # the write must complete before the read completes.
# # Any error encountered while writing is reported as a read error.
# fn TeeReader(r Reader, w Writer) Reader {
# return &teeReader{r, w}
# }
# type teeReader struct {
# r Reader
# w Writer
# }
# fn (t *teeReader) Read(p bytes) (n Int, err error) {
# n, err = t.r.Read(p)
# if n > 0 {
# if n, err := t.w.Write(p[:n]); err != nil {
# return n, err
# }
# }
# return
# }
# # Discard is a [Writer] on which all Write calls succeed
# # without doing anything.
# var Discard Writer = discard{}
# type discard struct{}
# # discard implements ReaderFrom as an optimization so copy to
# # io.Discard can avoid doing unnecessary work.
# var _ ReaderFrom = discard{}
# fn (discard) Write(p bytes) (Int, error) {
# return len(p), nil
# }
# fn (discard) write_string(s string) (Int, error) {
# return len(s), nil
# }
# var blackHolePool = sync.Pool{
# New: fn() any {
# b := make(bytes, 8192)
# return &b
# },
# }
# fn (discard) ReadFrom(r Reader) (n int64, err error) {
# bufp := blackHolePool.Get().(*bytes)
# readSize := 0
# for {
# readSize, err = r.Read(*bufp)
# n += int64(readSize)
# if err != nil {
# blackHolePool.Put(bufp)
# if err == EOF {
# return n, nil
# }
# return
# }
# }
# }
# # NopCloser returns a [ReadCloser] with a no-op Close method wrapping
# # the provided [Reader] r.
# # If r implements [WriterTo], the returned [ReadCloser] will implement [WriterTo]
# # by forwarding calls to r.
# fn NopCloser(r Reader) ReadCloser {
# if _, ok := r.(WriterTo); ok {
# return nopCloserWriterTo{r}
# }
# return nopCloser{r}
# }
# type nopCloser struct {
# Reader
# }
# fn (nopCloser) Close() error { return nil }
# type nopCloserWriterTo struct {
# Reader
# }
# fn (nopCloserWriterTo) Close() error { return nil }
# fn (c nopCloserWriterTo) WriteTo(w Writer) (n int64, err error) {
# return c.Reader.(WriterTo).WriteTo(w)
# }
fn read_all[R: Reader](inout reader: R) -> (List[Byte], Error):
    """Reads from `reader` until an error or EOF and returns the data it read.

    A successful call returns an EOF error (the loop only terminates on EOF
    or a real error), mirroring the accumulate-until-EOF contract of Go's
    io.ReadAll.

    Args:
        reader: The reader to read from.

    Returns:
        The data read."""
    var dest = List[Byte](capacity=BUFFER_SIZE)
    var at_eof: Bool = False
    while True:
        # Fresh scratch buffer each pass; readers fill it up to capacity.
        var temp = List[Byte](capacity=BUFFER_SIZE)
        var bytes_read: Int
        var err: Error
        bytes_read, err = reader.read(temp)
        var err_message = str(err)
        if err_message != "":
            if err_message != EOF:
                # Real failure: return whatever was accumulated plus the error.
                return dest, err
            at_eof = True
        # If new bytes will overflow the result, resize it.
        # NOTE(review): doubling once may still be insufficient when `temp`
        # exceeds the remaining capacity — presumably `extend` grows the
        # list as needed anyway; confirm.
        if len(dest) + len(temp) > dest.capacity:
            dest.reserve(dest.capacity * 2)
        dest.extend(temp)
        if at_eof:
            return dest, err
| lightbug_http/external/gojo/io/io.mojo | false |
from collections.optional import Optional
from ..builtins import Byte
alias Rune = Int32
# Package io provides basic interfaces to I/O primitives.
# Its primary job is to wrap existing implementations of such primitives,
# such as those in package os, into shared public interfaces that
# abstract the functionality, plus some other related primitives.
#
# Because these interfaces and primitives wrap lower-level operations with
# various implementations, unless otherwise informed clients should not
# assume they are safe for parallel execution.
# Seek whence values.
alias SEEK_START = 0 # seek relative to the origin of the file
alias SEEK_CURRENT = 1 # seek relative to the current offset
alias SEEK_END = 2 # seek relative to the end
# ERR_SHORT_WRITE means that a write accepted fewer bytes than requested
# but failed to return an explicit error.
alias ERR_SHORT_WRITE = "short write"
# ERR_INVALID_WRITE means that a write returned an impossible count.
alias ERR_INVALID_WRITE = "invalid write result"
# ERR_SHORT_BUFFER means that a read required a longer buffer than was provided.
alias ERR_SHORT_BUFFER = "short buffer"
# EOF is the error returned by Read when no more input is available.
# (Read must return EOF itself, not an error wrapping EOF,
# because callers will test for EOF using ==.)
# functions should return EOF only to signal a graceful end of input.
# If the EOF occurs unexpectedly in a structured data stream,
# the appropriate error is either [ERR_UNEXPECTED_EOF] or some other error
# giving more detail.
alias EOF = "EOF"
# ERR_UNEXPECTED_EOF means that EOF was encountered in the
# middle of reading a fixed-size block or data structure.
alias ERR_UNEXPECTED_EOF = "unexpected EOF"
# ERR_NO_PROGRESS is returned by some clients of a [Reader] when
# many calls to Read have failed to return any data or error,
# usually the sign of a broken [Reader] implementation.
alias ERR_NO_PROGRESS = "multiple Read calls return no data or error"
trait Reader(Movable):
"""Reader is the trait that wraps the basic Read method.
Read reads up to len(p) bytes into p. It returns the number of bytes
read (0 <= n <= len(p)) and any error encountered. Even if Read
returns n < len(p), it may use all of p as scratch space during the call.
If some data is available but not len(p) bytes, Read conventionally
returns what is available instead of waiting for more.
When Read encounters an error or end-of-file condition after
successfully reading n > 0 bytes, it returns the number of
bytes read. It may return the (non-nil) error from the same call
or return the error (and n == 0) from a subsequent call.
An instance of this general case is that a Reader returning
a non-zero number of bytes at the end of the input stream may
return either err == EOF or err == nil. The next Read should
return 0, EOF.
Callers should always process the n > 0 bytes returned before
considering the error err. Doing so correctly handles I/O errors
that happen after reading some bytes and also both of the
allowed EOF behaviors.
If len(p) == 0, Read should always return n == 0. It may return a
non-nil error if some error condition is known, such as EOF.
Implementations of Read are discouraged from returning a
zero byte count with a nil error, except when len(p) == 0.
Callers should treat a return of 0 and nil as indicating that
nothing happened; in particular it does not indicate EOF.
Implementations must not retain p."""
fn read(inout self, inout dest: List[Byte]) -> (Int, Error):
...
trait Writer(Movable):
"""Writer is the trait that wraps the basic Write method.
Write writes len(p) bytes from p to the underlying data stream.
It returns the number of bytes written from p (0 <= n <= len(p))
and any error encountered that caused the write to stop early.
Write must return a non-nil error if it returns n < len(p).
Write must not modify the slice data, even temporarily.
Implementations must not retain p.
"""
fn write(inout self, src: Span[Byte]) -> (Int, Error):
...
trait Closer(Movable):
"""
Closer is the trait that wraps the basic Close method.
The behavior of Close after the first call is undefined.
Specific implementations may document their own behavior.
"""
fn close(inout self) -> Error:
...
trait Seeker(Movable):
"""
Seeker is the trait that wraps the basic Seek method.
Seek sets the offset for the next Read or Write to offset,
interpreted according to whence:
[SEEK_START] means relative to the start of the file,
[SEEK_CURRENT] means relative to the current offset, and
[SEEK_END] means relative to the end
(for example, offset = -2 specifies the penultimate byte of the file).
Seek returns the new offset relative to the start of the
file or an error, if any.
Seeking to an offset before the start of the file is an error.
Seeking to any positive offset may be allowed, but if the new offset exceeds
the size of the underlying object the behavior of subsequent I/O operations
is implementation-dependent.
"""
fn seek(inout self, offset: Int64, whence: Int) -> (Int64, Error):
...
trait ReadWriter(Reader, Writer):
    """Composite trait: both [Reader] and [Writer]."""

    ...


trait ReadCloser(Reader, Closer):
    """Composite trait: a [Reader] that can be closed."""

    ...


trait WriteCloser(Writer, Closer):
    """Composite trait: a [Writer] that can be closed."""

    ...


trait ReadWriteCloser(Reader, Writer, Closer):
    """Composite trait: [Reader], [Writer], and [Closer]."""

    ...


trait ReadSeeker(Reader, Seeker):
    """Composite trait: a [Reader] that supports seeking."""

    ...


trait ReadSeekCloser(Reader, Seeker, Closer):
    """Composite trait: [Reader], [Seeker], and [Closer]."""

    ...


trait WriteSeeker(Writer, Seeker):
    """Composite trait: a [Writer] that supports seeking."""

    ...


trait ReadWriteSeeker(Reader, Writer, Seeker):
    """Composite trait: [Reader], [Writer], and [Seeker]."""

    ...
trait ReaderFrom:
"""ReaderFrom is the trait that wraps the ReadFrom method.
ReadFrom reads data from r until EOF or error.
The return value n is the number of bytes read.
Any error except EOF encountered during the read is also returned.
The [copy] function uses [ReaderFrom] if available."""
fn read_from[R: Reader](inout self, inout reader: R) -> (Int64, Error):
...
trait WriterReadFrom(Writer, ReaderFrom):
...
trait WriterTo:
"""WriterTo is the trait that wraps the WriteTo method.
WriteTo writes data to w until there's no more data to write or
when an error occurs. The return value n is the number of bytes
written. Any error encountered during the write is also returned.
The copy function uses WriterTo if available."""
fn write_to[W: Writer](inout self, inout writer: W) -> (Int64, Error):
...
trait ReaderWriteTo(Reader, WriterTo):
...
trait ReaderAt:
"""ReaderAt is the trait that wraps the basic ReadAt method.
ReadAt reads len(p) bytes into p starting at offset off in the
underlying input source. It returns the number of bytes
read (0 <= n <= len(p)) and any error encountered.
When ReadAt returns n < len(p), it returns a non-nil error
explaining why more bytes were not returned. In this respect,
ReadAt is stricter than Read.
Even if ReadAt returns n < len(p), it may use all of p as scratch
space during the call. If some data is available but not len(p) bytes,
ReadAt blocks until either all the data is available or an error occurs.
In this respect ReadAt is different from Read.
If the n = len(p) bytes returned by ReadAt are at the end of the
input source, ReadAt may return either err == EOF or err == nil.
If ReadAt is reading from an input source with a seek offset,
ReadAt should not affect nor be affected by the underlying
seek offset.
Clients of ReadAt can execute parallel ReadAt calls on the
same input source.
Implementations must not retain p."""
fn read_at(self, inout dest: List[Byte], off: Int64) -> (Int, Error):
...
trait WriterAt:
"""WriterAt is the trait that wraps the basic WriteAt method.
WriteAt writes len(p) bytes from p to the underlying data stream
at offset off. It returns the number of bytes written from p (0 <= n <= len(p))
and any error encountered that caused the write to stop early.
WriteAt must return a non-nil error if it returns n < len(p).
If WriteAt is writing to a destination with a seek offset,
WriteAt should not affect nor be affected by the underlying
seek offset.
Clients of WriteAt can execute parallel WriteAt calls on the same
destination if the ranges do not overlap.
Implementations must not retain p."""
fn write_at(self, src: Span[Byte], off: Int64) -> (Int, Error):
...
trait ByteReader:
"""ByteReader is the trait that wraps the read_byte method.
read_byte reads and returns the next byte from the input or
any error encountered. If read_byte returns an error, no input
byte was consumed, and the returned byte value is undefined.
read_byte provides an efficient trait for byte-at-time
processing. A [Reader] that does not implement ByteReader
can be wrapped using bufio.NewReader to add this method."""
fn read_byte(inout self) -> (Byte, Error):
...
trait ByteScanner(ByteReader):
"""ByteScanner is the trait that adds the unread_byte method to the
basic read_byte method.
unread_byte causes the next call to read_byte to return the last byte read.
If the last operation was not a successful call to read_byte, unread_byte may
return an error, unread the last byte read (or the byte prior to the
last-unread byte), or (in implementations that support the [Seeker] trait)
seek to one byte before the current offset."""
fn unread_byte(inout self) -> Error:
...
trait ByteWriter:
"""ByteWriter is the trait that wraps the write_byte method."""
fn write_byte(inout self, byte: Byte) -> (Int, Error):
...
trait RuneReader:
    """RuneReader is the trait that wraps the read_rune method.

    read_rune reads a single encoded Unicode character and returns the rune
    and its size in bytes. If no character is available, err will be set.
    NOTE(review): the signature returns (Rune, Int) with no Error value, so
    the "err will be set" contract cannot currently be expressed — confirm
    intended error reporting.
    """

    fn read_rune(inout self) -> (Rune, Int):
        ...


trait RuneScanner(RuneReader):
    """RuneScanner is the trait that adds the unread_rune method to the
    basic read_rune method.

    unread_rune causes the next call to read_rune to return the last rune
    read. If the last operation was not a successful call to read_rune,
    unread_rune may return an error, unread the last rune read (or the rune
    prior to the last-unread rune), or (in implementations that support the
    [Seeker] trait) seek to the start of the rune before the current offset.
    NOTE(review): unread_rune returns a Rune rather than an Error — confirm
    this matches the documented contract.
    """

    fn unread_rune(inout self) -> Rune:
        ...
trait StringWriter:
"""StringWriter is the trait that wraps the WriteString method."""
fn write_string(inout self, src: String) -> (Int, Error):
...
| lightbug_http/external/gojo/io/traits.mojo | false |
from .traits import (
Reader,
Writer,
Seeker,
Closer,
ReadWriter,
ReadCloser,
WriteCloser,
ReadWriteCloser,
ReadSeeker,
ReadSeekCloser,
WriteSeeker,
ReadWriteSeeker,
ReaderFrom,
WriterReadFrom,
WriterTo,
ReaderWriteTo,
ReaderAt,
WriterAt,
ByteReader,
ByteScanner,
ByteWriter,
RuneReader,
RuneScanner,
StringWriter,
SEEK_START,
SEEK_CURRENT,
SEEK_END,
ERR_SHORT_WRITE,
ERR_NO_PROGRESS,
ERR_SHORT_BUFFER,
EOF,
)
from .io import write_string, read_at_least, read_full, read_all, BUFFER_SIZE
alias i1 = __mlir_type.i1
alias i1_1 = __mlir_attr.`1: i1`
alias i1_0 = __mlir_attr.`0: i1`
| lightbug_http/external/gojo/io/__init__.mojo | false |
<filename>lightbug_http/external/gojo/net/address.mojo
@value
struct NetworkType:
    """String-typed enumeration of supported network names (Go-style)."""

    # The network name as used in dial/resolve calls, e.g. "tcp4".
    var value: String

    alias empty = NetworkType("")
    alias tcp = NetworkType("tcp")
    alias tcp4 = NetworkType("tcp4")
    alias tcp6 = NetworkType("tcp6")
    alias udp = NetworkType("udp")
    alias udp4 = NetworkType("udp4")
    alias udp6 = NetworkType("udp6")
    alias ip = NetworkType("ip")
    alias ip4 = NetworkType("ip4")
    alias ip6 = NetworkType("ip6")
    alias unix = NetworkType("unix")
trait Addr(CollectionElement, Stringable):
    """Represents a network endpoint address (mirrors Go's net.Addr)."""

    fn network(self) -> String:
        """Name of the network (for example, "tcp", "udp")."""
        ...
@value
struct TCPAddr(Addr):
    """Addr struct representing a TCP address.

    Args:
        ip: IP address.
        port: Port number.
        zone: IPv6 addressing zone.
    """

    var ip: String
    var port: Int
    var zone: String  # IPv6 addressing zone

    fn __init__(inout self):
        # Default endpoint: localhost:8000, no zone.
        self.ip = "127.0.0.1"
        self.port = 8000
        self.zone = ""

    fn __init__(inout self, ip: String, port: Int):
        self.ip = ip
        self.port = port
        self.zone = ""

    fn __str__(self) -> String:
        """Renders as "host:port", appending "%zone" to the host when set."""
        var host = str(self.ip)
        if self.zone != "":
            host = host + "%" + self.zone
        return join_host_port(host, str(self.port))

    fn network(self) -> String:
        return NetworkType.tcp.value
fn resolve_internet_addr(network: String, address: String) raises -> TCPAddr:
    """Parses `address` according to `network` and returns it as a TCPAddr.

    Args:
        network: Network name ("tcp"/"udp"/"ip" and their 4/6 variants, or "unix").
        address: For tcp/udp networks, a "host:port" string; for ip networks,
            a bare host.

    Returns:
        The resolved address (host "" and port 0 when `address` is empty).

    Raises:
        Error: If the network type is unsupported or the address is malformed.
    """
    var host: String = ""
    var portnum: Int = 0
    if (
        network == NetworkType.tcp.value
        or network == NetworkType.tcp4.value
        or network == NetworkType.tcp6.value
        or network == NetworkType.udp.value
        or network == NetworkType.udp4.value
        or network == NetworkType.udp6.value
    ):
        if address != "":
            var host_port = split_host_port(address)
            host = host_port.host
            # The port is already parsed as an Int; the old code pointlessly
            # round-tripped it through str() and atol().
            portnum = host_port.port
    elif network == NetworkType.ip.value or network == NetworkType.ip4.value or network == NetworkType.ip6.value:
        if address != "":
            host = address
    elif network == NetworkType.unix.value:
        raise Error("Unix addresses not supported yet")
    else:
        raise Error("unsupported network type: " + network)
    return TCPAddr(host, portnum)
alias missingPortError = Error("missing port in address")
alias tooManyColonsError = Error("too many colons in address")
struct HostPort(Stringable):
    """A parsed (host, port) pair as produced by split_host_port."""

    # Host portion without brackets, e.g. "example.com" or "::1".
    var host: String
    # Numeric port.
    var port: Int

    fn __init__(inout self, host: String, port: Int):
        self.host = host
        self.port = port

    fn __str__(self) -> String:
        # Re-joins using the same IPv6 bracketing rules as join_host_port.
        return join_host_port(self.host, str(self.port))
fn join_host_port(host: String, port: String) -> String:
    """Combines host and port into "host:port".

    A host containing a colon is treated as an IPv6 literal and wrapped
    in square brackets, yielding "[host]:port".
    """
    if host.find(":") == -1:
        return host + ":" + port
    # Must be an IPv6 literal.
    return "[" + host + "]:" + port
fn split_host_port(hostport: String) raises -> HostPort:
    """Splits "host:port" (or "[host]:port" for IPv6) into a HostPort.

    Mirrors Go's net.SplitHostPort: the port is taken after the LAST colon,
    and a bracketed host may contain colons.

    Raises:
        Error: On a missing port, unbalanced brackets, stray brackets, too
            many colons, a missing host, or a non-numeric port.
    """
    var host: String = ""
    var port: String = ""
    # rfind: the port follows the last colon (IPv6 hosts contain colons).
    var colon_index = hostport.rfind(":")
    # j/k: offsets past which '[' / ']' are no longer allowed (Go parity).
    var j: Int = 0
    var k: Int = 0
    if colon_index == -1:
        raise missingPortError
    if hostport[0] == "[":
        # Bracketed (IPv6) host: "[host]:port".
        var end_bracket_index = hostport.find("]")
        if end_bracket_index == -1:
            raise Error("missing ']' in address")
        if end_bracket_index + 1 == len(hostport):
            # "]" is the last character: no port follows.
            raise missingPortError
        elif end_bracket_index + 1 == colon_index:
            # Well-formed "[host]:port".
            host = hostport[1:end_bracket_index]
            j = 1
            k = end_bracket_index + 1
        else:
            if hostport[end_bracket_index + 1] == ":":
                # Extra colon(s) after the bracketed host.
                raise tooManyColonsError
            else:
                raise missingPortError
    else:
        # Unbracketed host: everything before the last colon.
        host = hostport[:colon_index]
        if host.find(":") != -1:
            raise tooManyColonsError
    # NOTE(review): in the unbracketed branch j/k stay 0, so ANY '[' or ']'
    # in the input raises here — confirm this matches the intended Go parity.
    if hostport[j:].find("[") != -1:
        raise Error("unexpected '[' in address")
    if hostport[k:].find("]") != -1:
        raise Error("unexpected ']' in address")
    port = hostport[colon_index + 1 :]
    if port == "":
        raise missingPortError
    # NOTE(review): Go's SplitHostPort permits an empty host (":80");
    # this implementation rejects it — confirm that is deliberate.
    if host == "":
        raise Error("missing host")
    # atol raises on a non-numeric port.
    return HostPort(host, atol(port))
| lightbug_http/external/gojo/net/address.mojo | false |
<filename>lightbug_http/external/gojo/net/dial.mojo
from .tcp import TCPAddr, TCPConnection, resolve_internet_addr
from .socket import Socket
from .address import split_host_port
@value
struct Dialer:
    """Dials outbound TCP connections from a fixed local address."""

    var local_address: TCPAddr

    fn dial(self, network: String, address: String) raises -> TCPConnection:
        """Resolves `address` on `network`, connects, and returns the connection."""
        var remote = resolve_internet_addr(network, address)
        var connection_socket = Socket(local_address=self.local_address)
        connection_socket.connect(remote.ip, remote.port)
        return TCPConnection(connection_socket^)
fn dial_tcp(network: String, remote_address: TCPAddr) raises -> TCPConnection:
    """Connects to the address on the named network.

    The network must be "tcp", "tcp4", or "tcp6".

    Args:
        network: The network type.
        remote_address: The remote address to connect to.

    Returns:
        The TCP connection.
    """
    # TODO: Add conversion of domain name to ip address
    var address_string = remote_address.ip + ":" + str(remote_address.port)
    return Dialer(remote_address).dial(network, address_string)
fn dial_tcp(network: String, remote_address: String) raises -> TCPConnection:
    """Connects to the address on the named network.
    The network must be "tcp", "tcp4", or "tcp6".

    Args:
        network: The network type.
        remote_address: The remote address to connect to, as "host:port".

    Returns:
        The TCP connection.
    """
    var host_port = split_host_port(remote_address)
    var dialer = Dialer(TCPAddr(host_port.host, host_port.port))
    return dialer.dial(network, remote_address)
| lightbug_http/external/gojo/net/dial.mojo | false |
<filename>lightbug_http/external/gojo/net/fd.mojo
from collections.optional import Optional
import ..io
from ..builtins import Byte
from ..syscall.file import close
from ..syscall.types import c_char
from ..syscall.net import (
recv,
send,
strlen,
)
alias O_RDWR = 0o2
trait FileDescriptorBase(io.Reader, io.Writer, io.Closer):
    """Composite trait for types that wrap a file descriptor and can be
    read from, written to, and closed."""

    ...
struct FileDescriptor(FileDescriptorBase):
    """Owns a POSIX file descriptor and provides read/write/close operations
    over it. The descriptor is closed on deletion if not closed explicitly."""

    var fd: Int
    var is_closed: Bool

    # This takes ownership of a POSIX file descriptor.
    fn __moveinit__(inout self, owned existing: Self):
        self.fd = existing.fd
        self.is_closed = existing.is_closed

    fn __init__(inout self, fd: Int):
        self.fd = fd
        self.is_closed = False

    fn __del__(owned self):
        # Best-effort close; destructors cannot raise, so errors are printed.
        if not self.is_closed:
            var err = self.close()
            if err:
                print(str(err))

    fn close(inout self) -> Error:
        """Close the underlying file descriptor and mark this wrapper closed.

        Returns:
            An empty Error on success, or a descriptive Error on failure.
        """
        var close_status = close(self.fd)
        if close_status == -1:
            return Error("FileDescriptor.close: Failed to close socket")
        self.is_closed = True
        return Error()

    fn dup(self) -> Self:
        """Duplicate the file descriptor via dup(2)."""
        var new_fd = external_call["dup", Int, Int](self.fd)
        return Self(new_fd)

    # TODO: Need faster approach to copying data from the file descriptor to the buffer.
    fn read(inout self, inout dest: List[Byte]) -> (Int, Error):
        """Receive up to dest.capacity bytes from the file descriptor and
        append them to dest.

        Args:
            dest: The buffer to append received bytes to.

        Returns:
            The number of bytes received, and io.EOF when fewer bytes than
            the capacity were received, or an Error if recv failed.
        """
        var ptr = Pointer[UInt8]().alloc(dest.capacity)
        var bytes_received = recv(self.fd, ptr, dest.capacity, 0)
        if bytes_received == -1:
            # Fix: free the scratch buffer on the error path (was leaked).
            ptr.free()
            return 0, Error("Failed to receive message from socket.")
        var int8_ptr = ptr.bitcast[Int8]()
        for i in range(bytes_received):
            dest.append(int8_ptr[i])
        # Fix: free the scratch buffer once copied out (was leaked).
        ptr.free()
        if bytes_received < dest.capacity:
            return bytes_received, Error(io.EOF)
        return bytes_received, Error()

    fn write(inout self, src: List[Byte]) -> (Int, Error):
        """Write the contents of src to the file descriptor.

        Args:
            src: The bytes to send.

        Returns:
            The number of bytes sent, or an Error if the send failed.
        """
        var header_pointer = Pointer[Int8](src.data.address).bitcast[UInt8]()
        # Fix: send exactly len(src) bytes. The previous strlen() call stopped
        # at the first NUL byte (truncating binary payloads) and could read
        # past the end of a buffer that contained no NUL at all.
        var bytes_sent = send(self.fd, header_pointer, len(src), 0)
        if bytes_sent == -1:
            return 0, Error("Failed to send message")
        return bytes_sent, Error()
| lightbug_http/external/gojo/net/fd.mojo | false |
from utils.variant import Variant
from utils.static_tuple import StaticTuple
from sys.info import os_is_linux, os_is_macos
from ..syscall.types import (
c_int,
c_char,
c_void,
c_uint,
)
from ..syscall.net import (
addrinfo,
addrinfo_unix,
AF_INET,
SOCK_STREAM,
AI_PASSIVE,
sockaddr,
sockaddr_in,
htons,
ntohs,
inet_pton,
inet_ntop,
getaddrinfo,
getaddrinfo_unix,
gai_strerror,
to_char_ptr,
c_charptr_to_string,
)
alias AddrInfo = Variant[addrinfo, addrinfo_unix]
fn get_addr_info(host: String) raises -> AddrInfo:
    """Resolve `host` via the system resolver and return the first addrinfo
    record, using the macOS or Linux layout of `struct addrinfo` depending
    on the platform.

    Args:
        host: The hostname or IP address to resolve.

    Returns:
        The resolved addrinfo record (platform-specific variant).

    Raises:
        Error: If the resolved pointer is null, or on unsupported OSes.
    """
    # NOTE(review): this outer `status` is shadowed by the `var status`
    # declared inside each branch and is never actually read.
    var status: Int32 = 0
    if os_is_macos():
        # NOTE(review): `servinfo` is heap-allocated and never released with
        # freeaddrinfo/free — confirm whether this leak is acceptable.
        var servinfo = Pointer[addrinfo]().alloc(1)
        servinfo.store(addrinfo())
        var hints = addrinfo()
        hints.ai_family = AF_INET
        hints.ai_socktype = SOCK_STREAM
        hints.ai_flags = AI_PASSIVE
        var host_ptr = to_char_ptr(host)
        var status = getaddrinfo(
            host_ptr,
            Pointer[UInt8](),
            Pointer.address_of(hints),
            Pointer.address_of(servinfo),
        )
        if status != 0:
            # Resolution failed: print the gai_strerror diagnostics, then fall
            # through to the null-pointer check below.
            print("getaddrinfo failed to execute with status:", status)
            var msg_ptr = gai_strerror(c_int(status))
            _ = external_call["printf", c_int, Pointer[c_char], Pointer[c_char]](
                to_char_ptr("gai_strerror: %s"), msg_ptr
            )
            var msg = c_charptr_to_string(msg_ptr)
            print("getaddrinfo error message: ", msg)
        if not servinfo:
            print("servinfo is null")
            raise Error("Failed to get address info. Pointer to addrinfo is null.")
        return servinfo.load()
    elif os_is_linux():
        # Same flow as the macOS branch, but with the Linux field ordering of
        # `struct addrinfo` (addrinfo_unix).
        var servinfo = Pointer[addrinfo_unix]().alloc(1)
        servinfo.store(addrinfo_unix())
        var hints = addrinfo_unix()
        hints.ai_family = AF_INET
        hints.ai_socktype = SOCK_STREAM
        hints.ai_flags = AI_PASSIVE
        var host_ptr = to_char_ptr(host)
        var status = getaddrinfo_unix(
            host_ptr,
            Pointer[UInt8](),
            Pointer.address_of(hints),
            Pointer.address_of(servinfo),
        )
        if status != 0:
            print("getaddrinfo failed to execute with status:", status)
            var msg_ptr = gai_strerror(c_int(status))
            _ = external_call["printf", c_int, Pointer[c_char], Pointer[c_char]](
                to_char_ptr("gai_strerror: %s"), msg_ptr
            )
            var msg = c_charptr_to_string(msg_ptr)
            print("getaddrinfo error message: ", msg)
        if not servinfo:
            print("servinfo is null")
            raise Error("Failed to get address info. Pointer to addrinfo is null.")
        return servinfo.load()
    else:
        raise Error("Windows is not supported yet! Sorry!")
fn get_ip_address(host: String) raises -> String:
    """Get the IP address of a host.

    Args:
        host: The hostname or IP address string to resolve.

    Returns:
        The resolved IP address as a string.

    Raises:
        Error: If resolution succeeded but produced a null ai_addr.
    """
    # Call getaddrinfo to get the IP address of the host.
    var result = get_addr_info(host)
    var ai_addr: Pointer[sockaddr]
    var address_family: Int32 = 0
    var address_length: UInt32 = 0
    # Unpack whichever platform-specific addrinfo variant was returned.
    if result.isa[addrinfo]():
        var addrinfo = result[addrinfo]
        ai_addr = addrinfo.ai_addr
        address_family = addrinfo.ai_family
        address_length = addrinfo.ai_addrlen
    else:
        var addrinfo = result[addrinfo_unix]
        ai_addr = addrinfo.ai_addr
        address_family = addrinfo.ai_family
        address_length = addrinfo.ai_addrlen
    if not ai_addr:
        print("ai_addr is null")
        raise Error("Failed to get IP address. getaddrinfo was called successfully, but ai_addr is null.")
    # Cast sockaddr struct to sockaddr_in struct and convert the binary IP to a string using inet_ntop.
    var addr_in = ai_addr.bitcast[sockaddr_in]().load()
    return convert_binary_ip_to_string(addr_in.sin_addr.s_addr, address_family, address_length).strip()
fn convert_port_to_binary(port: Int) -> UInt16:
    """Converts a host-order port number to network byte order via htons."""
    var network_order_port = htons(UInt16(port))
    return network_order_port
fn convert_binary_port_to_int(port: UInt16) -> Int:
    """Converts a network-order port number back to a host-order Int via ntohs."""
    var host_order_port = ntohs(port)
    return int(host_order_port)
fn convert_ip_to_binary(ip_address: String, address_family: Int) -> UInt32:
    """Convert an IP address string to its 32-bit binary form via inet_pton.

    Args:
        ip_address: The IP address string, e.g. "127.0.0.1".
        address_family: The address family (e.g. AF_INET).

    Returns:
        The binary representation. On conversion failure an error message is
        printed and the buffer contents are returned unchanged, preserving
        the original best-effort behavior.
    """
    var ip_buffer = Pointer[c_void].alloc(4)
    var status = inet_pton(address_family, to_char_ptr(ip_address), ip_buffer)
    if status == -1:
        print("Failed to convert IP address to binary")
    # Fix: copy the value out and free the scratch buffer (was leaked).
    var result = ip_buffer.bitcast[c_uint]().load()
    ip_buffer.free()
    return result
fn convert_binary_ip_to_string(owned ip_address: UInt32, address_family: Int32, address_length: UInt32) -> String:
    """Convert a binary IP address to a string by calling inet_ntop.

    Args:
        ip_address: The binary IP address.
        address_family: The address family of the IP address.
        address_length: The length of the address.

    Returns:
        The IP address as a string.
    """
    # 16 bytes is enough for an IPv4 string: "255.255.255.255" (15 chars)
    # plus the NUL terminator.
    var ip_buffer = Pointer[c_void].alloc(16)
    var ip_address_ptr = Pointer.address_of(ip_address).bitcast[c_void]()
    _ = inet_ntop(address_family, ip_address_ptr, ip_buffer, 16)
    var string_buf = ip_buffer.bitcast[Int8]()
    # Scan for the NUL terminator to find the string length.
    var index = 0
    while True:
        if string_buf[index] == 0:
            break
        index += 1
    # Fix: copy the result into an owned String and free the scratch buffer;
    # previously the buffer was leaked on every call.
    var result = String(StringRef(string_buf, index))
    ip_buffer.free()
    return result
fn build_sockaddr_pointer(ip_address: String, port: Int, address_family: Int) -> Pointer[sockaddr]:
    """Build a sockaddr pointer from an IP address and port number.
    https://learn.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
    https://learn.microsoft.com/en-us/windows/win32/api/ws2def/ns-ws2def-sockaddr_in.

    WARNING(review): the returned pointer aliases the stack-local `ai`,
    which goes out of scope when this function returns. It is only safe to
    pass the result directly into a syscall in the caller's immediate
    expression; storing it is use-after-scope. Consider heap-allocating the
    sockaddr_in instead — confirm with callers before changing.
    """
    var bin_port = convert_port_to_binary(port)
    var bin_ip = convert_ip_to_binary(ip_address, address_family)
    var ai = sockaddr_in(address_family, bin_port, bin_ip, StaticTuple[c_char, 8]())
    return Pointer[sockaddr_in].address_of(ai).bitcast[sockaddr]()
| lightbug_http/external/gojo/net/ip.mojo | false |
<filename>lightbug_http/external/gojo/net/net.mojo
from memory.arc import Arc
import ..io
from ..builtins import Byte
from .socket import Socket
from .address import Addr, TCPAddr
alias DEFAULT_BUFFER_SIZE = 8200
trait Conn(io.Writer, io.Reader, io.Closer):
    """Conn is a generic stream-oriented network connection."""

    fn __init__(inout self, owned socket: Socket):
        """Constructs the connection from an owned socket."""
        ...

    fn local_address(self) -> TCPAddr:
        """Returns the local network address, if known."""
        ...

    fn remote_address(self) -> TCPAddr:
        """Returns the remote network address, if known."""
        ...

    # fn set_deadline(self, t: time.Time) -> Error:
    #     """Sets the read and write deadlines associated
    #     with the connection. It is equivalent to calling both
    #     SetReadDeadline and SetWriteDeadline.

    #     A deadline is an absolute time after which I/O operations
    #     fail instead of blocking. The deadline applies to all future
    #     and pending I/O, not just the immediately following call to
    #     read or write. After a deadline has been exceeded, the
    #     connection can be refreshed by setting a deadline in the future.

    #     If the deadline is exceeded a call to read or write or to other
    #     I/O methods will return an error that wraps os.ErrDeadlineExceeded.
    #     This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
    #     The error's Timeout method will return true, but note that there
    #     are other possible errors for which the Timeout method will
    #     return true even if the deadline has not been exceeded.

    #     An idle timeout can be implemented by repeatedly extending
    #     the deadline after successful read or write calls.

    #     A zero value for t means I/O operations will not time out."""
    #     ...

    # fn set_read_deadline(self, t: time.Time) -> Error:
    #     """Sets the deadline for future read calls
    #     and any currently-blocked read call.
    #     A zero value for t means read will not time out."""
    #     ...

    # fn set_write_deadline(self, t: time.Time) -> Error:
    #     """Sets the deadline for future write calls
    #     and any currently-blocked write call.
    #     Even if write times out, it may return n > 0, indicating that
    #     some of the data was successfully written.
    #     A zero value for t means write will not time out."""
    #     ...
@value
struct Connection(Conn):
    """Connection is a concrete generic stream-oriented network connection.
    It wraps a reference-counted Socket and is used as the internal
    connection for structs like TCPConnection.

    Args:
        fd: The file descriptor of the connection.
    """

    var fd: Arc[Socket]

    fn __init__(inout self, owned socket: Socket):
        self.fd = Arc(socket^)

    fn read(inout self, inout dest: List[Byte]) -> (Int, Error):
        """Reads data from the underlying file descriptor.

        Args:
            dest: The buffer to read data into.

        Returns:
            The number of bytes read, or an error if one occurred.
        """
        var bytes_read: Int
        var err: Error
        bytes_read, err = self.fd[].read(dest)
        # EOF is passed through alongside the byte count; other errors abort.
        if err and str(err) != io.EOF:
            return 0, err
        return bytes_read, err

    fn write(inout self, src: List[Byte]) -> (Int, Error):
        """Writes data to the underlying file descriptor.

        Args:
            src: The buffer holding the data to write.

        Returns:
            The number of bytes written, or an error if one occurred.
        """
        var bytes_sent: Int
        var err: Error
        bytes_sent, err = self.fd[].write(src)
        if err:
            return 0, err
        return bytes_sent, err

    fn close(inout self) -> Error:
        """Closes the underlying file descriptor.

        Returns:
            An error if one occurred, or an empty Error on success.
        """
        return self.fd[].close()

    fn local_address(self) -> TCPAddr:
        """Returns the local network address.
        The Addr returned is shared by all invocations of local_address, so do not modify it.
        """
        return self.fd[].local_address

    fn remote_address(self) -> TCPAddr:
        """Returns the remote network address.
        The Addr returned is shared by all invocations of remote_address, so do not modify it.
        """
        return self.fd[].remote_address
| lightbug_http/external/gojo/net/net.mojo | false |
<filename>lightbug_http/external/gojo/net/socket.mojo
from collections.optional import Optional
from ..builtins import Byte
from ..syscall.file import close
from ..syscall.types import (
c_void,
c_uint,
c_char,
c_int,
)
from ..syscall.net import (
sockaddr,
sockaddr_in,
addrinfo,
addrinfo_unix,
socklen_t,
socket,
connect,
recv,
send,
shutdown,
inet_pton,
inet_ntoa,
inet_ntop,
to_char_ptr,
htons,
ntohs,
strlen,
getaddrinfo,
getaddrinfo_unix,
gai_strerror,
c_charptr_to_string,
bind,
listen,
accept,
setsockopt,
getsockopt,
getsockname,
getpeername,
AF_INET,
SOCK_STREAM,
SHUT_RDWR,
AI_PASSIVE,
SOL_SOCKET,
SO_REUSEADDR,
SO_RCVTIMEO,
)
from .fd import FileDescriptor, FileDescriptorBase
from .ip import (
convert_binary_ip_to_string,
build_sockaddr_pointer,
convert_binary_port_to_int,
)
from .address import Addr, TCPAddr, HostPort
alias SocketClosedError = Error("Socket: Socket is already closed")
struct Socket(FileDescriptorBase):
    """Represents a network file descriptor. Wraps around a file descriptor and provides network functions.

    Args:
        local_address: The local address of the socket (local address if bound).
        remote_address: The remote address of the socket (peer's address if connected).
        address_family: The address family of the socket.
        socket_type: The socket type.
        protocol: The protocol.
    """

    var sockfd: FileDescriptor
    var address_family: Int
    var socket_type: UInt8
    var protocol: UInt8
    var local_address: TCPAddr
    var remote_address: TCPAddr
    # True once close() has completed successfully.
    var _closed: Bool
    # True once the socket is connected (or was created via accept()).
    var _is_connected: Bool

    fn __init__(
        inout self,
        local_address: TCPAddr = TCPAddr(),
        remote_address: TCPAddr = TCPAddr(),
        address_family: Int = AF_INET,
        socket_type: UInt8 = SOCK_STREAM,
        protocol: UInt8 = 0,
    ) raises:
        """Create a new socket object.

        Args:
            local_address: The local address of the socket (local address if bound).
            remote_address: The remote address of the socket (peer's address if connected).
            address_family: The address family of the socket.
            socket_type: The socket type.
            protocol: The protocol.
        """
        self.address_family = address_family
        self.socket_type = socket_type
        self.protocol = protocol
        # NOTE(review): the syscall hard-codes SOCK_STREAM and protocol 0, so
        # the `socket_type`/`protocol` arguments only affect the stored fields.
        var fd = socket(address_family, SOCK_STREAM, 0)
        if fd == -1:
            raise Error("Socket creation error")
        self.sockfd = FileDescriptor(int(fd))
        self.local_address = local_address
        self.remote_address = remote_address
        self._closed = False
        self._is_connected = False

    fn __init__(
        inout self,
        fd: Int32,
        address_family: Int,
        socket_type: UInt8,
        protocol: UInt8,
        local_address: TCPAddr = TCPAddr(),
        remote_address: TCPAddr = TCPAddr(),
    ):
        """
        Create a new socket object when you already have a socket file descriptor. Typically through socket.accept().

        Args:
            fd: The file descriptor of the socket.
            address_family: The address family of the socket.
            socket_type: The socket type.
            protocol: The protocol.
            local_address: Local address of socket.
            remote_address: Remote address of port.
        """
        self.sockfd = FileDescriptor(int(fd))
        self.address_family = address_family
        self.socket_type = socket_type
        self.protocol = protocol
        self.local_address = local_address
        self.remote_address = remote_address
        self._closed = False
        # Accepted sockets are connected by construction.
        self._is_connected = True

    fn __moveinit__(inout self, owned existing: Self):
        # Transfer ownership of the descriptor and addresses.
        self.sockfd = existing.sockfd^
        self.address_family = existing.address_family
        self.socket_type = existing.socket_type
        self.protocol = existing.protocol
        self.local_address = existing.local_address^
        self.remote_address = existing.remote_address^
        self._closed = existing._closed
        self._is_connected = existing._is_connected

    # fn __enter__(self) -> Self:
    #     return self

    # fn __exit__(inout self) raises:
    #     if self._is_connected:
    #         self.shutdown()
    #     if not self._closed:
    #         self.close()

    fn __del__(owned self):
        # Best-effort shutdown/close; destructors cannot raise.
        if self._is_connected:
            self.shutdown()
        if not self._closed:
            var err = self.close()
            # NOTE(review): this read presumably keeps `sockfd` alive until
            # after close() — confirm it is still required.
            _ = self.sockfd.fd
            if err:
                print("Failed to close socket during deletion:", str(err))

    @always_inline
    fn accept(self) raises -> Self:
        """Accept a connection. The socket must be bound to an address and listening for connections.
        The return value is a connection where conn is a new socket object usable to send and receive data on the connection,
        and address is the address bound to the socket on the other end of the connection.
        """
        # NOTE(review): `their_addr_ptr` is allocated but never freed or read;
        # the peer address is fetched separately via get_peer_name().
        var their_addr_ptr = Pointer[sockaddr].alloc(1)
        var sin_size = socklen_t(sizeof[socklen_t]())
        var new_sockfd = accept(self.sockfd.fd, their_addr_ptr, Pointer[socklen_t].address_of(sin_size))
        if new_sockfd == -1:
            raise Error("Failed to accept connection")
        var remote = self.get_peer_name()
        return Self(
            new_sockfd,
            self.address_family,
            self.socket_type,
            self.protocol,
            self.local_address,
            TCPAddr(remote.host, remote.port),
        )

    fn listen(self, backlog: Int = 0) raises:
        """Enable a server to accept connections.

        Args:
            backlog: The maximum number of queued connections. Should be at least 0, and the maximum is system-dependent (usually 5).
        """
        # Clamp negative backlogs to 0 before handing off to the syscall.
        var queued = backlog
        if backlog < 0:
            queued = 0
        if listen(self.sockfd.fd, queued) == -1:
            raise Error("Failed to listen for connections")

    @always_inline
    fn bind(inout self, address: String, port: Int) raises:
        """Bind the socket to address. The socket must not already be bound. (The format of address depends on the address family).

        When a socket is created with Socket(), it exists in a name
        space (address family) but has no address assigned to it. bind()
        assigns the address specified by addr to the socket referred to
        by the file descriptor sockfd. addrlen specifies the size, in
        bytes, of the address structure pointed to by addr.

        Traditionally, this operation is called 'assigning a name to a
        socket'.

        Args:
            address: String - The IP address to bind the socket to.
            port: The port number to bind the socket to.
        """
        var sockaddr_pointer = build_sockaddr_pointer(address, port, self.address_family)
        if bind(self.sockfd.fd, sockaddr_pointer, sizeof[sockaddr_in]()) == -1:
            # Tear the socket down so the caller can retry with a fresh one.
            _ = shutdown(self.sockfd.fd, SHUT_RDWR)
            raise Error("Binding socket failed. Wait a few seconds and try again?")
        # Record the actual bound address (e.g. the kernel-assigned port).
        var local = self.get_sock_name()
        self.local_address = TCPAddr(local.host, local.port)

    @always_inline
    fn file_no(self) -> Int32:
        """Return the file descriptor of the socket."""
        return self.sockfd.fd

    @always_inline
    fn get_sock_name(self) raises -> HostPort:
        """Return the address of the socket."""
        if self._closed:
            raise SocketClosedError
        # TODO: Add check to see if the socket is bound and error if not.
        # NOTE(review): `local_address_ptr` is allocated and never freed —
        # confirm whether the leak is acceptable here.
        var local_address_ptr = Pointer[sockaddr].alloc(1)
        var local_address_ptr_size = socklen_t(sizeof[sockaddr]())
        var status = getsockname(
            self.sockfd.fd,
            local_address_ptr,
            Pointer[socklen_t].address_of(local_address_ptr_size),
        )
        if status == -1:
            raise Error("Socket.get_sock_name: Failed to get address of local socket.")
        var addr_in = local_address_ptr.bitcast[sockaddr_in]().load()
        return HostPort(
            host=convert_binary_ip_to_string(addr_in.sin_addr.s_addr, AF_INET, 16),
            port=convert_binary_port_to_int(addr_in.sin_port),
        )

    fn get_peer_name(self) raises -> HostPort:
        """Return the address of the peer connected to the socket."""
        if self._closed:
            raise SocketClosedError
        # TODO: Add check to see if the socket is bound and error if not.
        # NOTE(review): `remote_address_ptr` is allocated and never freed —
        # confirm whether the leak is acceptable here.
        var remote_address_ptr = Pointer[sockaddr].alloc(1)
        var remote_address_ptr_size = socklen_t(sizeof[sockaddr]())
        var status = getpeername(
            self.sockfd.fd,
            remote_address_ptr,
            Pointer[socklen_t].address_of(remote_address_ptr_size),
        )
        if status == -1:
            raise Error("Socket.get_peer_name: Failed to get address of remote socket.")
        # Cast sockaddr struct to sockaddr_in to convert binary IP to string.
        var addr_in = remote_address_ptr.bitcast[sockaddr_in]().load()
        return HostPort(
            host=convert_binary_ip_to_string(addr_in.sin_addr.s_addr, AF_INET, 16),
            port=convert_binary_port_to_int(addr_in.sin_port),
        )

    fn get_socket_option(self, option_name: Int) raises -> Int:
        """Return the value of the given socket option.

        Args:
            option_name: The socket option to get.
        """
        # NOTE(review): one c_void element is allocated but the result is read
        # as an Int — confirm the allocation is large enough on all targets.
        var option_value_pointer = Pointer[c_void].alloc(1)
        var option_len = socklen_t(sizeof[socklen_t]())
        var option_len_pointer = Pointer.address_of(option_len)
        var status = getsockopt(
            self.sockfd.fd,
            SOL_SOCKET,
            option_name,
            option_value_pointer,
            option_len_pointer,
        )
        if status == -1:
            raise Error("Socket.get_sock_opt failed with status: " + str(status))
        return option_value_pointer.bitcast[Int]().load()

    fn set_socket_option(self, option_name: Int, owned option_value: UInt8 = 1) raises:
        """Set the value of the given socket option.

        Args:
            option_name: The socket option to set.
            option_value: The value to set the socket option to.
        """
        var option_value_pointer = Pointer[c_void].address_of(option_value)
        var option_len = sizeof[socklen_t]()
        var status = setsockopt(self.sockfd.fd, SOL_SOCKET, option_name, option_value_pointer, option_len)
        if status == -1:
            raise Error("Socket.set_sock_opt failed with status: " + str(status))

    fn connect(inout self, address: String, port: Int) raises:
        """Connect to a remote socket at address.

        Args:
            address: String - The IP address to connect to.
            port: The port number to connect to.
        """
        var sockaddr_pointer = build_sockaddr_pointer(address, port, self.address_family)
        if connect(self.sockfd.fd, sockaddr_pointer, sizeof[sockaddr_in]()) == -1:
            self.shutdown()
            raise Error("Socket.connect: Failed to connect to the remote socket at: " + address + ":" + str(port))
        # Record the peer's address as reported by the kernel.
        var remote = self.get_peer_name()
        self.remote_address = TCPAddr(remote.host, remote.port)

    fn write(inout self: Self, src: List[Byte]) -> (Int, Error):
        """Send data to the socket. The socket must be connected to a remote socket.

        Args:
            src: The data to send.

        Returns:
            The number of bytes sent.
        """
        var bytes_written: Int
        var err: Error
        bytes_written, err = self.sockfd.write(src)
        if err:
            return 0, err
        return bytes_written, Error()

    fn send_all(self, src: List[Byte], max_attempts: Int = 3) raises:
        """Send data to the socket. The socket must be connected to a remote socket.

        Args:
            src: The data to send.
            max_attempts: The maximum number of attempts to send the data.

        Raises:
            Error: If sending failed or max_attempts was exceeded.
        """
        var header_pointer = src.unsafe_ptr()
        var total_bytes_sent = 0
        var attempts = 0

        # Try to send all the data in the buffer. If it did not send all the data, keep trying but start from the offset of the last successful send.
        # NOTE(review): the per-chunk length comes from strlen(), which stops
        # at the first NUL byte — confirm this is intended for binary payloads.
        while total_bytes_sent < len(src):
            if attempts > max_attempts:
                raise Error("Failed to send message after " + str(max_attempts) + " attempts.")

            var bytes_sent = send(
                self.sockfd.fd,
                header_pointer.offset(total_bytes_sent),
                strlen(header_pointer.offset(total_bytes_sent)),
                0,
            )
            if bytes_sent == -1:
                raise Error("Failed to send message, wrote" + String(total_bytes_sent) + "bytes before failing.")
            total_bytes_sent += bytes_sent
            attempts += 1

    fn send_to(inout self, src: List[Byte], address: String, port: Int) raises -> Int:
        """Send data to the a remote address by connecting to the remote socket before sending.
        The socket must be not already be connected to a remote socket.

        Args:
            src: The data to send.
            address: The IP address to connect to.
            port: The port number to connect to.

        Returns:
            The number of bytes written.
        """
        # NOTE(review): `header_pointer` is computed but never used below.
        var header_pointer = Pointer[Int8](src.data.address).bitcast[UInt8]()
        self.connect(address, port)
        var bytes_written: Int
        var err: Error
        bytes_written, err = self.write(src)
        if err:
            raise err
        return bytes_written

    fn read(inout self, inout dest: List[Byte]) -> (Int, Error):
        """Receive data from the socket."""
        # Not ideal since we can't use the pointer from the List[Byte] struct directly. So we use a temporary pointer to receive the data.
        # Then we copy all the data over.
        var bytes_written: Int
        var err: Error
        bytes_written, err = self.sockfd.read(dest)
        if err:
            # NOTE(review): this compares against the literal "EOF" while
            # sibling code compares against io.EOF — confirm they match.
            if str(err) != "EOF":
                return 0, err
        return bytes_written, Error()

    fn shutdown(self):
        # Best-effort shutdown of both directions; the return value is ignored.
        _ = shutdown(self.sockfd.fd, SHUT_RDWR)

    fn close(inout self) -> Error:
        """Mark the socket closed.
        Once that happens, all future operations on the socket object will fail.
        The remote end will receive no more data (after queued data is flushed).
        """
        self.shutdown()
        var err = self.sockfd.close()
        if err:
            return err
        self._closed = True
        return Error()

    # TODO: Trying to set timeout fails, but some other options don't?
    # fn get_timeout(self) raises -> Seconds:
    #     """Return the timeout value for the socket."""
    #     return self.get_socket_option(SO_RCVTIMEO)

    # fn set_timeout(self, owned duration: Seconds) raises:
    #     """Set the timeout value for the socket.

    #     Args:
    #         duration: Seconds - The timeout duration in seconds.
    #     """
    #     self.set_socket_option(SO_RCVTIMEO, duration)

    fn send_file(self, file: FileHandle, offset: Int = 0) raises:
        # Reads the whole file into memory and sends it; `offset` is currently
        # unused.
        self.send_all(file.read_bytes())
| lightbug_http/external/gojo/net/socket.mojo | false |
from ..builtins import Byte
from ..syscall.net import SO_REUSEADDR
from .net import Connection, Conn
from .address import TCPAddr, NetworkType, split_host_port
from .socket import Socket
# Time in nanoseconds
alias Duration = Int
alias DEFAULT_BUFFER_SIZE = 8200
alias DEFAULT_TCP_KEEP_ALIVE = Duration(15 * 1000 * 1000 * 1000) # 15 seconds
fn resolve_internet_addr(network: String, address: String) raises -> TCPAddr:
    """Parses `address` according to `network` and returns the matching TCPAddr.

    Args:
        network: One of the tcp/udp/ip network type strings.
        address: The address to parse; "host:port" for tcp/udp networks.

    Returns:
        The resolved TCP address.

    Raises:
        Error: For unix networks (unsupported) or an unknown network type.
    """
    var host: String = ""
    var portnum: Int = 0
    var is_host_port_network = (
        network == NetworkType.tcp.value
        or network == NetworkType.tcp4.value
        or network == NetworkType.tcp6.value
        or network == NetworkType.udp.value
        or network == NetworkType.udp4.value
        or network == NetworkType.udp6.value
    )
    if is_host_port_network:
        if address != "":
            var host_port = split_host_port(address)
            host = host_port.host
            portnum = atol(str(host_port.port))
    elif (
        network == NetworkType.ip.value
        or network == NetworkType.ip4.value
        or network == NetworkType.ip6.value
    ):
        # IP-level networks carry no port component.
        if address != "":
            host = address
    elif network == NetworkType.unix.value:
        raise Error("Unix addresses not supported yet")
    else:
        raise Error("unsupported network type: " + network)
    return TCPAddr(host, portnum)
# TODO: For now listener is paired with TCP until we need to support
# more than one type of Connection or Listener
@value
struct ListenConfig(CollectionElement):
    """Configuration used when creating TCP listeners."""

    # Keep-alive duration in nanoseconds.
    var keep_alive: Duration

    fn listen(self, network: String, address: String) raises -> TCPListener:
        """Binds a socket to `address` on `network` and starts listening.

        Args:
            network: The network type (e.g. "tcp").
            address: The "host:port" address to listen on.

        Returns:
            A TCPListener accepting connections on the bound socket.

        Raises:
            Error: If resolving, binding, or listening fails.
        """
        var tcp_addr = resolve_internet_addr(network, address)
        var socket = Socket(local_address=tcp_addr)
        # Fix: SO_REUSEADDR must be set BEFORE bind() for it to take effect.
        # Previously it was set after bind(), which did nothing and caused
        # "address already in use" errors on quick server restarts.
        socket.set_socket_option(SO_REUSEADDR, 1)
        socket.bind(tcp_addr.ip, tcp_addr.port)
        socket.listen()
        print(str("Listening on ") + str(socket.local_address))
        return TCPListener(socket^, self, network, address)
trait Listener(Movable):
    """A generic network listener for stream-oriented protocols."""

    # Raising here because a Result[Optional[Connection], Error] is funky.
    fn accept(self) raises -> Connection:
        ...

    fn close(inout self) -> Error:
        ...

    fn addr(self) raises -> TCPAddr:
        ...
@value
struct TCPConnection(Conn):
    """TCPConn is an implementation of the Conn interface for TCP network connections.

    Args:
        connection: The underlying Connection.
    """

    var _connection: Connection

    fn __init__(inout self, connection: Connection):
        self._connection = connection

    fn __init__(inout self, owned socket: Socket):
        self._connection = Connection(socket^)

    fn __moveinit__(inout self, owned existing: Self):
        self._connection = existing._connection^

    fn read(inout self, inout dest: List[Byte]) -> (Int, Error):
        """Reads data from the underlying file descriptor.

        Args:
            dest: The buffer to read data into.

        Returns:
            The number of bytes read, or an error if one occurred.
        """
        var bytes_read: Int
        var err: Error
        bytes_read, err = self._connection.read(dest)
        # EOF is treated as a clean result; other errors abort the read.
        if err and str(err) != io.EOF:
            return 0, err
        return bytes_read, Error()

    fn write(inout self, src: List[Byte]) -> (Int, Error):
        """Writes data to the underlying file descriptor.

        Args:
            src: The buffer holding the data to write.

        Returns:
            The number of bytes written, or an error if one occurred.
        """
        var bytes_sent: Int
        var err: Error
        bytes_sent, err = self._connection.write(src)
        if err:
            return 0, err
        return bytes_sent, Error()

    fn close(inout self) -> Error:
        """Closes the underlying file descriptor.

        Returns:
            An error if one occurred, or an empty Error on success.
        """
        return self._connection.close()

    fn local_address(self) -> TCPAddr:
        """Returns the local network address.
        The Addr returned is shared by all invocations of local_address, so do not modify it.

        Returns:
            The local network address.
        """
        return self._connection.local_address()

    fn remote_address(self) -> TCPAddr:
        """Returns the remote network address.
        The Addr returned is shared by all invocations of remote_address, so do not modify it.

        Returns:
            The remote network address.
        """
        return self._connection.remote_address()
fn listen_tcp(network: String, local_address: TCPAddr) raises -> TCPListener:
    """Creates a new TCP listener.

    Args:
        network: The network type.
        local_address: The local address to listen on.
    """
    var address_string = local_address.ip + ":" + str(local_address.port)
    return ListenConfig(DEFAULT_TCP_KEEP_ALIVE).listen(network, address_string)
fn listen_tcp(network: String, local_address: String) raises -> TCPListener:
    """Creates a new TCP listener.

    Args:
        network: The network type.
        local_address: The address to listen on. The format is "host:port".
    """
    var config = ListenConfig(DEFAULT_TCP_KEEP_ALIVE)
    return config.listen(network, local_address)
struct TCPListener(Listener):
    """A TCP socket listening for incoming connections on an address."""

    # The bound, listening socket.
    var _file_descriptor: Socket
    # Config used to (re)create listeners on this network/address.
    var listen_config: ListenConfig
    # e.g. "tcp".
    var network_type: String
    # The "host:port" string this listener was created with.
    var address: String

    fn __init__(
        inout self,
        owned file_descriptor: Socket,
        listen_config: ListenConfig,
        network_type: String,
        address: String,
    ):
        self._file_descriptor = file_descriptor^
        self.listen_config = listen_config
        self.network_type = network_type
        self.address = address

    fn __moveinit__(inout self, owned existing: Self):
        self._file_descriptor = existing._file_descriptor^
        self.listen_config = existing.listen_config^
        self.network_type = existing.network_type
        self.address = existing.address

    fn listen(self) raises -> Self:
        """Creates and returns a NEW listener bound to this listener's
        network and address; it does not re-listen on this socket."""
        return self.listen_config.listen(self.network_type, self.address)

    fn accept(self) raises -> Connection:
        """Blocks until a client connects and returns it as a generic Connection."""
        return Connection(self._file_descriptor.accept())

    fn accept_tcp(self) raises -> TCPConnection:
        """Blocks until a client connects and returns it as a TCPConnection."""
        return TCPConnection(self._file_descriptor.accept())

    fn close(inout self) -> Error:
        """Closes the listening socket."""
        return self._file_descriptor.close()

    fn addr(self) raises -> TCPAddr:
        """Returns the address this listener was configured with."""
        return resolve_internet_addr(self.network_type, self.address)
| lightbug_http/external/gojo/net/tcp.mojo | false |
"""Adapted from go's net package
A good chunk of the leg work here came from the lightbug_http project! https://github.com/saviorand/lightbug_http/tree/main
"""
| lightbug_http/external/gojo/net/__init__.mojo | false |
<filename>lightbug_http/external/gojo/strings/builder.mojo
import ..io
from ..builtins import Byte
@value
struct StringBuilder[growth_factor: Float32 = 2](Stringable, Sized, io.Writer, io.StringWriter):
"""
A string builder class that allows for efficient string management and concatenation.
This class is useful when you need to build a string by appending multiple strings
together. The performance increase is not linear. Compared to string concatenation,
I've observed around 20-30x faster for writing and rending ~4KB and up to 2100x-2300x
for ~4MB. This is because it avoids the overhead of creating and destroying many
intermediate strings and performs memcopy operations.
The result is a more efficient when building larger string concatenations. It
is generally not recommended to use this class for small concatenations such as
a few strings like `a + b + c + d` because the overhead of creating the string
builder and appending the strings is not worth the performance gain.
Example:
```
from strings.builder import StringBuilder
var sb = StringBuilder()
sb.write_string("Hello ")
sb.write_string("World!")
print(sb) # Hello World!
```
"""
var data: DTypePointer[DType.uint8]
var size: Int
var capacity: Int
@always_inline
fn __init__(inout self, *, capacity: Int = 8200):
constrained[growth_factor >= 1.25]()
self.data = DTypePointer[DType.uint8]().alloc(capacity)
self.size = 0
self.capacity = capacity
@always_inline
fn __del__(owned self):
if self.data:
self.data.free()
@always_inline
fn __len__(self) -> Int:
"""
Returns the length of the string builder.
Returns:
The length of the string builder.
"""
return self.size
@always_inline
fn __str__(self) -> String:
"""
Converts the string builder to a string.
Returns:
The string representation of the string builder. Returns an empty
string if the string builder is empty.
"""
var copy = DTypePointer[DType.uint8]().alloc(self.size)
memcpy(copy, self.data, self.size)
return StringRef(copy, self.size)
@always_inline
fn render(self) -> StringSlice[is_mutable=False, lifetime=ImmutableStaticLifetime]:
"""
Return a StringSlice view of the data owned by the builder.
Slightly faster than __str__, 10-20% faster in limited testing.
Returns:
The string representation of the string builder. Returns an empty string if the string builder is empty.
"""
return StringSlice[is_mutable=False, lifetime=ImmutableStaticLifetime](unsafe_from_utf8_strref=StringRef(self.data, self.size))
@always_inline
fn _resize(inout self, capacity: Int) -> None:
"""
Resizes the string builder buffer.
Args:
capacity: The new capacity of the string builder buffer.
"""
var new_data = DTypePointer[DType.uint8]().alloc(capacity)
memcpy(new_data, self.data, self.size)
self.data.free()
self.data = new_data
self.capacity = capacity
return None
    @always_inline
    fn write(inout self, src: Span[Byte]) -> (Int, Error):
        """
        Appends a byte Span to the builder buffer.

        Args:
            src: The byte array to append.

        Returns:
            The number of bytes written and an (always empty) Error.
        """
        # Grow geometrically by growth_factor; if one growth step is still too
        # small for this write, jump straight to the exact size needed.
        if len(src) > self.capacity - self.size:
            var new_capacity = int(self.capacity * growth_factor)
            if new_capacity < self.capacity + len(src):
                new_capacity = self.capacity + len(src)
            self._resize(new_capacity)
        # NOTE(review): reaches into Span internals (`src._data`) — confirm no
        # public accessor exists in this Mojo version before changing.
        memcpy(self.data.offset(self.size), src._data, len(src))
        self.size += len(src)
        return len(src), Error()
    @always_inline
    fn write_string(inout self, src: String) -> (Int, Error):
        """
        Appends a string to the builder buffer.

        Args:
            src: The string to append.

        Returns:
            The number of bytes written and an (always empty) Error.
        """
        # Delegates to write() via a zero-copy byte view of the string.
        return self.write(src.as_bytes_slice())
| lightbug_http/external/gojo/strings/builder.mojo | false |
import ..io
from ..builtins import Byte, copy, panic
@value
struct Reader(Sized, io.Reader, io.ReaderAt, io.ByteReader, io.ByteScanner, io.Seeker, io.WriterTo):
    """A Reader that implements the [io.Reader], [io.ReaderAt], [io.ByteReader], [io.ByteScanner], [io.Seeker], and [io.WriterTo] traits
    by reading from a string. The zero value for Reader operates like a Reader of an empty string.
    """

    var string: String  # the underlying string being read
    var read_pos: Int64  # current reading index
    var prev_rune: Int  # index of previous rune; or < 0

    fn __init__(inout self, string: String = ""):
        self.string = string
        self.read_pos = 0
        self.prev_rune = -1

    fn __len__(self) -> Int:
        """Returns the number of bytes of the unread portion of the string.

        Returns:
            int: the number of bytes of the unread portion of the string.
        """
        if self.read_pos >= Int64(len(self.string)):
            return 0

        return int(Int64(len(self.string)) - self.read_pos)

    fn size(self) -> Int64:
        """Returns the original length of the underlying string.
        size is the number of bytes available for reading via [Reader.read_at].
        The returned value is always the same and is not affected by calls
        to any other method.

        Returns:
            The original length of the underlying string.
        """
        return Int64(len(self.string))

    fn read(inout self, inout dest: List[Byte]) -> (Int, Error):
        """Reads from the underlying string into the provided List[Byte] object.
        Implements the [io.Reader] trait.

        Args:
            dest: The destination List[Byte] object to read into.

        Returns:
            The number of bytes read into dest, and io.EOF when the string
            is exhausted.
        """
        if self.read_pos >= Int64(len(self.string)):
            return 0, Error(io.EOF)

        self.prev_rune = -1
        var bytes_written = copy(dest, self.string[int(self.read_pos) :].as_bytes())
        self.read_pos += Int64(bytes_written)
        return bytes_written, Error()

    fn read_at(self, inout dest: List[Byte], off: Int64) -> (Int, Error):
        """Reads from the Reader into the dest List[Byte] starting at the offset off.
        It returns the number of bytes read into dest and an error if any.
        Implements the [io.ReaderAt] trait.

        Args:
            dest: The destination List[Byte] object to read into.
            off: The byte offset to start reading from.

        Returns:
            The number of bytes read into dest, and io.EOF when fewer bytes
            were copied than dest could hold.
        """
        # cannot modify state - see io.ReaderAt
        if off < 0:
            return 0, Error("strings.Reader.read_at: negative offset")

        if off >= Int64(len(self.string)):
            return 0, Error(io.EOF)

        var error = Error()
        var copied_elements_count = copy(dest, self.string[int(off) :].as_bytes())
        if copied_elements_count < len(dest):
            error = Error(io.EOF)

        # Bug fix: this previously returned a fresh `Error()`, discarding the
        # EOF error assigned above on a short copy (Go's strings.Reader.ReadAt
        # returns io.EOF in that case).
        return copied_elements_count, error

    fn read_byte(inout self) -> (Byte, Error):
        """Reads the next byte from the underlying string.
        Implements the [io.ByteReader] trait.

        Returns:
            The next byte from the underlying string, and io.EOF when the
            string is exhausted.
        """
        self.prev_rune = -1
        if self.read_pos >= Int64(len(self.string)):
            return Byte(0), Error(io.EOF)

        var b = self.string[int(self.read_pos)]
        self.read_pos += 1
        return Byte(ord(b)), Error()

    fn unread_byte(inout self) -> Error:
        """Unreads the last byte read. Only the most recent byte read can be unread.
        Implements the [io.ByteScanner] trait.

        Returns:
            An error if the read position is already at the beginning of the string.
        """
        if self.read_pos <= 0:
            return Error("strings.Reader.unread_byte: at beginning of string")

        self.prev_rune = -1
        self.read_pos -= 1

        return Error()

    # # read_rune implements the [io.RuneReader] trait.
    # fn read_rune() (ch rune, size int, err error):
    #     if self.read_pos >= Int64(len(self.string)):
    #         self.prev_rune = -1
    #         return 0, 0, io.EOF

    #     self.prev_rune = int(self.read_pos)
    #     if c = self.string[self.read_pos]; c < utf8.RuneSelf:
    #         self.read_pos += 1
    #         return rune(c), 1, nil

    #     ch, size = utf8.DecodeRuneInString(self.string[self.read_pos:])
    #     self.read_pos += Int64(size)
    #     return

    # # unread_rune implements the [io.RuneScanner] trait.
    # fn unread_rune() error:
    #     if self.read_pos <= 0:
    #         return errors.New("strings.Reader.unread_rune: at beginning of string")
    #     if self.prev_rune < 0:
    #         return errors.New("strings.Reader.unread_rune: previous operation was not read_rune")
    #     self.read_pos = Int64(self.prev_rune)
    #     self.prev_rune = -1
    #     return nil

    fn seek(inout self, offset: Int64, whence: Int) -> (Int64, Error):
        """Seeks to a new position in the underlying string. The next read will start from that position.
        Implements the [io.Seeker] trait.

        Args:
            offset: The offset to seek to.
            whence: The seek mode. It can be one of [io.SEEK_START], [io.SEEK_CURRENT], or [io.SEEK_END].

        Returns:
            The new position in the string, or an error for an invalid whence
            or a resulting negative position.
        """
        self.prev_rune = -1
        var position: Int64 = 0

        if whence == io.SEEK_START:
            position = offset
        elif whence == io.SEEK_CURRENT:
            position = self.read_pos + offset
        elif whence == io.SEEK_END:
            position = Int64(len(self.string)) + offset
        else:
            return Int64(0), Error("strings.Reader.seek: invalid whence")

        if position < 0:
            return Int64(0), Error("strings.Reader.seek: negative position")

        self.read_pos = position
        return position, Error()

    fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int64, Error):
        """Writes the remaining portion of the underlying string to the provided writer.
        Implements the [io.WriterTo] trait.

        Args:
            writer: The writer to write the remaining portion of the string to.

        Returns:
            The number of bytes written to the writer.
        """
        self.prev_rune = -1
        if self.read_pos >= Int64(len(self.string)):
            return Int64(0), Error()

        var chunk_to_write = self.string[int(self.read_pos) :]
        var bytes_written: Int
        var err: Error
        bytes_written, err = io.write_string(writer, chunk_to_write)
        if bytes_written > len(chunk_to_write):
            panic("strings.Reader.write_to: invalid write_string count")

        self.read_pos += Int64(bytes_written)
        # A short write with no reported error is still an error to the caller.
        if bytes_written != len(chunk_to_write) and not err:
            err = Error(io.ERR_SHORT_WRITE)

        return Int64(bytes_written), err

    # TODO: How can I differentiate between the two write_to methods when the writer implements both traits?
    # fn write_to[W: io.StringWriter](inout self, inout writer: W) raises -> Int64:
    #     """Writes the remaining portion of the underlying string to the provided writer.
    #     Implements the [io.WriterTo] trait.

    #     Args:
    #         writer: The writer to write the remaining portion of the string to.

    #     Returns:
    #         The number of bytes written to the writer.
    #     """
    #     self.prev_rune = -1
    #     if self.read_pos >= Int64(len(self.string)):
    #         return 0

    #     var chunk_to_write = self.string[self.read_pos:]
    #     var bytes_written = io.write_string(writer, chunk_to_write)
    #     if bytes_written > len(chunk_to_write):
    #         raise Error("strings.Reader.write_to: invalid write_string count")

    #     self.read_pos += Int64(bytes_written)
    #     if bytes_written != len(chunk_to_write):
    #         raise Error(io.ERR_SHORT_WRITE)

    #     return Int64(bytes_written)

    fn reset(inout self, string: String):
        """Resets the [Reader] to be reading from the beginning of the provided string.

        Args:
            string: The string to read from.
        """
        self.string = string
        self.read_pos = 0
        self.prev_rune = -1
fn new_reader(string: String = "") -> Reader:
    """Returns a new [Reader] reading from the provided string.
    It is similar to [bytes.new_buffer] but more efficient and non-writable.

    Args:
        string: The string to read from.

    Returns:
        A new [Reader] positioned at the start of `string`.
    """
    return Reader(string)
| lightbug_http/external/gojo/strings/reader.mojo | false |
<filename>lightbug_http/external/gojo/strings/__init__.mojo
from .builder import StringBuilder
from .reader import Reader, new_reader
| lightbug_http/external/gojo/strings/__init__.mojo | false |
<filename>lightbug_http/external/gojo/syscall/file.mojo
from . import c_int, c_char, c_void, c_size_t, c_ssize_t
# --- ( File Related Syscalls & Structs )---------------------------------------

# open(2) flag bits as used by this project's target platforms.
alias O_NONBLOCK = 16384
alias O_ACCMODE = 3
alias O_CLOEXEC = 524288


fn close(fildes: c_int) -> c_int:
    """Libc POSIX `close` function
    Reference: https://man7.org/linux/man-pages/man3/close.3p.html
    Fn signature: int close(int fildes).

    Args:
        fildes: A File Descriptor to close.

    Returns:
        Upon successful completion, 0 shall be returned; otherwise, -1
        shall be returned and errno set to indicate the error.
    """
    return external_call["close", c_int, c_int](fildes)


fn open[*T: AnyType](path: UnsafePointer[c_char], oflag: c_int) -> c_int:
    """Libc POSIX `open` function
    Reference: https://man7.org/linux/man-pages/man3/open.3p.html
    Fn signature: int open(const char *path, int oflag, ...).

    Args:
        path: A pointer to a C string containing the path to open.
        oflag: The flags to open the file with.

    Returns:
        A File Descriptor or -1 in case of failure
    """
    # NOTE(review): the variadic parameter list [*T: AnyType] is unused here —
    # presumably mirrors open(2)'s varargs (mode); confirm before removing.
    return external_call["open", c_int, UnsafePointer[c_char], c_int](path, oflag)  # FnName, RetType # Args


fn read(fildes: c_int, buf: UnsafePointer[c_void], nbyte: c_size_t) -> c_int:
    """Libc POSIX `read` function
    Reference: https://man7.org/linux/man-pages/man3/read.3p.html
    Fn signature: ssize_t read(int fildes, void *buf, size_t nbyte).

    Args:
        fildes: A File Descriptor.
        buf: A pointer to a buffer to store the read data.
        nbyte: The number of bytes to read.

    Returns:
        The number of bytes read or -1 in case of failure.
    """
    # NOTE(review): external_call is declared to return c_ssize_t while this
    # function's return type is c_int — confirm the narrowing is intended.
    return external_call["read", c_ssize_t, c_int, UnsafePointer[c_void], c_size_t](fildes, buf, nbyte)


fn write(fildes: c_int, buf: UnsafePointer[c_void], nbyte: c_size_t) -> c_int:
    """Libc POSIX `write` function
    Reference: https://man7.org/linux/man-pages/man3/write.3p.html
    Fn signature: ssize_t write(int fildes, const void *buf, size_t nbyte).

    Args:
        fildes: A File Descriptor.
        buf: A pointer to a buffer to write.
        nbyte: The number of bytes to write.

    Returns:
        The number of bytes written or -1 in case of failure.
    """
    return external_call["write", c_ssize_t, c_int, UnsafePointer[c_void], c_size_t](fildes, buf, nbyte)
| lightbug_http/external/gojo/syscall/file.mojo | false |
from . import c_char, c_int, c_ushort, c_uint, c_size_t, c_ssize_t
from .types import strlen
from .file import O_CLOEXEC, O_NONBLOCK
from utils.static_tuple import StaticTuple
alias IPPROTO_IPV6 = 41
alias IPV6_V6ONLY = 26
alias EPROTONOSUPPORT = 93
# Adapted from https://github.com/gabrieldemarmiesse/mojo-stdlib-extensions/ . Huge thanks to Gabriel!
alias FD_STDIN: c_int = 0
alias FD_STDOUT: c_int = 1
alias FD_STDERR: c_int = 2
alias SUCCESS = 0
alias GRND_NONBLOCK: UInt8 = 1
alias char_pointer = DTypePointer[DType.uint8]
# --- ( error.h Constants )-----------------------------------------------------
alias EPERM = 1
alias ENOENT = 2
alias ESRCH = 3
alias EINTR = 4
alias EIO = 5
alias ENXIO = 6
alias E2BIG = 7
alias ENOEXEC = 8
alias EBADF = 9
alias ECHILD = 10
alias EAGAIN = 11
alias ENOMEM = 12
alias EACCES = 13
alias EFAULT = 14
alias ENOTBLK = 15
alias EBUSY = 16
alias EEXIST = 17
alias EXDEV = 18
alias ENODEV = 19
alias ENOTDIR = 20
alias EISDIR = 21
alias EINVAL = 22
alias ENFILE = 23
alias EMFILE = 24
alias ENOTTY = 25
alias ETXTBSY = 26
alias EFBIG = 27
alias ENOSPC = 28
alias ESPIPE = 29
alias EROFS = 30
alias EMLINK = 31
alias EPIPE = 32
alias EDOM = 33
alias ERANGE = 34
alias EWOULDBLOCK = EAGAIN
fn to_char_ptr(s: String) -> DTypePointer[DType.uint8]:
    """Copies `s` into a newly allocated, NUL-terminated C string buffer.
    Only ASCII-based strings. The caller owns the returned pointer.

    Args:
        s: The ASCII string to convert.

    Returns:
        A pointer to a heap-allocated, NUL-terminated copy of `s`.
    """
    # Bug fix: allocate one extra byte and write a terminating NUL. The
    # previous version allocated exactly len(s) bytes with no terminator,
    # so C consumers (strlen, inet_pton, getaddrinfo) would read past the
    # end of the buffer.
    var ptr = DTypePointer[DType.uint8]().alloc(len(s) + 1)
    for i in range(len(s)):
        ptr.store(i, ord(s[i]))
    ptr.store(len(s), 0)
    return ptr
fn c_charptr_to_string(s: DTypePointer[DType.uint8]) -> String:
    """Builds a String from a NUL-terminated C string pointer (length via strlen)."""
    return String(s, strlen(s))


fn cftob(val: c_int) -> Bool:
    """Convert C-like failure (-1) to Bool."""
    # NOTE(review): returns True only for strictly positive values, so a 0
    # "success" return also maps to False — confirm callers expect this.
    return rebind[Bool](val > 0)
# --- ( Network Related Constants )---------------------------------------------
alias sa_family_t = c_ushort
alias socklen_t = c_uint
alias in_addr_t = c_uint
alias in_port_t = c_ushort
# Address Family Constants
alias AF_UNSPEC = 0
alias AF_UNIX = 1
alias AF_LOCAL = AF_UNIX
alias AF_INET = 2
alias AF_AX25 = 3
alias AF_IPX = 4
alias AF_APPLETALK = 5
alias AF_NETROM = 6
alias AF_BRIDGE = 7
alias AF_ATMPVC = 8
alias AF_X25 = 9
alias AF_INET6 = 10
alias AF_ROSE = 11
alias AF_DECnet = 12
alias AF_NETBEUI = 13
alias AF_SECURITY = 14
alias AF_KEY = 15
alias AF_NETLINK = 16
alias AF_ROUTE = AF_NETLINK
alias AF_PACKET = 17
alias AF_ASH = 18
alias AF_ECONET = 19
alias AF_ATMSVC = 20
alias AF_RDS = 21
alias AF_SNA = 22
alias AF_IRDA = 23
alias AF_PPPOX = 24
alias AF_WANPIPE = 25
alias AF_LLC = 26
alias AF_CAN = 29
alias AF_TIPC = 30
alias AF_BLUETOOTH = 31
alias AF_IUCV = 32
alias AF_RXRPC = 33
alias AF_ISDN = 34
alias AF_PHONET = 35
alias AF_IEEE802154 = 36
alias AF_CAIF = 37
alias AF_ALG = 38
alias AF_NFC = 39
alias AF_VSOCK = 40
alias AF_KCM = 41
alias AF_QIPCRTR = 42
alias AF_MAX = 43
# Protocol family constants
alias PF_UNSPEC = AF_UNSPEC
alias PF_UNIX = AF_UNIX
alias PF_LOCAL = AF_LOCAL
alias PF_INET = AF_INET
alias PF_AX25 = AF_AX25
alias PF_IPX = AF_IPX
alias PF_APPLETALK = AF_APPLETALK
alias PF_NETROM = AF_NETROM
alias PF_BRIDGE = AF_BRIDGE
alias PF_ATMPVC = AF_ATMPVC
alias PF_X25 = AF_X25
alias PF_INET6 = AF_INET6
alias PF_ROSE = AF_ROSE
alias PF_DECnet = AF_DECnet
alias PF_NETBEUI = AF_NETBEUI
alias PF_SECURITY = AF_SECURITY
alias PF_KEY = AF_KEY
alias PF_NETLINK = AF_NETLINK
alias PF_ROUTE = AF_ROUTE
alias PF_PACKET = AF_PACKET
alias PF_ASH = AF_ASH
alias PF_ECONET = AF_ECONET
alias PF_ATMSVC = AF_ATMSVC
alias PF_RDS = AF_RDS
alias PF_SNA = AF_SNA
alias PF_IRDA = AF_IRDA
alias PF_PPPOX = AF_PPPOX
alias PF_WANPIPE = AF_WANPIPE
alias PF_LLC = AF_LLC
alias PF_CAN = AF_CAN
alias PF_TIPC = AF_TIPC
alias PF_BLUETOOTH = AF_BLUETOOTH
alias PF_IUCV = AF_IUCV
alias PF_RXRPC = AF_RXRPC
alias PF_ISDN = AF_ISDN
alias PF_PHONET = AF_PHONET
alias PF_IEEE802154 = AF_IEEE802154
alias PF_CAIF = AF_CAIF
alias PF_ALG = AF_ALG
alias PF_NFC = AF_NFC
alias PF_VSOCK = AF_VSOCK
alias PF_KCM = AF_KCM
alias PF_QIPCRTR = AF_QIPCRTR
alias PF_MAX = AF_MAX
# Socket Type constants
alias SOCK_STREAM = 1
alias SOCK_DGRAM = 2
alias SOCK_RAW = 3
alias SOCK_RDM = 4
alias SOCK_SEQPACKET = 5
alias SOCK_DCCP = 6
alias SOCK_PACKET = 10
alias SOCK_CLOEXEC = O_CLOEXEC
alias SOCK_NONBLOCK = O_NONBLOCK
# Address Information
alias AI_PASSIVE = 1
alias AI_CANONNAME = 2
alias AI_NUMERICHOST = 4
alias AI_V4MAPPED = 2048
alias AI_ALL = 256
alias AI_ADDRCONFIG = 1024
alias AI_IDN = 64
alias INET_ADDRSTRLEN = 16
alias INET6_ADDRSTRLEN = 46
alias SHUT_RD = 0
alias SHUT_WR = 1
alias SHUT_RDWR = 2
alias SOL_SOCKET = 65535
# Socket Options
alias SO_DEBUG = 1
alias SO_REUSEADDR = 4
alias SO_TYPE = 4104
alias SO_ERROR = 4103
alias SO_DONTROUTE = 16
alias SO_BROADCAST = 32
alias SO_SNDBUF = 4097
alias SO_RCVBUF = 4098
alias SO_KEEPALIVE = 8
alias SO_OOBINLINE = 256
alias SO_LINGER = 128
alias SO_REUSEPORT = 512
alias SO_RCVLOWAT = 4100
alias SO_SNDLOWAT = 4099
alias SO_RCVTIMEO = 4102
alias SO_SNDTIMEO = 4101
alias SO_RCVTIMEO_OLD = 4102
alias SO_SNDTIMEO_OLD = 4101
alias SO_ACCEPTCONN = 2
# unsure of these socket options, they weren't available via python
alias SO_NO_CHECK = 11
alias SO_PRIORITY = 12
alias SO_BSDCOMPAT = 14
alias SO_PASSCRED = 16
alias SO_PEERCRED = 17
alias SO_SECURITY_AUTHENTICATION = 22
alias SO_SECURITY_ENCRYPTION_TRANSPORT = 23
alias SO_SECURITY_ENCRYPTION_NETWORK = 24
alias SO_BINDTODEVICE = 25
alias SO_ATTACH_FILTER = 26
alias SO_DETACH_FILTER = 27
alias SO_GET_FILTER = SO_ATTACH_FILTER
alias SO_PEERNAME = 28
alias SO_TIMESTAMP = 29
alias SO_TIMESTAMP_OLD = 29
alias SO_PEERSEC = 31
alias SO_SNDBUFFORCE = 32
alias SO_RCVBUFFORCE = 33
alias SO_PASSSEC = 34
alias SO_TIMESTAMPNS = 35
alias SO_TIMESTAMPNS_OLD = 35
alias SO_MARK = 36
alias SO_TIMESTAMPING = 37
alias SO_TIMESTAMPING_OLD = 37
alias SO_PROTOCOL = 38
alias SO_DOMAIN = 39
alias SO_RXQ_OVFL = 40
alias SO_WIFI_STATUS = 41
alias SCM_WIFI_STATUS = SO_WIFI_STATUS
alias SO_PEEK_OFF = 42
alias SO_NOFCS = 43
alias SO_LOCK_FILTER = 44
alias SO_SELECT_ERR_QUEUE = 45
alias SO_BUSY_POLL = 46
alias SO_MAX_PACING_RATE = 47
alias SO_BPF_EXTENSIONS = 48
alias SO_INCOMING_CPU = 49
alias SO_ATTACH_BPF = 50
alias SO_DETACH_BPF = SO_DETACH_FILTER
alias SO_ATTACH_REUSEPORT_CBPF = 51
alias SO_ATTACH_REUSEPORT_EBPF = 52
alias SO_CNX_ADVICE = 53
alias SCM_TIMESTAMPING_OPT_STATS = 54
alias SO_MEMINFO = 55
alias SO_INCOMING_NAPI_ID = 56
alias SO_COOKIE = 57
alias SCM_TIMESTAMPING_PKTINFO = 58
alias SO_PEERGROUPS = 59
alias SO_ZEROCOPY = 60
alias SO_TXTIME = 61
alias SCM_TXTIME = SO_TXTIME
alias SO_BINDTOIFINDEX = 62
alias SO_TIMESTAMP_NEW = 63
alias SO_TIMESTAMPNS_NEW = 64
alias SO_TIMESTAMPING_NEW = 65
alias SO_RCVTIMEO_NEW = 66
alias SO_SNDTIMEO_NEW = 67
alias SO_DETACH_REUSEPORT_BPF = 68
# --- ( Network Related Structs )-----------------------------------------------
@value
@register_passable("trivial")
struct in_addr:
    """Mirror of C `struct in_addr` (IPv4 address)."""

    var s_addr: in_addr_t  # address in network byte order


@value
@register_passable("trivial")
struct in6_addr:
    """Mirror of C `struct in6_addr` (IPv6 address)."""

    var s6_addr: StaticTuple[c_char, 16]  # 16 raw address bytes


@value
@register_passable("trivial")
struct sockaddr:
    """Mirror of C `struct sockaddr` (generic socket address)."""

    var sa_family: sa_family_t
    var sa_data: StaticTuple[c_char, 14]


@value
@register_passable("trivial")
struct sockaddr_in:
    """Mirror of C `struct sockaddr_in` (IPv4 socket address)."""

    var sin_family: sa_family_t
    var sin_port: in_port_t  # port in network byte order
    var sin_addr: in_addr
    var sin_zero: StaticTuple[c_char, 8]  # padding to the size of sockaddr


@value
@register_passable("trivial")
struct sockaddr_in6:
    """Mirror of C `struct sockaddr_in6` (IPv6 socket address)."""

    var sin6_family: sa_family_t
    var sin6_port: in_port_t  # port in network byte order
    var sin6_flowinfo: c_uint
    var sin6_addr: in6_addr
    var sin6_scope_id: c_uint


@value
@register_passable("trivial")
struct addrinfo:
    """Struct field ordering can vary based on platform.
    For MacOS, I had to swap the order of ai_canonname and ai_addr.
    https://stackoverflow.com/questions/53575101/calling-getaddrinfo-directly-from-python-ai-addr-is-null-pointer.
    """

    var ai_flags: c_int
    var ai_family: c_int
    var ai_socktype: c_int
    var ai_protocol: c_int
    var ai_addrlen: socklen_t
    var ai_canonname: DTypePointer[DType.uint8]
    var ai_addr: UnsafePointer[sockaddr]
    var ai_next: UnsafePointer[addrinfo]  # next entry in the linked result list

    fn __init__(
        inout self,
        ai_flags: c_int = 0,
        ai_family: c_int = 0,
        ai_socktype: c_int = 0,
        ai_protocol: c_int = 0,
        ai_addrlen: socklen_t = 0,
        ai_canonname: DTypePointer[DType.uint8] = DTypePointer[DType.uint8](),
        ai_addr: UnsafePointer[sockaddr] = UnsafePointer[sockaddr](),
        ai_next: UnsafePointer[addrinfo] = UnsafePointer[addrinfo](),
    ):
        self.ai_flags = ai_flags
        self.ai_family = ai_family
        self.ai_socktype = ai_socktype
        self.ai_protocol = ai_protocol
        self.ai_addrlen = ai_addrlen
        self.ai_canonname = ai_canonname
        self.ai_addr = ai_addr
        self.ai_next = ai_next

    # fn __init__() -> Self:
    #     return Self(0, 0, 0, 0, 0, DTypePointer[DType.uint8](), UnsafePointer[sockaddr](), UnsafePointer[addrinfo]())


@value
@register_passable("trivial")
struct addrinfo_unix:
    """Struct field ordering can vary based on platform.
    For MacOS, I had to swap the order of ai_canonname and ai_addr.
    https://stackoverflow.com/questions/53575101/calling-getaddrinfo-directly-from-python-ai-addr-is-null-pointer.
    """

    # Same fields as `addrinfo` but with ai_addr before ai_canonname —
    # presumably the Linux layout; confirm against the target libc headers.
    var ai_flags: c_int
    var ai_family: c_int
    var ai_socktype: c_int
    var ai_protocol: c_int
    var ai_addrlen: socklen_t
    var ai_addr: UnsafePointer[sockaddr]
    var ai_canonname: DTypePointer[DType.uint8]
    var ai_next: UnsafePointer[addrinfo]

    fn __init__(
        inout self,
        ai_flags: c_int = 0,
        ai_family: c_int = 0,
        ai_socktype: c_int = 0,
        ai_protocol: c_int = 0,
        ai_addrlen: socklen_t = 0,
        ai_canonname: DTypePointer[DType.uint8] = DTypePointer[DType.uint8](),
        ai_addr: UnsafePointer[sockaddr] = UnsafePointer[sockaddr](),
        ai_next: UnsafePointer[addrinfo] = UnsafePointer[addrinfo](),
    ):
        self.ai_flags = ai_flags
        self.ai_family = ai_family
        self.ai_socktype = ai_socktype
        self.ai_protocol = ai_protocol
        self.ai_addrlen = ai_addrlen
        self.ai_canonname = ai_canonname
        self.ai_addr = ai_addr
        self.ai_next = ai_next
# --- ( Network Related Syscalls & Structs )------------------------------------
fn htonl(hostlong: c_uint) -> c_uint:
    """Libc POSIX `htonl` function
    Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html
    Fn signature: uint32_t htonl(uint32_t hostlong).

    Args:
        hostlong: A 32-bit integer in host byte order.

    Returns:
        The value provided in network byte order.
    """
    return external_call["htonl", c_uint, c_uint](hostlong)


fn htons(hostshort: c_ushort) -> c_ushort:
    """Libc POSIX `htons` function
    Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html
    Fn signature: uint16_t htons(uint16_t hostshort).

    Args:
        hostshort: A 16-bit integer in host byte order.

    Returns:
        The value provided in network byte order.
    """
    return external_call["htons", c_ushort, c_ushort](hostshort)


fn ntohl(netlong: c_uint) -> c_uint:
    """Libc POSIX `ntohl` function
    Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html
    Fn signature: uint32_t ntohl(uint32_t netlong).

    Args:
        netlong: A 32-bit integer in network byte order.

    Returns:
        The value provided in host byte order.
    """
    return external_call["ntohl", c_uint, c_uint](netlong)


fn ntohs(netshort: c_ushort) -> c_ushort:
    """Libc POSIX `ntohs` function
    Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html
    Fn signature: uint16_t ntohs(uint16_t netshort).

    Args:
        netshort: A 16-bit integer in network byte order.

    Returns:
        The value provided in host byte order.
    """
    return external_call["ntohs", c_ushort, c_ushort](netshort)
fn inet_ntop(
    af: c_int, src: DTypePointer[DType.uint8], dst: DTypePointer[DType.uint8], size: socklen_t
) -> DTypePointer[DType.uint8]:
    """Libc POSIX `inet_ntop` function
    Reference: https://man7.org/linux/man-pages/man3/inet_ntop.3p.html.
    Fn signature: const char *inet_ntop(int af, const void *restrict src, char *restrict dst, socklen_t size).

    Args:
        af: Address Family see AF_ aliases.
        src: A pointer to a binary address.
        dst: A pointer to a buffer to store the result.
        size: The size of the buffer.

    Returns:
        A pointer to the buffer containing the result.
    """
    return external_call[
        "inet_ntop",
        DTypePointer[DType.uint8],  # FnName, RetType
        c_int,
        DTypePointer[DType.uint8],
        DTypePointer[DType.uint8],
        socklen_t,  # Args
    ](af, src, dst, size)


fn inet_pton(af: c_int, src: DTypePointer[DType.uint8], dst: DTypePointer[DType.uint8]) -> c_int:
    """Libc POSIX `inet_pton` function
    Reference: https://man7.org/linux/man-pages/man3/inet_ntop.3p.html
    Fn signature: int inet_pton(int af, const char *restrict src, void *restrict dst).

    Args:
        af: Address Family see AF_ aliases.
        src: A pointer to a NUL-terminated string containing the address.
        dst: A pointer to a buffer to store the result.

    Returns:
        1 on success, 0 if the input is not a valid address, -1 on error.
    """
    return external_call[
        "inet_pton",
        c_int,  # FnName, RetType
        c_int,
        DTypePointer[DType.uint8],
        DTypePointer[DType.uint8],  # Args
    ](af, src, dst)


fn inet_addr(cp: DTypePointer[DType.uint8]) -> in_addr_t:
    """Libc POSIX `inet_addr` function
    Reference: https://man7.org/linux/man-pages/man3/inet_addr.3p.html
    Fn signature: in_addr_t inet_addr(const char *cp).

    Args:
        cp: A pointer to a NUL-terminated string containing the address.

    Returns:
        The address in network byte order.
    """
    return external_call["inet_addr", in_addr_t, DTypePointer[DType.uint8]](cp)


fn inet_ntoa(addr: in_addr) -> DTypePointer[DType.uint8]:
    """Libc POSIX `inet_ntoa` function
    Reference: https://man7.org/linux/man-pages/man3/inet_addr.3p.html
    Fn signature: char *inet_ntoa(struct in_addr in).

    Args:
        addr: The IPv4 address to format.

    Returns:
        A pointer to a statically allocated dotted-decimal string (per the
        man page, overwritten by subsequent calls).
    """
    return external_call["inet_ntoa", DTypePointer[DType.uint8], in_addr](addr)
fn socket(domain: c_int, type: c_int, protocol: c_int) -> c_int:
    """Libc POSIX `socket` function
    Reference: https://man7.org/linux/man-pages/man3/socket.3p.html
    Fn signature: int socket(int domain, int type, int protocol).

    Args:
        domain: Address Family see AF_ aliases.
        type: Socket Type see SOCK_ aliases.
        protocol: The protocol to use.

    Returns:
        A File Descriptor or -1 in case of failure.
    """
    return external_call["socket", c_int, c_int, c_int, c_int](domain, type, protocol)  # FnName, RetType # Args


fn setsockopt(
    socket: c_int,
    level: c_int,
    option_name: c_int,
    option_value: DTypePointer[DType.uint8],
    option_len: socklen_t,
) -> c_int:
    """Libc POSIX `setsockopt` function
    Reference: https://man7.org/linux/man-pages/man3/setsockopt.3p.html
    Fn signature: int setsockopt(int socket, int level, int option_name, const void *option_value, socklen_t option_len).

    Args:
        socket: A File Descriptor.
        level: The protocol level.
        option_name: The option to set.
        option_value: A pointer to the value to set.
        option_len: The size of the value.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "setsockopt",
        c_int,  # FnName, RetType
        c_int,
        c_int,
        c_int,
        DTypePointer[DType.uint8],
        socklen_t,  # Args
    ](socket, level, option_name, option_value, option_len)


fn getsockopt(
    socket: c_int,
    level: c_int,
    option_name: c_int,
    option_value: DTypePointer[DType.uint8],
    option_len: UnsafePointer[socklen_t],
) -> c_int:
    """Libc POSIX `getsockopt` function
    Reference: https://man7.org/linux/man-pages/man3/getsockopt.3p.html
    Fn signature: int getsockopt(int socket, int level, int option_name, void *restrict option_value, socklen_t *restrict option_len).

    Args:
        socket: A File Descriptor.
        level: The protocol level.
        option_name: The option to get.
        option_value: A pointer to the value to get.
        option_len: Pointer to the size of the value.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "getsockopt",
        c_int,  # FnName, RetType
        c_int,
        c_int,
        c_int,
        DTypePointer[DType.uint8],
        UnsafePointer[socklen_t],  # Args
    ](socket, level, option_name, option_value, option_len)
fn getsockname(socket: c_int, address: UnsafePointer[sockaddr], address_len: UnsafePointer[socklen_t]) -> c_int:
    """Libc POSIX `getsockname` function
    Reference: https://man7.org/linux/man-pages/man3/getsockname.3p.html
    Fn signature: int getsockname(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len).

    Args:
        socket: A File Descriptor.
        address: A pointer to a buffer to store the local address.
        address_len: A pointer to the size of the buffer.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "getsockname",
        c_int,  # FnName, RetType
        c_int,
        UnsafePointer[sockaddr],
        UnsafePointer[socklen_t],  # Args
    ](socket, address, address_len)


fn getpeername(sockfd: c_int, addr: UnsafePointer[sockaddr], address_len: UnsafePointer[socklen_t]) -> c_int:
    """Libc POSIX `getpeername` function
    Reference: https://man7.org/linux/man-pages/man2/getpeername.2.html
    Fn signature: int getpeername(int socket, struct sockaddr *restrict addr, socklen_t *restrict address_len).

    Args:
        sockfd: A File Descriptor.
        addr: A pointer to a buffer to store the address of the peer.
        address_len: A pointer to the size of the buffer.

    Returns:
        0 on success, -1 on error.
    """
    return external_call[
        "getpeername",
        c_int,  # FnName, RetType
        c_int,
        UnsafePointer[sockaddr],
        UnsafePointer[socklen_t],  # Args
    ](sockfd, addr, address_len)


fn bind(socket: c_int, address: UnsafePointer[sockaddr], address_len: socklen_t) -> c_int:
    """Libc POSIX `bind` function
    Reference: https://man7.org/linux/man-pages/man3/bind.3p.html
    Fn signature: int bind(int socket, const struct sockaddr *address, socklen_t address_len).

    Args:
        socket: A File Descriptor.
        address: A pointer to the local address to bind to.
        address_len: The size of the address.

    Returns:
        0 on success, -1 on error.
    """
    return external_call["bind", c_int, c_int, UnsafePointer[sockaddr], socklen_t](  # FnName, RetType # Args
        socket, address, address_len
    )


fn listen(socket: c_int, backlog: c_int) -> c_int:
    """Libc POSIX `listen` function
    Reference: https://man7.org/linux/man-pages/man3/listen.3p.html
    Fn signature: int listen(int socket, int backlog).

    Args:
        socket: A File Descriptor.
        backlog: The maximum length of the queue of pending connections.

    Returns:
        0 on success, -1 on error.
    """
    return external_call["listen", c_int, c_int, c_int](socket, backlog)


fn accept(socket: c_int, address: UnsafePointer[sockaddr], address_len: UnsafePointer[socklen_t]) -> c_int:
    """Libc POSIX `accept` function
    Reference: https://man7.org/linux/man-pages/man3/accept.3p.html
    Fn signature: int accept(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len).

    Args:
        socket: A File Descriptor.
        address: A pointer to a buffer to store the address of the peer.
        address_len: A pointer to the size of the buffer.

    Returns:
        A File Descriptor or -1 in case of failure.
    """
    return external_call[
        "accept",
        c_int,  # FnName, RetType
        c_int,
        UnsafePointer[sockaddr],
        UnsafePointer[socklen_t],  # Args
    ](socket, address, address_len)


fn connect(socket: c_int, address: UnsafePointer[sockaddr], address_len: socklen_t) -> c_int:
    """Libc POSIX `connect` function
    Reference: https://man7.org/linux/man-pages/man3/connect.3p.html
    Fn signature: int connect(int socket, const struct sockaddr *address, socklen_t address_len).

    Args:
        socket: A File Descriptor.
        address: A pointer to the address to connect to.
        address_len: The size of the address.

    Returns:
        0 on success, -1 on error.
    """
    return external_call["connect", c_int, c_int, UnsafePointer[sockaddr], socklen_t](  # FnName, RetType # Args
        socket, address, address_len
    )
fn recv(socket: c_int, buffer: DTypePointer[DType.uint8], length: c_size_t, flags: c_int) -> c_ssize_t:
    """Libc POSIX `recv` function
    Reference: https://man7.org/linux/man-pages/man3/recv.3p.html
    Fn signature: ssize_t recv(int socket, void *buffer, size_t length, int flags).

    Args:
        socket: A File Descriptor.
        buffer: A pointer to the buffer to receive into.
        length: The size of the buffer.
        flags: Flags to control the behaviour of the function.

    Returns:
        The number of bytes received, 0 on orderly shutdown of the peer,
        or -1 in case of failure.
    """
    return external_call[
        "recv",
        c_ssize_t,  # FnName, RetType
        c_int,
        DTypePointer[DType.uint8],
        c_size_t,
        c_int,  # Args
    ](socket, buffer, length, flags)


fn send(socket: c_int, buffer: DTypePointer[DType.uint8], length: c_size_t, flags: c_int) -> c_ssize_t:
    """Libc POSIX `send` function
    Reference: https://man7.org/linux/man-pages/man3/send.3p.html
    Fn signature: ssize_t send(int socket, const void *buffer, size_t length, int flags).

    Args:
        socket: A File Descriptor.
        buffer: A pointer to the buffer to send.
        length: The size of the buffer.
        flags: Flags to control the behaviour of the function.

    Returns:
        The number of bytes sent or -1 in case of failure.
    """
    return external_call[
        "send",
        c_ssize_t,  # FnName, RetType
        c_int,
        DTypePointer[DType.uint8],
        c_size_t,
        c_int,  # Args
    ](socket, buffer, length, flags)


fn shutdown(socket: c_int, how: c_int) -> c_int:
    """Libc POSIX `shutdown` function
    Reference: https://man7.org/linux/man-pages/man3/shutdown.3p.html
    Fn signature: int shutdown(int socket, int how).

    Args:
        socket: A File Descriptor.
        how: How to shutdown the socket (SHUT_RD, SHUT_WR, or SHUT_RDWR).

    Returns:
        0 on success, -1 on error.
    """
    return external_call["shutdown", c_int, c_int, c_int](socket, how)  # FnName, RetType # Args
fn getaddrinfo(
nodename: DTypePointer[DType.uint8],
servname: DTypePointer[DType.uint8],
hints: UnsafePointer[addrinfo],
res: UnsafePointer[UnsafePointer[addrinfo]],
) -> c_int:
"""Libc POSIX `getaddrinfo` function
Reference: https://man7.org/linux/man-pages/man3/getaddrinfo.3p.html
Fn signature: int getaddrinfo(const char *restrict nodename, const char *restrict servname, const struct addrinfo *restrict hints, struct addrinfo **restrict res).
"""
return external_call[
"getaddrinfo",
c_int, # FnName, RetType
DTypePointer[DType.uint8],
DTypePointer[DType.uint8],
UnsafePointer[addrinfo], # Args
UnsafePointer[UnsafePointer[addrinfo]], # Args
](nodename, servname, hints, res)
fn getaddrinfo_unix(
nodename: DTypePointer[DType.uint8],
servname: DTypePointer[DType.uint8],
hints: UnsafePointer[addrinfo_unix],
res: UnsafePointer[UnsafePointer[addrinfo_unix]],
) -> c_int:
"""Libc POSIX `getaddrinfo` function
Reference: https://man7.org/linux/man-pages/man3/getaddrinfo.3p.html
Fn signature: int getaddrinfo(const char *restrict nodename, const char *restrict servname, const struct addrinfo *restrict hints, struct addrinfo **restrict res).
"""
return external_call[
"getaddrinfo",
c_int, # FnName, RetType
DTypePointer[DType.uint8],
DTypePointer[DType.uint8],
UnsafePointer[addrinfo_unix], # Args
UnsafePointer[UnsafePointer[addrinfo_unix]], # Args
](nodename, servname, hints, res)
fn gai_strerror(ecode: c_int) -> DTypePointer[DType.uint8]:
    """Libc POSIX `gai_strerror` function.
    Reference: https://man7.org/linux/man-pages/man3/gai_strerror.3p.html
    Fn signature: const char *gai_strerror(int ecode).

    Args: ecode: The error code.
    Returns: A pointer to a string describing the error.
    """
    # Translate a getaddrinfo error code into a static, null-terminated C string.
    var message = external_call["gai_strerror", DTypePointer[DType.uint8], c_int](ecode)
    return message
# fn inet_pton(address_family: Int, address: String) -> Int:
# var ip_buf_size = 4
# if address_family == AF_INET6:
# ip_buf_size = 16
# var ip_buf = DTypePointer[DType.uint8].alloc(ip_buf_size)
# var conv_status = inet_pton(rebind[c_int](address_family), to_char_ptr(address), ip_buf)
# return int(ip_buf.bitcast[c_uint]().load())
| lightbug_http/external/gojo/syscall/net.mojo | false |
<filename>lightbug_http/external/gojo/syscall/types.mojo
fn strlen(s: DTypePointer[DType.uint8]) -> c_size_t:
    """Libc POSIX `strlen` function.
    Reference: https://man7.org/linux/man-pages/man3/strlen.3p.html
    Fn signature: size_t strlen(const char *s).

    Args: s: A pointer to a C string.
    Returns: The length of the string (bytes before the terminating NUL).
    """
    var length = external_call["strlen", c_size_t, DTypePointer[DType.uint8]](s)
    return length
| lightbug_http/external/gojo/syscall/types.mojo | false |
from .net import FD_STDIN, FD_STDOUT, FD_STDERR
# Adapted from https://github.com/crisadamo/mojo-Libc . Huge thanks to Cristian!
# C types
# Aliases mapping C ABI type names onto Mojo's fixed-width types so the libc
# bindings in this package can mirror C function signatures one-for-one.
# NOTE(review): c_long/c_ulong = 64-bit matches LP64 (64-bit Unix); on LLP64
# platforms (Windows) C `long` is 32-bit — confirm the supported targets.
alias c_void = UInt8
alias c_char = UInt8
alias c_schar = Int8
alias c_uchar = UInt8
alias c_short = Int16
alias c_ushort = UInt16
alias c_int = Int32
alias c_uint = UInt32
alias c_long = Int64
alias c_ulong = UInt64
alias c_float = Float32
alias c_double = Float64
# `Int` is known to be machine's width
alias c_size_t = Int
alias c_ssize_t = Int
alias ptrdiff_t = Int64
alias intptr_t = Int64
alias uintptr_t = UInt64
| lightbug_http/external/gojo/syscall/__init__.mojo | false |
from testing import testing
@value
struct MojoTest:
    """A small helper for writing tests: prints the test name on creation and
    reports assertion failures by printing them rather than raising, so a
    failing check does not abort the rest of the test run."""

    var test_name: String

    fn __init__(inout self, test_name: String):
        self.test_name = test_name
        # Announce the test, markdown-header style, so output is easy to scan.
        print("# " + test_name)

    fn assert_true(self, cond: Bool, message: String = ""):
        """Check that `cond` is True; print the failure instead of raising."""
        try:
            if len(message) > 0:
                testing.assert_true(cond, message)
            else:
                testing.assert_true(cond)
        except e:
            print(e)

    fn assert_false(self, cond: Bool, message: String = ""):
        """Check that `cond` is False; print the failure instead of raising."""
        try:
            if len(message) > 0:
                testing.assert_false(cond, message)
            else:
                testing.assert_false(cond)
        except e:
            print(e)

    fn assert_equal[T: testing.Testable](self, left: T, right: T):
        """Check that `left == right`; print the failure instead of raising."""
        try:
            testing.assert_equal(left, right)
        except e:
            print(e)
<filename>lightbug_http/external/gojo/uni__init__.mojo
from .utf8 import string_iterator, rune_count_in_string
| lightbug_http/external/gojo/uni__init__.mojo | false |
"""Almost all of the actual implementation in this module was written by @mzaks (https://github.com/mzaks)!
This would not be possible without his help.
"""
from ...builtins import Rune
from algorithm.functional import vectorize
from memory.unsafe import DTypePointer
from sys.info import simdwidthof
from bit import countl_zero
# The default lowest and highest continuation byte.
# UTF-8 continuation bytes have the bit pattern 0b10xxxxxx, i.e. 0x80-0xBF.
alias locb = 0b10000000
alias hicb = 0b10111111
alias RUNE_SELF = 0x80 # Characters below RuneSelf are represented as themselves in a single byte
# acceptRange gives the range of valid values for the second byte in a UTF-8
# sequence.
@value
struct AcceptRange(CollectionElement):
    """Inclusive range of valid values for the second byte of a UTF-8 sequence."""

    var lo: UInt8  # lowest value for second byte.
    var hi: UInt8  # highest value for second byte.
# ACCEPT_RANGES has size 16 to avoid bounds checks in the code that uses it.
# NOTE(review): only 5 entries are defined here, so the "size 16" claim (carried
# over from Go's unicode/utf8 acceptRanges) does not hold for this List — confirm
# no caller indexes past 4. Indices correspond to the high nibble of the `first`
# table entries below.
alias ACCEPT_RANGES = List[AcceptRange](
    AcceptRange(locb, hicb),
    AcceptRange(0xA0, hicb),
    AcceptRange(locb, 0x9F),
    AcceptRange(0x90, hicb),
    AcceptRange(locb, 0x8F),
)
# The names of these constants are chosen to give nice alignment in the
# table below. The first nibble is an index into ACCEPT_RANGES or F for
# special one-byte cases. The second nibble is the Rune length or the
# Status for the special one-byte case.
alias xx = 0xF1 # invalid: size 1
alias as1 = 0xF0 # ASCII: size 1
alias s1 = 0x02 # accept 0, size 2
alias s2 = 0x13 # accept 1, size 3
alias s3 = 0x03 # accept 0, size 3
alias s4 = 0x23 # accept 2, size 3
alias s5 = 0x34 # accept 3, size 4
alias s6 = 0x04 # accept 0, size 4
alias s7 = 0x44 # accept 4, size 4
# first is information about the first byte in a UTF-8 sequence.
# Indexed by the value of a sequence's first byte (0x00-0xFF); each entry packs
# an ACCEPT_RANGES index in its high nibble and the sequence length in its low
# nibble (see the `xx`/`as1`/`s1`..`s7` aliases above). Mirrors the `first`
# table from Go's unicode/utf8 package.
var first = List[UInt8](
    # 1 2 3 4 5 6 7 8 9 A B C D E F
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1, # 0x00-0x0F
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1, # 0x10-0x1F
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1, # 0x20-0x2F
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1, # 0x30-0x3F
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1, # 0x40-0x4F
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1, # 0x50-0x5F
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1, # 0x60-0x6F
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1,
    as1, # 0x70-0x7F
    # 1 2 3 4 5 6 7 8 9 A B C D E F
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx, # 0x80-0x8F
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx, # 0x90-0x9F
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx, # 0xA0-0xAF
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx, # 0xB0-0xBF
    xx,
    xx,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1, # 0xC0-0xCF
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1,
    s1, # 0xD0-0xDF
    s2,
    s3,
    s3,
    s3,
    s3,
    s3,
    s3,
    s3,
    s3,
    s3,
    s3,
    s3,
    s3,
    s4,
    s3,
    s3, # 0xE0-0xEF
    s5,
    s6,
    s6,
    s6,
    s7,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx,
    xx, # 0xF0-0xFF
)
# Number of UInt8 lanes processed per SIMD iteration on the host machine.
alias simd_width_u8 = simdwidthof[DType.uint8]()
fn rune_count_in_string(s: String) -> Int:
    """Count the number of runes (Unicode code points) in a string.

    Args:
        s: The string to count runes in.

    Returns:
        The number of runes in the string.
    """
    var ptr = DTypePointer[DType.uint8](s.unsafe_uint8_ptr())
    var byte_length = len(s)
    var rune_count = 0

    @parameter
    fn tally[width: Int](offset: Int):
        # A byte starts a new rune unless it is a UTF-8 continuation byte
        # (0b10xxxxxx), so count every byte whose top two bits differ from 0b10.
        rune_count += int(((ptr.load[width=width](offset) >> 6) != 0b10).reduce_add())

    vectorize[tally, simd_width_u8](byte_length)
    return rune_count
| lightbug_http/external/gojo/uniutf8/runes.mojo | false |
"""Almost all of the actual implementation in this module was written by @mzaks (https://github.com/mzaks)!
This would not be possible without his help.
"""
from .runes import string_iterator, rune_count_in_string
| lightbug_http/external/gojo/uniutf8/__init__.mojo | false |
<filename>lightbug_http/lightbug_http/client.mojo
from lightbug_http.http import HTTPRequest, HTTPResponse
trait Client:
    # An HTTP client: constructed with a target host/port (or defaults) and able
    # to execute a request and return the server's response.

    fn __init__(inout self) raises:
        ...

    # Construct a client targeting the given host and port.
    fn __init__(inout self, host: StringLiteral, port: Int) raises:
        ...

    # Execute `req` against the configured host and return the response.
    fn do(self, req: HTTPRequest) raises -> HTTPResponse:
        ...
| lightbug_http/lightbug_http/client.mojo | false |
<filename>lightbug_http/lightbug_http/error.mojo
from lightbug_http.http import HTTPResponse
from lightbug_http.header import ResponseHeader
from lightbug_http.io.bytes import bytes
# TODO: Custom error handlers provided by the user
@value
struct ErrorHandler:
    """Produces the HTTP response returned when request handling fails.

    Currently a fixed placeholder; see the TODO above about letting users
    supply their own handlers.
    """

    fn Error(self) -> HTTPResponse:
        var header = ResponseHeader()
        var body = bytes("TODO")
        return HTTPResponse(header, body)
<filename>lightbug_http/lightbug_http/header.mojo
from external.gojo.bufio import Reader
from lightbug_http.strings import (
strHttp11,
strHttp10,
strSlash,
strMethodGet,
rChar,
nChar,
colonChar,
whitespace,
tab
)
from lightbug_http.io.bytes import Bytes, Byte, BytesView, bytes_equal, bytes, index_byte, compare_case_insensitive, next_line, last_index_byte
alias statusOK = 200
@value
struct RequestHeader:
var disable_normalization: Bool
var no_http_1_1: Bool
var __connection_close: Bool
var __content_length: Int
var __content_length_bytes: Bytes
var __method: Bytes
var __request_uri: Bytes
var proto: Bytes
var __host: Bytes
var __content_type: Bytes
var __user_agent: Bytes
var __transfer_encoding: Bytes
var raw_headers: Bytes
var __trailer: Bytes
fn __init__(inout self) -> None:
self.disable_normalization = False
self.no_http_1_1 = False
self.__connection_close = False
self.__content_length = 0
self.__content_length_bytes = Bytes()
self.__method = Bytes()
self.__request_uri = Bytes()
self.proto = Bytes()
self.__host = Bytes()
self.__content_type = Bytes()
self.__user_agent = Bytes()
self.__transfer_encoding = Bytes()
self.raw_headers = Bytes()
self.__trailer = Bytes()
fn __init__(inout self, host: String) -> None:
self.disable_normalization = False
self.no_http_1_1 = False
self.__connection_close = False
self.__content_length = 0
self.__content_length_bytes = Bytes()
self.__method = Bytes()
self.__request_uri = Bytes()
self.proto = Bytes()
self.__host = bytes(host)
self.__content_type = Bytes()
self.__user_agent = Bytes()
self.__transfer_encoding = Bytes()
self.raw_headers = Bytes()
self.__trailer = Bytes()
fn __init__(inout self, rawheaders: Bytes) -> None:
self.disable_normalization = False
self.no_http_1_1 = False
self.__connection_close = False
self.__content_length = 0
self.__content_length_bytes = Bytes()
self.__method = Bytes()
self.__request_uri = Bytes()
self.proto = Bytes()
self.__host = Bytes()
self.__content_type = Bytes()
self.__user_agent = Bytes()
self.__transfer_encoding = Bytes()
self.raw_headers = rawheaders
self.__trailer = Bytes()
fn __init__(
inout self,
disable_normalization: Bool,
no_http_1_1: Bool,
connection_close: Bool,
content_length: Int,
content_length_bytes: Bytes,
method: Bytes,
request_uri: Bytes,
proto: Bytes,
host: Bytes,
content_type: Bytes,
user_agent: Bytes,
transfer_encoding: Bytes,
raw_headers: Bytes,
trailer: Bytes,
) -> None:
self.disable_normalization = disable_normalization
self.no_http_1_1 = no_http_1_1
self.__connection_close = connection_close
self.__content_length = content_length
self.__content_length_bytes = content_length_bytes
self.__method = method
self.__request_uri = request_uri
self.proto = proto
self.__host = host
self.__content_type = content_type
self.__user_agent = user_agent
self.__transfer_encoding = transfer_encoding
self.raw_headers = raw_headers
self.__trailer = trailer
fn set_content_type(inout self, content_type: String) -> Self:
self.__content_type = bytes(content_type)
return self
fn set_content_type_bytes(inout self, content_type: Bytes) -> Self:
self.__content_type = content_type
return self
fn content_type(self) -> BytesView:
return BytesView(unsafe_ptr=self.__content_type.unsafe_ptr(), len=self.__content_type.size)
fn set_host(inout self, host: String) -> Self:
self.__host = bytes(host)
return self
fn set_host_bytes(inout self, host: Bytes) -> Self:
self.__host = host
return self
fn host(self) -> BytesView:
return BytesView(unsafe_ptr=self.__host.unsafe_ptr(), len=self.__host.size)
fn set_user_agent(inout self, user_agent: String) -> Self:
self.__user_agent = bytes(user_agent)
return self
fn set_user_agent_bytes(inout self, user_agent: Bytes) -> Self:
self.__user_agent = user_agent
return self
fn user_agent(self) -> BytesView:
return BytesView(unsafe_ptr=self.__user_agent.unsafe_ptr(), len=self.__user_agent.size)
fn set_method(inout self, method: String) -> Self:
self.__method = bytes(method)
return self
fn set_method_bytes(inout self, method: Bytes) -> Self:
self.__method = method
return self
fn method(self) -> BytesView:
if len(self.__method) == 0:
return strMethodGet.as_bytes_slice()
return BytesView(unsafe_ptr=self.__method.unsafe_ptr(), len=self.__method.size)
fn set_protocol(inout self, proto: String) -> Self:
self.no_http_1_1 = False # hardcoded until HTTP/2 is supported
self.proto = bytes(proto)
return self
fn set_protocol_bytes(inout self, proto: Bytes) -> Self:
self.no_http_1_1 = False # hardcoded until HTTP/2 is supported
self.proto = proto
return self
fn protocol_str(self) -> String:
if len(self.proto) == 0:
return strHttp11
return String(self.proto)
fn protocol(self) -> BytesView:
if len(self.proto) == 0:
return strHttp11.as_bytes_slice()
return BytesView(unsafe_ptr=self.proto.unsafe_ptr(), len=self.proto.size)
fn content_length(self) -> Int:
return self.__content_length
fn set_content_length(inout self, content_length: Int) -> Self:
self.__content_length = content_length
return self
fn set_content_length_bytes(inout self, content_length: Bytes) -> Self:
self.__content_length_bytes = content_length
return self
fn set_request_uri(inout self, request_uri: String) -> Self:
self.__request_uri = request_uri.as_bytes_slice()
return self
fn set_request_uri_bytes(inout self, request_uri: Bytes) -> Self:
self.__request_uri = request_uri
return self
fn request_uri(self) -> BytesView:
if len(self.__request_uri) <= 1:
return BytesView(unsafe_ptr=strSlash.as_bytes_slice().unsafe_ptr(), len=2)
return BytesView(unsafe_ptr=self.__request_uri.unsafe_ptr(), len=self.__request_uri.size)
fn set_transfer_encoding(inout self, transfer_encoding: String) -> Self:
self.__transfer_encoding = bytes(transfer_encoding)
return self
fn set_transfer_encoding_bytes(inout self, transfer_encoding: Bytes) -> Self:
self.__transfer_encoding = transfer_encoding
return self
fn transfer_encoding(self) -> BytesView:
return BytesView(unsafe_ptr=self.__transfer_encoding.unsafe_ptr(), len=self.__transfer_encoding.size)
fn set_trailer(inout self, trailer: String) -> Self:
self.__trailer = bytes(trailer)
return self
fn set_trailer_bytes(inout self, trailer: Bytes) -> Self:
self.__trailer = trailer
return self
fn trailer(self) -> BytesView:
return BytesView(unsafe_ptr=self.__trailer.unsafe_ptr(), len=self.__trailer.size)
fn trailer_str(self) -> String:
return String(self.__trailer)
fn set_connection_close(inout self) -> Self:
self.__connection_close = True
return self
fn reset_connection_close(inout self) -> Self:
if self.__connection_close == False:
return self
else:
self.__connection_close = False
return self
fn connection_close(self) -> Bool:
return self.__connection_close
fn headers(self) -> String:
return String(self.raw_headers)
    fn parse_raw(inout self, inout r: Reader) raises -> Int:
        """Parse the request line and headers from the buffered reader.

        Populates this header from the bytes currently buffered in `r` and
        returns the total number of bytes consumed (request line + headers).
        Raises if the stream is empty or the header section is malformed.
        """
        # NOTE(review): below, `r.peek(...)` is destructured into (Bytes, Error),
        # but here the result of `r.peek(1)` is length-checked directly —
        # confirm this checks what is intended.
        var first_byte = r.peek(1)
        if len(first_byte) == 0:
            raise Error("Failed to read first byte from request header")
        var buf: Bytes
        var e: Error
        # Peek at everything buffered without consuming it; parsing works on
        # this snapshot and the returned count tells the caller how much to skip.
        buf, e = r.peek(r.buffered())
        if e:
            raise Error("Failed to read request header: " + e.__str__())
        if len(buf) == 0:
            raise Error("Failed to read request header, empty buffer")
        var end_of_first_line = self.parse_first_line(buf)
        var header_len = self.read_raw_headers(buf[end_of_first_line:])
        self.parse_headers(buf[end_of_first_line:])
        return end_of_first_line + header_len
    fn parse_first_line(inout self, buf: Bytes) raises -> Int:
        """Parse the request line ("METHOD URI HTTP/1.1") out of `buf`.

        Sets the method, request URI and protocol on this header and returns
        the number of bytes consumed from `buf` (including line terminators).
        Raises on a malformed or unsupported request line.
        """
        var b_next = buf
        var b = Bytes()
        # Skip any leading empty lines before the request line.
        while len(b) == 0:
            try:
                b, b_next = next_line(b_next)
            except e:
                raise Error("Failed to read first line from request, " + e.__str__())
        # The method ends at the first space.
        var first_whitespace = index_byte(b, bytes(whitespace, pop=False)[0])
        if first_whitespace <= 0:
            raise Error("Could not find HTTP request method in request line: " + String(b))
        _ = self.set_method_bytes(b[:first_whitespace])
        # The protocol starts after the last space.
        var last_whitespace = last_index_byte(b, bytes(whitespace, pop=False)[0]) + 1
        # NOTE(review): after the `+ 1`, last_whitespace is -1+1 == 0 when no
        # space is found, so the `< 0` branch is unreachable; the `== 0` branch
        # handles the not-found case. Confirm the intended error message mapping.
        if last_whitespace < 0:
            raise Error("Could not find request target or HTTP version in request line: " + String(b))
        elif last_whitespace == 0:
            raise Error("Request URI is empty: " + String(b))
        var proto = b[last_whitespace :]
        # Only HTTP/1.1 is supported; compare lengths against "HTTP/1.1".
        if len(proto) != len(bytes(strHttp11, pop=False)):
            raise Error("Invalid protocol, HTTP version not supported: " + String(proto))
        _ = self.set_protocol_bytes(proto)
        _ = self.set_request_uri_bytes(b[first_whitespace+1:last_whitespace])
        # Bytes consumed == everything up to the start of the next line.
        return len(buf) - len(b_next)
fn parse_headers(inout self, buf: Bytes) raises -> None:
_ = self.set_content_length(-2)
var s = headerScanner()
s.set_b(buf)
while s.next():
if len(s.key()) > 0:
self.parse_header(s.key(), s.value())
fn parse_header(inout self, key: Bytes, value: Bytes) raises -> None:
if index_byte(key, bytes(colonChar, pop=False)[0]) == -1 or index_byte(key, bytes(tab, pop=False)[0]) != -1:
raise Error("Invalid header key: " + String(key))
var key_first = key[0].__xor__(0x20)
if key_first == bytes("h", pop=False)[0] or key_first == bytes("H", pop=False)[0]:
if compare_case_insensitive(key, bytes("host", pop=False)):
_ = self.set_host_bytes(bytes(value))
return
elif key_first == bytes("u", pop=False)[0] or key_first == bytes("U", pop=False)[0]:
if compare_case_insensitive(key, bytes("user-agent", pop=False)):
_ = self.set_user_agent_bytes(bytes(value))
return
elif key_first == bytes("c", pop=False)[0] or key_first == bytes("C", pop=False)[0]:
if compare_case_insensitive(key, bytes("content-type", pop=False)):
_ = self.set_content_type_bytes(bytes(value))
return
if compare_case_insensitive(key, bytes("content-length", pop=False)):
if self.content_length() != -1:
_ = self.set_content_length(atol(value))
return
if compare_case_insensitive(key, bytes("connection", pop=False)):
if compare_case_insensitive(bytes(value), bytes("close", pop=False)):
_ = self.set_connection_close()
else:
_ = self.reset_connection_close()
return
elif key_first == bytes("t", pop=False)[0] or key_first == bytes("T", pop=False)[0]:
if compare_case_insensitive(key, bytes("transfer-encoding", pop=False)):
_ = self.set_transfer_encoding_bytes(bytes(value, pop=False))
return
if compare_case_insensitive(key, bytes("trailer", pop=False)):
_ = self.set_trailer_bytes(bytes(value, pop=False))
return
if self.content_length() < 0:
_ = self.set_content_length(0)
return
    fn read_raw_headers(inout self, buf: Bytes) raises -> Int:
        """Scan `buf` for the end of the header block (an empty line).

        Appends the raw header bytes to `self.raw_headers` and returns the
        number of bytes the header block occupies (terminators included).
        Raises if no terminating empty line is found.
        """
        var n = index_byte(buf, bytes(nChar, pop=False)[0])
        if n == -1:
            self.raw_headers = self.raw_headers[:0]
            raise Error("Failed to find a newline in headers")
        if n == 0 or (n == 1 and (buf[0] == bytes(rChar, pop=False)[0])):
            # empty line -> end of headers
            return n + 1
        n += 1
        var b = buf
        var m = n
        # Walk newline-to-newline until an empty line ("\r\n" or "\n") is hit.
        while True:
            b = b[m:]
            m = index_byte(b, bytes(nChar, pop=False)[0])
            if m == -1:
                raise Error("Failed to find a newline in headers")
            m += 1
            n += m
            if m == 2 and (b[0] == bytes(rChar, pop=False)[0]) or m == 1:
                self.raw_headers = self.raw_headers + buf[:n]
                return n
@value
struct ResponseHeader:
var disable_normalization: Bool
var no_http_1_1: Bool
var __connection_close: Bool
var __status_code: Int
var __status_message: Bytes
var __protocol: Bytes
var __content_length: Int
var __content_length_bytes: Bytes
var __content_type: Bytes
var __content_encoding: Bytes
var __server: Bytes
var __trailer: Bytes
var raw_headers: Bytes
fn __init__(
inout self,
) -> None:
self.disable_normalization = False
self.no_http_1_1 = False
self.__connection_close = False
self.__status_code = 200
self.__status_message = Bytes()
self.__protocol = Bytes()
self.__content_length = 0
self.__content_length_bytes = Bytes()
self.__content_type = Bytes()
self.__content_encoding = Bytes()
self.__server = Bytes()
self.__trailer = Bytes()
self.raw_headers = Bytes()
fn __init__(
inout self,
raw_headers: Bytes,
) -> None:
self.disable_normalization = False
self.no_http_1_1 = False
self.__connection_close = False
self.__status_code = 200
self.__status_message = Bytes()
self.__protocol = Bytes()
self.__content_length = 0
self.__content_length_bytes = Bytes()
self.__content_type = Bytes()
self.__content_encoding = Bytes()
self.__server = Bytes()
self.__trailer = Bytes()
self.raw_headers = raw_headers
fn __init__(
inout self,
status_code: Int,
status_message: Bytes,
content_type: Bytes,
) -> None:
self.disable_normalization = False
self.no_http_1_1 = False
self.__connection_close = False
self.__status_code = status_code
self.__status_message = status_message
self.__protocol = Bytes()
self.__content_length = 0
self.__content_length_bytes = Bytes()
self.__content_type = content_type
self.__content_encoding = Bytes()
self.__server = Bytes()
self.__trailer = Bytes()
self.raw_headers = Bytes()
fn __init__(
inout self,
status_code: Int,
status_message: Bytes,
content_type: Bytes,
content_encoding: Bytes,
) -> None:
self.disable_normalization = False
self.no_http_1_1 = False
self.__connection_close = False
self.__status_code = status_code
self.__status_message = status_message
self.__protocol = Bytes()
self.__content_length = 0
self.__content_length_bytes = Bytes()
self.__content_type = content_type
self.__content_encoding = content_encoding
self.__server = Bytes()
self.__trailer = Bytes()
self.raw_headers = Bytes()
fn __init__(
inout self,
connection_close: Bool,
status_code: Int,
status_message: Bytes,
content_type: Bytes,
) -> None:
self.disable_normalization = False
self.no_http_1_1 = False
self.__connection_close = connection_close
self.__status_code = status_code
self.__status_message = status_message
self.__protocol = Bytes()
self.__content_length = 0
self.__content_length_bytes = Bytes()
self.__content_type = content_type
self.__content_encoding = Bytes()
self.__server = Bytes()
self.__trailer = Bytes()
self.raw_headers = Bytes()
fn __init__(
inout self,
disable_normalization: Bool,
no_http_1_1: Bool,
connection_close: Bool,
status_code: Int,
status_message: Bytes,
protocol: Bytes,
content_length: Int,
content_length_bytes: Bytes,
content_type: Bytes,
content_encoding: Bytes,
server: Bytes,
trailer: Bytes,
) -> None:
self.disable_normalization = disable_normalization
self.no_http_1_1 = no_http_1_1
self.__connection_close = connection_close
self.__status_code = status_code
self.__status_message = status_message
self.__protocol = protocol
self.__content_length = content_length
self.__content_length_bytes = content_length_bytes
self.__content_type = content_type
self.__content_encoding = content_encoding
self.__server = server
self.__trailer = trailer
self.raw_headers = Bytes()
fn set_status_code(inout self, code: Int) -> Self:
self.__status_code = code
return self
fn status_code(self) -> Int:
if self.__status_code == 0:
return statusOK
return self.__status_code
fn set_status_message(inout self, message: Bytes) -> Self:
self.__status_message = message
return self
fn status_message(self) -> BytesView:
return BytesView(unsafe_ptr=self.__status_message.unsafe_ptr(), len=self.__status_message.size)
fn status_message_str(self) -> String:
return String(self.status_message())
fn content_type(self) -> BytesView:
return BytesView(unsafe_ptr=self.__content_type.unsafe_ptr(), len=self.__content_type.size)
fn set_content_type(inout self, content_type: String) -> Self:
self.__content_type = bytes(content_type)
return self
fn set_content_type_bytes(inout self, content_type: Bytes) -> Self:
self.__content_type = content_type
return self
fn content_encoding(self) -> BytesView:
return BytesView(unsafe_ptr=self.__content_encoding.unsafe_ptr(), len=self.__content_encoding.size)
fn set_content_encoding(inout self, content_encoding: String) -> Self:
self.__content_encoding = bytes(content_encoding)
return self
fn set_content_encoding_bytes(inout self, content_encoding: Bytes) -> Self:
self.__content_encoding = content_encoding
return self
fn content_length(self) -> Int:
return self.__content_length
fn set_content_length(inout self, content_length: Int) -> Self:
self.__content_length = content_length
return self
fn set_content_length_bytes(inout self, content_length: Bytes) -> Self:
self.__content_length_bytes = content_length
return self
fn server(self) -> BytesView:
return BytesView(unsafe_ptr=self.__server.unsafe_ptr(), len=self.__server.size)
fn set_server(inout self, server: String) -> Self:
self.__server = bytes(server)
return self
fn set_server_bytes(inout self, server: Bytes) -> Self:
self.__server = server
return self
fn set_protocol(inout self, proto: String) -> Self:
self.no_http_1_1 = False # hardcoded until HTTP/2 is supported
self.__protocol = bytes(proto)
return self
fn set_protocol_bytes(inout self, protocol: Bytes) -> Self:
self.no_http_1_1 = False # hardcoded until HTTP/2 is supported
self.__protocol = protocol
return self
fn protocol_str(self) -> String:
if len(self.__protocol) == 0:
return strHttp11
return String(self.__protocol)
fn protocol(self) -> BytesView:
if len(self.__protocol) == 0:
return BytesView(unsafe_ptr=strHttp11.as_bytes_slice().unsafe_ptr(), len=8)
return BytesView(unsafe_ptr=self.__protocol.unsafe_ptr(), len=self.__protocol.size)
fn set_trailer(inout self, trailer: String) -> Self:
self.__trailer = bytes(trailer)
return self
fn set_trailer_bytes(inout self, trailer: Bytes) -> Self:
self.__trailer = trailer
return self
fn trailer(self) -> BytesView:
return BytesView(unsafe_ptr=self.__trailer.unsafe_ptr(), len=self.__trailer.size)
fn trailer_str(self) -> String:
return String(self.trailer())
fn set_connection_close(inout self) -> Self:
self.__connection_close = True
return self
fn reset_connection_close(inout self) -> Self:
if self.__connection_close == False:
return self
else:
self.__connection_close = False
return self
fn connection_close(self) -> Bool:
return self.__connection_close
fn headers(self) -> String:
return String(self.raw_headers)
fn parse_raw(inout self, inout r: Reader) raises -> Int:
var first_byte = r.peek(1)
if len(first_byte) == 0:
raise Error("Failed to read first byte from response header")
var buf: Bytes
var e: Error
buf, e = r.peek(r.buffered())
if e:
raise Error("Failed to read response header: " + e.__str__())
if len(buf) == 0:
raise Error("Failed to read response header, empty buffer")
var end_of_first_line = self.parse_first_line(buf)
var header_len = self.read_raw_headers(buf[end_of_first_line:])
self.parse_headers(buf[end_of_first_line:])
return end_of_first_line + header_len
    fn parse_first_line(inout self, buf: Bytes) raises -> Int:
        """Parse the status line ("HTTP/1.1 200 OK") out of `buf`.

        Sets the protocol, status code and status message on this header and
        returns the number of bytes consumed from `buf`.
        """
        var b_next = buf
        var b = Bytes()
        # Skip any leading empty lines before the status line.
        while len(b) == 0:
            try:
                b, b_next = next_line(b_next)
            except e:
                raise Error("Failed to read first line from response, " + e.__str__())
        var first_whitespace = index_byte(b, bytes(whitespace, pop=False)[0])
        if first_whitespace <= 0:
            raise Error("Could not find HTTP version in response line: " + String(b))
        # NOTE(review): the slice end is first_whitespace+2, not first_whitespace,
        # and it is passed to set_protocol (which takes a String) rather than
        # set_protocol_bytes — presumably to keep a trailing terminator byte for
        # this codebase's null-terminated Bytes convention; confirm.
        _ = self.set_protocol(b[:first_whitespace+2])
        var end_of_status_code = first_whitespace+5 # status code is always 3 digits, this calculation includes null terminator
        var status_code = atol(b[first_whitespace+1:end_of_status_code])
        _ = self.set_status_code(status_code)
        var status_text = b[end_of_status_code :]
        # Only store a status message if there is more than a terminator left.
        if len(status_text) > 1:
            _ = self.set_status_message(status_text)
        return len(buf) - len(b_next)
fn parse_headers(inout self, buf: Bytes) raises -> None:
_ = self.set_content_length(-2)
var s = headerScanner()
s.set_b(buf)
while s.next():
if len(s.key()) > 0:
self.parse_header(s.key(), s.value())
fn parse_header(inout self, key: Bytes, value: Bytes) raises -> None:
if index_byte(key, bytes(colonChar, pop=False)[0]) == -1 or index_byte(key, bytes(tab, pop=False)[0]) != -1:
raise Error("Invalid header key: " + String(key))
var key_first = key[0].__xor__(0x20)
if key_first == bytes("c", pop=False)[0] or key_first == bytes("C", pop=False)[0]:
if compare_case_insensitive(key, bytes("content-type", pop=False)):
_ = self.set_content_type_bytes(bytes(value))
return
if compare_case_insensitive(key, bytes("content-encoding", pop=False)):
_ = self.set_content_encoding_bytes(bytes(value))
return
if compare_case_insensitive(key, bytes("content-length", pop=False)):
if self.content_length() != -1:
var content_length = value
_ = self.set_content_length(atol(content_length))
_ = self.set_content_length_bytes(bytes(content_length))
return
if compare_case_insensitive(key, bytes("connection", pop=False)):
if compare_case_insensitive(bytes(value), bytes("close", pop=False)):
_ = self.set_connection_close()
else:
_ = self.reset_connection_close()
return
elif key_first == bytes("s", pop=False)[0] or key_first == bytes("S", pop=False)[0]:
if compare_case_insensitive(key, bytes("server", pop=False)):
_ = self.set_server_bytes(bytes(value))
return
elif key_first == bytes("t", pop=False)[0] or key_first == bytes("T", pop=False)[0]:
if compare_case_insensitive(key, bytes("transfer-encoding", pop=False)):
if not compare_case_insensitive(value, bytes("identity", pop=False)):
_ = self.set_content_length(-1)
return
if compare_case_insensitive(key, bytes("trailer", pop=False)):
_ = self.set_trailer_bytes(bytes(value))
fn read_raw_headers(inout self, buf: Bytes) raises -> Int:
var n = index_byte(buf, bytes(nChar, pop=False)[0])
if n == -1:
self.raw_headers = self.raw_headers[:0]
raise Error("Failed to find a newline in headers")
if n == 0 or (n == 1 and (buf[0] == bytes(rChar, pop=False)[0])):
# empty line -> end of headers
return n + 1
n += 1
var b = buf
var m = n
while True:
b = b[m:]
m = index_byte(b, bytes(nChar, pop=False)[0])
if m == -1:
raise Error("Failed to find a newline in headers")
m += 1
n += m
if m == 2 and (b[0] == bytes(rChar, pop=False)[0]) or m == 1:
self.raw_headers = self.raw_headers + buf[:n]
return n
struct headerScanner:
var __b: Bytes
var __key: Bytes
var __value: Bytes
var __subslice_len: Int
var disable_normalization: Bool
var __next_colon: Int
var __next_line: Int
var __initialized: Bool
fn __init__(inout self) -> None:
self.__b = Bytes()
self.__key = Bytes()
self.__value = Bytes()
self.__subslice_len = 0
self.disable_normalization = False
self.__next_colon = 0
self.__next_line = 0
self.__initialized = False
fn b(self) -> Bytes:
return self.__b
fn set_b(inout self, b: Bytes) -> None:
self.__b = b
fn key(self) -> Bytes:
return self.__key
fn set_key(inout self, key: Bytes) -> None:
self.__key = key
fn value(self) -> Bytes:
return self.__value
fn set_value(inout self, value: Bytes) -> None:
self.__value = value
fn subslice_len(self) -> Int:
return self.__subslice_len
fn set_subslice_len(inout self, n: Int) -> None:
self.__subslice_len = n
fn next_colon(self) -> Int:
return self.__next_colon
fn set_next_colon(inout self, n: Int) -> None:
self.__next_colon = n
fn next_line(self) -> Int:
return self.__next_line
fn set_next_line(inout self, n: Int) -> None:
self.__next_line = n
fn initialized(self) -> Bool:
return self.__initialized
fn set_initialized(inout self) -> None:
self.__initialized = True
    fn next(inout self) raises -> Bool:
        """Advance to the next "key: value" header line.

        Returns True and exposes the parsed pair via key()/value(), or False
        when the terminating empty line is reached. Mirrors fasthttp's
        headerScanner. Raises on malformed input (missing colon/newline).
        """
        if not self.initialized():
            self.set_next_colon(-1)
            self.set_next_line(-1)
            self.set_initialized()
        var b_len = len(self.b())
        # "\r\n" or "\n" at the start marks the end of the header block.
        if b_len >= 2 and (self.b()[0] == bytes(rChar, pop=False)[0]) and (self.b()[1] == bytes(nChar, pop=False)[0]):
            self.set_b(self.b()[2:])
            self.set_subslice_len(2)
            return False
        if b_len >= 1 and (self.b()[0] == bytes(nChar, pop=False)[0]):
            self.set_b(self.b()[1:])
            self.set_subslice_len(self.subslice_len() + 1)
            return False
        var colon: Int
        # Use the colon position cached by a previous call, if any.
        if self.next_colon() >= 0:
            colon = self.next_colon()
            self.set_next_colon(-1)
        else:
            colon = index_byte(self.b(), bytes(colonChar, pop=False)[0])
            var newline = index_byte(self.b(), bytes(nChar, pop=False)[0])
            if newline < 0:
                raise Error("Invalid header, did not find a newline at the end of the header")
            if newline < colon:
                raise Error("Invalid header, found a newline before the colon")
        if colon < 0:
            raise Error("Invalid header, did not find a colon")
        var jump_to = colon + 1
        self.set_key(self.b()[:jump_to])
        # Skip spaces between the colon and the value; each skipped byte also
        # shifts the cached newline position left.
        while len(self.b()) > jump_to and (self.b()[jump_to] == bytes(whitespace, pop=False)[0]):
            jump_to += 1
            self.set_next_line(self.next_line() - 1)
        self.set_subslice_len(self.subslice_len() + jump_to)
        self.set_b(self.b()[jump_to:])
        # Use the newline position cached by a previous call, if any.
        if self.next_line() >= 0:
            jump_to = self.next_line()
            self.set_next_line(-1)
        else:
            jump_to = index_byte(self.b(), bytes(nChar, pop=False)[0])
            if jump_to < 0:
                raise Error("Invalid header, did not find a newline")
        jump_to += 1
        self.set_value(self.b()[:jump_to])
        self.set_subslice_len(self.subslice_len() + jump_to)
        self.set_b(self.b()[jump_to:])
        # Trim a trailing "\r" and any trailing spaces from the value.
        if jump_to > 0 and (self.value()[jump_to-1] == bytes(rChar, pop=False)[0]):
            jump_to -= 1
        while jump_to > 0 and (self.value()[jump_to-1] == bytes(whitespace, pop=False)[0]):
            jump_to -= 1
        self.set_value(self.value()[:jump_to])
        return True
| lightbug_http/lightbug_http/header.mojo | false |
from time import now
from external.morrow import Morrow
from external.gojo.strings.builder import StringBuilder
from external.gojo.bufio import Reader
from lightbug_http.uri import URI
from lightbug_http.io.bytes import Bytes, BytesView, bytes
from lightbug_http.header import RequestHeader, ResponseHeader
from lightbug_http.io.sync import Duration
from lightbug_http.net import Addr, TCPAddr
from lightbug_http.strings import strHttp11, strHttp, strSlash, whitespace, rChar, nChar
trait Request:
    """Interface for an HTTP request: host/URI accessors and connection options."""
    fn __init__(inout self, uri: URI):
        ...
    fn __init__(
        inout self,
        header: RequestHeader,
        uri: URI,
        body: Bytes,
        parsed_uri: Bool,
        server_is_tls: Bool,
        timeout: Duration,
        disable_redirect_path_normalization: Bool,
    ):
        ...
    # Setters return Self so calls can be chained.
    fn set_host(inout self, host: String) -> Self:
        ...
    fn set_host_bytes(inout self, host: Bytes) -> Self:
        ...
    fn host(self) -> String:
        ...
    fn set_request_uri(inout self, request_uri: String) -> Self:
        ...
    fn set_request_uri_bytes(inout self, request_uri: Bytes) -> Self:
        ...
    fn request_uri(inout self) -> String:
        ...
    # Marks the request to close the connection after the response.
    fn set_connection_close(inout self) -> Self:
        ...
    fn connection_close(self) -> Bool:
        ...
trait Response:
    """Interface for an HTTP response: status code and connection options."""
    fn __init__(inout self, header: ResponseHeader, body: Bytes):
        ...
    fn set_status_code(inout self, status_code: Int) -> Self:
        ...
    fn status_code(self) -> Int:
        ...
    # Marks the response to close the connection after it is sent.
    fn set_connection_close(inout self) -> Self:
        ...
    fn connection_close(self) -> Bool:
        ...
@value
struct HTTPRequest(Request):
    """An HTTP request: header, URI, raw body bytes, and connection options."""
    var header: RequestHeader
    var __uri: URI
    var body_raw: Bytes
    # True once __uri has been parsed from the raw request-URI bytes.
    var parsed_uri: Bool
    var server_is_tls: Bool
    var timeout: Duration
    var disable_redirect_path_normalization: Bool
    fn __init__(inout self, uri: URI):
        # Defaults: loopback Host header, empty body, no TLS, zero timeout.
        self.header = RequestHeader("127.0.0.1")
        self.__uri = uri
        self.body_raw = Bytes()
        self.parsed_uri = False
        self.server_is_tls = False
        self.timeout = Duration()
        self.disable_redirect_path_normalization = False
    fn __init__(inout self, uri: URI, headers: RequestHeader):
        self.header = headers
        self.__uri = uri
        self.body_raw = Bytes()
        self.parsed_uri = False
        self.server_is_tls = False
        self.timeout = Duration()
        self.disable_redirect_path_normalization = False
    fn __init__(inout self, uri: URI, buf: Bytes, headers: RequestHeader):
        # `buf` becomes the raw request body.
        self.header = headers
        self.__uri = uri
        self.body_raw = buf
        self.parsed_uri = False
        self.server_is_tls = False
        self.timeout = Duration()
        self.disable_redirect_path_normalization = False
    fn __init__(
        inout self,
        header: RequestHeader,
        uri: URI,
        body: Bytes,
        parsed_uri: Bool,
        server_is_tls: Bool,
        timeout: Duration,
        disable_redirect_path_normalization: Bool,
    ):
        self.header = header
        self.__uri = uri
        self.body_raw = body
        self.parsed_uri = parsed_uri
        self.server_is_tls = server_is_tls
        self.timeout = timeout
        self.disable_redirect_path_normalization = disable_redirect_path_normalization
    fn get_body_bytes(self) -> BytesView:
        # Zero-copy view over body_raw; valid only while self is alive.
        return BytesView(unsafe_ptr=self.body_raw.unsafe_ptr(), len=self.body_raw.size)
    fn set_body_bytes(inout self, body: Bytes) -> Self:
        self.body_raw = body
        return self
    fn set_host(inout self, host: String) -> Self:
        _ = self.__uri.set_host(host)
        return self
    fn set_host_bytes(inout self, host: Bytes) -> Self:
        _ = self.__uri.set_host_bytes(host)
        return self
    fn host(self) -> String:
        return self.__uri.host_str()
    fn set_request_uri(inout self, request_uri: String) -> Self:
        _ = self.header.set_request_uri(request_uri.as_bytes())
        # Raw URI changed, so any previously parsed URI is stale.
        self.parsed_uri = False
        return self
    fn set_request_uri_bytes(inout self, request_uri: Bytes) -> Self:
        # NOTE(review): unlike set_request_uri, this does not reset
        # parsed_uri — confirm whether that is intentional.
        _ = self.header.set_request_uri_bytes(request_uri)
        return self
    fn request_uri(inout self) -> String:
        # If the URI was parsed, refresh the header's raw request-URI from it.
        if self.parsed_uri:
            _ = self.set_request_uri_bytes(self.__uri.request_uri())
        return self.header.request_uri()
    fn uri(self) -> URI:
        return self.__uri
    fn set_connection_close(inout self) -> Self:
        _ = self.header.set_connection_close()
        return self
    fn connection_close(self) -> Bool:
        return self.header.connection_close()
    fn read_body(inout self, inout r: Reader, content_length: Int, header_len: Int, max_body_size: Int) raises -> None:
        """Read the request body from `r` after skipping `header_len` bytes.

        Raises when `content_length` exceeds `max_body_size`.
        NOTE(review): takes whatever is currently buffered in `r`, not
        exactly `content_length` bytes — assumes the full body is already
        buffered; confirm against the caller.
        """
        if content_length > max_body_size:
            raise Error("Request body too large")
        _ = r.discard(header_len)
        var body_buf: Bytes
        body_buf, _ = r.peek(r.buffered())
        _ = self.set_body_bytes(bytes(body_buf))
@value
struct HTTPResponse(Response):
    """An HTTP response: header, raw body bytes, and local/remote addresses."""
    var header: ResponseHeader
    var stream_immediate_header_flush: Bool
    var stream_body: Bool
    var body_raw: Bytes
    var skip_reading_writing_body: Bool
    # Remote and local TCP addresses of the connection that produced this response.
    var raddr: TCPAddr
    var laddr: TCPAddr
    fn __init__(inout self, body_bytes: Bytes):
        # Defaults to 200 OK with a generic binary content type.
        self.header = ResponseHeader(
            200,
            bytes("OK"),
            bytes("application/octet-stream"),
        )
        self.stream_immediate_header_flush = False
        self.stream_body = False
        self.body_raw = body_bytes
        self.skip_reading_writing_body = False
        self.raddr = TCPAddr()
        self.laddr = TCPAddr()
    fn __init__(inout self, header: ResponseHeader, body_bytes: Bytes):
        self.header = header
        self.stream_immediate_header_flush = False
        self.stream_body = False
        self.body_raw = body_bytes
        self.skip_reading_writing_body = False
        self.raddr = TCPAddr()
        self.laddr = TCPAddr()
    fn get_body_bytes(self) -> BytesView:
        # Zero-copy view over body_raw; valid only while self is alive.
        return BytesView(unsafe_ptr=self.body_raw.unsafe_ptr(), len=self.body_raw.size)
    fn get_body(self) -> Bytes:
        # Owned copy of the body.
        return self.body_raw
    fn set_body_bytes(inout self, body: Bytes) -> Self:
        self.body_raw = body
        return self
    fn set_status_code(inout self, status_code: Int) -> Self:
        _ = self.header.set_status_code(status_code)
        return self
    fn status_code(self) -> Int:
        return self.header.status_code()
    fn set_connection_close(inout self) -> Self:
        _ = self.header.set_connection_close()
        return self
    fn connection_close(self) -> Bool:
        return self.header.connection_close()
    fn read_body(inout self, inout r: Reader, header_len: Int) raises -> None:
        """Skip `header_len` bytes in `r`, then take the buffered rest as the body.

        NOTE(review): assumes the full body is already buffered in `r`.
        """
        _ = r.discard(header_len)
        var body_buf: Bytes
        body_buf, _ = r.peek(r.buffered())
        _ = self.set_body_bytes(bytes(body_buf))
fn OK(body: StringLiteral) -> HTTPResponse:
    """200 OK with a text/plain body from a string literal."""
    var header = ResponseHeader(200, bytes("OK"), bytes("text/plain"))
    return HTTPResponse(header, bytes(body))
fn OK(body: StringLiteral, content_type: String) -> HTTPResponse:
    """200 OK with the given content type and a string-literal body."""
    var header = ResponseHeader(200, bytes("OK"), bytes(content_type))
    return HTTPResponse(header, bytes(body))
fn OK(body: String) -> HTTPResponse:
    """200 OK with a text/plain String body."""
    var header = ResponseHeader(200, bytes("OK"), bytes("text/plain"))
    return HTTPResponse(header, bytes(body))
fn OK(body: String, content_type: String) -> HTTPResponse:
    """200 OK with the given content type and a String body."""
    var header = ResponseHeader(200, bytes("OK"), bytes(content_type))
    return HTTPResponse(header, bytes(body))
fn OK(body: Bytes) -> HTTPResponse:
    """200 OK with a text/plain raw-bytes body."""
    var header = ResponseHeader(200, bytes("OK"), bytes("text/plain"))
    return HTTPResponse(header, body)
fn OK(body: Bytes, content_type: String) -> HTTPResponse:
    """200 OK with the given content type and a raw-bytes body."""
    var header = ResponseHeader(200, bytes("OK"), bytes(content_type))
    return HTTPResponse(header, body)
fn OK(body: Bytes, content_type: String, content_encoding: String) -> HTTPResponse:
    """200 OK with explicit content type and encoding and a raw-bytes body."""
    var header = ResponseHeader(200, bytes("OK"), bytes(content_type), bytes(content_encoding))
    return HTTPResponse(header, body)
fn NotFound(path: String) -> HTTPResponse:
    """404 Not Found response naming the missing path in the body."""
    var message = "path " + path + " not found"
    var header = ResponseHeader(404, bytes("Not Found"), bytes("text/plain"))
    return HTTPResponse(header, bytes(message))
fn encode(req: HTTPRequest) raises -> StringSlice[False, ImmutableStaticLifetime]:
    """Serialize an HTTPRequest into an HTTP/1.1 request message.

    Writes the request line, Host, optional Content-Type/Content-Length,
    and Connection headers, then the body, CRLF-separated.
    NOTE(review): the returned StringSlice points into the local builder's
    buffer — confirm render() keeps that allocation alive past return.
    """
    var builder = StringBuilder()
    # Request line: "<METHOD> <path> <protocol>\r\n"
    _ = builder.write(req.header.method())
    _ = builder.write_string(whitespace)
    # path_bytes() is "/" plus terminator when the path is empty, hence > 1.
    if len(req.uri().path_bytes()) > 1:
        _ = builder.write_string(req.uri().path())
    else:
        _ = builder.write_string(strSlash)
    _ = builder.write_string(whitespace)
    _ = builder.write(req.header.protocol())
    _ = builder.write_string(rChar)
    _ = builder.write_string(nChar)
    if len(req.header.host()) > 0:
        _ = builder.write_string("Host: ")
        _ = builder.write(req.header.host())
        _ = builder.write_string(rChar)
        _ = builder.write_string(nChar)
    # Body-dependent headers only when a body is present.
    if len(req.body_raw) > 0:
        if len(req.header.content_type()) > 0:
            _ = builder.write_string("Content-Type: ")
            _ = builder.write(req.header.content_type())
            _ = builder.write_string(rChar)
            _ = builder.write_string(nChar)
        _ = builder.write_string("Content-Length: ")
        _ = builder.write_string(len(req.body_raw).__str__())
        _ = builder.write_string(rChar)
        _ = builder.write_string(nChar)
    _ = builder.write_string("Connection: ")
    if req.connection_close():
        _ = builder.write_string("close")
    else:
        _ = builder.write_string("keep-alive")
    _ = builder.write_string(rChar)
    _ = builder.write_string(nChar)
    # Blank line terminating the header block.
    _ = builder.write_string(rChar)
    _ = builder.write_string(nChar)
    if len(req.body_raw) > 0:
        _ = builder.write(req.get_body_bytes())
    return StringSlice[False, ImmutableStaticLifetime](unsafe_from_utf8_ptr=builder.render().unsafe_ptr(), len=builder.size)
fn encode(res: HTTPResponse) raises -> Bytes:
    """Serialize an HTTPResponse into raw HTTP/1.1 wire bytes.

    Emits the status line, then Server, Content-Type, optional
    Content-Encoding, Content-Length, Connection and Date headers,
    a blank line, and the body.
    """
    # Date header value; fall back to a raw timestamp if Morrow fails.
    var date_value = String()
    try:
        date_value = str(Morrow.utcnow())
    except e:
        print("Error getting current time: " + str(e))
        date_value = str(now())
    var crlf = String(rChar) + nChar
    var out = StringBuilder()
    # Status line: "<protocol> <code> <message>\r\n"
    _ = out.write(res.header.protocol())
    _ = out.write_string(whitespace)
    _ = out.write_string(str(res.header.status_code()))
    _ = out.write_string(whitespace)
    _ = out.write(res.header.status_message())
    _ = out.write_string(crlf)
    _ = out.write_string("Server: lightbug_http")
    _ = out.write_string(crlf)
    _ = out.write_string("Content-Type: ")
    _ = out.write(res.header.content_type())
    _ = out.write_string(crlf)
    if len(res.header.content_encoding()) > 0:
        _ = out.write_string("Content-Encoding: ")
        _ = out.write(res.header.content_encoding())
        _ = out.write_string(crlf)
    if len(res.body_raw) > 0:
        _ = out.write_string("Content-Length: ")
        _ = out.write_string(str(len(res.body_raw)))
        _ = out.write_string(crlf)
    else:
        _ = out.write_string("Content-Length: 0")
        _ = out.write_string(crlf)
    _ = out.write_string("Connection: ")
    if res.connection_close():
        _ = out.write_string("close")
    else:
        _ = out.write_string("keep-alive")
    _ = out.write_string(crlf)
    _ = out.write_string("Date: ")
    _ = out.write_string(date_value)
    _ = out.write_string(crlf)
    # Blank line terminating the header block.
    _ = out.write_string(crlf)
    if len(res.body_raw) > 0:
        _ = out.write(res.get_body_bytes())
    return out.render().as_bytes_slice()
fn split_http_string(buf: Bytes) raises -> (String, String, String):
    """Split a raw HTTP message into (first line, header block, body).

    The head and body are separated by a blank line ("\r\n\r\n"); the
    first line and the remaining headers by the first "\r\n". Missing
    parts come back as empty strings.
    """
    var raw = String(buf)
    var head_and_body = raw.split("\r\n\r\n")
    if len(head_and_body) == 0:
        raise Error("Invalid HTTP string, did not find a double newline")
    var body = String()
    if len(head_and_body) > 1:
        body = head_and_body[1]
    # Only split off the first line; the rest stays one header block.
    var head_parts = head_and_body[0].split("\r\n", 1)
    if len(head_parts) == 0:
        raise Error("Invalid HTTP string, did not find a newline in the first line")
    var first_line = head_parts[0]
    var headers = String()
    if len(head_parts) > 1:
        headers = head_parts[1]
    return (first_line, headers, body)
from lightbug_http.strings import NetworkType
from lightbug_http.io.bytes import Bytes
from lightbug_http.io.sync import Duration
from lightbug_http.sys.net import SysConnection
from external.libc import (
c_void,
AF_INET,
sockaddr,
sockaddr_in,
socklen_t,
getsockname,
getpeername,
ntohs,
inet_ntop
)
# Default per-connection read buffer size, in bytes.
alias default_buffer_size = 4096
# Default TCP keep-alive interval; Duration is nanoseconds.
alias default_tcp_keep_alive = Duration(15 * 1000 * 1000 * 1000) # 15 seconds
trait Net(DefaultConstructible):
    """Interface for a network stack able to create listeners."""
    fn __init__(inout self) raises:
        ...
    fn __init__(inout self, keep_alive: Duration) raises:
        ...
    # A listen method should be implemented on structs that implement Net.
    # Signature is not enforced for now.
    # fn listen(inout self, network: String, addr: String) raises -> Listener:
    # ...
trait ListenConfig:
    """Configuration object used to open listeners with a keep-alive setting."""
    fn __init__(inout self, keep_alive: Duration) raises:
        ...
    # A listen method should be implemented on structs that implement ListenConfig.
    # Signature is not enforced for now.
    # fn listen(inout self, network: String, address: String) raises -> Listener:
    # ...
trait Listener(Movable):
    """Interface for a bound socket that accepts incoming connections."""
    fn __init__(inout self) raises:
        ...
    fn __init__(inout self, addr: TCPAddr) raises:
        ...
    # Blocks until a client connects; returns the accepted connection.
    fn accept(borrowed self) raises -> SysConnection:
        ...
    fn close(self) raises:
        ...
    fn addr(self) -> TCPAddr:
        ...
trait Connection(Movable):
    """Interface for an established byte-stream connection."""
    fn __init__(inout self, laddr: String, raddr: String) raises:
        ...
    fn __init__(inout self, laddr: TCPAddr, raddr: TCPAddr) raises:
        ...
    # Reads into buf; returns the number of bytes read.
    fn read(self, inout buf: Bytes) raises -> Int:
        ...
    # Writes buf; returns the number of bytes written.
    fn write(self, buf: Bytes) raises -> Int:
        ...
    fn close(self) raises:
        ...
    fn local_addr(inout self) raises -> TCPAddr:
        ...
    fn remote_addr(self) raises -> TCPAddr:
        ...
trait Addr(CollectionElement):
    """Interface for a network address with a type name and string form."""
    fn __init__(inout self):
        ...
    fn __init__(inout self, ip: String, port: Int):
        ...
    # Name of the network, e.g. "tcp".
    fn network(self) -> String:
        ...
    # "host:port" representation.
    fn string(self) -> String:
        ...
# Convenience alias for a list of TCP addresses.
alias TCPAddrList = List[TCPAddr]
@value
struct TCPAddr(Addr):
    """A TCP endpoint: IP, port, and optional IPv6 zone."""
    var ip: String
    var port: Int
    var zone: String  # IPv6 addressing zone
    fn __init__(inout self):
        # Default endpoint: loopback on port 8000.
        self.ip = String("127.0.0.1")
        self.port = 8000
        self.zone = ""
    fn __init__(inout self, ip: String, port: Int):
        self.ip = ip
        self.port = port
        self.zone = ""
    fn network(self) -> String:
        """Always "tcp"."""
        return NetworkType.tcp.value
    fn string(self) -> String:
        """Render as "host:port", appending "%zone" to the host when set."""
        var host = self.ip
        if self.zone != "":
            host = host + "%" + self.zone
        return join_host_port(host, str(self.port))
fn resolve_internet_addr(network: String, address: String) raises -> TCPAddr:
    """Resolve "host:port" (or bare host) into a TCPAddr for the given network.

    TCP/UDP networks expect "host:port"; IP networks take a bare host;
    unix and unknown network types raise.
    """
    var is_tcp_or_udp = (
        network == NetworkType.tcp.value
        or network == NetworkType.tcp4.value
        or network == NetworkType.tcp6.value
        or network == NetworkType.udp.value
        or network == NetworkType.udp4.value
        or network == NetworkType.udp6.value
    )
    var is_ip = (
        network == NetworkType.ip.value
        or network == NetworkType.ip4.value
        or network == NetworkType.ip6.value
    )
    var host = String("")
    var portnum = 0
    if is_tcp_or_udp:
        if address != "":
            var host_port = split_host_port(address)
            host = host_port.host
            portnum = atol(host_port.port)
    elif is_ip:
        if address != "":
            host = address
    elif network == NetworkType.unix.value:
        raise Error("Unix addresses not supported yet")
    else:
        raise Error("unsupported network type: " + network)
    return TCPAddr(host, portnum)
fn join_host_port(host: String, port: String) -> String:
    """Join host and port into "host:port", bracketing IPv6 literals."""
    # An IPv6 literal contains ":", so it must be wrapped in brackets.
    if host.find(":") == -1:
        return host + ":" + port
    return "[" + host + "]:" + port
# Errors raised by split_host_port.
alias missingPortError = Error("missing port in address")
alias tooManyColonsError = Error("too many colons in address")
struct HostPort:
    """A host/port pair, both kept as strings."""
    var host: String
    var port: String
    fn __init__(inout self, host: String, port: String):
        self.host = host
        self.port = port
fn split_host_port(hostport: String) raises -> HostPort:
    """Split "host:port" into its parts, handling bracketed IPv6 literals.

    Accepts "host:port" and "[ipv6]:port". Raises missingPortError /
    tooManyColonsError (and others) on malformed input.
    """
    var host: String = ""
    var port: String = ""
    # The port separator is the LAST colon (IPv6 hosts contain colons).
    var colon_index = hostport.rfind(":")
    # j/k mark where to resume scanning for stray brackets below;
    # they stay 0 unless the bracketed-IPv6 path runs.
    var j: Int = 0
    var k: Int = 0
    if colon_index == -1:
        raise missingPortError
    if hostport[0] == "[":
        # Bracketed IPv6 literal: "[addr]:port".
        var end_bracket_index = hostport.find("]")
        if end_bracket_index == -1:
            raise Error("missing ']' in address")
        if end_bracket_index + 1 == len(hostport):
            # "]" is the last character: no port follows.
            raise missingPortError
        elif end_bracket_index + 1 == colon_index:
            host = hostport[1:end_bracket_index]
            j = 1
            k = end_bracket_index + 1
        else:
            if hostport[end_bracket_index + 1] == ":":
                # Another colon after "]:", e.g. "[::1]::80".
                raise tooManyColonsError
            else:
                raise missingPortError
    else:
        host = hostport[:colon_index]
        # A bare (unbracketed) host must not contain a colon.
        if host.find(":") != -1:
            raise tooManyColonsError
    # Reject stray brackets outside the recognized "[...]" span.
    if hostport[j:].find("[") != -1:
        raise Error("unexpected '[' in address")
    if hostport[k:].find("]") != -1:
        raise Error("unexpected ']' in address")
    port = hostport[colon_index + 1 :]
    if port == "":
        raise missingPortError
    if host == "":
        raise Error("missing host")
    return HostPort(host, port)
fn convert_binary_port_to_int(port: UInt16) -> Int:
    """Convert a network-byte-order port number to a host-order Int."""
    var host_order = ntohs(port)
    return int(host_order)
fn convert_binary_ip_to_string(
    owned ip_address: UInt32, address_family: Int32, address_length: UInt32
) -> String:
    """Convert a binary IP address to a string by calling inet_ntop.
    Args:
        ip_address: The binary IP address.
        address_family: The address family of the IP address.
        address_length: The length of the address.
    Returns:
        The IP address as a string.
    """
    # 16 bytes covers an IPv4 dotted-quad (max 15 chars + NUL terminator).
    # NOTE: an IPv6 string would need INET6_ADDRSTRLEN (46) bytes.
    var ip_buffer = UnsafePointer[c_void].alloc(16)
    var ip_address_ptr = UnsafePointer.address_of(ip_address).bitcast[c_void]()
    _ = inet_ntop(address_family, ip_address_ptr, ip_buffer, 16)
    # Find the NUL terminator written by inet_ntop.
    var string_buf = ip_buffer.bitcast[Int8]()
    var index = 0
    while True:
        if string_buf[index] == 0:
            break
        index += 1
    # Fix: copy into an owned String, then release the scratch buffer.
    # Previously ip_buffer was never freed, leaking 16 bytes per call.
    var result = String(StringRef(string_buf, index))
    ip_buffer.free()
    return result
fn get_sock_name(fd: Int32) raises -> HostPort:
    """Return the local address of the socket as a HostPort.

    Raises when getsockname fails.
    """
    var local_address_ptr = UnsafePointer[sockaddr].alloc(1)
    var local_address_ptr_size = socklen_t(sizeof[sockaddr]())
    var status = getsockname(
        fd,
        local_address_ptr,
        UnsafePointer[socklen_t].address_of(local_address_ptr_size),
    )
    if status == -1:
        # Fix: release the scratch sockaddr before raising (was leaked).
        local_address_ptr.free()
        raise Error("get_sock_name: Failed to get address of local socket.")
    # Cast sockaddr to sockaddr_in and copy it out so the buffer can be freed.
    var addr_in = local_address_ptr.bitcast[sockaddr_in]()[]
    # Fix: free the scratch buffer (previously leaked on every call).
    local_address_ptr.free()
    return HostPort(
        host=convert_binary_ip_to_string(addr_in.sin_addr.s_addr, AF_INET, 16),
        port=convert_binary_port_to_int(addr_in.sin_port).__str__(),
    )
fn get_peer_name(fd: Int32) raises -> HostPort:
    """Return the address of the peer connected to the socket.

    Raises when getpeername fails.
    """
    var remote_address_ptr = UnsafePointer[sockaddr].alloc(1)
    var remote_address_ptr_size = socklen_t(sizeof[sockaddr]())
    var status = getpeername(
        fd,
        remote_address_ptr,
        UnsafePointer[socklen_t].address_of(remote_address_ptr_size),
    )
    if status == -1:
        # Fix: release the scratch sockaddr before raising (was leaked).
        remote_address_ptr.free()
        raise Error("get_peer_name: Failed to get address of remote socket.")
    # Cast sockaddr struct to sockaddr_in to convert binary IP to string,
    # copying it out so the scratch buffer can be freed.
    var addr_in = remote_address_ptr.bitcast[sockaddr_in]()[]
    # Fix: free the scratch buffer (previously leaked on every call).
    remote_address_ptr.free()
    return HostPort(
        host=convert_binary_ip_to_string(addr_in.sin_addr.s_addr, AF_INET, 16),
        port=convert_binary_port_to_int(addr_in.sin_port).__str__(),
    )
| lightbug_http/lightbug_http/net.mojo | false |
<filename>lightbug_http/lightbug_http/server.mojo
from lightbug_http.error import ErrorHandler
from lightbug_http.service import HTTPService
from lightbug_http.net import Listener
# Default maximum number of concurrent connections a server will handle.
alias DefaultConcurrency: Int = 256 * 1024
trait ServerTrait:
    """Interface for an HTTP server that binds an address and serves a service."""
    fn __init__(
        inout self, addr: String, service: HTTPService, error_handler: ErrorHandler
    ):
        ...
    fn get_concurrency(self) -> Int:
        ...
    # Bind `address`, then serve requests with `handler` until stopped.
    fn listen_and_serve(self, address: String, handler: HTTPService) raises -> None:
        ...
    # Serve requests from an already-bound listener.
    fn serve(self, ln: Listener, handler: HTTPService) raises -> None:
        ...
| lightbug_http/lightbug_http/server.mojo | false |
<filename>lightbug_http/lightbug_http/service.mojo
from lightbug_http.http import HTTPRequest, HTTPResponse, OK, NotFound
from lightbug_http.io.bytes import Bytes, bytes
trait HTTPService:
    """Interface for request handlers: map an HTTPRequest to an HTTPResponse."""
    fn func(self, req: HTTPRequest) raises -> HTTPResponse:
        ...
@value
struct Printer(HTTPService):
    """Echo service: prints the request body and returns it as 200 OK."""
    fn func(self, req: HTTPRequest) raises -> HTTPResponse:
        var payload = req.body_raw
        print(String(payload))
        return OK(payload)
@value
struct Welcome(HTTPService):
    """Demo service serving the bundled welcome page and logo from ./static."""
    fn func(self, req: HTTPRequest) raises -> HTTPResponse:
        var uri = req.uri()
        # Index page: serve the welcome HTML.
        if uri.path() == "/":
            var html: Bytes
            with open("static/lightbug_welcome.html", "r") as f:
                html = f.read_bytes()
            return OK(html, "text/html; charset=utf-8")
        # Logo referenced by the welcome page.
        if uri.path() == "/logo.png":
            var image: Bytes
            with open("static/logo.png", "r") as f:
                image = f.read_bytes()
            return OK(image, "image/png")
        # Anything else is a 404.
        return NotFound(uri.path())
@value
struct ExampleRouter(HTTPService):
    """Demo router: logs which path was hit and echoes the body back."""
    fn func(self, req: HTTPRequest) raises -> HTTPResponse:
        var payload = req.body_raw
        var path = req.uri().path()
        if path == "/":
            print("I'm on the index path!")
        if path == "/first":
            print("I'm on /first!")
        elif path == "/second":
            print("I'm on /second!")
        elif path == "/echo":
            print(String(payload))
        return OK(payload)
@value
struct TechEmpowerRouter(HTTPService):
    """Routes for the TechEmpower plaintext/json benchmark endpoints."""
    fn func(self, req: HTTPRequest) raises -> HTTPResponse:
        var path = req.uri().path()
        if path == "/json":
            return OK('{"message": "Hello, World!"}', "application/json")
        if path == "/plaintext":
            return OK("Hello, World!", "text/plain")
        return OK("Hello world!")  # text/plain is the default
| lightbug_http/lightbug_http/service.mojo | false |
from lightbug_http.io.bytes import Bytes
# URI / protocol string constants shared across the library.
alias strSlash = "/"
alias strHttp = "http"
alias http = "http"
alias strHttps = "https"
alias https = "https"
alias strHttp11 = "HTTP/1.1"
alias strHttp10 = "HTTP/1.0"
alias strMethodGet = "GET"
# Separator characters used by the HTTP parser and serializer.
alias rChar = "\r"
alias nChar = "\n"
alias colonChar = ":"
alias empty_string = ""
alias whitespace = " "
alias tab = "\t"
@value
struct NetworkType:
    """Enum-style wrapper over network type names ("tcp", "udp", "ip", ...)."""
    var value: String
    alias empty = NetworkType("")
    alias tcp = NetworkType("tcp")
    alias tcp4 = NetworkType("tcp4")
    alias tcp6 = NetworkType("tcp6")
    alias udp = NetworkType("udp")
    alias udp4 = NetworkType("udp4")
    alias udp6 = NetworkType("udp6")
    alias ip = NetworkType("ip")
    alias ip4 = NetworkType("ip4")
    alias ip6 = NetworkType("ip6")
    alias unix = NetworkType("unix")
@value
struct ConnType:
    """Enum-style wrapper over connection kinds."""
    var value: String
    alias empty = ConnType("")
    alias http = ConnType("http")
    alias websocket = ConnType("websocket")
@value
struct RequestMethod:
    """Enum-style wrapper over HTTP request method names."""
    var value: String
    alias get = RequestMethod("GET")
    alias post = RequestMethod("POST")
    alias put = RequestMethod("PUT")
    alias delete = RequestMethod("DELETE")
    alias head = RequestMethod("HEAD")
    alias patch = RequestMethod("PATCH")
    alias options = RequestMethod("OPTIONS")
@value
struct CharSet:
    """Enum-style wrapper over character set names."""
    var value: String
    alias utf8 = CharSet("utf-8")
@value
struct MediaType:
    """Enum-style wrapper over common MIME types."""
    var value: String
    alias empty = MediaType("")
    alias plain = MediaType("text/plain")
    alias json = MediaType("application/json")
@value
struct Message:
    """Enum-style wrapper over internal message/event type names."""
    var type: String
    alias empty = Message("")
    alias http_start = Message("http.response.start")
| lightbug_http/lightbug_http/strings.mojo | false |
<filename>lightbug_http/lightbug_http/uri.mojo
from lightbug_http.io.bytes import Bytes, BytesView, bytes_equal, bytes
from lightbug_http.strings import (
strSlash,
strHttp11,
strHttp10,
strHttp,
http,
strHttps,
https,
)
@value
struct URI:
    """A URI split into scheme, host, path, query, hash, and HTTP version.

    Components are stored as raw Bytes; `parse()` populates them from
    `__full_uri`. Several accessors return fallback constants whose
    lengths include the string's NUL terminator (see scheme(),
    http_version(), path_bytes()).
    """
    var __path_original: Bytes
    var __scheme: Bytes
    var __path: Bytes
    var __query_string: Bytes
    var __hash: Bytes
    var __host: Bytes
    var __http_version: Bytes
    var disable_path_normalization: Bool
    var __full_uri: Bytes
    var __request_uri: Bytes
    var __username: Bytes
    var __password: Bytes
    fn __init__(
        inout self,
        full_uri: String,
    ) -> None:
        self.__path_original = Bytes()
        self.__scheme = Bytes()
        self.__path = Bytes()
        self.__query_string = Bytes()
        self.__hash = Bytes()
        self.__host = Bytes()
        self.__http_version = Bytes()
        self.disable_path_normalization = False
        # NOTE(review): keeps the NUL terminator (pop=False), unlike the
        # other constructors which drop it — confirm intentional.
        self.__full_uri = bytes(full_uri, pop=False)
        self.__request_uri = Bytes()
        self.__username = Bytes()
        self.__password = Bytes()
    fn __init__(
        inout self,
        full_uri: String,
        host: String
    ) -> None:
        self.__path_original = Bytes()
        self.__scheme = Bytes()
        self.__path = Bytes()
        self.__query_string = Bytes()
        self.__hash = Bytes()
        self.__host = bytes(host)
        self.__http_version = Bytes()
        self.disable_path_normalization = False
        self.__full_uri = bytes(full_uri)
        self.__request_uri = Bytes()
        self.__username = Bytes()
        self.__password = Bytes()
    fn __init__(
        inout self,
        scheme: String,
        host: String,
        path: String,
    ) -> None:
        # Builds from components; the path is normalised on construction.
        self.__path_original = bytes(path)
        self.__scheme = scheme.as_bytes()
        self.__path = normalise_path(bytes(path), self.__path_original)
        self.__query_string = Bytes()
        self.__hash = Bytes()
        self.__host = bytes(host)
        self.__http_version = Bytes()
        self.disable_path_normalization = False
        self.__full_uri = Bytes()
        self.__request_uri = Bytes()
        self.__username = Bytes()
        self.__password = Bytes()
    fn __init__(
        inout self,
        path_original: Bytes,
        path: Bytes,
        scheme: Bytes,
        query_string: Bytes,
        hash: Bytes,
        host: Bytes,
        http_version: Bytes,
        disable_path_normalization: Bool,
        full_uri: Bytes,
        request_uri: Bytes,
        username: Bytes,
        password: Bytes,
    ):
        # Field-by-field constructor; no parsing or normalisation applied.
        self.__path_original = path_original
        self.__scheme = scheme
        self.__path = path
        self.__query_string = query_string
        self.__hash = hash
        self.__host = host
        self.__http_version = http_version
        self.disable_path_normalization = disable_path_normalization
        self.__full_uri = full_uri
        self.__request_uri = request_uri
        self.__username = username
        self.__password = password
    fn path_original(self) -> BytesView:
        # Pre-normalisation path, as a zero-copy view.
        return BytesView(unsafe_ptr=self.__path_original.unsafe_ptr(), len=self.__path_original.size)
    fn set_path(inout self, path: String) -> Self:
        self.__path = normalise_path(bytes(path), self.__path_original)
        return self
    fn set_path_bytes(inout self, path: Bytes) -> Self:
        self.__path = normalise_path(path, self.__path_original)
        return self
    fn path(self) -> String:
        # Empty path defaults to "/".
        if len(self.__path) == 0:
            return strSlash
        return String(self.__path)
    fn path_bytes(self) -> BytesView:
        if len(self.__path) == 0:
            # len=2: "/" plus its NUL terminator.
            return BytesView(unsafe_ptr=strSlash.as_bytes_slice().unsafe_ptr(), len=2)
        return BytesView(unsafe_ptr=self.__path.unsafe_ptr(), len=self.__path.size)
    fn set_scheme(inout self, scheme: String) -> Self:
        self.__scheme = bytes(scheme)
        return self
    fn set_scheme_bytes(inout self, scheme: Bytes) -> Self:
        self.__scheme = scheme
        return self
    fn scheme(self) -> BytesView:
        # Defaults to "http"; len=5 includes the NUL terminator.
        if len(self.__scheme) == 0:
            return BytesView(unsafe_ptr=strHttp.as_bytes_slice().unsafe_ptr(), len=5)
        return BytesView(unsafe_ptr=self.__scheme.unsafe_ptr(), len=self.__scheme.size)
    fn http_version(self) -> BytesView:
        # Defaults to "HTTP/1.1"; len=9 includes the NUL terminator.
        if len(self.__http_version) == 0:
            return BytesView(unsafe_ptr=strHttp11.as_bytes_slice().unsafe_ptr(), len=9)
        return BytesView(unsafe_ptr=self.__http_version.unsafe_ptr(), len=self.__http_version.size)
    fn http_version_str(self) -> String:
        return self.__http_version
    fn set_http_version(inout self, http_version: String) -> Self:
        self.__http_version = bytes(http_version)
        return self
    fn set_http_version_bytes(inout self, http_version: Bytes) -> Self:
        self.__http_version = http_version
        return self
    fn is_http_1_1(self) -> Bool:
        return bytes_equal(self.http_version(), bytes(strHttp11, pop=False))
    fn is_http_1_0(self) -> Bool:
        return bytes_equal(self.http_version(), bytes(strHttp10, pop=False))
    fn is_https(self) -> Bool:
        return bytes_equal(self.__scheme, bytes(https, pop=False))
    fn is_http(self) -> Bool:
        # An unset scheme counts as http.
        return bytes_equal(self.__scheme, bytes(http, pop=False)) or len(self.__scheme) == 0
    fn set_request_uri(inout self, request_uri: String) -> Self:
        self.__request_uri = bytes(request_uri)
        return self
    fn set_request_uri_bytes(inout self, request_uri: Bytes) -> Self:
        self.__request_uri = request_uri
        return self
    fn request_uri(self) -> BytesView:
        return BytesView(unsafe_ptr=self.__request_uri.unsafe_ptr(), len=self.__request_uri.size)
    fn set_query_string(inout self, query_string: String) -> Self:
        self.__query_string = bytes(query_string)
        return self
    fn set_query_string_bytes(inout self, query_string: Bytes) -> Self:
        self.__query_string = query_string
        return self
    fn query_string(self) -> BytesView:
        return BytesView(unsafe_ptr=self.__query_string.unsafe_ptr(), len=self.__query_string.size)
    fn set_hash(inout self, hash: String) -> Self:
        self.__hash = bytes(hash)
        return self
    fn set_hash_bytes(inout self, hash: Bytes) -> Self:
        self.__hash = hash
        return self
    fn hash(self) -> BytesView:
        return BytesView(unsafe_ptr=self.__hash.unsafe_ptr(), len=self.__hash.size)
    fn set_host(inout self, host: String) -> Self:
        self.__host = bytes(host)
        return self
    fn set_host_bytes(inout self, host: Bytes) -> Self:
        self.__host = host
        return self
    fn host(self) -> BytesView:
        return BytesView(unsafe_ptr=self.__host.unsafe_ptr(), len=self.__host.size)
    fn host_str(self) -> String:
        return self.__host
    fn full_uri(self) -> BytesView:
        return BytesView(unsafe_ptr=self.__full_uri.unsafe_ptr(), len=self.__full_uri.size)
    fn set_username(inout self, username: String) -> Self:
        self.__username = bytes(username)
        return self
    fn set_username_bytes(inout self, username: Bytes) -> Self:
        self.__username = username
        return self
    fn username(self) -> BytesView:
        return BytesView(unsafe_ptr=self.__username.unsafe_ptr(), len=self.__username.size)
    fn set_password(inout self, password: String) -> Self:
        self.__password = bytes(password)
        return self
    fn set_password_bytes(inout self, password: Bytes) -> Self:
        self.__password = password
        return self
    fn password(self) -> BytesView:
        return BytesView(unsafe_ptr=self.__password.unsafe_ptr(), len=self.__password.size)
    fn parse(inout self) raises -> None:
        """Populate scheme, host, path, query string, and request URI
        from __full_uri.

        Does not parse username/password, hash, or port.
        """
        var raw_uri = String(self.__full_uri)
        var proto_str = String(strHttp11)
        var is_https = False
        # Split off "<scheme>://" if present.
        var proto_end = raw_uri.find("://")
        var remainder_uri: String
        if proto_end >= 0:
            proto_str = raw_uri[:proto_end]
            if proto_str == https:
                is_https = True
            remainder_uri = raw_uri[proto_end + 3:]
        else:
            remainder_uri = raw_uri
        # NOTE(review): this scheme assignment is overwritten
        # unconditionally by the http/https branch below.
        _ = self.set_scheme_bytes(proto_str.as_bytes_slice())
        # Host is everything before the first "/"; rest is the request URI.
        var path_start = remainder_uri.find("/")
        var host_and_port: String
        var request_uri: String
        if path_start >= 0:
            host_and_port = remainder_uri[:path_start]
            request_uri = remainder_uri[path_start:]
            # host_and_port[:path_start] equals host_and_port here (its
            # length is path_start), so the extra slice is a no-op.
            _ = self.set_host_bytes(bytes(host_and_port[:path_start], pop=False))
        else:
            host_and_port = remainder_uri
            request_uri = strSlash
            _ = self.set_host_bytes(bytes(host_and_port, pop=False))
        if is_https:
            _ = self.set_scheme_bytes(bytes(https, pop=False))
        else:
            _ = self.set_scheme_bytes(bytes(http, pop=False))
        # Split path from query string at the first "?".
        var n = request_uri.find("?")
        if n >= 0:
            self.__path_original = bytes(request_uri[:n], pop=False)
            self.__query_string = bytes(request_uri[n + 1 :], pop=False)
        else:
            self.__path_original = bytes(request_uri, pop=False)
            self.__query_string = Bytes()
        _ = self.set_path_bytes(normalise_path(self.__path_original, self.__path_original))
        _ = self.set_request_uri_bytes(bytes(request_uri, pop=False))
fn normalise_path(path: Bytes, path_original: Bytes) -> Bytes:
    """Normalise a URI path.

    Currently a stub: returns `path` unchanged and ignores `path_original`.
    """
    # TODO: implement
    return path
| lightbug_http/lightbug_http/uri.mojo | false |
<filename>lightbug_http/lightbug_http/__init__.mojo
from lightbug_http.http import HTTPRequest, HTTPResponse, OK
from lightbug_http.service import HTTPService, Welcome
from lightbug_http.sys.server import SysServer
from lightbug_http.tests.run import run_tests
trait DefaultConstructible:
    """Types constructible with no arguments (construction may raise)."""
    fn __init__(inout self) raises:
        ...
| lightbug_http/lightbug_http/__init__.mojo | false |
<filename>lightbug_http/lightbug_http/io/bytes.mojo
from python import PythonObject
from lightbug_http.strings import nChar, rChar
# A single byte.
alias Byte = UInt8
# An owned, growable byte buffer.
alias Bytes = List[Byte]
# An immutable, zero-copy view over a byte buffer.
alias BytesView = Span[is_mutable=False, T=Byte, lifetime=ImmutableStaticLifetime]
fn bytes(s: StringLiteral, pop: Bool = True) -> Bytes:
    """Convert a string literal to Bytes.

    When `pop` is True (default), the trailing NUL terminator from the
    String buffer is removed; pass pop=False to keep it.
    """
    # This is currently null-terminated, which we don't want in HTTP responses
    var buf = String(s)._buffer
    if pop:
        _ = buf.pop()
    return buf
fn bytes(s: String, pop: Bool = True) -> Bytes:
    """Convert a String to Bytes.

    When `pop` is True (default), the trailing NUL terminator from the
    String buffer is removed; pass pop=False to keep it.
    """
    # This is currently null-terminated, which we don't want in HTTP responses
    var buf = s._buffer
    if pop:
        _ = buf.pop()
    return buf
fn bytes_equal(a: Bytes, b: Bytes) -> Bool:
    """Compare two byte buffers for equality via their String contents."""
    var lhs = String(a)
    var rhs = String(b)
    return lhs == rhs
fn index_byte(buf: Bytes, c: Byte) -> Int:
    """Return the index of the first occurrence of c in buf, or -1."""
    var i = 0
    var n = len(buf)
    while i < n:
        if buf[i] == c:
            return i
        i += 1
    return -1
fn last_index_byte(buf: Bytes, c: Byte) -> Int:
    """Return the index of the last occurrence of c in buf, or -1."""
    var i = len(buf)
    while i > 0:
        i -= 1
        if buf[i] == c:
            return i
    return -1
fn compare_case_insensitive(a: Bytes, b: Bytes) -> Bool:
    """ASCII case-insensitive equality of two byte buffers.

    OR-ing with 0x20 folds ASCII letters to lowercase. NOTE: for
    non-letter bytes this can conflate distinct characters (e.g. '@'
    and '`'); callers compare header names, which are letters/dashes.
    """
    if len(a) != len(b):
        return False
    # Fix: compare every byte. The previous `range(len(a) - 1)` skipped
    # the final byte, so buffers differing only in their last byte
    # incorrectly compared equal.
    for i in range(len(a)):
        if (a[i] | 0x20) != (b[i] | 0x20):
            return False
    return True
fn next_line(b: Bytes) raises -> (Bytes, Bytes):
    """Split b at the first '\\n'.

    Returns (line without its trailing CR/LF, remainder after the LF).
    Raises when b contains no newline.
    """
    var n_next = index_byte(b, bytes(nChar, pop=False)[0])
    if n_next < 0:
        raise Error("next_line: newline not found")
    var n = n_next
    # Trim a '\r' preceding the '\n' (CRLF line ending).
    if n > 0 and (b[n-1] == bytes(rChar, pop=False)[0]):
        n -= 1
    # Fix: b[:n] excludes the line terminator. The previous b[:n+1]
    # re-included the very '\r' that n was just decremented to trim, so
    # "abc\r\n" yielded "abc\r" while "abc\n" yielded "abc\n".
    return (b[:n], b[n_next+1:])
@value
@register_passable("trivial")
struct UnsafeString:
    """A raw (pointer, length) string used to hand data to Python interop.

    NOTE(review): uses the legacy Pointer API, and the buffers allocated
    in the constructors are never freed in this struct — callers own the
    leak; confirm lifetime expectations.
    """
    var data: Pointer[UInt8]
    var len: Int
    fn __init__(str: StringLiteral) -> UnsafeString:
        # Copy the literal's bytes into a freshly allocated buffer.
        var l = str.__len__()
        var s = String(str)
        var p = Pointer[UInt8].alloc(l)
        for i in range(l):
            p.store(i, s._buffer[i])
        return UnsafeString(p, l)
    fn __init__(str: String) -> UnsafeString:
        # Copy the String's bytes into a freshly allocated buffer.
        var l = str.__len__()
        var p = Pointer[UInt8].alloc(l)
        for i in range(l):
            p.store(i, str._buffer[i])
        return UnsafeString(p, l)
    fn to_string(self) -> String:
        # Construct a String from the raw pointer and length.
        var s = String(self.data, self.len)
        return s
| lightbug_http/lightbug_http/io/bytes.mojo | false |
<filename>lightbug_http/lightbug_http/io/sync.mojo
# Time in nanoseconds
alias Duration = Int
| lightbug_http/lightbug_http/io/sync.mojo | false |
<filename>lightbug_http/lightbug_http/python/client.mojo
from lightbug_http.client import Client
from lightbug_http.http import HTTPRequest, HTTPResponse
from lightbug_http.python import Modules
from lightbug_http.io.bytes import Bytes, UnsafeString, bytes
from lightbug_http.strings import CharSet
struct PythonClient(Client):
    """HTTP client implemented on top of Python's `socket` module."""

    var pymodules: Modules    # cached Python module handles (builtins, socket)
    var socket: PythonObject  # Python socket object used for the connection
    var name: String          # client identifier
    var host: StringLiteral   # default host (not consulted by `do`, which uses the request URI)
    var port: Int             # default port (not consulted by `do`)

    fn __init__(inout self) raises:
        """Create a client with default host 127.0.0.1:8888."""
        self.pymodules = Modules()
        self.socket = self.pymodules.socket.socket()
        self.host = "127.0.0.1"
        self.port = 8888
        self.name = "lightbug_http_client"

    fn __init__(inout self, host: StringLiteral, port: Int) raises:
        """Create a client with an explicit default host and port."""
        self.pymodules = Modules()
        self.socket = self.pymodules.socket.socket()
        self.host = host
        self.port = port
        self.name = "lightbug_http_client"

    fn do(self, req: HTTPRequest) raises -> HTTPResponse:
        """Send `req` to the host named in its URI and return the response.

        Raises:
            Error: If the request URI has no host.
        """
        var uri = req.uri()
        try:
            _ = uri.parse()
        except e:
            print("error parsing uri: " + e.__str__())

        var host = String(uri.host())
        if host == "":
            raise Error("URI is nil")
        var is_tls = False
        if uri.is_https():
            is_tls = True

        # Fix: previously `host.split(":")[1]` was read unconditionally, so a
        # host without an explicit port crashed. Default to 443 (HTTPS) or
        # 80 (HTTP) instead, consistent with MojoClient.do.
        var host_str: String
        var port: Int
        if host.__contains__(":"):
            var host_port = host.split(":")
            host_str = host_port[0]
            port = atol(host_port[1])
        else:
            host_str = host
            if is_tls:
                port = 443
            else:
                port = 80

        _ = self.socket.connect((UnsafeString(host_str.__str__()), port))

        # Encode the request body to UTF-8 bytes via Python's builtins.
        # NOTE(review): only the raw body is sent, not an encoded request
        # line/headers — confirm against the servers this client targets.
        var data = self.pymodules.builtins.bytes(
            String(req.body_raw), CharSet.utf8.value
        )
        _ = self.socket.sendall(data)
        # NOTE(review): a single 1024-byte recv may truncate larger
        # responses — confirm expected response sizes.
        var res = self.socket.recv(1024).decode()
        _ = self.socket.close()
        return HTTPResponse(bytes(res))
| lightbug_http/lightbug_http/python/client.mojo | false |
from lightbug_http.python import Modules
from lightbug_http.io.bytes import Bytes, UnsafeString, bytes
from lightbug_http.io.sync import Duration
from lightbug_http.net import (
Net,
TCPAddr,
Listener,
ListenConfig,
resolve_internet_addr,
default_buffer_size,
)
from lightbug_http.net import Connection, default_tcp_keep_alive
from lightbug_http.strings import CharSet
@value
struct PythonTCPListener:
    """TCP listener backed by a Python `socket.socket` object."""

    # Python builtins module handle (may be None for placeholder instances).
    var __pymodules: PythonObject
    # Address this listener is (to be) bound to.
    var __addr: TCPAddr
    # Underlying Python socket; None until supplied by the listen config.
    var socket: PythonObject

    fn __init__(inout self) raises:
        # Placeholder listener: no modules, no socket, localhost:8080.
        self.__pymodules = None
        self.__addr = TCPAddr("localhost", 8080)
        self.socket = None

    fn __init__(inout self, addr: TCPAddr) raises:
        # Address-only construction; socket attached later.
        self.__pymodules = None
        self.__addr = addr
        self.socket = None

    fn __init__(inout self, pymodules: PythonObject, addr: TCPAddr) raises:
        # Modules + address; socket attached later.
        self.__pymodules = pymodules
        self.__addr = addr
        self.socket = None

    fn __init__(
        inout self, pymodules: PythonObject, addr: TCPAddr, socket: PythonObject
    ) raises:
        # Fully-configured listener wrapping an existing Python socket.
        self.__pymodules = pymodules
        self.__addr = addr
        self.socket = socket

    @always_inline
    fn accept(self) raises -> PythonConnection:
        """Block until a client connects; wrap the (conn, addr) pair."""
        var conn_addr = self.socket.accept()
        return PythonConnection(self.__pymodules, conn_addr)

    fn close(self) raises:
        """Close the underlying Python socket.

        Raises:
            Error: If no socket has been attached yet.
        """
        if self.socket == None:
            raise Error("socket is None, cannot close")
        _ = self.socket.close()

    fn addr(self) -> TCPAddr:
        """Return the address this listener was configured with."""
        return self.__addr
struct PythonListenConfig:
    """Factory for PythonTCPListener instances (bind + listen)."""

    # Cached Python module handles.
    var __pymodules: Modules
    # Keep-alive duration. NOTE(review): stored but never applied to the
    # created sockets — confirm whether it should set SO_KEEPALIVE.
    var __keep_alive: Duration

    fn __init__(inout self):
        # Default keep-alive configuration.
        self.__keep_alive = default_tcp_keep_alive
        self.__pymodules = Modules()

    fn __init__(inout self, keep_alive: Duration):
        # Explicit keep-alive configuration.
        self.__keep_alive = keep_alive
        self.__pymodules = Modules()

    fn listen(inout self, network: String, address: String) raises -> PythonTCPListener:
        """Resolve `address`, bind an IPv4 TCP socket, and start listening."""
        var addr = resolve_internet_addr(network, address)
        var listener = PythonTCPListener(
            self.__pymodules.builtins,
            addr,
            self.__pymodules.socket.socket(
                self.__pymodules.socket.AF_INET,
                self.__pymodules.socket.SOCK_STREAM,
            ),
        )
        _ = listener.socket.bind((UnsafeString(addr.ip), addr.port))
        _ = listener.socket.listen()
        print("Listening on " + String(addr.ip) + ":" + String(addr.port))
        return listener
@value
struct PythonConnection(Connection):
    """A client connection backed by a Python socket object."""

    # Python builtins module handle (used for bytes encode/decode).
    var pymodules: PythonObject
    # Underlying Python socket; None for address-only instances.
    var conn: PythonObject
    # Remote address: "host:port" string or (host, port) tuple from accept().
    var raddr: PythonObject
    # Local address; "" until first queried via local_addr().
    var laddr: PythonObject

    fn __init__(inout self, laddr: String, raddr: String) raises:
        # Address-only construction; no underlying socket yet.
        self.conn = None
        self.raddr = PythonObject(raddr)
        self.laddr = PythonObject(laddr)
        self.pymodules = Modules().builtins

    fn __init__(inout self, laddr: TCPAddr, raddr: TCPAddr) raises:
        # Address-only construction from TCPAddr values.
        self.conn = None
        self.raddr = PythonObject(raddr.ip + ":" + raddr.port.__str__())
        self.laddr = PythonObject(laddr.ip + ":" + laddr.port.__str__())
        self.pymodules = Modules().builtins

    fn __init__(inout self, pymodules: PythonObject, py_conn_addr: PythonObject) raises:
        # Wrap the (conn, addr) pair returned by Python's socket.accept().
        self.conn = py_conn_addr[0]
        self.raddr = py_conn_addr[1]
        self.laddr = ""
        self.pymodules = pymodules

    fn read(self, inout buf: Bytes) raises -> Int:
        """Receive up to default_buffer_size bytes; REPLACES `buf` contents.

        The received bytes are decoded as UTF-8 and re-encoded into Bytes.
        Returns the number of bytes placed in `buf`.
        """
        var data = self.conn.recv(default_buffer_size)
        buf = bytes(
            self.pymodules.bytes.decode(data, CharSet.utf8.value).__str__()
        )
        return len(buf)

    fn write(self, buf: Bytes) raises -> Int:
        """Send all of `buf` (UTF-8 encoded via Python); return len(buf)."""
        var data = self.pymodules.bytes(String(buf), CharSet.utf8.value)
        _ = self.conn.sendall(data)
        return len(buf)

    fn close(self) raises:
        """Close the underlying Python socket."""
        _ = self.conn.close()

    fn local_addr(inout self) raises -> TCPAddr:
        """Return the local address, querying the socket on first use."""
        if self.laddr.__str__() == "":
            self.laddr = self.conn.getsockname()
        return TCPAddr(self.laddr[0].__str__(), self.laddr[1].__int__())

    fn remote_addr(self) raises -> TCPAddr:
        """Return the remote address as a TCPAddr.

        NOTE(review): assumes `raddr` is an indexable (host, port) pair,
        i.e. this instance came from accept() — confirm for other paths.
        """
        return TCPAddr(self.raddr[0].__str__(), self.raddr[1].__int__())
struct PythonNet:
    """Python-socket-backed network factory: creates TCP listeners."""

    # Listen configuration used for every listen() call.
    var __lc: PythonListenConfig

    fn __init__(inout self):
        # Default keep-alive configuration.
        self.__lc = PythonListenConfig(default_tcp_keep_alive)

    fn __init__(inout self, keep_alive: Duration) raises:
        # Explicit keep-alive configuration.
        self.__lc = PythonListenConfig(keep_alive)

    fn listen(inout self, network: String, addr: String) raises -> PythonTCPListener:
        """Delegate to the listen config to bind and listen on `addr`."""
        return self.__lc.listen(network, addr)
| lightbug_http/lightbug_http/python/net.mojo | false |
from lightbug_http.server import DefaultConcurrency
from lightbug_http.net import Listener
from lightbug_http.http import HTTPRequest, encode, split_http_string
from lightbug_http.uri import URI
from lightbug_http.header import RequestHeader
from lightbug_http.python.net import (
PythonTCPListener,
PythonNet,
PythonConnection,
)
from lightbug_http.python import Modules
from lightbug_http.service import HTTPService
from lightbug_http.io.sync import Duration
from lightbug_http.io.bytes import Bytes
from lightbug_http.error import ErrorHandler
from lightbug_http.strings import NetworkType
struct PythonServer:
    """HTTP server backed by Python sockets; answers one request per connection."""

    # Cached Python module handles.
    var pymodules: Modules
    # Handler invoked on server-side errors.
    var error_handler: ErrorHandler
    # Server identifier.
    var name: String
    # Concurrency cap; see get_concurrency().
    var max_concurrent_connections: Int
    # Whether to keep connections open between requests.
    var tcp_keep_alive: Bool
    # Active listener (placeholder until serve() runs).
    var ln: PythonTCPListener

    fn __init__(inout self) raises:
        """Create a server with default settings."""
        self.pymodules = Modules()
        self.error_handler = ErrorHandler()
        self.name = "lightbug_http"
        self.max_concurrent_connections = 1000
        self.tcp_keep_alive = False
        self.ln = PythonTCPListener()

    fn __init__(inout self, error_handler: ErrorHandler) raises:
        """Create a server with a custom error handler."""
        self.pymodules = Modules()
        self.error_handler = error_handler
        self.name = "lightbug_http"
        self.max_concurrent_connections = 1000
        self.tcp_keep_alive = False
        self.ln = PythonTCPListener()

    fn get_concurrency(self) -> Int:
        """Return max_concurrent_connections, or DefaultConcurrency when unset."""
        var concurrency = self.max_concurrent_connections
        if concurrency <= 0:
            concurrency = DefaultConcurrency
        return concurrency

    fn listen_and_serve[
        T: HTTPService
    ](inout self, address: String, handler: T) raises -> None:
        """Listen on `address` (tcp4) and serve requests with `handler`."""
        var __net = PythonNet()
        var listener = __net.listen(NetworkType.tcp4.value, address)
        self.serve(listener, handler)

    fn serve[
        T: HTTPService
    ](inout self, ln: PythonTCPListener, handler: T) raises -> None:
        """Accept connections in a loop and answer each with `handler`.

        NOTE(review): the loop `break`s — stopping the server — when a read
        returns 0 bytes; confirm this is the intended shutdown path.
        """
        self.ln = ln
        while True:
            var conn = self.ln.accept()
            var buf = Bytes()
            var read_len = conn.read(buf)
            if read_len == 0:
                conn.close()
                break
            # Split the raw request into first line, header block, and body.
            var request_first_line: String
            var request_headers: String
            var request_body: String
            request_first_line, request_headers, request_body = split_http_string(buf)
            var uri = URI(request_first_line)
            try:
                uri.parse()
            except:
                conn.close()
                raise Error("Failed to parse request line")
            var header = RequestHeader(request_headers.as_bytes())
            # NOTE(review): parse_raw is given the first-line String here,
            # while the sys server passes a bufio Reader — confirm a String
            # overload exists on RequestHeader.
            try:
                header.parse_raw(request_first_line)
            except:
                conn.close()
                raise Error("Failed to parse request header")
            # NOTE(review): the full raw request buffer is passed as the
            # request body — confirm this is intentional vs. request_body.
            var res = handler.func(
                HTTPRequest(
                    uri,
                    buf,
                    header,
                )
            )
            var res_encoded = encode(res)
            _ = conn.write(res_encoded.as_bytes_slice())
            conn.close()
| lightbug_http/lightbug_http/python/server.mojo | false |
<filename>lightbug_http/lightbug_http/python/__init__.mojo
from python import Python, PythonObject
@value
struct Modules:
    """Holds the Python modules the networking code depends on.

    Each module is imported once at construction time; on import failure a
    message is printed and the corresponding handle is None.
    """

    var builtins: PythonObject
    var socket: PythonObject

    fn __init__(inout self) -> None:
        self.builtins = self.__load_builtins()
        self.socket = self.__load_socket()

    @staticmethod
    fn __load_socket() -> PythonObject:
        """Import Python's `socket` module; return None when unavailable."""
        try:
            return Python.import_module("socket")
        except e:
            print("Failed to import socket module")
            return None

    @staticmethod
    fn __load_builtins() -> PythonObject:
        """Import Python's `builtins` module; return None when unavailable."""
        try:
            return Python.import_module("builtins")
        except e:
            print("Failed to import builtins module")
            return None
| lightbug_http/lightbug_http/python/__init__.mojo | false |
<filename>lightbug_http/lightbug_http/sys/client.mojo
from external.gojo.bufio import Reader, Scanner, scan_words, scan_bytes
from external.gojo.bytes import buffer
from external.libc import (
c_int,
AF_INET,
SOCK_STREAM,
socket,
connect,
send,
recv,
close,
)
from lightbug_http.client import Client
from lightbug_http.net import default_buffer_size
from lightbug_http.http import HTTPRequest, HTTPResponse, encode, split_http_string
from lightbug_http.header import ResponseHeader
from lightbug_http.sys.net import create_connection
from lightbug_http.io.bytes import Bytes
struct MojoClient(Client):
    """HTTP client implemented directly on libc sockets."""

    # Socket file descriptor created at construction and reused by `do`.
    var fd: c_int
    # Default host (not consulted by `do`, which uses the request URI).
    var host: StringLiteral
    # Default port (not consulted by `do`).
    var port: Int
    # Client identifier.
    var name: String

    fn __init__(inout self) raises:
        """Create a client with default host 127.0.0.1:8888."""
        self.fd = socket(AF_INET, SOCK_STREAM, 0)
        self.host = "127.0.0.1"
        self.port = 8888
        self.name = "lightbug_http_client"

    fn __init__(inout self, host: StringLiteral, port: Int) raises:
        """Create a client with an explicit default host and port."""
        self.fd = socket(AF_INET, SOCK_STREAM, 0)
        self.host = host
        self.port = port
        self.name = "lightbug_http_client"

    fn do(self, req: HTTPRequest) raises -> HTTPResponse:
        """
        The `do` method is responsible for sending an HTTP request to a server and receiving the corresponding response.

        It performs the following steps:
        1. Creates a connection to the server specified in the request.
        2. Sends the request body using the connection.
        3. Receives the response from the server.
        4. Closes the connection.
        5. Returns the received response as an `HTTPResponse` object.

        Note: The code assumes that the `HTTPRequest` object passed as an argument has a valid URI with a host and port specified.

        Parameters
        ----------
        req : HTTPRequest :
            An `HTTPRequest` object representing the request to be sent.

        Returns
        -------
        HTTPResponse :
            The received response.

        Raises
        ------
        Error :
            If there is a failure in sending or receiving the message.
        """
        var uri = req.uri()
        var host = String(uri.host())

        if host == "":
            raise Error("URI is nil")
        var is_tls = False
        if uri.is_https():
            is_tls = True

        # Split host:port; default to 443 (HTTPS) or 80 (HTTP) when the
        # host carries no explicit port.
        var host_str: String
        var port: Int
        if host.__contains__(":"):
            var host_port = host.split(":")
            host_str = host_port[0]
            port = atol(host_port[1])
        else:
            host_str = host
            if is_tls:
                port = 443
            else:
                port = 80

        var conn = create_connection(self.fd, host_str, port)
        var req_encoded = encode(req)
        var bytes_sent = conn.write(req_encoded)
        if bytes_sent == -1:
            raise Error("Failed to send message")

        var new_buf = Bytes(capacity=default_buffer_size)
        var bytes_recv = conn.read(new_buf)

        if bytes_recv == 0:
            # NOTE(review): the connection is closed here, but parsing
            # continues on the empty buffer and close() runs again at the
            # end — confirm whether this path should return or raise.
            conn.close()

        var buf = buffer.new_buffer(new_buf^)
        var reader = Reader(buf^)

        # NOTE(review): `error` is assigned on failures below but never
        # raised or returned — confirm errors should surface to the caller.
        var error = Error()

        # # Ugly hack for now in case the default buffer is too large and we read additional responses from the server
        # var newline_in_body = response_body.find("\r\n")
        # if newline_in_body != -1:
        #     response_body = response_body[:newline_in_body]

        var header = ResponseHeader()
        var first_line_and_headers_len = 0
        try:
            first_line_and_headers_len = header.parse_raw(reader)
        except e:
            conn.close()
            error = Error("Failed to parse response headers: " + e.__str__())

        var response = HTTPResponse(header, Bytes())
        try:
            response.read_body(reader, first_line_and_headers_len,)
        except e:
            error = Error("Failed to read request body: " + e.__str__())

        # var total_recv = bytes_recv
        # while header.content_length() > total_recv:
        #     if header.content_length() != 0 and header.content_length() != -2:
        #         var remaining_body = Bytes()
        #         var read_len = conn.read(remaining_body)
        #         response_body += remaining_body
        #         total_recv += read_len

        conn.close()
        return response
| lightbug_http/lightbug_http/sys/client.mojo | false |
from utils import StaticTuple
from lightbug_http.net import (
Listener,
ListenConfig,
Connection,
TCPAddr,
Net,
resolve_internet_addr,
default_buffer_size,
default_tcp_keep_alive,
get_peer_name,
)
from lightbug_http.strings import NetworkType
from lightbug_http.io.bytes import Bytes, bytes
from lightbug_http.io.sync import Duration
from external.libc import (
c_void,
c_int,
c_uint,
c_char,
in_addr,
sockaddr,
sockaddr_in,
socklen_t,
AI_PASSIVE,
AF_INET,
AF_INET6,
SOCK_STREAM,
SOL_SOCKET,
SO_REUSEADDR,
SHUT_RDWR,
htons,
inet_pton,
to_char_ptr,
socket,
connect,
setsockopt,
listen,
accept,
send,
recv,
bind,
shutdown,
close,
)
from sys.info import os_is_macos
from time import sleep
trait AnAddrInfo:
    """Trait abstracting over platform-specific `addrinfo` layouts.

    Implemented by `addrinfo_macos` and `addrinfo_unix`, whose field order
    differs (see those structs).
    """

    fn get_ip_address(self, host: String) raises -> in_addr:
        """
        TODO: Once default functions can be implemented in traits, this function should use the functions currently
        implemented in the `addrinfo_macos` and `addrinfo_unix` structs.
        """
        ...
fn getaddrinfo[
    T: AnAddrInfo
](
    nodename: UnsafePointer[c_char],
    servname: UnsafePointer[c_char],
    hints: UnsafePointer[T],
    res: UnsafePointer[UnsafePointer[T]],
) -> c_int:
    """
    Overwrites the existing libc `getaddrinfo` function to use the AnAddrInfo trait.

    Libc POSIX `getaddrinfo` function
    Reference: https://man7.org/linux/man-pages/man3/getaddrinfo.3p.html
    Fn signature: int getaddrinfo(const char *restrict nodename, const char *restrict servname, const struct addrinfo *restrict hints, struct addrinfo **restrict res).

    Returns 0 on success, or a non-zero error code on failure.
    """
    # Dispatch straight to the C symbol; T supplies the platform-specific
    # addrinfo field layout (see addrinfo_macos vs addrinfo_unix).
    return external_call[
        "getaddrinfo",
        c_int,  # FnName, RetType
        UnsafePointer[c_char],
        UnsafePointer[c_char],
        UnsafePointer[T],  # Args
        UnsafePointer[UnsafePointer[T]],  # Args
    ](nodename, servname, hints, res)
@value
struct SysListener:
    """
    A TCP listener that listens for incoming connections and can accept them.
    """

    # Listening socket file descriptor.
    var fd: c_int
    # Address this listener is bound to.
    var __addr: TCPAddr

    fn __init__(inout self) raises:
        # Default: localhost:8080 with a fresh IPv4 stream socket.
        self.__addr = TCPAddr("localhost", 8080)
        self.fd = socket(AF_INET, SOCK_STREAM, 0)

    fn __init__(inout self, addr: TCPAddr) raises:
        # Explicit address; socket created here.
        self.__addr = addr
        self.fd = socket(AF_INET, SOCK_STREAM, 0)

    fn __init__(inout self, addr: TCPAddr, fd: c_int) raises:
        # Adopt an already-created (typically already-listening) socket.
        self.__addr = addr
        self.fd = fd

    fn accept(self) raises -> SysConnection:
        """Block until a client connects; return a SysConnection wrapping it."""
        # NOTE(review): their_addr_ptr is heap-allocated and never freed,
        # and sizeof[socklen_t]() (4) looks smaller than the sockaddr
        # storage POSIX accept() expects here — confirm both.
        var their_addr_ptr = UnsafePointer[sockaddr].alloc(1)
        var sin_size = socklen_t(sizeof[socklen_t]())
        var new_sockfd = accept(
            self.fd, their_addr_ptr, UnsafePointer[socklen_t].address_of(sin_size)
        )
        if new_sockfd == -1:
            print("Failed to accept connection, system accept() returned an error.")
        var peer = get_peer_name(new_sockfd)
        return SysConnection(
            self.__addr, TCPAddr(peer.host, atol(peer.port)), new_sockfd
        )

    fn close(self) raises:
        """Shut down and close the listening socket."""
        _ = shutdown(self.fd, SHUT_RDWR)
        var close_status = close(self.fd)
        if close_status == -1:
            print("Failed to close new_sockfd")

    fn addr(self) -> TCPAddr:
        """Return the address this listener was configured with."""
        return self.__addr
struct SysListenConfig(ListenConfig):
    """Factory for SysListener instances: binds and listens on a TCP socket."""

    # Keep-alive duration. NOTE(review): stored but never applied to the
    # created sockets — confirm whether it should set SO_KEEPALIVE.
    var __keep_alive: Duration

    fn __init__(inout self) raises:
        # Default keep-alive configuration.
        self.__keep_alive = default_tcp_keep_alive

    fn __init__(inout self, keep_alive: Duration) raises:
        # Explicit keep-alive configuration.
        self.__keep_alive = keep_alive

    fn listen(inout self, network: String, address: String) raises -> SysListener:
        """Resolve `address`, bind a reusable IPv4 socket, and start listening.

        Bind is retried once per second until it succeeds (e.g. while a
        previous process still holds the port).
        """
        var addr = resolve_internet_addr(network, address)
        var address_family = AF_INET
        var ip_buf_size = 4
        if address_family == AF_INET6:
            ip_buf_size = 16

        # Convert the textual IP to binary form. NOTE(review): ip_buf is
        # heap-allocated and never freed.
        var ip_buf = UnsafePointer[c_void].alloc(ip_buf_size)
        var conv_status = inet_pton(address_family, to_char_ptr(addr.ip), ip_buf)
        var raw_ip = ip_buf.bitcast[c_uint]()[]
        var bin_port = htons(UInt16(addr.port))

        var ai = sockaddr_in(address_family, bin_port, raw_ip, StaticTuple[c_char, 8]())
        var ai_ptr = UnsafePointer[sockaddr_in].address_of(ai).bitcast[sockaddr]()

        var sockfd = socket(address_family, SOCK_STREAM, 0)
        if sockfd == -1:
            print("Socket creation error")

        # SO_REUSEADDR lets the server rebind quickly after a restart.
        var yes: Int = 1
        _ = setsockopt(
            sockfd,
            SOL_SOCKET,
            SO_REUSEADDR,
            UnsafePointer[Int].address_of(yes).bitcast[c_void](),
            sizeof[Int](),
        )

        # Retry bind until it succeeds; log the failure only once.
        var bind_success = False
        var bind_fail_logged = False
        while not bind_success:
            var bind = bind(sockfd, ai_ptr, sizeof[sockaddr_in]())
            if bind == 0:
                bind_success = True
            else:
                if not bind_fail_logged:
                    print("Bind attempt failed. The address might be in use or the socket might not be available.")
                    print("Retrying. Might take 10-15 seconds.")
                    bind_fail_logged = True
                print(".", end="", flush=True)
                _ = shutdown(sockfd, SHUT_RDWR)
                sleep(1)

        if listen(sockfd, c_int(128)) == -1:
            print("Listen failed.\n on sockfd " + sockfd.__str__())

        var listener = SysListener(addr, sockfd)
        print(
            "\n🔥🐝 Lightbug is listening on "
            + "http://"
            + addr.ip
            + ":"
            + addr.port.__str__()
        )
        print("Ready to accept connections...")
        return listener
@value
struct SysConnection(Connection):
    """A TCP connection over a raw libc socket file descriptor."""

    # Connected socket file descriptor.
    var fd: c_int
    # Remote endpoint address.
    var raddr: TCPAddr
    # Local endpoint address.
    var laddr: TCPAddr

    fn __init__(inout self, laddr: String, raddr: String) raises:
        # Resolve both endpoint strings; creates a new (unconnected) socket.
        self.raddr = resolve_internet_addr(NetworkType.tcp4.value, raddr)
        self.laddr = resolve_internet_addr(NetworkType.tcp4.value, laddr)
        self.fd = socket(AF_INET, SOCK_STREAM, 0)

    fn __init__(inout self, laddr: TCPAddr, raddr: TCPAddr) raises:
        # Endpoint addresses supplied; creates a new (unconnected) socket.
        self.raddr = raddr
        self.laddr = laddr
        self.fd = socket(AF_INET, SOCK_STREAM, 0)

    fn __init__(inout self, laddr: TCPAddr, raddr: TCPAddr, fd: c_int) raises:
        # Wrap an existing, already-connected file descriptor.
        self.raddr = raddr
        self.laddr = laddr
        self.fd = fd

    fn read(self, inout buf: Bytes) raises -> Int:
        """Receive into `buf`'s spare capacity; return bytes read (0 on error/EOF)."""
        # recv appends after buf.size, up to buf.capacity.
        var bytes_recv = recv(self.fd, DTypePointer[DType.uint8](buf.unsafe_ptr()).offset(buf.size), buf.capacity - buf.size, 0)
        if bytes_recv == -1:
            return 0
        buf.size += bytes_recv
        if bytes_recv == 0:
            return 0
        # NOTE(review): both branches below return the same value; the
        # partial-read distinction appears vestigial.
        if bytes_recv < buf.capacity:
            return bytes_recv
        return bytes_recv

    fn write(self, msg: String) raises -> Int:
        """Send the string's bytes; always returns len(msg), even on failure."""
        if send(self.fd, to_char_ptr(msg).bitcast[c_void](), len(msg), 0) == -1:
            print("Failed to send response")
        return len(msg)

    fn write(self, buf: Bytes) raises -> Int:
        """Send the buffer's bytes; always returns len(buf), even on failure."""
        if send(self.fd, to_char_ptr(buf).bitcast[c_void](), len(buf), 0) == -1:
            print("Failed to send response")
        return len(buf)

    fn close(self) raises:
        """Shut down both directions and close the file descriptor."""
        _ = shutdown(self.fd, SHUT_RDWR)
        var close_status = close(self.fd)
        if close_status == -1:
            print("Failed to close new_sockfd")

    fn local_addr(inout self) raises -> TCPAddr:
        """Return the local endpoint address."""
        return self.laddr

    fn remote_addr(self) raises -> TCPAddr:
        """Return the remote endpoint address."""
        return self.raddr
struct SysNet:
    """libc-socket-backed network factory: creates TCP listeners."""

    # Listen configuration used for every listen() call.
    var __lc: SysListenConfig

    fn __init__(inout self) raises:
        # Default keep-alive configuration.
        self.__lc = SysListenConfig(default_tcp_keep_alive)

    fn __init__(inout self, keep_alive: Duration) raises:
        # Explicit keep-alive configuration.
        self.__lc = SysListenConfig(keep_alive)

    fn listen(inout self, network: String, addr: String) raises -> SysListener:
        """Delegate to the listen config to bind and listen on `addr`."""
        return self.__lc.listen(network, addr)
@value
@register_passable("trivial")
struct addrinfo_macos(AnAddrInfo):
    """
    For MacOS, I had to swap the order of ai_canonname and ai_addr.
    https://stackoverflow.com/questions/53575101/calling-getaddrinfo-directly-from-python-ai-addr-is-null-pointer.
    """

    var ai_flags: c_int
    var ai_family: c_int
    var ai_socktype: c_int
    var ai_protocol: c_int
    var ai_addrlen: socklen_t
    # macOS layout: ai_canonname precedes ai_addr (the reverse of Linux).
    var ai_canonname: UnsafePointer[c_char]
    var ai_addr: UnsafePointer[sockaddr]
    var ai_next: UnsafePointer[c_void]

    fn __init__() -> Self:
        # Zero-initialized hints/result struct.
        return Self(
            0, 0, 0, 0, 0, UnsafePointer[c_char](), UnsafePointer[sockaddr](), UnsafePointer[c_void]()
        )

    fn get_ip_address(self, host: String) raises -> in_addr:
        """
        Returns an IP address based on the host.
        This is a MacOS-specific implementation.

        Args:
            host: String - The host to get the IP from.

        Returns:
            UInt32 - The IP address.
        """
        var host_ptr = to_char_ptr(host)
        # NOTE(review): servinfo (and the result list getaddrinfo fills in)
        # is never released with freeaddrinfo — confirm the leak is acceptable.
        var servinfo = UnsafePointer[Self]().alloc(1)
        initialize_pointee_move(servinfo, Self())
        # Hints: IPv4, TCP stream sockets.
        var hints = Self()
        hints.ai_family = AF_INET
        hints.ai_socktype = SOCK_STREAM
        hints.ai_flags = AI_PASSIVE
        var error = getaddrinfo[Self](
            host_ptr,
            UnsafePointer[UInt8](),
            UnsafePointer.address_of(hints),
            UnsafePointer.address_of(servinfo),
        )
        if error != 0:
            print("getaddrinfo failed")
            raise Error("Failed to get IP address. getaddrinfo failed.")
        var addrinfo = servinfo[]
        var ai_addr = addrinfo.ai_addr
        if not ai_addr:
            print("ai_addr is null")
            raise Error(
                "Failed to get IP address. getaddrinfo was called successfully, but"
                " ai_addr is null."
            )
        # The first result is a sockaddr_in; extract its IPv4 address.
        var addr_in = ai_addr.bitcast[sockaddr_in]()[]
        return addr_in.sin_addr
@value
@register_passable("trivial")
struct addrinfo_unix(AnAddrInfo):
    """
    Standard addrinfo struct for Unix systems. Overwrites the existing libc `getaddrinfo` function to adhere to the AnAddrInfo trait.
    """

    var ai_flags: c_int
    var ai_family: c_int
    var ai_socktype: c_int
    var ai_protocol: c_int
    var ai_addrlen: socklen_t
    # Linux layout: ai_addr precedes ai_canonname (see addrinfo_macos).
    var ai_addr: UnsafePointer[sockaddr]
    var ai_canonname: UnsafePointer[c_char]
    var ai_next: UnsafePointer[c_void]

    fn __init__() -> Self:
        # Zero-initialized hints/result struct.
        return Self(
            0, 0, 0, 0, 0, UnsafePointer[sockaddr](), UnsafePointer[c_char](), UnsafePointer[c_void]()
        )

    fn get_ip_address(self, host: String) raises -> in_addr:
        """
        Returns an IP address based on the host.
        This is a Unix-specific implementation.

        Args:
            host: String - The host to get IP from.

        Returns:
            UInt32 - The IP address.
        """
        var host_ptr = to_char_ptr(String(host))
        # NOTE(review): servinfo (and the result list getaddrinfo fills in)
        # is never released with freeaddrinfo — confirm the leak is acceptable.
        var servinfo = UnsafePointer[Self]().alloc(1)
        initialize_pointee_move(servinfo, Self())
        # Hints: IPv4, TCP stream sockets.
        var hints = Self()
        hints.ai_family = AF_INET
        hints.ai_socktype = SOCK_STREAM
        hints.ai_flags = AI_PASSIVE
        var error = getaddrinfo[Self](
            host_ptr,
            UnsafePointer[UInt8](),
            UnsafePointer.address_of(hints),
            UnsafePointer.address_of(servinfo),
        )
        if error != 0:
            print("getaddrinfo failed")
            raise Error("Failed to get IP address. getaddrinfo failed.")
        var addrinfo = servinfo[]
        var ai_addr = addrinfo.ai_addr
        if not ai_addr:
            print("ai_addr is null")
            raise Error(
                "Failed to get IP address. getaddrinfo was called successfully, but"
                " ai_addr is null."
            )
        # The first result is a sockaddr_in; extract its IPv4 address.
        var addr_in = ai_addr.bitcast[sockaddr_in]()[]
        return addr_in.sin_addr
fn create_connection(sock: c_int, host: String, port: UInt16) raises -> SysConnection:
    """
    Connect a socket to a remote host/port and wrap it in a SysConnection.

    Args:
        sock: Int32 - The socket file descriptor to connect.
        host: String - The host to connect to.
        port: UInt16 - The port to connect to (host byte order).

    Returns:
        A SysConnection wrapping the connected file descriptor.

    Raises:
        Error: If the connect() call fails.
    """
    # Resolve the host to an IPv4 address with the platform-appropriate
    # addrinfo layout (field order differs on macOS).
    var ip: in_addr
    if os_is_macos():
        ip = addrinfo_macos().get_ip_address(host)
    else:
        ip = addrinfo_unix().get_ip_address(host)

    # Build the sockaddr_in; htons converts the port to network byte order.
    var addr: sockaddr_in = sockaddr_in(
        AF_INET, htons(port), ip, StaticTuple[c_char, 8](0, 0, 0, 0, 0, 0, 0, 0)
    )
    var addr_ptr = UnsafePointer[sockaddr_in].address_of(addr).bitcast[sockaddr]()
    if connect(sock, addr_ptr, sizeof[sockaddr_in]()) == -1:
        _ = shutdown(sock, SHUT_RDWR)
        raise Error("Failed to connect to server")

    var laddr = TCPAddr()
    var raddr = TCPAddr(host, int(port))
    # Fix: `SysConnection(sock, laddr, raddr)` previously matched the
    # @value-synthesized memberwise initializer (fd, raddr, laddr), which
    # swapped the local and remote addresses. Use the explicit
    # (laddr, raddr, fd) constructor instead.
    var conn = SysConnection(laddr, raddr, sock)
    return conn
| lightbug_http/lightbug_http/sys/net.mojo | false |
<filename>lightbug_http/lightbug_http/sys/server.mojo
from external.gojo.bufio import Reader, Scanner, scan_words, scan_bytes
from external.gojo.bytes import buffer
from lightbug_http.server import DefaultConcurrency
from lightbug_http.net import Listener, default_buffer_size
from lightbug_http.http import HTTPRequest, encode, split_http_string
from lightbug_http.uri import URI
from lightbug_http.header import RequestHeader
from lightbug_http.sys.net import SysListener, SysConnection, SysNet
from lightbug_http.service import HTTPService
from lightbug_http.io.sync import Duration
from lightbug_http.io.bytes import Bytes, bytes
from lightbug_http.error import ErrorHandler
from lightbug_http.strings import NetworkType
alias default_max_request_body_size = 4 * 1024 * 1024 # 4MB
@value
struct SysServer:
    """
    A Mojo-based server that accept incoming requests and delivers HTTP services.
    """

    # Handler invoked on server-side errors.
    var error_handler: ErrorHandler
    # Server identifier.
    var name: String
    # Host address served; prefixed onto request URIs (see serve_connection).
    var __address: String
    # Concurrency cap; see get_concurrency().
    var max_concurrent_connections: Int
    # Requests allowed per connection; 0 means unlimited.
    var max_requests_per_connection: Int
    # Request body cap in bytes; see max_request_body_size().
    var __max_request_body_size: Int
    # Whether to keep connections open between requests.
    var tcp_keep_alive: Bool
    # Active listener (placeholder until serve() runs).
    var ln: SysListener

    fn __init__(inout self) raises:
        """Create a server with all defaults."""
        self.error_handler = ErrorHandler()
        self.name = "lightbug_http"
        self.__address = "127.0.0.1"
        self.max_concurrent_connections = 1000
        self.max_requests_per_connection = 0
        self.__max_request_body_size = default_max_request_body_size
        self.tcp_keep_alive = False
        self.ln = SysListener()

    fn __init__(inout self, tcp_keep_alive: Bool) raises:
        """Create a server with an explicit keep-alive setting."""
        self.error_handler = ErrorHandler()
        self.name = "lightbug_http"
        self.__address = "127.0.0.1"
        self.max_concurrent_connections = 1000
        self.max_requests_per_connection = 0
        self.__max_request_body_size = default_max_request_body_size
        self.tcp_keep_alive = tcp_keep_alive
        self.ln = SysListener()

    fn __init__(inout self, own_address: String) raises:
        """Create a server bound to a specific address string."""
        self.error_handler = ErrorHandler()
        self.name = "lightbug_http"
        self.__address = own_address
        self.max_concurrent_connections = 1000
        self.max_requests_per_connection = 0
        self.__max_request_body_size = default_max_request_body_size
        self.tcp_keep_alive = False
        self.ln = SysListener()

    fn __init__(inout self, error_handler: ErrorHandler) raises:
        """Create a server with a custom error handler."""
        self.error_handler = error_handler
        self.name = "lightbug_http"
        self.__address = "127.0.0.1"
        self.max_concurrent_connections = 1000
        self.max_requests_per_connection = 0
        self.__max_request_body_size = default_max_request_body_size
        self.tcp_keep_alive = False
        self.ln = SysListener()

    fn __init__(inout self, max_request_body_size: Int) raises:
        """Create a server with a custom request-body size limit."""
        self.error_handler = ErrorHandler()
        self.name = "lightbug_http"
        self.__address = "127.0.0.1"
        self.max_concurrent_connections = 1000
        self.max_requests_per_connection = 0
        self.__max_request_body_size = max_request_body_size
        self.tcp_keep_alive = False
        self.ln = SysListener()

    fn __init__(inout self, max_request_body_size: Int, tcp_keep_alive: Bool) raises:
        """Create a server with a body-size limit and keep-alive setting."""
        self.error_handler = ErrorHandler()
        self.name = "lightbug_http"
        self.__address = "127.0.0.1"
        self.max_concurrent_connections = 1000
        self.max_requests_per_connection = 0
        self.__max_request_body_size = max_request_body_size
        self.tcp_keep_alive = tcp_keep_alive
        self.ln = SysListener()

    fn address(self) -> String:
        """Return the configured server address."""
        return self.__address

    fn set_address(inout self, own_address: String) -> Self:
        """Set the server address; returns Self for chaining."""
        self.__address = own_address
        return self

    fn max_request_body_size(self) -> Int:
        """Return the maximum allowed request body size in bytes."""
        return self.__max_request_body_size

    fn set_max_request_body_size(inout self, size: Int) -> Self:
        """Set the maximum request body size; returns Self for chaining."""
        self.__max_request_body_size = size
        return self

    fn get_concurrency(self) -> Int:
        """
        Retrieve the concurrency level which is either
        the configured max_concurrent_connections or the DefaultConcurrency.

        Returns:
            Int: concurrency level for the server.
        """
        var concurrency = self.max_concurrent_connections
        if concurrency <= 0:
            concurrency = DefaultConcurrency
        return concurrency

    fn listen_and_serve[
        T: HTTPService
    ](inout self, address: String, handler: T) raises -> None:
        """
        Listen for incoming connections and serve HTTP requests.

        Args:
            address : String - The address (host:port) to listen on.
            handler : HTTPService - An object that handles incoming HTTP requests.
        """
        var __net = SysNet()
        var listener = __net.listen(NetworkType.tcp4.value, address)
        _ = self.set_address(address)
        self.serve(listener, handler)

    fn serve[T: HTTPService](inout self, ln: SysListener, handler: T) raises -> None:
        """
        Serve HTTP requests.

        Args:
            ln : SysListener - TCP server that listens for incoming connections.
            handler : HTTPService - An object that handles incoming HTTP requests.

        Raises:
            If there is an error while serving requests.
        """
        self.ln = ln
        # Sequential accept loop: one connection handled at a time.
        while True:
            var conn = self.ln.accept()
            self.serve_connection(conn, handler)

    fn serve_connection[T: HTTPService](inout self, conn: SysConnection, handler: T) raises -> None:
        """
        Serve a single connection.

        Args:
            conn : SysConnection - A connection object that represents a client connection.
            handler : HTTPService - An object that handles incoming HTTP requests.

        Raises:
            If there is an error while serving the connection.
        """
        var b = Bytes(capacity=default_buffer_size)
        var bytes_recv = conn.read(b)
        if bytes_recv == 0:
            conn.close()
            return

        var buf = buffer.new_buffer(b^)
        var reader = Reader(buf^)
        # NOTE(review): `error` is assigned on failures below but never
        # raised or attached to the response — confirm errors should surface.
        var error = Error()

        var max_request_body_size = self.max_request_body_size()
        if max_request_body_size <= 0:
            max_request_body_size = default_max_request_body_size

        var req_number = 0
        # Keep-alive loop: one iteration per request on this connection.
        while True:
            req_number += 1
            if req_number > 1:
                # Subsequent request on a kept-alive connection: read again.
                var b = Bytes(capacity=default_buffer_size)
                var bytes_recv = conn.read(b)
                if bytes_recv == 0:
                    conn.close()
                    break
                buf = buffer.new_buffer(b^)
                reader = Reader(buf^)

            var header = RequestHeader()
            var first_line_and_headers_len = 0
            try:
                first_line_and_headers_len = header.parse_raw(reader)
            except e:
                error = Error("Failed to parse request headers: " + e.__str__())

            # The request URI is resolved relative to this server's address.
            var uri = URI(self.address() + String(header.request_uri()))
            try:
                uri.parse()
            except e:
                error = Error("Failed to parse request line:" + e.__str__())

            if header.content_length() > 0:
                if max_request_body_size > 0 and header.content_length() > max_request_body_size:
                    error = Error("Request body too large")

            var request = HTTPRequest(
                uri,
                Bytes(),
                header,
            )
            try:
                request.read_body(reader, header.content_length(), first_line_and_headers_len, max_request_body_size)
            except e:
                error = Error("Failed to read request body: " + e.__str__())

            var res = handler.func(request)

            # Without keep-alive, tell the client the connection will close.
            if not self.tcp_keep_alive:
                _ = res.set_connection_close()

            var res_encoded = encode(res)
            _ = conn.write(res_encoded)

            if not self.tcp_keep_alive:
                conn.close()
                return
| lightbug_http/lightbug_http/sys/server.mojo | false |
from external.gojo.tests.wrapper import MojoTest
from external.morrow import Morrow
from tests.utils import (
default_server_conn_string,
getRequest,
)
from lightbug_http.python.client import PythonClient
from lightbug_http.sys.client import MojoClient
from lightbug_http.http import HTTPRequest, encode
from lightbug_http.uri import URI
from lightbug_http.header import RequestHeader
from lightbug_http.io.bytes import bytes
def test_client():
    """Entry point: construct both clients and run the selected client tests."""
    var mojo_client = MojoClient()
    var py_client = PythonClient()
    test_mojo_client_lightbug_external_req(mojo_client)
    test_python_client_lightbug(py_client)
fn test_mojo_client_lightbug(client: MojoClient) raises:
    """Send a request to a locally running lightbug server via MojoClient
    and check the response status line and headers (up to the Date value).
    """
    var test = MojoTest("test_mojo_client_lightbug")
    var res = client.do(
        HTTPRequest(
            URI(default_server_conn_string),
            bytes("Hello world!"),
            RequestHeader(getRequest),
        )
    )
    # Compare only the first 112 bytes: everything before the varying Date.
    test.assert_equal(
        String(res.body_raw[0:112]),
        String(
            "HTTP/1.1 200 OK\r\nServer: lightbug_http\r\nContent-Type:"
            " text/plain\r\nContent-Length: 12\r\nConnection: close\r\nDate: "
        ),
    )
fn test_mojo_client_lightbug_external_req(client: MojoClient) raises:
    """Fetch an external plain-HTTP URL and expect a 200 status.

    Network failures are printed rather than raised, so the suite can run
    without connectivity.
    """
    var test = MojoTest("test_mojo_client_lightbug_external_req")
    var req = HTTPRequest(
        URI("http://grandinnerastoundingspell.neverssl.com/online/"),
    )
    try:
        var res = client.do(req)
        test.assert_equal(res.header.status_code(), 200)
    except e:
        print(e)
fn test_python_client_lightbug(client: PythonClient) raises:
    """Send a request to a locally running lightbug server via PythonClient
    and check the response status line and headers (up to the Date value).
    """
    var test = MojoTest("test_python_client_lightbug")
    var res = client.do(
        HTTPRequest(
            URI(default_server_conn_string),
            bytes("Hello world!"),
            RequestHeader(getRequest),
        )
    )
    # Compare only the first 112 bytes: everything before the varying Date.
    test.assert_equal(
        String(res.body_raw[0:112]),
        String(
            "HTTP/1.1 200 OK\r\nServer: lightbug_http\r\nContent-Type:"
            " text/plain\r\nContent-Length: 12\r\nConnection: close\r\nDate: "
        ),
    )
| lightbug_http/tests/test_client.mojo | false |
from external.gojo.tests.wrapper import MojoTest
from external.gojo.bytes import buffer
from external.gojo.bufio import Reader
from lightbug_http.header import RequestHeader, ResponseHeader
from lightbug_http.io.bytes import Bytes, bytes
from lightbug_http.strings import empty_string
from lightbug_http.net import default_buffer_size
def test_header():
    """Entry point: run the request- and response-header parsing tests."""
    test_parse_request_header()
    test_parse_response_header()
def test_parse_request_header():
    """Parse a raw HTTP request head and verify each extracted field."""
    var test = MojoTest("test_parse_request_header")
    var headers_str = bytes('''GET /index.html HTTP/1.1\r\nHost: example.com\r\nUser-Agent: Mozilla/5.0\r\nContent-Type: text/html\r\nContent-Length: 1234\r\nConnection: close\r\nTrailer: end-of-message\r\n\r\n''')
    var header = RequestHeader()
    # Feed the raw bytes through a bufio Reader, as the server does.
    var b = Bytes(headers_str)
    var buf = buffer.new_buffer(b^)
    var reader = Reader(buf^)
    _ = header.parse_raw(reader)
    test.assert_equal(String(header.request_uri()), "/index.html")
    test.assert_equal(String(header.protocol()), "HTTP/1.1")
    test.assert_equal(header.no_http_1_1, False)
    test.assert_equal(String(header.host()), String("example.com"))
    test.assert_equal(String(header.user_agent()), "Mozilla/5.0")
    test.assert_equal(String(header.content_type()), "text/html")
    test.assert_equal(header.content_length(), 1234)
    test.assert_equal(header.connection_close(), True)
def test_parse_response_header():
    """Parse a canned raw HTTP response and verify each header accessor,
    including the trailer field."""
    var test = MojoTest("test_parse_response_header")
    var raw_response = bytes('''HTTP/1.1 200 OK\r\nServer: example.com\r\nUser-Agent: Mozilla/5.0\r\nContent-Type: text/html\r\nContent-Encoding: gzip\r\nContent-Length: 1234\r\nConnection: close\r\nTrailer: end-of-message\r\n\r\n''')
    var header = ResponseHeader()
    var raw_copy = Bytes(raw_response)
    var backing_buffer = buffer.new_buffer(raw_copy^)
    var buffered_reader = Reader(backing_buffer^)
    _ = header.parse_raw(buffered_reader)
    test.assert_equal(String(header.protocol()), "HTTP/1.1")
    test.assert_equal(header.no_http_1_1, False)
    test.assert_equal(header.status_code(), 200)
    test.assert_equal(String(header.status_message()), "OK")
    test.assert_equal(String(header.server()), "example.com")
    test.assert_equal(String(header.content_type()), "text/html")
    test.assert_equal(String(header.content_encoding()), "gzip")
    test.assert_equal(header.content_length(), 1234)
    test.assert_equal(header.connection_close(), True)
    test.assert_equal(header.trailer_str(), "end-of-message")
| lightbug_http/tests/test_header.mojo | false |
from external.gojo.tests.wrapper import MojoTest
from lightbug_http.io.bytes import Bytes, bytes
from lightbug_http.http import HTTPRequest, HTTPResponse, split_http_string, encode
from lightbug_http.header import RequestHeader
from lightbug_http.uri import URI
from tests.utils import (
default_server_conn_string,
getRequest,
)
def test_http():
    # Entry point: run all HTTP encode/split tests in this file.
    test_split_http_string()
    test_encode_http_request()
    test_encode_http_response()
def test_split_http_string():
    """Check that split_http_string separates a raw request into
    (request line, header block, body)."""
    var test = MojoTest("test_split_http_string")
    # Map raw HTTP strings to their expected [request line, headers, body] parts.
    var cases = Dict[StringLiteral, List[StringLiteral]]()
    cases["GET /index.html HTTP/1.1\r\nHost: www.example.com\r\nUser-Agent: Mozilla/5.0\r\nContent-Type: text/html\r\nContent-Length: 1234\r\nConnection: close\r\nTrailer: end-of-message\r\n\r\nHello, World!\0"] = List(
        "GET /index.html HTTP/1.1",
        "Host: www.example.com\r\nUser-Agent: Mozilla/5.0\r\nContent-Type: text/html\r\nContent-Length: 1234\r\nConnection: close\r\nTrailer: end-of-message",
        "Hello, World!")
    for c in cases.items():
        var buf = bytes((c[].key))
        request_first_line, request_headers, request_body = split_http_string(buf)
        test.assert_equal(request_first_line, c[].value[0])
        test.assert_equal(request_headers, String(c[].value[1]))
        test.assert_equal(request_body, c[].value[2])
def test_encode_http_request():
    """Encode an HTTPRequest and compare the full wire form byte-for-byte."""
    var test = MojoTest("test_encode_http_request")
    var target = URI(default_server_conn_string)
    var request = HTTPRequest(
        target,
        String("Hello world!").as_bytes(),
        RequestHeader(getRequest),
    )
    var encoded = encode(request)
    var expected = "GET / HTTP/1.1\r\nContent-Length: 12\r\nConnection: keep-alive\r\n\r\nHello world!"
    test.assert_equal(String(encoded), expected)
def test_encode_http_response():
    """Encode an HTTPResponse and compare headers (minus the Date value) and body.

    The Date header changes per run, so the comparison slices around it using
    hand-computed lengths.
    """
    var test = MojoTest("test_encode_http_response")
    var res = HTTPResponse(
        bytes("Hello, World!"),
    )
    var res_encoded = encode(res)
    var res_str = String(res_encoded)
    # Since we cannot compare the exact date, we will only compare the headers until the date and the body
    var expected_full = "HTTP/1.1 200 OK\r\nServer: lightbug_http\r\nContent-Type: application/octet-stream\r\nContent-Length: 13\r\nConnection: keep-alive\r\nDate: 2024-06-02T13:41:50.766880+00:00\r\n\r\nHello, World!"
    # NOTE(review): hand-counted against expected_full; must be updated if the canned response changes.
    var expected_headers_len = 124
    var hello_world_len = len(String("Hello, World!")) - 1  # -1 for the null terminator
    var date_header_len = len(String("Date: 2024-06-02T13:41:50.766880+00:00"))
    var expected_split = String(expected_full).split("\r\n\r\n")
    var expected_headers = expected_split[0]
    var expected_body = expected_split[1]
    test.assert_equal(res_str[:expected_headers_len], expected_headers[:len(expected_headers) - date_header_len])
    test.assert_equal(res_str[(len(res_str) - hello_world_len):len(res_str) + 1], expected_body)
<filename>lightbug_http/tests/test_io.mojo
from external.gojo.tests.wrapper import MojoTest
from lightbug_http.io.bytes import Bytes, bytes_equal, bytes
def test_io():
    # Entry point: run all byte-conversion tests in this file.
    test_string_literal_to_bytes()
    # Fixed: test_string_to_bytes was defined below but never invoked.
    test_string_to_bytes()
fn test_string_literal_to_bytes() raises:
    """Check that `bytes` converts StringLiteral values to the expected byte lists."""
    # Fixed: the test label previously said "test_string_to_bytes" (copy-paste
    # from the sibling test), which made failure reports ambiguous.
    var test = MojoTest("test_string_literal_to_bytes")
    var cases = Dict[StringLiteral, Bytes]()
    cases[""] = Bytes()
    cases["Hello world!"] = List[UInt8](72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33)
    cases["\0"] = List[UInt8](0)
    cases["\0\0\0\0"] = List[UInt8](0, 0, 0, 0)
    cases["OK"] = List[UInt8](79, 75)
    cases["HTTP/1.1 200 OK"] = List[UInt8](72, 84, 84, 80, 47, 49, 46, 49, 32, 50, 48, 48, 32, 79, 75)
    for c in cases.items():
        test.assert_true(bytes_equal(bytes(c[].key), c[].value))
fn test_string_to_bytes() raises:
    """Check that `bytes` converts String values to the expected byte lists."""
    var test = MojoTest("test_string_to_bytes")
    var expected = Dict[String, Bytes]()
    expected[String("")] = Bytes()
    expected[String("Hello world!")] = List[UInt8](72, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 33)
    expected[String("\0")] = List[UInt8](0)
    expected[String("\0\0\0\0")] = List[UInt8](0, 0, 0, 0)
    expected[String("OK")] = List[UInt8](79, 75)
    expected[String("HTTP/1.1 200 OK")] = List[UInt8](72, 84, 84, 80, 47, 49, 46, 49, 32, 50, 48, 48, 32, 79, 75)
    for item in expected.items():
        test.assert_true(bytes_equal(bytes(item[].key), item[].value))
<filename>lightbug_http/tests/test_net.mojo
def test_net():
    # Entry point: only split_host_port is covered so far.
    test_split_host_port()


def test_split_host_port():
    # TODO(review): not implemented yet.
    ...
from external.gojo.tests.wrapper import MojoTest
from lightbug_http.uri import URI
from lightbug_http.strings import empty_string
from lightbug_http.io.bytes import Bytes
def test_uri():
    # Entry point: run all URI parsing tests in this file.
    test_uri_no_parse_defaults()
    test_uri_parse_http_with_port()
    test_uri_parse_https_with_port()
    test_uri_parse_http_with_path()
    test_uri_parse_https_with_path()
    test_uri_parse_http_basic()
    test_uri_parse_http_basic_www()
    test_uri_parse_http_with_query_string()
    test_uri_parse_http_with_hash()
    test_uri_parse_http_with_query_string_and_hash()
def test_uri_no_parse_defaults():
    """Verify a freshly constructed URI's defaults before parse() is called."""
    var test = MojoTest("test_uri_no_parse_defaults")
    var u = URI("http://example.com")
    test.assert_equal(String(u.full_uri()), "http://example.com")
    test.assert_equal(String(u.scheme()), "http")
    test.assert_equal(String(u.path()), "/")
def test_uri_parse_http_with_port():
    """Parse an http URI carrying an explicit port and verify all accessors."""
    var test = MojoTest("test_uri_parse_http_with_port")
    var u = URI("http://example.com:8080/index.html")
    _ = u.parse()
    test.assert_equal(String(u.scheme()), "http")
    test.assert_equal(String(u.host()), "example.com:8080")
    test.assert_equal(String(u.path()), "/index.html")
    test.assert_equal(String(u.path_original()), "/index.html")
    test.assert_equal(String(u.request_uri()), "/index.html")
    test.assert_equal(String(u.http_version()), "HTTP/1.1")
    test.assert_equal(u.is_http_1_0(), False)
    test.assert_equal(u.is_http_1_1(), True)
    test.assert_equal(u.is_https(), False)
    test.assert_equal(u.is_http(), True)
    test.assert_equal(String(u.query_string()), String(empty_string.as_bytes_slice()))
def test_uri_parse_https_with_port():
    """Parse an https URI carrying an explicit port and verify all accessors."""
    var test = MojoTest("test_uri_parse_https_with_port")
    var u = URI("https://example.com:8080/index.html")
    _ = u.parse()
    test.assert_equal(String(u.scheme()), "https")
    test.assert_equal(String(u.host()), "example.com:8080")
    test.assert_equal(String(u.path()), "/index.html")
    test.assert_equal(String(u.path_original()), "/index.html")
    test.assert_equal(String(u.request_uri()), "/index.html")
    test.assert_equal(u.is_https(), True)
    test.assert_equal(u.is_http(), False)
    test.assert_equal(String(u.query_string()), String(empty_string.as_bytes_slice()))
def test_uri_parse_http_with_path():
    """Parse an http URI with a path and verify all accessors."""
    var test = MojoTest("test_uri_parse_http_with_path")
    var u = URI("http://example.com/index.html")
    _ = u.parse()
    test.assert_equal(String(u.scheme()), "http")
    test.assert_equal(String(u.host()), "example.com")
    test.assert_equal(String(u.path()), "/index.html")
    test.assert_equal(String(u.path_original()), "/index.html")
    test.assert_equal(String(u.request_uri()), "/index.html")
    test.assert_equal(u.is_https(), False)
    test.assert_equal(u.is_http(), True)
    test.assert_equal(String(u.query_string()), String(empty_string.as_bytes_slice()))
def test_uri_parse_https_with_path():
    """Parse an https URI with a path and verify all accessors."""
    var test = MojoTest("test_uri_parse_https_with_path")
    var u = URI("https://example.com/index.html")
    _ = u.parse()
    test.assert_equal(String(u.scheme()), "https")
    test.assert_equal(String(u.host()), "example.com")
    test.assert_equal(String(u.path()), "/index.html")
    test.assert_equal(String(u.path_original()), "/index.html")
    test.assert_equal(String(u.request_uri()), "/index.html")
    test.assert_equal(u.is_https(), True)
    test.assert_equal(u.is_http(), False)
    test.assert_equal(String(u.query_string()), String(empty_string.as_bytes_slice()))
def test_uri_parse_http_basic():
    """Parse a bare http URI (no port, no path) and verify the defaults."""
    var test = MojoTest("test_uri_parse_http_basic")
    var u = URI("http://example.com")
    _ = u.parse()
    test.assert_equal(String(u.scheme()), "http")
    test.assert_equal(String(u.host()), "example.com")
    test.assert_equal(String(u.path()), "/")
    test.assert_equal(String(u.path_original()), "/")
    test.assert_equal(String(u.http_version()), "HTTP/1.1")
    test.assert_equal(String(u.request_uri()), "/")
    test.assert_equal(String(u.query_string()), String(empty_string.as_bytes_slice()))
def test_uri_parse_http_basic_www():
    """Parse a bare http URI with a www subdomain and verify the defaults."""
    var test = MojoTest("test_uri_parse_http_basic_www")
    var u = URI("http://www.example.com")
    _ = u.parse()
    test.assert_equal(String(u.scheme()), "http")
    test.assert_equal(String(u.host()), "www.example.com")
    test.assert_equal(String(u.path()), "/")
    test.assert_equal(String(u.path_original()), "/")
    test.assert_equal(String(u.request_uri()), "/")
    test.assert_equal(String(u.http_version()), "HTTP/1.1")
    test.assert_equal(String(u.query_string()), String(empty_string.as_bytes_slice()))
def test_uri_parse_http_with_query_string():
    # TODO(review): not implemented yet.
    ...


def test_uri_parse_http_with_hash():
    # TODO(review): not implemented yet.
    ...


def test_uri_parse_http_with_query_string_and_hash():
    # TODO(review): not implemented yet.
    ...
| lightbug_http/tests/test_uri.mojo | false |
from python import Python, PythonObject
from lightbug_http.io.bytes import Bytes
from lightbug_http.error import ErrorHandler
from lightbug_http.uri import URI
from lightbug_http.http import HTTPRequest, HTTPResponse, ResponseHeader
from lightbug_http.net import Listener, Addr, Connection, TCPAddr
from lightbug_http.service import HTTPService, OK
from lightbug_http.server import ServerTrait
from lightbug_http.client import Client
from lightbug_http.io.bytes import bytes
# Address the local test server listens on.
alias default_server_conn_string = "http://localhost:8080"

# Raw GET request used as a parsing/encoding fixture throughout the tests.
alias getRequest = bytes(
    "GET /foobar?baz HTTP/1.1\r\nHost: google.com\r\nUser-Agent: aaa/bbb/ccc/ddd/eee"
    " Firefox Chrome MSIE Opera\r\n"
    + "Referer: http://example.com/aaa?bbb=ccc\r\nCookie: foo=bar; baz=baraz;"
    " aa=aakslsdweriwereowriewroire\r\n\r\n"
)

# Canned response FakeClient.do returns for every request.
alias defaultExpectedGetResponse = bytes(
    "HTTP/1.1 200 OK\r\nServer: lightbug_http\r\nContent-Type:"
    " text/plain\r\nContent-Length: 12\r\nConnection: close\r\nDate: \r\n\r\nHello"
    " world!"
)
@parameter
fn new_httpx_client() -> PythonObject:
    """Import and return the Python `httpx` module, or None when the import fails."""
    try:
        return Python.import_module("httpx")
    except e:
        print("Could not set up httpx client: " + e.__str__())
        return None
fn new_fake_listener(request_count: Int, request: Bytes) -> FakeListener:
    # Factory helper mirroring the shape of the real listener constructors.
    return FakeListener(request_count, request)
struct ReqInfo:
    """Bundle of values FakeClient.extract pulls out of an HTTPRequest."""

    var full_uri: URI
    var host: String
    var is_tls: Bool

    fn __init__(inout self, full_uri: URI, host: String, is_tls: Bool):
        self.full_uri = full_uri
        self.host = host
        self.is_tls = is_tls
struct FakeClient(Client):
    """FakeClient doesn't actually send any requests, but it extracts useful information from the input.
    """

    var name: String
    var host: StringLiteral
    var port: Int
    # The three req_* fields capture what `extract` pulled from the last request.
    var req_full_uri: URI
    var req_host: String
    var req_is_tls: Bool

    fn __init__(inout self) raises:
        self.host = "127.0.0.1"
        self.port = 8888
        self.name = "lightbug_http_fake_client"
        self.req_full_uri = URI("")
        self.req_host = ""
        self.req_is_tls = False

    fn __init__(inout self, host: StringLiteral, port: Int) raises:
        self.host = host
        self.port = port
        self.name = "lightbug_http_fake_client"
        self.req_full_uri = URI("")
        self.req_host = ""
        self.req_is_tls = False

    fn do(self, req: HTTPRequest) raises -> HTTPResponse:
        # Always answers with the canned GET response; the request is ignored.
        return OK(String(defaultExpectedGetResponse))

    fn extract(inout self, req: HTTPRequest) raises -> ReqInfo:
        """Parse the request URI and record host/TLS info on the client.

        Raises when the parsed URI has an empty host.
        """
        var full_uri = req.uri()
        try:
            _ = full_uri.parse()
        except e:
            # Best effort: a parse failure is logged and extraction continues.
            print("error parsing uri: " + e.__str__())
        self.req_full_uri = full_uri
        var host = String(full_uri.host())
        if host == "":
            raise Error("URI host is nil")
        self.req_host = host
        var is_tls = full_uri.is_https()
        self.req_is_tls = is_tls
        return ReqInfo(full_uri, host, is_tls)
struct FakeServer(ServerTrait):
    """Server double that accepts fake connections until its listener closes."""

    var __listener: FakeListener
    var __handler: FakeResponder

    fn __init__(inout self, listener: FakeListener, handler: FakeResponder):
        self.__listener = listener
        self.__handler = handler

    fn __init__(
        inout self, addr: String, service: HTTPService, error_handler: ErrorHandler
    ):
        # addr/service/error_handler are accepted for trait parity but ignored.
        self.__listener = FakeListener()
        self.__handler = FakeResponder()

    fn get_concurrency(self) -> Int:
        return 1

    fn listen_and_serve(self, address: String, handler: HTTPService) raises -> None:
        ...

    fn serve(inout self) -> None:
        # Accept connections in a loop until the listener is marked closed.
        while not self.__listener.closed:
            try:
                _ = self.__listener.accept()
            except e:
                print(e)

    fn serve(self, ln: Listener, handler: HTTPService) raises -> None:
        ...
@value
struct FakeResponder(HTTPService):
    """HTTPService double that only accepts GET requests."""

    fn func(self, req: HTTPRequest) raises -> HTTPResponse:
        var method = String(req.header.method())
        if method != "GET":
            raise Error("Did not expect a non-GET request! Got: " + method)
        return OK(bytes("Hello, world!"))
@value
struct FakeConnection(Connection):
    """Connection double; reads and writes are no-ops that report zero bytes."""

    fn __init__(inout self, laddr: String, raddr: String) raises:
        ...

    fn __init__(inout self, laddr: TCPAddr, raddr: TCPAddr) raises:
        ...

    fn read(self, inout buf: Bytes) raises -> Int:
        return 0

    fn write(self, buf: Bytes) raises -> Int:
        return 0

    fn close(self) raises:
        ...

    fn local_addr(inout self) raises -> TCPAddr:
        return TCPAddr()

    fn remote_addr(self) raises -> TCPAddr:
        return TCPAddr()
@value
struct FakeListener:
    """Listener double that hands out FakeConnections.

    `closed` is the flag FakeServer.serve polls to stop accepting.
    """

    var request_count: Int
    var request: Bytes
    var closed: Bool

    fn __init__(inout self):
        self.request_count = 0
        self.request = Bytes()
        self.closed = False

    fn __init__(inout self, addr: TCPAddr):
        # addr is accepted for interface parity but ignored.
        self.request_count = 0
        self.request = Bytes()
        self.closed = False

    fn __init__(inout self, request_count: Int, request: Bytes):
        self.request_count = request_count
        self.request = request
        self.closed = False

    @always_inline
    fn accept(self) raises -> FakeConnection:
        # Note: request_count is not consumed here; accept always succeeds.
        return FakeConnection()

    fn close(self) raises:
        pass

    fn addr(self) -> TCPAddr:
        return TCPAddr()
@value
struct TestStruct:
    """Small fixture struct used to exercise value semantics in tests."""

    var a: String
    var b: String
    var c: Bytes
    var d: Int
    var e: TestStructNested

    fn __init__(inout self, a: String, b: String) -> None:
        self.a = a
        self.b = b
        self.c = bytes("c")
        self.d = 1
        self.e = TestStructNested("a", 1)

    fn set_a_direct(inout self, a: String) -> Self:
        # Mutates this instance in place, then returns a copy of it.
        self.a = a
        return self

    fn set_a_copy(self, a: String) -> Self:
        # Leaves this instance untouched; returns a new one with `a` replaced.
        return Self(a, self.b)
@value
struct TestStructNested:
    """Nested fixture struct embedded inside TestStruct."""

    var a: String
    var b: Int

    fn __init__(inout self, a: String, b: Int) -> None:
        self.a = a
        self.b = b

    fn set_a_direct(inout self, a: String) -> Self:
        # Mutates this instance in place, then returns a copy of it.
        self.a = a
        return self

    fn set_a_copy(self, a: String) -> Self:
        # Leaves this instance untouched; returns a new one with `a` replaced.
        return Self(a, self.b)
| lightbug_http/tests/utils.mojo | false |
<filename>llm.mojo/test_gpt2.mojo
from collections.vector import InlinedFixedVector
from time import now
from sys import exit
from train_gpt2 import (
GPT2,
ParameterTensors,
gpt2_forward,
gpt2_zero_grad,
gpt2_backward,
gpt2_update,
gpt2_free,
)
# Working scalar types for the test harness (must match train_gpt2.mojo).
alias dtype = DType.float32
alias FLOAT = SIMD[dtype, 1]  # scalar float of the working dtype
alias dtype_int = DType.int32
alias INT = SIMD[dtype_int, 1]  # scalar int of the working int dtype
alias SIZEOF_INT = sizeof[DType.int32]()
alias SIZEOF_FLOAT = sizeof[DType.float32]()
# poor man's tensor checker
fn check_tensor(
    inout a: DTypePointer[dtype], inout b: DTypePointer[dtype], n: Int, label: StringRef
) -> Bool:
    """Compare two length-n tensors elementwise within tolerance.

    Prints the label, an OK/NOT OK line for the first few elements, and a
    final verdict with the maximum difference. Returns True when every
    element differs by at most `tol`.
    """
    var print_upto: Int = 5
    var ok: Bool = True
    var maxdiff: FLOAT = 0.0
    var tol: FLOAT = 2e-2
    print(label)
    for i in range(n):
        # look at the difference at position i of these two tensors
        var diff = abs(a[i] - b[i])
        # keep track of the overall error
        ok = ok and (diff <= tol)
        if diff > maxdiff:
            maxdiff = diff
        # for the first few elements of each tensor, pretty print
        # the actual numbers, so we can do a visual, qualitative proof/assessment
        # (the redundant nested `if i < print_upto` checks were removed)
        if i < print_upto:
            if diff <= tol:
                print("OK ", end="")
            else:
                print("NOT OK ", end="")
            print(a[i], b[i])
    # print the final result
    if ok:
        print("TENSOR OK")
    else:
        print("TENSOR NOT OK, maxdif =", maxdiff)
    return ok
fn read_to_dtype_pointer[T: DType](inout ptr: DTypePointer[T], file_handle: FileHandle, num: Int, alloc: Bool = False) raises -> None:
    # Read `num` elements of type T from the open file into `ptr`,
    # allocating the destination buffer first when `alloc` is set.
    if alloc:
        ptr = DTypePointer[T].alloc(num)
    _ = file_handle.read(ptr, num)
fn main() raises:
    """Run 10 GPT-2 training steps and check logits, loss, and gradients
    against reference values recorded from the PyTorch implementation."""
    # build the GPT-2 model from a checkpoint
    var model = GPT2("gpt2_124M.bin")
    var C: Int = model.config.channels
    var V: Int = model.config.vocab_size
    var maxT: Int = model.config.max_seq_len
    var L: Int = model.config.num_layers

    # load additional information that we will use for debugging and error checking
    var state_file = open("gpt2_124M_debug_state.bin", "r")
    var state_header = DTypePointer[DType.int32].alloc(256)
    read_to_dtype_pointer[DType.int32](state_header, state_file, 256)
    if state_header[0] != 20240327:  # magic number of the debug-state file
        print("Bad magic model file")
        exit(1)
    if state_header[1] != 2:  # expected file-format version
        print("Bad version in model file:", state_header[1])
        exit(1)
    var B: Int = int(state_header[2])  # batch size, e.g. 4
    var T: Int = int(state_header[3])  # time / sequence length (e.g. 64, up to maxT)
    print("[State]")
    print("batch_size:", B)
    print("seq_len:", T)

    var expected_grads = ParameterTensors()
    var expected_grads_memory = expected_grads.alloc_and_point_parameters(
        model.param_sizes
    )

    # inputs and expected outputs, only used for error checking
    var x = DTypePointer[dtype_int]().alloc(B * T)
    var y = DTypePointer[dtype_int]().alloc(B * T)
    var expected_logits = DTypePointer[dtype]().alloc(B * T * V)
    var expected_loss = DTypePointer[dtype]().alloc(1)

    # read reference information from Python
    read_to_dtype_pointer[DType.int32](x, state_file, B * T)
    read_to_dtype_pointer[DType.int32](y, state_file, B * T)
    read_to_dtype_pointer[DType.float32](expected_logits, state_file, B * T * V)
    read_to_dtype_pointer[DType.float32](expected_loss, state_file, 1)
    read_to_dtype_pointer[DType.float32](expected_grads_memory, state_file, model.num_parameters)
    state_file.close()

    # overall OK signal for the test
    var allok: Bool = True
    var elapsed_time_ms = 0.0

    # let's do 10 training iterations, following the pytorch code
    # var losses = InlinedFixedVector[type=Float32,size=10](10)
    var expected_losses = List[Float32](
        5.270007133483887,
        4.059706687927246,
        3.3751230239868164,
        2.8007826805114746,
        2.315382242202759,
        1.8490285873413086,
        1.3946564197540283,
        0.9991465210914612,
        0.6240804195404053,
        0.37651097774505615,
    )
    for step in range(10):
        var start = now()
        gpt2_forward(model, x, y, B, T)
        gpt2_zero_grad(model)
        gpt2_backward(model)
        elapsed_time_ms = (now() - start) / 1_000_000
        if step == 0:
            # error checking at step 0 for reference activations/gradients
            # at this point, target should be equal to expected_logits, let's compare
            var logits_ok: Bool = True
            for i in range(B * T * V):
                if i < 3:
                    print(expected_logits[i], model.acts.logits[i])
                if abs(expected_logits[i] - model.acts.logits[i]) >= 1e-2:
                    print("MISMATCH AT INDEX " + str(i) + ":")
                    print(expected_logits[i], model.acts.logits[i])
                    logits_ok = False
                    break
            if not logits_ok:
                print("NOT ", end="")
            print("OK (LOGITS)")
            allok = allok and logits_ok

            # compare the achieved loss
            if abs(model.mean_loss - expected_loss[0]) >= 1e-2:
                print("LOSS MISMATCH:", model.mean_loss, expected_loss[0])
                allok = False
            else:
                print("LOSS OK:", model.mean_loss, expected_loss[0])

            # finally check all the gradients
            var gradoks = InlinedFixedVector[Bool, 16](16)
            gradoks[0] = check_tensor(
                model.grads.wte, expected_grads.wte, V * C, "dwte"
            )
            gradoks[1] = check_tensor(
                model.grads.wpe, expected_grads.wpe, maxT * C, "dwpe"
            )
            gradoks[2] = check_tensor(
                model.grads.ln1w, expected_grads.ln1w, L * C, "dln1w"
            )
            gradoks[3] = check_tensor(
                model.grads.ln1b, expected_grads.ln1b, L * C, "dln1b"
            )
            gradoks[4] = check_tensor(
                model.grads.qkvw, expected_grads.qkvw, L * 3 * C * C, "dqkvw"
            )
            gradoks[5] = check_tensor(
                model.grads.qkvb, expected_grads.qkvb, L * 3 * C, "dqkvb"
            )
            gradoks[6] = check_tensor(
                model.grads.attprojw, expected_grads.attprojw, L * C * C, "dattprojw"
            )
            gradoks[7] = check_tensor(
                model.grads.attprojb, expected_grads.attprojb, L * C, "dattprojb"
            )
            gradoks[8] = check_tensor(
                model.grads.ln2w, expected_grads.ln2w, L * C, "dln2w"
            )
            gradoks[9] = check_tensor(
                model.grads.ln2b, expected_grads.ln2b, L * C, "dln2b"
            )
            gradoks[10] = check_tensor(
                model.grads.fcw, expected_grads.fcw, L * 4 * C * C, "dfcw"
            )
            gradoks[11] = check_tensor(
                model.grads.fcb, expected_grads.fcb, L * 4 * C, "dfcb"
            )
            gradoks[12] = check_tensor(
                model.grads.fcprojw, expected_grads.fcprojw, L * C * 4 * C, "dfcprojw"
            )
            gradoks[13] = check_tensor(
                model.grads.fcprojb, expected_grads.fcprojb, L * C, "dfcprojb"
            )
            gradoks[14] = check_tensor(
                model.grads.lnfw, expected_grads.lnfw, C, "dlnfw"
            )
            gradoks[15] = check_tensor(
                model.grads.lnfb, expected_grads.lnfb, C, "dlnfb"
            )
            for i in range(16):
                allok = allok and gradoks[i]

        gpt2_update(model, 1e-4, 0.9, 0.999, 1e-8, 0.01, step + 1)

        # NOTE: shadows the `expected_loss` pointer declared above within this scope.
        var expected_loss = expected_losses[step]
        var actual_loss = model.mean_loss
        var step_loss_ok = abs(expected_loss - actual_loss) < 1e-2;
        allok = allok and step_loss_ok

        # print the timing information for this step
        print(
            "step "
            + str(step)
            + ": loss "
            + str(model.mean_loss)
            + " (took "
            + str(elapsed_time_ms)
            + " ms) OK = "
            + str(step_loss_ok)
        )
    print("overall okay:", allok)

    # free everything
    x.free()
    y.free()
    expected_logits.free()
    expected_loss.free()
    expected_grads_memory.free()
| llm.mojo/test_gpt2.mojo | false |
<filename>llm.mojo/train_gpt2.mojo
from algorithm import vectorize, parallelize
from collections.vector import InlinedFixedVector
from math import sqrt, rsqrt, exp, tanh, cosh, log
from memory import memset, memset_zero, memcpy
from python import Python
from time import now
from sys.info import is_apple_silicon
from sys import exit
fn get_simd_width() -> Int:
    """Return the vectorization width: 4x the native SIMD width on Apple
    silicon, 2x elsewhere."""
    var multiplier = 4 if is_apple_silicon() else 2
    return multiplier * simdwidthof[dtype]()
alias dtype = DType.float32  # must be float32 for now
alias dtype_int = DType.int32  # must be int32 for now
alias SIZEOF_FLOAT = sizeof[dtype]()
alias SIZEOF_INT = sizeof[dtype_int]()
alias SIMD_WIDTH = get_simd_width()
# presumably RNG constants — used outside this chunk; verify against the sampler.
alias RU32_HEX = 0x2545F4914F6CDD1D
alias RF32_DIV = 16777216.0  # 2^24
alias FLOAT = SIMD[dtype, 1]  # scalar float of the working dtype
alias INT = SIMD[dtype_int, 1]  # scalar int of the working int dtype
alias NULL = DTypePointer[dtype]()  # null float-pointer sentinel (e.g. "no bias")
alias NULL_INT = DTypePointer[dtype_int]()  # null int-pointer sentinel
alias M_PI: FLOAT = 3.141592653589793115997963468544185161590576171875
alias GPT2_EOT = 50256  # GPT-2 end-of-text token id
alias NUM_PARALLELIZE = 8
alias UNROLL_FACTOR = 4  # unroll factor passed to every vectorize[] call
## ----------------------------------------------------------------------------
# all the individual layers' forward and backward passes
fn encoder_forward(
    out: DTypePointer[dtype],
    inp: DTypePointer[dtype_int],
    wte: DTypePointer[dtype],
    wpe: DTypePointer[dtype],
    B: Int,
    T: Int,
    C: Int,
):
    # out[b,t,:] = wte[inp[b,t],:] + wpe[t,:] — token plus position embedding,
    # parallelized over the batch dimension.
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            # seek to the output position in out[b,t,:]
            var out_bt: DTypePointer[dtype] = out + b * T * C + t * C
            # get the index of the token at inp[b, t]
            var ix = inp[b * T + t]
            # seek to the position in wte corresponding to the token
            var wte_ix: DTypePointer[dtype] = wte + ix * C
            # seek to the position in wpe corresponding to the position
            var wpe_t: DTypePointer[dtype] = wpe + t * C

            # add the two vectors and store the result in out[b,t,:]
            @parameter
            fn _op[width: Int](iv: Int):
                out_bt.store[width=width](
                    iv, wte_ix.load[width=width](iv) + wpe_t.load[width=width](iv)
                )

            vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)

    parallelize[_calc](B)
fn encoder_backward(
    dwte: DTypePointer[dtype],
    dwpe: DTypePointer[dtype],
    dout: DTypePointer[dtype],
    inp: DTypePointer[dtype_int],
    B: Int,
    T: Int,
    C: Int,
):
    # Scatter dout back into the token (dwte) and position (dwpe) embedding grads.
    # NOTE(review): _calc is parallelized over b, but the same token id can occur
    # in different batches, so dwte rows may be accumulated concurrently — confirm
    # this is benign or intended.
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            var dout_bt: DTypePointer[dtype] = dout + b * T * C + t * C
            var ix = inp[b * T + t]
            var dwte_ix: DTypePointer[dtype] = dwte + ix * C
            var dwpe_t: DTypePointer[dtype] = dwpe + t * C

            @parameter
            fn _op[width: Int](iv: Int):
                var d = dout_bt.load[width=width](iv)
                dwte_ix.store[width=width](iv, dwte_ix.load[width=width](iv) + d)
                dwpe_t.store[width=width](iv, dwpe_t.load[width=width](iv) + d)

            vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)

    parallelize[_calc](B)
fn layernorm_forward(
    out: DTypePointer[dtype],
    mean: DTypePointer[dtype],
    rstd: DTypePointer[dtype],
    inp: DTypePointer[dtype],
    weight: DTypePointer[dtype],
    bias: DTypePointer[dtype],
    B: Int,
    T: Int,
    C: Int,
):
    # LayerNorm over the channel dimension C for each (b, t) position.
    # Per-position mean and reciprocal std are cached for the backward pass.
    var eps: FLOAT = 1e-5

    @parameter
    fn _calc(b: Int):
        for t in range(T):
            # seek to the input position inp[b,t,:]
            var x: DTypePointer[dtype] = inp + b * T * C + t * C

            # calculate the mean
            var m: FLOAT = 0.0

            @parameter
            fn _op[width: Int](iv: Int):
                m += x.load[width=width](iv).reduce_add[1]()

            vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)
            m = m / C

            # calculate the variance (without any bias correction)
            var v: FLOAT = 0.0

            @parameter
            fn _op2[width: Int](iv: Int):
                var xshift = x.load[width=width](iv) - m
                v += pow(xshift, 2).reduce_add[1]()

            vectorize[_op2, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)
            v = v / C

            # calculate the rstd
            var s: FLOAT = 1.0 / sqrt(v + eps)

            # seek to the output position in out[b,t,:]
            var out_bt: DTypePointer[dtype] = out + b * T * C + t * C

            @parameter
            fn _op3[width: Int](iv: Int):
                var n = s * (x.load[width=width](iv) - m)  # normalized output
                out_bt.store[width=width](
                    iv, n * weight.load[width=width](iv) + bias.load[width=width](iv)
                )  # scale and shift it

            vectorize[_op3, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)

            # cache the mean and rstd for the backward pass later
            mean[b * T + t] = m
            rstd[b * T + t] = s

    parallelize[_calc](B)
fn layernorm_backward(
    dinp: DTypePointer[dtype],
    dweight: DTypePointer[dtype],
    dbias: DTypePointer[dtype],
    dout: DTypePointer[dtype],
    inp: DTypePointer[dtype],
    weight: DTypePointer[dtype],
    mean: DTypePointer[dtype],
    rstd: DTypePointer[dtype],
    B: Int,
    T: Int,
    C: Int,
):
    # Backward pass of LayerNorm using the cached mean/rstd; accumulates into
    # dinp, dweight, and dbias.
    # NOTE(review): parallelized over b while dweight/dbias are shared across
    # batches — concurrent accumulation; confirm this is benign or intended.
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            var dout_bt: DTypePointer[dtype] = dout + b * T * C + t * C
            var inp_bt: DTypePointer[dtype] = inp + b * T * C + t * C
            var dinp_bt: DTypePointer[dtype] = dinp + b * T * C + t * C
            var mean_bt: FLOAT = mean[b * T + t]
            var rstd_bt: FLOAT = rstd[b * T + t]

            # first: two reduce operations
            var dnorm_mean: FLOAT = 0.0
            var dnorm_norm_mean: FLOAT = 0.0

            @parameter
            fn _op[width: Int](iv: Int):
                var norm_bti = (inp_bt.load[width=width](iv) - mean_bt) * rstd_bt
                var dnorm_i = weight.load[width=width](iv) * dout_bt.load[width=width](
                    iv
                )
                dnorm_mean += dnorm_i.reduce_add[1]()
                dnorm_norm_mean += (dnorm_i * norm_bti).reduce_add[1]()

            vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)
            dnorm_mean = dnorm_mean / C
            dnorm_norm_mean = dnorm_norm_mean / C

            # now iterate again and accumulate all the gradients
            @parameter
            fn _op2[width: Int](iv: Int):
                var norm_bti = (inp_bt.load[width=width](iv) - mean_bt) * rstd_bt
                var dnorm_i = weight.load[width=width](iv) * dout_bt.load[width=width](
                    iv
                )
                # gradient contribution to bias
                dbias.store[width=width](
                    iv, dbias.load[width=width](iv) + dout_bt.load[width=width](iv)
                )
                # gradient contribution to weight
                dweight.store[width=width](
                    iv,
                    dweight.load[width=width](iv)
                    + norm_bti * dout_bt.load[width=width](iv),
                )
                # gradient contribution to input
                dinp_bt.store[width=width](
                    iv,
                    dinp_bt.load[width=width](iv)
                    + (dnorm_i - dnorm_mean - (norm_bti * dnorm_norm_mean)) * rstd_bt,
                )

            vectorize[_op2, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)

    parallelize[_calc](B)
fn matmul_forward(
    out: DTypePointer[dtype],
    inp: DTypePointer[dtype],
    weight: DTypePointer[dtype],
    bias: DTypePointer[dtype],
    B: Int,
    T: Int,
    C: Int,
    OC: Int,
):
    # most of the running time is spent here and in matmul_backward
    # OC is short for "output channels"
    # inp is (B,T,C), weight is (OC, C), bias is (OC)
    # out will be (B,T,OC)
    # pragma omp parallel for collapse(2)
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            var out_bt: DTypePointer[dtype] = out + b * T * OC + t * OC
            var inp_bt: DTypePointer[dtype] = inp + b * T * C + t * C
            for o in range(OC):
                # start from the bias (if present), then add the dot product
                var val: FLOAT = 0.0
                if bias != NULL:
                    val = bias[o]
                var wrow: DTypePointer[dtype] = weight + o * C

                @parameter
                fn _op[width: Int](iv: Int):
                    var t = inp_bt.load[width=width](iv) * wrow.load[width=width](iv)
                    val += t.reduce_add[1]()

                vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)
                out_bt[o] = val

    parallelize[_calc](B)
fn matmul_backward(
    dinp: DTypePointer[dtype],
    dweight: DTypePointer[dtype],
    dbias: DTypePointer[dtype],
    dout: DTypePointer[dtype],
    inp: DTypePointer[dtype],
    weight: DTypePointer[dtype],
    B: Int,
    T: Int,
    C: Int,
    OC: Int,
):
    # most of the running time is spent here and in matmul_forward
    # this backward could be done in a single "round" of loops
    # but that doesn't afford an efficient parallelization strategy

    # backward into inp first, parallelize over B,T
    # pragma omp parallel for collapse(2)
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            var dout_bt: DTypePointer[dtype] = dout + b * T * OC + t * OC
            var dinp_bt: DTypePointer[dtype] = dinp + b * T * C + t * C
            for o in range(OC):
                var wrow: DTypePointer[dtype] = weight + o * C
                var d: FLOAT = dout_bt[o]

                @parameter
                fn _op[width: Int](iv: Int):
                    dinp_bt.store[width=width](
                        iv,
                        dinp_bt.load[width=width](iv) + wrow.load[width=width](iv) * d,
                    )  # scale and shift it

                vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)

    parallelize[_calc](B)

    # backward into weight/bias, parallelize over output channels OC
    # (each o owns its dweight row and dbias entry, so this split avoids races)
    # pragma omp parallel for
    @parameter
    fn _calc2(o: Int):
        for b in range(B):
            for t in range(T):
                var dout_bt: DTypePointer[dtype] = dout + b * T * OC + t * OC
                var inp_bt: DTypePointer[dtype] = inp + b * T * C + t * C
                var dwrow: DTypePointer[dtype] = dweight + o * C
                var d: FLOAT = dout_bt[o]
                if dbias != NULL:
                    dbias[o] += d

                @parameter
                fn _op[width: Int](iv: Int):
                    dwrow.store[width=width](
                        iv,
                        dwrow.load[width=width](iv) + inp_bt.load[width=width](iv) * d,
                    )  # scale and shift it

                vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=C)

    parallelize[_calc2](OC)
fn attention_forward(
    out: DTypePointer[dtype],
    preatt: DTypePointer[dtype],
    att: DTypePointer[dtype],
    inp: DTypePointer[dtype],
    B: Int,
    T: Int,
    C: Int,
    NH: Int,
):
    # Causal multi-head self-attention forward pass.
    # input is (B, T, 3C) Q,K,V
    # preatt, att are (B, NH, T, T)
    # output is (B, T, C)
    var C3: Int = C * 3
    var hs: Int = C // NH  # head size
    var scale: FLOAT = 1.0 / sqrt(hs)

    # pragma omp parallel for collapse(3)
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            for h in range(NH):
                var query_t: DTypePointer[dtype] = inp + b * T * C3 + t * C3 + h * hs
                var preatt_bth: DTypePointer[
                    dtype
                ] = preatt + b * NH * T * T + h * T * T + t * T
                var att_bth: DTypePointer[
                    dtype
                ] = att + b * NH * T * T + h * T * T + t * T

                # pass 1: calculate query dot key and maxval
                var maxval: FLOAT = -10000.0  # TODO something better
                for t2 in range(t + 1):
                    var key_t2: DTypePointer[
                        dtype
                    ] = inp + b * T * C3 + t2 * C3 + h * hs + C  # +C because it's key

                    # (query_t) dot (key_t2)
                    var val: FLOAT = 0.0

                    @parameter
                    fn _op[width: Int](iv: Int):
                        var t = query_t.load[width=width](iv) * key_t2.load[
                            width=width
                        ](iv)
                        val += t.reduce_add[1]()

                    vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=hs)
                    val *= scale
                    if val > maxval:
                        maxval = val
                    preatt_bth[t2] = val

                # pass 2: calculate the exp and keep track of sum
                # (maxval is subtracted for numerical stability of the softmax)
                var expsum: FLOAT = 0.0

                @parameter
                fn _op2[width: Int](iv: Int):
                    var expv = exp(preatt_bth.load[width=width](iv) - maxval)
                    expsum += expv.reduce_add[1]()
                    att_bth.store[width=width](iv, expv)

                vectorize[_op2, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=t + 1)
                var expsum_inv: FLOAT = 0.0
                if expsum != 0.0:
                    expsum_inv = 1.0 / expsum

                # pass 3: normalize to get the softmax
                @parameter
                fn _op3[width: Int](t2: Int):
                    att_bth.store[width=width](
                        t2, att_bth.load[width=width](t2) * expsum_inv
                    )

                vectorize[_op3, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=t + 1)
                # zero out the causally-masked (future) attention entries
                memset_zero(att_bth + t + 1, T - t - 1)

                # pass 4: accumulate weighted values into the output of attention
                var out_bth: DTypePointer[dtype] = out + b * T * C + t * C + h * hs
                memset_zero(out_bth, hs)
                for t2 in range(t + 1):
                    var value_t2 = inp + b * T * C3 + t2 * C3 + h * hs + C * 2  # +C*2 because it's value
                    var att_btht2: FLOAT = att_bth[t2]

                    @parameter
                    fn _op4[width: Int](iv: Int):
                        out_bth.store[width=width](
                            iv,
                            out_bth.load[width=width](iv)
                            + att_btht2 * value_t2.load[width=width](iv),
                        )

                    vectorize[_op4, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=hs)

    parallelize[_calc](B)
fn attention_backward(
    dinp: DTypePointer[dtype],
    dpreatt: DTypePointer[dtype],
    datt: DTypePointer[dtype],
    dout: DTypePointer[dtype],
    inp: DTypePointer[dtype],
    att: DTypePointer[dtype],
    B: Int,
    T: Int,
    C: Int,
    NH: Int,
):
    # Backward of multi-head causal self-attention, undoing the four passes of
    # attention_forward in reverse order. All gradients are ACCUMULATED (+=)
    # into dinp/dpreatt/datt, so those buffers must be zeroed by the caller.
    # inp/dinp are (B, T, 3C) Q,K,V
    # att/datt/dpreatt are (B, NH, T, T)
    # dout is (B, T, C)
    var C3: Int = C * 3
    var hs: Int = C // NH  # head size
    var scale: FLOAT = 1.0 / sqrt(hs)  # same 1/sqrt(d_k) factor as the forward pass
    # parallelized over the batch dimension only; each task owns its b-slice
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            for h in range(NH):
                var att_bth: DTypePointer[
                    dtype
                ] = att + b * NH * T * T + h * T * T + t * T
                var datt_bth: DTypePointer[
                    dtype
                ] = datt + b * NH * T * T + h * T * T + t * T
                var dpreatt_bth: DTypePointer[
                    dtype
                ] = dpreatt + b * NH * T * T + h * T * T + t * T
                var dquery_t: DTypePointer[dtype] = dinp + b * T * C3 + t * C3 + h * hs
                var query_t: DTypePointer[dtype] = inp + b * T * C3 + t * C3 + h * hs
                # backward pass 4, through the value accumulation
                var dout_bth: DTypePointer[dtype] = dout + b * T * C + t * C + h * hs
                for t2 in range(t + 1):
                    var value_t2: DTypePointer[
                        dtype
                    ] = inp + b * T * C3 + t2 * C3 + h * hs + C * 2  # +C*2 because it's value
                    var dvalue_t2: DTypePointer[
                        dtype
                    ] = dinp + b * T * C3 + t2 * C3 + h * hs + C * 2
                    @parameter
                    fn _op[width: Int](iv: Int):
                        # for i in range(hs):
                        # in the forward pass this was:
                        # out_bth[i] += att_bth[t2] * value_t2[i]
                        # so now we have:
                        datt_bth[t2] += (
                            value_t2.load[width=width](iv)
                            * dout_bth.load[width=width](iv)
                        ).reduce_add[1]()
                        dvalue_t2.store[width=width](
                            iv,
                            dvalue_t2.load[width=width](iv)
                            + att_bth[t2] * dout_bth.load[width=width](iv),
                        )
                    vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=hs)
                # backward pass 2 & 3, the softmax
                # note that softmax (like e.g. tanh) doesn't need the input (preatt) to backward
                for t2 in range(t + 1):
                    # off-diagonal part of the softmax Jacobian: -p[t2] * p[t3]
                    @parameter
                    fn _op3[width: Int](t3: Int):
                        var local_derivative = -att_bth[t2] * att_bth.load[width=width](
                            t3
                        )
                        dpreatt_bth.store[width=width](
                            t3,
                            dpreatt_bth.load[width=width](t3)
                            + local_derivative * datt_bth[t2],
                        )
                    vectorize[_op3, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=t + 1)
                    # diagonal correction: adds p[t2]*(1 - p[t2]) net for t3 == t2
                    dpreatt_bth[t2] += att_bth[t2] * datt_bth[t2]
                # backward pass 1, the query @ key matmul
                for t2 in range(t + 1):
                    var key_t2: DTypePointer[
                        dtype
                    ] = inp + b * T * C3 + t2 * C3 + h * hs + C  # +C because it's key
                    var dkey_t2: DTypePointer[
                        dtype
                    ] = dinp + b * T * C3 + t2 * C3 + h * hs + C  # +C because it's key
                    @parameter
                    fn _op2[width: Int](iv: Int):
                        # for i in range(hs):
                        # in the forward pass this was:
                        # preatt_bth[t2] += (query_t[i] * key_t2[i]) * scale
                        # so now we have:
                        dquery_t.store[width=width](
                            iv,
                            dquery_t.load[width=width](iv)
                            + key_t2.load[width=width](iv) * dpreatt_bth[t2] * scale,
                        )
                        dkey_t2.store[width=width](
                            iv,
                            dkey_t2.load[width=width](iv)
                            + query_t.load[width=width](iv) * dpreatt_bth[t2] * scale,
                        )
                    vectorize[_op2, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=hs)
    parallelize[_calc](B)
fn gelu_forward(out: DTypePointer[dtype], inp: DTypePointer[dtype], N: Int):
    # Elementwise GELU activation (tanh approximation) over N values:
    #   out[i] = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    var s: FLOAT = sqrt(2.0 / M_PI)
    # each of the NUM_PARALLELIZE tasks handles a contiguous chunk of
    # num_vectorize elements
    var num_vectorize = N // NUM_PARALLELIZE
    @parameter
    fn _calc(ip: Int):
        @parameter
        fn _op[width: Int](_iv: Int):
            var iv = ip * num_vectorize + _iv
            var x = inp.load[width=width](iv)
            var cube = 0.044715 * pow(x, 3)
            out.store[width=width](iv, 0.5 * x * (1.0 + tanh(s * (x + cube))))
        vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](num_vectorize)
    parallelize[_calc](NUM_PARALLELIZE)
    # fix: the parallel partition above covers only num_vectorize * NUM_PARALLELIZE
    # elements; process the remainder when N % NUM_PARALLELIZE != 0
    for iv in range(num_vectorize * NUM_PARALLELIZE, N):
        var x = inp[iv]
        var cube = 0.044715 * pow(x, 3)
        out[iv] = 0.5 * x * (1.0 + tanh(s * (x + cube)))
fn gelu_backward(
    dinp: DTypePointer[dtype],
    inp: DTypePointer[dtype],
    dout: DTypePointer[dtype],
    N: Int,
):
    # Backward of the tanh-approximation GELU: dinp += gelu'(inp) * dout,
    # accumulated elementwise over N values.
    var s: FLOAT = sqrt(2.0 / M_PI)
    var num_vectorize = N // NUM_PARALLELIZE
    @parameter
    fn _calc(ip: Int):
        @parameter
        fn _op[width: Int](_iv: Int):
            var iv = ip * num_vectorize + _iv
            var x = inp.load[width=width](iv)
            var cube = 0.044715 * pow(x, 3)
            var tanh_arg = s * (x + cube)
            var tanh_out = tanh(tanh_arg)
            var coshf_out = cosh(tanh_arg)
            # sech^2 is the derivative of tanh
            var sech_out = 1.0 / (coshf_out * coshf_out)
            var local_grad = 0.5 * (1.0 + tanh_out) + x * 0.5 * sech_out * s * (
                1.0 + 3.0 * 0.044715 * x * x
            )
            dinp.store[width=width](
                iv, dinp.load[width=width](iv) + local_grad * dout.load[width=width](iv)
            )
        vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](num_vectorize)
    parallelize[_calc](NUM_PARALLELIZE)
    # fix: the parallel partition above covers only num_vectorize * NUM_PARALLELIZE
    # elements; process the remainder when N % NUM_PARALLELIZE != 0
    for iv in range(num_vectorize * NUM_PARALLELIZE, N):
        var x = inp[iv]
        var cube = 0.044715 * pow(x, 3)
        var tanh_arg = s * (x + cube)
        var tanh_out = tanh(tanh_arg)
        var cosh_out = cosh(tanh_arg)
        var sech_out = 1.0 / (cosh_out * cosh_out)
        var local_grad = 0.5 * (1.0 + tanh_out) + x * 0.5 * sech_out * s * (
            1.0 + 3.0 * 0.044715 * x * x
        )
        dinp[iv] = dinp[iv] + local_grad * dout[iv]
fn residual_forward(
    out: DTypePointer[dtype],
    inp1: DTypePointer[dtype],
    inp2: DTypePointer[dtype],
    N: Int,
):
    # Residual (skip) connection: out = inp1 + inp2, elementwise over N values.
    var num_vectorize = N // NUM_PARALLELIZE
    @parameter
    fn _calc(ip: Int):
        @parameter
        fn _op[width: Int](_iv: Int):
            var iv = ip * num_vectorize + _iv
            out.store[width=width](
                iv, inp1.load[width=width](iv) + inp2.load[width=width](iv)
            )
        vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](num_vectorize)
    parallelize[_calc](NUM_PARALLELIZE)
    # fix: the parallel partition above covers only num_vectorize * NUM_PARALLELIZE
    # elements; process the remainder when N % NUM_PARALLELIZE != 0
    for iv in range(num_vectorize * NUM_PARALLELIZE, N):
        out[iv] = inp1[iv] + inp2[iv]
fn residual_backward(
    dinp1: DTypePointer[dtype],
    dinp2: DTypePointer[dtype],
    dout: DTypePointer[dtype],
    N: Int,
):
    # Backward of the residual connection: the upstream gradient flows
    # unchanged into both branches, accumulated (+=) elementwise.
    var num_vectorize = N // NUM_PARALLELIZE
    @parameter
    fn _calc(ip: Int):
        @parameter
        fn _op[width: Int](_iv: Int):
            var iv = ip * num_vectorize + _iv
            dinp1.store[width=width](
                iv, dinp1.load[width=width](iv) + dout.load[width=width](iv)
            )
            dinp2.store[width=width](
                iv, dinp2.load[width=width](iv) + dout.load[width=width](iv)
            )
        vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](num_vectorize)
    parallelize[_calc](NUM_PARALLELIZE)
    # fix: the parallel partition above covers only num_vectorize * NUM_PARALLELIZE
    # elements; process the remainder when N % NUM_PARALLELIZE != 0
    for iv in range(num_vectorize * NUM_PARALLELIZE, N):
        dinp1[iv] = dinp1[iv] + dout[iv]
        dinp2[iv] = dinp2[iv] + dout[iv]
fn softmax_forward(
    probs: DTypePointer[dtype], logits: DTypePointer[dtype], B: Int, T: Int,V:Int, Vp: Int
):
    # Row-wise softmax over the vocabulary dimension.
    # output: probs are (B,T,Vp) of the probabilities (sums to 1.0 in each b,t position)
    # input: logits is (B,T,Vp) of the unnormalized log probabilities
    # Vp is the padded vocab size (for efficiency), V is the "real" vocab size
    # example: Vp is 50304 and V is 50257
    @parameter
    fn _calc(b: Int):
        # for b in range(B):
        for t in range(T):
            # probs <- softmax(logits)
            var logits_bt: DTypePointer[dtype] = logits + b * T * Vp + t * Vp
            var probs_bt: DTypePointer[dtype] = probs + b * T * Vp + t * Vp
            # max over the first V (real) entries only, subtracted below so
            # that exp() cannot overflow
            var maxval: FLOAT = -10000.0  # TODO something better
            for i in range(V):
                if logits_bt[i] > maxval:
                    maxval = logits_bt[i]
            var sum: FLOAT = 0.0
            @parameter
            fn _op[width: Int](iv: Int):
                probs_bt.store[width=width](
                    iv, exp(logits_bt.load[width=width](iv) - maxval)
                )
                sum += probs_bt.load[width=width](iv).reduce_add[1]()
            vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=V)
            @parameter
            fn _op2[width: Int](iv: Int):
                probs_bt.store[width=width](
                    iv, probs_bt.load[width=width](iv) / sum
                )  # normalize so the V real entries sum to 1
            vectorize[_op2, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=V)
            # for extra super safety we may wish to include this too,
            # forcing the probabilities here to be zero, but it shouldn't matter
            @parameter
            fn _op3[width: Int](iv: Int):
                probs_bt.store[width=width](
                    iv+V,0.0
                )
            vectorize[_op3, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=Vp-V)
    parallelize[_calc](B)
fn crossentropy_forward(
    losses: DTypePointer[dtype],
    probs: DTypePointer[dtype],
    targets: DTypePointer[dtype_int],
    B: Int,
    T: Int,
    Vp: Int
):
    # Per-position cross-entropy loss.
    # output: losses is (B,T) of the individual losses at each position
    # input: probs are (B,T,Vp) of the probabilities
    # input: targets is (B,T) of integers giving the correct index in logits
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            # negative log-likelihood of the target token at position (b, t)
            var bt = b * T + t
            var row: DTypePointer[dtype] = probs + bt * Vp
            var target = targets[bt]
            losses[bt] = -log(row.load(target))
    parallelize[_calc](B)
fn crossentropy_softmax_backward(
    dlogits: DTypePointer[dtype],
    dlosses: DTypePointer[dtype],
    probs: DTypePointer[dtype],
    targets: DTypePointer[dtype_int],
    B: Int,
    T: Int,
    V: Int,
    Vp: Int
):
    # backwards through both softmax and crossentropy
    # the combined gradient simplifies to (probs - one_hot(target)) * dloss,
    # accumulated (+=) into dlogits
    @parameter
    fn _calc(b: Int):
        for t in range(T):
            var dlogits_bt: DTypePointer[dtype] = dlogits + b * T * Vp + t * Vp
            var probs_bt: DTypePointer[dtype] = probs + b * T * Vp + t * Vp
            var dloss: FLOAT = dlosses[b * T + t]
            var ix = targets[b * T + t]
            # only the first V (real vocab) entries receive gradient; the
            # padded region [V, Vp) is left untouched
            @parameter
            fn _op[width: Int](iv: Int):
                dlogits_bt.store[width=width](
                    iv,
                    dlogits_bt.load[width=width](iv)
                    + probs_bt.load[width=width](iv) * dloss,
                )
            vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](size=V)
            # subtract the one-hot target contribution; the bounds check
            # guards against an out-of-range target index
            if ix >= 0 and ix < V:
                dlogits_bt.store(ix,dlogits_bt.load(ix)-dloss)
    parallelize[_calc](B)
# ----------------------------------------------------------------------------
# GPT-2 model definition
# the parameters of the model
alias NUM_PARAMETER_TENSORS = 16
struct ParameterTensors:
    # All learnable weights of the GPT-2 model, stored as one contiguous
    # allocation (params_memory) with named views into it for each tensor.
    var params_memory: DTypePointer[dtype]
    var wte: DTypePointer[dtype]  # (V, C)
    var wpe: DTypePointer[dtype]  # (maxT, C)
    var ln1w: DTypePointer[dtype]  # (L, C)
    var ln1b: DTypePointer[dtype]  # (L, C)
    var qkvw: DTypePointer[dtype]  # (L, 3*C, C)
    var qkvb: DTypePointer[dtype]  # (L, 3*C)
    var attprojw: DTypePointer[dtype]  # (L, C, C)
    var attprojb: DTypePointer[dtype]  # (L, C)
    var ln2w: DTypePointer[dtype]  # (L, C)
    var ln2b: DTypePointer[dtype]  # (L, C)
    var fcw: DTypePointer[dtype]  # (L, 4*C, C)
    var fcb: DTypePointer[dtype]  # (L, 4*C)
    var fcprojw: DTypePointer[dtype]  # (L, C, 4*C)
    var fcprojb: DTypePointer[dtype]  # (L, C)
    var lnfw: DTypePointer[dtype]  # (C)
    var lnfb: DTypePointer[dtype]  # (C)
    fn __init__(
        inout self
    ):
        # Start with all views null; alloc_and_point_parameters wires them up.
        self.params_memory = DTypePointer[dtype]()
        self.wte = DTypePointer[dtype]()
        self.wpe = DTypePointer[dtype]()
        self.ln1w = DTypePointer[dtype]()
        self.ln1b = DTypePointer[dtype]()
        self.qkvw = DTypePointer[dtype]()
        self.qkvb = DTypePointer[dtype]()
        self.attprojw = DTypePointer[dtype]()
        self.attprojb = DTypePointer[dtype]()
        self.ln2w = DTypePointer[dtype]()
        self.ln2b = DTypePointer[dtype]()
        self.fcw = DTypePointer[dtype]()
        self.fcb = DTypePointer[dtype]()
        self.fcprojw = DTypePointer[dtype]()
        self.fcprojb = DTypePointer[dtype]()
        self.lnfw = DTypePointer[dtype]()
        self.lnfb = DTypePointer[dtype]()
    fn alloc_and_point_parameters(
        inout self,
        param_sizes: InlinedFixedVector[type=Int, size=NUM_PARAMETER_TENSORS],
    ) -> DTypePointer[dtype]:
        # Allocate one buffer big enough for all tensors and point each named
        # field at its slice. Returns the owning pointer (caller frees it).
        var num_parameters: Int = 0
        for i in range(NUM_PARAMETER_TENSORS):
            num_parameters += param_sizes[i]
        # malloc all parameters all at once
        self.params_memory = DTypePointer[dtype]().alloc(num_parameters)
        # assign all the tensors
        # note: order here must match the order of param_sizes entries
        var ptrs = List(
            Pointer.address_of(self.wte),
            Pointer.address_of(self.wpe),
            Pointer.address_of(self.ln1w),
            Pointer.address_of(self.ln1b),
            Pointer.address_of(self.qkvw),
            Pointer.address_of(self.qkvb),
            Pointer.address_of(self.attprojw),
            Pointer.address_of(self.attprojb),
            Pointer.address_of(self.ln2w),
            Pointer.address_of(self.ln2b),
            Pointer.address_of(self.fcw),
            Pointer.address_of(self.fcb),
            Pointer.address_of(self.fcprojw),
            Pointer.address_of(self.fcprojb),
            Pointer.address_of(self.lnfw),
            Pointer.address_of(self.lnfb),
        )
        var params_memory_iterator: DTypePointer[dtype] = self.params_memory
        for i in range(NUM_PARAMETER_TENSORS):
            ptrs[i][] = params_memory_iterator
            params_memory_iterator += param_sizes[i]
        return self.params_memory
alias NUM_ACTIVATION_TENSORS = 23
@value
struct ActivationTensors:
    # All forward-pass activations (and scratch) of the model, stored as one
    # contiguous allocation with named views, mirroring ParameterTensors.
    var encoded: DTypePointer[dtype]  # (B, T, C)
    var ln1: DTypePointer[dtype]  # (L, B, T, C)
    var ln1_mean: DTypePointer[dtype]  # (L, B, T)
    var ln1_rstd: DTypePointer[dtype]  # (L, B, T)
    var qkv: DTypePointer[dtype]  # (L, B, T, 3*C)
    var atty: DTypePointer[dtype]  # (L, B, T, C)
    var preatt: DTypePointer[dtype]  # (L, B, NH, T, T)
    var att: DTypePointer[dtype]  # (L, B, NH, T, T)
    var attproj: DTypePointer[dtype]  # (L, B, T, C)
    var residual2: DTypePointer[dtype]  # (L, B, T, C)
    var ln2: DTypePointer[dtype]  # (L, B, T, C)
    var ln2_mean: DTypePointer[dtype]  # (L, B, T)
    var ln2_rstd: DTypePointer[dtype]  # (L, B, T)
    var fch: DTypePointer[dtype]  # (L, B, T, 4*C)
    var fch_gelu: DTypePointer[dtype]  # (L, B, T, 4*C)
    var fcproj: DTypePointer[dtype]  # (L, B, T, C)
    var residual3: DTypePointer[dtype]  # (L, B, T, C)
    var lnf: DTypePointer[dtype]  # (B, T, C)
    var lnf_mean: DTypePointer[dtype]  # (B, T)
    var lnf_rstd: DTypePointer[dtype]  # (B, T)
    var logits: DTypePointer[dtype]  # (B, T, V)
    var probs: DTypePointer[dtype]  # (B, T, V)
    var losses: DTypePointer[dtype]  # (B, T)
    fn __init__(
        inout self,
    ):
        # Start with all views null; alloc_and_point_activations wires them up.
        self.encoded = DTypePointer[dtype]()
        self.ln1 = DTypePointer[dtype]()
        self.ln1_mean = DTypePointer[dtype]()
        self.ln1_rstd = DTypePointer[dtype]()
        self.qkv = DTypePointer[dtype]()
        self.atty = DTypePointer[dtype]()
        self.preatt = DTypePointer[dtype]()
        self.att = DTypePointer[dtype]()
        self.attproj = DTypePointer[dtype]()
        self.residual2 = DTypePointer[dtype]()
        self.ln2 = DTypePointer[dtype]()
        self.ln2_mean = DTypePointer[dtype]()
        self.ln2_rstd = DTypePointer[dtype]()
        self.fch = DTypePointer[dtype]()
        self.fch_gelu = DTypePointer[dtype]()
        self.fcproj = DTypePointer[dtype]()
        self.residual3 = DTypePointer[dtype]()
        self.lnf = DTypePointer[dtype]()
        self.lnf_mean = DTypePointer[dtype]()
        self.lnf_rstd = DTypePointer[dtype]()
        self.logits = DTypePointer[dtype]()
        self.probs = DTypePointer[dtype]()
        self.losses = DTypePointer[dtype]()
    fn alloc_and_point_activations(
        inout self, act_sizes: InlinedFixedVector[type=Int, size=NUM_ACTIVATION_TENSORS]
    ) -> DTypePointer[dtype]:
        # Allocate one buffer sized to the sum of act_sizes and point each
        # named field at its slice. Returns the owning pointer (caller frees).
        # note: order here must match the order of act_sizes entries
        var ptrs = List(
            Pointer.address_of(self.encoded),
            Pointer.address_of(self.ln1),
            Pointer.address_of(self.ln1_mean),
            Pointer.address_of(self.ln1_rstd),
            Pointer.address_of(self.qkv),
            Pointer.address_of(self.atty),
            Pointer.address_of(self.preatt),
            Pointer.address_of(self.att),
            Pointer.address_of(self.attproj),
            Pointer.address_of(self.residual2),
            Pointer.address_of(self.ln2),
            Pointer.address_of(self.ln2_mean),
            Pointer.address_of(self.ln2_rstd),
            Pointer.address_of(self.fch),
            Pointer.address_of(self.fch_gelu),
            Pointer.address_of(self.fcproj),
            Pointer.address_of(self.residual3),
            Pointer.address_of(self.lnf),
            Pointer.address_of(self.lnf_mean),
            Pointer.address_of(self.lnf_rstd),
            Pointer.address_of(self.logits),
            Pointer.address_of(self.probs),
            Pointer.address_of(self.losses),
        )
        var num_activations: Int = 0
        for i in range(NUM_ACTIVATION_TENSORS):
            num_activations += act_sizes[i]
        var acts_memory = DTypePointer[dtype]().alloc(num_activations)
        var acts_memory_iterator: DTypePointer[dtype] = acts_memory
        for i in range(NUM_ACTIVATION_TENSORS):
            ptrs[i][] = acts_memory_iterator
            acts_memory_iterator += act_sizes[i]
        return acts_memory
@value
struct GPT2Config:
    # Model hyperparameters, read from the checkpoint file header in GPT2.__init__.
    var max_seq_len: Int # max sequence length, e.g. 1024
    var vocab_size: Int # vocab size, e.g. 50257
    var num_layers: Int # number of layers, e.g. 12
    var num_heads: Int # number of heads in attention, e.g. 12
    var channels: Int # number of channels, e.g. 768
    var padded_vocab_size:Int # padded to e.g. %128==0, 50304
struct GPT2:
    # Full model state: weights, gradients, AdamW moments, activations and
    # their gradients, plus the cached inputs/targets of the current batch.
    var config: GPT2Config
    # the weights of the model, and their sizes
    var params: ParameterTensors
    var param_sizes: InlinedFixedVector[type=Int, size=NUM_PARAMETER_TENSORS]
    var params_memory: DTypePointer[dtype]
    var num_parameters: Int
    # gradients of the weights
    var grads: ParameterTensors
    var grads_memory: DTypePointer[dtype]
    # buffers for the AdamW optimizer
    var m_memory: DTypePointer[dtype]
    var v_memory: DTypePointer[dtype]
    # the activations of the model, and their sizes
    var acts: ActivationTensors
    var act_sizes: InlinedFixedVector[type=Int, size=NUM_ACTIVATION_TENSORS]
    var acts_memory: DTypePointer[dtype]
    var num_activations: Int
    # gradients of the activations
    var grads_acts: ActivationTensors
    var grads_acts_memory: DTypePointer[dtype]
    # other run state configuration
    var batch_size: Int  # the batch size (B) of current forward pass
    var seq_len: Int  # the sequence length (T) of current forward pass
    var inputs: DTypePointer[dtype_int]  # the input tokens for the current forward pass
    var targets: DTypePointer[
        dtype_int
    ]  # the target tokens for the current forward pass
    var mean_loss: FLOAT  # after a forward pass with targets, will be populated with the mean loss
    var checkpoint_path: StringRef
    fn __init__(inout self, checkpoint_path: StringRef) raises:
        # Load a model checkpoint from disk: validate the header, read the
        # config, compute per-tensor sizes, and read all weights into one
        # contiguous buffer. Gradients/optimizer/activation buffers are left
        # unallocated (lazily created in gpt2_backward / gpt2_update /
        # gpt2_forward).
        self.checkpoint_path = checkpoint_path
        self.param_sizes = InlinedFixedVector[type=Int, size=NUM_PARAMETER_TENSORS](
            NUM_PARAMETER_TENSORS
        )
        self.act_sizes = InlinedFixedVector[type=Int, size=NUM_ACTIVATION_TENSORS](
            NUM_ACTIVATION_TENSORS
        )
        var model_file = open(checkpoint_path, "r")
        # 256-int32 header: magic, version, then the hyperparameters
        var model_header = DTypePointer[dtype.int32].alloc(256)
        read_to_dtype_pointer[DType.int32](model_header,model_file,256)
        if model_header[0] != 20240326:
            print("Bad magic model file",model_header[0])
            exit(1)
        if model_header[1] != 3:
            print("Bad version in model file")
            exit(1)
        # read in hyperparameters
        self.config = GPT2Config(
            int(model_header[2]),
            int(model_header[3]),
            int(model_header[4]),
            int(model_header[5]),
            int(model_header[6]),
            int(model_header[7]),
        )
        var maxT: Int = self.config.max_seq_len
        var V: Int = self.config.vocab_size
        var L: Int = self.config.num_layers
        var NH: Int = self.config.num_heads
        var C: Int = self.config.channels
        var Vp: Int = self.config.padded_vocab_size
        # allocate space for all the parameters and read them in
        # (order and sizes must match ParameterTensors field order)
        self.param_sizes[0] = Vp * C
        self.param_sizes[1] = maxT * C
        self.param_sizes[2] = L * C
        self.param_sizes[3] = L * C
        self.param_sizes[4] = L * (3 * C) * C
        self.param_sizes[5] = L * (3 * C)
        self.param_sizes[6] = L * C * C
        self.param_sizes[7] = L * C
        self.param_sizes[8] = L * C
        self.param_sizes[9] = L * C
        self.param_sizes[10] = L * (4 * C) * C
        self.param_sizes[11] = L * (4 * C)
        self.param_sizes[12] = L * C * (4 * C)
        self.param_sizes[13] = L * C
        self.param_sizes[14] = C
        self.param_sizes[15] = C
        # count the number of parameters
        var num_parameters: Int = 0
        for i in range(NUM_PARAMETER_TENSORS):
            num_parameters += self.param_sizes[i]
        self.num_parameters = num_parameters
        # read in all the parameters from file
        self.params = ParameterTensors()
        self.params_memory = self.params.alloc_and_point_parameters(self.param_sizes)
        read_to_dtype_pointer[DType.float32](self.params_memory,model_file,num_parameters)
        model_file.close()
        # other inits
        self.acts = ActivationTensors()
        self.num_activations = 0  # for now
        self.acts_memory = NULL
        self.grads_memory = NULL
        self.m_memory = NULL
        self.v_memory = NULL
        self.grads_acts_memory = NULL
        self.inputs = NULL_INT
        self.targets = NULL_INT
        self.batch_size = 0
        self.seq_len = 0
        self.mean_loss = -1.0  # -1.0 will designate no loss
        self.grads = ParameterTensors()
        self.grads_acts = ActivationTensors()
        print("[GPT-2]")
        print("max_seq_len:", self.config.max_seq_len)
        print("vocab_size:", self.config.vocab_size)
        print("padded_vocab_size:", self.config.padded_vocab_size)
        print("num_layers:", self.config.num_layers)
        print("num_heads:", self.config.num_heads)
        print("channels:", self.config.channels)
        print("num_parameters:", num_parameters)
fn gpt2_forward(
    inout model: GPT2,
    inputs: DTypePointer[dtype_int],
    targets: DTypePointer[dtype_int],
    B: Int,
    T: Int,
):
    # Full forward pass: token+position encoding, L transformer blocks, final
    # layernorm, logits, softmax; if targets are given, also the mean loss.
    # targets are optional and could be NULL
    # ensure the model was initialized or error out
    if model.params_memory == NULL:
        print("Error: model was not initialized properly.")
    # convenience parameters
    var V: Int = model.config.vocab_size
    var Vp: Int = model.config.padded_vocab_size
    var L: Int = model.config.num_layers
    var NH: Int = model.config.num_heads
    var C: Int = model.config.channels
    # allocate space for all the activations if needed (done here, lazily)
    if model.acts_memory == NULL:
        # record the current B,T as well
        model.batch_size = B
        model.seq_len = T
        # and now allocate the space
        # (order and sizes must match ActivationTensors field order)
        model.act_sizes[0] = B * T * C
        model.act_sizes[1] = L * B * T * C
        model.act_sizes[2] = L * B * T
        model.act_sizes[3] = L * B * T
        model.act_sizes[4] = L * B * T * 3 * C
        model.act_sizes[5] = L * B * T * C
        model.act_sizes[6] = L * B * NH * T * T
        model.act_sizes[7] = L * B * NH * T * T
        model.act_sizes[8] = L * B * T * C
        model.act_sizes[9] = L * B * T * C
        model.act_sizes[10] = L * B * T * C
        model.act_sizes[11] = L * B * T
        model.act_sizes[12] = L * B * T
        model.act_sizes[13] = L * B * T * 4 * C
        model.act_sizes[14] = L * B * T * 4 * C
        model.act_sizes[15] = L * B * T * C
        model.act_sizes[16] = L * B * T * C
        model.act_sizes[17] = B * T * C
        model.act_sizes[18] = B * T
        model.act_sizes[19] = B * T
        model.act_sizes[20] = B * T * Vp
        model.act_sizes[21] = B * T * Vp
        model.act_sizes[22] = B * T
        var num_activations: Int = 0
        for i in range(NUM_ACTIVATION_TENSORS):
            num_activations += model.act_sizes[i]
        print("num_activations:", num_activations)
        model.acts_memory = model.acts.alloc_and_point_activations(model.act_sizes)
        model.num_activations = num_activations
        # also create memory for caching inputs and targets
        model.inputs = DTypePointer[dtype_int]().alloc(B * T)
        model.targets = DTypePointer[dtype_int]().alloc(B * T)
    else:
        # validate B,T is no larger than what was previously allocated
        # in principle, we could re-allocate a larger chunk of memory, for now we just error out
        if B > model.batch_size or T > model.seq_len:
            print("Error: batch size or sequence length is inadequately large")
            # print("Model: B=%d T=%d, Desired: B=%d T=%d\n", model.batch_size, model.seq_len, B, T)
    # cache the inputs/targets (gpt2_backward reads model.inputs later)
    memcpy(model.inputs, inputs, B * T)
    if targets != NULL_INT:
        memcpy(model.targets, targets, B * T)
    # forward pass
    var residual: DTypePointer[dtype]
    encoder_forward(
        model.acts.encoded, inputs, model.params.wte, model.params.wpe, B, T, C
    )  # encoding goes into residual[0]
    for l in range(L):
        # input residual stream: previous layer's residual3, or the encoder
        # output for the first layer
        residual = model.acts.residual3 + (l - 1) * B * T * C
        if l == 0:
            residual = model.acts.encoded
        # get the pointers of the weights for this layer
        var l_ln1w: DTypePointer[dtype] = model.params.ln1w + l * C
        var l_ln1b: DTypePointer[dtype] = model.params.ln1b + l * C
        var l_qkvw: DTypePointer[dtype] = model.params.qkvw + l * 3 * C * C
        var l_qkvb: DTypePointer[dtype] = model.params.qkvb + l * 3 * C
        var l_attprojw: DTypePointer[dtype] = model.params.attprojw + l * C * C
        var l_attprojb: DTypePointer[dtype] = model.params.attprojb + l * C
        var l_ln2w: DTypePointer[dtype] = model.params.ln2w + l * C
        var l_ln2b: DTypePointer[dtype] = model.params.ln2b + l * C
        var l_fcw: DTypePointer[dtype] = model.params.fcw + l * 4 * C * C
        var l_fcb: DTypePointer[dtype] = model.params.fcb + l * 4 * C
        var l_fcprojw: DTypePointer[dtype] = model.params.fcprojw + l * C * 4 * C
        var l_fcprojb: DTypePointer[dtype] = model.params.fcprojb + l * C
        # get the pointers of the activations for this layer
        var l_ln1: DTypePointer[dtype] = model.acts.ln1 + l * B * T * C
        var l_ln1_mean: DTypePointer[dtype] = model.acts.ln1_mean + l * B * T
        var l_ln1_rstd: DTypePointer[dtype] = model.acts.ln1_rstd + l * B * T
        var l_qkv: DTypePointer[dtype] = model.acts.qkv + l * B * T * 3 * C
        var l_atty: DTypePointer[dtype] = model.acts.atty + l * B * T * C
        var l_preatt: DTypePointer[dtype] = model.acts.preatt + l * B * NH * T * T
        var l_att: DTypePointer[dtype] = model.acts.att + l * B * NH * T * T
        var l_attproj: DTypePointer[dtype] = model.acts.attproj + l * B * T * C
        var l_residual2: DTypePointer[dtype] = model.acts.residual2 + l * B * T * C
        var l_ln2: DTypePointer[dtype] = model.acts.ln2 + l * B * T * C
        var l_ln2_mean: DTypePointer[dtype] = model.acts.ln2_mean + l * B * T
        var l_ln2_rstd: DTypePointer[dtype] = model.acts.ln2_rstd + l * B * T
        var l_fch: DTypePointer[dtype] = model.acts.fch + l * B * T * 4 * C
        var l_fch_gelu: DTypePointer[dtype] = model.acts.fch_gelu + l * B * T * 4 * C
        var l_fcproj: DTypePointer[dtype] = model.acts.fcproj + l * B * T * C
        var l_residual3: DTypePointer[dtype] = model.acts.residual3 + l * B * T * C
        # now do the forward pass
        layernorm_forward(
            l_ln1, l_ln1_mean, l_ln1_rstd, residual, l_ln1w, l_ln1b, B, T, C
        )
        matmul_forward(l_qkv, l_ln1, l_qkvw, l_qkvb, B, T, C, 3 * C)
        attention_forward(l_atty, l_preatt, l_att, l_qkv, B, T, C, NH)
        matmul_forward(l_attproj, l_atty, l_attprojw, l_attprojb, B, T, C, C)
        residual_forward(l_residual2, residual, l_attproj, B * T * C)
        layernorm_forward(
            l_ln2, l_ln2_mean, l_ln2_rstd, l_residual2, l_ln2w, l_ln2b, B, T, C
        )
        matmul_forward(l_fch, l_ln2, l_fcw, l_fcb, B, T, C, 4 * C)
        gelu_forward(l_fch_gelu, l_fch, B * T * 4 * C)
        matmul_forward(l_fcproj, l_fch_gelu, l_fcprojw, l_fcprojb, B, T, 4 * C, C)
        residual_forward(l_residual3, l_residual2, l_fcproj, B * T * C)
    residual = (
        model.acts.residual3 + (L - 1) * B * T * C
    )  # last residual is in residual3
    layernorm_forward(
        model.acts.lnf,
        model.acts.lnf_mean,
        model.acts.lnf_rstd,
        residual,
        model.params.lnfw,
        model.params.lnfb,
        B,
        T,
        C,
    )
    # weight-tied unembedding: logits = lnf @ wte^T (no bias)
    matmul_forward(
        model.acts.logits, model.acts.lnf, model.params.wte, NULL, B, T, C, Vp
    )
    softmax_forward(model.acts.probs, model.acts.logits, B, T, V,Vp)
    # also forward the cross-entropy loss function if we have the targets
    if targets != NULL_INT:
        crossentropy_forward(model.acts.losses, model.acts.probs, targets, B, T, Vp)
        # for convenience also evaluate the mean loss
        var mean_loss: FLOAT = 0.0
        for i in range(B * T):
            mean_loss += model.acts.losses[i]
        mean_loss /= B * T
        model.mean_loss = mean_loss
    else:
        # if we don't have targets, we don't have a loss
        model.mean_loss = -1.0
fn gpt2_zero_grad(inout model: GPT2):
    # Reset all gradient buffers to zero ahead of the next backward pass.
    # Either buffer may still be unallocated (they are lazily created in
    # gpt2_backward), in which case there is nothing to clear.
    var grad_acts = model.grads_acts_memory
    var grad_params = model.grads_memory
    if grad_acts != NULL:
        memset_zero(grad_acts, model.num_activations)
    if grad_params != NULL:
        memset_zero(grad_params, model.num_parameters)
fn gpt2_backward(inout model: GPT2):
    # Full backward pass, mirroring gpt2_forward in reverse order. Requires a
    # prior forward call with targets (mean_loss set). Gradients accumulate
    # into model.grads / model.grads_acts; call gpt2_zero_grad between steps.
    # double check we forwarded previously, with targets
    if model.mean_loss == -1.0:
        print("Error: must forward with targets before backward\n")
    # lazily allocate the memory for gradients of the weights and activations, if needed
    if model.grads_memory == NULL:
        model.grads_memory = model.grads.alloc_and_point_parameters(model.param_sizes)
        model.grads_acts_memory = model.grads_acts.alloc_and_point_activations(
            model.act_sizes
        )
        gpt2_zero_grad(model)
    # convenience shortcuts
    var B: Int = model.batch_size
    var T: Int = model.seq_len
    var V: Int = model.config.vocab_size
    var Vp: Int = model.config.padded_vocab_size
    var L: Int = model.config.num_layers
    var NH: Int = model.config.num_heads
    var C: Int = model.config.channels
    # backward pass
    # we kick off the chain rule by filling in dlosses with 1.0f/(B*T)
    # technically this is a small, inline backward() pass of calculating
    # total, final loss as the mean over all losses over all (B,T) positions in the batch
    var dloss_mean: FLOAT = 1.0 / (B * T)
    @parameter
    fn _op[width: Int](iv: Int):
        model.grads_acts.losses.store[width=width](iv, dloss_mean)
    vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR]((B * T))
    crossentropy_softmax_backward(
        model.grads_acts.logits,
        model.grads_acts.losses,
        model.acts.probs,
        model.targets,
        B,
        T,
        V,
        Vp
    )
    # backward through the weight-tied unembedding (no bias => NULL dbias)
    matmul_backward(
        model.grads_acts.lnf,
        model.grads.wte,
        NULL,
        model.grads_acts.logits,
        model.acts.lnf,
        model.params.wte,
        B,
        T,
        C,
        Vp,
    )
    var residual: DTypePointer[dtype] = model.acts.residual3 + (
        L - 1
    ) * B * T * C  # last layer's residual
    var dresidual: DTypePointer[dtype] = model.grads_acts.residual3 + (
        L - 1
    ) * B * T * C  # write to last layer's residual
    layernorm_backward(
        dresidual,
        model.grads.lnfw,
        model.grads.lnfb,
        model.grads_acts.lnf,
        residual,
        model.params.lnfw,
        model.acts.lnf_mean,
        model.acts.lnf_rstd,
        B,
        T,
        C,
    )
    # walk the transformer blocks last-to-first
    for l in range(L - 1, -1, -1):
        # this layer's input residual stream (and its gradient): previous
        # layer's residual3, or the encoder output for layer 0
        var residual = model.acts.encoded
        var dresidual = model.grads_acts.encoded
        if l != 0:
            residual = model.acts.residual3 + (l - 1) * B * T * C
            dresidual = model.grads_acts.residual3 + (l - 1) * B * T * C
        # get the pointers of the weights for this layer
        var l_ln1w: DTypePointer[dtype] = model.params.ln1w + l * C
        var l_qkvw: DTypePointer[dtype] = model.params.qkvw + l * 3 * C * C
        var l_attprojw: DTypePointer[dtype] = model.params.attprojw + l * C * C
        var l_ln2w: DTypePointer[dtype] = model.params.ln2w + l * C
        var l_fcw: DTypePointer[dtype] = model.params.fcw + l * 4 * C * C
        var l_fcprojw: DTypePointer[dtype] = model.params.fcprojw + l * C * 4 * C
        # get the pointers of the gradients of the weights for this layer
        var dl_ln1w: DTypePointer[dtype] = model.grads.ln1w + l * C
        var dl_ln1b: DTypePointer[dtype] = model.grads.ln1b + l * C
        var dl_qkvw: DTypePointer[dtype] = model.grads.qkvw + l * 3 * C * C
        var dl_qkvb: DTypePointer[dtype] = model.grads.qkvb + l * 3 * C
        var dl_attprojw: DTypePointer[dtype] = model.grads.attprojw + l * C * C
        var dl_attprojb: DTypePointer[dtype] = model.grads.attprojb + l * C
        var dl_ln2w: DTypePointer[dtype] = model.grads.ln2w + l * C
        var dl_ln2b: DTypePointer[dtype] = model.grads.ln2b + l * C
        var dl_fcw: DTypePointer[dtype] = model.grads.fcw + l * 4 * C * C
        var dl_fcb: DTypePointer[dtype] = model.grads.fcb + l * 4 * C
        var dl_fcprojw: DTypePointer[dtype] = model.grads.fcprojw + l * C * 4 * C
        var dl_fcprojb: DTypePointer[dtype] = model.grads.fcprojb + l * C
        # get the pointers of the activations for this layer
        var l_ln1: DTypePointer[dtype] = model.acts.ln1 + l * B * T * C
        var l_ln1_mean: DTypePointer[dtype] = model.acts.ln1_mean + l * B * T
        var l_ln1_rstd: DTypePointer[dtype] = model.acts.ln1_rstd + l * B * T
        var l_qkv: DTypePointer[dtype] = model.acts.qkv + l * B * T * 3 * C
        var l_atty: DTypePointer[dtype] = model.acts.atty + l * B * T * C
        var l_att: DTypePointer[dtype] = model.acts.att + l * B * NH * T * T
        var l_residual2: DTypePointer[dtype] = model.acts.residual2 + l * B * T * C
        var l_ln2: DTypePointer[dtype] = model.acts.ln2 + l * B * T * C
        var l_ln2_mean: DTypePointer[dtype] = model.acts.ln2_mean + l * B * T
        var l_ln2_rstd: DTypePointer[dtype] = model.acts.ln2_rstd + l * B * T
        var l_fch: DTypePointer[dtype] = model.acts.fch + l * B * T * 4 * C
        var l_fch_gelu: DTypePointer[dtype] = model.acts.fch_gelu + l * B * T * 4 * C
        # get the pointers of the gradients of the activations for this layer
        var dl_ln1: DTypePointer[dtype] = model.grads_acts.ln1 + l * B * T * C
        var dl_qkv: DTypePointer[dtype] = model.grads_acts.qkv + l * B * T * 3 * C
        var dl_atty: DTypePointer[dtype] = model.grads_acts.atty + l * B * T * C
        var dl_preatt: DTypePointer[
            dtype
        ] = model.grads_acts.preatt + l * B * NH * T * T
        var dl_att: DTypePointer[dtype] = model.grads_acts.att + l * B * NH * T * T
        var dl_attproj: DTypePointer[dtype] = model.grads_acts.attproj + l * B * T * C
        var dl_residual2: DTypePointer[
            dtype
        ] = model.grads_acts.residual2 + l * B * T * C
        var dl_ln2: DTypePointer[dtype] = model.grads_acts.ln2 + l * B * T * C
        var dl_fch: DTypePointer[dtype] = model.grads_acts.fch + l * B * T * 4 * C
        var dl_fch_gelu: DTypePointer[
            dtype
        ] = model.grads_acts.fch_gelu + l * B * T * 4 * C
        var dl_fcproj: DTypePointer[dtype] = model.grads_acts.fcproj + l * B * T * C
        var dl_residual3: DTypePointer[
            dtype
        ] = model.grads_acts.residual3 + l * B * T * C
        # backprop this layer (exact reverse of the forward-pass call order)
        residual_backward(dl_residual2, dl_fcproj, dl_residual3, B * T * C)
        matmul_backward(
            dl_fch_gelu,
            dl_fcprojw,
            dl_fcprojb,
            dl_fcproj,
            l_fch_gelu,
            l_fcprojw,
            B,
            T,
            4 * C,
            C,
        )
        gelu_backward(dl_fch, l_fch, dl_fch_gelu, B * T * 4 * C)
        matmul_backward(dl_ln2, dl_fcw, dl_fcb, dl_fch, l_ln2, l_fcw, B, T, C, 4 * C)
        layernorm_backward(
            dl_residual2,
            dl_ln2w,
            dl_ln2b,
            dl_ln2,
            l_residual2,
            l_ln2w,
            l_ln2_mean,
            l_ln2_rstd,
            B,
            T,
            C,
        )
        residual_backward(dresidual, dl_attproj, dl_residual2, B * T * C)
        matmul_backward(
            dl_atty,
            dl_attprojw,
            dl_attprojb,
            dl_attproj,
            l_atty,
            l_attprojw,
            B,
            T,
            C,
            C,
        )
        attention_backward(
            dl_qkv, dl_preatt, dl_att, dl_atty, l_qkv, l_att, B, T, C, NH
        )
        matmul_backward(dl_ln1, dl_qkvw, dl_qkvb, dl_qkv, l_ln1, l_qkvw, B, T, C, 3 * C)
        layernorm_backward(
            dresidual,
            dl_ln1w,
            dl_ln1b,
            dl_ln1,
            residual,
            l_ln1w,
            l_ln1_mean,
            l_ln1_rstd,
            B,
            T,
            C,
        )
    # finally backprop into the token/position embeddings (uses the cached inputs)
    encoder_backward(
        model.grads.wte,
        model.grads.wpe,
        model.grads_acts.encoded,
        model.inputs,
        B,
        T,
        C,
    )
fn gpt2_update(
    inout model: GPT2,
    learning_rate: FLOAT,
    beta1: FLOAT,
    beta2: FLOAT,
    eps: FLOAT,
    weight_decay: FLOAT,
    t: Int,
):
    """One AdamW optimizer step over all model parameters.

    t is the 1-based step number, used for bias correction.
    reference: https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html
    """
    # lazily allocate the memory for m_memory and v_memory
    if model.m_memory == NULL:
        model.m_memory = DTypePointer[dtype]().alloc(model.num_parameters)
        model.v_memory = DTypePointer[dtype]().alloc(model.num_parameters)
        memset_zero(model.m_memory, model.num_parameters)
        memset_zero(model.v_memory, model.num_parameters)

    # the bias-correction denominators are loop-invariant; compute them once
    # instead of calling pow() for every parameter
    var beta1_correction = 1.0 - pow(beta1, t)
    var beta2_correction = 1.0 - pow(beta2, t)

    var num_vectorize = model.num_parameters // NUM_PARALLELIZE

    @parameter
    fn _calc(ip: Int):
        @parameter
        fn _op[width: Int](_iv: Int):
            var iv = ip * num_vectorize + _iv
            var param = model.params_memory.load[width=width](iv)
            var grad = model.grads_memory.load[width=width](iv)
            # update the first moment (momentum)
            var m = beta1 * model.m_memory.load[width=width](iv) + (1.0 - beta1) * grad
            # update the second moment (RMSprop)
            var v = beta2 * model.v_memory.load[width=width](iv) + (
                1.0 - beta2
            ) * grad * grad
            # bias-correct both moments
            var m_hat = m / beta1_correction
            var v_hat = v / beta2_correction
            # update
            model.m_memory.store[width=width](iv, m)
            model.v_memory.store[width=width](iv, v)
            model.params_memory.store[width=width](
                iv,
                param
                - learning_rate * (m_hat / (sqrt(v_hat) + eps) + weight_decay * param),
            )

        vectorize[_op, SIMD_WIDTH, unroll_factor=UNROLL_FACTOR](num_vectorize)

    parallelize[_calc](NUM_PARALLELIZE)

    # BUGFIX: num_parameters // NUM_PARALLELIZE drops the remainder, so the
    # last (num_parameters % NUM_PARALLELIZE) parameters were never updated.
    # Finish them with a scalar tail loop.
    for iv in range(num_vectorize * NUM_PARALLELIZE, model.num_parameters):
        var param = model.params_memory.load(iv)
        var grad = model.grads_memory.load(iv)
        var m = beta1 * model.m_memory.load(iv) + (1.0 - beta1) * grad
        var v = beta2 * model.v_memory.load(iv) + (1.0 - beta2) * grad * grad
        var m_hat = m / beta1_correction
        var v_hat = v / beta2_correction
        model.m_memory.store(iv, m)
        model.v_memory.store(iv, v)
        model.params_memory.store(
            iv,
            param - learning_rate * (m_hat / (sqrt(v_hat) + eps) + weight_decay * param),
        )
fn gpt2_free(inout model: GPT2):
    """Releases every heap allocation owned by the model: parameters,
    gradients, AdamW moment buffers, activations, and cached inputs/targets.
    The model must not be used after this call."""
    model.params_memory.free()
    model.grads_memory.free()
    model.m_memory.free()
    model.v_memory.free()
    model.acts_memory.free()
    model.grads_acts_memory.free()
    model.inputs.free()
    model.targets.free()
# ifndef TESTING
# if we are TESTING (see test_gpt2.c), we'll skip the main below
# ----------------------------------------------------------------------------
# data loader lite
# returns random batches of data from a file of integers
struct DataLoader:
    """Sequential batch loader over a flat binary file of int32 tokens."""

    # hyperparameters
    var B: Int  # batch size
    var T: Int  # sequence length
    # input handling and its state
    var filename: StringRef
    var tokens_file: FileHandle
    var file_size: Int  # total size of the tokens file, in bytes
    var current_position: Int  # byte offset of the next batch in the file
    # output memory
    var batch: DTypePointer[dtype_int]  # B*T + 1 tokens; inputs/targets alias it
    var inputs: DTypePointer[dtype_int]  # == batch
    var targets: DTypePointer[dtype_int]  # == batch + 1 (targets shifted by one)
    # convenience variables
    var num_batches: Int

    fn __init__(inout self):
        # zero-initialize everything; dataloader_init does the real setup
        self.B = 0
        self.T = 0
        self.filename = ""
        self.tokens_file = FileHandle()
        self.file_size = 0
        self.current_position = 0
        self.batch = DTypePointer[dtype_int]()
        self.inputs = DTypePointer[dtype_int]()
        self.targets = DTypePointer[dtype_int]()
        self.num_batches = 0
fn dataloader_init(
    inout loader: DataLoader, filename: StringRef, B: Int, T: Int
) raises:
    """Opens the tokens file and sizes the loader for batches of B*T tokens.

    Exits the process when the file cannot be opened or is too small to hold
    a single batch (B*T + 1 tokens).
    """
    loader.B = B
    loader.T = T
    try:
        loader.tokens_file = open(filename, "rb")
    except e:
        print("Error opening file",filename,e)
        exit(1)
    # determine the file size (via Python's os module)
    var _os = Python.import_module("os")
    loader.file_size = int(_os.path.getsize(filename))
    # BUGFIX: use SIZEOF_INT instead of the magic literal 4, and exit like the
    # llm.c reference does — continuing would read past the end of the file.
    if loader.file_size < (B * T + 1) * SIZEOF_INT:
        print("Error: file size is too small for the batch size and sequence length\n")
        exit(1)
    loader.current_position = 0  # start at the beginning
    # allocate space for B*T + 1 integers to store the inputs and targets
    loader.batch = DTypePointer[dtype_int]().alloc(B * T + 1)
    loader.inputs = loader.batch
    loader.targets = loader.batch + 1  # targets are shifted by one
    loader.num_batches = loader.file_size // (B * T * SIZEOF_INT)
fn dataloader_reset(inout loader: DataLoader):
    """Rewinds the loader so the next batch starts at the top of the file."""
    loader.current_position = 0
fn dataloader_next_batch(inout loader: DataLoader) raises:
    """Reads the next B*T + 1 tokens into loader.batch, wrapping back to the
    start of the file when fewer than B*T + 1 tokens remain."""
    var B: Int = loader.B
    var T: Int = loader.T
    # if we are at the end of the file, loop back to the beginning
    if loader.current_position + ((B * T + 1) * SIZEOF_INT) > loader.file_size:
        loader.current_position = 0
    # read the B*T+1 integers from the file into batch
    # (seek returns the new offset; we don't need it — removed the unused local)
    _ = loader.tokens_file.seek(loader.current_position)
    read_to_dtype_pointer(loader.batch, loader.tokens_file, B * T + 1)
    # advance the current position by B*T integers
    # (consecutive batches overlap by one token: the next batch's first input
    # is this batch's last target)
    loader.current_position += B * T * SIZEOF_INT
fn dataloader_free(inout loader: DataLoader) raises:
    """Closes the tokens file and frees the batch buffer (inputs/targets are
    views into batch and must not be freed separately)."""
    loader.tokens_file.close()
    loader.batch.free()
# ----------------------------------------------------------------------------
# sampler
fn random_u32(inout state: UInt64) -> UInt32:
    """One xorshift64* step: advances `state` in place and returns the upper
    32 bits of the multiplied state as the random output."""
    var x = state
    x ^= x >> 12
    x ^= x << 25
    x ^= x >> 27
    state = x
    return ((x * RU32_HEX) >> 32).cast[DType.uint32]()
fn random_f32(inout state: UInt64) -> Float32:
    """Uniform float in [0, 1) built from the top 24 bits of random_u32."""
    var bits = random_u32(state) >> 8
    return bits.cast[DType.float32]() / RF32_DIV
fn sample_mult(probabilities: DTypePointer[dtype], n: Int, coin: FLOAT) -> Int:
    """Inverse-CDF sampling: returns the first index whose cumulative
    probability exceeds `coin`.

    probabilities must sum to 1; coin is a random number in [0, 1), usually
    from random_f32(). Falls back to the last index if rounding error keeps
    the cumulative sum below coin.
    """
    var cumulative: FLOAT = 0.0
    for idx in range(n):
        cumulative += probabilities[idx]
        if coin < cumulative:
            return idx
    return n - 1
# ----------------------------------------------------------------------------
# Tokenizer (only supports decoding)
# this mojo version needs refinements, still buggy
struct Tokenizer:
    """GPT-2 tokenizer, decode-only: maps token ids back to byte strings.

    Loads the vocab table from the binary file written by train_gpt2.py.
    When the file is missing or malformed, init_ok stays 0 and decode
    returns "".
    """

    var vocab_size: Int
    var token_table: List[String]  # token id -> raw byte string
    var init_ok: Int  # 1 after a successful load, else 0

    fn __init__(inout self, filename: StringRef) raises:
        self.vocab_size = 0
        self.token_table = List[String]()
        self.init_ok = 0
        var file: FileHandle
        try:
            file = open(filename, "rb")
        except:
            print("---")
            print("WARNING: Failed to open the tokenizer file", filename)
            print("The Tokenizer is a new feature added April 14 2024.")
            print("Re-run `python train_gpt2.py` to write it")
            print("---")
            self.init_ok = 0
            return
        # header: 256 int32s — [0]=magic, [1]=version, [2]=vocab_size
        var header = DTypePointer[DType.int32].alloc(256)
        read_to_dtype_pointer(header, file, 256)
        if header[0] != 20240328:
            print("Bad magic model file", header[0])
            exit(1)
        if header[1] != 2:
            print("Bad version in model file", header[1])
            exit(1)
        self.vocab_size = int(header[2])
        # BUGFIX: the header buffer was leaked; free it once parsed
        header.free()
        # each vocab entry is a 1-byte length followed by that many raw bytes
        for i in range(self.vocab_size):
            var length = int(file.read_bytes(1)[0])
            # renamed from `str` to avoid shadowing the builtin str()
            var piece: String = file.read(length)
            if length > 0 and len(piece) > 0:
                self.token_table.append(piece)
            else:
                self.token_table.append("")
        file.close()
        self.init_ok = 1

    fn decode(self, token_id: Int) -> String:
        """Returns the byte string for token_id, or "" when out of range or
        when the tokenizer failed to load."""
        if self.init_ok == 0:
            return ""
        if token_id >= 0 and token_id < self.vocab_size:
            return self.token_table[token_id]
        else:
            return ""

    fn safe_printf(self, s: String):
        # the tokens are raw bytes, and we only want to print the printable ones
        # many bytes can be various control codes, backspace, etc.
        if s == str(NULL):
            return
        if s[0] == "\0":
            return
        # handle individual byte tokens
        # every token is asserted to be at least one byte so doing piece[1] is ok
        ### --- TODO
        # if (s[1] == '\0') {
        #     unsigned char byte_val = piece[0];
        #     if (!(isprint(byte_val) || isspace(byte_val))) {
        #         return; // weird byte, don't print it
        #     }
        # }
        print(s, end="")
fn read_to_dtype_pointer[T:DType](inout ptr:DTypePointer[T],file_handle:FileHandle,num:Int,alloc:Bool=False) raises -> None :
    """Reads `num` elements of dtype T from file_handle into ptr.

    When alloc is True, first allocates ptr with room for `num` elements
    (the caller owns and must free it). The number of elements actually
    read is discarded.
    """
    if alloc:
        ptr = DTypePointer[T].alloc(num)
    _ = file_handle.read(ptr,num)
# ----------------------------------------------------------------------------
# main training loop
fn main() raises:
    """Training driver: loads GPT-2 124M from a checkpoint, trains for 40
    steps, periodically reporting validation loss and sampling text."""
    # build the GPT-2 model from a checkpoint
    var model = GPT2("gpt2_124M.bin")

    # build the DataLoaders from tokens files. for now use tiny_shakespeare if available, else tiny_stories
    var tiny_stories_train: StringRef = "./data/TinyStories_train.bin"
    var tiny_stories_val: StringRef = "./data/TinyStories_val.bin"
    var tiny_shakespeare_train: StringRef = "./data/tiny_shakespeare_train.bin"
    var tiny_shakespeare_val: StringRef = "./data/tiny_shakespeare_val.bin"
    var train_tokens: StringRef = tiny_shakespeare_train
    var val_tokens: StringRef = tiny_shakespeare_val
    try:
        var file = open(tiny_shakespeare_train, "r")
        file.close()
    except:
        # shakespeare data not present; fall back to tiny stories for both splits
        train_tokens = tiny_stories_train
        val_tokens = tiny_stories_val

    var B: Int = 4  # batch size
    var T: Int = 64  # sequence length
    var train_loader = DataLoader()
    dataloader_init(train_loader, train_tokens, B, T)
    print("train dataset num_batches:", train_loader.num_batches)
    var val_loader = DataLoader()
    dataloader_init(val_loader, val_tokens, B, T)
    print("val dataset num_batches:", val_loader.num_batches)
    var val_num_batches: Int = 10

    # build the Tokenizer
    var tokenizer = Tokenizer("gpt2_tokenizer.bin")

    # some memory for generating samples from the model
    var rng_state: UInt64 = 1337
    var gen_max_length: Int = 64
    var gen_tokens = DTypePointer[dtype_int]().alloc(gen_max_length)

    # train
    var elapsed_time_ms_total = 0.0
    for step in range(41):
        # once in a while estimate the validation loss
        if step % 10 == 0:
            var val_loss: FLOAT = 0.0
            dataloader_reset(val_loader)
            for i in range(val_num_batches):
                dataloader_next_batch(val_loader)
                gpt2_forward(model, val_loader.inputs, val_loader.targets, B, T)
                val_loss += model.mean_loss
            val_loss /= val_num_batches
            print("val loss", val_loss)

        # once in a while do model inference to print generated text
        if step > 0 and step % 20 == 0:
            gen_tokens[0] = GPT2_EOT  # the GPT-2 EOT token kicks off the generation
            print("generating:\n---")
            for t in range(1, gen_max_length):
                # note that inference is wasteful here because
                # for each t, we re-compute all activations between 0 and t
                # leaving this alone because you want separate code for inference anyway
                # the inference here is just for sanity checking purposes
                gpt2_forward(model, gen_tokens, NULL_INT, 1, t)
                var probs = model.acts.probs + (t - 1) * model.config.padded_vocab_size
                var coin: FLOAT = random_f32(rng_state).cast[dtype]()
                var next_token: Int = sample_mult(probs, model.config.vocab_size, coin)
                gen_tokens[t] = next_token
                # print the generated token, either using the Tokenizer or a fallback
                if tokenizer.init_ok:
                    var token_str: String = tokenizer.decode(next_token)
                    tokenizer.safe_printf(token_str)
                else:
                    # fall back to printing the token id
                    print(next_token, end=" ")
            print("\n---")

        # do a training step
        var start_time = now()
        dataloader_next_batch(train_loader)
        gpt2_forward(model, train_loader.inputs, train_loader.targets, B, T)
        gpt2_zero_grad(model)
        gpt2_backward(model)
        gpt2_update(model, 1e-4, 0.9, 0.999, 1e-8, 0.0, step + 1)
        var elapsed_time_ms = (now() - start_time) / 1_000_000.0
        elapsed_time_ms_total += elapsed_time_ms
        print(
            "step "
            + str(step)
            + ": train loss "
            + str(model.mean_loss)
            + " (took "
            + str(int(elapsed_time_ms))
            + " ms, average: "
            + str(int(elapsed_time_ms_total / (step + 1)))
            + " ms)"
        )

    # free
    gen_tokens.free()  # BUGFIX: generation buffer was leaked
    dataloader_free(train_loader)
    dataloader_free(val_loader)
    gpt2_free(model)
| llm.mojo/train_gpt2.mojo | false |
<filename>llm.mojo/train_gpt2_basic.mojo
from collections.vector import InlinedFixedVector
from math import sqrt,rsqrt,exp,tanh,cosh,log,pow
from memory import memset_zero,memcpy
from python import Python
from time import now
alias RU32_HEX = 0x2545F4914F6CDD1D
alias RF32_DIV = 16777216.0
alias dtype = DType.float32
alias FLOAT = SIMD[dtype,1]
alias dtype_int = DType.int32
alias INT = SIMD[dtype_int, 1]
alias NULL = DTypePointer[dtype]()
alias NULL_INT = DTypePointer[dtype_int]()
alias M_PI:FLOAT = 3.141592653589793115997963468544185161590576171875
alias GPT2_EOT=50256
alias EXIT_1 = external_call["exit",Int](1)
alias SIZEOF_INT = sizeof[DType.int32]()
alias SIZEOF_FLOAT = sizeof[DType.float32]()
## ----------------------------------------------------------------------------
# all the individual layers' forward and backward passes
fn encoder_forward(out:DTypePointer[dtype], inp:DTypePointer[dtype_int], wte:DTypePointer[dtype], wpe:DTypePointer[dtype],B:Int32,T:Int32,C:Int32):
    """out[b,t,:] = wte[inp[b,t],:] + wpe[t,:] — token plus positional embedding."""
    for b in range(B):
        for t in range(T):
            # which token sits at position (b, t)
            var token: Int32 = inp[b * T + t]
            # destination row out[b,t,:]
            var dst: DTypePointer[dtype] = out + (b * T + t) * C
            # corresponding rows of the embedding tables
            var tok_row: DTypePointer[dtype] = wte + token * C
            var pos_row: DTypePointer[dtype] = wpe + t * C
            # channel-wise sum of the two embeddings
            for c in range(C):
                dst[c] = tok_row[c] + pos_row[c]
fn encoder_backward(dwte:DTypePointer[dtype], dwpe:DTypePointer[dtype],dout:DTypePointer[dtype], inp:DTypePointer[dtype_int],B:Int32,T:Int32,C:Int32):
    """Scatter-add of the embedding gradients:
    dwte[inp[b,t],:] += dout[b,t,:] and dwpe[t,:] += dout[b,t,:]."""
    for b in range(B):
        for t in range(T):
            var token: Int32 = inp[b * T + t]
            var grad_row: DTypePointer[dtype] = dout + (b * T + t) * C
            var dwte_row: DTypePointer[dtype] = dwte + token * C
            var dwpe_row: DTypePointer[dtype] = dwpe + t * C
            # the forward pass was a plain sum, so the gradient flows to both tables
            for c in range(C):
                var g: FLOAT = grad_row[c]
                dwte_row[c] += g
                dwpe_row[c] += g
fn layernorm_forward(inout out:DTypePointer[dtype], mean:DTypePointer[dtype], rstd:DTypePointer[dtype],inp:DTypePointer[dtype], weight:DTypePointer[dtype], bias:DTypePointer[dtype],B:Int32,T:Int32,C:Int32):
    """LayerNorm over the channel dimension.

    Writes normalized-scaled-shifted values to out[b,t,:] and caches the
    per-(b,t) mean and reciprocal std into `mean`/`rstd` for the backward pass.
    """
    var eps: FLOAT = 1e-5
    for b in range(B):
        for t in range(T):
            # input row inp[b,t,:] and output row out[b,t,:]
            var x: DTypePointer[dtype] = inp + b * T * C + t * C
            var out_bt: DTypePointer[dtype] = out + b * T * C + t * C
            # mean over channels
            var mu: FLOAT = 0.0
            for i in range(C):
                mu += x[i]
            mu = mu / int(C)
            # biased variance over channels (no Bessel correction)
            var variance: FLOAT = 0.0
            for i in range(C):
                var centered: FLOAT = x[i] - mu
                variance += centered * centered
            variance = variance / int(C)
            # reciprocal standard deviation
            var inv_std: FLOAT = 1.0 / sqrt(variance + eps)
            # normalize, then scale and shift
            for i in range(C):
                out_bt[i] = (inv_std * (x[i] - mu)) * weight[i] + bias[i]
            # cache the statistics for the backward pass
            mean[b * T + t] = mu
            rstd[b * T + t] = inv_std
fn layernorm_backward( dinp:DTypePointer[dtype], dweight:DTypePointer[dtype], dbias:DTypePointer[dtype],
                dout:DTypePointer[dtype], inp:DTypePointer[dtype], weight:DTypePointer[dtype], mean:DTypePointer[dtype], rstd:DTypePointer[dtype],
                B:Int32,T:Int32,C:Int32):
    """Backward of layernorm_forward.

    Accumulates (+=) into dinp, dweight, and dbias, using the mean and rstd
    cached by the forward pass. Two passes per (b, t) row: first the two
    channel-reductions the input gradient needs, then the accumulation.
    """
    for b in range(B):
        for t in range(T):
            var dout_bt:DTypePointer[dtype] = dout + b * T * C + t * C
            var inp_bt:DTypePointer[dtype] = inp + b * T * C + t * C
            var dinp_bt:DTypePointer[dtype] = dinp + b * T * C + t * C
            var mean_bt:FLOAT = mean[b * T + t]
            var rstd_bt:FLOAT = rstd[b * T + t]
            # first: two reduce operations
            var dnorm_mean:FLOAT = 0.0
            var dnorm_norm_mean:FLOAT = 0.0
            for i in range(C):
                # norm_bti re-derives the normalized input from the cached stats
                var norm_bti:FLOAT = (inp_bt[i] - mean_bt) * rstd_bt
                var dnorm_i:FLOAT = weight[i] * dout_bt[i]
                dnorm_mean += dnorm_i
                dnorm_norm_mean += dnorm_i * norm_bti
            dnorm_mean = dnorm_mean / int(C)
            dnorm_norm_mean = dnorm_norm_mean / int(C)
            # now iterate again and accumulate all the gradients
            for i in range(C):
                var norm_bti:FLOAT = (inp_bt[i] - mean_bt) * rstd_bt
                var dnorm_i:FLOAT = weight[i] * dout_bt[i]
                # gradient contribution to bias
                dbias[i] += dout_bt[i]
                # gradient contribution to weight
                dweight[i] += norm_bti * dout_bt[i]
                # gradient contribution to input
                var dval:FLOAT = 0.0
                dval += dnorm_i # term 1
                dval -= dnorm_mean # term 2
                dval -= norm_bti * dnorm_norm_mean # term 3
                dval *= rstd_bt # final scale
                dinp_bt[i] += dval
fn matmul_forward( out:DTypePointer[dtype],
                inp:DTypePointer[dtype], weight:DTypePointer[dtype], bias:DTypePointer[dtype],
                B:Int32,T:Int32,C:Int32,OC:Int32):
    """out[b,t,o] = bias[o] + inp[b,t,:] · weight[o,:].

    OC is short for "output channels": inp is (B,T,C), weight is (OC,C),
    bias is (OC) or NULL, and out is (B,T,OC). Most of the running time is
    spent here and in matmul_backward.
    """
    for b in range(B):
        for t in range(T):
            var row_in: DTypePointer[dtype] = inp + b * T * C + t * C
            var row_out: DTypePointer[dtype] = out + b * T * OC + t * OC
            for o in range(OC):
                # start the accumulator at the bias (when one is supplied)
                var acc: FLOAT = bias[o] if bias != NULL else 0.0
                var w_row: DTypePointer[dtype] = weight + o * C
                for i in range(C):
                    acc += row_in[i] * w_row[i]
                row_out[o] = acc
fn matmul_backward( dinp:DTypePointer[dtype], dweight:DTypePointer[dtype], dbias:DTypePointer[dtype],
                dout:DTypePointer[dtype], inp:DTypePointer[dtype], weight:DTypePointer[dtype],
                B:Int32,T:Int32,C:Int32,OC:Int32):
    """Backward of matmul_forward: accumulates into dinp, dweight, and
    (when not NULL) dbias. Split into two loop nests so each phase could be
    parallelized over a different axis."""
    # most of the running time is spent here and in matmul_forward
    # this backward could be done in a single "round" of loops
    # but that doesn't afford an efficient parallelization strategy
    # backward into inp first, parallelize over B,T
    #pragma omp parallel for collapse(2)
    for b in range(B):
        for t in range(T):
            var dout_bt:DTypePointer[dtype] = dout + b * T * OC + t * OC
            var dinp_bt:DTypePointer[dtype] = dinp + b * T * C + t * C
            for o in range(OC):
                var wrow:DTypePointer[dtype] = weight + o*C
                var d:FLOAT = dout_bt[o]
                for i in range(C):
                    dinp_bt[i] += wrow[i] * d
    # backward into weight/bias, parallelize over output channels OC
    #pragma omp parallel for
    for o in range(OC):
        for b in range(B):
            for t in range(T):
                var dout_bt:DTypePointer[dtype] = dout + b * T * OC + t * OC
                var inp_bt:DTypePointer[dtype] = inp + b * T * C + t * C
                var dwrow:DTypePointer[dtype] = dweight + o*C
                var d:FLOAT = dout_bt[o]
                # bias gradient only when a bias was used in the forward pass
                if (dbias != NULL):
                    dbias[o] += d
                for i in range(C):
                    dwrow[i] += inp_bt[i] * d
fn attention_forward( out:DTypePointer[dtype], preatt:DTypePointer[dtype], att:DTypePointer[dtype],
                inp:DTypePointer[dtype],
                B:Int32,T:Int32,C:Int32,NH:Int32):
    """Causal multi-head self-attention, four passes per (b, t, h):
    scaled QK scores, exp with max-subtraction, softmax normalization,
    and the attention-weighted sum of values."""
    # input is (B, T, 3C) Q,K,V
    # preatt, att are (B, NH, T, T)
    # output is (B, T, C)
    var C3:Int32 = C*3
    var hs:Int32 = C / NH # head size
    var scale:FLOAT = 1.0 / sqrt(hs.cast[dtype]())
    #pragma omp parallel for collapse(3)
    for b in range(B):
        for t in range(T):
            for h in range(NH):
                var query_t:DTypePointer[dtype] = inp + b * T * C3 + t * C3 + h * hs
                var preatt_bth:DTypePointer[dtype] = preatt + b*NH*T*T + h*T*T + t*T
                var att_bth:DTypePointer[dtype] = att + b*NH*T*T + h*T*T + t*T
                # pass 1: calculate query dot key and maxval
                # (only positions t2 <= t — the causal mask)
                var maxval:FLOAT = -10000.0 # TODO something better
                for t2 in range(t+1):
                    var key_t2:DTypePointer[dtype] = inp + b * T * C3 + t2 * C3 + h * hs + C # +C because it's key
                    # (query_t) dot (key_t2)
                    var val:FLOAT = 0.0
                    for i in range(hs):
                        val += query_t[i] * key_t2[i]
                    val *= scale
                    if (val > maxval):
                        maxval = val
                    preatt_bth[t2] = val
                # pass 2: calculate the exp and keep track of sum
                # (maxval is subtracted for numerical stability)
                var expsum:FLOAT = 0.0
                for t2 in range(t+1):
                    var expv:FLOAT = exp(preatt_bth[t2] - maxval)
                    expsum += expv
                    att_bth[t2] = expv
                var expsum_inv:FLOAT = 1.0 / expsum
                if expsum == 0.0:
                    expsum_inv = 0.0
                # pass 3: normalize to get the softmax
                for t2 in range(T):
                    if (t2 <= t):
                        att_bth[t2] *= expsum_inv
                    else:
                        # causal attention mask. not strictly necessary to set to zero here
                        # only doing this explicitly for debugging and checking to PyTorch
                        att_bth[t2] = 0.0
                # pass 4: accumulate weighted values into the output of attention
                var out_bth:DTypePointer[dtype] = out + b * T * C + t * C + h * hs
                for i in range(hs):
                    out_bth[i] = 0.0
                for t2 in range(t+1):
                    var value_t2:DTypePointer[dtype] = inp + b * T * C3 + t2 * C3 + h * hs + C*2 # +C*2 because it's value
                    var att_btht2:FLOAT = att_bth[t2]
                    for i in range(hs):
                        out_bth[i] += att_btht2 * value_t2[i]
fn attention_backward( dinp:DTypePointer[dtype], dpreatt:DTypePointer[dtype], datt:DTypePointer[dtype],
                dout:DTypePointer[dtype], inp:DTypePointer[dtype], att:DTypePointer[dtype],
                B:Int32,T:Int32,C:Int32,NH:Int32):
    """Backward of attention_forward, undoing its passes in reverse order:
    value accumulation, then softmax, then the scaled query·key matmul.
    Accumulates (+=) into dinp, dpreatt, and datt."""
    # inp/dinp are (B, T, 3C) Q,K,V
    # att/datt/dpreatt are (B, NH, T, T)
    # dout is (B, T, C)
    var C3:Int32 = C*3
    var hs:Int32 = C / NH # head size
    var scale:FLOAT = 1.0 / sqrt(hs.cast[dtype]())
    for b in range(B):
        for t in range(T):
            for h in range(NH):
                var att_bth:DTypePointer[dtype] = att + b*NH*T*T + h*T*T + t*T
                var datt_bth:DTypePointer[dtype] = datt + b*NH*T*T + h*T*T + t*T
                var dpreatt_bth:DTypePointer[dtype] = dpreatt + b*NH*T*T + h*T*T + t*T
                var dquery_t:DTypePointer[dtype] = dinp + b * T * C3 + t * C3 + h * hs
                var query_t:DTypePointer[dtype] = inp + b * T * C3 + t * C3 + h * hs
                # backward pass 4, through the value accumulation
                var dout_bth:DTypePointer[dtype] = dout + b * T * C + t * C + h * hs
                for t2 in range(t+1):
                    var value_t2:DTypePointer[dtype] = inp + b * T * C3 + t2 * C3 + h * hs + C*2 # +C*2 because it's value
                    var dvalue_t2:DTypePointer[dtype] = dinp + b * T * C3 + t2 * C3 + h * hs + C*2
                    for i in range(hs):
                        # in the forward pass this was:
                        # out_bth[i] += att_bth[t2] * value_t2[i]
                        # so now we have:
                        datt_bth[t2] += value_t2[i] * dout_bth[i]
                        dvalue_t2[i] += att_bth[t2] * dout_bth[i]
                # backward pass 2 & 3, the softmax
                # note that softmax (like e.g. tanh) doesn't need the input (preatt) to backward
                # softmax Jacobian: d(att[t2])/d(preatt[t3]) = att[t2] * (1{t2==t3} - att[t3])
                for t2 in range(t+1):
                    for t3 in range(t+1):
                        var indicator:FLOAT = 0.0
                        if t2 == t3:
                            indicator = 1.0
                        var local_derivative:FLOAT = att_bth[t2] * (indicator - att_bth[t3])
                        dpreatt_bth[t3] += local_derivative * datt_bth[t2]
                # backward pass 1, the query @ key matmul
                for t2 in range(t+1):
                    var key_t2:DTypePointer[dtype] = inp + b * T * C3 + t2 * C3 + h * hs + C # +C because it's key
                    var dkey_t2:DTypePointer[dtype] = dinp + b * T * C3 + t2 * C3 + h * hs + C # +C because it's key
                    for i in range(hs):
                        # in the forward pass this was:
                        # preatt_bth[t2] += (query_t[i] * key_t2[i]) * scale
                        # so now we have:
                        dquery_t[i] += key_t2[i] * dpreatt_bth[t2] * scale
                        dkey_t2[i] += query_t[i] * dpreatt_bth[t2] * scale
fn gelu_forward( out:DTypePointer[dtype], inp:DTypePointer[dtype],N:Int32):
    """tanh-approximation GELU applied elementwise over N values:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 x^3)))."""
    var k: FLOAT = sqrt(2.0 / M_PI)
    for idx in range(N):
        var v: FLOAT = inp[idx]
        var inner: FLOAT = k * (v + 0.044715 * v * v * v)
        out[idx] = 0.5 * v * (1.0 + tanh(inner))
fn gelu_backward( dinp:DTypePointer[dtype], inp:DTypePointer[dtype], dout:DTypePointer[dtype],N:Int32):
    """Backward of the tanh-approximation GELU: dinp[i] += GELU'(inp[i]) * dout[i]."""
    var s:FLOAT = sqrt(2.0 / M_PI)
    for i in range(N):
        var x:FLOAT = inp[i]
        var cube:FLOAT = 0.044715 * x * x * x
        var tanh_arg:FLOAT = s * (x + cube)
        var tanh_out:FLOAT = tanh(tanh_arg)
        var coshf_out:FLOAT = cosh(tanh_arg)
        # sech^2(u) = 1 / cosh^2(u), the derivative of tanh(u)
        var sech_out:FLOAT = 1.0 / (coshf_out * coshf_out)
        # d/dx [0.5 x (1 + tanh(u))] with u = s * (x + 0.044715 x^3)
        var local_grad:FLOAT = 0.5 * (1.0 + tanh_out) + x * 0.5 * sech_out * s * (1.0 + 3.0 * 0.044715 * x * x)
        dinp[i] += local_grad * dout[i]
fn residual_forward( out:DTypePointer[dtype], inp1:DTypePointer[dtype], inp2:DTypePointer[dtype],N:Int32):
    """Elementwise residual add: out = inp1 + inp2 over N values."""
    for idx in range(N):
        out[idx] = inp1[idx] + inp2[idx]
fn residual_backward( dinp1:DTypePointer[dtype], dinp2:DTypePointer[dtype], dout:DTypePointer[dtype],N:Int32):
    """Backward of the residual add: the output gradient flows unchanged
    into both inputs (accumulated with +=)."""
    for idx in range(N):
        var g: FLOAT = dout[idx]
        dinp1[idx] += g
        dinp2[idx] += g
fn softmax_forward( probs:DTypePointer[dtype], logits:DTypePointer[dtype],B:Int32,T:Int32,V:Int32):
    """Row-wise softmax: probs[b,t,:] = softmax(logits[b,t,:]) over V entries.

    probs (B,T,V) are the probabilities; logits (B,T,V) are the
    unnormalized log probabilities.
    """
    for b in range(B):
        for t in range(T):
            var row_logits: DTypePointer[dtype] = logits + b * T * V + t * V
            var row_probs: DTypePointer[dtype] = probs + b * T * V + t * V
            # find the row max, subtracted below for numerical stability
            var row_max: FLOAT = -10000.0 # TODO something better
            for i in range(V):
                if row_logits[i] > row_max:
                    row_max = row_logits[i]
            # exponentiate and accumulate the normalizer
            var total: FLOAT = 0.0
            for i in range(V):
                row_probs[i] = exp(row_logits[i] - row_max)
                total += row_probs[i]
            # normalize in place
            for i in range(V):
                row_probs[i] /= total
fn crossentropy_forward( losses:DTypePointer[dtype],
                probs:DTypePointer[dtype], targets:DTypePointer[dtype_int],
                B:Int32,T:Int32,V:Int32):
    """Per-position cross-entropy: losses[b,t] = -log(probs[b,t,target[b,t]]).

    probs is (B,T,V); targets is (B,T) of correct-class indices;
    losses is (B,T).
    """
    for b in range(B):
        for t in range(T):
            var row: DTypePointer[dtype] = probs + b * T * V + t * V
            var target: Int32 = targets[b * T + t]
            losses[b * T + t] = -log(row[target])
fn crossentropy_softmax_backward( dlogits:DTypePointer[dtype],
                dlosses:DTypePointer[dtype], probs:DTypePointer[dtype], targets:DTypePointer[dtype_int],
                B:Int32,T:Int32,V:Int32):
    """Fused backward through softmax + cross-entropy:
    dlogits[b,t,i] += (probs[b,t,i] - 1{i == target[b,t]}) * dlosses[b,t]."""
    for b in range(B):
        for t in range(T):
            var dlogits_row: DTypePointer[dtype] = dlogits + b * T * V + t * V
            var probs_row: DTypePointer[dtype] = probs + b * T * V + t * V
            var dloss: FLOAT = dlosses[b * T + t]
            var target: Int32 = targets[b * T + t]
            for i in range(V):
                var indicator: FLOAT = 1.0 if target == i else 0.0
                dlogits_row[i] += (probs_row[i] - indicator) * dloss
# ----------------------------------------------------------------------------
# GPT-2 model definition
# the parameters of the model
alias NUM_PARAMETER_TENSORS = 16
struct ParameterTensors:
    """Named views into one flat parameter buffer.

    Shape legend: V=vocab size, C=channels, L=layers, maxT=max sequence length.
    All tensor pointers alias slices of params_memory once
    alloc_and_point_parameters has run.
    """

    var params_memory: DTypePointer[dtype]
    var wte: DTypePointer[dtype] # (V, C)
    var wpe: DTypePointer[dtype] # (maxT, C)
    var ln1w: DTypePointer[dtype] # (L, C)
    var ln1b: DTypePointer[dtype] # (L, C)
    var qkvw: DTypePointer[dtype] # (L, 3*C, C)
    var qkvb: DTypePointer[dtype] # (L, 3*C)
    var attprojw: DTypePointer[dtype] # (L, C, C)
    var attprojb: DTypePointer[dtype] # (L, C)
    var ln2w: DTypePointer[dtype] # (L, C)
    var ln2b: DTypePointer[dtype] # (L, C)
    var fcw: DTypePointer[dtype] # (L, 4*C, C)
    var fcb: DTypePointer[dtype] # (L, 4*C)
    var fcprojw: DTypePointer[dtype] # (L, C, 4*C)
    var fcprojb: DTypePointer[dtype] # (L, C)
    var lnfw: DTypePointer[dtype] # (C)
    var lnfb: DTypePointer[dtype] # (C)

    fn __init__(
        inout self,
    ):
        # all pointers start null; alloc_and_point_parameters wires them up
        self.params_memory = DTypePointer[dtype]()
        self.wte = DTypePointer[dtype]()
        self.wpe = DTypePointer[dtype]()
        self.ln1w = DTypePointer[dtype]()
        self.ln1b = DTypePointer[dtype]()
        self.qkvw = DTypePointer[dtype]()
        self.qkvb = DTypePointer[dtype]()
        self.attprojw = DTypePointer[dtype]()
        self.attprojb = DTypePointer[dtype]()
        self.ln2w = DTypePointer[dtype]()
        self.ln2b = DTypePointer[dtype]()
        self.fcw = DTypePointer[dtype]()
        self.fcb = DTypePointer[dtype]()
        self.fcprojw = DTypePointer[dtype]()
        self.fcprojb = DTypePointer[dtype]()
        self.lnfw = DTypePointer[dtype]()
        self.lnfb = DTypePointer[dtype]()

    fn alloc_and_point_parameters(inout self,param_sizes: InlinedFixedVector[type=Int32, size=NUM_PARAMETER_TENSORS]) -> DTypePointer[dtype]:
        """Allocates one flat buffer large enough for all tensors and points
        each field at its slice, in declaration order. Returns the buffer
        (also kept in self.params_memory)."""
        # (removed an unused `var i: Int` declaration; the for-loops below
        # bind their own induction variables)
        var num_parameters: Int64 = 0
        for i in range(NUM_PARAMETER_TENSORS):
            num_parameters += param_sizes[i].cast[DType.int64]()
        # malloc all parameters all at once
        self.params_memory = DTypePointer[dtype]().alloc(int(num_parameters))
        # assign all the tensors
        var ptrs = List(
            Pointer.address_of(self.wte),
            Pointer.address_of(self.wpe),
            Pointer.address_of(self.ln1w),
            Pointer.address_of(self.ln1b),
            Pointer.address_of(self.qkvw),
            Pointer.address_of(self.qkvb),
            Pointer.address_of(self.attprojw),
            Pointer.address_of(self.attprojb),
            Pointer.address_of(self.ln2w),
            Pointer.address_of(self.ln2b),
            Pointer.address_of(self.fcw),
            Pointer.address_of(self.fcb),
            Pointer.address_of(self.fcprojw),
            Pointer.address_of(self.fcprojb),
            Pointer.address_of(self.lnfw),
            Pointer.address_of(self.lnfb),
        )
        var params_memory_iterator: DTypePointer[dtype] = self.params_memory
        for i in range(NUM_PARAMETER_TENSORS):
            ptrs[i][] = params_memory_iterator
            params_memory_iterator += param_sizes[i]
        return self.params_memory
alias NUM_ACTIVATION_TENSORS = 23
@register_passable("trivial")
struct ActivationTensors:
    """Named views into one flat activation buffer (wired up by
    alloc_and_point_activations). Shape legend: B=batch, T=sequence length,
    C=channels, L=layers, NH=heads, V=vocab size."""

    var encoded: DTypePointer[dtype] # (B, T, C)
    var ln1: DTypePointer[dtype] # (L, B, T, C)
    var ln1_mean: DTypePointer[dtype] # (L, B, T)
    var ln1_rstd: DTypePointer[dtype] # (L, B, T)
    var qkv: DTypePointer[dtype] # (L, B, T, 3*C)
    var atty: DTypePointer[dtype] # (L, B, T, C)
    var preatt: DTypePointer[dtype] # (L, B, NH, T, T)
    var att: DTypePointer[dtype] # (L, B, NH, T, T)
    var attproj: DTypePointer[dtype] # (L, B, T, C)
    var residual2: DTypePointer[dtype] # (L, B, T, C)
    var ln2: DTypePointer[dtype] # (L, B, T, C)
    var ln2_mean: DTypePointer[dtype] # (L, B, T)
    var ln2_rstd: DTypePointer[dtype] # (L, B, T)
    var fch: DTypePointer[dtype] # (L, B, T, 4*C)
    var fch_gelu: DTypePointer[dtype] # (L, B, T, 4*C)
    var fcproj: DTypePointer[dtype] # (L, B, T, C)
    var residual3: DTypePointer[dtype] # (L, B, T, C)
    var lnf: DTypePointer[dtype] # (B, T, C)
    var lnf_mean: DTypePointer[dtype] # (B, T)
    var lnf_rstd: DTypePointer[dtype] # (B, T)
    var logits: DTypePointer[dtype] # (B, T, V)
    var probs: DTypePointer[dtype] # (B, T, V)
    var losses: DTypePointer[dtype] # (B, T)

    fn __init__(
        inout self,
    ):
        # all pointers start null; alloc_and_point_activations wires them up
        self.encoded = DTypePointer[dtype]()
        self.ln1 = DTypePointer[dtype]()
        self.ln1_mean = DTypePointer[dtype]()
        self.ln1_rstd = DTypePointer[dtype]()
        self.qkv = DTypePointer[dtype]()
        self.atty = DTypePointer[dtype]()
        self.preatt = DTypePointer[dtype]()
        self.att = DTypePointer[dtype]()
        self.attproj = DTypePointer[dtype]()
        self.residual2 = DTypePointer[dtype]()
        self.ln2 = DTypePointer[dtype]()
        self.ln2_mean = DTypePointer[dtype]()
        self.ln2_rstd = DTypePointer[dtype]()
        self.fch = DTypePointer[dtype]()
        self.fch_gelu = DTypePointer[dtype]()
        self.fcproj = DTypePointer[dtype]()
        self.residual3 = DTypePointer[dtype]()
        self.lnf = DTypePointer[dtype]()
        self.lnf_mean = DTypePointer[dtype]()
        self.lnf_rstd = DTypePointer[dtype]()
        self.logits = DTypePointer[dtype]()
        self.probs = DTypePointer[dtype]()
        self.losses = DTypePointer[dtype]()

    fn alloc_and_point_activations(inout self,act_sizes: InlinedFixedVector[type=Int32, size=NUM_ACTIVATION_TENSORS]) -> DTypePointer[dtype]:
        """Allocates one flat buffer sized by act_sizes and points each field
        at its slice, in declaration order. Returns the buffer; the caller
        owns it and must free it."""
        var ptrs = List(
            Pointer.address_of(self.encoded),
            Pointer.address_of(self.ln1),
            Pointer.address_of(self.ln1_mean),
            Pointer.address_of(self.ln1_rstd),
            Pointer.address_of(self.qkv),
            Pointer.address_of(self.atty),
            Pointer.address_of(self.preatt),
            Pointer.address_of(self.att),
            Pointer.address_of(self.attproj),
            Pointer.address_of(self.residual2),
            Pointer.address_of(self.ln2),
            Pointer.address_of(self.ln2_mean),
            Pointer.address_of(self.ln2_rstd),
            Pointer.address_of(self.fch),
            Pointer.address_of(self.fch_gelu),
            Pointer.address_of(self.fcproj),
            Pointer.address_of(self.residual3),
            Pointer.address_of(self.lnf),
            Pointer.address_of(self.lnf_mean),
            Pointer.address_of(self.lnf_rstd),
            Pointer.address_of(self.logits),
            Pointer.address_of(self.probs),
            Pointer.address_of(self.losses),
        )
        var num_activations: Int64 = 0
        for i in range(NUM_ACTIVATION_TENSORS):
            num_activations += act_sizes[i].cast[DType.int64]()
        var acts_memory = DTypePointer[dtype]().alloc(int(num_activations))
        var acts_memory_iterator: DTypePointer[dtype] = acts_memory
        for i in range(NUM_ACTIVATION_TENSORS):
            ptrs[i][] = acts_memory_iterator
            acts_memory_iterator += act_sizes[i]
        return acts_memory
@value
struct GPT2Config:
    """Model hyperparameters read from the checkpoint header."""

    var max_seq_len: Int32 # max sequence length, e.g. 1024
    var vocab_size: Int32 # vocab size, e.g. 50257
    var num_layers: Int32 # number of layers, e.g. 12
    var num_heads: Int32 # number of heads in attention, e.g. 12
    var channels: Int32 # number of channels, e.g. 768
struct GPT2:
    """The full GPT-2 model: config, parameters and their gradients, AdamW
    moment buffers, activations and their gradients, plus the inputs/targets
    of the most recent forward pass. Constructed from a binary checkpoint."""

    var config: GPT2Config
    # the weights of the model, and their sizes
    var params: ParameterTensors
    var param_sizes: InlinedFixedVector[type=Int32, size=NUM_PARAMETER_TENSORS]
    var params_memory: DTypePointer[dtype]
    var num_parameters: Int64
    # gradients of the weights
    var grads: ParameterTensors
    var grads_memory: DTypePointer[dtype]
    # buffers for the AdamW optimizer
    var m_memory: DTypePointer[dtype]
    var v_memory: DTypePointer[dtype]
    # the activations of the model, and their sizes
    var acts: ActivationTensors
    var act_sizes: InlinedFixedVector[type=Int32, size=NUM_ACTIVATION_TENSORS]
    var acts_memory: DTypePointer[dtype]
    var num_activations: Int64
    # gradients of the activations
    var grads_acts: ActivationTensors
    var grads_acts_memory: DTypePointer[dtype]
    # other run state configuration
    var batch_size: INT # the batch size (B) of current forward pass
    var seq_len: INT # the sequence length (T) of current forward pass
    var inputs: DTypePointer[dtype_int] # the input tokens for the current forward pass
    var targets: DTypePointer[
        dtype_int
    ] # the target tokens for the current forward pass
    var mean_loss: FLOAT # after a forward pass with targets, will be populated with the mean loss
    var checkpoint_path: StringRef

    fn __init__(inout self, checkpoint_path: StringRef) raises:
        """Reads the checkpoint header, validates magic/version, allocates the
        flat parameter buffer, and loads all weights from the file.
        Gradient/optimizer/activation buffers stay null until first use."""
        self.checkpoint_path = checkpoint_path
        self.param_sizes = InlinedFixedVector[type=Int32, size=NUM_PARAMETER_TENSORS](
            NUM_PARAMETER_TENSORS
        )
        self.act_sizes = InlinedFixedVector[type=Int32, size=NUM_ACTIVATION_TENSORS](
            NUM_ACTIVATION_TENSORS
        )
        var model_file = open(checkpoint_path, "r")
        var bytes_of_config_params = 256 * sizeof[DType.int32]()
        # config_data_raw is a raw byte buffer holding the 256-int32 header
        var config_data_raw = model_file.read(bytes_of_config_params)
        var model_header = config_data_raw._steal_ptr().bitcast[DType.int32]()
        if model_header[0] != 20240326:
            print("Bad magic model file")
            # EXIT_1
        if model_header[1] != 1:
            print("Bad version in model file")
            # EXIT_1
        # read in hyperparameters
        self.config = GPT2Config(
            model_header[2].cast[DType.int32](),
            model_header[3].cast[DType.int32](),
            model_header[4].cast[DType.int32](),
            model_header[5].cast[DType.int32](),
            model_header[6].cast[DType.int32](),
        )
        var maxT: Int32 = self.config.max_seq_len
        var V: Int32 = self.config.vocab_size
        var L: Int32 = self.config.num_layers
        var NH: Int32 = self.config.num_heads
        var C: Int32 = self.config.channels
        print("[GPT-2]")
        print("max_seq_len:", self.config.max_seq_len)
        print("vocab_size:", self.config.vocab_size)
        print("num_layers:", self.config.num_layers)
        print("num_heads:", self.config.num_heads)
        print("channels:", self.config.channels)
        # allocate space for all the parameters and read them in
        # (sizes must match ParameterTensors' declaration order)
        self.param_sizes[0] = V * C
        self.param_sizes[1] = maxT * C
        self.param_sizes[2] = L * C
        self.param_sizes[3] = L * C
        self.param_sizes[4] = L * (3 * C) * C
        self.param_sizes[5] = L * (3 * C)
        self.param_sizes[6] = L * C * C
        self.param_sizes[7] = L * C
        self.param_sizes[8] = L * C
        self.param_sizes[9] = L * C
        self.param_sizes[10] = L * (4 * C) * C
        self.param_sizes[11] = L * (4 * C)
        self.param_sizes[12] = L * C * (4 * C)
        self.param_sizes[13] = L * C
        self.param_sizes[14] = C
        self.param_sizes[15] = C
        # count the number of parameters
        var num_parameters: Int32 = 0
        for i in range(NUM_PARAMETER_TENSORS):
            num_parameters += self.param_sizes[i]
        print("num_parameters:", num_parameters)
        self.num_parameters = num_parameters.cast[DType.int64]()
        # read in all the parameters from file
        self.params = ParameterTensors()
        self.params_memory = self.params.alloc_and_point_parameters(self.param_sizes)
        var data_raw = model_file.read( int(num_parameters * SIZEOF_FLOAT))
        model_file.close()
        var float32_ptr= data_raw._steal_ptr().bitcast[DType.float32]()
        memcpy(dest=self.params_memory,src=float32_ptr,count=int(num_parameters))
        # other inits
        self.acts = ActivationTensors()
        self.num_activations = 0 # for now
        self.acts_memory = NULL
        self.grads_memory = NULL
        self.m_memory = NULL
        self.v_memory = NULL
        self.grads_acts_memory = NULL
        self.inputs = NULL_INT
        self.targets = NULL_INT
        self.batch_size = 0
        self.seq_len = 0
        self.mean_loss = -1.0 # -1.0 will designate no loss
        self.grads = ParameterTensors()
        self.grads_acts = ActivationTensors()
fn gpt2_forward(inout model:GPT2, inputs:DTypePointer[dtype_int], targets:DTypePointer[dtype_int],B:Int32,T:Int32):
    """Run the full GPT-2 forward pass on a (B, T) batch of token ids.

    Writes all intermediate activations into model.acts. If `targets` is not
    NULL_INT, also computes per-token cross-entropy losses and stores their
    mean in model.mean_loss; otherwise model.mean_loss is set to -1.0.
    On the first call, activation memory is lazily allocated and sized for
    this (B, T); later calls must not use a larger B or T.
    """
    # targets are optional and could be NULL
    # ensure the model was initialized or error out
    if (model.params_memory == NULL):
        print("Error: model was not initialized properly.")
    # convenience parameters
    var V:Int32 = model.config.vocab_size
    var L:Int32 = model.config.num_layers
    var NH:Int32 = model.config.num_heads
    var C:Int32 = model.config.channels
    # allocate space for all the activations if needed (done here, lazily)
    if(model.acts_memory == NULL):
        # record the current B,T as well
        model.batch_size = B
        model.seq_len = T
        # and now allocate the space (sizes follow the ActivationTensors layout)
        model.act_sizes[0] = B * T * C           # encoded
        model.act_sizes[1] = L * B * T * C       # ln1
        model.act_sizes[2] = L * B * T           # ln1_mean
        model.act_sizes[3] = L * B * T           # ln1_rstd
        model.act_sizes[4] = L * B * T * 3*C     # qkv
        model.act_sizes[5] = L * B * T * C       # atty
        model.act_sizes[6] = L * B * NH * T * T  # preatt
        model.act_sizes[7] = L * B * NH * T * T  # att
        model.act_sizes[8] = L * B * T * C       # attproj
        model.act_sizes[9] = L * B * T * C       # residual2
        model.act_sizes[10] = L * B * T * C      # ln2
        model.act_sizes[11] = L * B * T          # ln2_mean
        model.act_sizes[12] = L * B * T          # ln2_rstd
        model.act_sizes[13] = L * B * T * 4*C    # fch
        model.act_sizes[14] = L * B * T * 4*C    # fch_gelu
        model.act_sizes[15] = L * B * T * C      # fcproj
        model.act_sizes[16] = L * B * T * C      # residual3
        model.act_sizes[17] = B * T * C          # lnf
        model.act_sizes[18] = B * T              # lnf_mean
        model.act_sizes[19] = B * T              # lnf_rstd
        model.act_sizes[20] = B * T * V          # logits
        model.act_sizes[21] = B * T * V          # probs
        model.act_sizes[22] = B * T              # losses
        var num_activations:Int32 = 0
        for i in range(NUM_ACTIVATION_TENSORS):
            num_activations += model.act_sizes[i]
        print("num_activations:", num_activations)
        model.acts_memory = model.acts.alloc_and_point_activations(model.act_sizes)
        model.num_activations = num_activations.cast[DType.int64]()
        # also create memory for caching inputs and targets
        model.inputs = DTypePointer[dtype_int]().alloc(int(B * T) )
        model.targets = DTypePointer[dtype_int]().alloc(int(B * T) )
    else:
        # validate B,T is no larger than what was previously allocated
        # in principle, we could re-allocate a larger chunk of memory, for now we just error out
        if B > int(model.batch_size) or T > int(model.seq_len):
            print("Error: batch size or sequence length is inadequately large")
            #print("Model: B=%d T=%d, Desired: B=%d T=%d\n", model.batch_size, model.seq_len, B, T)
    # cache the inputs/targets (gpt2_backward reads model.inputs/model.targets)
    memcpy(model.inputs, inputs, int(B * T))
    if targets != NULL_INT:
        memcpy(model.targets, targets, int(B * T))
    # forward pass
    var residual:DTypePointer[dtype]
    encoder_forward(model.acts.encoded, inputs, model.params.wte, model.params.wpe, B, T, C) # encoding goes into residual[0]
    for l in range(L):
        # Residual input of this layer: previous layer's residual3 slice.
        # For l == 0 the (l-1) pointer computed first is never dereferenced —
        # it is immediately overwritten with the encoder output.
        residual = model.acts.residual3 + (l-1) * B * T * C
        if l == 0:
            residual = model.acts.encoded
        # get the pointers of the weights for this layer
        var l_ln1w:DTypePointer[dtype] = model.params.ln1w + l * C
        var l_ln1b:DTypePointer[dtype] = model.params.ln1b + l * C
        var l_qkvw:DTypePointer[dtype] = model.params.qkvw + l * 3*C * C
        var l_qkvb:DTypePointer[dtype] = model.params.qkvb + l * 3*C
        var l_attprojw:DTypePointer[dtype] = model.params.attprojw + l * C * C
        var l_attprojb:DTypePointer[dtype] = model.params.attprojb + l * C
        var l_ln2w:DTypePointer[dtype] = model.params.ln2w + l * C
        var l_ln2b:DTypePointer[dtype] = model.params.ln2b + l * C
        var l_fcw:DTypePointer[dtype] = model.params.fcw + l * 4*C * C
        var l_fcb:DTypePointer[dtype] = model.params.fcb + l * 4*C
        var l_fcprojw:DTypePointer[dtype] = model.params.fcprojw + l * C * 4*C
        var l_fcprojb:DTypePointer[dtype] = model.params.fcprojb + l * C
        # get the pointers of the activations for this layer
        var l_ln1:DTypePointer[dtype] = model.acts.ln1 + l * B * T * C
        var l_ln1_mean:DTypePointer[dtype] = model.acts.ln1_mean + l * B * T
        var l_ln1_rstd:DTypePointer[dtype] = model.acts.ln1_rstd + l * B * T
        var l_qkv:DTypePointer[dtype] = model.acts.qkv + l * B * T * 3*C
        var l_atty:DTypePointer[dtype] = model.acts.atty + l * B * T * C
        var l_preatt:DTypePointer[dtype] = model.acts.preatt + l * B * NH * T * T
        var l_att:DTypePointer[dtype] = model.acts.att + l * B * NH * T * T
        var l_attproj:DTypePointer[dtype] = model.acts.attproj + l * B * T * C
        var l_residual2:DTypePointer[dtype] = model.acts.residual2 + l * B * T * C
        var l_ln2:DTypePointer[dtype] = model.acts.ln2 + l * B * T * C
        var l_ln2_mean:DTypePointer[dtype] = model.acts.ln2_mean + l * B * T
        var l_ln2_rstd:DTypePointer[dtype] = model.acts.ln2_rstd + l * B * T
        var l_fch:DTypePointer[dtype] = model.acts.fch + l * B * T * 4*C
        var l_fch_gelu:DTypePointer[dtype] = model.acts.fch_gelu + l * B * T * 4*C
        var l_fcproj:DTypePointer[dtype] = model.acts.fcproj + l * B * T * C
        var l_residual3:DTypePointer[dtype] = model.acts.residual3 + l * B * T * C
        # now do the forward pass: ln1 -> attention -> residual -> ln2 -> MLP -> residual
        layernorm_forward(l_ln1, l_ln1_mean, l_ln1_rstd, residual, l_ln1w, l_ln1b, B, T, C)
        matmul_forward(l_qkv, l_ln1, l_qkvw, l_qkvb, B, T, C, 3*C)
        attention_forward(l_atty, l_preatt, l_att, l_qkv, B, T, C, NH)
        matmul_forward(l_attproj, l_atty, l_attprojw, l_attprojb, B, T, C, C)
        residual_forward(l_residual2, residual, l_attproj, B*T*C)
        layernorm_forward(l_ln2, l_ln2_mean, l_ln2_rstd, l_residual2, l_ln2w, l_ln2b, B, T, C)
        matmul_forward(l_fch, l_ln2, l_fcw, l_fcb, B, T, C, 4*C)
        gelu_forward(l_fch_gelu, l_fch, B*T*4*C)
        matmul_forward(l_fcproj, l_fch_gelu, l_fcprojw, l_fcprojb, B, T, 4*C, C)
        residual_forward(l_residual3, l_residual2, l_fcproj, B*T*C)
    residual = model.acts.residual3 + (L-1) * B * T * C # last residual is in residual3
    layernorm_forward(model.acts.lnf, model.acts.lnf_mean, model.acts.lnf_rstd, residual, model.params.lnfw, model.params.lnfb, B, T, C)
    # Final projection reuses wte (weight tying), then softmax to probabilities.
    matmul_forward(model.acts.logits, model.acts.lnf, model.params.wte, NULL, B, T, C, V)
    softmax_forward(model.acts.probs, model.acts.logits, B, T, V)
    # also forward the cross-entropy loss function if we have the targets
    if targets != NULL_INT:
        crossentropy_forward(model.acts.losses, model.acts.probs, targets, B, T, V)
        # for convenience also evaluate the mean loss
        var mean_loss:FLOAT = 0.0
        for i in range(B*T):
            mean_loss += model.acts.losses[i]
        mean_loss /= int(B * T)
        model.mean_loss = mean_loss
    else:
        # if we don't have targets, we don't have a loss
        model.mean_loss = -1.0
fn gpt2_zero_grad(inout model: GPT2):
    """Reset all weight and activation gradients to zero.

    Buffers that have not been lazily allocated yet are left untouched.
    """
    var weight_grads = model.grads_memory
    if weight_grads != NULL:
        memset_zero(weight_grads, int(model.num_parameters))
    var activation_grads = model.grads_acts_memory
    if activation_grads != NULL:
        memset_zero(activation_grads, int(model.num_activations))
fn gpt2_backward(inout model:GPT2):
    """Backpropagate through the model, accumulating into model.grads.

    Requires a preceding gpt2_forward call WITH targets (model.mean_loss set);
    reads the cached model.inputs/model.targets and the activations from that
    forward pass. Gradient memory is lazily allocated (and zeroed) on first call.
    Layers are walked in reverse, mirroring gpt2_forward exactly.
    """
    # double check we forwarded previously, with targets
    if (model.mean_loss == -1.0):
        print("Error: must forward with targets before backward\n")
    # lazily allocate the memory for gradients of the weights and activations, if needed
    if (model.grads_memory == NULL):
        model.grads_memory = model.grads.alloc_and_point_parameters(model.param_sizes)
        model.grads_acts_memory = model.grads_acts.alloc_and_point_activations( model.act_sizes)
        gpt2_zero_grad(model)
    # convenience shortcuts
    var B:Int32 = model.batch_size
    var T:Int32 = model.seq_len
    var V:Int32 = model.config.vocab_size
    var L:Int32 = model.config.num_layers
    var NH:Int32 = model.config.num_heads
    var C:Int32 = model.config.channels
    # backward pass
    # we kick off the chain by filling in dlosses with 1.0/(B*T), to get the mean loss
    var dloss_mean:FLOAT = 1.0 / int(B * T)
    for i in range(B*T):
        model.grads_acts.losses[i] = dloss_mean
    # fused softmax + cross-entropy backward, then the tied-weight lm head
    crossentropy_softmax_backward(model.grads_acts.logits, model.grads_acts.losses, model.acts.probs, model.targets, B, T, V)
    matmul_backward(model.grads_acts.lnf, model.grads.wte, NULL, model.grads_acts.logits, model.acts.lnf, model.params.wte, B, T, C, V)
    var residual:DTypePointer[dtype] = model.acts.residual3 + (L-1) * B * T * C # last layer's residual
    var dresidual:DTypePointer[dtype] = model.grads_acts.residual3 + (L-1) * B * T * C # write to last layer's residual
    layernorm_backward(dresidual, model.grads.lnfw, model.grads.lnfb, model.grads_acts.lnf, residual, model.params.lnfw, model.acts.lnf_mean, model.acts.lnf_rstd, B, T, C)
    for l in range(L-1,-1,-1):
        # residual feeding this layer: encoder output for layer 0, otherwise
        # the previous layer's residual3 slice (same convention as forward)
        var residual = model.acts.encoded
        var dresidual = model.grads_acts.encoded
        if l != 0:
            residual = model.acts.residual3 + (l-1) * B * T * C
            dresidual = model.grads_acts.residual3 + (l-1) * B * T * C
        # get the pointers of the weights for this layer
        var l_ln1w:DTypePointer[dtype] = model.params.ln1w + l * C
        var l_qkvw:DTypePointer[dtype] = model.params.qkvw + l * 3*C * C
        var l_attprojw:DTypePointer[dtype] = model.params.attprojw + l * C * C
        var l_ln2w:DTypePointer[dtype] = model.params.ln2w + l * C
        var l_fcw:DTypePointer[dtype] = model.params.fcw + l * 4*C * C
        var l_fcprojw:DTypePointer[dtype] = model.params.fcprojw + l * C * 4*C
        # get the pointers of the gradients of the weights for this layer
        var dl_ln1w:DTypePointer[dtype] = model.grads.ln1w + l * C
        var dl_ln1b:DTypePointer[dtype] = model.grads.ln1b + l * C
        var dl_qkvw:DTypePointer[dtype] = model.grads.qkvw + l * 3*C * C
        var dl_qkvb:DTypePointer[dtype] = model.grads.qkvb + l * 3*C
        var dl_attprojw:DTypePointer[dtype] = model.grads.attprojw + l * C * C
        var dl_attprojb:DTypePointer[dtype] = model.grads.attprojb + l * C
        var dl_ln2w:DTypePointer[dtype] = model.grads.ln2w + l * C
        var dl_ln2b:DTypePointer[dtype] = model.grads.ln2b + l * C
        var dl_fcw:DTypePointer[dtype] = model.grads.fcw + l * 4*C * C
        var dl_fcb:DTypePointer[dtype] = model.grads.fcb + l * 4*C
        var dl_fcprojw:DTypePointer[dtype] = model.grads.fcprojw + l * C * 4*C
        var dl_fcprojb:DTypePointer[dtype] = model.grads.fcprojb + l * C
        # get the pointers of the activations for this layer
        var l_ln1:DTypePointer[dtype] = model.acts.ln1 + l * B * T * C
        var l_ln1_mean:DTypePointer[dtype] = model.acts.ln1_mean + l * B * T
        var l_ln1_rstd:DTypePointer[dtype] = model.acts.ln1_rstd + l * B * T
        var l_qkv:DTypePointer[dtype] = model.acts.qkv + l * B * T * 3*C
        var l_atty:DTypePointer[dtype] = model.acts.atty + l * B * T * C
        var l_att:DTypePointer[dtype] = model.acts.att + l * B * NH * T * T
        var l_residual2:DTypePointer[dtype] = model.acts.residual2 + l * B * T * C
        var l_ln2:DTypePointer[dtype] = model.acts.ln2 + l * B * T * C
        var l_ln2_mean:DTypePointer[dtype] = model.acts.ln2_mean + l * B * T
        var l_ln2_rstd:DTypePointer[dtype] = model.acts.ln2_rstd + l * B * T
        var l_fch:DTypePointer[dtype] = model.acts.fch + l * B * T * 4*C
        var l_fch_gelu:DTypePointer[dtype] = model.acts.fch_gelu + l * B * T * 4*C
        # get the pointers of the gradients of the activations for this layer
        var dl_ln1:DTypePointer[dtype] = model.grads_acts.ln1 + l * B * T * C
        var dl_qkv:DTypePointer[dtype] = model.grads_acts.qkv + l * B * T * 3*C
        var dl_atty:DTypePointer[dtype] = model.grads_acts.atty + l * B * T * C
        var dl_preatt:DTypePointer[dtype] = model.grads_acts.preatt + l * B * NH * T * T
        var dl_att:DTypePointer[dtype] = model.grads_acts.att + l * B * NH * T * T
        var dl_attproj:DTypePointer[dtype] = model.grads_acts.attproj + l * B * T * C
        var dl_residual2:DTypePointer[dtype] = model.grads_acts.residual2 + l * B * T * C
        var dl_ln2:DTypePointer[dtype] = model.grads_acts.ln2 + l * B * T * C
        var dl_fch:DTypePointer[dtype] = model.grads_acts.fch + l * B * T * 4*C
        var dl_fch_gelu:DTypePointer[dtype] = model.grads_acts.fch_gelu + l * B * T * 4*C
        var dl_fcproj:DTypePointer[dtype] = model.grads_acts.fcproj + l * B * T * C
        var dl_residual3:DTypePointer[dtype] = model.grads_acts.residual3 + l * B * T * C
        # backprop this layer (exact reverse order of the forward calls)
        residual_backward(dl_residual2, dl_fcproj, dl_residual3, B*T*C)
        matmul_backward(dl_fch_gelu, dl_fcprojw, dl_fcprojb, dl_fcproj, l_fch_gelu, l_fcprojw, B, T, 4*C, C)
        gelu_backward(dl_fch, l_fch, dl_fch_gelu, B*T*4*C)
        matmul_backward(dl_ln2, dl_fcw, dl_fcb, dl_fch, l_ln2, l_fcw, B, T, C, 4*C)
        layernorm_backward(dl_residual2, dl_ln2w, dl_ln2b, dl_ln2, l_residual2, l_ln2w, l_ln2_mean, l_ln2_rstd, B, T, C)
        residual_backward(dresidual, dl_attproj, dl_residual2, B*T*C)
        matmul_backward(dl_atty, dl_attprojw, dl_attprojb, dl_attproj, l_atty, l_attprojw, B, T, C, C)
        attention_backward(dl_qkv, dl_preatt, dl_att, dl_atty, l_qkv, l_att, B, T, C, NH)
        matmul_backward(dl_ln1, dl_qkvw, dl_qkvb, dl_qkv, l_ln1, l_qkvw, B, T, C, 3*C)
        layernorm_backward(dresidual, dl_ln1w, dl_ln1b, dl_ln1, residual, l_ln1w, l_ln1_mean, l_ln1_rstd, B, T, C)
    encoder_backward(model.grads.wte, model.grads.wpe, model.grads_acts.encoded, model.inputs, B, T, C)
fn gpt2_update(inout model:GPT2, learning_rate:FLOAT, beta1:FLOAT, beta2:FLOAT, eps:FLOAT, weight_decay:FLOAT,t:Int32):
    """Apply one AdamW optimization step to every parameter.

    Reference: https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html
    `t` is the 1-based step number used for bias correction. The first call
    lazily allocates and zeroes the moment buffers.
    """
    if (model.m_memory == NULL):
        model.m_memory = DTypePointer[dtype]().alloc(int(model.num_parameters))
        model.v_memory = DTypePointer[dtype]().alloc(int(model.num_parameters))
        memset_zero(model.m_memory, int(model.num_parameters))
        memset_zero(model.v_memory, int(model.num_parameters))
    for idx in range(model.num_parameters):
        var weight: FLOAT = model.params_memory[idx]
        var g: FLOAT = model.grads_memory[idx]
        # Exponential moving averages: first moment (momentum) ...
        var moment1: FLOAT = beta1 * model.m_memory[idx] + (1.0 - beta1) * g
        # ... and second moment (uncentered variance, RMSprop-style).
        var moment2: FLOAT = beta2 * model.v_memory[idx] + (1.0 - beta2) * g * g
        # Bias-correct both moments for the warm-up steps.
        var m_hat: FLOAT = moment1 / (1.0 - pow(beta1, t))
        var v_hat: FLOAT = moment2 / (1.0 - pow(beta2, t))
        # Persist optimizer state, then apply the decoupled-weight-decay update.
        model.m_memory[idx] = moment1
        model.v_memory[idx] = moment2
        model.params_memory[idx] -= learning_rate * (m_hat / (sqrt(v_hat) + eps) + weight_decay * weight)
fn gpt2_free(inout model:GPT2):
    """Release every heap buffer owned by the model: weights, weight grads,
    AdamW moments, activations, activation grads, and the cached input/target
    token buffers. The model must not be used after this call.
    """
    model.params_memory.free()
    model.grads_memory.free()
    model.m_memory.free()
    model.v_memory.free()
    model.acts_memory.free()
    model.grads_acts_memory.free()
    model.inputs.free()
    model.targets.free()
#ifndef TESTING
# if we are TESTING (see test_gpt2.c), we'll skip the maiN:Int32 below
# ----------------------------------------------------------------------------
# data loader lite
# returns random batches of data from a file of integers
struct DataLoader:
    """Sequential batch loader over a flat binary file of int32 token ids.

    inputs and targets alias one shared buffer of B*T+1 tokens, with targets
    offset by one position (next-token prediction).
    """
    # hyperparameters
    var B:Int32  # batch size
    var T:Int32  # sequence length
    # input handling and its state
    var filename:StringRef
    var tokens_file:FileHandle
    var file_size:Int32          # total file size in bytes
    var current_position:Int32   # byte offset of the next batch
    # output memory
    var batch:DTypePointer[dtype_int]    # owns the B*T+1 token buffer
    var inputs:DTypePointer[dtype_int]   # alias of batch
    var targets:DTypePointer[dtype_int]  # alias of batch + 1 (shifted by one token)
    # convenience variables
    var num_batches:Int32
    fn __init__(inout self):
        """Zero-initialize all fields; call dataloader_init() before use."""
        self.B = 0
        self.T = 0
        self.filename = ""
        self.tokens_file=FileHandle()
        self.file_size = 0
        self.current_position = 0
        self.batch = DTypePointer[dtype_int]()
        self.inputs = DTypePointer[dtype_int]()
        self.targets = DTypePointer[dtype_int]()
        self.num_batches = 0
fn dataloader_init(inout loader:DataLoader,filename:StringRef,B:Int32,T:Int32) raises:
    """Open the token file, validate its size, and allocate the batch buffer.

    Args:
        loader: Loader to initialize (its FileHandle/pointers are overwritten).
        filename: Path to the flat int32 token file.
        B: Batch size.
        T: Sequence length.
    """
    loader.B = B
    loader.T = T
    # open the input file for reading
    try:
        loader.tokens_file = open(filename, "rb")
    except e:
        # best-effort: report and continue; later reads will fail loudly
        print("Error opening tokens file",e)
    # determine the file size (via Python's os module, no native stat yet)
    var _os = Python.import_module("os")
    loader.file_size = int(_os.path.getsize(filename))
    if (loader.file_size < int(B * T + 1) * 4):
        print("Error: file size is too small for the batch size and sequence length\n")
    loader.current_position = 0 # start at the beginning
    # allocate space for B*T + 1 integers to store the inputs and targets
    loader.batch = DTypePointer[dtype_int]().alloc(int(B * T + 1))
    loader.inputs = loader.batch
    loader.targets = loader.batch + 1 # targets are shifted by one
    # NOTE(review): `/` presumably intended as integer (floor) division into an
    # Int32 field, mirroring the C original — confirm Mojo's division semantics here.
    loader.num_batches = int(loader.file_size) / int(B * T * SIZEOF_INT)
fn dataloader_reset(inout loader:DataLoader):
    """Rewind the loader so the next batch is read from the start of the file."""
    loader.current_position = 0
fn dataloader_next_batch(inout loader:DataLoader) raises:
    """Read the next B*T+1 tokens into loader.batch and advance the cursor.

    Wraps back to the start of the file when fewer than B*T+1 tokens remain.
    Batches overlap by one token so targets can be inputs shifted by one.
    """
    var B:Int32 = loader.B
    var T:Int32 = loader.T
    # if we are at the end of the file, loop back to the beginning
    if loader.current_position + int((B*T+1) * SIZEOF_INT) > loader.file_size:
        loader.current_position = 0
    # read the B*T+1 integers from the file into batch
    _ = loader.tokens_file.seek( int(loader.current_position))
    # raw bytes are reinterpreted in place as int32 tokens
    var data_raw = loader.tokens_file.read(int((B*T+1) * SIZEOF_INT))
    var int32_ptr= data_raw._steal_ptr().bitcast[DType.int32]()
    memcpy(dest=loader.batch,src=int32_ptr,count=int(B*T+1))
    # advance the current position by B*T integers (not B*T+1: batches overlap by one)
    loader.current_position += B*T * SIZEOF_INT
fn dataloader_free(inout loader:DataLoader) raises:
    """Close the token file and release the batch buffer (inputs/targets are
    aliases of batch, so only batch is freed)."""
    loader.tokens_file.close()
    loader.batch.free()
# ----------------------------------------------------------------------------
# sampler
fn random_u32(inout state: UInt64) -> UInt32:
    """xorshift-star PRNG: advance `state` in place and return 32 random bits.

    The three shift-xor steps permute the 64-bit state; the multiply by
    RU32_HEX and top-word extraction produce the output.
    """
    state ^= state >> 12
    state ^= state << 25
    state ^= state >> 27
    var scrambled = state * RU32_HEX
    return (scrambled >> 32).cast[DType.uint32]()
fn random_f32(inout state: UInt64) -> Float32:
    """Uniform float32 in [0, 1): top 24 bits of a random u32, scaled by RF32_DIV."""
    var top_bits = random_u32(state) >> 8
    return top_bits.cast[DType.float32]() / RF32_DIV
fn sample_mult( probabilities:DTypePointer[dtype],n:Int32, coin:FLOAT) -> Int32:
    """Sample an index from a discrete distribution of `n` probabilities.

    The probabilities must sum to 1; `coin` is a uniform draw in [0, 1),
    usually from random_f32(). Walks the CDF and returns the first index
    whose cumulative mass exceeds `coin`.
    """
    var cumulative: FLOAT = 0.0
    for idx in range(n):
        cumulative += probabilities[idx]
        if coin < cumulative:
            return idx
    # Round-off can leave the running sum slightly below 1.0 — fall back to
    # the last index, exactly as the original loop would.
    return n - 1
# ----------------------------------------------------------------------------
# Tokenizer (only supports decoding)
struct Tokenizer:
    """GPT-2 tokenizer loaded from a binary vocab file. Decoding only.

    init_ok stays 0 when the file is missing or unreadable, in which case
    decode() returns "" for every token.
    """
    var vocab_size:Int
    var token_table:List[String]  # token_id -> token string
    var init_ok:Int               # 1 once the vocab was loaded successfully
    fn __init__(inout self,filename:StringRef) raises:
        """Read the tokenizer file: a 256-int32 header (magic 20240328,
        version 1, vocab size) followed by length-prefixed token strings.
        Missing file is non-fatal: a warning is printed and init_ok stays 0.
        """
        self.vocab_size = 0
        self.token_table = List[String]()
        self.init_ok = 0
        var file:FileHandle
        try:
            file = open(filename, "rb")
        except:
            print("---")
            print("WARNING: Failed to open the tokenizer file", filename)
            print("The Tokenizer is a new feature added April 14 2024.")
            print("Re-run `python train_gpt2.py` to write it")
            print("---")
            self.init_ok = 0
            return
        var num_bytes = 256 * sizeof[DType.int32]()
        # header bytes reinterpreted in place as int32s
        var data_raw = file.read(num_bytes)
        var header = data_raw._steal_ptr().bitcast[DType.int32]()
        if header[0] != 20240328:
            print("Bad magic model file")
            # EXIT_1
        if header[1] != 1:
            print("Bad version in model file")
            # EXIT_1
        self.vocab_size = int(header[2])
        # each token: one length byte, then `length` raw bytes of text
        for i in range(self.vocab_size):
            var length = int(file.read_bytes(1)[0])
            var str: String = file.read(length)
            if length>0 and len(str)>0:
                self.token_table.append(str)
        file.close()
        self.init_ok = 1
    fn decode(self, token_id:Int) -> String:
        """Return the string for token_id, or "" if uninitialized / out of range."""
        if (self.init_ok == 0):
            return ""
        if (token_id < self.vocab_size):
            return self.token_table[token_id]
        else:
            print("invalid token id", token_id)
            return ""
    fn safe_printf(self,str:String):
        """Print a decoded token without a trailing newline, skipping empties.

        NOTE(review): `str == NULL` compares a String against the NULL pointer
        constant — presumably a literal port of the C `piece == NULL` check;
        confirm it behaves as an "empty" test in Mojo.
        """
        # the tokens are raw bytes, and we we only want to print the printable ones
        # many bytes can be various control codes, backspace, etc.
        if str == NULL:
            return
        if str[0] == '\0':
            return
        # handle individual byte tokens
        # every token is asserted to be at least one byte so doing piece[1] is ok
        ### --- TODO: port the C isprint/isspace filtering below
        #if (str[1] == '\0') {
        #unsigned char byte_val = piece[0];
        #if (!(isprint(byte_val) || isspace(byte_val))) {
        #  return; // weird byte, don't print it
        #}
        #}
        print(str,end = "")
# ----------------------------------------------------------------------------
# main training loop
fn main() raises:
    """Train GPT-2 124M: periodic validation, periodic sampling, 40 AdamW steps.

    Mirrors llm.c's train_gpt2.c main loop: every 10 steps estimate validation
    loss, every 20 steps sample text from the model, and on every step run
    forward/backward/update on one training batch while timing it.
    """
    # build the GPT-2 model from a checkpoint
    var model = GPT2("gpt2_124M.bin")
    # build the DataLoaders from tokens files. for now use tiny_shakespeare if available, else tiny_stories
    var tiny_stories_train:StringRef = "./data/TinyStories_train.bin"
    var tiny_stories_val:StringRef = "./data/TinyStories_val.bin"
    var tiny_shakespeare_train:StringRef = "./data/tiny_shakespeare_train.bin"
    var tiny_shakespeare_val:StringRef = "./data/tiny_shakespeare_val.bin"
    ##var train_tokens:StringRef = access(tiny_shakespeare_train, F_OK) != -1 ? tiny_shakespeare_train : tiny_stories_train
    ##var val_tokens:StringRef = access(tiny_shakespeare_val, F_OK) != -1 ? tiny_shakespeare_val : tiny_stories_val
    var train_tokens:StringRef = tiny_shakespeare_train
    var val_tokens:StringRef = tiny_shakespeare_val
    var B:Int32 = 4
    var T:Int32 = 64
    var train_loader = DataLoader()
    dataloader_init(train_loader, train_tokens, B, T)
    print("train dataset num_batches:", train_loader.num_batches)
    var val_loader = DataLoader()
    dataloader_init(val_loader, val_tokens, B, T)
    print("val dataset num_batches:", val_loader.num_batches)
    var val_num_batches:Int32 = 10
    # build the Tokenizer
    var tokenizer = Tokenizer("gpt2_tokenizer.bin")
    # some memory for generating samples from the model
    var rng_state:UInt64 = 1337
    var gen_max_length:Int32 = 64
    var gen_tokens = DTypePointer[dtype_int]().alloc(int(gen_max_length))
    # train
    var elapsed_time_ms_total = 0.0
    for step in range(41):
        # once in a while estimate the validation loss
        if step % 10 == 0:
            var val_loss:FLOAT = 0.0
            dataloader_reset(val_loader)
            for i in range(val_num_batches):
                dataloader_next_batch(val_loader)
                gpt2_forward(model, val_loader.inputs, val_loader.targets, B, T)
                val_loss += model.mean_loss
            val_loss /= int(val_num_batches)
            print("val loss", val_loss)
        # once in a while sample generated text from the model
        if step > 0 and step % 20 == 0:
            gen_tokens[0] = GPT2_EOT # the GPT-2 EOT token kicks off the generation
            print("generating:\n---")
            for t in range(1,gen_max_length):
                # note that inference is wasteful here because
                # for each t, we re-compute all activations between 0 and t
                # leaving this alone because you want separate code for inference anyway
                # the inference here is just for sanity checking purposes
                gpt2_forward(model, gen_tokens, NULL_INT, 1, t)
                var probs = model.acts.probs + (t-1) * model.config.vocab_size
                var coin:FLOAT = random_f32(rng_state)
                var next_token:Int = int(sample_mult(probs, model.config.vocab_size, coin))
                gen_tokens[t] = next_token
                # print the generated token, either using the Tokenizer or a fallback
                if tokenizer.init_ok:
                    var token_str:String = tokenizer.decode(next_token)
                    tokenizer.safe_printf(token_str)
                else:
                    # fall back to printing the token id
                    print("%d ", next_token)
            print("\n---")
        # do a training step (timed in milliseconds)
        var start_time = now()
        dataloader_next_batch(train_loader)
        gpt2_forward(model, train_loader.inputs, train_loader.targets, B, T)
        gpt2_zero_grad(model)
        gpt2_backward(model)
        gpt2_update(model, 1e-4, 0.9, 0.999, 1e-8, 0.0, step+1)
        var elapsed_time_ms = (now() - start_time)/1_000_000.
        elapsed_time_ms_total += elapsed_time_ms
        print("step " + str(step) + ": train loss " + str(model.mean_loss) + " (took " + int(elapsed_time_ms) + " ms, average: " + int(elapsed_time_ms_total/(step+1)) + " ms)")
    # free
    dataloader_free(train_loader)
    dataloader_free(val_loader)
    gpt2_free(model)
| llm.mojo/train_gpt2_basic.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from pathlib import Path
from max.engine import InferenceSession
from max.graph import Graph, TensorType, ops
from tensor import Tensor, TensorShape
def construct_graph[op_name: StringLiteral]() -> Graph:
    """Build a small graph: (2, 6) float32 input, matmul against a constant
    (6, 1) tensor filled with 0.15, then the custom op named by `op_name`."""
    g = Graph(TensorType(DType.float32, 2, 6))
    weight_values = Tensor[DType.float32](TensorShape(6, 1), 0.15)
    weight_node = g.constant(weight_values)
    projected = g[0] @ weight_node
    activated = ops.custom[op_name](projected, projected.type())
    g.output(activated)
    return g
def main():
    """Load the example graph with the custom-ops package and run one
    random (2, 6) input batch through it, printing the output."""
    # Load the graph with custom ops package
    session = InferenceSession()
    # Try changing the op_name to a different op from gelu.mojo
    model = session.load(
        construct_graph["my_gelu"](),
        custom_ops_paths=Path("custom_ops.mojopkg"),
    )
    # Create some sample input to run through the model:
    input = Tensor[DType.float32].randn(TensorShape(2, 6))
    results = model.execute("input0", input)
    output = results.get[DType.float32]("output0")
    print(output)
| max/examples/extensibility/max-graph.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from random import randn
from python import Python
from .python_utils import tensor_to_numpy, numpy_to_tensor
from max import register
from max.extensibility import Tensor, empty_tensor
@register.op("monnx.det_v11")
fn det[type: DType, rank: Int](x: Tensor[type, rank]) -> Tensor[type, rank - 2]:
    """Matrix determinant over the last two axes, delegated to numpy.linalg.det.

    On any Python-interop failure the error is printed and an empty tensor
    of the reduced rank is returned (best-effort, matching the original).
    """
    try:
        var numpy = Python.import_module("numpy")
        var x_np = tensor_to_numpy(x, numpy)
        var det_np = numpy.linalg.det(x_np)
        return numpy_to_tensor[type, rank - 2](det_np)
    except err:
        print(err)
        return empty_tensor[type, rank - 2](0)
| max/examples/extensibility/custom_ops/det.mojo | false |
<filename>max/examples/extensibility/custom_ops/gelu.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from max.extensibility import Tensor, empty_tensor
from max import register
from math import erf, exp, sqrt, tanh
from random import randn
@register.op("my_gelu")
fn gelu[type: DType, rank: Int](x: Tensor[type, rank]) -> Tensor[type, rank]:
    """Exact GELU, elementwise: x/2 * (1 + erf(x / sqrt(2)))."""
    var result = empty_tensor[type](x.shape)

    @always_inline
    @parameter
    fn kernel[simd_width: Int](idx: StaticIntTuple[rank]) -> SIMD[type, simd_width]:
        var v = x.simd_load[simd_width](idx)
        return v / 2 * (1 + erf(v / sqrt(2)))

    print("Hello, custom GELU!")
    result.for_each[kernel]()
    return result^
@register.op("my_tanh_gelu")
fn gelu_tanh_approx[
    type: DType, rank: Int
](x: Tensor[type, rank]) -> Tensor[type, rank]:
    """Tanh approximation of GELU:
    0.5 * x * (1 + tanh(0.7978845608 * (x + 0.044715 * x^3)))."""
    var result = empty_tensor[type](x.shape)

    @always_inline
    @parameter
    fn kernel[simd_width: Int](idx: StaticIntTuple[rank]) -> SIMD[type, simd_width]:
        var v = x.simd_load[simd_width](idx)
        return (
            0.5 * v * (1 + tanh(0.7978845608 * (v + 0.044715 * v**3)))
        )

    print("Hello, custom tanh GELU!")
    result.for_each[kernel]()
    return result^
@register.op("my_sigmoid_gelu")
fn gelu_sigmoid_approx[
    type: DType, rank: Int
](x: Tensor[type, rank]) -> Tensor[type, rank]:
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""
    var result = empty_tensor[type](x.shape)

    @always_inline
    @parameter
    fn kernel[simd_width: Int](idx: StaticIntTuple[rank]) -> SIMD[type, simd_width]:
        var v = x.simd_load[simd_width](idx)
        return v * (1 / (1 + exp(-1.702 * v)))

    print("Hello, custom sigmoid GELU!")
    result.for_each[kernel]()
    return result^
| max/examples/extensibility/custom_ops/gelu.mojo | false |
<filename>max/examples/extensibility/custom_ops/python_utils.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from extensibility import Tensor, empty_tensor
import python
@always_inline
fn numpy_data_pointer[
    type: DType
](numpy_array: PythonObject) raises -> DTypePointer[type]:
    """Return a typed pointer to the numpy array's underlying data buffer.

    Zero-copy: the array must stay alive for as long as the pointer is used.
    """
    var raw_address = int(numpy_array.__array_interface__["data"][0])
    return DTypePointer[type](address=raw_address)
@always_inline
fn memcpy_to_numpy[
    type: DType, rank: Int
](array: PythonObject, tensor: Tensor[type, rank]) raises:
    """Copy all of `tensor`'s elements into the pre-sized numpy `array`."""
    var element_count = tensor.nelems()
    var destination = numpy_data_pointer[type](array)
    memcpy(destination, tensor.data, element_count)
@always_inline
fn memcpy_from_numpy[
    type: DType, rank: Int
](array: PythonObject, tensor: Tensor[type, rank]) raises:
    """Copy all elements of the numpy `array` into the pre-sized `tensor`."""
    var element_count = tensor.nelems()
    var source = numpy_data_pointer[type](array)
    memcpy(tensor.data, source, element_count)
@always_inline
fn shape_to_python_list[
    rank: Int
](shape: StaticIntTuple[rank]) raises -> PythonObject:
    """Convert a static shape tuple into a Python list of its dimensions."""
    var dims = python.Python.evaluate("list()")
    for axis in range(rank):
        _ = dims.append(shape[axis])
    return dims^
@always_inline
fn get_np_dtype[type: DType](np: PythonObject) raises -> PythonObject:
    """Map a Mojo DType to the matching numpy dtype object.

    Only the dtypes used by these custom ops are supported; any other
    DType raises "Unknown datatype".
    """
    @parameter
    if type.is_float32():
        return np.float32
    elif type.is_int32():
        return np.int32
    elif type.is_int64():
        return np.int64
    elif type.is_uint8():
        return np.uint8
    raise "Unknown datatype"
@always_inline
fn tensor_to_numpy[
    type: DType
](tensor: Tensor[type], np: PythonObject) raises -> PythonObject:
    """Materialize `tensor` as a new numpy array of matching shape and dtype."""
    var py_shape = shape_to_python_list(tensor.shape)
    var np_array = np.zeros(py_shape, get_np_dtype[type](np))
    _ = py_shape^
    memcpy_to_numpy(np_array, tensor)
    return np_array^
@always_inline
fn numpy_to_tensor[
    type: DType, rank: Int
](array: PythonObject) raises -> Tensor[type, rank]:
    """Copy a numpy array into a freshly allocated Tensor of the given rank."""
    var dims = StaticIntTuple[rank]()
    var np_shape = array.shape
    for axis in range(rank):
        dims[axis] = np_shape[axis].__index__()
    var result = empty_tensor[type, rank](dims)
    memcpy_from_numpy(array, result)
    return result^
| max/examples/extensibility/custom_ops/python_utils.mojo | false |
<filename>max/examples/extensibility/custom_ops/__init__.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
# ===----------------------------------------------------------------------=== #
from .gelu import *
from .det import *
| max/examples/extensibility/custom_ops/__init__.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from tensor import Tensor
from utils import StaticTuple
@value
struct SamplerResult(Stringable):
    """Container for a token sampler decision. This struct retains some
    context on what options remained after filtering (to aid in
    rationalizing sampler behavior). The goal is to facilitate experimentation,
    not raw performance."""

    # Chosen token (vocabulary index)
    var selected: Int

    # Options the selected token was sampled from after filtering
    var options: List[Int]

    # List of the associated likelihoods (len(options) == len(likelihoods))
    var likelihoods: List[Float32]

    fn __init__(
        inout self: Self,
        selected: Int,
        options: List[Int] = List[Int](),
        likelihoods: List[Float32] = List[Float32](),
    ):
        self.selected = selected
        self.options = options
        self.likelihoods = likelihoods

    fn __str__(self) -> String:
        var msg = "Selected: " + str(self.selected) + " from "
        # BUGFIX: previously this unconditionally indexed likelihoods[i],
        # which reads out of bounds when a caller populates `options` but
        # not `likelihoods`. Output is unchanged when both lists match.
        for i in range(len(self.options)):
            msg += "[" + str(self.options[i])
            if i < len(self.likelihoods):
                msg += ", " + str(self.likelihoods[i])
            msg += "] "
        return msg
trait TokenSampler:
    """A generic token sampler that takes in a list of logits and samples
    an element based on the associated likelihoods."""

    fn sample[dtype: DType](self, logits: Tensor[dtype]) -> SamplerResult:
        """Samples one token id from `logits` (implementors index it as
        logits[0, i], i.e. a 1 x vocab_size tensor -- confirm per impl)."""
        ...
| max/examples/graph-api/pipelines/llama2/token_sampler/token_sampler.mojo | false |
<filename>max/examples/graph-api/pipelines/llama2/token_sampler/weighted_sampler.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from .token_sampler import TokenSampler, SamplerResult
from tensor import Tensor
from random import random_float64
import math
from utils.numerics import min_finite
@value
struct WeightedSampler(TokenSampler):
    """Temperature-scaled softmax sampler with llama.cpp-style min_p
    filtering."""

    # Standard temperature parameter -- 1.0 is unmodified, 0.0 is effectively greedy sampling
    var temperature: Float32

    # min_p style filter (source: https://github.com/ggerganov/llama.cpp/pull/3841)
    var min_p: Float32

    fn __init__(inout self: Self, temperature: Float32, min_p: Float32 = 0.05):
        self.temperature = temperature
        self.min_p = min_p

    fn sample[dtype: DType](self, logits: Tensor[dtype]) -> SamplerResult:
        """Samples one token id from a 1 x vocab_size logits tensor."""
        var normalization = Scalar[DType.float32](0)

        # Add a floor to mitigate div0 if T=0.0 is passed in.
        var temp_modified: SIMD[DType.float32, 1] = max(
            Float32(1e-6), self.temperature
        )

        # Overflow mitigation.
        # p_i = exp(logit_i / T) / (sum_j exp(logit_j / T))
        #     = exp(logit_max / T) / exp(logit_max / T) (...)
        #     = exp((logit_i-logit_max)/T) / (sum_j exp((logit_j-logit_max)/T))
        var largest = min_finite[dtype]()
        for i in range(logits.num_elements()):
            if largest < logits[0, i]:
                largest = logits[0, i]
        for i in range(logits.num_elements()):
            var intermediate: SIMD[DType.float32, 1] = (
                logits[0, i] - largest
            ).cast[DType.float32]() / temp_modified
            var p = math.exp(intermediate)
            normalization += p

        # Start filtering for min_p
        var retained_idx = List[Int]()
        var retained_p = List[Float32]()
        var options = List[Int]()
        var likelihoods = List[Float32]()

        # Now run through again with the actual probabilities
        for i in range(logits.num_elements()):
            var intermediate: SIMD[DType.float32, 1] = (
                logits[0, i] - largest
            ).cast[DType.float32]() / temp_modified
            var p: Float32 = math.exp(intermediate) / normalization
            if p >= (self.min_p / normalization):
                retained_idx.append(i)
                retained_p.append(p)

        # Renormalize after filtering min_p
        normalization = Scalar[DType.float32](0)
        for v in range(len(retained_idx)):
            normalization += retained_p[v]

        # Simple O(N) weighted sampler
        # Collect the considered tokens as we go for the SamplerResult
        var u = random_float64()
        # FIX: was `Scalar[dtype.float32]`; use the canonical DType alias
        # for consistency with the accumulators above.
        var cdf = Scalar[DType.float32](0.0)
        for i in range(len(retained_idx)):
            options.append(retained_idx[i])
            likelihoods.append(
                retained_p[i] / normalization.cast[DType.float32]()
            )
            cdf += retained_p[i] / normalization
            if cdf > u:
                # BUGFIX: `likelihoods` was collected but never returned,
                # leaving SamplerResult.options/likelihoods mismatched
                # (and SamplerResult.__str__ reading out of bounds).
                return SamplerResult(retained_idx[i], options, likelihoods)
        return SamplerResult(
            retained_idx[len(retained_idx) - 1], options, likelihoods
        )
| max/examples/graph-api/pipelines/llama2/token_sampler/weighted_sampler.mojo | false |
<filename>max/examples/graph-api/pipelines/llama2/token_sampler/__init__.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Token sampler strategies for Llama2
"""
| max/examples/graph-api/pipelines/llama2/token_sampler/__init__.mojo | false |
<filename>max/examples/graph-api/pipelines/llama3/tokenizer/regex.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""POSIX 2 regular expressions via regcomp/regexec."""
from sys.info import os_is_macos
from _mlir._c.ffi import MLIR_func
def set_locale_unicode():
    """Sets the process locale from the environment so the regex engine
    can handle multi-byte characters.

    Calls C `setlocale(LC_ALL, "")`; raises if libc returns NULL.
    """
    empty_string = str("")
    # First argument 0 selects LC_ALL; the empty string means "use the
    # environment's locale".
    locale = external_call["setlocale", UnsafePointer[UInt8]](
        0, Reference(empty_string.as_bytes_slice()[0])
    ) # LC_ALL
    if not locale:
        raise "didn't set locale"
struct ExecuteOption:
    """Flags for llvm_regexec."""

    # Use pmatch[0] to bound the searched substring (REG_STARTEND-style;
    # see _CRegex.exec, which seeds slot 0 with [start, len)).
    alias STARTEND = 4
struct CompileOption:
    """Flags for llvm_regcomp."""

    # POSIX extended regular expression syntax.
    alias EXTENDED = 1
    # Case-insensitive matching.
    alias ICASE = 2
    # Enhanced features; presumably the BSD/LLVM REG_ENHANCED extension
    # -- confirm against the bundled llvm regex headers.
    alias ENHANCED = 0o400
fn llvm_regcomp(ptr: UnsafePointer[_CRegex], pattern: String, mode: Int) -> Int:
    """Compiles `pattern` into the regex struct at `ptr` via LLVM's
    bundled regcomp. Returns 0 on success, else an error code for
    llvm_regerror."""
    return MLIR_func[
        "llvm_regcomp",
        fn (UnsafePointer[_CRegex], UnsafePointer[Int8], Int) -> Int,
    ]()(ptr, pattern.unsafe_ptr(), mode)
fn llvm_regexec(
    ptr: UnsafePointer[_CRegex],
    string: String,
    inout pmatch: List[_CRegexMatch],
    mode: Int,
) -> Int:
    """Executes a compiled regex against `string`.

    `pmatch` supplies capacity for the whole match plus capture groups
    and is filled in place. Returns 0 on a match, nonzero otherwise.
    """
    return MLIR_func[
        "llvm_regexec",
        fn (
            UnsafePointer[_CRegex],
            UnsafePointer[Int8],
            Int,
            UnsafePointer[_CRegexMatch],
            Int,
        ) -> Int,
    ]()(ptr, string.unsafe_ptr(), len(pmatch), pmatch.unsafe_ptr(), mode)
fn llvm_regfree(ptr: UnsafePointer[_CRegex]):
    """Releases the memory owned by a compiled regex."""
    return MLIR_func["llvm_regfree", fn (UnsafePointer[_CRegex]) -> NoneType]()(
        ptr
    )
fn llvm_regerror(
    error: Int,
    ptr: UnsafePointer[_CRegex],
    message: UnsafePointer[UInt8],
    max_size: Int,
) -> Int:
    """Writes a human-readable description of `error` into `message`
    (at most `max_size` bytes) and returns the length reported by the
    C regerror."""
    return MLIR_func[
        "llvm_regerror",
        fn (Int, UnsafePointer[_CRegex], UnsafePointer[UInt8], Int) -> Int,
    ]()(error, ptr, message, max_size)
struct _CRegex:
    """Owning wrapper over LLVM's `llvm_regex_t`.

    The field layout must match the C struct exactly because instances
    are passed by address to the C API. Instances must not move after
    `_compile` (see the __moveinit__ note).
    """

    # This corresponds to the llvm_regex_t type definition.
    var _magic: Int
    # Number of parenthesized subexpressions, filled in by llvm_regcomp.
    var capture_groups: Int
    var _re_endp: UnsafePointer[NoneType]
    var _re_guts: UnsafePointer[NoneType]
    # True once compilation succeeded; gates llvm_regfree in __del__.
    var _initialized: Bool
    fn __init__(inout self):
        self._magic = 0
        self.capture_groups = 0
        self._re_endp = UnsafePointer[NoneType]()
        self._re_guts = UnsafePointer[NoneType]()
        self._initialized = False
    fn __moveinit__(inout self, owned existing: Self):
        # _CRegex can't be safely moved once it's initialized.
        # We have to implement __move__ currently to satisfy Arc's Movable
        # trait bounds.
        self.__init__()
    fn __del__(owned self):
        if self._initialized:
            llvm_regfree(self._ptr())
    @staticmethod
    def compile(pattern: String, options: Int = 0) -> Arc[Self]:
        """Compiles `pattern` into a heap-pinned (Arc) regex.

        EXTENDED syntax is always enabled on top of `options`.
        """
        self = Arc(Self())
        self[]._compile(pattern, options | CompileOption.EXTENDED)
        return self
    def _compile(inout self, pattern: String, options: Int):
        # Raises with a formatted message on invalid patterns.
        err = llvm_regcomp(self._ptr(), pattern, options)
        if err:
            raise self._error(err)
        self._initialized = True
    def exec(self, string: String, start: Int = 0) -> List[_CRegexMatch]:
        """Runs the regex over `string[start:]`.

        Returns the whole match plus capture groups, or an empty list
        when there is no match.
        """
        # This list should be able to be stack-allocated to avoid a heap
        # allocation when there's no match. stack_allocation currently
        # only supports static allocation sizes.
        max_groups = self.capture_groups + 1
        matches = List[_CRegexMatch](capacity=max_groups)
        # Slot 0 bounds the searched region (used with STARTEND).
        matches.append(_CRegexMatch(start, len(string)))
        matches.size = max_groups
        no_match = llvm_regexec(
            self._ptr(), string, matches, ExecuteOption.STARTEND
        )
        if no_match:
            return List[_CRegexMatch]()
        return matches^
    def _error(self, code: Int) -> String:
        """Formats a compile/exec error code into a message string."""
        alias MAX_ERROR_LENGTH = 2048
        message = UnsafePointer[UInt8].alloc(MAX_ERROR_LENGTH)
        size = llvm_regerror(code, self._ptr(), message, MAX_ERROR_LENGTH)
        # NOTE(review): assumes String(ptr, len) takes ownership of
        # `message`; otherwise this buffer leaks -- confirm.
        return "regex error: " + String(message, size)
    fn _ptr(self) -> UnsafePointer[Self]:
        # Address-of-self is only sound because instances never move
        # after initialization (see __moveinit__).
        return UnsafePointer.address_of(self)
@value
struct _CRegexMatch:
    """One match/capture-group span, mirroring the C regmatch layout:
    [start, end) byte offsets, with start == -1 for a group that did not
    participate in the match."""

    var start: Int
    var end: Int

    fn __bool__(self) -> Bool:
        # Non-participating groups are reported with start == -1.
        return self.start != -1

    fn __repr__(self) -> String:
        var text = str("_Group(start=")
        text += str(self.start)
        text += ", end="
        text += str(self.end)
        text += ")"
        return text
@value
struct _MatchIter[
    regex_lifetime: ImmutableLifetime,
    string_lifetime: ImmutableLifetime,
]:
    """Iterator over successive non-overlapping matches of a Regex in a
    string; borrows both the regex and the searched string."""

    var regex: Reference[Regex, False, regex_lifetime]
    var string: Reference[String, False, string_lifetime]
    # Byte offset at which the next search begins.
    var start: Int
    # One-match lookahead; None once exhausted.
    var next_match: Optional[Match[string_lifetime]]
    # This is a workaround for not having negative lookaheads, expect this
    # interface to change. This allows using capture groups to tell the regex
    # to "match" only part of the match, and continue at the end of the capture
    # group.
    var negative_lookahead_hack: Bool
    def __init__(
        inout self,
        regex: Reference[Regex, False, regex_lifetime],
        string: Reference[String, False, string_lifetime],
        negative_lookahead_hack: Bool = False,
    ):
        self.regex = regex
        self.string = string
        self.start = 0
        self.next_match = None
        # Prime the lookahead so __len__/__next__ see the first match.
        self._next()
    fn __iter__(self) -> Self:
        return self
    def __next__(inout self) -> Match[string_lifetime]:
        m = self.next_match.value()[]
        self._next()
        return m^
    fn __len__(self) -> Int:
        # 1 while a match is pending, 0 when exhausted; Mojo's for-loop
        # protocol uses this to terminate iteration.
        return int(Bool(self.next_match))
    def _next(inout self):
        """Advances the lookahead to the next match, if any."""
        m = self.regex[].find(self.string[], start=self.start)
        self.next_match = m
        if m and self.negative_lookahead_hack:
            # End the regex at the end of the last capture group,
            # or at the end of the match if none are populated.
            max_end = self.start
            for i in range(1, len(m.value()[]._groups)):
                group = m.value()[]._groups[i]
                if group and group.end > max_end:
                    max_end = group.end
            if max_end == self.start:
                max_end = m.value()[].end()
            self.start = max_end
            self.next_match.value()[]._groups[0].end = max_end
        else:
            self.start = m.value()[].end() if m else len(self.string[])
@value
struct Match[lifetime: ImmutableLifetime]:
    """A single regex match with its capture groups, borrowing the
    searched string's bytes."""

    var _string: Span[UInt8, False, lifetime]
    # Group 0 is the whole match; groups 1+ are captures.
    var _groups: List[_CRegexMatch]
    fn __getitem__(self, group: Int) -> StringSlice[False, lifetime]:
        """Returns the text of capture group `group` (0 = whole match)."""
        var m = self._groups[group]
        return StringSlice(unsafe_from_utf8=self._string[m.start : m.end])
    fn __str__(self) -> String:
        return str(self[0])
    fn __len__(self) -> Int:
        return self.end() - self.start()
    fn start(self) -> Int:
        """Byte offset where the match begins."""
        return self._groups[0].start
    fn end(self) -> Int:
        """Byte offset one past the end of the match."""
        return self._groups[0].end
    fn __repr__(self) -> String:
        return (
            str("Match(start=")
            + str(self.start())
            + ", end="
            + str(self.end())
            + ", match="
            + repr(str(self))
            + ")"
        )
@value
struct Regex:
    """User-facing POSIX regex; shares one compiled _CRegex via Arc so
    copies are cheap."""

    var _c: Arc[_CRegex]
    def __init__(inout self, pattern: String, options: Int = 0):
        """Compiles `pattern`; raises on invalid syntax."""
        self._c = _CRegex.compile(pattern, options)
    def find(
        self, string: String, start: Int = 0
    ) -> Optional[Match[__lifetime_of(string)]]:
        """Returns the first match at or after `start`, or None."""
        groups = self._c[].exec(string, start=start)
        if groups:
            return Match(string.as_bytes_slice(), groups^)
        return None
    def findall(
        self, string: String, negative_lookahead_hack: Bool = False
    ) -> _MatchIter[__lifetime_of(self), __lifetime_of(string)]:
        """Iterates all non-overlapping matches in `string`."""
        return _MatchIter(self, string, negative_lookahead_hack)
| max/examples/graph-api/pipelines/llama3/tokenizer/regex.mojo | false |
<filename>max/examples/graph-api/pipelines/llama3/tokenizer/tiktoken.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""TikToken implementation."""
from base64 import b64decode
from collections import Dict
from pathlib import Path
from .bpe import BPETokenizer, TokenWithID
from .regex import Match, Regex, CompileOption
from ...weights.gguf import GGUFArray, GGUFString
def _next_rune(inout span: Span[UInt8, _]) -> Int:
    """Decodes the next UTF-8 code point from `span` and advances the
    span past it.

    Assumes well-formed UTF-8: continuation bytes are not validated and
    the span is assumed long enough for the advertised sequence length.
    """
    if not span[0] & 0x80:
        # 1-byte sequence (ASCII): 0xxxxxxx.
        result = int(span[0])
        span = span[1:]
        return int(result)
    elif not span[0] & 0x20:
        # 2-byte sequence: 110xxxxx 10xxxxxx.
        result = ((int(span[0]) & 0x1F) << 6) | (int(span[1]) & 0x3F)
        span = span[2:]
        return int(result)
    elif not span[0] & 0x10:
        # 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.
        result = (
            ((int(span[0]) & 0x0F) << 12)
            | ((int(span[1]) & 0x3F) << 6)
            | (int(span[2]) & 0x3F)
        )
        span = span[3:]
        return int(result)
    else:
        # 4-byte sequence: 11110xxx followed by three continuation bytes.
        result = (
            ((int(span[0]) & 0x07) << 18)
            | ((int(span[1]) & 0x3F) << 12)
            | ((int(span[2]) & 0x3F) << 6)
            | (int(span[3]) & 0x3F)
        )
        span = span[4:]
        return int(result)
def _runes(string: String) -> List[Int]:
    """Decodes `string` into its sequence of Unicode code points."""
    remaining = string.as_bytes_slice()
    codepoints = List[Int]()
    # _next_rune shrinks `remaining` as it consumes each sequence.
    while len(remaining):
        codepoints.append(_next_rune(remaining))
    return codepoints^
def _decode_token(string: String, decode_map: Dict[Int, UInt8]) -> String:
    """Maps each code point of a tiktoken-printable token back to its raw
    byte via `decode_map` and returns the reconstructed string."""
    result = List[UInt8]()
    for rune in _runes(string):
        result.append(decode_map[rune[]])
    # Null terminator for the List[UInt8] -> String conversion.
    result.append(0)
    return result
def _decode_map() -> Dict[Int, UInt8]:
    """Builds the code-point -> raw-byte table that inverts tiktoken's
    printable-character byte encoding (non-printable bytes are stored as
    shifted code points; printable ASCII maps to itself)."""
    # I have no idea why this is the way it is.
    decode_map = Dict[Int, UInt8]()
    for i in range(256, 289): # 0-32
        decode_map[i] = i - 256
    for i in range(33, 127): # 33-126
        decode_map[i] = i
    for i in range(289, 323): # 127-160
        decode_map[i] = i - 162
    for i in range(161, 256): # 161-255
        decode_map[i] = i
    # Soft hyphen (173) is encoded separately.
    decode_map[323] = 173
    return decode_map^
struct TikTokenEncoder:
    """BPE tokenizer compatible with tiktoken's cl100k_base encoding as
    used by Llama 3."""

    var bpe: BPETokenizer
    # Pre-tokenization regex that splits text into BPE-able segments.
    var regex: Regex
    # Special token string -> fixed vocabulary id.
    var special_tokens: Dict[String, Int]
    def __init__(
        inout self,
        owned bpe: BPETokenizer,
        owned regex: Regex,
        owned special_tokens: Dict[String, Int],
    ):
        self.bpe = bpe^
        self.regex = regex^
        self.special_tokens = special_tokens^
    @staticmethod
    def cl100k_base_llama3(path: Path) -> Self:
        """Loads the encoder from a tiktoken vocabulary file."""
        return Self.cl100k_base_llama3(BPETokenizer.from_tiktoken(path))
    @staticmethod
    def cl100k_base_llama3(tokens: GGUFArray) -> Self:
        """Builds the encoder from a GGUF token array, decoding each
        token's printable byte encoding back to raw bytes."""
        bpe = BPETokenizer()
        decode_map = _decode_map()
        for i in range(tokens.n):
            encoded = str(tokens.data[i])
            bpe.add_token(_decode_token(encoded, decode_map), i)
        return Self.cl100k_base_llama3(bpe)
    @staticmethod
    def cl100k_base_llama3(owned bpe: BPETokenizer) -> Self:
        """Finishes construction: registers the Llama 3 special tokens
        and compiles the cl100k pre-tokenization regex (POSIX-class
        approximation of tiktoken's pattern)."""
        special_tokens = Dict[String, Int]()
        special_tokens["<|begin_of_text|>"] = 128000
        special_tokens["<|end_of_text|>"] = 128001
        for e in special_tokens.items():
            bpe.add_token(e[].key, e[].value)
        pattern = str("|").join(
            "'[sdmt]|ll|ve|re",
            "[^\r\n[:alnum:]]?[[:alpha:]]+",
            "[[:digit:]]{1,3}",
            " ?[^[:space:][:alnum:]]+[\r\n]*",
            "[[:space:]]*[\r\n]",
            "([[:space:]]+)[[:space:]]",
            "[[:space:]]+",
        )
        return Self(bpe^, Regex(pattern, CompileOption.ICASE), special_tokens^)
    def encode(
        self,
        string: String,
        bos: Optional[String] = str("<|begin_of_text|>"),
        eos: Optional[String] = None,
    ) -> List[TokenWithID]:
        """Encodes `string` into tokens, optionally wrapped in bos/eos."""
        # Compared to Rust tiktoken, this does not currently implement
        # - special tokens (not used in llama3)
        # - multithreaded decoding
        # multithreaded decoding is quite a bit more complex, as it requires
        # both splitting the text across threads and merging afterwards, and also
        # needs to know how to handle the boundary conditions between different segments
        tokens = List[TokenWithID]()
        if bos:
            tokens += self.encode_special(bos.value()[])
        for segment in self.regex.findall(string, negative_lookahead_hack=True):
            ss = str(segment)
            # A whole-segment vocabulary hit skips the BPE merge loop.
            if token_id := self.bpe.token_ids.find(ss):
                tokens += TokenWithID(ss^, token_id.value()[])
            else:
                tokens += self.bpe.encode(ss^)
        if eos:
            tokens += self.encode_special(eos.value()[])
        return tokens
    def encode_special(self, string: String) -> TokenWithID:
        """Returns the id for a special token, falling back to a regular
        vocabulary lookup (which raises if the token is absent)."""
        if special_id := self.special_tokens.find(string):
            return TokenWithID(string, special_id.value()[])
        return TokenWithID(string, self.bpe.token_ids[string])
    def decode(self, token_id: Int) -> TokenWithID:
        """Maps a vocabulary id back to its token string."""
        return TokenWithID(self.bpe.vocab[token_id].token, token_id)
| max/examples/graph-api/pipelines/llama3/tokenizer/tiktoken.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/run.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from pathlib import cwd, Path
import sys
from max.engine import InferenceSession, Model, TensorMap
from tensor import Tensor, TensorShape
from .model.replit import Replit
from .weights.replit_checkpoint import ReplitCheckpoint
from .weights.hyperparams import get_default
from ..tokenizer import AutoTokenizer
# TODO: Expand this back out to 512 once MSDK-305 is fully resolved.
alias MAX_SEQ_LEN = 33
@value
struct Config:
    """Configuration for token generation runtime options."""

    # Directory holding the converted model weights.
    var converted_weights_path: Path
    # Prompt text the model will complete.
    var prompt: String
    def __init__(
        inout self,
        /,
        converted_weights_path: Path = "",
        prompt: String = 'def hello():\n print("hello world")',
    ):
        self.converted_weights_path = converted_weights_path
        self.prompt = prompt
        # CLI arguments override the defaults supplied above.
        self.parse_args()
    def parse_args(inout self):
        """Parses `--converted_weights_path` and `--prompt` from argv;
        raises on unknown flags or a flag with a missing value."""
        args = sys.argv()
        @parameter
        def read_value(index: Int) -> StringRef:
            # Fetches the value following a flag, erroring if absent.
            if index >= len(args):
                raise "missing value for parameter `" + str(
                    args[index - 1]
                ) + "`"
            return args[index]
        # Skip the run_pipeline.mojo and replit arguments.
        i = 2
        while i < len(args):
            if args[i] == "--converted_weights_path":
                self.converted_weights_path = Path(read_value(i + 1))
                i += 2
            elif args[i] == "--prompt":
                self.prompt = read_value(i + 1)
                i += 2
            else:
                raise "unsupported CLI argument: " + String(args[i])
        # Default weight location when none was given on the CLI.
        if len(str(self.converted_weights_path)) == 0:
            self.converted_weights_path = cwd().joinpath(
                ".cache/replit/converted"
            )
def replit_run():
    """Builds, compiles, and greedily runs the Replit code model over the
    configured prompt, streaming generated tokens to stdout."""
    config = Config()
    checkpoint_file = config.converted_weights_path
    # Generate a graph that does a single forward pass of the replit model.
    print("Building model...")
    replit = Replit[ReplitCheckpoint, DType.float32](get_default())
    g = replit.build_graph(
        "replit",
        ReplitCheckpoint(checkpoint_file),
        with_attention_mask=True,
        use_cache=True,
    )
    # Load the graph into the session, which generates the MLIR and runs
    # optimization passes on it.
    print("Compiling...")
    session = InferenceSession()
    compiled_model = session.load(g)
    # Set up input and caches, and preprocess the input.
    input_string = config.prompt
    print("Running on input:", input_string)
    alias hf_model_name = "replit/replit-code-v1_5-3b"
    bpe_tokenizer = AutoTokenizer(hf_model_name)
    # Make sure newlines are properly encoded in the prompt.
    prompt = List(input_string.replace("\\n", "\n"))
    encoded_prompt = bpe_tokenizer.encode(prompt)
    tokens = Tensor(TensorShape(1, len(encoded_prompt)), encoded_prompt)
    k_cache, v_cache = replit.create_empty_cache()
    # Greedily generate tokens one at a time until the end token is reached or
    # the token length has reached the max.
    print("Output:")
    for n in range(len(encoded_prompt), MAX_SEQ_LEN + 1):
        # Mask is all-true over the n positions seen so far.
        attention_mask = Tensor[DType.bool](TensorShape(1, n), True)
        results = execute(
            compiled_model, session, tokens, attention_mask, k_cache, v_cache
        )
        output = results.get[DType.float32]("output0")
        k_cache = results.get[DType.float32]("output1")
        v_cache = results.get[DType.float32]("output2")
        # Greedy decoding: take the argmax logit at the last position.
        argmax = output.argmax(axis=-1).astype[DType.int64]()
        argmax_length = argmax.dim(1)
        next_token = argmax[0, argmax_length - 1]
        if bpe_tokenizer.is_end_of_text(next_token):
            break
        # Only the new token is fed back; past context lives in the caches.
        tokens = Tensor[DType.int64](TensorShape(1, 1), next_token)
        tokens_str = bpe_tokenizer.decode(next_token)
        print(tokens_str, end="")
    print()
def execute(
    model: Model,
    session: InferenceSession,
    tokens: Tensor[DType.int64],
    attention_mask: Tensor[DType.bool],
    k_cache: Tensor[DType.float32],
    v_cache: Tensor[DType.float32],
) -> TensorMap:
    """Runs one forward pass of the compiled model and returns its
    output tensor map."""
    inputs = session.new_tensor_map()
    # Input names must match the graph's declared input order.
    inputs.borrow("input0", tokens)
    inputs.borrow("input1", attention_mask)
    inputs.borrow("input2", k_cache)
    inputs.borrow("input3", v_cache)
    outputs = model.execute(inputs)
    return outputs^
| max/examples/graph-api/pipelines/replit/run.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved."
# ===----------------------------------------------------------------------=== #
| max/examples/graph-api/pipelines/replit/__init__.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/bpe_tokenizer/ball.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""A simple arena linked-list implementation."""
from collections import List, Optional
@value
struct Node[T: CollectionElement](CollectionElement):
    """A node in the linked list."""

    # Payload stored at this node.
    var value: T
    # ID of the previous node, or None at the head.
    var prev: Optional[Ball[T].ID]
    # ID of the next node, or None at the tail.
    var next: Optional[Ball[T].ID]
struct Ball[T: CollectionElement]:
    """A doubly-linked-list with nodes in a memory arena.
    - Elements in the list have an ID which can be used to reference them again.
    - IDs will never change or be re-used. If an item is removed its ID is invalid.
    - Linked-list ops are done on the arena directly
    ```mojo
    from tokenizer.ball import Ball
    var list = Ball[Int]()
    var id1 = list.append(0)
    var id2 = list.append(1)
    list[id2] == 1
    list.next(id1).value()[] == id2
    list.prev(id2).value()[] == id1
    list.remove(id1)
    list._head.value()[] == id2
    (id1 in list) == False
    list[id2] = 3
    ```
    """
    alias ID = Int
    # Node storage; a slot becomes None once its node is removed.
    var _arena: List[Optional[Node[T]]]
    # ID of the first node, or None when the list is empty.
    var _head: Optional[Self.ID]
    # ID of the last node, or None when the list is empty.
    var _tail: Optional[Self.ID]
    fn __init__(inout self):
        """Constructs a new empty linked list."""
        self._arena = List[Optional[Node[T]]]()
        self._head = None
        self._tail = None
    fn __contains__(self, id: Self.ID) -> Bool:
        """Checks whether the node is still in the list."""
        return 0 <= id < len(self._arena) and self._arena[id]
    fn append(inout self, owned value: T) -> Self.ID:
        """Adds a new element to the back of the list."""
        var id = len(self._arena)
        var node = Node[T](value^, self._tail, None)
        if self._tail:
            self._get_node(self._tail.value()[]).next = id
        else:
            # Empty list: the new node is also the head.
            self._head = id
        self._tail = id
        self._arena.append(node)
        return id
    fn remove(inout self, id: Self.ID):
        """Removes an element from the list."""
        var node = self._arena[id]._value_copy()
        # Free the slot first; `id` is invalid from here on.
        self._arena[id] = None
        if node.prev:
            self._get_node(node.prev.value()[]).next = node.next
        if node.next:
            self._get_node(node.next.value()[]).prev = node.prev
        if self._head.value()[] == id:
            self._head = node.next
        if self._tail.value()[] == id:
            self._tail = node.prev
    fn next(self, id: Self.ID) -> Optional[Self.ID]:
        """Gets the next item in the list, if any."""
        return self._get_node(id).next
    fn prev(self, id: Self.ID) -> Optional[Self.ID]:
        """Gets the previous item in the list, if any."""
        return self._get_node(id).prev
    fn _get_node[
        mutability: Bool,
        lifetime: AnyLifetime[mutability].type,
    ](self: Reference[Self, mutability, lifetime], id: Self.ID) -> ref [
        lifetime
    ] Node[T]:
        # Returns a reference into the arena; assumes `id` is live (the
        # Optional is unwrapped without checking).
        return self[]._arena.__get_ref(id)[].value()[]
    fn __getitem__[
        mutability: Bool,
        lifetime: AnyLifetime[mutability].type,
    ](self: Reference[Self, mutability, lifetime], id: Self.ID) -> ref [
        lifetime
    ] T:
        """Gets a reference to a value in the list."""
        return self[]._get_node(id).value
| max/examples/graph-api/pipelines/replit/bpe_tokenizer/ball.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/bpe_tokenizer/bpe_tokenizer.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from pathlib import Path
from utils.variant import Variant
from .ball import Ball
from .json import JsonStorage, NULL, NodeType
from .max_heap import MaxHeap, OrderableElement
@value
struct TokenWithID(CollectionElement, Stringable):
    """A string token, along with its ID in the vocabulary (or 0 for unk)."""

    var token: String
    var id: Int

    fn __str__(self) -> String:
        # Rendered as "[token=id]" for debugging output.
        var rendered = String("[")
        rendered += self.token
        rendered += "="
        rendered += str(self.id)
        rendered += "]"
        return rendered
@value
struct StringPair(KeyElement):
    """Adjacent token pair used as a key into the BPE merge table."""

    var left: String
    var right: String
    fn __eq__(self, other: Self) -> Bool:
        return (self.left == other.left) and (self.right == other.right)
    fn __ne__(self, other: Self) -> Bool:
        return (self.left != other.left) or (self.right != other.right)
    fn __moveinit__(inout self, owned existing: Self):
        self.left = existing.left^
        self.right = existing.right^
    fn __copyinit__(inout self, existing: Self):
        self.left = existing.left
        self.right = existing.right
    fn __hash__(self) -> Int:
        # Simple multiplicative combiner so ("a","bc") and ("ab","c")
        # usually hash differently; collisions fall back on __eq__.
        return hash(self.left) * 12345 + hash(self.right)
@value
struct MergeScore(CollectionElement):
    """Priority and resulting vocab id for one BPE merge rule."""

    # Merge priority; the loader stores the negated rule index so that
    # earlier rules compare greater (see Tokenizer.from_string).
    var rank: Int
    var id: Int # Vocab ID
    fn __moveinit__(inout self, owned existing: Self):
        self.rank = existing.rank
        self.id = existing.id
    fn __copyinit__(inout self, existing: Self):
        self.rank = existing.rank
        self.id = existing.id
@value
struct MergeOption(OrderableElement):
    """Metadata for tracking possible BPE merges in a priority queue."""

    # Linked-list node IDs of the candidate pair.
    var left: Ball[String].ID
    var right: Ball[String].ID
    # Merge priority; higher pops first from the max-heap.
    var score: Float32
    # Expected vocab id of the merged pair; used to detect stale entries.
    var checksum: Int
    fn __lt__(self, other: Self) -> Bool:
        # Tie-break on position: the earlier (smaller) left node wins.
        return (self.score < other.score) or (
            self.score == other.score and self.left > other.left
        )
@value
struct Tokenizer:
var vocab: Dict[String, Int]
var merges: Dict[StringPair, MergeScore]
var json_str: String
@staticmethod
def from_file(path: Path) -> Self:
return Self.from_string(path.read_text())
@staticmethod
fn from_string(owned s: String) raises -> Self:
# NOTE: JsonStorage takes unsafe StringRef's into S, and `merges`
# maintains those StringRefs for the duration of the Tokenizer object.
# We make sure to keep s alive as long as we need it to avoid dangling
# pointers.
var j = JsonStorage.from_string(StringRef(s.unsafe_uint8_ptr(), len(s)))
# Just read the vocab and merges, assume the configuration
# parameters are as expected (e.g. type=BPE, byte_fallback=False, etc).
var vocab_node = j.get("model", "vocab")
var vocab_storage = j.storage[vocab_node.storage_index]
var vocab = Dict[String, Int]()
for item in vocab_storage.items():
vocab[item[].key] = item[].value.to_int()
var merge_node = j.get("model", "merges")
var merge_storage = j.storage[merge_node.storage_index]
var num_merges = len(merge_storage)
var merges = Dict[StringPair, MergeScore]()
for n in range(num_merges):
var merge = merge_storage[str(n)].value
var split = str(merge).split(" ")
if len(split) != 2:
raise "Invalid merge: " + str(merge)
var merged = split[0] + split[1]
try:
var vocab_id = vocab[merged]
# Set the merge score to the negative index to prioritize
# earlier merges.
merges[StringPair(split[0], split[1])] = MergeScore(
-n, vocab_id
)
except:
raise "Could not find '" + str(merged) + "' in tokenizer vocab."
return Self(vocab, merges, s^)
fn encode(
    self,
    str: String,
    bos: Optional[String] = None,
    eos: Optional[String] = None,
) raises -> List[TokenWithID]:
    """Encode a string according to the BPE algorithm.

    The BPE vocabulary is a set of scored strings. BPE starts by
    considering every character in the input string as its own token,
    and then greedily merges the highest scoring adjacent pair
    until no more adjacent token merges exist in the vocabulary.

    We implement the tokens as a linked list, with a priority queue
    of merge options. We execute the highest-scoring merge, adding
    new merge options to the priority queue if they exist in the vocabulary.
    We can't remove out-dated merge options from the priority queue, so
    instead we tag each option with a checksum — the vocab id the merged
    pair is expected to produce — and skip any popped option whose pair
    no longer maps to that id.

    Args:
        str: Input text to tokenize. (NOTE: parameter shadows the `str`
            builtin; kept for interface compatibility.)
        bos: Optional beginning-of-sequence token, prepended only when it
            exists in the vocabulary.
        eos: Optional end-of-sequence token, appended only when it exists
            in the vocabulary.

    Returns:
        The token strings paired with their vocabulary ids, in input order.
    """
    var output = List[TokenWithID]()
    if bos and bos.value()[] in self.vocab:
        output.append(TokenWithID(bos.value()[], self.vocab[bos.value()[]]))

    var merge_options = MaxHeap[MergeOption]()
    var tokens = Ball[String]()

    @parameter
    fn maybe_add_merge(left: tokens.ID, right: tokens.ID) raises:
        # Queue the adjacent pair as a merge candidate only if the
        # vocabulary knows the merge.
        var pair = StringPair(tokens[left], tokens[right])
        if pair in self.merges:
            var merge = self.merges[pair]
            var score = merge.rank
            merge_options.push(MergeOption(left, right, score, merge.id))

    # Initialize the tokens linked-list and initial merges.
    var prev: Optional[Ball[String].ID] = None
    for i in range(len(str)):
        # Spaces are rewritten as "Ġ" — presumably to match the
        # vocabulary's space encoding; verify against the tokenizer config.
        var id = tokens.append(str[i].replace(" ", "Ġ"))
        if prev:
            maybe_add_merge(prev.value()[], id)
        prev = id

    while merge_options:
        var merge = merge_options.pop()
        # Check whether the best merge is still valid
        if merge.left not in tokens or merge.right not in tokens:
            continue  # outdated merge option
        var pair = StringPair(tokens[merge.left], tokens[merge.right])
        if (
            pair not in self.merges
            or self.merges[pair].id != merge.checksum
        ):
            continue  # outdated merge option
        # Merge the right token into the left token, then
        # add any new valid merge options to the priority queue.
        var left = tokens.prev(merge.left)
        var right = tokens.next(merge.right)
        tokens[merge.left] = tokens[merge.left] + tokens[merge.right]
        tokens.remove(merge.right)
        if right:
            maybe_add_merge(merge.left, right.value()[])
        if left:
            maybe_add_merge(left.value()[], merge.left)

    # Loop through the final list and construct the token sequence.
    var node_id = tokens._head
    while node_id:
        var id = node_id.value()[]
        var token = tokens[id]
        output.append(TokenWithID(token, self._encode_token(token)))
        node_id = tokens.next(id)

    if eos and eos.value()[] in self.vocab:
        output.append(TokenWithID(eos.value()[], self.vocab[eos.value()[]]))
    return output
fn _encode_token(self, token: String) raises -> Int:
    """Returns the vocabulary id for `token`, or 0 when it is unknown."""
    var entry = self.vocab.find(token)
    if entry:
        return entry.value()[]
    return 0
| max/examples/graph-api/pipelines/replit/bpe_tokenizer/bpe_tokenizer.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/bpe_tokenizer/json.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""
Some helpful JSON utilities.
JsonStorage: A container for parsed JSON data.
Non-recursive, due to the
[bug with recursive structs](https://linear.app/modularml/issue/MOCO-577/%5Bmojo%5D-%5Bbug%5D-value-doesnt-work-on-recursive-structs).
```
var s: String = "{'version': 1, 'data': ['a', 'b', 'c']}"
var js = JsonStorage.from_string(s^)
```
All values can be accessed via `JsonStorage.get`
```
print(js.get()) # Node(type=object, value="{...}")
print(js.get('version')) --> Node(type=number, value="1")
print(js.get('version').to_int()) --> 1
print(js.get('data')) --> Node(type=array, value="['a', 'b', 'c']")
print(js.get('data', '1')) --> Node(type=string, value="b")
```
"""
from collections import List, Dict, Set
var WS = Set[String](" ", "\n", "\t")
alias COLON = ":"
alias COMMA = ","
alias OBJECT_OPEN = "{"
alias OBJECT_CLOSE = "}"
alias ARRAY_OPEN = "["
alias ARRAY_CLOSE = "]"
alias DOUBLE_QUOTE = '"'
alias NULL = "null"
alias TRUE = "true"
alias FALSE = "false"
alias ESCAPE = "\\"
alias NULL_END = "\n"
var NUMBER_CHARS = Set[String](
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "E", "e", ".", "-"
)
# Create a separate set without the "e" so the `get_next_token` function can
# easily differentiate between a number and "true"/"false" literals (when
# searching from right-to-left, "true"/"false" start with "e")
var INITIAL_NUMBER_CHARS = Set[String](
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ".", "-"
)
@value
struct NodeType(Stringable, EqualityComparable):
    """Tag identifying which kind of JSON value a `Node` holds."""

    alias object = NodeType(0)
    alias array = NodeType(1)
    alias string = NodeType(2)
    alias number = NodeType(3)
    alias bool = NodeType(4)
    alias null = NodeType(5)
    alias end = NodeType(6)

    var kind: UInt8

    fn __eq__(self, other: Self) -> Bool:
        return self.kind == other.kind

    fn __ne__(self, other: Self) -> Bool:
        return not (self.kind == other.kind)

    fn __str__(self) -> String:
        # Names indexed by `kind`, mirroring the alias numbering above.
        var names = List[String](
            "object", "array", "string", "number", "bool", "null", "end"
        )
        var k = int(self.kind)
        if k < len(names):
            return names[k]
        return "unknown type"
@value
struct TokenType(EqualityComparable, Stringable, KeyElement):
    """Tag for the lexical tokens produced by `get_next_token`."""

    alias unknown = TokenType(0)
    alias object_open = TokenType(1)
    alias object_close = TokenType(2)
    alias array_open = TokenType(3)
    alias array_close = TokenType(4)
    alias colon = TokenType(5)
    alias comma = TokenType(6)
    alias string = TokenType(7)
    alias number = TokenType(8)
    alias bool = TokenType(9)
    alias null = TokenType(10)
    alias end = TokenType(11)

    var kind: Int

    fn __eq__(self, other: Self) -> Bool:
        return self.kind == other.kind

    fn __ne__(self, other: Self) -> Bool:
        return self.kind != other.kind

    fn __str__(self) -> String:
        if self.kind == 0:
            return "unknown"
        if self.kind == 1:
            return "object_open"
        if self.kind == 2:
            return "object_close"
        if self.kind == 3:
            return "array_open"
        if self.kind == 4:
            return "array_close"
        if self.kind == 5:
            return "colon"
        if self.kind == 6:
            return "comma"
        if self.kind == 7:
            return "string"
        if self.kind == 8:
            return "number"
        if self.kind == 9:
            return "bool"
        if self.kind == 10:
            return "null"
        if self.kind == 11:
            return "end"
        return "unknown type"

    fn __hash__(self) -> Int:
        """Return a 64-bit hash of the type's data."""
        return self.kind

    fn to_node_type(self) raises -> NodeType:
        # Only the object/array openers and value-like tokens correspond
        # to node types; structural tokens (colon, comma, closers) raise.
        if self.kind == 1:
            return NodeType.object
        elif self.kind == 3:
            return NodeType.array
        elif self.kind == 7:
            return NodeType.string
        elif self.kind == 8:
            return NodeType.number
        elif self.kind == 9:
            return NodeType.bool
        elif self.kind == 10:
            return NodeType.null
        elif self.kind == 11:
            return NodeType.end
        raise "Cannot convert token type " + str(self) + " into a NodeType."
var VALUE_TYPES = Set[TokenType](
TokenType.bool, TokenType.number, TokenType.null, TokenType.string
)
def get_next_token(inout s: StringRef) -> (StringRef, TokenType):
    """Gets the next token within the limits and returns the unscanned indices.

    Args:
        s: JSON string, which is advanced beyond consumed bytes.

    Returns:
        Tuple of (
            Substring containing the token contents
            The type of token returned.
        )
    """
    # Skip the white spaces.
    while True:
        if s.empty():
            return StringRef(), TokenType.end
        if s[0] in WS:
            s = s.drop_front()
        else:
            break
    # Keep track of how many bytes are in this token.
    var token = s.take_front(1)
    var i = 1
    var end_idx = len(s)
    var token_type: TokenType
    # TODO: Why doesn't StringRef have a normal getitem?
    var c = String(s[0])
    # Detect which type of token this is.
    if c == OBJECT_OPEN:
        token_type = TokenType.object_open
    elif c == OBJECT_CLOSE:
        token_type = TokenType.object_close
    elif c == ARRAY_OPEN:
        token_type = TokenType.array_open
    elif c == ARRAY_CLOSE:
        token_type = TokenType.array_close
    elif c == COLON:
        token_type = TokenType.colon
    elif c == COMMA:
        token_type = TokenType.comma
    elif c == DOUBLE_QUOTE:
        # Scan for the closing quote, honoring backslash escapes.
        while True:
            if i == end_idx:
                raise "Could not find end double quotes."
            if s[i] == ESCAPE:
                if i + 1 == end_idx:
                    raise "escape at end of line."
                i += 1  # Skip the next character
            elif s[i] == DOUBLE_QUOTE:
                break
            i += 1
        # Crop the double quotes from the token.
        token = s.drop_front(1).take_front(i - 1)
        token_type = TokenType.string
        # Move the i one more char, since it's a double-quote that's part of
        # this string.
        i += 1
    elif c in INITIAL_NUMBER_CHARS:
        while i < end_idx:
            if s[i] not in NUMBER_CHARS:
                break
            i += 1
        # TODO: Validate number
        token = s.take_front(i)
        token_type = TokenType.number
    elif islower(ord(c)):
        # Check if the next token is "true", "false" or "null".
        # (An unused `first_idx` local was removed here.)
        while i < end_idx and islower(ord(s[i])):
            i += 1
        token = s.take_front(i)
        if token == NULL:
            token_type = TokenType.null
        elif token == TRUE:
            token_type = TokenType.bool
        elif token == FALSE:
            token_type = TokenType.bool
        else:
            raise 'Invalid token "' + str(token) + '" in "' + String(s) + '"'
    else:
        # Include up to 20 characters of surrounding context in the error.
        var start = max(0, i - 20)
        var end = min(end_idx, i + 20)
        raise (
            "Unable to parse token: "
            + c
            + " (ord="
            + str(ord(c))
            + ")\n"
            + "Context: "
            + String(s)[start:end]
        )
    s = s.drop_front(i)
    return token, token_type
@value
struct Node(Stringable):
    """A single parsed JSON value."""

    # Kind of JSON value this node holds.
    var type: NodeType
    # Raw text of the value (without surrounding quotes for strings).
    var value: StringRef
    # Index into the parent JSON storage. Only valid for "Object" and "array"
    # node types.
    var storage_index: Int

    fn __str__(self) -> String:
        return (
            "Node(type="
            + str(self.type)
            + ", value="
            + str(self.value)
            + ", idx="
            + str(self.storage_index)
            + ")"
        )

    # TODO: Add `to_float`
    fn to_int(self) raises -> Int:
        """Returns the value as an Int; raises for non-number nodes."""
        if self.type == NodeType.number:
            return int(self.value)
        else:
            raise "Cannot convert node of type " + str(
                self.type
            ) + " to number."

    fn to_bool(self) raises -> Bool:
        """Returns the value as a Bool; raises for non-bool nodes."""
        if self.type == NodeType.bool:
            if self.value == TRUE:
                return True
            elif self.value == FALSE:
                return False
            else:
                raise "Something went wrong."
        else:
            raise "Cannot convert node of type " + str(self.type) + " to bool."
@value
struct JsonStorage:
    """Flattened parse result of a JSON document.

    `root` is the top-level node. Object and array contents live in
    `storage`: each entry maps keys (or stringified array indices) to
    child nodes, avoiding recursive struct values.
    """

    var root: Node
    var storage: List[Dict[String, Node]]

    @staticmethod
    def from_string(s: StringRef) -> Self:
        """Parses `s` into a JsonStorage, rejecting trailing content."""
        js = _from_string(s)
        # Make sure nothing appears afterwards:
        token, token_type = get_next_token(s)
        if token_type != TokenType.end:
            raise "Unexpected token found: " + str(token)
        return js

    def get(self, args: List[String]) -> Node:
        """Walks `args` as a key path from the root and returns that node."""
        var node = self.root
        if len(args) == 0:
            return node
        for n in range(len(args)):
            var key = args[n]
            if node.type != NodeType.object and node.type != NodeType.array:
                raise "Can't access key '" + key + "' from " + str(
                    node
                ) + " because it's not an array or object."
            try:
                node = self.storage[node.storage_index][key]
            except e:
                raise "Unable to get key '" + key + "' from " + str(node)
        return node

    def get(self, *args: String) -> Node:
        """Variadic convenience overload of `get`."""
        # Convert to list -- can't do self.get(args) :(
        var args_list = List[String]()
        for ele in args:
            args_list.append(ele[])
        return self.get(args_list)
def _from_string(inout s: StringRef) -> JsonStorage:
    """Parses one JSON value from the front of `s`, advancing `s` past it.

    Object and array contents are flattened into `storage` dicts (arrays
    use stringified indices as keys); nested values from recursive calls
    are appended with their storage indices shifted accordingly.
    """
    # Dict and Arrays will want the entire span as their location.
    orig_buffer = s
    token, token_type = get_next_token(s)
    root = Node(token_type.to_node_type(), token, -1)
    var storage = List[Dict[String, Node]]()

    if token_type == TokenType.object_open:
        root.storage_index = 0
        var root_storage = Dict[String, Node]()
        storage.append(root_storage)
        var object_closed = False
        while not object_closed:
            # Look ahead to see if the next token is a "}"
            var temp = s
            token, token_type = get_next_token(temp)
            if token_type == TokenType.object_close:
                object_closed = True
                s = temp
                break

            # Read the next token (object key)
            token, token_type = get_next_token(s)
            var key = token
            if token_type == TokenType.end:
                break
            elif token_type == TokenType.object_close:
                object_closed = True
                break
            elif token_type != TokenType.string:
                raise "JSON key must be a string, got: " + str(token)

            # Consume the next token (should be a ':')
            token, token_type = get_next_token(s)
            if token_type != TokenType.colon:
                raise "Expected a ':' after string key, got: " + str(token)

            # Get the value using a recursive call to _from_string:
            var value = _from_string(s)

            # Get the current length of `storage` which will be used to
            # increment all indices in the returned nodes.
            var inc = len(storage)
            value.root.storage_index += inc
            for d in value.storage:
                for ele in d[].values():
                    ele[].storage_index += inc
            storage.extend(value.storage)
            # Add the returned value storage to the root's storage.
            storage[0][key] = value.root  # root_storage[key] doesn't work

            # Consume the next token, which could end the object or should be
            # a comma.
            token, token_type = get_next_token(s)
            if token_type == TokenType.object_close:
                object_closed = True
                break
            elif token_type == TokenType.end:
                break
            elif token_type != TokenType.comma:
                raise "Invalid formatted JSON object. Expected a comma but got: " + str(
                    token
                )
        if object_closed:
            # NOTE(review): this spans from the "{" to the end of the
            # original buffer, not just to the matching "}" — confirm no
            # caller relies on the exact extent of `value`.
            root.value = orig_buffer
        else:
            raise "Invalid formatted JSON object. Object was never closed."
    elif token_type == TokenType.array_open:
        root.storage_index = 0
        var root_storage = Dict[String, Node]()
        storage.append(root_storage)
        # Arrays are also stored in a dict. The keys are the str(int index)
        # of each element.
        var key_int = 0
        var array_closed = False
        while True:
            # Look ahead to see if the next token is a "]"
            var temp = s
            token, token_type = get_next_token(temp)
            if token_type == TokenType.array_close:
                array_closed = True
                s = temp
                break

            # Get the value using a recursive call to _from_string:
            var value = _from_string(s)

            # Get the current length of `storage` which will be used to
            # increment all indices in the returned nodes.
            var inc = len(storage)
            value.root.storage_index += inc
            for d in value.storage:
                for ele in d[].values():
                    ele[].storage_index += inc
            storage.extend(value.storage)
            # Add the returned value storage to the root's storage.
            # (root_storage[key] doesn't work)
            storage[0][str(key_int)] = value.root
            key_int += 1

            # Consume the next token, which could end the array or should be
            # a comma.
            token, token_type = get_next_token(s)
            if token_type == TokenType.array_close:
                array_closed = True
                break
            elif token_type == TokenType.end:
                break
            elif token_type != TokenType.comma:
                raise "Invalid formatted JSON array. Expected a comma but got: " + str(
                    token
                )
        if array_closed:
            # NOTE(review): same extent caveat as the object branch above.
            root.value = orig_buffer
        else:
            raise "Invalid formatted JSON array. Object was never closed."
    return JsonStorage(root, storage)
| max/examples/graph-api/pipelines/replit/bpe_tokenizer/json.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""A simple generic max-heap implementation."""
from collections import List
trait Orderable:
    """Types which have a total order defined on them."""

    # The single comparison the heap relies on; all ordering is via `<`.
    fn __lt__(self, other: Self) -> Bool:
        pass
trait OrderableElement(Orderable, CollectionElement):
    """Orderable types which are also CollectionElements."""

    pass
struct MaxHeap[ElementType: OrderableElement](Sized, Boolable):
    """A max-heap of an Orderable collection type.

    A MaxHeap is a convenient data structure for implementing a priority queue.

    Usage:
    ```mojo
    pq = MaxHeap[...]()
    pq.push(initial)
    while pq:
        var top = pq.pop()
        if something: pq.push(another)
    ```

    Implementation note: this is a standard array-backed binary max-heap.
    The live heap occupies heap[0 : len(heap) - begin_idx]; `begin_idx`
    counts dead slots parked at the back of `heap` by pops, which pushes
    reuse before growing the list. (The previous version mixed offset and
    direct indices and compared only one child in `_sink_down`, which
    could violate the heap property.)
    """

    # Backing storage; only the leading live region is a valid heap.
    var heap: List[ElementType]
    # Number of dead (already popped) slots at the back of `heap`.
    var begin_idx: Int

    fn __init__(inout self):
        """Constructs an empty heap."""
        self.heap = List[ElementType]()
        self.begin_idx = 0

    fn __len__(self) -> Int:
        """Checks how many elements are in the heap."""
        return len(self.heap) - self.begin_idx

    fn __bool__(self) -> Bool:
        """Checks whether the heap has any elements in it."""
        return len(self) != 0

    fn push(inout self, owned elem: ElementType):
        """Adds a value to the heap."""
        var size = len(self.heap) - self.begin_idx
        if self.begin_idx > 0:
            # Reuse a slot freed by an earlier pop instead of growing.
            self.begin_idx -= 1
            self.heap[size] = elem
        else:
            self.heap.append(elem^)
        self._bubble_up(size)

    fn pop(inout self) -> ElementType:
        """Removes the top (largest) element from the heap and returns it."""
        debug_assert(self, "heap is empty")
        # Swap the max with the last live element, shrink the live region,
        # then restore the heap property from the root.
        var last = len(self.heap) - self.begin_idx - 1
        self._swap(0, last)
        self.begin_idx += 1
        self._sink_down(0)
        return self.heap[last]

    fn _swap(inout self, i1: Int, i2: Int):
        # TODO: Swap syntax doesn't support non-register-passable types
        var tmp = self.heap[i1]
        self.heap[i1] = self.heap[i2]
        self.heap[i2] = tmp

    fn _bubble_up(inout self, idx: Int):
        # Restore the heap property upward from `idx` after a push.
        if idx == 0:
            return
        var parent_idx = self._parent_idx(idx)
        if self.heap[parent_idx] < self.heap[idx]:
            self._swap(parent_idx, idx)
            self._bubble_up(parent_idx)

    fn _sink_down(inout self, idx: Int):
        # Restore the heap property downward from `idx` after a pop.
        var size = len(self.heap) - self.begin_idx
        var li = self._left_child_idx(idx)
        var ri = self._right_child_idx(idx)
        # Pick the largest of idx and its (up to two) live children.
        var target_idx = idx
        if li < size and self.heap[target_idx] < self.heap[li]:
            target_idx = li
        if ri < size and self.heap[target_idx] < self.heap[ri]:
            target_idx = ri
        if target_idx != idx:
            self._swap(idx, target_idx)
            self._sink_down(target_idx)

    fn _parent_idx(self, idx: Int) -> Int:
        return (idx - 1) // 2

    fn _left_child_idx(self, idx: Int) -> Int:
        return idx * 2 + 1

    fn _right_child_idx(self, idx: Int) -> Int:
        return idx * 2 + 2
| max/examples/graph-api/pipelines/replit/bpe_tokenizer/max_heap.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/bpe_tokenizer/__init__.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
| max/examples/graph-api/pipelines/replit/bpe_tokenizer/__init__.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""The attention mechanism used within the model."""
from collections import Optional
from utils.numerics import min_finite
from max.graph import ops, Dim, TensorType, Symbol
from max.graph.error import error
from tensor import Tensor, TensorShape
from ..weights.hyperparams import HyperParams
from ..layers.linear import Linear
@always_inline
def tril(input: Symbol, k: Int = 0) -> Symbol:
    """Gets the bottom triangle of the input.

    The upper triangle above the kth diagonal is zero'd out.

    Args:
        input: The input tensor.
        k: The diagonal at which values at and below are kept, and values
            above are zeroed.

    Returns:
        The input with all entries above the k-th diagonal set to zero.

    Raises:
        If the input has rank < 2.
    """
    g = input.graph()
    if input.tensor_type().rank() < 2:
        raise "Can't get tril of Tensor with rank < 2"
    input_shape = ops.shape_of(input)
    N = input_shape[-2]  # Number of rows
    M = input_shape[-1]  # number of columns
    # Multiplying by the boolean mask zeroes everything above the diagonal.
    mask = tri(N, M, g.scalar(Int64(k)))
    return input * mask
def tri(rows: Symbol, cols: Symbol, k: Symbol) -> Symbol:
    """Returns a triangular mask matrix.

    Args:
        rows: Number of rows in the returned matrix.
        cols: Number of columns in the returned matrix.
        k: The diagonal at which values at and below are True, and values
            above are False.

    Returns:
        A Dtype.bool matrix.
    """
    g = rows.graph()
    int_dtype = rows.tensor_type().dtype
    step = g.scalar(1, int_dtype)
    # row[i][0] = i and col[0][j] = j - k, so broadcasting `row >= col`
    # is True exactly where j - i <= k, i.e. on and below the k-th diagonal.
    row = ops.range_fill(
        start=g.scalar(0, int_dtype), limit=rows, step=step
    ).reshape(-1, 1)
    col = ops.range_fill(start=-k, limit=(cols - k), step=step).reshape(1, -1)
    return ops.greater_equal(row, col)
@value
struct GroupedQueryAttention:
    """An attention layer that uses an intermediate number of key-value heads.
    """

    var hyperparams: HyperParams
    var wkqv: Linear
    var out_proj: Linear

    def __call__(
        self,
        input: Symbol,
        attn_bias: Optional[Symbol] = None,
        k_cache: Optional[Symbol] = None,
        v_cache: Optional[Symbol] = None,
        is_causal: Bool = True,
    ) -> (Symbol, Symbol, Symbol):
        """Builds the GQA layer.

        Args:
            input: Encoded inputs.
            attn_bias: An additive bias to apply to the attention weights.
            k_cache: Cached computed keys for previous tokens.
            v_cache: Cached computed values for previous tokens. If
                `k_cache` is defined, `v_cache` must be defined as well.
            is_causal: Whether to apply a mask to the attention layer to ensure
                that the output tokens are only based on past positions.

        Returns:
            Attention outputs, new k_cache, and new v_cache.

        Raises:
            Error when `v_cache` is not defined when `k_cache` is defined.
        """
        g = input.graph()
        n_heads = self.hyperparams.n_heads
        kv_n_heads = self.hyperparams.kv_n_heads
        d_model = self.hyperparams.d_model
        batch_size = self.hyperparams.batch_size
        head_dim = d_model // n_heads
        qkv = self.wkqv(input)
        # Split the fused projection into query / key / value parts.
        split = ops.split[3](
            qkv,
            sizes=(d_model, kv_n_heads * head_dim, kv_n_heads * head_dim),
            axis=2,
        )
        query = split[0]
        key = split[1]
        value = split[2]
        # Apply scaled dot product attention on the query, key and value.
        q = query.reshape(batch_size, -1, n_heads, head_dim)
        q = ops.transpose(q, 1, 2)
        k = key.reshape(batch_size, -1, kv_n_heads, head_dim)
        k = ops.transpose(k, 1, 2)
        k = ops.transpose(k, 2, 3)
        v = value.reshape(batch_size, -1, kv_n_heads, head_dim)
        v = ops.transpose(v, 1, 2)
        if k_cache:
            # Prepend cached keys/values along the sequence axis.
            k_cache_value = k_cache.value()[]
            k = ops.concat(List[Symbol](k_cache_value, k), axis=3)
            if not v_cache:
                raise error(g, "v_cache cannot be None if k_cache is defined.")
            v_cache_value = v_cache.value()[]
            v = ops.concat(List[Symbol](v_cache_value, v), axis=2)
        # Record the k and v into the cache. An extra dimension is added
        # so that all cached keys/values can be concatenated on that dimension.
        k_cache_update = k.reshape(1, batch_size, kv_n_heads, head_dim, -1)
        v_cache_update = v.reshape(1, batch_size, kv_n_heads, -1, head_dim)
        if kv_n_heads > 1 and kv_n_heads < n_heads:
            # Repeat interleave k and v to match the number of heads in the
            # query.
            n_repeats = n_heads // kv_n_heads
            k = k.reshape(batch_size, kv_n_heads, 1, head_dim, -1)
            k = ops.tile(k, List[Int64](1, 1, n_repeats, 1, 1))
            k = k.reshape(batch_size, kv_n_heads * n_repeats, head_dim, -1)
            v = v.reshape(batch_size, kv_n_heads, 1, -1, head_dim)
            # Fix: mirror the k path exactly. Previously v was tiled by
            # `batch_size` on dim 0 and reshaped with a leading 1, which is
            # only equivalent to this when batch_size == 1.
            v = ops.tile(v, List[Int64](1, 1, n_repeats, 1, 1))
            v = v.reshape(batch_size, kv_n_heads * n_repeats, -1, head_dim)
        # NOTE(review): `math.sqrt` is used here but no `import math` is
        # visible in this file's imports — confirm it is imported.
        softmax_scale = 1 / math.sqrt(d_model / n_heads)
        attn_weight = (q @ k) * softmax_scale
        s_q = ops.shape_of(q)[2]
        s_k = ops.shape_of(k)[-1]
        if attn_bias:
            # Crop the bias to the trailing s_q x s_k window.
            bias = attn_bias.value()[]
            attn_bias_shape = ops.shape_of(bias)
            _s_q = ops.max(g.scalar(Int32(0)), attn_bias_shape[2] - s_q)
            _s_k = ops.max(g.scalar(Int32(0)), attn_bias_shape[3] - s_k)
            bias = bias[:, :, _s_q:, _s_k:].rebind(
                Dim.dynamic(), Dim.dynamic(), 1, Dim.dynamic()
            )
            attn_weight = attn_weight + bias
        if is_causal and (not q.tensor_type().dims[2] == 1):
            # Apply a triangular mask to the attention weight so that in the
            # later matmul, each token in the output doesn't involve
            # information from future positions.
            s = ops.max(s_q, s_k)
            causal_mask = g.full[DType.bool](1, List[Symbol](s, s))
            causal_mask = tril(causal_mask)
            causal_mask = causal_mask[-s_q:, -s_k:].reshape(1, 1, s_q, s_k)
            attn_weight_shape = ops.shape_of(attn_weight)
            min_val = g.op(
                "mo.broadcast_to",
                List[Symbol](
                    g.scalar(min_finite[DType.float32]()), attn_weight_shape
                ),
                TensorType(
                    DType.float32,
                    Dim.dynamic(),
                    Dim.dynamic(),
                    Dim.dynamic(),
                    Dim.dynamic(),
                ),
            )
            causal_mask = g.op(
                "mo.broadcast_to",
                List[Symbol](causal_mask, attn_weight_shape),
                TensorType(
                    causal_mask.tensor_type().dtype,
                    Dim.dynamic(),
                    Dim.dynamic(),
                    Dim.dynamic(),
                    Dim.dynamic(),
                ),
            )
            # Masked positions get the most-negative float so softmax
            # drives them to ~0.
            attn_weight = ops.select(causal_mask, attn_weight, min_val)
        attn_weight = ops.softmax(attn_weight)
        out = attn_weight @ v
        out = ops.transpose(out, 1, 2)
        out = out.reshape(batch_size, -1, self.hyperparams.d_model)
        return self.out_proj(out), k_cache_update, v_cache_update
| max/examples/graph-api/pipelines/replit/layers/attention.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""The core Transformer block of the model."""
from collections import Optional
from max.graph import ops, Symbol, Graph
from ..layers.attention import GroupedQueryAttention
from ..layers.linear import Linear
from ..layers.norm import LPLayerNorm
from ..weights.replit_checkpoint import Checkpoint
from ..weights.hyperparams import HyperParams
@value
struct MPTMLP:
    """Multilayer perceptron used in MPT."""

    var up_proj: Linear
    var down_proj: Linear

    def __call__(self, input: Symbol) -> Symbol:
        # up-project -> GELU -> down-project.
        return self.down_proj(ops.gelu(self.up_proj(input)))
struct MPTBlock[T: Checkpoint, weights_type: DType]:
    """A block in the MosaicML Pretrained Transformer model."""

    var norm_1: LPLayerNorm
    var attn: GroupedQueryAttention
    var norm_2: LPLayerNorm
    var ffn: MPTMLP

    def __init__(
        inout self,
        norm_1: LPLayerNorm,
        attn: GroupedQueryAttention,
        norm_2: LPLayerNorm,
        ffn: MPTMLP,
    ):
        self.norm_1 = norm_1
        self.attn = attn
        self.norm_2 = norm_2
        self.ffn = ffn

    def __call__(
        self,
        input: Symbol,
        attn_bias: Optional[Symbol] = None,
        k_cache: Optional[Symbol] = None,
        v_cache: Optional[Symbol] = None,
    ) -> (Symbol, Symbol, Symbol):
        """Runs the block: pre-norm attention then pre-norm MLP, each with
        a residual connection.

        Returns the block output plus the updated k/v cache entries from
        the attention layer.
        """
        a = self.norm_1(input)
        b, k_cache_update, v_cache_update = self.attn(
            a, attn_bias, k_cache, v_cache
        )
        # Rebind the attention output to the shape of the input to allow the
        # `add` op to correctly set shapes.
        b = ops.rebind(b, input.tensor_type().dims)
        output = input + b
        m = self.norm_2(output)
        n = self.ffn(m)
        return output + n, k_cache_update, v_cache_update

    @staticmethod
    def create(
        params: T,
        prefix: String,
        g: Graph,
        hyperparams: HyperParams,
    ) -> MPTBlock[T, weights_type]:
        """Build a MPT Block from the given params and string prefix.

        Args:
            params: Checkpoint class for getting the weights.
            prefix: String prefix to add to the weight key.
            g: Graph in which to create the weights.
            hyperparams: Model hyperparameters.

        Returns:
            A new MPTBlock object.
        """

        @parameter
        def load_param(name: String) -> Symbol:
            # Weights are loaded as `weights_type` and cast to float32
            # constants at graph-construction time.
            return ops.cast(
                g.constant(params.get[weights_type](prefix + name)),
                DType.float32,
            )

        norm_1 = LPLayerNorm(load_param("norm_1.weight"), hyperparams)
        attn = GroupedQueryAttention(
            hyperparams,
            Linear(load_param("attn.Wqkv.weight")),
            Linear(load_param("attn.out_proj.weight")),
        )
        norm_2 = LPLayerNorm(load_param("norm_2.weight"), hyperparams)
        ffn = MPTMLP(
            Linear(load_param("ffn.up_proj.weight")),
            Linear(load_param("ffn.down_proj.weight")),
        )
        return MPTBlock[T, weights_type](norm_1, attn, norm_2, ffn)
| max/examples/graph-api/pipelines/replit/layers/block.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from collections import Optional, List
from pathlib import Path
from max.graph import ops, Dim, TensorType, Symbol
@value
struct Embedding:
    """A layer that converts tokens into dense vectors."""

    # Embedding table; rows are gathered by token id.
    var weights: Symbol

    def __call__(self, input: Symbol) -> Symbol:
        """Gathers the embedding row for each token id in `input`."""
        return ops.gather(self.weights, input, axis=0)
@value
struct SharedEmbedding:
    """An embedding layer that can both embed and unembed inputs."""

    # Single table used for both directions (weight tying).
    var weights: Symbol

    def __call__(self, input: Symbol, unembed: Bool = False) -> Symbol:
        """Embeds token ids; with `unembed=True`, projects hidden states
        back onto the vocabulary via the transposed table instead."""
        if unembed:
            return input @ ops.transpose_matrix(self.weights)
        return ops.gather(self.weights, input, axis=0)
| max/examples/graph-api/pipelines/replit/layers/embedding.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/layers/linear.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from max.graph import ops, Symbol
@value
struct Linear:
    """A fully connected layer."""

    # Weight matrix; multiplied transposed at call time (no bias term).
    var weight: Symbol

    def __call__(self, input: Symbol) -> Symbol:
        """Applies `input @ weight.T`."""
        return input @ ops.transpose_matrix(self.weight)
| max/examples/graph-api/pipelines/replit/layers/linear.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/layers/norm.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from max.graph import ops, TensorType, Symbol
from tensor import Tensor, TensorShape
from ..weights.hyperparams import HyperParams
@value
struct LPLayerNorm:
    """Low Precision Layer Normalization."""

    # Small constant passed to layer_norm for numerical stability.
    alias eps: Float32 = 1e-05

    # Learned scale (gamma) of size d_model.
    var weight: Symbol
    var hyperparams: HyperParams

    def __call__(self, input: Symbol) -> Symbol:
        g = input.graph()
        # The bias (beta) is fixed to zeros; only the scale is learned.
        beta = g.constant(
            Tensor[DType.float32](TensorShape(self.hyperparams.d_model), 0)
        )
        out = ops.layer_norm(input, self.weight, beta, self.eps)
        return out
| max/examples/graph-api/pipelines/replit/layers/norm.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/layers/__init__.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
| max/examples/graph-api/pipelines/replit/layers/__init__.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/model/replit.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
import math
from utils.numerics import min_finite
from collections import Optional, List
from max.graph import ops, Dim, TensorType, Symbol, Graph, Type
from tensor import Tensor, TensorShape
from ..layers.embedding import SharedEmbedding
from ..layers.block import MPTBlock
from ..layers.norm import LPLayerNorm
from ..weights.replit_checkpoint import Checkpoint
from ..weights.hyperparams import HyperParams
def gen_slopes(g: Graph, n_heads: Int32, alibi_bias_max: Int32 = 8) -> Symbol:
    """Builds the per-head ALiBi slope tensor.

    Computes slopes 1 / 2^(k * alibi_bias_max / H) for k = 1..H, where H
    is `n_heads` rounded up to the next power of two. When rounding added
    extra heads, the slopes are interleaved (odd indices first, then even)
    and truncated back to `n_heads`.

    Returns:
        A symbol of shape (1, n_heads, 1, 1) suitable for broadcasting
        against an attention-bias tensor.
    """
    # Round the head count up to the next power of two.
    log2_heads = math.ceil(math.log2(n_heads.cast[DType.float32]()))
    padded_heads = pow(Float32(2), log2_heads).cast[DType.int32]()

    # Exponents k * (alibi_bias_max / padded_heads) for k = 1..padded_heads.
    exponents = ops.cast(
        g.range[DType.int32](1, padded_heads + 1, 1), DType.float32
    )
    exponents = exponents * g.scalar(
        alibi_bias_max.cast[DType.float32]()
        / padded_heads.cast[DType.float32]()
    )
    slopes = ops.div(
        g.scalar(Float32(1)), ops.pow(g.scalar(Float32(2)), exponents)
    )

    if padded_heads != n_heads:
        # TODO: Update to slopes[1::2] and slopes[::2] when slicing is fixed.
        slopes = ops.concat(
            List[Symbol](
                slopes[1 : int(padded_heads) : 2],
                slopes[0 : int(padded_heads) : 2],
            )
        )
        slopes = slopes[: int(n_heads)]

    return slopes.reshape(1, int(n_heads), 1, 1)
struct Replit[T: Checkpoint, weights_type: DType]:
    """Replit model implementation.

    Parameters:
        T: Checkpoint implementation used to load parameter values by name.
        weights_type: dtype of the stored weights; values are cast to
            float32 when loaded into the graph.
    """

    # Model hyperparameters (layer counts, head counts, dimensions, etc.).
    var hyperparams: HyperParams
    def __init__(inout self, hyperparams: HyperParams):
        self.hyperparams = hyperparams
    def create_empty_cache(
        self,
    ) -> (Tensor[DType.float32], Tensor[DType.float32]):
        """Returns empty (zero-length sequence) key and value caches.

        Key cache shape: (num_blocks, batch_size, kv_n_heads, head_dim, 0).
        Value cache shape: (num_blocks, batch_size, kv_n_heads, 0, head_dim).
        """
        head_dim = self.hyperparams.d_model // self.hyperparams.n_heads
        return (
            Tensor[DType.float32](
                TensorShape(
                    self.hyperparams.num_blocks,
                    self.hyperparams.batch_size,
                    self.hyperparams.kv_n_heads,
                    head_dim,
                    0,
                )
            ),
            Tensor[DType.float32](
                TensorShape(
                    self.hyperparams.num_blocks,
                    self.hyperparams.batch_size,
                    self.hyperparams.kv_n_heads,
                    0,
                    head_dim,
                )
            ),
        )
    def _attn_bias(
        self, g: Graph, attention_mask: Optional[Symbol] = None
    ) -> Symbol:
        """Builds the ALiBi attention bias, optionally masked.

        Produces a (1, n_heads, 1, seq_len) bias of per-head slopes times
        relative positions (1 - seq_len .. 0). When `attention_mask` is
        given, masked-out positions are filled with the minimum finite
        float32 so softmax drives them to ~0.
        """
        # Relative positions 1 - seq_len .. 0, as float32.
        alibi_bias = ops.cast(
            g.range[DType.int32](1 - self.hyperparams.seq_len, 1, 1),
            DType.float32,
        )
        alibi_bias = alibi_bias.reshape(1, 1, 1, self.hyperparams.seq_len)
        slopes = gen_slopes(
            g, self.hyperparams.n_heads, self.hyperparams.alibi_bias_max
        )
        attn_bias = ops.cast(alibi_bias * slopes, DType.float32)
        if attention_mask:
            mask = attention_mask.value()[]
            # s_k: the mask's key-sequence length (its last dimension).
            s_k = ops.shape_of(mask)[-1]
            # Trim the bias so its last dimension matches s_k (keep the
            # trailing s_k positions; _s_k is clamped to be non-negative).
            _s_k = ops.max(
                g.scalar(Int32(0)), ops.shape_of(attn_bias)[-1] - s_k
            )
            attn_bias = attn_bias[:, :, :, _s_k:]
            attn_bias_shape = ops.shape_of(attn_bias)
            broadcast_dims = List[Dim](
                Dim.dynamic(), Dim.dynamic(), Dim.dynamic(), Dim.dynamic()
            )
            # Broadcast the (batch, 1, 1, s_k) mask and the fill value up
            # to the bias shape, then select bias where the mask is true
            # and the minimum finite float elsewhere.
            mask = mask.reshape(-1, 1, 1, s_k)
            mask = g.op(
                "mo.broadcast_to",
                List[Symbol](mask, attn_bias_shape),
                TensorType(mask.tensor_type().dtype, broadcast_dims),
            )
            min_val = g.op(
                "mo.broadcast_to",
                List[Symbol](
                    g.scalar(min_finite[DType.float32]()), attn_bias_shape
                ),
                TensorType(DType.float32, broadcast_dims),
            )
            attn_bias = ops.select(mask, attn_bias, min_val)
        return attn_bias
    def build_graph(
        self,
        name: String,
        params: T,
        with_attention_mask: Bool = False,
        use_cache: Bool = False,
    ) -> Graph:
        """Builds the replit model graph.
        The graph takes encoded tokens as input and outputs the predicted
        logits.
        Args:
            name: Name of the graph.
            params: Checkpoint class that loads parameter values.
            with_attention_mask: Whether to build the graph with an attention
              mask input.
            use_cache: Whether to build the graph with a key and value cache.
              When this is true, the updated cache values are included in the
              graph outputs.
        Returns:
            Replit Graph.
        """
        # Set up graph and inputs.
        input_type = TensorType(
            DType.int64, self.hyperparams.batch_size, Dim.dynamic()
        )
        in_types = List[Type](input_type)
        # Input slots: tokens are g[0]; the mask (if any) is g[1]; the
        # key/value caches (if any) occupy the next two slots.
        mask_input_idx = -1
        cache_input_idx = -1
        if with_attention_mask:
            attention_mask_type = TensorType(
                DType.bool, self.hyperparams.batch_size, Dim.dynamic()
            )
            in_types.append(attention_mask_type)
            mask_input_idx = 1
            cache_input_idx = 2
        else:
            cache_input_idx = 1
        if use_cache:
            head_dim = self.hyperparams.d_model // self.hyperparams.n_heads
            k_cache_type = TensorType(
                DType.float32,
                self.hyperparams.num_blocks,
                self.hyperparams.batch_size,
                self.hyperparams.kv_n_heads,
                head_dim,
                Dim.dynamic(),
            )
            v_cache_type = TensorType(
                DType.float32,
                self.hyperparams.num_blocks,
                self.hyperparams.batch_size,
                self.hyperparams.kv_n_heads,
                Dim.dynamic(),
                head_dim,
            )
            in_types.append(k_cache_type)
            in_types.append(v_cache_type)
        g = Graph(
            name,
            in_types=in_types,
        )

        # Helper that loads a named checkpoint tensor as a float32 graph
        # constant (weights may be stored in a lower-precision dtype).
        @parameter
        def weight(name: String) -> Symbol:
            return ops.cast(
                g.constant(params.get[weights_type](name)), DType.float32
            )
        wte = SharedEmbedding(weight("transformer.wte.weight"))
        x = wte(g[0])
        if with_attention_mask:
            attn_bias = self._attn_bias(g, g[1])
        else:
            attn_bias = self._attn_bias(g)
        # Run through the transformer blocks. If the key and values are cached,
        # store the updated values which will later be concatenated and returned
        # as outputs.
        k_cache_updates = List[Symbol]()
        v_cache_updates = List[Symbol]()
        for i in range(self.hyperparams.num_blocks):
            block_prefix = "transformer.blocks." + str(i) + "."
            block = MPTBlock[T, weights_type].create(
                params, block_prefix, g, self.hyperparams
            )
            if use_cache:
                # Per-block cache slice: index i of the stacked cache inputs.
                k_cache = g[cache_input_idx][i]
                v_cache = g[cache_input_idx + 1][i]
                x, k_cache_update, v_cache_update = block(
                    x, attn_bias, k_cache, v_cache
                )
                k_cache_updates.append(k_cache_update)
                v_cache_updates.append(v_cache_update)
            else:
                x = block(x, attn_bias)[0]
        norm_f = LPLayerNorm(
            weight("transformer.norm_f.weight"), self.hyperparams
        )
        x = norm_f(x)
        # Generate output tokens using the same SharedEmbedding layer created
        # previously.
        x = wte(x, True)
        if use_cache:
            # Re-stack the per-block cache updates along the block axis.
            k_cache = ops.concat(k_cache_updates)
            v_cache = ops.concat(v_cache_updates)
            g.output(List[Symbol](x, k_cache, v_cache))
        else:
            g.output(x)
        return g
| max/examples/graph-api/pipelines/replit/model/replit.mojo | false |