content
stringlengths 32
91.6k
| path
stringlengths 14
91
| fimified
bool 2
classes |
---|---|---|
<filename>max/examples/graph-api/pipelines/replit/model/__init__.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
| max/examples/graph-api/pipelines/replit/model/__init__.mojo | false |
<filename>max/examples/graph-api/pipelines/replit/weights/hyperparams.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
@value
struct HyperParams:
    """Architecture and input hyperparameters for the Replit code model."""

    # Number of sequences processed per forward pass.
    var batch_size: Int
    # Maximum sequence length (context window) in tokens.
    var seq_len: Int
    # Number of query attention heads.
    var n_heads: Int
    # Whether attention is causally masked (decoder-style).
    var causal: Bool
    # Whether ALiBi positional biases are applied to attention scores.
    var alibi: Bool
    # Maximum magnitude of the ALiBi bias.
    var alibi_bias_max: Int
    # Number of transformer blocks (layers).
    var num_blocks: Int
    # Token vocabulary size.
    var vocab_size: Int
    # Embedding / hidden dimension.
    var d_model: Int
    # Number of key/value heads (grouped-query attention when < n_heads).
    var kv_n_heads: Int


def get_default() -> HyperParams:
    """Returns default hyperparameters.

    NOTE(review): values appear to target the replit-code 3B checkpoint —
    confirm against the weights actually being loaded.
    """
    return HyperParams(
        batch_size=1,
        seq_len=4096,
        n_heads=24,
        causal=True,
        alibi=True,
        alibi_bias_max=8,
        num_blocks=32,
        vocab_size=32768,
        d_model=3072,
        kv_n_heads=8,
    )
| max/examples/graph-api/pipelines/replit/weights/hyperparams.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from pathlib import Path
from tensor import Tensor
trait Checkpoint(Movable):
    """Interface for loading model weights from a serialized checkpoint."""

    def __init__(inout self, path: Path):
        """Initializes the weights file from a path.

        Args:
            path: Filepath to the model's weights file.
        """
        ...

    def get[type: DType](self, key: String) -> Tensor[type]:
        """Returns the tensor stored under `key`, possibly seeking the file.

        Args:
            key: Used to look up the tensor in the weights file.

        Returns:
            A tensor corresponding to `key`, containing a copy of data
            from the weights file.

        Raises:
            An error for invalid key arguments.
        """
        ...
struct ReplitCheckpoint(Checkpoint):
    """Reads from a directory containing serialized Mojo Tensors.

    Each weight is stored as its own file named after the lookup key,
    inside `model_path`.
    """

    # Directory that holds one serialized Tensor file per weight key.
    var model_path: Path

    def __init__(inout self, path: Path):
        """Initializes the weights from a path.

        Args:
            path: Path to model weights directory.
        """
        self.model_path = path

    fn __moveinit__(inout self, owned existing: Self):
        # Transfer the path from the moved-from instance.
        self.model_path = existing.model_path

    def get[type: DType](self, key: String) -> Tensor[type]:
        """Returns the tensor stored under `key`.

        Args:
            key: Used to look up the tensor in the weights directory;
                interpreted as a filename relative to `model_path`.

        Returns:
            A tensor corresponding to `key`, containing a copy of data
            read from the weights file.

        Raises:
            An error if no file named `key` exists under `model_path`.
        """
        path = self.model_path / key
        if not path.exists():
            raise "Could not load checkpoint tensor value. " + str(
                path
            ) + " does not exist."
        # Tensor.load reads the serialized tensor from disk into memory.
        return Tensor[type].load(path)
| max/examples/graph-api/pipelines/replit/weights/replit_checkpoint.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
| max/examples/graph-api/pipelines/replit/weights/__init__.mojo | false |
<filename>mo-time/test.mojo
from mo_time import Instant, DateTimeLocal, Duration
from testing import assert_equal
fn main():
    """Smoke tests for mo_time: Instant capture, DateTimeLocal conversions
    (UTC/local/Python), and date arithmetic round-trips over one leap year.

    FIX: removed the unused local `duration` (a `Duration` was constructed
    and never read).
    """
    let instant = Instant.now()
    let dt_instant = DateTimeLocal.from_instant(instant)
    print("dt_instant: ", dt_instant.__str__())
    let dt_utc_instant = DateTimeLocal.from_instant_utc(instant)
    print("dt_utc_instant: ", dt_utc_instant.__str__())
    let dt_utc = DateTimeLocal.now_utc()
    print("dt_utc: ", dt_utc.__str__())
    try:
        # Round-trip through Python's datetime module.
        let pydt = dt_utc.to_py()
        print("Python datetime: ", pydt.to_string())
        let mojodt = DateTimeLocal.from_py(pydt)
        print("Mojo datetime: ", mojodt.__str__())
    except e:
        print("Exception: ", e)
    let dt_local = DateTimeLocal.now()
    print("dt_local: ", dt_local.__str__())
    # 2024 is a leap year, so 2023-09-14 + 1 year spans 366 days
    # (= 8784 hours = 527040 minutes = 31622400 seconds).
    let dt2 = DateTimeLocal(2023, 9, 14, 0, 0, 0)
    _ = assert_equal(dt2.__str__(), "2023-09-14T00:00:00")
    let target_dt_str = "2024-09-14T00:00:00"
    _ = assert_equal(dt2.plus_years(1).__str__(), target_dt_str)
    _ = assert_equal(dt2.plus_months(12).__str__(), target_dt_str)
    _ = assert_equal(dt2.plus_days(366).__str__(), target_dt_str)
    _ = assert_equal(dt2.plus_hours(8784).__str__(), target_dt_str)
    _ = assert_equal(dt2.plus_minutes(527040).__str__(), target_dt_str)
    _ = assert_equal(dt2.plus_seconds(31622400).__str__(), target_dt_str)
| mo-time/test.mojo | false |
from memory.unsafe import Pointer
alias _CLOCK_REALTIME = 0
@value
@register_passable("trivial")
struct _CTimeSpec:
    """Mirror of libc's `struct timespec` for FFI calls.

    NOTE(review): both fields are Mojo `Int` (machine word); assumes a
    64-bit platform where `time_t` and `long` are word-sized — confirm
    for 32-bit targets.
    """

    var tv_sec: Int  # Seconds
    var tv_nsec: Int  # NanoSeconds

    fn __init__() -> Self:
        # Zero-initialized timespec (the epoch).
        return Self {tv_sec: 0, tv_nsec: 0}

    fn as_nanoseconds(self) -> Int:
        # Collapse seconds + nanoseconds into a single nanosecond count.
        return self.tv_sec * 1_000_000_000 + self.tv_nsec
@always_inline
fn clock_gettime() -> _CTimeSpec:
    """Low-level call to the clock_gettime libc function.

    Reads CLOCK_REALTIME (wall-clock time). The libc return code is
    discarded (declared NoneType), so failures go unreported.
    """
    var ts = _CTimeSpec()
    let ts_pointer = Pointer[_CTimeSpec].address_of(ts)
    let clockid_si32: Int32 = _CLOCK_REALTIME
    # Fills `ts` in place via the out-pointer.
    external_call["clock_gettime", NoneType, Int32, Pointer[_CTimeSpec]](
        clockid_si32, ts_pointer
    )
    return ts
@value
@register_passable("trivial")
struct C_tm:
    """Mirror of libc's `struct tm` broken-down time.

    Follows libc conventions: `tm_year` is years since 1900, `tm_mon` is
    0-based, `tm_mday` is 1-based.
    """

    var tm_sec: Int32  # Seconds [0, 60] (60 allows leap seconds)
    var tm_min: Int32  # Minutes [0, 59]
    var tm_hour: Int32  # Hours [0, 23]
    var tm_mday: Int32  # Day of month [1, 31]
    var tm_mon: Int32  # Month [0, 11]
    var tm_year: Int32  # Years since 1900
    var tm_wday: Int32  # Day of week [0, 6] (Sunday = 0)
    var tm_yday: Int32  # Day of year [0, 365]
    var tm_isdst: Int32  # Daylight-saving flag

    fn __init__() -> Self:
        # Zero-initialized broken-down time.
        return Self {
            tm_sec: 0,
            tm_min: 0,
            tm_hour: 0,
            tm_mday: 0,
            tm_mon: 0,
            tm_year: 0,
            tm_wday: 0,
            tm_yday: 0,
            tm_isdst: 0,
        }
@always_inline
fn ts_to_utc_tm(owned ts: _CTimeSpec) -> C_tm:
    """Converts a timespec to UTC broken-down time via libc `gmtime`.

    Passes a pointer to `tv_sec` (a `time_t`); the nanosecond part is
    ignored. `gmtime` returns a pointer to libc's static buffer, which is
    copied out by `.load()` — not thread-safe (gmtime_r would be).
    """
    let ts_pointer = Pointer[Int].address_of(ts.tv_sec)
    # Call libc's gmtime (corrected comment: this is not clock_gettime).
    let tm = external_call["gmtime", Pointer[C_tm], Pointer[Int]](ts_pointer).load()
    return tm
@always_inline
fn ts_to_local_tm(owned ts: _CTimeSpec) -> C_tm:
    """Converts a timespec to local-timezone broken-down time via libc `localtime`.

    Passes a pointer to `tv_sec` (a `time_t`); the nanosecond part is
    ignored. `localtime` returns a pointer to libc's static buffer, which
    is copied out by `.load()` — not thread-safe (localtime_r would be).
    """
    let ts_pointer = Pointer[Int].address_of(ts.tv_sec)
    # Call libc's localtime (corrected comment: this is not clock_gettime).
    let tm = external_call["localtime", Pointer[C_tm], Pointer[Int]](ts_pointer).load()
    return tm
| mo-time/mo_time/ctypes.mojo | false |
from mo_time.ctypes import ts_to_local_tm, ts_to_utc_tm, _CTimeSpec, C_tm
from mo_time.duration import Duration, days_in_month
from python.object import PythonObject
from python import Python
@value
struct Date:
    """A calendar date (no time-of-day component)."""

    var year: Int32
    var month: Int32  # 1-12
    var day: Int32  # 1-31

    fn to_datetimelocal(self, time: Time) -> DateTimeLocal:
        """Combines this date with `time` into a full DateTimeLocal."""
        return DateTimeLocal(
            self.year,
            self.month,
            self.day,
            time.hour,
            time.minute,
            time.second,
        )

    fn to_datetimelocal(self) -> DateTimeLocal:
        """Promotes this date to a DateTimeLocal at midnight (00:00:00)."""
        return DateTimeLocal(self.year, self.month, self.day, 0, 0, 0)
@value
struct Time:
    """A time of day (no date component)."""

    var hour: Int32  # 0-23
    var minute: Int32  # 0-59
    var second: Int32  # 0-59

    fn to_datetimelocal(self, date: Date) -> DateTimeLocal:
        """Combines this time with `date` into a full DateTimeLocal."""
        return DateTimeLocal(
            date.year,
            date.month,
            date.day,
            self.hour,
            self.minute,
            self.second,
        )
@value
struct DateTimeLocal:
    """A timezone-naive calendar date-time (year-month-day hour:minute:second).

    Arithmetic helpers (`plus_*`) assume non-negative increments; Int32
    division truncates toward zero, so negative increments are unsupported.
    """

    var year: Int32
    var month: Int32  # 1-12
    var day: Int32  # 1-31
    var hour: Int32  # 0-23
    var minute: Int32  # 0-59
    var second: Int32  # 0-59

    @staticmethod
    fn from_instant_utc(instant: Instant) -> Self:
        """Converts an epoch `Instant` to UTC broken-down time (libc gmtime)."""
        let ts = _CTimeSpec(instant.seconds, instant.nanos)
        let tm = ts_to_utc_tm(ts)
        return DateTimeLocal._from_tm(tm)

    @staticmethod
    fn from_instant(instant: Instant) -> Self:
        """Converts an epoch `Instant` to local-timezone broken-down time (libc localtime)."""
        let ts = _CTimeSpec(instant.seconds, instant.nanos)
        let tm = ts_to_local_tm(ts)
        return DateTimeLocal._from_tm(tm)

    @staticmethod
    fn _from_tm(tm: C_tm) -> Self:
        """Converts a libc `tm` (years since 1900, 0-based months)."""
        return DateTimeLocal(
            tm.tm_year + 1900,
            tm.tm_mon + 1,
            tm.tm_mday,
            tm.tm_hour,
            tm.tm_min,
            tm.tm_sec,
        )

    @staticmethod
    fn now_utc() -> Self:
        """Current date-time in UTC."""
        return DateTimeLocal.from_instant_utc(Instant.now())

    @staticmethod
    fn now() -> Self:
        """Current date-time in the system's local timezone."""
        return DateTimeLocal.from_instant(Instant.now())

    @staticmethod
    fn from_py(py_datetime: PythonObject) raises -> Self:
        """Builds a DateTimeLocal from a Python `datetime.datetime` object."""
        return DateTimeLocal(
            Int32(py_datetime.year.to_float64().to_int()),
            Int32(py_datetime.month.to_float64().to_int()),
            Int32(py_datetime.day.to_float64().to_int()),
            Int32(py_datetime.hour.to_float64().to_int()),
            Int32(py_datetime.minute.to_float64().to_int()),
            Int32(py_datetime.second.to_float64().to_int()),
        )

    fn to_py(self) raises -> PythonObject:
        """Converts to a Python `datetime.datetime`; raises if fields are out of range."""
        # FIX: renamed misspelled local `dateimte` -> `datetime_module`.
        let datetime_module = Python.import_module("datetime")
        return datetime_module.datetime(
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
        )

    # type conversions
    fn to_date(self) -> Date:
        """Drops the time-of-day component."""
        return Date(
            self.year,
            self.month,
            self.day,
        )

    fn to_time(self) -> Time:
        """Drops the date component."""
        return Time(
            self.hour,
            self.minute,
            self.second,
        )

    # arithmetic
    fn plus_years(self, years: Int32) -> Self:
        """Adds whole years.

        NOTE(review): Feb 29 + 1 year yields Feb 29 of a non-leap year
        (invalid date) — confirm whether clamping is desired.
        """
        return DateTimeLocal(
            self.year + years,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
        )

    fn plus_months(self, months: Int32) -> Self:
        """Adds whole months, carrying years.

        BUG FIX: the year carry is now derived from the summed 0-based
        month index. Previously `year + months / 12` failed to advance the
        year when the sum crossed December without `months` itself reaching
        12 (e.g. September + 5 months stayed in the same year).
        """
        let total_months = self.month - 1 + months
        let new_year = self.year + (total_months / 12)
        let new_month = (total_months % 12) + 1
        return DateTimeLocal(
            new_year,
            new_month,
            self.day,
            self.hour,
            self.minute,
            self.second,
        )

    fn plus_days(self, days: Int32) -> Self:
        """Adds days, carrying months/years using real (leap-aware) month lengths."""
        var new_day = days + self.day
        var new_month = self.month
        var new_year = self.year
        var days_in_current_month = days_in_month(
            new_year.__int__(), new_month.__int__()
        )
        # Walk forward one month at a time until the day fits.
        while new_day > days_in_current_month:
            new_day -= days_in_current_month
            new_year += new_month // 12  # December rolls into the next year
            new_month = (new_month % 12) + 1
            days_in_current_month = days_in_month(
                new_year.__int__(), new_month.__int__()
            )
        return DateTimeLocal(
            new_year,
            new_month,
            new_day,
            self.hour,
            self.minute,
            self.second,
        )

    fn plus_hours(self, hours: Int32) -> Self:
        """Adds hours, carrying days.

        BUG FIX: overflow days are computed from `self.hour + hours`, not
        from `hours` alone (previously 20:00 + 10 hours stayed on the same
        day).
        """
        let total_hours = self.hour + hours
        let new_hour = total_hours % 24
        let overflow_days = total_hours / 24
        return DateTimeLocal(
            self.year,
            self.month,
            self.day,
            new_hour,
            self.minute,
            self.second,
        ).plus_days(overflow_days)

    fn plus_minutes(self, minutes: Int32) -> Self:
        """Adds minutes, carrying hours.

        BUG FIX: overflow hours are computed from `self.minute + minutes`,
        not from `minutes` alone.
        """
        let total_minutes = self.minute + minutes
        let new_minute = total_minutes % 60
        let overflow_hours = total_minutes / 60
        return DateTimeLocal(
            self.year,
            self.month,
            self.day,
            self.hour,
            new_minute,
            self.second,
        ).plus_hours(overflow_hours)

    fn plus_seconds(self, seconds: Int32) -> Self:
        """Adds seconds, carrying minutes.

        BUG FIX: overflow minutes are computed from `self.second + seconds`,
        not from `seconds` alone.
        """
        let total_seconds = self.second + seconds
        let new_second = total_seconds % 60
        let overflow_minutes = total_seconds / 60
        return DateTimeLocal(
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            new_second,
        ).plus_minutes(overflow_minutes)

    fn __str__(self) -> String:
        """ISO-8601-style `YYYY-MM-DDTHH:MM:SS` (fields zero-padded to 2 digits)."""
        # TODO use strftime
        return (
            String(self.year.to_int())
            + "-"
            + ("0" if self.month < 10 else "")
            + String(self.month.to_int())
            + "-"
            + ("0" if self.day < 10 else "")
            + String(self.day.to_int())
            + "T"
            + ("0" if self.hour < 10 else "")
            + String(self.hour.to_int())
            + ":"
            + ("0" if self.minute < 10 else "")
            + String(self.minute.to_int())
            + ":"
            + ("0" if self.second < 10 else "")
            + String(self.second.to_int())
        )

    fn __repr__(self) -> String:
        return self.__str__()
| mo-time/mo_time/date_time.mojo | false |
<filename>mo-time/mo_time/duration.mojo
from utils.list import VariadicList
alias _DAYS_IN_MONTH = VariadicList[Int](31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
@always_inline
fn is_leap_year(year: Int) -> Bool:
    """True for Gregorian leap years: every 4th year, except century years
    that are not divisible by 400."""
    if year % 100 == 0:
        # Century years are leap only when divisible by 400 (2000 yes, 1900 no).
        return year % 400 == 0
    return year % 4 == 0
fn days_in_month(year: Int, month: Int) -> Int:
    """Number of days in `month` (1-based) of `year`, adding a leap day for
    February in leap years."""
    var days = _DAYS_IN_MONTH[month - 1]
    if month == 2 and is_leap_year(year):
        days += 1
    return days
@value
struct Duration:
    """A calendar-aware span of time, stored as separate field components.

    NOTE(review): no arithmetic is defined on Duration in this file;
    presumably consumers combine the fields themselves — confirm usage.
    """

    var seconds: Int32
    var minutes: Int32
    var hours: Int32
    var days: Int32
    var months: Int32
    var years: Int32
| mo-time/mo_time/duration.mojo | false |
from mo_time.ctypes import clock_gettime
@value
struct Instant:
    """A wall-clock point in time read from CLOCK_REALTIME.

    `seconds` is whole seconds since the Unix epoch; `nanos` is the
    sub-second remainder in nanoseconds.
    """

    # Seconds since epoch.
    var seconds: Int
    # Nanoseconds past the current second.
    var nanos: Int

    fn __init__(inout self):
        """Zero-valued Instant (the epoch itself)."""
        self.seconds = 0
        self.nanos = 0

    @staticmethod
    fn now() -> Self:
        """Reads the current wall-clock time via libc clock_gettime."""
        let ts = clock_gettime()
        return Instant(ts.tv_sec, ts.tv_nsec)
| mo-time/mo_time/instant.mojo | false |
from mo_time.instant import Instant
from mo_time.date_time import DateTimeLocal
from mo_time.duration import Duration
alias VERSION = "0.1.2"
| mo-time/mo_time/__init__.mojo | false |
<filename>mogeo/mogeo/__init__.mojo
"""
MoGeo: Mojo Geospatial/Geometric Package
"""
| mogeo/mogeo/__init__.mojo | false |
struct FeatureCollection:
    """GeoJSON-style FeatureCollection. Placeholder — not yet implemented."""

    pass
| mogeo/mogeo/geog/feature_collection.mojo | false |
from math import nan, isnan
from math.limit import max_finite
@always_inline
fn empty_value[dtype: DType]() -> SIMD[dtype, 1]:
    """
    Define a special value to mark empty slots or dimensions in structs. Required because SIMD must be power of two.

    Returns NaN for floating-point dtypes, and the maximum finite value for
    all other dtypes. Pair with `is_empty` to test for this sentinel.
    """

    @parameter
    if dtype.is_floating_point():
        return nan[dtype]()
    else:
        return max_finite[dtype]()
@always_inline
fn is_empty[dtype: DType, simd_width: Int](value: SIMD[dtype, simd_width]) -> Bool:
    """
    Check for empty value. Note: NaN cannot be compared by equality. This helper function calls isnan() if the dtype
    is floating point.

    Must mirror the sentinel chosen by `empty_value`: NaN for floats,
    max finite for other dtypes.
    """

    @parameter
    if dtype.is_floating_point():
        return isnan[dtype, simd_width](value)
    else:
        return value == max_finite[dtype]()
| mogeo/mogeo/geom/empty.mojo | false |
<filename>mogeo/mogeo/geom/enums.mojo
@value
@register_passable("trivial")
struct CoordDims(Stringable, Sized):
    """
    Enum for encoding the OGC/WKT variants of Points.
    """

    # TODO: use a real enum here, when mojo supports.
    # Values 100-103 are the fixed variants; see `__len__` for how ND
    # dimension counts interact with `value`.
    var value: SIMD[DType.uint8, 1]

    alias Point = CoordDims(100)
    """
    2 dimensional Point.
    """
    alias PointZ = CoordDims(101)
    """
    3 dimensional Point, has height or altitude (Z).
    """
    alias PointM = CoordDims(102)
    """
    3 dimensional Point, has measure (M).
    """
    alias PointZM = CoordDims(103)
    """
    4 dimensional Point, has height and measure (ZM)
    """
    alias PointND = CoordDims(104)
    """
    N-dimensional Point, number of dimensions from constructor.
    """

    fn __eq__(self, other: Self) -> Bool:
        return self.value == other.value

    fn __ne__(self, other: Self) -> Bool:
        return not self.__eq__(other)

    fn __str__(self) -> String:
        """
        Convert to string, using WKT point variants.
        """
        if self == CoordDims.Point:
            return "Point"
        elif self == CoordDims.PointZ:
            return "Point Z"
        elif self == CoordDims.PointM:
            return "Point M"
        elif self == CoordDims.PointZM:
            return "Point ZM"
        else:
            return "Point ND"

    fn __len__(self) -> Int:
        """Number of coordinate dimensions encoded by this variant."""
        if self == CoordDims.Point:
            return 2
        elif self == CoordDims.PointM or self == CoordDims.PointZ:
            return 3
        elif self == CoordDims.PointZM:
            return 4
        else:
            # NOTE(review): for the ND case the raw `value` itself is
            # returned as the dimension count — this presumes ND instances
            # are constructed with `value` set to the actual number of
            # dimensions (the PointND alias value 104 would read as 104
            # dims). TODO confirm against Point's ND constructor.
            return self.value.to_int()

    fn has_height(self) -> Bool:
        # Z is present in PointZ and PointZM.
        return (self == CoordDims.PointZ) or (self == CoordDims.PointZM)

    fn has_measure(self) -> Bool:
        # M is present in PointM and PointZM.
        return (self == CoordDims.PointM) or (self == CoordDims.PointZM)
| mogeo/mogeo/geom/enums.mojo | false |
<filename>mogeo/mogeo/geom/envelope.mojo
from utils.index import Index
from math.limit import inf, neginf, max_finite, min_finite
from sys.info import simdwidthof, simdbitwidth
from algorithm import vectorize
from algorithm.functional import parallelize
import math
from tensor import Tensor
from mogeo.geom.empty import empty_value, is_empty
from mogeo.geom.point import Point
from mogeo.geom.enums import CoordDims
from mogeo.geom.layout import Layout
from mogeo.geom.traits import Geometric, Emptyable
from mogeo.serialization.traits import JSONable, WKTable, Geoarrowable
from mogeo.serialization import (
WKTParser,
JSONParser,
)
@value
@register_passable("trivial")
struct Envelope[dtype: DType](
    CollectionElement,
    Emptyable,
    Geometric,
    # JSONable,
    Sized,
    Stringable,
    # WKTable,
):
    """
    Envelope aka Bounding Box.

    > "The value of the bbox member must be an array of length 2*n where n is the number of dimensions represented in
    the contained geometries, with all axes of the most southwesterly point followed by all axes of the more
    northeasterly point." GeoJSON spec https://datatracker.ietf.org/doc/html/rfc7946

    Storage: one 8-lane SIMD vector — lanes 0-3 hold the min (southwesterly)
    x/y/z/m values, lanes 4-7 the max (northeasterly) values.
    """

    alias point_simd_dims = 4
    alias envelope_simd_dims = 8
    alias PointCoordsT = SIMD[dtype, Self.point_simd_dims]
    alias PointT = Point[dtype]
    # Lane offsets of each axis within a 4-lane point half.
    alias x_index = 0
    alias y_index = 1
    alias z_index = 2
    alias m_index = 3

    var coords: SIMD[dtype, Self.envelope_simd_dims]
    var ogc_dims: CoordDims

    fn __init__(point: Point[dtype]) -> Self:
        """
        Construct Envelope of Point.

        A single point's envelope is degenerate: min == max on every axis.
        """
        var coords = SIMD[dtype, Self.envelope_simd_dims]()

        @unroll
        for i in range(Self.point_simd_dims):
            coords[i] = point.coords[i]
            coords[i + Self.point_simd_dims] = point.coords[i]

        return Self {coords: coords, ogc_dims: point.ogc_dims}

    # fn __init__(line_string: LineString[simd_dims, dtype]) -> Self:
    #     """
    #     Construct Envelope of LineString.
    #     """
    #     return Self(line_string.data)

    fn __init__(data: Layout[dtype=dtype]) -> Self:
        """
        Construct Envelope from memory Layout.

        Scans every feature column of `data.coordinates` with vectorized
        min/max reductions per dimension.
        """
        alias nelts = simdbitwidth()
        alias n = Self.envelope_simd_dims
        var coords = SIMD[dtype, Self.envelope_simd_dims]()

        # fill initial values of with inf/neginf at each position in the 2*n array
        @unroll
        for d in range(n):  # dims 1:4
            coords[d] = max_finite[
                dtype
            ]()  # min (southwest) values, start from max finite.

        @unroll
        for d in range(Self.point_simd_dims, n):  # dims 5:8
            coords[d] = min_finite[
                dtype
            ]()  # max (northeast) values, start from min finite

        let num_features = data.coordinates.shape()[1]

        # vectorized load and min/max calculation for each of the dims
        @unroll
        for dim in range(Self.point_simd_dims):

            @parameter
            fn min_max_simd[simd_width: Int](feature_idx: Int):
                # Load a simd_width-wide run of this dimension's values and
                # fold its min/max into the running envelope lanes.
                let index = Index(dim, feature_idx)
                let values = data.coordinates.simd_load[simd_width](index)
                let min = values.reduce_min()
                if min < coords[dim]:
                    coords[dim] = min
                let max = values.reduce_max()
                if max > coords[Self.point_simd_dims + dim]:
                    coords[Self.point_simd_dims + dim] = max

            vectorize[nelts, min_max_simd](num_features)

        return Self {coords: coords, ogc_dims: data.ogc_dims}

    @staticmethod
    fn empty(ogc_dims: CoordDims = CoordDims.Point) -> Self:
        """Construct an empty Envelope: every lane holds the sentinel empty value."""
        let coords = SIMD[dtype, Self.envelope_simd_dims](empty_value[dtype]())
        return Self {coords: coords, ogc_dims: ogc_dims}

    fn __eq__(self, other: Self) -> Bool:
        """Lane-wise equality where two empty lanes compare equal.

        NaN is used as empty value, so here cannot simply compare with
        __eq__ on the SIMD values (NaN != NaN).
        """

        @unroll
        for i in range(Self.envelope_simd_dims):
            if is_empty(self.coords[i]) and is_empty(other.coords[i]):
                pass  # equality at index i
            else:
                if is_empty(self.coords[i]) or is_empty(other.coords[i]):
                    return False  # early out: one or the other is empty (but not both) -> not equal
                if self.coords[i] != other.coords[i]:
                    return False  # not equal
        return True  # equal

    fn __ne__(self, other: Self) -> Bool:
        return not self == other

    fn __repr__(self) -> String:
        var res = "Envelope [" + dtype.__str__() + "]("
        for i in range(Self.envelope_simd_dims):
            res += str(self.coords[i])
            if i < Self.envelope_simd_dims - 1:
                res += ", "
        res += ")"
        return res

    fn __len__(self) -> Int:
        # Length is the coordinate dimension count (2-4), not lane count.
        return self.dims()

    fn __str__(self) -> String:
        return self.__repr__()

    #
    # Getters
    #
    fn southwesterly_point(self) -> Self.PointT:
        """Min corner: lanes 0-3 as a Point."""
        alias offset = 0
        return Self.PointT(self.coords.slice[Self.point_simd_dims](offset))

    fn northeasterly_point(self) -> Self.PointT:
        """Max corner: lanes 4-7 as a Point."""
        alias offset = Self.point_simd_dims
        return Self.PointT(self.coords.slice[Self.point_simd_dims](offset))

    @always_inline
    fn min_x(self) -> SIMD[dtype, 1]:
        let i = self.x_index
        return self.coords[i]

    @always_inline
    fn max_x(self) -> SIMD[dtype, 1]:
        let i = Self.point_simd_dims + self.x_index
        return self.coords[i]

    @always_inline
    fn min_y(self) -> SIMD[dtype, 1]:
        alias i = self.y_index
        return self.coords[i]

    @always_inline
    fn max_y(self) -> SIMD[dtype, 1]:
        alias i = Self.point_simd_dims + Self.y_index
        return self.coords[i]

    @always_inline
    fn min_z(self) -> SIMD[dtype, 1]:
        alias i = Self.z_index
        return self.coords[i]

    @always_inline
    fn max_z(self) -> SIMD[dtype, 1]:
        alias i = Self.point_simd_dims + Self.z_index
        return self.coords[i]

    @always_inline
    fn min_m(self) -> SIMD[dtype, 1]:
        let i = self.m_index
        return self.coords[i]

    @always_inline
    fn max_m(self) -> SIMD[dtype, 1]:
        let i = Self.point_simd_dims + Self.m_index
        return self.coords[i]

    fn dims(self) -> Int:
        """Number of coordinate dimensions (2-4), from the ogc_dims enum."""
        return len(self.ogc_dims)

    fn set_ogc_dims(inout self, ogc_dims: CoordDims):
        """
        Setter for ogc_dims enum. May be only be useful if the Point constructor with variadic list of coordinate values.
        (ex: when Point Z vs Point M is ambiguous.

        Only allows swapping between same-arity variants (e.g. Z <-> M,
        both 3-dimensional); asserts in debug builds otherwise.
        """
        debug_assert(
            len(self.ogc_dims) == 3 and len(ogc_dims) == 3,
            "Unsafe change of dimension number",
        )
        self.ogc_dims = ogc_dims

    fn has_height(self) -> Bool:
        return (self.ogc_dims == CoordDims.PointZ) or (
            self.ogc_dims == CoordDims.PointZM
        )

    fn has_measure(self) -> Bool:
        return (self.ogc_dims == CoordDims.PointM) or (
            self.ogc_dims == CoordDims.PointZM
        )

    fn is_empty(self) -> Bool:
        # True when lanes hold the empty sentinel (see empty_value / is_empty).
        return is_empty[dtype](self.coords)

    fn envelope[dtype: DType = dtype](self) -> Self:
        """
        Geometric trait.

        An envelope's envelope is itself.
        """
        return self

    fn wkt(self) -> String:
        """
        TODO: wkt.
        POLYGON ((xmin ymin, xmax ymin, xmax ymax, xmin ymax, xmin ymin)).

        NOTE(review): currently returns the literal template string, not
        the actual coordinate values.
        """
        return "POLYGON ((xmin ymin, xmax ymin, xmax ymax, xmin ymax, xmin ymin))"
| mogeo/mogeo/geom/envelope.mojo | false |
struct GeometryCollection:
    """OGC GeometryCollection. Placeholder — not yet implemented."""

    pass
| mogeo/mogeo/geom/geometry_collection.mojo | false |
from math.limit import max_finite
from tensor import Tensor
from .traits import Dimensionable
from .enums import CoordDims
@value
struct Layout[dtype: DType = DType.float64, offset_dtype: DType = DType.uint32](
    Sized, Dimensionable
):
    """
    Memory layout inspired by, but not exactly following, the GeoArrow format.

    Coordinates are stored column-oriented: the tensor is
    (num_dims x num_coords). Offset vectors delimit geometries, parts, and
    rings for multi-part geometry types.

    ### Spec

    https://geoarrow.org
    """

    # Row indices into coordinates.shape(): row 0 = dimension axis (x/y/z/m),
    # row 1 = feature (coordinate) axis.
    alias dimensions_idx = 0
    alias features_idx = 1

    var coordinates: Tensor[dtype]
    var geometry_offsets: Tensor[offset_dtype]
    var part_offsets: Tensor[offset_dtype]
    var ring_offsets: Tensor[offset_dtype]
    var ogc_dims: CoordDims

    fn __init__(
        inout self,
        ogc_dims: CoordDims = CoordDims.Point,
        coords_size: Int = 0,
        geoms_size: Int = 0,
        parts_size: Int = 0,
        rings_size: Int = 0,
    ):
        """
        Create column-oriented tensor: rows (dims) x cols (coords), plus offsets vectors.
        """
        # Warn (not error) when offset_dtype cannot index every coordinate.
        if max_finite[offset_dtype]() < coords_size:
            print(
                "Warning: offset_dtype parameter not large enough for coords_size arg.",
                offset_dtype,
                coords_size,
            )
        self.ogc_dims = ogc_dims
        self.coordinates = Tensor[dtype](len(ogc_dims), coords_size)
        self.geometry_offsets = Tensor[offset_dtype](geoms_size)
        self.part_offsets = Tensor[offset_dtype](parts_size)
        self.ring_offsets = Tensor[offset_dtype](rings_size)

    fn __eq__(self, other: Self) -> Bool:
        """
        Check equality of coordinates and offsets vs other.

        NOTE(review): ogc_dims is not compared — presumably because the
        tensor shape already encodes the dimension count; confirm that
        Z-vs-M layouts should compare equal.
        """
        if (
            self.coordinates == other.coordinates
            and self.geometry_offsets == other.geometry_offsets
            and self.part_offsets == other.part_offsets
            and self.ring_offsets == other.ring_offsets
        ):
            return True
        return False

    fn __ne__(self, other: Self) -> Bool:
        """
        Check in-equality of coordinates and offsets vs other.
        """
        return not self == other

    fn __len__(self) -> Int:
        """
        Length is the number of coordinates (constructor's `coords_size` argument)
        """
        return self.coordinates.shape()[self.features_idx]

    fn dims(self) -> Int:
        """
        Num dimensions (X, Y, Z, M, etc). Derived from the coordinates
        tensor's first shape axis (set from `len(ogc_dims)` at construction).
        """
        return self.coordinates.shape()[self.dimensions_idx]

    fn has_height(self) -> Bool:
        return self.ogc_dims == CoordDims.PointZ or self.ogc_dims == CoordDims.PointZM

    fn has_measure(self) -> Bool:
        return self.ogc_dims == CoordDims.PointM or self.ogc_dims == CoordDims.PointZM

    fn set_ogc_dims(inout self, ogc_dims: CoordDims):
        """
        Setter for ogc_dims enum. May be only be useful if the Point constructor with variadic list of coordinate values.
        (ex: when Point Z vs Point M is ambiguous.

        Only allows swapping between variants of equal arity; asserts in
        debug builds otherwise.
        """
        debug_assert(
            len(self.ogc_dims) == len(ogc_dims), "Unsafe change of dimension number"
        )
        self.ogc_dims = ogc_dims
| mogeo/mogeo/geom/layout.mojo | false |
<filename>mogeo/mogeo/geom/line_string.mojo
from tensor import Tensor, TensorSpec, TensorShape
from utils.index import Index
from utils.vector import DynamicVector
from memory import memcmp
from python import Python
from mogeo.serialization import WKTParser, JSONParser
from mogeo.geom.point import Point
from mogeo.geom.layout import Layout
from mogeo.geom.enums import CoordDims
from mogeo.geom.empty import is_empty, empty_value
from mogeo.geom.traits import Geometric, Emptyable
from mogeo.serialization.traits import WKTable, JSONable, Geoarrowable
from mogeo.serialization import (
WKTParser,
JSONParser,
)
@value
struct LineString[dtype: DType = DType.float64](
    CollectionElement,
    Emptyable,
    # Geoarrowable,
    Geometric,
    JSONable,
    Sized,
    Stringable,
    # WKTable,
):
    """
    Models an OGC-style LineString.

    A LineString consists of a sequence of two or more vertices along with all points along the linearly-interpolated
    curves (line segments) between each pair of consecutive vertices. Consecutive vertices may be equal.

    The line segments in the line may intersect each other (in other words, the linestring may "curl back" in itself and
    self-intersect).

    - Linestrings with exactly two identical points are invalid.
    - Linestrings must have either 0 or 2 or more points.
    - If these conditions are not met, the constructors raise an Error.
    """

    # Column-oriented storage: coordinates tensor is (dims x num_points).
    # (@value also synthesizes the memberwise __init__(data) constructor;
    # FIX: removed the dead, misspelled `fn __init(...)` which shadowed it.)
    var data: Layout[dtype]

    fn __init__(inout self):
        """
        Construct empty LineString.
        """
        self.data = Layout[dtype]()

    fn __init__(inout self, *points: Point[dtype]):
        """
        Construct `LineString` from variadic list of `Point`.

        FIX: removed a misleading `debug_assert(len(points) > 0,
        "unreachable")` that contradicted the handled zero-point case, and
        the layout now preserves the sample point's ogc_dims (consistent
        with MultiPoint) instead of always defaulting to 2-D.
        """
        let n = len(points)
        if n == 0:
            # empty linestring
            self.data = Layout[dtype]()
            return
        # Sample the first point as prototype for dimensionality.
        let sample_pt = points[0]
        let dims = len(sample_pt)
        self.data = Layout[dtype](ogc_dims=sample_pt.ogc_dims, coords_size=n)
        for y in range(dims):
            for x in range(n):
                self.data.coordinates[Index(y, x)] = points[x].coords[y]

    fn __init__(inout self, points: DynamicVector[Point[dtype]]):
        """
        Construct `LineString` from a vector of `Points`.
        """
        # here the geometry_offsets, part_offsets, and ring_offsets are unused because
        # of using "struct coordinate representation" (tensor)
        let n = len(points)
        if n == 0:
            # empty linestring
            self.data = Layout[dtype]()
            return
        let sample_pt = points[0]
        let dims = len(sample_pt)
        # FIX: preserve the sample point's ogc_dims (consistent with MultiPoint).
        self.data = Layout[dtype](ogc_dims=sample_pt.ogc_dims, coords_size=n)
        for y in range(dims):
            for x in range(n):
                self.data.coordinates[Index(y, x)] = points[x].coords[y]

    @staticmethod
    fn empty(dims: CoordDims = CoordDims.Point) -> Self:
        # NOTE(review): `dims` is currently ignored — the empty layout
        # defaults to 2-D. Confirm whether empty Z/M linestrings are needed.
        return Self()

    fn __len__(self) -> Int:
        """
        Return the number of Point elements.
        """
        return self.data.coordinates.shape()[1]

    fn dims(self) -> Int:
        """Number of coordinate dimensions (2-4)."""
        return len(self.data.ogc_dims)

    fn __eq__(self, other: Self) -> Bool:
        return self.data == other.data

    fn __ne__(self, other: Self) -> Bool:
        return not self.__eq__(other)

    fn __repr__(self) -> String:
        return (
            "LineString ["
            + str(self.data.ogc_dims)
            + ", "
            + dtype.__str__()
            + "]("
            + String(len(self))
            + " points)"
        )

    fn __getitem__(self: Self, feature_index: Int) -> Point[dtype]:
        """
        Get Point from LineString at index.
        """
        var result = Point[dtype]()
        for i in range(self.dims()):
            result.coords[i] = self.data.coordinates[Index(i, feature_index)]
        return result

    fn has_height(self) -> Bool:
        return self.data.has_height()

    fn has_measure(self) -> Bool:
        return self.data.has_measure()

    fn set_ogc_dims(inout self, ogc_dims: CoordDims):
        """
        Setter for ogc_dims enum. May be only be useful if the Point constructor with variadic list of coordinate values.
        (ex: when Point Z vs Point M is ambiguous.
        """
        debug_assert(
            len(self.data.ogc_dims) == len(ogc_dims),
            "Unsafe change of dimension number",
        )
        self.data.set_ogc_dims(ogc_dims)

    fn is_valid(self, inout err: String) -> Bool:
        """
        Validate geometry. When False, sets the `err` string with a condition.

        - Linestrings with exactly two identical points are invalid.
        - Linestrings must have either 0 or 2 or more points.
        - LineStrings must not be closed: try LinearRing.
        """
        if self.is_empty():
            return True
        let n = len(self)
        if n == 2 and self[0] == self[1]:
            err = "LineStrings with exactly two identical points are invalid."
            return False
        if n == 1:
            err = "LineStrings must have either 0 or 2 or more points."
            return False
        if self.is_closed():
            # BUG FIX: previously set `err` but returned True, so closed
            # linestrings were reported as valid.
            err = "LineStrings must not be closed: try LinearRing."
            return False
        return True

    @staticmethod
    fn from_json(json_dict: PythonObject) raises -> Self:
        """
        Construct `LineString` from GeoJSON Python dictionary.
        """
        var json_coords = json_dict.get("coordinates", Python.none())
        if not json_coords:
            raise Error("LineString.from_json(): coordinates property missing in dict.")
        var points = DynamicVector[Point[dtype]]()
        for coords in json_coords:
            # GeoJSON positions are [lon, lat, ...]; only X/Y are read here.
            let lon = coords[0].to_float64().cast[dtype]()
            let lat = coords[1].to_float64().cast[dtype]()
            let pt = Point[dtype](lon, lat)
            points.push_back(pt)
        return LineString[dtype](points)

    @staticmethod
    fn from_json(json_str: String) raises -> Self:
        """
        Construct `LineString` from GeoJSON serialized string.
        """
        let json_dict = JSONParser.parse(json_str)
        return Self.from_json(json_dict)

    fn __str__(self) -> String:
        return self.__repr__()

    fn json(self) raises -> String:
        """
        Serialize `LineString` to GeoJSON. Coordinates of LineString are an array of positions.

        ### Spec

        - https://geojson.org
        - https://datatracker.ietf.org/doc/html/rfc7946

        ```json
        {
            "type": "LineString",
            "coordinates": [
                [100.0, 0.0],
                [101.0, 1.0]
            ]
        }
        ```
        """
        if self.data.ogc_dims.value > CoordDims.PointZ.value:
            raise Error(
                "GeoJSON only allows dimensions X, Y, and optionally Z (RFC 7946)"
            )
        let dims = self.dims()
        let n = len(self)
        var res = String('{"type":"LineString","coordinates":[')
        for i in range(n):
            let pt = self[i]
            res += "["
            # At most 3 dims (X, Y, Z) per the RFC 7946 check above.
            for dim in range(3):
                if dim > dims - 1:
                    break
                res += pt[dim]
                if dim < dims - 1:
                    res += ","
            res += "]"
            if i < n - 1:
                res += ","
        res += "]}"
        return res

    fn wkt(self) -> String:
        """Serialize to WKT, e.g. `LINESTRING(x1 y1, x2 y2)`."""
        if self.is_empty():
            return "LINESTRING EMPTY"
        let dims = self.dims()
        var res = String("LINESTRING(")
        let n = len(self)
        for i in range(n):
            let pt = self[i]
            for j in range(dims):
                res += pt.coords[j]
                if j < dims - 1:
                    res += " "
            if i < n - 1:
                res += ", "
        res += ")"
        return res

    fn is_closed(self) -> Bool:
        """
        If LineString is closed (0 and n-1 points are equal), it's not valid: a LinearRing should be used instead.
        """
        let n = len(self)
        if n == 1:
            return False
        let start_pt = self[0]
        let end_pt = self[n - 1]
        return start_pt == end_pt

    fn is_empty(self) -> Bool:
        return len(self) == 0
| mogeo/mogeo/geom/line_string.mojo | false |
struct MultiLineString:
    """OGC MultiLineString. Placeholder — not yet implemented."""

    pass
| mogeo/mogeo/geom/multi_line_string.mojo | false |
from tensor import Tensor, TensorSpec, TensorShape
from utils.index import Index
from utils.vector import DynamicVector
from memory import memcmp
from python import Python
from mogeo.serialization import WKTParser, JSONParser
from mogeo.geom.layout import Layout
from mogeo.geom.empty import is_empty, empty_value
from mogeo.geom.traits import Geometric, Emptyable
from mogeo.geom.point import Point
from mogeo.geom.enums import CoordDims
from mogeo.serialization.traits import WKTable, JSONable, Geoarrowable
from mogeo.serialization import (
WKTParser,
JSONParser,
)
@value
struct MultiPoint[dtype: DType = DType.float64](
    CollectionElement,
    Emptyable,
    Geoarrowable,
    Geometric,
    JSONable,
    Sized,
    Stringable,
    WKTable,
):
    """
    Models an OGC-style MultiPoint. Any collection of Points is a valid MultiPoint,
    except [heterogeneous dimension multipoints](https://geoarrow.org/format) which are unsupported.
    """

    var data: Layout[dtype]

    fn __init__(inout self):
        """
        Construct empty MultiPoint.
        """
        self.data = Layout[dtype]()

    fn __init__(inout self, data: Layout[dtype]):
        """
        Construct MultiPoint from an existing coordinate Layout.

        (Fixed: this was previously misspelled `__init`, so `Self(data)` calls
        silently relied on the initializer synthesized by `@value`.)
        """
        self.data = data

    fn __init__(inout self, *points: Point[dtype]):
        """
        Construct `MultiPoint` from a variadic list of `Points`.
        """
        debug_assert(len(points) > 0, "unreachable")
        let n = len(points)
        # Sample the 1st point as a prototype to get the dimension count.
        let sample_pt = points[0]
        let dims = len(sample_pt)
        self.data = Layout[dtype](ogc_dims=sample_pt.ogc_dims, coords_size=n)
        for y in range(dims):
            for x in range(n):
                self.data.coordinates[Index(y, x)] = points[x].coords[y]

    fn __init__(inout self, points: DynamicVector[Point[dtype]]):
        """
        Construct `MultiPoint` from a vector of `Point`.
        """
        let n = len(points)
        if len(points) == 0:
            # Early out with an empty MultiPoint.
            self.data = Layout[dtype]()
            return
        # Sample the 1st point as a prototype to get the dimension count.
        let sample_pt = points[0]
        let dims = len(sample_pt)
        self.data = Layout[dtype](ogc_dims=sample_pt.ogc_dims, coords_size=n)
        for dim in range(dims):
            for i in range(n):
                let value = points[i].coords[dim]
                self.data.coordinates[Index(dim, i)] = value

    @staticmethod
    fn from_json(json_dict: PythonObject) raises -> Self:
        """
        Construct `MultiPoint` from GeoJSON (Python dictionary).
        """
        let json_coords = json_dict["coordinates"]
        let n = int(json_coords.__len__())
        # TODO: type checking of json_dict (coordinates property exists)
        # Dimension count is taken from the first position (2-D or 3-D only).
        let dims = json_coords[0].__len__().to_float64().to_int()
        let ogc_dims = CoordDims.PointZ if dims == 3 else CoordDims.Point
        var data = Layout[dtype](ogc_dims, coords_size=n)
        for dim in range(dims):
            for i in range(n):
                let point = json_coords[i]
                # TODO: bounds check of geojson point
                let value = point[dim].to_float64().cast[dtype]()
                data.coordinates[Index(dim, i)] = value
        return Self(data)

    @staticmethod
    fn from_json(json_str: String) raises -> Self:
        """
        Construct `MultiPoint` from GeoJSON serialized string.
        """
        let json_dict = JSONParser.parse(json_str)
        return Self.from_json(json_dict)

    @staticmethod
    fn from_wkt(wkt: String) raises -> Self:
        """
        Construct `MultiPoint` from a WKT string (parsed via shapely).
        """
        let geometry_sequence = WKTParser.parse(wkt)
        # TODO: validate PythonObject is a class MultiPoint https://shapely.readthedocs.io/en/stable/reference/shapely.MultiPoint.html
        let n = geometry_sequence.geoms.__len__().to_float64().to_int()
        if n == 0:
            return Self()
        # Sample the 1st geometry to get the dimension count.
        let sample_pt = geometry_sequence.geoms[0]
        let coords_tuple = sample_pt.coords[0]
        let dims = coords_tuple.__len__().to_float64().to_int()
        let ogc_dims = CoordDims.PointZ if dims == 3 else CoordDims.Point
        var data = Layout[dtype](ogc_dims, coords_size=n)
        for y in range(dims):
            for x in range(n):
                let geom = geometry_sequence.geoms[x]
                let coords_tuple = geom.coords[0]
                let value = coords_tuple[y].to_float64().cast[dtype]()
                data.coordinates[Index(y, x)] = value
        return Self(data)

    @staticmethod
    fn from_geoarrow(table: PythonObject) raises -> Self:
        """
        Create MultiPoint from a geoarrow / pyarrow table with a geometry column.
        (Incomplete: currently validates the chunk and returns an empty MultiPoint.)
        """
        let ga = Python.import_module("geoarrow.pyarrow")
        let geoarrow = ga.as_geoarrow(table["geometry"])
        let chunk = geoarrow[0]
        let n = chunk.value.__len__()
        # TODO: inspect first point to see number of dims (same as in from_wkt above)
        if n > 2:
            raise Error("Invalid Point dims parameter vs. geoarrow: " + str(n))
        # TODO: add to Layout
        # return result
        return Self()

    @staticmethod
    fn empty(ogc_dims: CoordDims = CoordDims.Point) -> Self:
        # NOTE(review): ogc_dims is currently ignored; the empty Layout uses
        # its own default dimensionality.
        return Self()

    fn __len__(self) -> Int:
        """
        Returns the number of Point elements.
        """
        return self.data.coordinates.shape()[1]

    fn __eq__(self, other: Self) -> Bool:
        return self.data == other.data

    fn __ne__(self, other: Self) -> Bool:
        return not self.__eq__(other)

    fn __repr__(self) -> String:
        return (
            "MultiPoint ["
            + str(self.data.ogc_dims)
            + ", "
            + str(dtype)
            + "]("
            + String(len(self))
            + " points)"
        )

    fn dims(self) -> Int:
        return len(self.data.ogc_dims)

    fn has_height(self) -> Bool:
        return self.data.has_height()

    fn has_measure(self) -> Bool:
        return self.data.has_measure()

    fn set_ogc_dims(inout self, ogc_dims: CoordDims):
        """
        Setter for the ogc_dims enum. Useful when Point Z vs Point M is
        ambiguous, since both are 3-dimensional. The dimension count itself
        must not change.
        """
        debug_assert(
            len(self.data.ogc_dims) == len(ogc_dims),
            "Unsafe change of dimension number",
        )
        self.data.set_ogc_dims(ogc_dims)

    fn __getitem__(self: Self, feature_index: Int) -> Point[dtype]:
        """
        Get Point from MultiPoint at index.
        """
        var point = Point[dtype](self.data.ogc_dims)
        for dim_index in range(self.dims()):
            point.coords[dim_index] = self.data.coordinates[
                Index(dim_index, feature_index)
            ]
        return point

    fn __str__(self) -> String:
        return self.__repr__()

    fn json(self) raises -> String:
        """
        Serialize `MultiPoint` to GeoJSON. Coordinates of MultiPoint are an array of positions.

        ### Spec

        - https://geojson.org
        - https://datatracker.ietf.org/doc/html/rfc7946

        ```json
        {
             "type": "MultiPoint",
             "coordinates": [
                 [100.0, 0.0],
                 [101.0, 1.0]
             ]
        }
        ```

        ### Raises Error

        For Point M / Point ZM data: GeoJSON allows only X, Y, and optionally Z (RFC 7946).
        """
        if self.data.ogc_dims.value > CoordDims.PointZ.value:
            raise Error(
                "GeoJSON only allows dimensions X, Y, and optionally Z (RFC 7946)"
            )
        let n = len(self)
        let dims = self.data.dims()
        var res = String('{"type":"MultiPoint","coordinates":[')
        for i in range(n):
            let pt = self[i]
            res += "["
            for dim in range(dims):
                res += pt[dim]
                if dim < dims - 1:
                    res += ","
            res += "]"
            if i < n - 1:
                res += ","
        res += "]}"
        return res

    fn wkt(self) -> String:
        """
        Serialize `MultiPoint` to WKT, e.g. "MULTIPOINT (x1 y1, x2 y2)".
        Empty MultiPoints serialize as "MULTIPOINT EMPTY".
        """
        if self.is_empty():
            return "MULTIPOINT EMPTY"
        let dims = self.data.dims()
        var res = String("MULTIPOINT (")
        let n = len(self)
        for i in range(n):
            let pt = self[i]
            for dim in range(dims):
                # Fixed: previously indexed `pt[dims]`, which read one past the
                # last dimension for every coordinate instead of the dim-th
                # value.
                res += pt[dim]
                if dim < dims - 1:
                    res += " "
            if i < n - 1:
                res += ", "
        res += ")"
        return res

    fn is_empty(self) -> Bool:
        return len(self) == 0

    fn geoarrow(self) -> PythonObject:
        # TODO: geoarrow
        return PythonObject()
| mogeo/mogeo/geom/multi_point.mojo | false |
from python import Python
from math import nan, isnan
from math.limit import max_finite
from mogeo.geom.empty import empty_value, is_empty
from mogeo.geom.envelope import Envelope
from mogeo.serialization.traits import WKTable, JSONable, Geoarrowable
from mogeo.serialization import (
WKTParser,
JSONParser,
)
from .traits import Geometric, Emptyable
from .enums import CoordDims
# Convenience aliases for common Point precisions.
alias Point64 = Point[DType.float64]
alias Point32 = Point[DType.float32]
alias Point16 = Point[DType.float16]
@value
@register_passable("trivial")
struct Point[dtype: DType = DType.float64](
    CollectionElement,
    Geoarrowable,
    Geometric,
    Emptyable,
    JSONable,
    Sized,
    Stringable,
    WKTable,
):
    """
    Point is a register-passable (copy-efficient) struct holding 2 or more dimension values.

    ### Parameters

    - dtype: supports any float or integer type (default = float64)

    ### Memory Layouts

    Some examples of memory layout using Mojo SIMD[dtype, 4] value:

    ```txt
    ```
    """

    alias simd_dims = 4
    alias x_index = 0
    alias y_index = 1
    alias z_index = 2
    alias m_index = 3

    var coords: SIMD[dtype, Self.simd_dims]
    var ogc_dims: CoordDims

    #
    # Constructors (in addition to @value's member-wise init)
    #
    fn __init__(dims: CoordDims = CoordDims.Point) -> Self:
        """
        Create Point with empty values.
        """
        let empty = empty_value[dtype]()
        let coords = SIMD[dtype, Self.simd_dims](empty)
        return Self {coords: coords, ogc_dims: dims}

    fn __init__(*coords_list: SIMD[dtype, 1]) -> Self:
        """
        Create Point from variadic list of SIMD values. Any missing elements are padded with empty values.

        Three arguments are interpreted as Point Z; use `set_ogc_dims` afterwards
        when the value is actually a Point M (measure).
        """
        let empty = empty_value[dtype]()
        var coords = SIMD[dtype, Self.simd_dims](empty)
        var ogc_dims = CoordDims.Point
        let n = len(coords_list)
        for i in range(Self.simd_dims):
            if i < n:
                coords[i] = coords_list[i]
        if n == 3:
            ogc_dims = CoordDims.PointZ
            # workaround in case this is a Point M (measure). Duplicate the measure value in index 2 and 3.
            coords[Self.m_index] = coords[Self.z_index]
        elif n >= 4:
            ogc_dims = CoordDims.PointZM
        return Self {coords: coords, ogc_dims: ogc_dims}

    fn __init__(
        coords: SIMD[dtype, Self.simd_dims], dims: CoordDims = CoordDims.Point
    ) -> Self:
        """
        Create Point from existing SIMD vector of coordinates.
        """
        return Self {coords: coords, ogc_dims: dims}

    #
    # Static constructor methods.
    #
    @staticmethod
    fn from_json(json_dict: PythonObject) raises -> Self:
        """
        Construct `Point` from GeoJSON (Python dictionary).
        """
        # TODO: type checking of json_dict
        # TODO: bounds checking of coords_len
        let json_coords = json_dict["coordinates"]
        let coords_len = int(json_coords.__len__())
        var result = Self()
        for i in range(coords_len):
            result.coords[i] = json_coords[i].to_float64().cast[dtype]()
        return result

    @staticmethod
    fn from_json(json_str: String) raises -> Self:
        """
        Construct `Point` from GeoJSON serialized string.
        """
        let json_dict = JSONParser.parse(json_str)
        return Self.from_json(json_dict)

    @staticmethod
    fn from_wkt(wkt: String) raises -> Self:
        """
        Construct `Point` from a WKT string (parsed via shapely).
        """
        var result = Self()
        let geos_pt = WKTParser.parse(wkt)
        let coords_tuple = geos_pt.coords[0]
        let coords_len = coords_tuple.__len__().to_float64().to_int()
        for i in range(coords_len):
            result.coords[i] = coords_tuple[i].to_float64().cast[dtype]()
        return result

    @staticmethod
    fn from_geoarrow(table: PythonObject) raises -> Self:
        """
        Create Point from geoarrow / pyarrow table with geometry column.
        """
        let ga = Python.import_module("geoarrow.pyarrow")
        let geoarrow = ga.as_geoarrow(table["geometry"])
        let chunk = geoarrow[0]
        let n = chunk.value.__len__()
        if n > Self.simd_dims:
            raise Error("Invalid Point dims parameter vs. geoarrow: " + str(n))
        var result = Self()
        for dim in range(n):
            let val = chunk.value[dim].as_py().to_float64().cast[dtype]()
            result.coords[dim] = val
        return result

    @staticmethod
    fn empty(dims: CoordDims = CoordDims.Point) -> Self:
        """
        Emptyable trait.
        """
        return Self.__init__(dims)

    #
    # Getters/Setters
    #
    fn set_ogc_dims(inout self, ogc_dims: CoordDims):
        """
        Setter for ogc_dims enum. Useful when Point Z vs Point M is ambiguous,
        since both are 3-dimensional (the variadic constructor defaults to
        Point Z). The dimension count itself must not change.
        """
        debug_assert(
            len(self.ogc_dims) == len(ogc_dims), "Unsafe change of dimension number"
        )
        self.ogc_dims = ogc_dims

    fn dims(self) -> Int:
        return len(self.ogc_dims)

    fn has_height(self) -> Bool:
        return (self.ogc_dims == CoordDims.PointZ) or (
            self.ogc_dims == CoordDims.PointZM
        )

    fn has_measure(self) -> Bool:
        return (self.ogc_dims == CoordDims.PointM) or (
            self.ogc_dims == CoordDims.PointZM
        )

    fn is_empty(self) -> Bool:
        return is_empty[dtype](self.coords)

    @always_inline
    fn x(self) -> SIMD[dtype, 1]:
        """
        Get the x value (0 index).
        """
        return self.coords[self.x_index]

    @always_inline
    fn y(self) -> SIMD[dtype, 1]:
        """
        Get the y value (1 index).
        """
        return self.coords[self.y_index]

    @always_inline
    fn z(self) -> SIMD[dtype, 1]:
        """
        Get the z or altitude value (2 index).
        """
        return self.coords[self.z_index]

    @always_inline
    fn alt(self) -> SIMD[dtype, 1]:
        """
        Get the z or altitude value (2 index).
        """
        return self.z()

    @always_inline
    fn m(self) -> SIMD[dtype, 1]:
        """
        Get the measure value (3 index).
        """
        return self.coords[self.m_index]

    fn envelope(self) -> Envelope[dtype]:
        return Envelope[dtype](self)

    fn __len__(self) -> Int:
        """
        Returns the number of non-empty dimensions.
        """
        return self.dims()

    fn __getitem__(self, d: Int) -> SIMD[dtype, 1]:
        """
        Get the value of coordinate at this dimension.
        Out-of-range dimensions yield the empty sentinel value.
        """
        return self.coords[d] if d < Self.simd_dims else empty_value[dtype]()

    fn __eq__(self, other: Self) -> Bool:
        # NaN is used as empty value, so here cannot simply compare with __eq__ on the SIMD values.
        @unroll
        for i in range(Self.simd_dims):
            if is_empty(self.coords[i]) and is_empty(other.coords[i]):
                pass  # equality at index i
            else:
                if is_empty(self.coords[i]) or is_empty(other.coords[i]):
                    return False  # early out: one or the other is empty (but not both) -> not equal
                if self.coords[i] != other.coords[i]:
                    return False  # not equal
        return True  # equal

    fn __ne__(self, other: Self) -> Bool:
        return not self.__eq__(other)

    fn __repr__(self) -> String:
        let point_variant = str(self.ogc_dims)
        var res = point_variant + " [" + dtype.__str__() + "]("
        for i in range(Self.simd_dims):
            res += str(self.coords[i])
            if i < Self.simd_dims - 1:
                res += ", "
        res += ")"
        return res

    fn __str__(self) -> String:
        return self.__repr__()

    fn json(self) raises -> String:
        """
        Serialize `Point` to GeoJSON.

        ### Raises Error

        For Point M / Point ZM: GeoJSON allows only X, Y, and optionally Z (RFC 7946).
        """
        if self.ogc_dims.value > CoordDims.PointZ.value:
            raise Error(
                "GeoJSON only allows dimensions X, Y, and optionally Z (RFC 7946)"
            )
        # include only x, y, and optionally z (height); dims is at most 3 here,
        # so the previous `if i > 3: break` guard was unreachable and has been
        # removed.
        var res = String('{"type":"Point","coordinates":[')
        let dims = 3 if self.has_height() else 2
        for i in range(dims):
            res += self.coords[i]
            if i < dims - 1:
                res += ","
        res += "]}"
        return res

    fn wkt(self) -> String:
        """
        Serialize `Point` to WKT, e.g. "Point Z (x y z)".
        Empty Points serialize as "POINT EMPTY".
        """
        if self.is_empty():
            return "POINT EMPTY"
        var result = str(self.ogc_dims) + " ("
        result += str(self.x()) + " " + str(self.y())
        if self.ogc_dims == CoordDims.PointZ or self.ogc_dims == CoordDims.PointZM:
            result += " " + str(self.z())
        if self.ogc_dims == CoordDims.PointM or self.ogc_dims == CoordDims.PointZM:
            result += " " + str(self.m())
        result += ")"
        return result

    fn geoarrow(self) -> PythonObject:
        # TODO: geoarrow()
        return None
| mogeo/mogeo/geom/point.mojo | false |
<filename>mogeo/mogeo/geom/traits.mojo
from .enums import CoordDims
from .envelope import Envelope
trait Dimensionable:
    fn dims(self) -> Int:
        """
        Number of coordinate dimensions.
        """
        ...

    fn has_height(self) -> Bool:
        """
        True when a Z (height) dimension is present.
        """
        ...

    fn has_measure(self) -> Bool:
        """
        True when an M (measure) dimension is present.
        """
        ...

    fn set_ogc_dims(inout self, ogc_dims: CoordDims):
        """
        Setter for ogc_dims enum. May be useful when the constructor taking a
        variadic list of coordinate values was used (Point Z vs Point M is
        ambiguous).
        """
        ...
trait Emptyable:
    @staticmethod
    fn empty(dims: CoordDims = CoordDims.Point) -> Self:
        """
        Construct an empty instance for the given dimensionality.
        """
        ...

    fn is_empty(self) -> Bool:
        """
        True when the geometry holds no coordinate values.
        """
        ...
trait Geometric(Dimensionable):
    """
    Marker trait for geometry types; extends Dimensionable.
    """

    ...

    # TODO: Geometric trait seems to require parameter support on Traits (TBD mojo version?)
    # fn envelope(self) -> Envelope[dtype]:
    # fn contains(self, other: Self) -> Bool
    # fn contains(self, other: Self) -> Bool
    # fn intersects(self, other: Self) -> Bool
    # fn overlaps(self, other: Self) -> Bool
    # fn disjoint(self, other: Self) -> Bool
    # fn touches(self, other: Self) -> Bool
    # fn intersection(self, other: Self) -> Self
    # fn union(self, other: Self) -> Self
    # fn difference(self, other: Self) -> Self
    # fn buffer(self, size: SIMD[dtype, 1]) -> Self
    # fn convex_hull(self) -> Polygon[dtype]
    # fn simplify(self) -> Self
    # fn centroid(self) -> SIMD[dtype, 1]
    # fn area(self) -> SIMD[dtype, 1]
    # fn length(self) -> SIMD[dtype, 1]
    # fn translate(self, SIMD[dtype, simd_dims]) -> Self
    # fn rotate(self, degrees: SIMD[dtype, 1]) -> Self
| mogeo/mogeo/geom/traits.mojo | false |
from python import Python
from python.object import PythonObject
struct JSONParser:
    @staticmethod
    fn parse(json_str: String) raises -> PythonObject:
        """
        Parse a JSON string into a Python object using the `orjson` package.
        """
        return Python.import_module("orjson").loads(json_str)
| mogeo/mogeo/serialization/json.mojo | false |
trait WKTable:
    """
    Serializable to and from Well Known Text (WKT).

    ### Specs

    - https://libgeos.org/specifications/wkt
    - https://www.ogc.org/standard/sfa/
    - https://www.ogc.org/standard/sfs/
    """

    @staticmethod
    fn from_wkt(wkt: String) raises -> Self:
        """
        Parse a WKT string into this geometry type.
        """
        ...

    fn wkt(self) -> String:
        """
        Serialize this geometry to a WKT string.
        """
        ...
trait JSONable:
    """
    Serializable to and from GeoJSON representation of Point. Point coordinates are in x, y order (easting, northing for
    projected coordinates, longitude, and latitude for geographic coordinates).

    ### Specs

    - https://geojson.org
    - https://datatracker.ietf.org/doc/html/rfc7946
    """

    @staticmethod
    fn from_json(json: PythonObject) raises -> Self:
        """
        Construct from an already-parsed GeoJSON Python dictionary.
        """
        ...

    @staticmethod
    fn from_json(json_str: String) raises -> Self:
        """
        Construct from a GeoJSON serialized string.
        """
        ...

    fn json(self) raises -> String:
        """
        Serialize to GeoJSON format.

        ### Raises Error

        Error is raised for PointM and PointZM, because measure and other higher dimensions are not part of the GeoJSON
        spec.

        > An OPTIONAL third-position element SHALL be the height in meters above or below the WGS 84 reference
        > ellipsoid. (RFC 7946)
        """
        ...
trait Geoarrowable:
    """
    Serializable to and from GeoArrow representation of a Point.

    ### Spec

    - https://geoarrow.org/
    """

    @staticmethod
    fn from_geoarrow(table: PythonObject) raises -> Self:
        """
        Create Point from geoarrow / pyarrow table with geometry column.
        """
        ...

    # TODO: to geoarrow
    # fn geoarrow(self) -> PythonObject:
    #     ...
| mogeo/mogeo/serialization/traits.mojo | false |
from python import Python
from python.object import PythonObject
struct WKTParser:
    @staticmethod
    fn parse(wkt: String) raises -> PythonObject:
        """
        Convert a WKT string to a Shapely geometry via shapely.from_wkt.
        """
        return Python.import_module("shapely").from_wkt(wkt)
| mogeo/mogeo/serialization/wkt.mojo | false |
"""
Serialization module.
"""
from .json import *
from .wkt import *
from .geoarrow import *
| mogeo/mogeo/serialization/__init__.mojo | false |
<filename>mogeo/mogeo/test/constants.mojo
# Shared coordinate fixture values used across the test suite.
# lon/lat appear to be a geographic coordinate in degrees (presumably WGS 84 —
# TODO confirm); height and measure exercise the Z and M dimensions.
let lon = -108.680
let lat = 38.974
let height = 8.0
let measure = 42.0
| mogeo/mogeo/test/constants.mojo | false |
import testing
from pathlib import Path
from python import Python
fn load_geoarrow_test_fixture(path: Path) raises -> PythonObject:
    """
    Reads the geoarrow test data fixture (a Feather file) at path.

    Returns
    -------
    table : pyarrow.Table
        The contents of the Feather file as a pyarrow.Table
    """
    let reader = Python.import_module("pyarrow.feather")
    return reader.read_table(PythonObject(path.__str__()))
| mogeo/mogeo/test/helpers.mojo | false |
<filename>mogeo/mogeo/test/pytest.mojo
import testing
@value
struct MojoTest:
    """
    A utility struct for testing: prints the test name on construction and
    wraps `testing` assertions.
    """

    var test_name: String

    fn __init__(inout self, test_name: String):
        self.test_name = test_name
        print("# " + test_name)

    fn assert_true(self, cond: Bool, message: String):
        """
        Wraps testing.assert_true.
        """
        # NOTE(review): failures are printed, not raised, so a failing
        # assertion does not abort the test run.
        try:
            testing.assert_true(cond, message)
        except e:
            print(e)
| mogeo/mogeo/test/pytest.mojo | false |
<filename>mogeo/mogeo/test/geog/__init__.mojo
"""
Tests for mogeo/geog module.
"""
| mogeo/mogeo/test/geog/__init__.mojo | false |
from mogeo.geom.empty import empty_value, is_empty
from mogeo.test.pytest import MojoTest
fn main() raises:
    """
    Tests for the empty_value / is_empty sentinels across float and integer
    dtypes.
    """
    let test = MojoTest("empty_value")
    let empty_f64 = empty_value[DType.float64]()
    let empty_f32 = empty_value[DType.float32]()
    let empty_f16 = empty_value[DType.float16]()
    let empty_int = empty_value[DType.int32]()
    let empty_uint = empty_value[DType.uint32]()
    test.assert_true(is_empty(empty_f64), "empty_f64")
    test.assert_true(is_empty(empty_f32), "empty_f32")
    test.assert_true(is_empty(empty_f16), "empty_f16")
    test.assert_true(is_empty(empty_int), "empty_int")
    test.assert_true(is_empty(empty_uint), "empty_uint")
    # Ordinary values must not be classified as empty.
    test.assert_true(not is_empty[DType.float64, 1](42), "not empty")
    test.assert_true(not is_empty[DType.uint16, 1](42), "not empty")
| mogeo/mogeo/test/geom/test_empty.mojo | false |
<filename>mogeo/mogeo/test/geom/test_enums_coorddims.mojo
from python import Python
from python.object import PythonObject
from pathlib import Path
from mogeo.geom.empty import empty_value, is_empty
from mogeo.test.pytest import MojoTest
from mogeo.geom.enums import CoordDims
fn main() raises:
    """Entry point: run all CoordDims tests."""
    test_coord_dims()
fn test_coord_dims() raises:
    """Run each CoordDims test case in sequence."""
    test_constructors()
    test_str()
    test_eq()
    test_getters()
    test_len()
fn test_constructors():
    let test = MojoTest("constructors")
    # Arbitrary ND dimension count; only constructibility is exercised here.
    _ = CoordDims(42)
fn test_len():
    let test = MojoTest("len")
    # len() of an ND CoordDims reports the raw dimension count.
    let n = 42
    let pt = CoordDims(n)
    test.assert_true(len(pt) == n, "dims()")
fn test_getters():
    """has_height / has_measure across all four OGC point variants."""
    let test = MojoTest("getters")

    let pt = CoordDims.Point
    test.assert_true(not pt.has_height(), "has_height")
    test.assert_true(not pt.has_measure(), "has_measure")

    let pt_z = CoordDims.PointZ
    test.assert_true(pt_z.has_height(), "has_height")
    test.assert_true(not pt_z.has_measure(), "has_measure")

    # Fixed: the assertion messages for PointM and PointZM were swapped
    # (has_measure checks were labeled "has_height" and vice versa), which
    # made failure output misleading.
    let pt_m = CoordDims.PointM
    test.assert_true(pt_m.has_measure(), "has_measure")
    test.assert_true(not pt_m.has_height(), "has_height")

    let pt_zm = CoordDims.PointZM
    test.assert_true(pt_zm.has_measure(), "has_measure")
    test.assert_true(pt_zm.has_height(), "has_height")
fn test_str() raises:
    """Each CoordDims variant renders its OGC name via __str__."""
    let test = MojoTest("__str__")
    let pt = CoordDims.Point
    test.assert_true(str(pt) == "Point", "__str__")
    let pt_z = CoordDims.PointZ
    test.assert_true(str(pt_z) == "Point Z", "__str__")
    let pt_m = CoordDims.PointM
    test.assert_true(str(pt_m) == "Point M", "__str__")
    let pt_zm = CoordDims.PointZM
    test.assert_true(str(pt_zm) == "Point ZM", "__str__")
    let pt_nd = CoordDims.PointND
    test.assert_true(str(pt_nd) == "Point ND", "__str__")
fn test_eq() raises:
    """Distinct variants compare unequal; same ND value compares equal."""
    let test = MojoTest("__eq__, __ne__")
    let pt = CoordDims.Point
    let pt_z = CoordDims.PointZ
    test.assert_true(pt != pt_z, "__ne__")
    let n = 42
    let pt_nd_a = CoordDims(n)
    let pt_nd_b = CoordDims(n)
    test.assert_true(pt_nd_a == pt_nd_b, "__eq__")
| mogeo/mogeo/test/geom/test_enums_coorddims.mojo | false |
<filename>mogeo/mogeo/test/geom/test_envelope.mojo
from python import Python
from python.object import PythonObject
from utils.vector import DynamicVector
from pathlib import Path
from random import rand
from mogeo.test.pytest import MojoTest
from mogeo.test.constants import lon, lat, height, measure
from mogeo.geom.envelope import Envelope
from mogeo.geom.point import Point
from mogeo.geom.enums import CoordDims
from mogeo.geom.layout import Layout
from mogeo.geom.traits import Geometric, Emptyable
fn main() raises:
    """Entry point: run all Envelope tests."""
    test_envelope()
fn test_envelope() raises:
    """Run each Envelope test case in sequence."""
    test_constructors()
    test_repr()
    test_min_max()
    test_southwesterly_point()
    test_northeasterly_point()
    test_with_geos()
    test_equality_ops()
# test_wkt()
# test_json()
# test_from_json()
# test_from_wkt()
fn test_constructors() raises:
    let test = MojoTest("constructors, aliases")

    # from Point: 2-D, 3-D (Z or M), and 4-D (ZM) variants, plus an integer
    # dtype; only constructibility is exercised here.
    _ = Envelope(Point(lon, lat))
    _ = Envelope(Point(lon, lat, height))
    _ = Envelope(Point(lon, lat, measure))
    _ = Envelope(Point(lon, lat, height, measure))
    _ = Envelope(Point[DType.int8](lon, lat))
    _ = Envelope(Point(lon, lat, height, measure))
# from LineString
# alias Point2_f16 = Point[DType.float16]
# _ = Envelope(
# LineString(
# Point2_f16(lon, lat),
# Point2_f16(lon + 1, lat + 1),
# Point2_f16(lon + 2, lat + 2),
# Point2_f16(lon + 3, lat + 3),
# Point2_f16(lon + 4, lat + 4),
# Point2_f16(lon + 5, lat + 5),
# )
# )
fn test_repr() raises:
    let test = MojoTest("repr")
    # TODO: more variations of envelope structs
    # A point envelope repeats the point as both min and max; unused Z/M
    # slots render as nan.
    let e_pt2 = Envelope(Point(lon, lat))
    test.assert_true(
        e_pt2.__repr__()
        == "Envelope [float64](-108.68000000000001, 38.973999999999997, nan, nan,"
        " -108.68000000000001, 38.973999999999997, nan, nan)",
        "__repr__",
    )
# e = Envelope(
# LineString(Point2(lon, lat), Point2(lon + 1, lat + 1), Point2(lon + 2, lat + 2))
# )
# test.assert_true(
# e.__repr__()
# == "Envelope[float64](-108.68000000000001, 38.973999999999997,"
# " -106.68000000000001, 40.973999999999997)",
# "__repr__",
# )
fn test_min_max() raises:
    let test = MojoTest("min/max methods")
    # Degenerate (single point) envelope: min == max == the point.
    let e_of_pt2 = Envelope(Point(lon, lat))
    test.assert_true(e_of_pt2.min_x() == lon, "min_x")
    test.assert_true(e_of_pt2.min_y() == lat, "min_y")
    test.assert_true(e_of_pt2.max_x() == lon, "max_x")
    test.assert_true(e_of_pt2.max_y() == lat, "max_y")
# let e_of_ls2 = Envelope(
# LineString(
# Point2(lon, lat),
# Point2(lon + 1, lat + 1),
# Point2(lon + 2, lat + 5),
# Point2(lon + 5, lat + 3),
# Point2(lon + 4, lat + 4),
# Point2(lon + 3, lat + 2),
# )
# )
# test.assert_true(e_of_ls2.min_x() == lon, "min_x")
# test.assert_true(e_of_ls2.min_y() == lat, "min_y")
# test.assert_true(e_of_ls2.max_x() == lon + 5, "max_x")
# test.assert_true(e_of_ls2.max_y() == lat + 5, "max_y")
# let e_of_ls3 = Envelope(
# LineStringZ(
# PointZ(lon, lat, height),
# PointZ(lon + 1, lat + 1, height - 1),
# PointZ(lon + 2, lat + 2, height - 2),
# PointZ(lon + 7, lat + 5, height - 5),
# PointZ(lon + 4, lat + 4, height - 4),
# PointZ(lon + 5, lat + 3, height - 3),
# )
# )
# test.assert_true(e_of_ls3.min_x() == lon, "min_x")
# test.assert_true(e_of_ls3.min_y() == lat, "min_y")
# test.assert_true(e_of_ls3.min_z() == height - 5, "min_z")
# test.assert_true(e_of_ls3.max_x() == lon + 7, "max_x")
# test.assert_true(e_of_ls3.max_y() == lat + 5, "max_y")
# test.assert_true(e_of_ls3.max_z() == height, "max_z")
# let e_of_ls4 = Envelope(
# LineString(
# PointZ(lon, lat, height, measure),
# PointZ(lon + 1, lat + 1, height - 1, measure + 0.01),
# PointZ(lon + 2, lat + 2, height - 7, measure + 0.05),
# PointZ(lon + 5, lat + 3, height - 3, measure + 0.03),
# PointZ(lon + 4, lat + 5, height - 4, measure + 0.04),
# PointZ(lon + 3, lat + 4, height - 5, measure + 0.02),
# )
# )
# test.assert_true(e_of_ls4.min_x() == lon, "min_x")
# test.assert_true(e_of_ls4.min_y() == lat, "min_y")
# test.assert_true(e_of_ls4.min_z() == height - 7, "min_z")
# test.assert_true(e_of_ls4.min_m() == measure, "min_m")
# test.assert_true(e_of_ls4.max_x() == lon + 5, "max_x")
# test.assert_true(e_of_ls4.max_y() == lat + 5, "max_y")
# test.assert_true(e_of_ls4.max_z() == height, "max_z")
# test.assert_true(e_of_ls4.max_m() == measure + 0.05, "max_m")
fn test_southwesterly_point() raises:
    let test = MojoTest("southwesterly_point")
    # For a degenerate (single point) envelope, the SW corner is the point itself.
    let e = Envelope(Point(lon, lat))
    let sw_pt = e.southwesterly_point()
    test.assert_true(sw_pt.x() == lon, "southwesterly_point")
    test.assert_true(sw_pt.y() == lat, "southwesterly_point")
fn test_northeasterly_point() raises:
    """For a degenerate (single point) envelope, the NE corner is the point itself."""
    let test = MojoTest("northeasterly_point")
    let e = Envelope(Point(lon, lat))
    # Fixed: the local was previously named `sw_pt`, a copy-paste leftover from
    # the southwesterly test.
    let ne_pt = e.northeasterly_point()
    test.assert_true(ne_pt.x() == lon, "northeasterly_point")
    test.assert_true(ne_pt.y() == lat, "northeasterly_point")
fn test_with_geos() raises:
    """
    Check envelope of complex features using shapely's envelope function.
    """
    let test = MojoTest("shapely/geos")
    # NOTE(review): the fixture-driven assertions below are currently all
    # commented out; this test only imports the Python modules.
    let json = Python.import_module("orjson")
    let builtins = Python.import_module("builtins")
    let shapely = Python.import_module("shapely")
    let envelope = shapely.envelope
    let shape = shapely.geometry.shape
    let mapping = shapely.geometry.mapping
# LineString
# let path = Path("mogeo/test/fixtures/geojson/line_string")
# let fixtures = VariadicList("curved.geojson", "straight.geojson", "zigzag.geojson")
# for i in range(len(fixtures)):
# let file = path / fixtures[i]
# with open(file, "r") as f:
# let geojson = f.read()
# let geojson_dict = json.loads(geojson)
# let geometry = shape(geojson_dict)
# let expect_bounds = geometry.bounds
# let lstr = LineString.from_json(geojson_dict)
# let env = Envelope(lstr)
# for i in range(4):
# test.assert_true(
# env.coords[i].cast[DType.float64]()
# == expect_bounds[i].to_float64(),
# "envelope index:" + String(i),
# )
fn test_equality_ops() raises:
    """
    Test __eq__ and __ne__ methods.
    """
    let test = MojoTest("equality ops")
    # 2-D, 3-D, and 4-D envelopes, each against an equal and an unequal twin.
    let e2 = Envelope(Point(lon, lat))
    let e2_eq = Envelope(Point(lon, lat))
    let e2_ne = Envelope(Point(lon + 0.01, lat - 0.02))
    test.assert_true(e2 == e2_eq, "__eq__")
    test.assert_true(e2 != e2_ne, "__ne__")
    let e3 = Envelope(Point(lon, lat, height))
    let e3_eq = Envelope(Point(lon, lat, height))
    let e3_ne = Envelope(Point(lon, lat, height * 2))
    test.assert_true(e3 == e3_eq, "__eq__")
    test.assert_true(e3 != e3_ne, "__ne__")
    let e4 = Envelope(Point(lon, lat, height, measure))
    let e4_eq = Envelope(Point(lon, lat, height, measure))
    let e4_ne = Envelope(Point(lon, lat, height, measure * 2))
    test.assert_true(e4 == e4_eq, "__eq__")
    test.assert_true(e4 != e4_ne, "__ne__")
| mogeo/mogeo/test/geom/test_envelope.mojo | false |
from tensor import Tensor, TensorSpec, TensorShape
from utils.index import Index
from mogeo.test.pytest import MojoTest
from mogeo.geom.layout import Layout
from mogeo.test.constants import lat, lon, height, measure
from mogeo.geom.enums import CoordDims
fn main() raises:
    """Entry point: run all Layout tests."""
    test_constructors()
    test_equality_ops()
    test_len()
    test_dims()
fn test_constructors() raises:
    """The coordinate tensor is shaped (dims, coords_size)."""
    let test = MojoTest("constructors")
    var n = 10
    # 2x10 (default of 2 dims)
    let layout_a = Layout(coords_size=n)
    var shape = layout_a.coordinates.shape()
    test.assert_true(shape[0] == 2, "2x10 constructor")
    test.assert_true(shape[1] == n, "2x10 constructor")
    # 3x15
    n = 15
    let layout_b = Layout(ogc_dims=CoordDims.PointZ, coords_size=n)
    shape = layout_b.coordinates.shape()
    test.assert_true(shape[0] == 3, "3x15 constructor")
    test.assert_true(shape[1] == n, "3x15 constructor")
    # 4x20
    n = 20
    let layout_c = Layout(ogc_dims=CoordDims.PointZM, coords_size=n)
    shape = layout_c.coordinates.shape()
    test.assert_true(shape[0] == 4, "4x20 constructor")
    test.assert_true(shape[1] == n, "4x20 constructor")
fn test_equality_ops() raises:
    let test = MojoTest("equality ops")
    let n = 20
    # Two identically-filled layouts compare equal; a single differing
    # coordinate flips them to not-equal.
    var ga2 = Layout(coords_size=n, geoms_size=0, parts_size=0, rings_size=0)
    var ga2b = Layout(coords_size=n, geoms_size=0, parts_size=0, rings_size=0)
    for dim in range(2):
        for coord in range(n):
            let idx = Index(dim, coord)
            ga2.coordinates[idx] = 42.0
            ga2b.coordinates[idx] = 42.0
    test.assert_true(ga2 == ga2b, "__eq__")
    ga2.coordinates[Index(0, n - 1)] = 3.14
    test.assert_true(ga2 != ga2b, "__ne__")
fn test_len() raises:
    """len(Layout) reports the coordinate count."""
    let test = MojoTest("__len__")
    let n = 50
    let l = Layout(coords_size=n)
    test.assert_true(len(l) == 50, "__len__")
fn test_dims() raises:
    """A default Layout's dimensionality matches CoordDims.Point."""
    let test = MojoTest("dims")
    let l = Layout(coords_size=10)
    let expect_dims = len(CoordDims.Point)
    test.assert_true(l.dims() == expect_dims, "dims")
| mogeo/mogeo/test/geom/test_layout.mojo | false |
<filename>mogeo/mogeo/test/geom/test_line_string.mojo
from python import Python
from python.object import PythonObject
from utils.vector import DynamicVector
from utils.index import Index
from pathlib import Path
from mogeo.test.pytest import MojoTest
from mogeo.test.constants import lon, lat, height, measure
from mogeo.geom.point import Point, Point64
from mogeo.geom.line_string import LineString
fn main() raises:
    """Entry point: run all LineString tests."""
    test_constructors()
    test_validate()
    test_memory_layout()
    test_get_item()
    test_equality_ops()
    test_repr()
    test_stringable()
    test_emptyable()
    test_wktable()
    test_jsonable()
    test_geoarrowable()
fn test_constructors() raises:
    """LineString construction from a variadic Point list and from a vector."""
    var test = MojoTest("variadic list constructor")
    let lstr = LineString(Point(lon, lat), Point(lon, lat), Point(lon, lat + 1))
    test.assert_true(lstr[0] == Point(lon, lat), "variadic list constructor")
    test.assert_true(lstr[1] == Point(lon, lat), "variadic list constructor")
    test.assert_true(lstr[2] == Point(lon, lat + 1), "variadic list constructor")
    test.assert_true(lstr.__len__() == 3, "variadic list constructor")
    test = MojoTest("vector constructor")
    var points_vec = DynamicVector[Point64](10)
    for n in range(10):
        points_vec.push_back(Point(lon + n, lat - n))
    let lstr2 = LineString[DType.float64](points_vec)
    for n in range(10):
        let expect_pt = Point(lon + n, lat - n)
        test.assert_true(lstr2[n] == expect_pt, "vector constructor")
    test.assert_true(lstr2.__len__() == 10, "vector constructor")
fn test_validate() raises:
    """Each invalid LineString is rejected with the expected error message."""
    let test = MojoTest("is_valid")
    var err = String()
    var valid: Bool = False

    valid = LineString(Point(lon, lat), Point(lon, lat)).is_valid(err)
    test.assert_true(not valid, "is_valid")
    test.assert_true(
        err == "LineStrings with exactly two identical points are invalid.",
        "unexpected error value",
    )

    # Fixed: the remaining cases assigned `valid` but never asserted on it —
    # only the error string was checked. Each invalid LineString is now
    # asserted explicitly.
    valid = LineString(Point(lon, lat)).is_valid(err)
    test.assert_true(not valid, "is_valid")
    test.assert_true(
        err == "LineStrings must have either 0 or 2 or more points.",
        "unexpected error value",
    )

    valid = LineString(
        Point(lon, lat), Point(lon + 1, lat + 1), Point(lon, lat)
    ).is_valid(err)
    test.assert_true(not valid, "is_valid")
    test.assert_true(
        err == "LineStrings must not be closed: try LinearRing.",
        "unexpected error value",
    )
fn test_memory_layout() raises:
    # Test if LineString fills the Layout struct correctly.
    let test = MojoTest("memory_layout")

    # equality check each point by indexing into the LineString.
    var points_vec20 = DynamicVector[Point64](10)
    for n in range(10):
        points_vec20.push_back(Point(lon + n, lat - n))
    let lstr = LineString(points_vec20)
    for n in range(10):
        let expect_pt = Point(lon + n, lat - n)
        test.assert_true(lstr[n] == expect_pt, "memory_layout")

    # here the geometry_offsets, part_offsets, and ring_offsets are unused because
    # of using "struct coordinate representation" (tensor)
    let layout = lstr.data
    test.assert_true(
        layout.geometry_offsets.num_elements() == 0, "geo_arrow geometry_offsets"
    )
    test.assert_true(layout.part_offsets.num_elements() == 0, "geo_arrow part_offsets")
    test.assert_true(layout.ring_offsets.num_elements() == 0, "geo_arrow ring_offsets")
fn test_get_item() raises:
    """Verifies __getitem__ round-trips every point stored in a LineString."""
    let test = MojoTest("get_item")
    var pts = DynamicVector[Point64](10)
    for i in range(10):
        pts.push_back(Point(lon + i, lat - i))
    let line = LineString(pts)
    for i in range(10):
        # Each index must return exactly the point that was pushed.
        test.assert_true(line[i] == Point(lon + i, lat - i), "get_item")
fn test_equality_ops() raises:
    """Exercises LineString __eq__/__ne__, including partial SIMD loads."""
    let test = MojoTest("equality operators")

    # 5 points: exercises the partial simd_load path (n - i < nelts).
    let lstr8 = LineString(
        Point(1, 2), Point(3, 4), Point(5, 6), Point(7, 8), Point(9, 10)
    )
    let lstr9 = LineString(
        Point(1.1, 2.1),
        Point(3.1, 4.1),
        Point(5.1, 6.1),
        Point(7.1, 8.1),
        Point(9.1, 10.1),
    )
    test.assert_true(lstr8 != lstr9, "partial simd_load (n - i < nelts)")

    # Same partial-load path at float32; last point differs only in y.
    alias PointF32 = Point[DType.float32]
    let lstr10 = LineString(
        PointF32(1, 2),
        PointF32(5, 6),
        PointF32(10, 11),
    )
    let lstr11 = LineString(
        PointF32(1, 2),
        PointF32(5, 6),
        PointF32(10, 11.1),
    )
    test.assert_true(lstr10 != lstr11, "partial simd_load (n - i < nelts) (b)")

    # Inequality at float16 precision.
    alias PointF16 = Point[DType.float16]
    let lstr12 = LineString(
        PointF16(1, 2),
        PointF16(5, 6),
        PointF16(10, 11),
    )
    let lstr13 = LineString(
        PointF16(1, 2),
        PointF16(5, 6),
        PointF16(10, 11.1),
    )
    test.assert_true(lstr12 != lstr13, "__ne__")

    # Two LineStrings built from the same vector compare equal.
    var points_vec = DynamicVector[Point64](10)
    for n in range(10):
        points_vec.push_back(Point(lon + n, lat - n))
    let lstr2 = LineString(points_vec)
    let lstr3 = LineString(points_vec)
    test.assert_true(lstr2 == lstr3, "__eq__")

    # Variadic construction: identical argument lists compare equal.
    let lstr4 = LineString(Point(lon, lat), Point(lon, lat), Point(lon, lat + 1))
    let lstr5 = LineString(Point(lon, lat), Point(lon, lat), Point(lon, lat + 1))
    test.assert_true(lstr4 == lstr5, "__eq__")
    let lstr6 = LineString(Point(42, lat), Point(lon, lat))
    test.assert_true(lstr5 != lstr6, "__eq__")
fn test_emptyable() raises:
    """Checks that a default-constructed LineString reports empty."""
    let test = MojoTest("is_empty")
    let empty_lstr = LineString()
    # Previously the result was discarded (`_ = ...`), so the test asserted
    # nothing; assert it like the analogous MultiPoint test does.
    test.assert_true(empty_lstr.is_empty(), "is_empty")
fn test_repr() raises:
    """Checks the __repr__ summary string of a two-point LineString."""
    let test = MojoTest("__repr__")
    let line = LineString(Point(42, lat), Point(lon, lat))
    let expect = "LineString [Point, float64](2 points)"
    test.assert_true(line.__repr__() == expect, "__repr__")
fn test_sized() raises:
    """Checks the Sized conformance (len) of a two-point LineString."""
    let test = MojoTest("sized")
    let line = LineString(Point(42, lat), Point(lon, lat))
    test.assert_true(line.__len__() == 2, "__len__")
fn test_stringable() raises:
    """Checks that __str__ delegates to __repr__."""
    let test = MojoTest("stringable")
    let line = LineString(Point(42, lat), Point(lon, lat))
    test.assert_true(line.__str__() == line.__repr__(), "__str__")
fn test_wktable() raises:
    """Runs the WKT serialization tests (deserialization still disabled)."""
    test_wkt()
    # test_from_wkt()
fn test_wkt() raises:
    """Checks WKT output of a three-point LineString against a literal."""
    let test = MojoTest("wkt")
    let lstr = LineString(Point(lon, lat), Point(lon, lat), Point(lon, lat + 1))
    # Expected string depends on the exact float formatting of lon/lat.
    test.assert_true(
        lstr.wkt()
        == "LINESTRING(-108.68000000000001 38.973999999999997, -108.68000000000001"
        " 38.973999999999997, -108.68000000000001 39.973999999999997)",
        "wkt",
    )
fn test_jsonable() raises:
    """Runs both GeoJSON serialization and deserialization tests."""
    test_json()
    test_from_json()
fn test_json() raises:
    """Checks GeoJSON output of a ten-point LineString against a literal."""
    let test = MojoTest("json")
    var points_vec = DynamicVector[Point64](10)
    for n in range(10):
        points_vec.push_back(Point(lon + n, lat - n))
    let json = LineString(points_vec).json()
    # Expected string depends on the exact float formatting of lon/lat.
    test.assert_true(
        json
        == '{"type":"LineString","coordinates":[[-108.68000000000001,38.973999999999997],[-107.68000000000001,37.973999999999997],[-106.68000000000001,36.973999999999997],[-105.68000000000001,35.973999999999997],[-104.68000000000001,34.973999999999997],[-103.68000000000001,33.973999999999997],[-102.68000000000001,32.973999999999997],[-101.68000000000001,31.973999999999997],[-100.68000000000001,30.973999999999997],[-99.680000000000007,29.973999999999997]]}',
        "json",
    )
fn test_from_json() raises:
    """Smoke-tests LineString.from_json on the GeoJSON fixture files."""
    let test = MojoTest("from_json()")
    let json = Python.import_module("orjson")
    # NOTE(review): `builtins` is imported but never used below — confirm
    # whether it can be removed.
    let builtins = Python.import_module("builtins")
    let path = Path("mogeo/test/fixtures/geojson/line_string")
    let fixtures = VariadicList("curved.geojson", "straight.geojson", "zigzag.geojson")
    for i in range(len(fixtures)):
        let file = path / fixtures[i]
        with open(file.path, "r") as f:
            let geojson = f.read()
            let geojson_dict = json.loads(geojson)
            # Result discarded: this only checks parsing does not raise.
            _ = LineString.from_json(geojson_dict)
fn test_geoarrowable() raises:
    """Placeholder until the geoarrowable trait lands for LineString."""
    # TODO: geoarrowable trait
    pass
| mogeo/mogeo/test/geom/test_line_string.mojo | false |
<filename>mogeo/mogeo/test/geom/test_multi_point.mojo
from python import Python
from python.object import PythonObject
from utils.vector import DynamicVector
from utils.index import Index
from pathlib import Path
from mogeo.test.constants import lat, lon, height, measure
from mogeo.test.pytest import MojoTest
from mogeo.geom.point import Point, Point64
from mogeo.geom.multi_point import MultiPoint
from mogeo.serialization.json import JSONParser
from mogeo.geom.enums import CoordDims
fn main() raises:
    """Test-runner entry point: executes all MultiPoint tests."""
    test_multi_point()
fn test_multi_point() raises:
    """Runs every MultiPoint test case in order."""
    test_constructors()
    test_mem_layout()
    test_get_item()
    test_equality_ops()
    test_is_empty()
    test_repr()
    test_stringable()
    test_wktable()
    test_jsonable()
fn test_constructors() raises:
    """Checks the variadic-list and DynamicVector MultiPoint constructors."""
    var test = MojoTest("variadic list constructor")
    let mpt = MultiPoint(Point(lon, lat), Point(lon, lat), Point(lon, lat + 1))
    test.assert_true(mpt[0] == Point(lon, lat), "variadic list constructor")
    test.assert_true(mpt[1] == Point(lon, lat), "variadic list constructor")
    test.assert_true(mpt[2] == Point(lon, lat + 1), "variadic list constructor")
    test.assert_true(len(mpt) == 3, "variadic list constructor")

    test = MojoTest("vector constructor")
    var points_vec = DynamicVector[Point64](10)
    for n in range(10):
        points_vec.push_back(Point(lon + n, lat - n))
    # Result discarded: only checks construction from a vector succeeds.
    _ = MultiPoint(points_vec)
fn test_mem_layout() raises:
    """
    Test if MultiPoint fills the Layout struct correctly.
    """
    let test = MojoTest("mem layout")
    # Round-trip every point by indexing into the MultiPoint.
    var points_vec = DynamicVector[Point64](10)
    for n in range(10):
        points_vec.push_back(Point(lon + n, lat - n))
    let mpt2 = MultiPoint(points_vec)
    for n in range(10):
        let expect_pt = Point(lon + n, lat - n)
        test.assert_true(mpt2[n] == expect_pt, "test_mem_layout")
    let layout = mpt2.data
    # offsets fields are empty in MultiPoint because of using geo_arrows "struct coordinate representation"
    test.assert_true(
        layout.geometry_offsets.num_elements() == 0, "geo_arrow geometry_offsets"
    )
    test.assert_true(layout.part_offsets.num_elements() == 0, "geo_arrow part_offsets")
    test.assert_true(layout.ring_offsets.num_elements() == 0, "geo_arrow ring_offsets")
fn test_get_item() raises:
    """Verifies __getitem__ round-trips every point stored in a MultiPoint."""
    let test = MojoTest("get_item")
    var pts = DynamicVector[Point64](10)
    for i in range(10):
        pts.push_back(Point(lon + i, lat - i))
    let mpt = MultiPoint(pts)
    for i in range(10):
        # Each index must return exactly the point that was pushed.
        test.assert_true(mpt[i] == Point(lon + i, lat - i), "get_item")
fn test_equality_ops() raises:
    """Exercises MultiPoint __eq__/__ne__, including partial SIMD loads."""
    let test = MojoTest("equality operators")

    # 5 points: exercises the partial simd_load path (n - i < nelts).
    let mpt1 = MultiPoint(
        Point(1, 2), Point(3, 4), Point(5, 6), Point(7, 8), Point(9, 10)
    )
    let mpt2 = MultiPoint(
        Point(1.1, 2.1),
        Point(3.1, 4.1),
        Point(5.1, 6.1),
        Point(7.1, 8.1),
        Point(9.1, 10.1),
    )
    test.assert_true(mpt1 != mpt2, "partial simd_load (n - i < nelts)")

    # Same partial-load path at float32; last point differs only in y.
    alias Point2F32 = Point[DType.float32]
    let mpt5 = MultiPoint(
        Point2F32(1, 2),
        Point2F32(5, 6),
        Point2F32(10, 11),
    )
    let mpt6 = MultiPoint(
        Point2F32(1, 2),
        Point2F32(5, 6),
        Point2F32(10, 11.1),
    )
    test.assert_true(mpt5 != mpt6, "partial simd_load (n - i < nelts) (b)")

    # Inequality at float16 precision.
    alias Point2F16 = Point[DType.float16]
    let mpt7 = MultiPoint(
        Point2F16(1, 2),
        Point2F16(5, 6),
        Point2F16(10, 11),
    )
    let mpt8 = MultiPoint(
        Point2F16(1, 2),
        Point2F16(5, 6),
        Point2F16(10, 11.1),
    )
    test.assert_true(mpt7 != mpt8, "__ne__")

    # Two MultiPoints built from the same vector compare equal.
    var points_vec2 = DynamicVector[Point64](10)
    for n in range(10):
        points_vec2.push_back(Point(lon + n, lat - n))
    let mpt9 = MultiPoint(points_vec2)
    let mpt10 = MultiPoint(points_vec2)
    test.assert_true(mpt9 == mpt10, "__eq__")
    test.assert_true(mpt9 != mpt2, "__ne__")

    # Variadic construction: identical argument lists compare equal.
    let mpt11 = MultiPoint(Point(lon, lat), Point(lon, lat), Point(lon, lat + 1))
    let mpt12 = MultiPoint(Point(lon, lat), Point(lon, lat), Point(lon, lat + 1))
    test.assert_true(mpt11 == mpt12, "__eq__")
    test.assert_true(mpt9 != mpt12, "__ne__")
fn test_is_empty() raises:
    """Checks that a default-constructed MultiPoint reports empty."""
    let test = MojoTest("is_empty")
    let mpt = MultiPoint()
    test.assert_true(mpt.is_empty(), "is_empty()")
fn test_repr() raises:
    """Checks the __repr__ summary string of a two-point MultiPoint."""
    let test = MojoTest("__repr__")
    let mpt = MultiPoint(Point(lon, lat), Point(lon + 1, lat + 1))
    let expect = "MultiPoint [Point, float64](2 points)"
    test.assert_true(mpt.__repr__() == expect, "__repr__")
fn test_stringable() raises:
    """Checks that __str__ delegates to __repr__."""
    let test = MojoTest("__str__")
    let pts = MultiPoint(Point(lon, lat), Point(lon + 1, lat + 1))
    test.assert_true(pts.__str__() == pts.__repr__(), "__str__")
fn test_wktable() raises:
    """Round-trips WKT fixture files through from_wkt()/wkt()."""
    let test = MojoTest("wktable")
    let path = Path("mogeo/test/fixtures/wkt/multi_point")
    let fixtures = VariadicList("point.wkt", "point_z.wkt")
    for i in range(len(fixtures)):
        let file = path / fixtures[i]
        with open(file.path, "r") as f:
            let wkt = f.read()
            let mp = MultiPoint.from_wkt(wkt)
            # Weak assertion: only checks wkt() produced *something*.
            test.assert_true(
                mp.wkt() != "FIXME", "wkt"
            )  # FIXME: no number formatting so cannot compare wkt strings.
fn test_jsonable() raises:
    """Runs both GeoJSON serialization and deserialization tests."""
    test_json()
    test_from_json()
fn test_json() raises:
    """Checks GeoJSON output for XY/XYZ MultiPoints and that M dims raise."""
    let test = MojoTest("json")
    let mpt = MultiPoint(Point(lon, lat), Point(lon + 1, lat + 1))
    test.assert_true(
        mpt.json()
        == '{"type":"MultiPoint","coordinates":[[-108.68000000000001,38.973999999999997],[-107.68000000000001,39.973999999999997]]}',
        "json",
    )
    let mpt_z = MultiPoint(Point(lon, lat, height), Point(lon + 1, lat + 1, height - 1))
    test.assert_true(
        mpt_z.json()
        == '{"type":"MultiPoint","coordinates":[[-108.68000000000001,38.973999999999997,8.0],[-107.68000000000001,39.973999999999997,7.0]]}',
        "json",
    )
    # GeoJSON (RFC 7946) forbids the measure dimension, so json() must raise.
    let expect_error = "GeoJSON only allows dimensions X, Y, and optionally Z (RFC 7946)"
    var mpt_m = MultiPoint(
        Point(lon, lat, measure), Point(lon + 1, lat + 1, measure - 1)
    )
    mpt_m.set_ogc_dims(CoordDims.PointM)
    # NOTE(review): if json() does NOT raise, these try blocks pass silently;
    # consider failing the test explicitly when no exception occurs.
    try:
        _ = mpt_m.json()
    except e:
        test.assert_true(str(e) == expect_error, "json raises")
    let mpt_zm = MultiPoint(
        Point(lon, lat, height, measure),
        Point(lon + 1, lat + 1, height * 2, measure - 1),
    )
    try:
        _ = mpt_zm.json()
    except e:
        test.assert_true(str(e) == expect_error, "json raises")
fn test_from_json() raises:
    """Disabled: MultiPoint.from_json fixture round-trip (kept for reference)."""
    pass
    # let test = MojoTest("from_json")
    # let path = Path("mogeo/test/fixtures/geojson/multi_point")
    # let fixtures = VariadicList("multi_point.geojson")  # , "multi_point_z.geojson"
    # for i in range(len(fixtures)):
    #     let file = path / fixtures[i]
    #     with open(file.path, "r") as f:
    #         let json_str = f.read()
    #         _ = MultiPoint.from_json(json_str)
    #         let json_dict = JSONParser.parse(json_str)
    #         _ = MultiPoint.from_json(json_dict)
| mogeo/mogeo/test/geom/test_multi_point.mojo | false |
from python import Python
from python.object import PythonObject
from pathlib import Path
from mogeo.geom.empty import empty_value, is_empty
from mogeo.geom.point import Point, CoordDims
from mogeo.geom.traits import Dimensionable, Geometric, Emptyable
from mogeo.test.helpers import load_geoarrow_test_fixture
from mogeo.test.pytest import MojoTest
from mogeo.test.constants import lon, lat, height, measure
fn main() raises:
    """Test-runner entry point: executes every Point test exactly once.

    Fixes: test_stringable() was invoked twice, and test_empty_default_values()
    / test_is_empty() were defined below but never called.
    """
    test_constructors()
    test_repr()
    test_equality_ops()
    test_getters()
    test_setters()
    test_sized()
    test_stringable()
    test_dimensionable()
    test_geometric()
    test_emptyable()
    test_empty_default_values()
    test_is_empty()
    test_wktable()
    test_jsonable()
    test_geoarrowable()
fn test_constructors():
    """Smoke-tests every Point constructor overload (results discarded)."""
    # NOTE(review): `test` is unused below — presumably constructed only for
    # its side effect of announcing the test name; confirm.
    let test = MojoTest("constructors")
    _ = Point()
    _ = Point(lon, lat)
    _ = Point(lon, lat, height)
    _ = Point(lon, lat, measure)
    _ = Point(lon, lat, height, measure)
    _ = Point[DType.int32]()
    _ = Point[DType.float32]()
    _ = Point[DType.int32](lon, lat)
    _ = Point[DType.float32](lon, lat)
    # Direct SIMD-vector constructors.
    _ = Point[dtype = DType.float16](SIMD[DType.float16, 4](lon, lat, height, measure))
    _ = Point[dtype = DType.float32](SIMD[DType.float32, 4](lon, lat, height, measure))
fn test_repr() raises:
    """Checks __repr__ for XY, XYZ (Z), XYM (M), and XYZM Points."""
    let test = MojoTest("repr")
    let pt = Point(lon, lat)
    # Unused trailing lanes print as nan for a 2D float point.
    test.assert_true(
        pt.__repr__()
        == "Point [float64](-108.68000000000001, 38.973999999999997, nan, nan)",
        "repr",
    )
    let pt_z = Point(lon, lat, height)
    # NOTE(review): the 4th lane also prints 8.0 here — presumably padding
    # from the 3-arg constructor; confirm intended.
    test.assert_true(
        pt_z.__repr__()
        == "Point Z [float64](-108.68000000000001, 38.973999999999997, 8.0, 8.0)",
        "repr",
    )
    # the variadic list constructor cannot distinguish Point Z from Point M, so use the set_ogc_dims method.
    var pt_m = pt_z
    pt_m.set_ogc_dims(CoordDims.PointM)
    test.assert_true(
        pt_m.__repr__()
        == "Point M [float64](-108.68000000000001, 38.973999999999997, 8.0, 8.0)",
        "repr",
    )
    let pt_zm = Point(lon, lat, height, measure)
    test.assert_true(
        pt_zm.__repr__()
        == "Point ZM [float64](-108.68000000000001, 38.973999999999997, 8.0, 42.0)",
        "repr",
    )
fn test_stringable() raises:
    """Checks that str() matches __repr__ for a 3D Point."""
    let test = MojoTest("stringable")
    let pt = Point(lon, lat, height)
    test.assert_true(pt.__str__() == pt.__repr__(), "__str__")
fn test_sized() raises:
    """Checks len() of a 3-dimensional Point."""
    let test = MojoTest("sized")
    let pt = Point(lon, lat, height)
    test.assert_true(pt.__len__() == 3, "__len__")
fn test_dimensionable() raises:
    """Checks dims(), has_height(), and has_measure() across dimensions."""
    let pt = Point(lon, lat)
    let pt_z = Point(lon, lat, height)
    var pt_m = Point(lon, lat, measure)
    let pt_zm = Point(lon, lat, height, measure)
    var test = MojoTest("dims")
    test.assert_true(pt.dims() == 2, "dims")
    test.assert_true(pt_z.dims() == 3, "dims")
    test.assert_true(pt_m.dims() == 3, "dims")
    test.assert_true(pt_zm.dims() == 4, "dims")
    test = MojoTest("has_height")
    test.assert_true(pt_z.has_height(), "has_height")
    test = MojoTest("has_measure")
    # A 3-arg Point defaults to Z, not M, so has_measure() is false until
    # the OGC dims are set explicitly.
    test.assert_true(not pt_m.has_measure(), "has_measure")
    pt_m.set_ogc_dims(CoordDims.PointM)
    test.assert_true(pt_m.has_measure(), "has_measure")
fn test_geometric() raises:
    """Placeholder: Geometric trait assertions not yet implemented."""
    let test = MojoTest("geometric")
fn test_emptyable() raises:
    """Checks that Point.empty() yields empty values in every lane."""
    let test = MojoTest("emptyable")
    let pt_e = Point.empty()
    test.assert_true(is_empty(pt_e.x()), "empty")
    test.assert_true(is_empty(pt_e.y()), "empty")
    test.assert_true(is_empty(pt_e.z()), "empty")
    test.assert_true(is_empty(pt_e.m()), "empty")
    # The whole SIMD coords vector must also register as empty.
    test.assert_true(is_empty(pt_e.coords), "empty")
fn test_empty_default_values() raises:
    """Checks padding lanes of a 2D Point hold the dtype's empty sentinel."""
    let test = MojoTest("empty default/padding values")
    let pt_4 = Point(lon, lat)
    # Float dtypes pad with NaN.
    let expect_value = empty_value[pt_4.dtype]()
    test.assert_true(pt_4.coords[2] == expect_value, "NaN expected")
    test.assert_true(pt_4.coords[3] == expect_value, "NaN expected")
    # Integer dtypes pad with max_finite instead (no NaN available).
    let pt_4_int = Point[DType.uint16](lon, lat)
    let expect_value_int = empty_value[pt_4_int.dtype]()
    test.assert_true(pt_4_int.coords[2] == expect_value_int, "max_finite expected")
    test.assert_true(pt_4_int.coords[3] == expect_value_int, "max_finite expected")
fn test_equality_ops() raises:
    """Checks Point __eq__/__ne__ across dtypes and dimensions.

    Fixes: the inequality assertions were mislabeled ("__ne_" typo and the
    final one reported as "__eq__"), which would misattribute a failure.
    """
    let test = MojoTest("equality operators")

    # float64 2D points.
    let p2a = Point(lon, lat)
    let p2b = Point(lon, lat)
    test.assert_true(p2a == p2b, "__eq__")

    # int16 2D points.
    let p2i = Point[DType.int16](lon, lat)
    let p2ib = Point[DType.int16](lon, lat)
    test.assert_true(p2i == p2ib, "__eq__")

    let p2ic = Point[DType.int16](lon + 1, lat)
    test.assert_true(p2i != p2ic, "__ne__")

    # float64 4D points; a small x perturbation must break equality.
    let p4 = Point(lon, lat, height, measure)
    let p4a = Point(lon, lat, height, measure)
    let p4b = Point(lon + 0.001, lat, height, measure)
    test.assert_true(p4 == p4a, "__eq__")
    test.assert_true(p4 != p4b, "__ne__")
fn test_is_empty() raises:
    """Checks is_empty() for default Points of several dtypes and OGC dims."""
    let test = MojoTest("is_empty")
    let pt2 = Point()
    test.assert_true(pt2.is_empty(), "is_empty")
    let pti = Point[DType.int8]()
    test.assert_true(pti.is_empty(), "is_empty")
    # Empty-ness must hold regardless of the declared OGC dimensionality.
    let pt_z = Point[DType.int8](CoordDims.PointZ)
    test.assert_true(pt_z.is_empty(), "is_empty")
    let pt_m = Point[DType.int8](CoordDims.PointM)
    test.assert_true(pt_m.is_empty(), "is_empty")
    let pt_zm = Point[DType.int8](CoordDims.PointZM)
    test.assert_true(pt_zm.is_empty(), "is_empty")
fn test_getters() raises:
    """Checks the x()/y()/z()/m() accessors for each constructor arity."""
    let test = MojoTest("getters")
    let pt2 = Point(lon, lat)
    test.assert_true(pt2.x() == lon, "p2.x() == lon")
    test.assert_true(pt2.y() == lat, "p2.y() == lat")
    let pt_z = Point(lon, lat, height)
    test.assert_true(pt_z.x() == lon, "pt_z.x() == lon")
    test.assert_true(pt_z.y() == lat, "pt_z.y() == lat")
    test.assert_true(pt_z.z() == height, "pt_z.z() == height")
    # NOTE(review): the 3-arg value lands in the shared 3rd lane, so both
    # z() and m() can read it here — confirm that is the intended contract.
    let pt_m = Point(lon, lat, measure)
    test.assert_true(pt_m.x() == lon, "pt_m.x() == lon")
    test.assert_true(pt_m.y() == lat, "pt_m.y() == lat")
    test.assert_true(pt_m.m() == measure, "pt_m.m() == measure")
    let point_zm = Point(lon, lat, height, measure)
    test.assert_true(point_zm.x() == lon, "point_zm.x() == lon")
    test.assert_true(point_zm.y() == lat, "point_zm.y() == lat")
    test.assert_true(point_zm.z() == height, "point_zm.z() == height")
    test.assert_true(point_zm.m() == measure, "point_zm.m() == measure")
fn test_setters() raises:
    """Checks that set_ogc_dims() reassigns the OGC dimension tag."""
    let test = MojoTest("setters")
    var pt_m = Point(lon, lat, measure)
    pt_m.set_ogc_dims(CoordDims.PointM)
    test.assert_true(pt_m.ogc_dims == CoordDims.PointM, "set_ogc_dims")
fn test_jsonable() raises:
    """Runs both GeoJSON serialization and deserialization tests."""
    test_json()
    test_from_json()
fn test_json() raises:
    """Checks GeoJSON output for XY/XYZ Points and that M dims raise."""
    let test = MojoTest("json")
    let pt2 = Point(lon, lat)
    test.assert_true(
        pt2.json()
        == '{"type":"Point","coordinates":[-108.68000000000001,38.973999999999997]}',
        "json()",
    )
    let pt3 = Point(lon, lat, height)
    test.assert_true(
        pt3.json()
        == '{"type":"Point","coordinates":[-108.68000000000001,38.973999999999997,8.0]}',
        "json()",
    )
    # GeoJSON (RFC 7946) forbids the measure dimension, so json() must raise.
    let expect_error = "GeoJSON only allows dimensions X, Y, and optionally Z (RFC 7946)"
    var pt_m = Point(lon, lat, measure)
    pt_m.set_ogc_dims(CoordDims.PointM)
    # NOTE(review): if json() does NOT raise, these try blocks pass silently;
    # consider failing the test explicitly when no exception occurs.
    try:
        _ = pt_m.json()
    except e:
        test.assert_true(str(e) == expect_error, "json raises")
    let pt4 = Point(lon, lat, height, measure)
    try:
        _ = pt4.json()
    except e:
        test.assert_true(str(e) == expect_error, "json raises")
fn test_from_json() raises:
    """Checks Point.from_json from both a parsed dict and a raw string."""
    let test = MojoTest("from_json")
    let orjson = Python.import_module("orjson")
    let json_str = String('{"type":"Point","coordinates":[102.001, 3.502]}')
    let json_dict = orjson.loads(json_str)
    let pt2 = Point.from_json(json_dict)
    test.assert_true(pt2.x() == 102.001, "pt2.x()")
    test.assert_true(pt2.y() == 3.502, "pt2.y()")
    # NOTE(review): `ptz` parses the same 2D payload as pt2 — presumably a
    # 3D fixture was intended here; confirm.
    let ptz = Point.from_json(json_dict)
    test.assert_true(ptz.x() == 102.001, "ptz.x()")
    test.assert_true(ptz.y() == 3.502, "ptz.y()")
    # from_json also accepts a raw JSON string and other dtypes.
    let pt_f32 = Point[dtype = DType.float32].from_json(json_str)
    test.assert_true(pt_f32.x() == 102.001, "pt_f32.x()")
    test.assert_true(pt_f32.y() == 3.502, "pt_f32.y()")
    # Integer dtypes truncate the parsed coordinates.
    let pt_int = Point[dtype = DType.uint8].from_json(json_dict)
    test.assert_true(pt_int.x() == 102, "pt_int.x()")
    test.assert_true(pt_int.y() == 3, "pt_int.y()")
fn test_wktable() raises:
    """Runs both WKT serialization and deserialization tests."""
    test_wkt()
    test_from_wkt()
fn test_wkt() raises:
    """Checks WKT output for XY, Z, M, ZM, and integer-dtype Points."""
    let test = MojoTest("wkt")
    let pt = Point(lon, lat)
    test.assert_true(
        pt.wkt() == "Point (-108.68000000000001 38.973999999999997)", "wkt"
    )
    let pt_z = Point(lon, lat, height)
    test.assert_true(
        pt_z.wkt() == "Point Z (-108.68000000000001 38.973999999999997 8.0)", "wkt"
    )
    # 3-arg constructor defaults to Z; tag it as M explicitly for "Point M".
    var pt_m = Point(lon, lat, measure)
    pt_m.set_ogc_dims(CoordDims.PointM)
    test.assert_true(
        pt_m.wkt() == "Point M (-108.68000000000001 38.973999999999997 42.0)", "wkt"
    )
    let pt_zm = Point(lon, lat, height, measure)
    test.assert_true(
        pt_zm.wkt() == "Point ZM (-108.68000000000001 38.973999999999997 8.0 42.0)",
        "wkt",
    )
    # Integer dtype truncates the coordinates.
    let p2i = Point[DType.int32](lon, lat)
    test.assert_true(p2i.wkt() == "Point (-108 38)", "wkt")
fn test_from_wkt() raises:
    """Checks Point.from_wkt across dtypes using the point.wkt fixture."""
    let test = MojoTest("from_wkt")
    let path = Path("mogeo/test/fixtures/wkt/point/point.wkt")
    let wkt: String
    with open(path, "rb") as f:
        wkt = f.read()
    let expect_x = -108.68000000000001
    let expect_y = 38.973999999999997
    try:
        let point_2d = Point.from_wkt(wkt)
        test.assert_true(point_2d.x() == expect_x, "point_2d.x()")
        test.assert_true(point_2d.y() == expect_y, "point_2d.y()")
        # NOTE(review): `point_3d` parses the same 2D fixture as point_2d —
        # presumably a 3D fixture was intended; confirm.
        let point_3d = Point.from_wkt(wkt)
        test.assert_true(
            point_3d.__repr__()
            == "Point [float64](-108.68000000000001, 38.973999999999997, nan, nan)",
            "from_wkt",
        )
        # uint8 wraps/truncates the negative longitude; float32 loses precision.
        let point_2d_u8 = Point[DType.uint8].from_wkt(wkt)
        test.assert_true(
            point_2d_u8.__repr__() == "Point [uint8](148, 38, 255, 255)", "from_wkt())"
        )
        let point_2d_f32 = Point[DType.float32].from_wkt(wkt)
        test.assert_true(
            point_2d_f32.__repr__()
            == "Point [float32](-108.68000030517578, 38.9739990234375, nan, nan)",
            "from_wkt",
        )
    except:
        # from_wkt delegates to shapely via Python interop; a missing module
        # surfaces here.
        raise Error(
            "from_wkt(): Maybe failed to import_module of shapely? check venv's install"
            " packages."
        )
fn test_geoarrowable() raises:
    """Runs GeoArrow deserialization tests (serialization still TODO)."""
    # TODO test_geoarrow()
    test_from_geoarrow()
# fn test_geoarrow() raises:
# let test = MojoTest("geoarrow")
fn test_from_geoarrow() raises:
    """Checks Point.from_geoarrow on XY, XYZ, XYZM, and XYM fixture tables."""
    let test = MojoTest("from_geoarrow")
    let ga = Python.import_module("geoarrow.pyarrow")
    let path = Path("mogeo/test/fixtures/geoarrow/geoarrow-data/example")
    let empty = empty_value[DType.float64]()

    # 2D point: unused lanes must hold the empty sentinel.
    var file = path / "example-point.arrow"
    var table = load_geoarrow_test_fixture(file)
    var geoarrow = ga.as_geoarrow(table["geometry"])
    var chunk = geoarrow[0]
    let point_2d = Point.from_geoarrow(table)
    let expect_point_2d = Point(
        SIMD[point_2d.dtype, point_2d.simd_dims](30.0, 10.0, empty, empty)
    )
    test.assert_true(point_2d == expect_point_2d, "expect_coords_2d")

    # 3D (Z) point.
    file = path / "example-point_z.arrow"
    table = load_geoarrow_test_fixture(file)
    geoarrow = ga.as_geoarrow(table["geometry"])
    chunk = geoarrow[0]
    # print(chunk.wkt)
    let point_3d = Point.from_geoarrow(table)
    let expect_point_3d = Point(
        SIMD[point_3d.dtype, point_3d.simd_dims](
            30.0, 10.0, 40.0, empty_value[point_3d.dtype]()
        )
    )
    # NOTE(review): this loop repeats the identical whole-point comparison 3
    # times — presumably per-lane checks (as in the M case below) were
    # intended; confirm.
    for i in range(3):
        # cannot check the nan for equality
        test.assert_true(point_3d == expect_point_3d, "expect_point_3d")

    # 4D (ZM) point.
    file = path / "example-point_zm.arrow"
    table = load_geoarrow_test_fixture(file)
    geoarrow = ga.as_geoarrow(table["geometry"])
    chunk = geoarrow[0]
    # print(chunk.wkt)
    let point_4d = Point.from_geoarrow(table)
    let expect_point_4d = Point(
        SIMD[point_4d.dtype, point_4d.simd_dims](30.0, 10.0, 40.0, 300.0)
    )
    test.assert_true(point_4d == expect_point_4d, "expect_point_4d")

    # M point: compare lane-by-lane since the padded NaN lane never
    # compares equal to itself.
    file = path / "example-point_m.arrow"
    table = load_geoarrow_test_fixture(file)
    geoarrow = ga.as_geoarrow(table["geometry"])
    chunk = geoarrow[0]
    # print(chunk.wkt)
    let point_m = Point.from_geoarrow(table)
    let expect_coords_m = SIMD[point_m.dtype, point_m.simd_dims](
        30.0, 10.0, 300.0, empty_value[point_m.dtype]()
    )
    for i in range(3):  # cannot equality check the NaN
        test.assert_true(point_m.coords[i] == expect_coords_m[i], "expect_coords_m")
    test.assert_true(is_empty(point_m.coords[3]), "expect_coords_m")
| mogeo/mogeo/test/geom/test_point.mojo | false |
<filename>mogeo/mogeo/test/geom/__init__.mojo
"""
Tests for mogeo/geom module.
"""
| mogeo/mogeo/test/geom/__init__.mojo | false |
<filename>mojo/examples/deviceinfo.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2023, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# RUN: %mojo %s | FileCheck %s
# This sample prints the current host system information using APIs from the
# sys module.
from sys.info import (
_current_cpu,
_current_target,
_triple_attr,
)
from sys import (
os_is_linux,
os_is_macos,
os_is_windows,
has_sse4,
has_avx,
has_avx2,
has_avx512f,
has_vnni,
has_intel_amx,
has_neon,
is_apple_m1,
is_apple_m2,
is_apple_m3,
num_physical_cores,
num_logical_cores,
)
def main():
    """Prints host OS, CPU, architecture, core counts, and CPU features."""
    # OS detection: anything that is neither Linux nor macOS is reported
    # as windows.
    var os = ""
    if os_is_linux():
        os = "linux"
    elif os_is_macos():
        os = "macOS"
    else:
        os = "windows"
    var cpu = String(_current_cpu())
    var arch = String(_triple_attr())
    # Accumulate a space-separated list of detected ISA extensions.
    var cpu_features = String("")
    if has_sse4():
        cpu_features += " sse4"
    if has_avx():
        cpu_features += " avx"
    if has_avx2():
        cpu_features += " avx2"
    if has_avx512f():
        cpu_features += " avx512f"
    if has_vnni():
        # VNNI is reported differently depending on AVX-512 support.
        if has_avx512f():
            cpu_features += " avx512_vnni"
        else:
            cpu_features += " avx_vnni"
    if has_intel_amx():
        cpu_features += " intel_amx"
    if has_neon():
        cpu_features += " neon"
    if is_apple_m1():
        cpu_features += " Apple M1"
    if is_apple_m2():
        cpu_features += " Apple M2"
    if is_apple_m3():
        cpu_features += " Apple M3"
    print("System information: ")
    print("    OS             : ", os)
    print("    CPU            : ", cpu)
    print("    Arch           : ", arch)
    print("    Physical Cores : ", num_physical_cores())
    print("    Logical Cores  : ", num_logical_cores())
    # FileCheck directive — do not remove.
    # CHECK: CPU Features
    print("    CPU Features   :", cpu_features)
| mojo/examples/deviceinfo.mojo | false |
<filename>mojo/examples/hello_interop.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2023, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# This sample demonstrates some basic Mojo
# range() and print() functions available in the standard library.
# It also demonstrates Python interop by importing the simple_interop.py file.
from python import Python
def main():
    """Prints a greeting, counts down by threes, then calls into Python."""
    print("Hello Mojo 🔥!")
    for x in range(9, 0, -3):
        print(x)
    # Make the examples directory importable from either CWD.
    Python.add_to_path(".")
    Python.add_to_path("./examples")
    var interop = Python.import_module("simple_interop")
    interop.test_interop_func()
| mojo/examples/hello_interop.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2023, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# RUN: %mojo %s | FileCheck %s
import benchmark
from math import iota
from sys import num_physical_cores
from algorithm import parallelize, vectorize
from complex import ComplexFloat64, ComplexSIMD
# Compile-time configuration for the Mandelbrot benchmark.
alias float_type = DType.float32
alias int_type = DType.int32
# 2x the native SIMD width: uses multiple registers per iteration.
alias simd_width = 2 * simdwidthof[float_type]()
alias unit = benchmark.Unit.ms

# Image size in pixels.
alias cols = 960
alias rows = 960
alias MAX_ITERS = 200

# Complex-plane window rendered into the image.
alias min_x = -2.0
alias max_x = 0.6
alias min_y = -1.5
alias max_y = 1.5
struct Matrix[type: DType, rows: Int, cols: Int]:
    """Minimal row-major matrix over a raw buffer (caller frees `data`)."""

    var data: DTypePointer[type]

    fn __init__(inout self):
        # Buffer is allocated but not zeroed; the kernel writes every cell
        # before it is read.
        self.data = DTypePointer[type].alloc(rows * cols)

    fn store[nelts: Int](self, row: Int, col: Int, val: SIMD[type, nelts]):
        # Row-major: element (row, col) lives at offset row * cols + col.
        self.data.store[width=nelts](row * cols + col, val)
fn mandelbrot_kernel_SIMD[
    simd_width: Int
](c: ComplexSIMD[float_type, simd_width]) -> SIMD[int_type, simd_width]:
    """A vectorized implementation of the inner mandelbrot computation.

    Iterates z = z^2 + c per SIMD lane and returns the per-lane escape count
    (capped at MAX_ITERS).
    """
    var cx = c.re
    var cy = c.im
    var x = SIMD[float_type, simd_width](0)
    var y = SIMD[float_type, simd_width](0)
    var y2 = SIMD[float_type, simd_width](0)
    var iters = SIMD[int_type, simd_width](0)
    # t[i] stays true while lane i has not yet escaped |z| > 2.
    var t: SIMD[DType.bool, simd_width] = True
    for _ in range(MAX_ITERS):
        if not any(t):
            break
        # Update order matters: y uses the old x; the escape test and the x
        # update both still read the old x and the old y (via y2).
        y2 = y * y
        y = x.fma(y + y, cy)  # y' = 2*x*y + cy
        t = x.fma(x, y2) <= 4  # |z|^2 = x^2 + y^2 <= 4
        x = x.fma(x, cx - y2)  # x' = x^2 - y^2 + cx
        # Only lanes still inside the set keep counting.
        iters = t.select(iters + 1, iters)
    return iters
fn main() raises:
    """Benchmarks vectorized vs. vectorized+parallelized Mandelbrot rendering."""
    var matrix = Matrix[int_type, rows, cols]()

    @parameter
    fn worker(row: Int):
        # Map pixel coordinates to the complex-plane window.
        var scale_x = (max_x - min_x) / cols
        var scale_y = (max_y - min_y) / rows

        @parameter
        fn compute_vector[simd_width: Int](col: Int):
            """Each time we operate on a `simd_width` vector of pixels."""
            var cx = min_x + (col + iota[float_type, simd_width]()) * scale_x
            var cy = min_y + row * scale_y
            var c = ComplexSIMD[float_type, simd_width](cx, cy)
            matrix.store(row, col, mandelbrot_kernel_SIMD(c))

        # Vectorize the call to compute_vector with a chunk of pixels.
        vectorize[compute_vector, simd_width, size=cols]()

    @parameter
    fn bench():
        # Serial over rows, SIMD within each row.
        for row in range(rows):
            worker(row)

    @parameter
    fn bench_parallel():
        # One task per row across the thread pool.
        parallelize[worker](rows, rows)

    print("Number of physical cores:", num_physical_cores())
    var vectorized = benchmark.run[bench]().mean(unit)
    print("Vectorized:", vectorized, unit)
    var parallelized = benchmark.run[bench_parallel]().mean(unit)
    print("Parallelized:", parallelized, unit)
    # FileCheck directive — do not remove.
    # CHECK: Parallel speedup
    print("Parallel speedup:", vectorized / parallelized)
    matrix.data.free()
| mojo/examples/mandelbrot.mojo | false |
<filename>mojo/examples/matmul.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2023, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# This sample demonstrates how various systems optimizations can be applied to a
# naive matmul implementation in Mojo to gain significant performance speedups
from random import rand
import benchmark
from algorithm import Static2DTileUnitFunc as Tile2DFunc
from algorithm import parallelize, vectorize
from sys import info
from memory import memset_zero
from python import Python
# Problem size and tiling configuration for the matmul benchmark.
alias M = 512  # rows of A and C
alias N = 4096  # cols of B and C
alias K = 512  # cols of A and rows of B
alias type = DType.float32

# simdwidth of = amount of `type` elements that fit into a single SIMD register
# 2x multiplier will use multiple SIMD registers in parallel where possible
alias nelts = simdwidthof[type]() * 2
alias tile_n = 64  # N must be a multiple of this
alias tile_k = 4  # K must be a multiple of this
struct Matrix[rows: Int, cols: Int]:
    """Row-major float32 matrix over a raw buffer with SIMD load/store."""

    var data: DTypePointer[type]

    # Initialize zeroeing all values
    fn __init__(inout self):
        self.data = DTypePointer[type].alloc(rows * cols)
        memset_zero(self.data, rows * cols)

    # Initialize taking a pointer, don't set any elements
    fn __init__(inout self, data: DTypePointer[type]):
        self.data = data

    ## Initialize with random values
    @staticmethod
    fn rand() -> Self:
        var data = DTypePointer[type].alloc(rows * cols)
        rand(data, rows * cols)
        return Self(data)

    fn __getitem__(self, y: Int, x: Int) -> Scalar[type]:
        return self.load[1](y, x)

    fn __setitem__(inout self, y: Int, x: Int, val: Scalar[type]):
        self.store[1](y, x, val)

    # Row-major SIMD access: element (y, x) lives at y * cols + x.
    fn load[nelts: Int](self, y: Int, x: Int) -> SIMD[type, nelts]:
        return self.data.load[width=nelts](y * self.cols + x)

    fn store[nelts: Int](self, y: Int, x: Int, val: SIMD[type, nelts]):
        return self.data.store[width=nelts](y * self.cols + x, val)
def run_matmul_python() -> Float64:
    """Runs the pure-Python matmul benchmark (small 128^3 size) and returns GFLOPS."""
    Python.add_to_path(".")
    var pymatmul: PythonObject = Python.import_module("pymatmul")
    var py = Python.import_module("builtins")
    var gflops = pymatmul.benchmark_matmul_python(128, 128, 128).to_float64()
    py.print(py.str("{:<13}{:>8.3f} GFLOPS").format("Python:", gflops))
    return gflops
def run_matmul_numpy() -> Float64:
    """Runs the NumPy matmul benchmark at the full M*N*K size and returns GFLOPS."""
    var pymatmul: PythonObject = Python.import_module("pymatmul")
    var py = Python.import_module("builtins")
    var gflops = pymatmul.benchmark_matmul_numpy(M, N, K).to_float64()
    py.print(py.str("{:<13}{:>8.3f} GFLOPS").format("Numpy:", gflops))
    return gflops
fn matmul_naive(inout C: Matrix, A: Matrix, B: Matrix):
    """Baseline scalar matmul: C += A @ B.

    Loop order m,k,n keeps the inner loop walking B's row contiguously.
    """
    for m in range(C.rows):
        for k in range(A.cols):
            for n in range(C.cols):
                C[m, n] += A[m, k] * B[k, n]
# Using stdlib vectorize function
fn matmul_vectorized(inout C: Matrix, A: Matrix, B: Matrix):
    """SIMD matmul: vectorizes the inner n-loop with `nelts`-wide FMAs."""
    for m in range(C.rows):
        for k in range(A.cols):

            @parameter
            fn dot[nelts: Int](n: Int):
                # C[m, n:n+nelts] += A[m, k] * B[k, n:n+nelts]
                C.store[nelts](
                    m, n, C.load[nelts](m, n) + A[m, k] * B.load[nelts](k, n)
                )

            vectorize[dot, nelts, size = C.cols]()
# Parallelize the code by using the builtin parallelize function
# num_workers is the number of worker threads to use in parallelize
fn matmul_parallelized(inout C: Matrix, A: Matrix, B: Matrix):
    """Vectorized inner loop plus one parallel task per row of C.
    Rows are independent, so no synchronization is needed between tasks."""
    var num_workers = C.rows
    @parameter
    fn calc_row(m: Int):
        for k in range(A.cols):
            @parameter
            fn dot[nelts: Int](n: Int):
                C.store[nelts](
                    m, n, C.load[nelts](m, n) + A[m, k] * B.load[nelts](k, n)
                )
            vectorize[dot, nelts, size = C.cols]()
    parallelize[calc_row](C.rows, num_workers)
# Perform 2D tiling on the iteration space defined by end_x and end_y
fn tile[tiled_fn: Tile2DFunc, tile_x: Int, tile_y: Int](end_x: Int, end_y: Int):
    """Invokes tiled_fn once per (x, y) tile origin of the end_x-by-end_y
    iteration space. Assumes end_x % tile_x == 0 and end_y % tile_y == 0
    (main() enforces this for the N/K sizes used here)."""
    for y in range(0, end_y, tile_y):
        for x in range(0, end_x, tile_x):
            tiled_fn[tile_x, tile_y](x, y)
# Use the above tile function to perform tiled matmul
# Also parallelize with num_workers threads
fn matmul_tiled(inout C: Matrix, A: Matrix, B: Matrix):
    """Tiled matmul: each per-row task walks (tile_n x tile_k) tiles of the
    (n, k) iteration space with the n dimension SIMD-vectorized."""
    var num_workers = C.rows
    @parameter
    fn calc_row(m: Int):
        @parameter
        fn calc_tile[tile_x: Int, tile_y: Int](x: Int, y: Int):
            # y..y+tile_y is a k-range; x..x+tile_x is an n-range.
            for k in range(y, y + tile_y):
                @parameter
                fn dot[nelts: Int](n: Int):
                    # Width parameter spelled explicitly, consistent with
                    # matmul_vectorized/matmul_parallelized (previously it was
                    # inferred from the SIMD argument).
                    C.store[nelts](
                        m,
                        n + x,
                        C.load[nelts](m, n + x)
                        + A[m, k] * B.load[nelts](k, n + x),
                    )
                vectorize[dot, nelts, size=tile_x]()
        tile[calc_tile, tile_n, tile_k](C.cols, B.rows)
    parallelize[calc_row](C.rows, num_workers)
# Unroll the vectorized loop by a constant factor
# Also parallelize with num_workers threads
fn matmul_unrolled[mode: Int](inout C: Matrix, A: Matrix, B: Matrix):
    """Tiled + vectorized matmul with compile-time unrolling of the tile's
    k-loop and of the vectorized dot loop.
    mode selects the worker count: 1 = physical cores, 2 = logical cores,
    3 = performance cores, anything else = one worker per row of C.
    """
    var num_workers: Int
    if mode == 1:
        num_workers = info.num_physical_cores()
    elif mode == 2:
        num_workers = info.num_logical_cores()
    elif mode == 3:
        num_workers = info.num_performance_cores()
    else:
        num_workers = C.rows
    @parameter
    fn calc_row(m: Int):
        @parameter
        fn calc_tile[tile_x: Int, tile_y: Int](x: Int, y: Int):
            # @parameter for: unrolled at compile time over the tile's k-range.
            @parameter
            for _k in range(tile_y):
                var k = _k + y
                @parameter
                fn dot[nelts: Int](n: Int):
                    C.store(
                        m,
                        n + x,
                        C.load[nelts](m, n + x)
                        + A[m, k] * B.load[nelts](k, n + x),
                    )
                vectorize[
                    dot, nelts, size=tile_x, unroll_factor = tile_x // nelts
                ]()
        tile[calc_tile, tile_n, tile_k](C.cols, B.rows)
    parallelize[calc_row](C.rows, num_workers)
@always_inline
fn bench[
    func: fn (inout Matrix, Matrix, Matrix) -> None, name: StringLiteral
](base_gflops: Float64) raises:
    """Times `func` on fresh random M x K / K x N inputs and prints its GFLOPS
    plus the speedup relative to `base_gflops` (the pure-Python baseline)."""
    var A = Matrix[M, K].rand()
    var B = Matrix[K, N].rand()
    var C = Matrix[M, N]()
    @always_inline
    @parameter
    fn test_fn():
        _ = func(C, A, B)
    var secs = benchmark.run[test_fn](max_runtime_secs=0.5).mean()
    A.data.free()
    B.data.free()
    C.data.free()
    # 2*M*N*K flops per matmul: one multiply + one add per inner iteration.
    var gflops = ((2 * M * N * K) / secs) / 1e9
    var speedup: Float64 = gflops / base_gflops
    var py = Python.import_module("builtins")
    _ = py.print(
        py.str("{:<13}{:>8.3f} GFLOPS {:>9.2f}x Python").format(
            name, gflops, speedup
        )
    )
@always_inline
fn test_matrix_equal[
    func: fn (inout Matrix, Matrix, Matrix) -> None
](inout C: Matrix, A: Matrix, B: Matrix) raises -> Bool:
    """Runs a matmul function on A and B and tests the result for equality with
    C on every element.
    """
    var result = Matrix[M, N]()
    _ = func(result, A, B)
    var matches = True
    for i in range(C.rows):
        if not matches:
            break
        for j in range(C.cols):
            if C[i, j] != result[i, j]:
                matches = False
                break
    # Fix: the scratch matrix was previously leaked (allocated by __init__,
    # never freed, and the early `return False` skipped any cleanup); release
    # its buffer on every exit path.
    result.data.free()
    return matches
fn test_all() raises:
    """Validates every optimized matmul variant against matmul_naive on random
    inputs; raises an Error naming the first variant that mismatches."""
    var A = Matrix[M, K].rand()
    var B = Matrix[K, N].rand()
    var C = Matrix[M, N]()
    # C holds the naive reference result each variant is compared against.
    matmul_naive(C, A, B)
    if not test_matrix_equal[matmul_vectorized](C, A, B):
        raise Error("Vectorize output does not match naive implementation")
    if not test_matrix_equal[matmul_parallelized](C, A, B):
        raise Error("Parallelize output does not match naive implementation")
    if not test_matrix_equal[matmul_tiled](C, A, B):
        raise Error("Tiled output does not match naive implementation")
    if not test_matrix_equal[matmul_unrolled[0]](C, A, B):
        raise Error("Unroll output does not match naive implementation")
    if not test_matrix_equal[matmul_unrolled[1]](
        C,
        A,
        B,
    ):
        raise Error(
            "Unroll with workers as physical cores output does not match naive"
            " implementation"
        )
    if not test_matrix_equal[matmul_unrolled[2]](
        C,
        A,
        B,
    ):
        raise Error(
            "Unroll with workers as logical cores output does not match naive"
            " implementation"
        )
    if not test_matrix_equal[matmul_unrolled[3]](
        C,
        A,
        B,
    ):
        raise Error(
            "Unroll with workers as performance cores output does not match"
            " naive implementation"
        )
    A.data.free()
    B.data.free()
    C.data.free()
fn main() raises:
    """Entry point: validates every matmul variant, then benchmarks each one
    against the pure-Python baseline."""
    # Compile-time guards for the tiling assumptions baked into tile().
    constrained[N % tile_n == 0, "N must be a multiple of tile_n"]()
    constrained[K % tile_k == 0, "K must be a multiple of tile_k"]()
    test_all()
    print("CPU Results\n")
    var python_gflops = run_matmul_python()
    # NOTE(review): numpy_gflops is printed inside run_matmul_numpy() but
    # otherwise unused; all speedups below are relative to pure Python.
    var numpy_gflops = run_matmul_numpy()
    bench[matmul_naive, "Naive:"](python_gflops)
    bench[matmul_vectorized, "Vectorized: "](python_gflops)
    bench[matmul_parallelized, "Parallelized:"](python_gflops)
    bench[matmul_tiled, "Tiled:"](python_gflops)
    bench[matmul_unrolled[0], "Unrolled:"](python_gflops)
    bench[matmul_unrolled[1], "Unrolled w/ workers == physical cores:"](
        python_gflops
    )
    bench[matmul_unrolled[2], "Unrolled w/ workers == logical cores:"](
        python_gflops
    )
    bench[matmul_unrolled[3], "Unrolled w/ workers == performance cores:"](
        python_gflops
    )
| mojo/examples/matmul.mojo | false |
<filename>mojo/examples/nbody.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2023, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# RUN: %mojo %s
# This sample implements the nbody benchmarking in
# https://benchmarksgame-team.pages.debian.net/benchmarksgame/performance/nbody.html
from math import sqrt
from benchmark import run, keep
from collections import List
from testing import assert_almost_equal
alias PI = 3.141592653589793
# 4*pi^2: the sun's mass in the benchmark's unit system (presumably AU/year
# units where G == 1 — TODO confirm against the benchmarks-game spec).
alias SOLAR_MASS = 4 * PI * PI
alias DAYS_PER_YEAR = 365.24
@value
struct Planet:
    """One body's state: 3D position and velocity stored in 4-lane SIMD
    vectors (the 4th lane is padding, set to 0 by the initial data) plus mass.
    """
    var pos: SIMD[DType.float64, 4]
    var velocity: SIMD[DType.float64, 4]
    var mass: Float64
    # NOTE(review): @value already synthesizes a memberwise initializer; this
    # explicit one looks redundant — confirm before removing.
    fn __init__(
        inout self,
        pos: SIMD[DType.float64, 4],
        velocity: SIMD[DType.float64, 4],
        mass: Float64,
    ):
        self.pos = pos
        self.velocity = velocity
        self.mass = mass
# Initial conditions from the n-body benchmarks-game problem: positions,
# velocities (converted to per-year units via DAYS_PER_YEAR) and masses
# (scaled by SOLAR_MASS). The 4th SIMD lane is unused padding.
alias Sun = Planet(
    0,
    0,
    SOLAR_MASS,
)
alias Jupiter = Planet(
    SIMD[DType.float64, 4](
        4.84143144246472090e00,
        -1.16032004402742839e00,
        -1.03622044471123109e-01,
        0,
    ),
    SIMD[DType.float64, 4](
        1.66007664274403694e-03 * DAYS_PER_YEAR,
        7.69901118419740425e-03 * DAYS_PER_YEAR,
        -6.90460016972063023e-05 * DAYS_PER_YEAR,
        0,
    ),
    9.54791938424326609e-04 * SOLAR_MASS,
)
alias Saturn = Planet(
    SIMD[DType.float64, 4](
        8.34336671824457987e00,
        4.12479856412430479e00,
        -4.03523417114321381e-01,
        0,
    ),
    SIMD[DType.float64, 4](
        -2.76742510726862411e-03 * DAYS_PER_YEAR,
        4.99852801234917238e-03 * DAYS_PER_YEAR,
        2.30417297573763929e-05 * DAYS_PER_YEAR,
        0,
    ),
    2.85885980666130812e-04 * SOLAR_MASS,
)
alias Uranus = Planet(
    SIMD[DType.float64, 4](
        1.28943695621391310e01,
        -1.51111514016986312e01,
        -2.23307578892655734e-01,
        0,
    ),
    SIMD[DType.float64, 4](
        2.96460137564761618e-03 * DAYS_PER_YEAR,
        2.37847173959480950e-03 * DAYS_PER_YEAR,
        -2.96589568540237556e-05 * DAYS_PER_YEAR,
        0,
    ),
    4.36624404335156298e-05 * SOLAR_MASS,
)
alias Neptune = Planet(
    SIMD[DType.float64, 4](
        1.53796971148509165e01,
        -2.59193146099879641e01,
        1.79258772950371181e-01,
        0,
    ),
    SIMD[DType.float64, 4](
        2.68067772490389322e-03 * DAYS_PER_YEAR,
        1.62824170038242295e-03 * DAYS_PER_YEAR,
        -9.51592254519715870e-05 * DAYS_PER_YEAR,
        0,
    ),
    5.15138902046611451e-05 * SOLAR_MASS,
)
alias INITIAL_SYSTEM = List[Planet](Sun, Jupiter, Saturn, Uranus, Neptune)
@always_inline
fn offset_momentum(inout bodies: List[Planet]):
    """Zeroes the system's net momentum by adjusting the first body (the sun).
    """
    # Accumulate the net momentum of every body.
    var total = SIMD[DType.float64, 4]()
    for b in bodies:
        total += b[].velocity * b[].mass
    # Give the sun the exactly opposite momentum so sum(m_i * v_i) == 0.
    var sun = bodies[0]
    sun.velocity = -total / SOLAR_MASS
    bodies[0] = sun
@always_inline
fn advance(inout bodies: List[Planet], dt: Float64):
    """Advances the system one step of size dt: velocities are updated from
    pairwise gravity first, then positions from the new velocities.
    NOTE(review): loop bounds come from len(INITIAL_SYSTEM), not len(bodies) —
    fine for this benchmark, but the sizes are silently assumed equal.
    """
    # Visit each unordered pair (i, j) with j > i exactly once.
    for i in range(len(INITIAL_SYSTEM)):
        for j in range(len(INITIAL_SYSTEM) - i - 1):
            var body_i = bodies[i]
            var body_j = bodies[j + i + 1]
            var diff = body_i.pos - body_j.pos
            var diff_sqr = (diff * diff).reduce_add()
            # dt / |r|^3: inverse-square law folded with the unit vector.
            var mag = dt / (diff_sqr * sqrt(diff_sqr))
            body_i.velocity -= diff * body_j.mass * mag
            body_j.velocity += diff * body_i.mass * mag
            bodies[i] = body_i
            bodies[j + i + 1] = body_j
    for body in bodies:
        body[].pos += dt * body[].velocity
@always_inline
fn energy(bodies: List[Planet]) -> Float64:
    """Total system energy: sum of kinetic terms (m*|v|^2/2) minus pairwise
    potential terms (m_i*m_j/|r|).
    NOTE(review): like advance(), loop bounds come from len(INITIAL_SYSTEM)
    rather than len(bodies).
    """
    var e: Float64 = 0
    for i in range(len(INITIAL_SYSTEM)):
        var body_i = bodies[i]
        # Kinetic energy of body i.
        e += (
            0.5
            * body_i.mass
            * ((body_i.velocity * body_i.velocity).reduce_add())
        )
        # Potential energy of each (i, j) pair, j > i.
        for j in range(len(INITIAL_SYSTEM) - i - 1):
            var body_j = bodies[j + i + 1]
            var diff = body_i.pos - body_j.pos
            var distance = sqrt((diff * diff).reduce_add())
            e -= (body_i.mass * body_j.mass) / distance
    return e
def run_system():
    """Runs 50M simulation steps (dt = 0.01) and checks the final energy
    against the expected value for this configuration."""
    print("Starting nbody...")
    var system = INITIAL_SYSTEM
    offset_momentum(system)
    print("Energy of System:", energy(system))
    for i in range(50_000_000):
        advance(system, 0.01)
    var system_energy = energy(system)
    # Expected final energy for this step count and dt.
    assert_almost_equal(system_energy, -0.1690599)
    print("Energy of System:", system_energy)
def benchmark():
    """Times a full 50M-step run via benchmark.run.
    NOTE(review): not invoked from main(); presumably kept for manual timing.
    """
    fn benchmark_fn():
        var system = INITIAL_SYSTEM
        offset_momentum(system)
        keep(energy(system))  # keep() stops the optimizer eliding the work
        for i in range(50_000_000):
            advance(system, 0.01)
        keep(energy(system))
    run[benchmark_fn](max_runtime_secs=0.5).print()
def main():
    """Entry point: runs the energy-checked simulation (benchmark() exists for
    separate manual timing)."""
    run_system()
| mojo/examples/nbody.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2023, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
# RUN: %mojo %s | FileCheck %s
# This sample implements a simple reduction operation on a
# large array of values to produce a single result.
# Reductions and scans are common algorithm patterns in parallel computing.
from time import now
from algorithm import sum
from benchmark import Unit, benchmark, keep
from buffer import Buffer
from python import Python
from random import rand
# Change these numbers to reduce on different sizes
alias size_small: Int = 1 << 21  # ~2M elements
alias size_large: Int = 1 << 27  # ~134M elements
# Datatype for Tensor/Array
alias type = DType.float32
alias scalar = Scalar[type]
# Scalar (non-vectorized) reduction using Kahan compensated summation to
# limit float rounding error:
# https://en.wikipedia.org/wiki/Kahan_summation_algorithm
fn naive_reduce_sum[size: Int](buffer: Buffer[type, size]) -> scalar:
    """Sums the buffer one element at a time with Kahan compensation."""
    var my_sum: scalar = 0
    # Running compensation: low-order bits lost by each addition.
    var c: scalar = 0
    for i in range(buffer.size):
        var y = buffer[i] - c
        var t = my_sum + y
        c = (t - my_sum) - y
        my_sum = t
    return my_sum
fn stdlib_reduce_sum[size: Int](array: Buffer[type, size]) -> scalar:
    """Sums the buffer with the stdlib `algorithm.sum` reduction."""
    return sum(array)
def pretty_print(name: String, elements: Int, time: Float64):
    """Prints one aligned result row (name, element count, mean time in ms),
    using Python's str.format for the thousands separators."""
    py = Python.import_module("builtins")
    py.print(
        py.str("{:<16} {:>11,} {:>8.2f}ms").format(
            name + " elements:", elements, time
        )
    )
fn bench[
    func: fn[size: Int] (buffer: Buffer[type, size]) -> scalar,
    size: Int,
    name: String,
](buffer: Buffer[type, size]) raises:
    """Benchmarks a reduction function on `buffer` and pretty-prints the mean
    runtime in milliseconds."""
    @parameter
    fn runner():
        var result = func[size](buffer)
        keep(result)  # prevent the optimizer from discarding the reduction
    var ms = benchmark.run[runner](max_runtime_secs=0.5).mean(Unit.ms)
    pretty_print(name, size, ms)
fn main() raises:
    """Compares the scalar Kahan sum against stdlib algorithm.sum on one small
    and one large random buffer."""
    print(
        "Sum all values in a small array and large array\n"
        "Shows algorithm.sum from stdlib with much better performance\n"
    )
    # Allocate and randomize data, then create two buffers
    var ptr_small = DTypePointer[type].alloc(size_small)
    var ptr_large = DTypePointer[type].alloc(size_large)
    rand(ptr_small, size_small)
    rand(ptr_large, size_large)
    var buffer_small = Buffer[type, size_small](ptr_small)
    var buffer_large = Buffer[type, size_large](ptr_large)
    bench[naive_reduce_sum, size_small, "naive"](buffer_small)
    bench[naive_reduce_sum, size_large, "naive"](buffer_large)
    bench[stdlib_reduce_sum, size_small, "stdlib"](buffer_small)
    # CHECK: stdlib elements
    bench[stdlib_reduce_sum, size_large, "stdlib"](buffer_large)
    ptr_small.free()
    ptr_large.free()
| mojo/examples/reduce.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
import sys
from pathlib import Path
# We can't check much more than this at the moment, because the license year
# changes and the language is not mature enough to do regex yet.
# Every checked file must start with exactly this prefix (after strip()).
var LICENSE = String(
"""
# ===----------------------------------------------------------------------=== #
# Copyright (c)
"""
).strip()
def main():
    """Checks that every file passed on the command line starts with the
    expected license header; prints offenders and exits non-zero if any."""
    args = sys.argv()
    if len(args) < 2:
        raise Error("A file path must be given as a command line argument.")
    offenders = List[Path]()
    # args[0] is this script itself, so real targets start at index 1.
    for i in range(1, len(args)):
        candidate = Path(args[i])
        if not candidate.read_text().startswith(LICENSE):
            offenders.append(candidate)
    if len(offenders) > 0:
        print("The following files have missing licences 💥 💔 💥")
        for offender in offenders:
            print(offender[])
        print("Please add the license to each file before committing.")
        sys.exit(1)
| mojo/stdlib/scripts/check-licenses.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
| mojo/stdlib/src/__init__.mojo | false |
<filename>mojo/stdlib/src/base64/base64.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provides functions for base64 encoding strings.
You can import these APIs from the `base64` package. For example:
```mojo
from base64 import b64encode
```
"""
from collections import List
from sys import simdwidthof
# ===----------------------------------------------------------------------===#
# Utilities
# ===----------------------------------------------------------------------===#
@always_inline
fn _ascii_to_value(char: String) -> Int:
    """Maps one base64 alphabet character to its 6-bit value.
    Args:
        char: A single character string.
    Returns:
        The 6-bit value of the character ('=' maps to 0), or -1 if the
        character is not in the base64 alphabet.
    """
    # Padding decodes as zero bits.
    if char == "=":
        return 0
    var code = ord(char)
    # 'A'-'Z' -> 0..25, 'a'-'z' -> 26..51, '0'-'9' -> 52..61.
    if ord("A") <= code <= ord("Z"):
        return code - ord("A")
    if ord("a") <= code <= ord("z"):
        return code - ord("a") + 26
    if ord("0") <= code <= ord("9"):
        return code - ord("0") + 52
    if char == "+":
        return 62
    if char == "/":
        return 63
    return -1
# ===----------------------------------------------------------------------===#
# b64encode
# ===----------------------------------------------------------------------===#
fn b64encode(str: String) -> String:
    """Performs base64 encoding on the input string.
    Args:
        str: The input string.
    Returns:
        Base64 encoding of the input string.
    """
    alias lookup = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    var b64chars = lookup.as_uint8_ptr()
    var length = len(str)
    # NOTE(review): base64 output is ~4/3 of the input, so this initial
    # capacity is undersized and the buffer grows; consider 4 * (n + 2) // 3.
    var out = String._buffer_type(capacity=length + 1)
    @parameter
    @always_inline
    fn s(idx: Int) -> Int:
        # TODO: Remove cast once transition to UInt8 string types is complete.
        return int(str.unsafe_ptr().bitcast[UInt8]()[idx])
    # This algorithm is based on https://arxiv.org/abs/1704.00605
    # Main loop: every 3 input bytes become 4 output chars. The * and //
    # arithmetic is bit shifting (si // 4 == si >> 2; (si * 16) % 64 selects
    # si's low 2 bits as the high bits of the next 6-bit group).
    var end = length - (length % 3)
    for i in range(0, end, 3):
        var si = s(i)
        var si_1 = s(i + 1)
        var si_2 = s(i + 2)
        out.append(b64chars[si // 4])
        out.append(b64chars[((si * 16) % 64) + si_1 // 16])
        out.append(b64chars[((si_1 * 4) % 64) + si_2 // 64])
        out.append(b64chars[si_2 % 64])
    # Tail: 1 or 2 leftover bytes are '='-padded to a full 4-char group.
    if end < length:
        var si = s(end)
        out.append(b64chars[si // 4])
        if end == length - 1:
            out.append(b64chars[(si * 16) % 64])
            # Fix: a single leftover byte needs TWO '=' padding characters so
            # the output length stays a multiple of 4 (b64decode in this file
            # asserts that); previously only one '=' was appended.
            out.append(ord("="))
            out.append(ord("="))
        elif end == length - 2:
            var si_1 = s(end + 1)
            out.append(b64chars[((si * 16) % 64) + si_1 // 16])
            out.append(b64chars[(si_1 * 4) % 64])
            out.append(ord("="))
    # NUL terminator expected by String's buffer representation.
    out.append(0)
    return String(out^)
# ===----------------------------------------------------------------------===#
# b64decode
# ===----------------------------------------------------------------------===#
@always_inline
fn b64decode(str: String) -> String:
    """Performs base64 decoding on the input string.
    Args:
        str: A base64 encoded string.
    Returns:
        The decoded string.
    """
    var n = len(str)
    debug_assert(n % 4 == 0, "Input length must be divisible by 4")
    var p = String._buffer_type(capacity=n + 1)
    # This algorithm is based on https://arxiv.org/abs/1704.00605
    # Each 4-char group packs four 6-bit values a, b, c, d into up to 3 bytes.
    for i in range(0, n, 4):
        var a = _ascii_to_value(str[i])
        var b = _ascii_to_value(str[i + 1])
        var c = _ascii_to_value(str[i + 2])
        var d = _ascii_to_value(str[i + 3])
        # NOTE(review): debug-only check; release builds silently pack the -1
        # returned for invalid characters into the output bytes.
        debug_assert(
            a >= 0 and b >= 0 and c >= 0 and d >= 0,
            "Unexpected character encountered",
        )
        p.append((a << 2) | (b >> 4))
        # '=' padding marks the end of payload bytes in the final group.
        if str[i + 2] == "=":
            break
        p.append(((b & 0x0F) << 4) | (c >> 2))
        if str[i + 3] == "=":
            break
        p.append(((c & 0x03) << 6) | d)
    # NUL terminator for String's buffer representation.
    p.append(0)
    return p
# ===----------------------------------------------------------------------===#
# b16encode
# ===----------------------------------------------------------------------===#
fn b16encode(str: String) -> String:
    """Performs base16 encoding on the input string.
    Args:
        str: The input string.
    Returns:
        Base16 encoding of the input string.
    """
    alias lookup = "0123456789ABCDEF"
    var b16chars = lookup.as_uint8_ptr()
    var length = len(str)
    # Two hex characters per input byte, plus the NUL terminator.
    var out = List[UInt8](capacity=length * 2 + 1)
    @parameter
    @always_inline
    fn str_bytes(idx: Int) -> UInt8:
        # Fix: the index was previously typed UInt8, silently truncating
        # offsets for any input longer than 255 bytes.
        return str._buffer[idx]
    for i in range(length):
        var str_byte = str_bytes(i)
        var hi = str_byte >> 4
        var lo = str_byte & 0b1111
        out.append(b16chars[int(hi)])
        out.append(b16chars[int(lo)])
    out.append(0)
    return String(out^)
# ===----------------------------------------------------------------------===#
# b16decode
# ===----------------------------------------------------------------------===#
@always_inline
fn b16decode(str: String) -> String:
    """Performs base16 decoding on the input string.
    Args:
        str: A base16 encoded string.
    Returns:
        The decoded string.
    """
    # TODO: Replace with dict literal when possible
    @parameter
    @always_inline
    fn decode(c: String) -> Int:
        # Accepts both upper- and lower-case hex digits; -1 otherwise.
        var char_val = ord(c)
        if ord("A") <= char_val <= ord("Z"):
            return char_val - ord("A") + 10
        elif ord("a") <= char_val <= ord("z"):
            return char_val - ord("a") + 10
        elif ord("0") <= char_val <= ord("9"):
            return char_val - ord("0")
        return -1
    var n = len(str)
    debug_assert(n % 2 == 0, "Input length must be divisible by 2")
    # Fix: use integer floor division instead of float division + truncation.
    var p = List[UInt8](capacity=n // 2 + 1)
    for i in range(0, n, 2):
        var hi = str[i]
        var lo = str[i + 1]
        # NOTE(review): invalid characters decode to -1 and are packed into
        # the output without any error in release builds.
        p.append(decode(hi) << 4 | decode(lo))
    # NUL terminator for String's buffer representation.
    p.append(0)
    return p
| mojo/stdlib/src/base64/base64.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the base64 package."""
from .base64 import b64encode, b64decode, b16encode, b16decode
| mojo/stdlib/src/base64/__init__.mojo | false |
<filename>mojo/stdlib/src/bit/bit.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provides functions for bit manipulation.
You can import these APIs from the `bit` package. For example:
```mojo
from bit import countl_zero
```
"""
from sys import llvm_intrinsic
from sys.info import bitwidthof
# ===----------------------------------------------------------------------===#
# countl_zero
# ===----------------------------------------------------------------------===#
@always_inline("nodebug")
fn countl_zero(val: Int) -> Int:
    """Counts the number of leading zeros of an integer.
    Args:
        val: The input value.
    Returns:
        The number of leading zeros of the input.
    """
    # Second argument False = is_zero_poison off: per the LLVM LangRef,
    # llvm.ctlz(0) is then defined and returns the full bit width.
    return llvm_intrinsic["llvm.ctlz", Int, has_side_effect=False](val, False)
@always_inline("nodebug")
fn countl_zero[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Counts the per-element number of leading zeros in a SIMD vector.
    Parameters:
        type: `DType` used for the computation.
        simd_width: SIMD width used for the computation.
    Constraints:
        The element type of the input vector must be integral.
    Args:
        val: The input value.
    Returns:
        A SIMD value where the element at position `i` contains the number of
        leading zeros at position `i` of the input value.
    """
    constrained[type.is_integral(), "must be integral"]()
    # False = is_zero_poison off: zero lanes yield the element bit width.
    return llvm_intrinsic["llvm.ctlz", __type_of(val), has_side_effect=False](
        val, False
    )
# ===----------------------------------------------------------------------===#
# countr_zero
# ===----------------------------------------------------------------------===#
@always_inline("nodebug")
fn countr_zero(val: Int) -> Int:
    """Counts the number of trailing zeros for an integer.
    Args:
        val: The input value.
    Returns:
        The number of trailing zeros of the input.
    """
    # Second argument False = is_zero_poison off: per the LLVM LangRef,
    # llvm.cttz(0) is then defined and returns the full bit width.
    return llvm_intrinsic["llvm.cttz", Int, has_side_effect=False](val, False)
@always_inline("nodebug")
fn countr_zero[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Counts the per-element number of trailing zeros in a SIMD vector.
    Parameters:
        type: `dtype` used for the computation.
        simd_width: SIMD width used for the computation.
    Constraints:
        The element type of the input vector must be integral.
    Args:
        val: The input value.
    Returns:
        A SIMD value where the element at position `i` contains the number of
        trailing zeros at position `i` of the input value.
    """
    constrained[type.is_integral(), "must be integral"]()
    # False = is_zero_poison off: zero lanes yield the element bit width.
    return llvm_intrinsic["llvm.cttz", __type_of(val), has_side_effect=False](
        val, False
    )
# ===----------------------------------------------------------------------===#
# bit_reverse
# ===----------------------------------------------------------------------===#
# TODO: implement bit_reverse for Int type
@always_inline("nodebug")
fn bit_reverse[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Element-wise reverses the bitpattern of a SIMD vector of integer values.
    Parameters:
        type: `dtype` used for the computation.
        simd_width: SIMD width used for the computation.
    Args:
        val: The input value.
    Constraints:
        The element type of the input vector must be integral.
    Returns:
        A SIMD value where the element at position `i` has a reversed bitpattern
        of an integer value of the element at position `i` of the input value.
    """
    constrained[type.is_integral(), "must be integral"]()
    # Lowers directly to the llvm.bitreverse intrinsic per element.
    return llvm_intrinsic[
        "llvm.bitreverse", __type_of(val), has_side_effect=False
    ](val)
# ===----------------------------------------------------------------------===#
# byte_swap
# ===----------------------------------------------------------------------===#
# TODO: implement byte_swap for Int type
@always_inline("nodebug")
fn byte_swap[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Byte-swaps a SIMD vector of integer values with an even number of bytes.
    Byte swap an integer value or vector of integer values with an even number
    of bytes (positive multiple of 16 bits). This is equivalent to `llvm.bswap`
    intrinsic that has the following semantics:
    The `llvm.bswap.i16` intrinsic returns an i16 value that has the high and
    low byte of the input i16 swapped. Similarly, the `llvm.bswap.i32` intrinsic
    returns an i32 value that has the four bytes of the input i32 swapped, so
    that if the input bytes are numbered 0, 1, 2, 3 then the returned i32 will
    have its bytes in 3, 2, 1, 0 order. The `llvm.bswap.i48`, `llvm.bswap.i64`
    and other intrinsics extend this concept to additional even-byte lengths (6
    bytes, 8 bytes and more, respectively).
    Parameters:
        type: `dtype` used for the computation.
        simd_width: SIMD width used for the computation.
    Constraints:
        The element type of the input vector must be an integral type with an
        even number of bytes (Bitwidth % 16 == 0).
    Args:
        val: The input value.
    Returns:
        A SIMD value where the element at position `i` is the value of the
        element at position `i` of the input value with its bytes swapped.
    """
    # NOTE(review): only integrality is checked here; the even-byte-width
    # constraint documented above is enforced by LLVM, not by this function.
    constrained[type.is_integral(), "must be integral"]()
    return llvm_intrinsic["llvm.bswap", __type_of(val), has_side_effect=False](
        val
    )
# ===----------------------------------------------------------------------===#
# pop_count
# ===----------------------------------------------------------------------===#
# TODO: implement pop_count for Int type
@always_inline("nodebug")
fn pop_count[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Counts the number of bits set in a SIMD vector of integer values.
    Parameters:
        type: `dtype` used for the computation.
        simd_width: SIMD width used for the computation.
    Constraints:
        The element type of the input vector must be integral.
    Args:
        val: The input value.
    Returns:
        A SIMD value where the element at position `i` contains the number of
        bits set in the element at position `i` of the input value.
    """
    constrained[type.is_integral(), "must be integral"]()
    # Lowers to llvm.ctpop per element.
    return llvm_intrinsic["llvm.ctpop", __type_of(val), has_side_effect=False](
        val
    )
# ===----------------------------------------------------------------------===#
# bit_not
# ===----------------------------------------------------------------------===#
# TODO: implement bit_not for Int type
@always_inline("nodebug")
fn bit_not[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Performs a bitwise NOT operation on an SIMD vector of integer values.
    Parameters:
        type: `dtype` used for the computation.
        simd_width: SIMD width used for the computation.
    Constraints:
        The element type of the input vector must be integral.
    Args:
        val: The input value.
    Returns:
        A SIMD value where the element at position `i` is computed as a bitwise
        NOT of the integer value at position `i` of the input value.
    """
    constrained[type.is_integral(), "must be integral"]()
    # NOT is implemented as XOR with an all-ones vector (-1 in two's
    # complement).
    var neg_one = SIMD[type, simd_width].splat(-1)
    return __mlir_op.`pop.xor`(val.value, neg_one.value)
# ===----------------------------------------------------------------------===#
# bit_width
# ===----------------------------------------------------------------------===#
@always_inline
fn bit_width(val: Int) -> Int:
    """Computes the minimum number of bits required to represent the integer.
    Args:
        val: The input value.
    Returns:
        The number of bits required to represent the integer.
    """
    alias bitwidth = bitwidthof[Int]()
    # For negatives, measure the one's complement so the sign bit itself is
    # not counted — same convention as the SIMD overload below.
    var magnitude = ~val if val < 0 else val
    return bitwidth - countl_zero(magnitude)
@always_inline
fn bit_width[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Computes the minimum number of bits required to represent the SIMD vector
    of integer values.
    Parameters:
        type: `dtype` used for the computation.
        simd_width: SIMD width used for the computation.
    Constraints:
        The element type of the input vector must be integral.
    Args:
        val: The input value.
    Returns:
        A SIMD value where the element at position `i` equals to the number of
        bits required to represent the integer at position `i` of the input
        value.
    """
    constrained[type.is_integral(), "must be integral"]()
    alias bitwidth = bitwidthof[type]()
    @parameter
    if type.is_unsigned():
        return bitwidth - countl_zero(val)
    else:
        # Signed: count leading zeros of the one's complement for negative
        # lanes (so the sign bit is not counted), selected per lane.
        var leading_zero_pos = countl_zero(val)
        var leading_zero_neg = countl_zero(bit_not(val))
        var leading_zero = (val < 0).select(leading_zero_neg, leading_zero_pos)
        return bitwidth - leading_zero
# ===----------------------------------------------------------------------===#
# is_power_of_two
# ===----------------------------------------------------------------------===#
# reference: https://en.cppreference.com/w/cpp/numeric/has_single_bit
@always_inline
fn is_power_of_two(val: Int) -> Bool:
    """Checks if the input value is a power of 2.
    Args:
        val: The input value.
    Returns:
        True if the input value is a power of 2, False otherwise.
    """
    # A power of two has exactly one bit set: clearing the lowest set bit
    # (val & (val - 1)) leaves zero, and val itself must be non-zero.
    return val != 0 and (val & (val - 1)) == 0
@always_inline
fn is_power_of_two[
type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[DType.bool, simd_width]:
"""Checks if the input value is a power of 2 for each element of a SIMD vector.
Parameters:
type: `dtype` used for the computation.
simd_width: SIMD width used for the computation.
Constraints:
The element type of the input vector must be integral.
Args:
val: The input value.
Returns:
A SIMD value where the element at position `i` is True if the integer at
position `i` of the input value is a power of 2, False otherwise.
"""
constrained[type.is_integral(), "must be integral"]()
return (val != 0) & (val & (val - 1) == 0)
# ===----------------------------------------------------------------------===#
# bit_ceil
# ===----------------------------------------------------------------------===#
# reference: https://en.cppreference.com/w/cpp/numeric/bit_ceil
@always_inline("nodebug")
fn bit_ceil(val: Int) -> Int:
    """Computes the smallest power of 2 that is greater than or equal to the
    input value. Any integral value less than or equal to 1 will be ceiled to 1.
    Args:
        val: The input value.
    Returns:
        The smallest power of 2 that is greater than or equal to the input value.
    """
    if val <= 1:
        return 1
    if is_power_of_two(val):
        return val
    # bit_width(val - 1) is the exponent of the next power of two above val.
    return 1 << bit_width(val - 1)
@always_inline("nodebug")
fn bit_ceil[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Computes the smallest power of 2 that is greater than or equal to the
    input value for each element of a SIMD vector. Any integral value less than
    or equal to 1 will be ceiled to 1.

    Parameters:
        type: `dtype` used for the computation.
        simd_width: SIMD width used for the computation.

    Constraints:
        The element type of the input vector must be integral.

    Args:
        val: The input value.

    Returns:
        A SIMD value where the element at position `i` is the smallest power of 2
        that is greater than or equal to the integer at position `i` of the input
        value.
    """
    constrained[type.is_integral(), "must be integral"]()
    alias unit = SIMD[type, simd_width].splat(1)
    # Lanewise: 2 ** bit_width(val - 1) rounds up to the next power of two;
    # lanes <= 1 are forced to 1 by the select.
    var ceiled = unit << bit_width(val - unit)
    return (val > 1).select(ceiled, unit)
# ===----------------------------------------------------------------------===#
# bit_floor
# ===----------------------------------------------------------------------===#
# reference: https://en.cppreference.com/w/cpp/numeric/bit_floor
@always_inline("nodebug")
fn bit_floor(val: Int) -> Int:
    """Computes the largest power of 2 that is less than or equal to the input
    value. Any integral value less than or equal to 0 will be floored to 0.

    Args:
        val: The input value.

    Returns:
        The largest power of 2 that is less than or equal to the input value.
    """
    # Non-positive inputs floor to 0 by definition.
    if val <= 0:
        return 0
    # The highest set bit of a positive value sits at index bit_width(val) - 1.
    var msb_index = bit_width(val) - 1
    return 1 << msb_index
@always_inline("nodebug")
fn bit_floor[
    type: DType, simd_width: Int
](val: SIMD[type, simd_width]) -> SIMD[type, simd_width]:
    """Computes the largest power of 2 that is less than or equal to the input
    value for each element of a SIMD vector. Any integral value less than or
    equal to 0 will be floored to 0.

    Parameters:
        type: `dtype` used for the computation.
        simd_width: SIMD width used for the computation.

    Constraints:
        The element type of the input vector must be integral.

    Args:
        val: The input value.

    Returns:
        A SIMD value where the element at position `i` is the largest power of 2
        that is less than or equal to the integer at position `i` of the input
        value.
    """
    # Fix: the diagnostic previously claimed "must be integral and unsigned",
    # but only integrality is checked (signed lanes are handled: lanes <= 0
    # floor to 0 via the select below). Message now matches the check and the
    # sibling `bit_ceil` overload.
    constrained[type.is_integral(), "must be integral"]()
    alias zeros = SIMD[type, simd_width].splat(0)
    # For positive lanes, the largest power of 2 <= val has its single set
    # bit at index bit_width(val) - 1.
    return (val > 0).select(1 << (bit_width(val) - 1), zeros)
# ===----------------------------------------------------------------------===#
# rotate_bits_left
# ===----------------------------------------------------------------------===#
@always_inline
fn rotate_bits_left[shift: Int](x: Int) -> Int:
    """Shifts the bits of an input to the left by `shift` bits (with
    wrap-around).

    Constraints:
        `-size <= shift < size`

    Parameters:
        shift: The number of bit positions by which to rotate the bits of the
            integer to the left (with wrap-around).

    Args:
        x: The input value.

    Returns:
        The input rotated to the left by `shift` elements (with wrap-around).
    """
    # NOTE(review): sizeof[Int]() is in *bytes*, not bits, so this only
    # admits shifts in [-8, 8) on a 64-bit Int. Likely intended the bit
    # width of Int — TODO confirm upstream intent.
    constrained[
        shift >= -sizeof[Int]() and shift < sizeof[Int](),
        "Constraints: -sizeof[Int]() <= shift < sizeof[Int]()",
    ]()

    @parameter
    if shift == 0:
        # Rotation by zero is the identity.
        return x
    elif shift < 0:
        # A negative left rotation is a positive right rotation.
        return rotate_bits_right[-shift](x)
    else:
        # llvm.fshl (funnel shift left) with both data operands equal to `x`
        # performs a left rotation by the third operand.
        return llvm_intrinsic["llvm.fshl", Int, has_side_effect=False](
            x, x, shift
        )
fn rotate_bits_left[
    shift: Int, type: DType, width: Int
](x: SIMD[type, width]) -> SIMD[type, width]:
    """Shifts bits to the left by `shift` positions (with wrap-around) for each
    element of a SIMD vector.

    Constraints:
        `-size <= shift < size`. (Negative shifts are delegated to
        `rotate_bits_right`; note that unlike the `Int` overload, no
        compile-time check enforces the bound here.)

    Parameters:
        shift: The number of positions by which to rotate the bits of each
            element of a SIMD vector to the left (with wrap-around).
        type: The `dtype` of the input and output SIMD vector.
            Constraints: must be integral and unsigned.
        width: The width of the input and output SIMD vector.

    Args:
        x: SIMD vector to perform the operation on.

    Returns:
        The SIMD vector with each element's bits shifted to the left by `shift`
        bits (with wrap-around).
    """
    constrained[type.is_unsigned(), "Only unsigned types can be rotated."]()

    @parameter
    if shift == 0:
        # Rotation by zero is the identity.
        return x
    elif shift < 0:
        # A negative left rotation is a positive right rotation.
        return rotate_bits_right[-shift, type, width](x)
    else:
        # Lanewise rotate-left via the funnel-shift intrinsic with both data
        # operands equal; the shift amount is splatted across all lanes.
        return llvm_intrinsic["llvm.fshl", __type_of(x), has_side_effect=False](
            x, x, SIMD[type, width](shift)
        )
# ===----------------------------------------------------------------------===#
# rotate_bits_right
# ===----------------------------------------------------------------------===#
@always_inline
fn rotate_bits_right[shift: Int](x: Int) -> Int:
    """Shifts the bits of an input to the right by `shift` bits (with
    wrap-around).

    Constraints:
        `-size <= shift < size`

    Parameters:
        shift: The number of bit positions by which to rotate the bits of the
            integer to the right (with wrap-around).

    Args:
        x: The input value.

    Returns:
        The input rotated to the right by `shift` elements (with wrap-around).
    """
    # NOTE(review): sizeof[Int]() is in *bytes*, not bits, so this only
    # admits shifts in [-8, 8) on a 64-bit Int. Likely intended the bit
    # width of Int — TODO confirm upstream intent.
    constrained[
        shift >= -sizeof[Int]() and shift < sizeof[Int](),
        "Constraints: -sizeof[Int]() <= shift < sizeof[Int]()",
    ]()

    @parameter
    if shift == 0:
        # Rotation by zero is the identity.
        return x
    elif shift < 0:
        # A negative right rotation is a positive left rotation.
        return rotate_bits_left[-shift](x)
    else:
        # llvm.fshr (funnel shift right) with both data operands equal to `x`
        # performs a right rotation by the third operand.
        return llvm_intrinsic["llvm.fshr", Int, has_side_effect=False](
            x, x, shift
        )
fn rotate_bits_right[
    shift: Int,
    type: DType,
    width: Int,
](x: SIMD[type, width]) -> SIMD[type, width]:
    """Shifts bits to the right by `shift` positions (with wrap-around) for each
    element of a SIMD vector.

    Constraints:
        `-size <= shift < size`. (Negative shifts are delegated to
        `rotate_bits_left`; note that unlike the `Int` overload, no
        compile-time check enforces the bound here.)

    Parameters:
        shift: The number of positions by which to rotate the bits of each
            element of a SIMD vector to the right (with wrap-around).
        type: The `dtype` of the input and output SIMD vector.
            Constraints: must be integral and unsigned.
        width: The width of the input and output SIMD vector.

    Args:
        x: SIMD vector to perform the operation on.

    Returns:
        The SIMD vector with each element's bits shifted to the right by `shift`
        bits (with wrap-around).
    """
    constrained[type.is_unsigned(), "Only unsigned types can be rotated."]()

    @parameter
    if shift == 0:
        # Rotation by zero is the identity.
        return x
    elif shift < 0:
        # A negative right rotation is a positive left rotation.
        return rotate_bits_left[-shift, type, width](x)
    else:
        # Lanewise rotate-right via the funnel-shift intrinsic with both data
        # operands equal; the shift amount is splatted across all lanes.
        return llvm_intrinsic["llvm.fshr", __type_of(x), has_side_effect=False](
            x, x, SIMD[type, width](shift)
        )
| mojo/stdlib/src/bit/bit.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the bit package."""
from .bit import (
countl_zero,
countr_zero,
bit_reverse,
byte_swap,
pop_count,
bit_not,
bit_width,
rotate_bits_left,
rotate_bits_right,
is_power_of_two,
bit_ceil,
bit_floor,
)
| mojo/stdlib/src/bit/__init__.mojo | false |
<filename>mojo/stdlib/src/builtin/anytype.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines the `AnyType` trait.
These are Mojo built-ins, so you don't need to import them.
"""
# ===----------------------------------------------------------------------=== #
# AnyType
# ===----------------------------------------------------------------------=== #
trait AnyType:
    """The AnyType trait describes a type that has a destructor.

    In Mojo, a type that provides a destructor indicates to the language that it
    is an object with a lifetime whose destructor needs to be called whenever
    an instance of the object reaches the end of its lifetime. Hence, only
    non-trivial types may have destructors.

    Any composition of types that have lifetimes is also an object with a
    lifetime, and the resultant type receives a destructor regardless of whether
    the user explicitly defines one.

    All types pessimistically require a destructor when used in generic
    functions. Hence, all Mojo traits are considered to inherit from
    AnyType, providing a default no-op destructor implementation for types
    that may need them.

    Example implementing the `AnyType` trait on `Foo` that frees the
    allocated memory:

    ```mojo
    @value
    struct Foo(AnyType):
        var p: UnsafePointer[Int]
        var size: Int

        fn __init__(inout self, size: Int):
            self.p = UnsafePointer[Int].alloc(size)
            self.size = size

        fn __del__(owned self):
            print("--freeing allocated memory--")
            self.p.free()
    ```
    """

    fn __del__(owned self, /):
        """Destroy the contained value.

        The destructor receives an owned value and is expected to perform any
        actions needed to end the lifetime of the object. In the simplest case,
        this is nothing, and the language treats the object as being dead at the
        end of this function.
        """
        ...
| mojo/stdlib/src/builtin/anytype.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the `bin()` function.
These are Mojo built-ins, so you don't need to import them.
"""
# Need this until we have constraints to stop the compiler from matching this
# directly to bin[type: DType](num: Scalar[type]).
@always_inline("nodebug")
fn bin(b: Scalar[DType.bool], /) -> String:
    """Returns the binary representation of a scalar bool.

    Args:
        b: A scalar bool value.

    Returns:
        The binary string representation of b.
    """
    # Widen the bool to an Int first, then reuse the integral formatter.
    var as_int = int(b)
    return bin(as_int)
fn bin[type: DType](num: Scalar[type], /) -> String:
    """Return the binary string representation of an integral value.

    ```mojo
    print(bin(123))
    print(bin(-123))
    ```

    ```plaintext
    '0b1111011'
    '-0b1111011'
    ```

    Parameters:
        type: The data type of the integral scalar.

    Args:
        num: An integral scalar value.

    Returns:
        The binary string representation of num.
    """
    constrained[type.is_integral(), "Expected integral value"]()
    alias BIN_PREFIX = "0b"

    if num == 0:
        return BIN_PREFIX + "0"

    # TODO: pre-allocate string size when #2194 is resolved
    var result = String()
    # Work on the magnitude; the sign is re-attached at the end.
    var cpy = abs(num)
    # Emit bits least-significant first, then reverse below.
    while cpy > 0:
        result += str(cpy & 1)
        cpy = cpy >> 1
    result = BIN_PREFIX + result[::-1]
    return "-" + result if num < 0 else result
@always_inline("nodebug")
fn bin[T: Indexer](num: T, /) -> String:
    """Returns the binary representation of an indexer type.

    Parameters:
        T: The Indexer type.

    Args:
        num: An indexer value.

    Returns:
        The binary string representation of num.
    """
    # Convert to an index scalar and route through the DType overload.
    var idx = Scalar[DType.index](index(num))
    return bin(idx)
| mojo/stdlib/src/builtin/bin.mojo | false |
<filename>mojo/stdlib/src/builtin/bool.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the Bool class.
These are Mojo built-ins, so you don't need to import them.
"""
from utils._visualizers import lldb_formatter_wrapping_type
from collections import Set
# ===----------------------------------------------------------------------=== #
# Boolable
# ===----------------------------------------------------------------------=== #
trait Boolable:
    """The `Boolable` trait describes a type that can be converted to a bool.

    This trait requires the type to implement the `__bool__()` method. For
    example:

    ```mojo
    @value
    struct Foo(Boolable):
        var val: Bool

        fn __bool__(self) -> Bool:
            return self.val
    ```
    """

    fn __bool__(self) -> Bool:
        """Get the boolean representation of the value.

        Returns:
            The boolean representation of the value.
        """
        ...
# ===----------------------------------------------------------------------=== #
# Bool
# ===----------------------------------------------------------------------=== #
@lldb_formatter_wrapping_type
@value
@register_passable("trivial")
struct Bool(
    Stringable,
    ComparableCollectionElement,
    Boolable,
    Intable,
    Indexer,
):
    """The primitive Bool scalar value used in Mojo."""

    var value: __mlir_type.i1
    """The underlying storage of the boolean value."""

    @always_inline("nodebug")
    fn __init__(value: __mlir_type.i1) -> Bool:
        """Construct a Bool value given a __mlir_type.i1 value.

        Args:
            value: The initial __mlir_type.i1 value.

        Returns:
            The constructed Bool value.
        """
        return Self {value: value}

    @always_inline("nodebug")
    fn __init__(value: __mlir_type.`!pop.scalar<bool>`) -> Bool:
        """Construct a Bool value given a `!pop.scalar<bool>` value.

        Args:
            value: The initial value.

        Returns:
            The constructed Bool value.
        """
        return __mlir_op.`pop.cast_to_builtin`[_type = __mlir_type.i1](value)

    @always_inline("nodebug")
    fn __init__[boolable: Boolable](value: boolable) -> Bool:
        """Implicitly convert a Boolable value to a Bool.

        Parameters:
            boolable: The Boolable type.

        Args:
            value: The boolable value.

        Returns:
            The constructed Bool value.
        """
        return value.__bool__()

    @always_inline("nodebug")
    fn __bool__(self) -> Bool:
        """Convert to Bool.

        Returns:
            This value.
        """
        return self

    @always_inline("nodebug")
    fn __mlir_i1__(self) -> __mlir_type.i1:
        """Convert this Bool to __mlir_type.i1.

        This method is a special hook used by the compiler to test boolean
        objects in control flow conditions. It should be implemented by Bool
        but not other general boolean convertible types (they should implement
        `__bool__` instead).

        Returns:
            The underlying value for the Bool.
        """
        return self.value

    @always_inline("nodebug")
    fn _as_scalar_bool(self) -> __mlir_type.`!pop.scalar<bool>`:
        # Internal helper: widen the i1 storage to the `!pop.scalar<bool>`
        # form expected by the `pop.*` comparison and bitwise ops below.
        return __mlir_op.`pop.cast_from_builtin`[
            _type = __mlir_type.`!pop.scalar<bool>`
        ](self.value)

    fn __str__(self) -> String:
        """Get the bool as a string.

        Returns:
            A string representation.
        """
        return "True" if self else "False"

    @always_inline("nodebug")
    fn __int__(self) -> Int:
        """Convert this Bool to an integer.

        Returns:
            1 if the Bool is True, 0 otherwise.
        """
        return __mlir_op.`pop.select`[_type=Int](self.value, Int(1), Int(0))

    @always_inline("nodebug")
    fn __index__(self) -> Int:
        """Convert this Bool to an integer for indexing purposes.

        Returns:
            1 if the Bool is True, 0 otherwise.
        """
        return self.__int__()

    @always_inline("nodebug")
    fn __eq__(self, rhs: Bool) -> Bool:
        """Compare this Bool to RHS.

        Performs an equality comparison between the Bool value and the argument.
        This method gets invoked when a user uses the `==` infix operator.

        Args:
            rhs: The rhs value of the equality statement.

        Returns:
            True if the two values match and False otherwise.
        """
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred eq>`](
            self._as_scalar_bool(), rhs._as_scalar_bool()
        )

    @always_inline("nodebug")
    fn __ne__(self, rhs: Bool) -> Bool:
        """Compare this Bool to RHS.

        Performs a non-equality comparison between the Bool value and the
        argument. This method gets invoked when a user uses the `!=` infix
        operator.

        Args:
            rhs: The rhs value of the non-equality statement.

        Returns:
            False if the two values do match and True otherwise.
        """
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred ne>`](
            self._as_scalar_bool(), rhs._as_scalar_bool()
        )

    @always_inline("nodebug")
    fn __lt__(self, rhs: Self) -> Bool:
        """Compare this Bool to RHS using less-than comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            True if self is False and rhs is True.
        """
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred lt>`](
            self._as_scalar_bool(), rhs._as_scalar_bool()
        )

    @always_inline("nodebug")
    fn __le__(self, rhs: Self) -> Bool:
        """Compare this Bool to RHS using less-than-or-equal comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            True unless self is True and rhs is False (i.e. False <= anything,
            True <= True).
        """
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred le>`](
            self._as_scalar_bool(), rhs._as_scalar_bool()
        )

    @always_inline("nodebug")
    fn __gt__(self, rhs: Self) -> Bool:
        """Compare this Bool to RHS using greater-than comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            True if self is True and rhs is False.
        """
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred gt>`](
            self._as_scalar_bool(), rhs._as_scalar_bool()
        )

    @always_inline("nodebug")
    fn __ge__(self, rhs: Self) -> Bool:
        """Compare this Bool to RHS using greater-than-or-equal comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            True unless self is False and rhs is True (i.e. True >= anything,
            False >= False).
        """
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred ge>`](
            self._as_scalar_bool(), rhs._as_scalar_bool()
        )

    # ===-------------------------------------------------------------------===#
    # Bitwise operations
    # ===-------------------------------------------------------------------===#

    @always_inline("nodebug")
    fn __invert__(self) -> Bool:
        """Inverts the Bool value.

        Returns:
            True if the object is false and False otherwise.
        """
        # XOR with the constant `true` flips the bit.
        var true = __mlir_op.`kgen.param.constant`[
            _type = __mlir_type.`!pop.scalar<bool>`,
            value = __mlir_attr.`#pop.simd<true> : !pop.scalar<bool>`,
        ]()
        return __mlir_op.`pop.xor`(self._as_scalar_bool(), true)

    @always_inline("nodebug")
    fn __and__(self, rhs: Bool) -> Bool:
        """Returns `self & rhs`.

        Bitwise and's the Bool value with the argument. This method gets invoked
        when a user uses the `and` infix operator.

        Args:
            rhs: The right hand side of the `and` statement.

        Returns:
            `self & rhs`.
        """
        return __mlir_op.`pop.and`(
            self._as_scalar_bool(), rhs._as_scalar_bool()
        )

    @always_inline("nodebug")
    fn __iand__(inout self, rhs: Bool):
        """Computes `self & rhs` and store the result in `self`.

        Args:
            rhs: The right hand side of the `and` statement.
        """
        self = self & rhs

    @always_inline("nodebug")
    fn __rand__(self, lhs: Bool) -> Bool:
        """Returns `lhs & self`.

        Args:
            lhs: The left hand side of the `and` statement.

        Returns:
            `lhs & self`.
        """
        return lhs & self

    @always_inline("nodebug")
    fn __or__(self, rhs: Bool) -> Bool:
        """Returns `self | rhs`.

        Bitwise or's the Bool value with the argument. This method gets invoked
        when a user uses the `or` infix operator.

        Args:
            rhs: The right hand side of the `or` statement.

        Returns:
            `self | rhs`.
        """
        return __mlir_op.`pop.or`(self._as_scalar_bool(), rhs._as_scalar_bool())

    @always_inline("nodebug")
    fn __ior__(inout self, rhs: Bool):
        """Computes `self | rhs` and store the result in `self`.

        Args:
            rhs: The right hand side of the `or` statement.
        """
        self = self | rhs

    @always_inline("nodebug")
    fn __ror__(self, lhs: Bool) -> Bool:
        """Returns `lhs | self`.

        Args:
            lhs: The left hand side of the `or` statement.

        Returns:
            `lhs | self`.
        """
        return lhs | self

    @always_inline("nodebug")
    fn __xor__(self, rhs: Bool) -> Bool:
        """Returns `self ^ rhs`.

        Bitwise Xor's the Bool value with the argument. This method gets invoked
        when a user uses the `^` infix operator.

        Args:
            rhs: The right hand side of the `xor` statement.

        Returns:
            `self ^ rhs`.
        """
        return __mlir_op.`pop.xor`(
            self._as_scalar_bool(), rhs._as_scalar_bool()
        )

    @always_inline("nodebug")
    fn __ixor__(inout self, rhs: Bool):
        """Computes `self ^ rhs` and stores the result in `self`.

        Args:
            rhs: The right hand side of the `xor` statement.
        """
        self = self ^ rhs

    @always_inline("nodebug")
    fn __rxor__(self, lhs: Bool) -> Bool:
        """Returns `lhs ^ self`.

        Args:
            lhs: The left hand side of the `xor` statement.

        Returns:
            `lhs ^ self`.
        """
        return lhs ^ self

    @always_inline("nodebug")
    fn __neg__(self) -> Int:
        """Defines the unary `-` operation.

        Returns:
            0 for -False and -1 for -True.
        """
        # `index.casts` sign-extends the i1, so True maps to -1, per the
        # docstring above.
        return __mlir_op.`index.casts`[_type = __mlir_type.index](self.value)
# ===----------------------------------------------------------------------=== #
# bool
# ===----------------------------------------------------------------------=== #
@always_inline
fn bool(value: None) -> Bool:
    """Get the bool representation of the `None` type.

    Args:
        value: The object to get the bool representation of.

    Returns:
        The bool representation of the object, which is always False for
        `None`.
    """
    # `None` is unconditionally falsy; the argument carries no information.
    return False
@always_inline
fn bool[T: Boolable](value: T) -> Bool:
    """Get the bool representation of the object.

    Parameters:
        T: The type of the object.

    Args:
        value: The object to get the bool representation of.

    Returns:
        The bool representation of the object.
    """
    # Delegate to the trait's conversion hook.
    var truth = value.__bool__()
    return truth
# ===----------------------------------------------------------------------=== #
# any
# ===----------------------------------------------------------------------=== #
# TODO: Combine these into Iterators over Boolable elements
fn any[T: BoolableCollectionElement](list: List[T]) -> Bool:
    """Checks if **any** element in the list is truthy.

    Parameters:
        T: The type of elements to check.

    Args:
        list: The list to check.

    Returns:
        `True` if **any** element in the list is truthy, `False` otherwise.
    """
    # Scan until the first truthy element; an empty list yields False.
    var found = False
    for element in list:
        if element[]:
            found = True
            break
    return found
fn any[T: BoolableKeyElement](set: Set[T]) -> Bool:
    """Checks if **any** element in the set is truthy.

    Parameters:
        T: The type of elements to check.

    Args:
        set: The set to check.

    Returns:
        `True` if **any** element in the set is truthy, `False` otherwise.
    """
    # Scan until the first truthy element; an empty set yields False.
    var found = False
    for element in set:
        if element[]:
            found = True
            break
    return found
fn any(value: SIMD) -> Bool:
    """Checks if **any** element in the simd vector is truthy.

    Args:
        value: The simd vector to check.

    Returns:
        `True` if **any** element in the simd vector is truthy, `False`
        otherwise.
    """
    # Convert every lane to bool, then OR-reduce across lanes.
    var lanes = value.cast[DType.bool]()
    return lanes.reduce_or()
# ===----------------------------------------------------------------------=== #
# all
# ===----------------------------------------------------------------------=== #
# TODO: Combine these into Iterators over Boolable elements
fn all[T: BoolableCollectionElement](list: List[T]) -> Bool:
    """Checks if **all** elements in the list are truthy.

    Parameters:
        T: The type of elements to check.

    Args:
        list: The list to check.

    Returns:
        `True` if **all** elements in the list are truthy, `False` otherwise.
    """
    # Scan until the first falsy element; an empty list yields True.
    var holds = True
    for element in list:
        if not element[]:
            holds = False
            break
    return holds
fn all[T: BoolableKeyElement](set: Set[T]) -> Bool:
    """Checks if **all** elements in the set are truthy.

    Parameters:
        T: The type of elements to check.

    Args:
        set: The set to check.

    Returns:
        `True` if **all** elements in the set are truthy, `False` otherwise.
    """
    # Scan until the first falsy element; an empty set yields True.
    var holds = True
    for element in set:
        if not element[]:
            holds = False
            break
    return holds
fn all(value: SIMD) -> Bool:
    """Checks if **all** elements in the simd vector are truthy.

    Args:
        value: The simd vector to check.

    Returns:
        `True` if **all** elements in the simd vector are truthy, `False`
        otherwise.
    """
    # Convert every lane to bool, then AND-reduce across lanes.
    var lanes = value.cast[DType.bool]()
    return lanes.reduce_and()
| mojo/stdlib/src/builtin/bool.mojo | false |
<filename>mojo/stdlib/src/builtin/breakpoint.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""This module includes the builtin breakpoint function."""
from sys import breakpointhook
@always_inline("nodebug")
fn breakpoint():
    """Cause an execution trap with the intention of requesting the attention
    of a debugger."""
    # Delegates to the platform-specific hook imported from `sys`.
    breakpointhook()
| mojo/stdlib/src/builtin/breakpoint.mojo | false |
<filename>mojo/stdlib/src/builtin/builtin_list.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the ListLiteral class.
These are Mojo built-ins, so you don't need to import them.
"""
from memory import Reference, UnsafePointer, LegacyPointer
from memory.unsafe_pointer import destroy_pointee
# ===----------------------------------------------------------------------===#
# ListLiteral
# ===----------------------------------------------------------------------===#
struct ListLiteral[*Ts: Movable](Sized, Movable):
    """The type of a literal heterogeneous list expression.

    A list consists of zero or more values, separated by commas.

    Parameters:
        Ts: The type of the elements.
    """

    var storage: Tuple[Ts]
    """The underlying storage for the list."""

    @always_inline("nodebug")
    fn __init__(inout self, owned *args: *Ts):
        """Construct the list literal from the given values.

        Args:
            args: The init values.
        """
        self.storage = Tuple(storage=args^)

    fn __moveinit__(inout self, owned existing: Self):
        """Move construct the list.

        Args:
            existing: The value to move from.
        """
        self.storage = existing.storage^

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        """Get the list length.

        Returns:
            The length of this ListLiteral.
        """
        return len(self.storage)

    @always_inline("nodebug")
    fn get[i: Int, T: Movable](self) -> ref [__lifetime_of(self)] T:
        """Get a list element at the given index.

        Parameters:
            i: The element index.
            T: The element type.

        Returns:
            The element at the given index.
        """
        # `rebind` reinterprets the stored reference as a reference to the
        # requested type `T`; the caller must supply the type actually stored
        # at position `i`.
        return rebind[Reference[T, False, __lifetime_of(self)]](
            Reference(self.storage[i])
        )[]
# ===----------------------------------------------------------------------===#
# VariadicList / VariadicListMem
# ===----------------------------------------------------------------------===#
@value
struct _VariadicListIter[type: AnyTrivialRegType]:
    """Const Iterator for VariadicList.

    Parameters:
        type: The type of the elements in the list.
    """

    # Next position to read from `src`.
    var index: Int
    # The list being iterated (trivially copyable, so held by value).
    var src: VariadicList[type]

    fn __next__(inout self) -> type:
        # Advance first, then return the element at the previous position.
        self.index += 1
        return self.src[self.index - 1]

    fn __len__(self) -> Int:
        # Number of elements remaining to iterate.
        return len(self.src) - self.index
@register_passable("trivial")
struct VariadicList[type: AnyTrivialRegType](Sized):
    """A utility class to access variadic function arguments. Provides a "list"
    view of the function argument so that the size of the argument list and each
    individual argument can be accessed.

    Parameters:
        type: The type of the elements in the list.
    """

    alias _mlir_type = __mlir_type[`!kgen.variadic<`, type, `>`]

    var value: Self._mlir_type
    """The underlying storage for the variadic list."""

    alias IterType = _VariadicListIter[type]

    @always_inline
    fn __init__(inout self, *value: type):
        """Constructs a VariadicList from a variadic list of arguments.

        Args:
            value: The variadic argument list to construct the variadic list
              with.
        """
        # The variadic pack is itself a VariadicList, so assign it through.
        self = value

    @always_inline
    fn __init__(inout self, value: Self._mlir_type):
        """Constructs a VariadicList from a variadic argument type.

        Args:
            value: The variadic argument to construct the list with.
        """
        self.value = value

    @always_inline
    fn __len__(self) -> Int:
        """Gets the size of the list.

        Returns:
            The number of elements on the variadic list.
        """
        return __mlir_op.`pop.variadic.size`(self.value)

    @always_inline
    fn __getitem__(self, idx: Int) -> type:
        """Gets a single element on the variadic list.

        Args:
            idx: The index of the element to access on the list.

        Returns:
            The element on the list corresponding to the given index.
        """
        # Note: no bounds checking is performed here.
        return __mlir_op.`pop.variadic.get`(self.value, idx.value)

    @always_inline
    fn __iter__(self) -> Self.IterType:
        """Iterate over the list.

        Returns:
            An iterator to the start of the list.
        """
        return Self.IterType(0, self)
@value
struct _VariadicListMemIter[
    elt_type: AnyType,
    elt_is_mutable: Bool,
    elt_lifetime: AnyLifetime[elt_is_mutable].type,
    list_lifetime: ImmutableLifetime,
]:
    """Iterator for VariadicListMem.

    Parameters:
        elt_type: The type of the elements in the list.
        elt_is_mutable: Whether the elements in the list are mutable.
        elt_lifetime: The lifetime of the elements.
        list_lifetime: The lifetime of the VariadicListMem.
    """

    alias variadic_list_type = VariadicListMem[
        elt_type, elt_is_mutable.value, elt_lifetime
    ]

    # Next position to read from the underlying list.
    var index: Int
    # Immutable reference back to the list being iterated.
    var src: Reference[Self.variadic_list_type, False, list_lifetime]

    fn __next__(inout self) -> Self.variadic_list_type.reference_type:
        self.index += 1
        # TODO: Need to make this return a dereferenced reference, not a
        # reference that must be deref'd by the user.
        # NOTE: Using UnsafePointer here to get lifetimes to match.
        return UnsafePointer.address_of(self.src[][self.index - 1])[]

    fn __len__(self) -> Int:
        # Number of elements remaining to iterate.
        return len(self.src[]) - self.index
# Helper to compute the union of two lifetimes:
# TODO: parametric aliases would be nice.
struct _lit_lifetime_union[
    is_mutable: Bool,
    a: AnyLifetime[is_mutable].type,
    b: AnyLifetime[is_mutable].type,
]:
    # MLIR attribute representing the union of lifetimes `a` and `b`; the
    # result keeps the operands' shared mutability.
    alias result = __mlir_attr[
        `#lit.lifetime.union<`,
        a,
        `,`,
        b,
        `> : !lit.lifetime<`,
        is_mutable.value,
        `>`,
    ]
# Helper to rebind the mutability of a lifetime.
struct _lit_mut_cast[
    is_mutable: Bool,
    operand: AnyLifetime[is_mutable].type,
    result_mutable: Bool,
]:
    # MLIR attribute that is `operand` with its mutability replaced by
    # `result_mutable`.
    # Fix: removed a stray unary `+` in front of `result_mutable.value`; the
    # attribute element should be the raw i1 value, matching how the sibling
    # `_lit_lifetime_union` splices `is_mutable.value`.
    alias result = __mlir_attr[
        `#lit.lifetime.mutcast<`,
        operand,
        `> : !lit.lifetime<`,
        result_mutable.value,
        `>`,
    ]
struct VariadicListMem[
    element_type: AnyType,
    elt_is_mutable: __mlir_type.i1,
    lifetime: __mlir_type[`!lit.lifetime<`, elt_is_mutable, `>`],
](Sized):
    """A utility class to access variadic function arguments of memory-only
    types that may have ownership. It exposes references to the elements in a
    way that can be enumerated. Each element may be accessed with `elt[]`.

    Parameters:
        element_type: The type of the elements in the list.
        elt_is_mutable: True if the elements of the list are mutable for an
            inout or owned argument.
        lifetime: The reference lifetime of the underlying elements.
    """

    # Element accessor type: a Reference whose mutability/lifetime mirror the
    # argument convention of the variadic.
    alias reference_type = Reference[
        element_type, Bool {value: elt_is_mutable}, lifetime
    ]
    alias _mlir_ref_type = Self.reference_type._mlir_type
    # Canonical storage representation; the other conventions (inout/owned)
    # are bitcast to this in the constructors below.
    alias _mlir_type = __mlir_type[
        `!kgen.variadic<`, Self._mlir_ref_type, `, borrow_in_mem>`
    ]

    var value: Self._mlir_type
    """The underlying storage, a variadic list of references to elements of the
    given type."""

    # This is true when the elements are 'owned' - these are destroyed when
    # the VariadicListMem is destroyed.
    var _is_owned: Bool

    # Provide support for borrowed variadic arguments.
    @always_inline
    fn __init__(inout self, value: Self._mlir_type):
        """Constructs a VariadicList from a variadic argument type.

        Args:
            value: The variadic argument to construct the list with.
        """
        self.value = value
        self._is_owned = False

    # Provide support for variadics of *inout* arguments. The reference will
    # automatically be inferred to be mutable, and the !kgen.variadic will have
    # convention=inout.
    alias _inout_variadic_type = __mlir_type[
        `!kgen.variadic<`, Self._mlir_ref_type, `, inout>`
    ]

    @always_inline
    fn __init__(inout self, value: Self._inout_variadic_type):
        """Constructs a VariadicList from a variadic argument type.

        Args:
            value: The variadic argument to construct the list with.
        """
        var tmp = value
        # We need to bitcast different argument conventions to a consistent
        # representation. This is ugly but effective.
        self.value = UnsafePointer.address_of(tmp).bitcast[Self._mlir_type]()[]
        self._is_owned = False

    # Provide support for variadics of *owned* arguments. The reference will
    # automatically be inferred to be mutable, and the !kgen.variadic will have
    # convention=owned_in_mem.
    alias _owned_variadic_type = __mlir_type[
        `!kgen.variadic<`, Self._mlir_ref_type, `, owned_in_mem>`
    ]

    @always_inline
    fn __init__(inout self, value: Self._owned_variadic_type):
        """Constructs a VariadicList from a variadic argument type.

        Args:
            value: The variadic argument to construct the list with.
        """
        var tmp = value
        # We need to bitcast different argument conventions to a consistent
        # representation. This is ugly but effective.
        self.value = UnsafePointer.address_of(tmp).bitcast[Self._mlir_type]()[]
        # Owned arguments transfer ownership: __del__ must destroy elements.
        self._is_owned = True

    @always_inline
    fn __moveinit__(inout self, owned existing: Self):
        """Moves constructor.

        Args:
            existing: The existing VariadicListMem.
        """
        self.value = existing.value
        self._is_owned = existing._is_owned

    @always_inline
    fn __del__(owned self):
        """Destructor that releases elements if owned."""

        # Immutable variadics never own the memory underlying them,
        # microoptimize out a check of _is_owned.
        @parameter
        if not Bool(elt_is_mutable):
            return
        else:
            # If the elements are unowned, just return.
            if not self._is_owned:
                return

            # Otherwise this is a variadic of owned elements, destroy them. We
            # destroy in backwards order to match how arguments are normally torn
            # down when CheckLifetimes is left to its own devices.
            for i in reversed(range(len(self))):
                destroy_pointee(UnsafePointer.address_of(self[i]))

    @always_inline
    fn __len__(self) -> Int:
        """Gets the size of the list.

        Returns:
            The number of elements on the variadic list.
        """
        return __mlir_op.`pop.variadic.size`(self.value)

    @always_inline
    fn __getitem__(
        self, idx: Int
    ) -> ref [
        # The result lifetime unions the element lifetime with an
        # (immutably-cast) lifetime of `self`.
        _lit_lifetime_union[
            Bool {value: elt_is_mutable},
            lifetime,
            # cast mutability of self to match the mutability of the element,
            # since that is what we want to use in the ultimate reference and
            # the union overall doesn't matter.
            _lit_mut_cast[
                False, __lifetime_of(self), Bool {value: elt_is_mutable}
            ].result,
        ].result
    ] element_type:
        """Gets a single element on the variadic list.

        Args:
            idx: The index of the element to access on the list.

        Returns:
            A low-level pointer to the element on the list corresponding to the
            given index.
        """
        return Reference(__mlir_op.`pop.variadic.get`(self.value, idx.value))[]

    fn __iter__(
        self,
    ) -> _VariadicListMemIter[
        element_type,
        Bool {value: elt_is_mutable},
        lifetime,
        __lifetime_of(self),
    ]:
        """Iterate over the list.

        Returns:
            An iterator to the start of the list.
        """
        return _VariadicListMemIter[
            element_type,
            Bool {value: elt_is_mutable},
            lifetime,
            __lifetime_of(self),
        ](0, self)
# ===----------------------------------------------------------------------===#
# _LITRefPackHelper
# ===----------------------------------------------------------------------===#
# Metatype of trait-conforming types; used below as the kind of the
# `element_trait` parameter so packs can be generic over any trait bound.
alias _AnyTypeMetaType = __mlir_type[`!lit.anytrait<`, AnyType, `>`]
@value
struct _LITRefPackHelper[
    is_mutable: __mlir_type.i1,
    lifetime: AnyLifetime[Bool {value: is_mutable}].type,
    address_space: __mlir_type.index,
    element_trait: _AnyTypeMetaType,
    *element_types: element_trait,
]:
    """This struct mirrors the !lit.ref.pack type, and provides aliases and
    methods that are useful for working with it.

    Parameters:
        is_mutable: Whether the pack's references are mutable.
        lifetime: The lifetime of the referenced elements.
        address_space: The address space of the references.
        element_trait: The trait each element type conforms to.
        element_types: The list of types held by the pack.
    """

    # FIX(review): removed a stray leading `+` before `address_space`; the
    # attribute element must be the raw index value, consistent with how
    # `lifetime` and `element_types` are interpolated here.
    alias _mlir_type = __mlir_type[
        `!lit.ref.pack<:variadic<`,
        element_trait,
        `> `,
        element_types,
        `, `,
        lifetime,
        `, `,
        address_space,
        `>`,
    ]

    var storage: Self._mlir_type

    # This is the element_types list lowered to `variadic<type>` type for kgen.
    alias _kgen_element_types = rebind[
        __mlir_type.`!kgen.variadic<!kgen.type>`
    ](Self.element_types)

    # Use variadic_ptr_map to construct the type list of the !kgen.pack that the
    # !lit.ref.pack will lower to. It exposes the pointers introduced by the
    # references.
    alias _variadic_pointer_types = __mlir_attr[
        `#kgen.param.expr<variadic_ptr_map, `,
        Self._kgen_element_types,
        `, 0: index>: !kgen.variadic<!kgen.type>`,
    ]

    # This is the !kgen.pack type with pointer elements.
    alias kgen_pack_with_pointer_type = __mlir_type[
        `!kgen.pack<:variadic<type> `, Self._variadic_pointer_types, `>`
    ]

    # This rebinds `in_pack` to the equivalent `!kgen.pack` with kgen pointers.
    fn get_as_kgen_pack(self) -> Self.kgen_pack_with_pointer_type:
        return rebind[Self.kgen_pack_with_pointer_type](self.storage)

    # This is the `!kgen.pack` type that happens if one loads all the elements
    # of the pack.
    alias loaded_kgen_pack_type = __mlir_type[
        `!kgen.pack<:variadic<type> `, Self._kgen_element_types, `>`
    ]

    # This returns the stored KGEN pack after loading all of the elements.
    # FIXME(37129): This doesn't actually work because vtables aren't getting
    # removed from TypeConstants correctly.
    fn get_loaded_kgen_pack(self) -> Self.loaded_kgen_pack_type:
        return rebind[Self.loaded_kgen_pack_type](
            __mlir_op.`kgen.pack.load`(self.get_as_kgen_pack())
        )
# ===----------------------------------------------------------------------===#
# VariadicPack
# ===----------------------------------------------------------------------===#
@register_passable
struct VariadicPack[
    elt_is_mutable: __mlir_type.i1,
    lifetime: __mlir_type[`!lit.lifetime<`, elt_is_mutable, `>`],
    element_trait: _AnyTypeMetaType,
    *element_types: element_trait,
](Sized):
    """A utility class to access variadic pack arguments and provide an API for
    doing things with them.

    Parameters:
        elt_is_mutable: True if the elements of the list are mutable for an
            inout or owned argument pack.
        lifetime: The reference lifetime of the underlying elements.
        element_trait: The trait that each element of the pack conforms to.
        element_types: The list of types held by the argument pack.
    """

    alias _mlir_type = __mlir_type[
        `!lit.ref.pack<:variadic<`,
        element_trait,
        `> `,
        element_types,
        `, `,
        lifetime,
        `>`,
    ]

    # The underlying !lit.ref.pack representation.
    var _value: Self._mlir_type
    # True when the pack owns its elements; __del__ destroys them if so.
    var _is_owned: Bool

    @always_inline
    fn __init__(inout self, value: Self._mlir_type, is_owned: Bool):
        """Constructs a VariadicPack from the internal representation.

        Args:
            value: The argument to construct the pack with.
            is_owned: Whether this is an 'owned' pack or 'inout'/'borrowed'.
        """
        self._value = value
        self._is_owned = is_owned

    @always_inline
    fn __del__(owned self):
        """Destructor that releases elements if owned."""

        # Immutable variadics never own the memory underlying them,
        # microoptimize out a check of _is_owned.
        @parameter
        if not Bool(elt_is_mutable):
            return
        else:
            # If the elements are unowned, just return.
            if not self._is_owned:
                return

            alias len = Self.__len__()

            @parameter
            fn destroy_elt[i: Int]():
                # destroy the elements in reverse order.
                destroy_pointee(UnsafePointer.address_of(self[len - i - 1]))

            unroll[destroy_elt, len]()

    @always_inline
    @staticmethod
    fn __len__() -> Int:
        """Return the VariadicPack length.

        Returns:
            The number of elements in the variadic pack.
        """

        # The size is a parameter expression, so it is computed at compile
        # time via the alias below.
        @parameter
        fn variadic_size(
            x: __mlir_type[`!kgen.variadic<`, element_trait, `>`]
        ) -> Int:
            return __mlir_op.`pop.variadic.size`(x)

        alias result = variadic_size(element_types)
        return result

    @always_inline
    fn __len__(self) -> Int:
        """Return the VariadicPack length.

        Returns:
            The number of elements in the variadic pack.
        """
        return Self.__len__()

    @always_inline
    fn __getitem__[
        index: Int
    ](self) -> ref [Self.lifetime] element_types[index.value]:
        """Return a reference to an element of the pack.

        Parameters:
            index: The element of the pack to return.

        Returns:
            A reference to the element. The Reference's mutability follows the
            mutability of the pack argument convention.
        """
        var ref_elt = __mlir_op.`lit.ref.pack.extract`[index = index.value](
            self._value
        )

        # Rebind the !lit.ref to agree on the element type. This is needed
        # because we're getting a low level rebind to AnyType when the
        # element_types[index] expression is erased to AnyType for Reference.
        alias result_ref = Reference[
            element_types[index.value],
            Bool {value: Self.elt_is_mutable},
            Self.lifetime,
        ]
        return Reference(rebind[result_ref._mlir_type](ref_elt))[]

    @always_inline
    fn each[func: fn[T: element_trait] (T) capturing -> None](self):
        """Apply a function to each element of the pack in order. This applies
        the specified function (which must be parametric on the element type) to
        each element of the pack, from the first element to the last, passing
        in each element as a borrowed argument.

        Parameters:
            func: The function to apply to each element.
        """

        @parameter
        fn unrolled[i: Int]():
            func(self[i])

        unroll[unrolled, Self.__len__()]()

    @always_inline
    fn each_idx[
        func: fn[idx: Int, T: element_trait] (T) capturing -> None
    ](self):
        """Apply a function to each element of the pack in order. This applies
        the specified function (which must be parametric on the element type) to
        each element of the pack, from the first element to the last, passing
        in each element as a borrowed argument.

        Parameters:
            func: The function to apply to each element.
        """

        # Same as `each`, but the callback also receives the element index.
        @parameter
        fn unrolled[i: Int]():
            func[i, element_types[i.value]](self[i])

        unroll[unrolled, Self.__len__()]()
| mojo/stdlib/src/builtin/builtin_list.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements slice.
These are Mojo built-ins, so you don't need to import them.
"""
from sys.intrinsics import _mlirtype_is_eq
@always_inline("nodebug")
fn _int_max_value() -> Int:
    """Sentinel end index that stands in for a missing (`None`) slice end.

    # FIXME: The `slice` type should have the concept of `None` indices; until
    # then a very large end index behaves the same as a `None` end index.

    Returns:
        The sentinel value, `Int32.MAX` widened to `Int`.
    """
    var sentinel = Int32.MAX
    return int(sentinel)
@always_inline("nodebug")
fn _default_or[T: AnyTrivialRegType](value: T, default: Int) -> Int:
    # Returns `value` as an Int when T is Int; otherwise requires (at compile
    # time) that T is NoneType and returns `default`.
    # TODO: Handle `__index__` for other types when we have traits!
    @parameter
    if _mlirtype_is_eq[T, Int]():
        return __mlir_op.`kgen.rebind`[_type=Int](value)
    else:
        # Any T other than Int or NoneType is rejected at compile time.
        __mlir_op.`kgen.param.assert`[
            cond = (_mlirtype_is_eq[T, NoneType]()).__mlir_i1__(),
            message = "expected Int or NoneType".value,
        ]()
        return default
@register_passable("trivial")
struct Slice(Stringable, EqualityComparable):
    """A slice expression, as produced by `[a:b:c]` subscript syntax.

    For example, both of these print "Mojo":

    ```mojo
    var msg: String = "Hello Mojo"
    print(msg[6:])
    print(msg.__getitem__(Slice(6, len(msg))))
    ```
    """

    var start: Int
    """The starting index of the slice."""
    var end: Int
    """The end index of the slice."""
    var step: Int
    """The step increment value of the slice."""

    @always_inline("nodebug")
    fn __init__(inout self, start: Int, end: Int):
        """Create a slice covering `[start, end)` with a unit step.

        Args:
            start: The start value.
            end: The end value.
        """
        self.step = 1
        self.start = start
        self.end = end

    @always_inline("nodebug")
    fn __init__[
        T0: AnyTrivialRegType, T1: AnyTrivialRegType, T2: AnyTrivialRegType
    ](inout self, start: T0, end: T1, step: T2):
        """Create a slice from start/end/step values that may each be an `Int`
        or `None`, substituting a default for any `None` component.

        Parameters:
            T0: Type of the start value.
            T1: Type of the end value.
            T2: Type of the step value.

        Args:
            start: The start value.
            end: The end value.
            step: The step value.
        """
        self.start = _default_or(start, 0)
        self.end = _default_or(end, _int_max_value())
        self.step = _default_or(step, 1)

    fn __str__(self) -> String:
        """Format the slice as `start:end:step`, leaving the end component
        empty when the end index is absent.

        Returns:
            The string representation of the span.
        """
        var text = str(self.start) + ":"
        if self._has_end():
            text += str(self.end)
        return text + ":" + str(self.step)

    @always_inline("nodebug")
    fn __eq__(self, other: Self) -> Bool:
        """Check whether two slices are component-wise identical.

        Args:
            other: The slice to compare to.

        Returns:
            True if start, end, and step all match, and False otherwise.
        """
        if self.start != other.start:
            return False
        if self.end != other.end:
            return False
        return self.step == other.step

    @always_inline("nodebug")
    fn __ne__(self, other: Self) -> Bool:
        """Check whether two slices differ in any component.

        Args:
            other: The slice to compare to.

        Returns:
            True if any of start, end, or step differ, and False otherwise.
        """
        return not self.__eq__(other)

    @always_inline
    fn unsafe_indices(self) -> Int:
        """Return the number of indices the slice covers.

        Only use this function if start/end is guaranteed to be not None.

        Returns:
            The length of the slice.
        """
        var span = range(self.start, self.end, self.step)
        return len(span)

    @always_inline
    fn __getitem__(self, idx: Int) -> Int:
        """Map a position within the slice to the underlying index.

        Args:
            idx: The index.

        Returns:
            The slice index (`start + idx * step`).
        """
        return self.step * index(idx) + self.start

    @always_inline("nodebug")
    fn _has_end(self) -> Bool:
        # The sentinel from _int_max_value() marks a missing end index.
        return _int_max_value() != self.end
@always_inline("nodebug")
fn slice(end: Int) -> Slice:
    """Build a slice spanning `[0, end)`.

    Args:
        end: The end value.

    Returns:
        The constructed slice.
    """
    var result = Slice(0, end)
    return result
@always_inline("nodebug")
fn slice(start: Int, end: Int) -> Slice:
    """Build a slice spanning `[start, end)` with a unit step.

    Args:
        start: The start value.
        end: The end value.

    Returns:
        The constructed slice.
    """
    var result = Slice(start, end)
    return result
# TODO(30496): Modernize the slice type
@always_inline("nodebug")
fn slice[
    T0: AnyTrivialRegType, T1: AnyTrivialRegType, T2: AnyTrivialRegType
](start: T0, end: T1, step: T2) -> Slice:
    """Build a slice from start/end/step values that may each be an `Int` or
    `None`.

    Parameters:
        T0: Type of the start value.
        T1: Type of the end value.
        T2: Type of the step value.

    Args:
        start: The start value.
        end: The end value.
        step: The step value.

    Returns:
        The constructed slice.
    """
    var result = Slice(start, end, step)
    return result
| mojo/stdlib/src/builtin/builtin_slice.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
trait Comparable(EqualityComparable):
    """A type which can be compared with other instances of itself.

    Conforming types implement the ordering operators `<`, `<=`, `>`, and `>=`
    in addition to the equality operators inherited from `EqualityComparable`.
    """

    fn __lt__(self, rhs: Self) -> Bool:
        """Define whether `self` is less than `rhs`.

        Args:
            rhs: The right hand side of the comparison.

        Returns:
            True if `self` is less than `rhs`.
        """
        ...

    fn __le__(self, rhs: Self) -> Bool:
        """Define whether `self` is less than or equal to `rhs`.

        Args:
            rhs: The right hand side of the comparison.

        Returns:
            True if `self` is less than or equal to `rhs`.
        """
        ...

    fn __gt__(self, rhs: Self) -> Bool:
        """Define whether `self` is greater than `rhs`.

        Args:
            rhs: The right hand side of the comparison.

        Returns:
            True if `self` is greater than `rhs`.
        """
        ...

    fn __ge__(self, rhs: Self) -> Bool:
        """Define whether `self` is greater than or equal to `rhs`.

        Args:
            rhs: The right hand side of the comparison.

        Returns:
            True if `self` is greater than or equal to `rhs`.
        """
        ...
| mojo/stdlib/src/builtin/comparable.mojo | false |
<filename>mojo/stdlib/src/builtin/constrained.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements compile time constraints.
These are Mojo built-ins, so you don't need to import them.
"""
@always_inline("nodebug")
fn constrained[cond: Bool, msg: StringLiteral = "param assertion failed"]():
    """Compile time checks that the condition is true.

    The `constrained` is similar to `static_assert` in C++ and is used to
    introduce constraints on the enclosing function. In Mojo, the assert places
    a constraint on the function. The message is displayed when the assertion
    fails.

    Parameters:
        cond: The bool value to assert.
        msg: The message to display on failure.
    """
    __mlir_op.`kgen.param.assert`[
        cond = cond.__mlir_i1__(), message = msg.value
    ]()
    # NOTE(review): the redundant trailing bare `return` was removed; the
    # function falls off the end naturally.
| mojo/stdlib/src/builtin/constrained.mojo | false |
<filename>mojo/stdlib/src/builtin/coroutine.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements classes and methods for coroutines.
These are Mojo built-ins, so you don't need to import them.
"""
from sys import sizeof
from memory import UnsafePointer
# ===----------------------------------------------------------------------=== #
# _suspend_async
# ===----------------------------------------------------------------------=== #
# Opaque MLIR handle to a coroutine frame; passed to the `co.*` operations.
alias AnyCoroutine = __mlir_type.`!co.routine`
@always_inline
fn _suspend_async[body: fn (AnyCoroutine) capturing -> None]():
    # Suspends the current coroutine; `body` receives the handle of the
    # suspended coroutine so the caller can arrange for it to be resumed.
    __mlir_region await_body(hdl: AnyCoroutine):
        body(hdl)
        __mlir_op.`co.suspend.end`()

    __mlir_op.`co.suspend`[_region = "await_body".value]()
# ===----------------------------------------------------------------------=== #
# _CoroutineContext
# ===----------------------------------------------------------------------=== #
@register_passable("trivial")
struct _CoroutineContext:
    """The default context for a Coroutine, capturing the resume function
    callback and parent Coroutine. The resume function will typically just
    resume the parent. May be overwritten by other context types with different
    interpretations of the payload, but which nevertheless be the same size
    and contain the resume function and a payload pointer."""

    # Passed the coroutine being completed and its context's payload.
    alias _resume_fn_type = fn (AnyCoroutine, AnyCoroutine) -> None

    # Callback invoked when the coroutine completes.
    var _resume_fn: Self._resume_fn_type
    # The payload; in this default context it is the awaiting (parent)
    # coroutine's handle.
    var _parent_hdl: AnyCoroutine
fn _coro_resume_callback(
    handle: AnyCoroutine,
    parent: AnyCoroutine,
):
    """Resume the parent Coroutine."""
    # `handle` (the completed coroutine) is intentionally unused here.
    _coro_resume_fn(parent)
@always_inline
fn _coro_resume_fn(handle: AnyCoroutine):
    """This function is a generic coroutine resume function."""
    # Directly resumes the coroutine identified by `handle`.
    __mlir_op.`co.resume`(handle)
# Matches _CoroutineContext._resume_fn_type so it can be stored as a context
# callback when there is no awaiter to resume.
fn _coro_resume_noop_callback(handle: AnyCoroutine, null: AnyCoroutine):
    """Return immediately since nothing to resume."""
    return
# ===----------------------------------------------------------------------=== #
# Coroutine
# ===----------------------------------------------------------------------=== #
@register_passable
struct Coroutine[
    is_mut: Bool, //,
    type: AnyTrivialRegType,
    lifetime: AnyLifetime[is_mut].type,
]:
    """Represents a coroutine.

    Coroutines can pause execution saving the state of the program (including
    values of local variables and the location of the next instruction to be
    executed). When the coroutine is resumed, execution continues from where it
    left off, with the saved state restored.

    Parameters:
        is_mut: Whether the lifetime is mutable.
        type: Type of value returned upon completion of the coroutine.
        lifetime: The lifetime of the coroutine's captures.
    """

    # Opaque handle to the underlying coroutine frame.
    var _handle: AnyCoroutine

    @always_inline
    fn _get_ctx[ctx_type: AnyTrivialRegType](self) -> UnsafePointer[ctx_type]:
        """Returns the pointer to the coroutine context.

        Parameters:
            ctx_type: The type of the coroutine context.

        Returns:
            The coroutine context.
        """
        # Any substitute context type must match _CoroutineContext's size.
        constrained[
            sizeof[_CoroutineContext]() == sizeof[ctx_type](),
            "context size must be 16 bytes",
        ]()
        return __mlir_op.`co.get_callback_ptr`[
            _type = __mlir_type[`!kgen.pointer<`, ctx_type, `>`]
        ](self._handle)

    @always_inline
    fn get(self) -> type:
        """Get the value of the fulfilled coroutine promise.

        Returns:
            The value of the fulfilled promise.
        """
        return __mlir_op.`co.get_results`[_type=type](self._handle)

    @always_inline
    fn __init__(handle: AnyCoroutine) -> Self:
        """Construct a coroutine object from a handle.

        Args:
            handle: The init handle.

        Returns:
            The constructed coroutine object.
        """
        return Self {_handle: handle}

    @always_inline
    fn __del__(owned self):
        """Destroy the coroutine object."""
        __mlir_op.`co.destroy`(self._handle)

    @always_inline
    fn __await__(self) -> type:
        """Suspends the current coroutine until the coroutine is complete.

        Returns:
            The coroutine promise.
        """

        @always_inline
        @parameter
        fn await_body(parent_hdl: AnyCoroutine):
            # Install a context that resumes the awaiting (parent) coroutine
            # on completion, then start this coroutine.
            LegacyPointer(self._get_ctx[_CoroutineContext]().address).store(
                _CoroutineContext {
                    _resume_fn: _coro_resume_callback, _parent_hdl: parent_hdl
                }
            )
            __mlir_op.`co.resume`(self._handle)

        _suspend_async[await_body]()
        return self.get()

    # Never call this method.
    fn _deprecated_direct_resume(self) -> type:
        # Installs a no-op resume callback and resumes synchronously.
        LegacyPointer(self._get_ctx[_CoroutineContext]().address).store(
            _CoroutineContext {
                _resume_fn: _coro_resume_noop_callback,
                _parent_hdl: self._handle,
            }
        )
        __mlir_op.`co.resume`(self._handle)
        return self.get()
# ===----------------------------------------------------------------------=== #
# RaisingCoroutine
# ===----------------------------------------------------------------------=== #
@register_passable
struct RaisingCoroutine[
    is_mut: Bool, //,
    type: AnyTrivialRegType,
    lifetime: AnyLifetime[is_mut].type,
]:
    """Represents a coroutine that can raise.

    Coroutines can pause execution saving the state of the program (including
    values of local variables and the location of the next instruction to be
    executed). When the coroutine is resumed, execution continues from where it
    left off, with the saved state restored.

    Parameters:
        is_mut: Whether the lifetime is mutable.
        type: Type of value returned upon completion of the coroutine.
        lifetime: The lifetime of the coroutine's captures.
    """

    # The result is a variant holding either an Error (index 0) or the
    # successful value (index 1).
    alias _var_type = __mlir_type[`!kgen.variant<`, Error, `, `, type, `>`]
    var _handle: AnyCoroutine

    @always_inline
    fn get(self) raises -> type:
        """Get the value of the fulfilled coroutine promise.

        Returns:
            The value of the fulfilled promise.

        Raises:
            The error stored in the promise, if the coroutine raised.
        """
        var variant = __mlir_op.`co.get_results`[_type = Self._var_type](
            self._handle
        )
        # Index 0 of the variant is the Error; index 1 is the value.
        if __mlir_op.`kgen.variant.is`[index = Int(0).value](variant):
            raise __mlir_op.`kgen.variant.take`[index = Int(0).value](variant)
        return __mlir_op.`kgen.variant.take`[index = Int(1).value](variant)

    @always_inline
    fn _get_ctx[ctx_type: AnyTrivialRegType](self) -> UnsafePointer[ctx_type]:
        """Returns the pointer to the coroutine context.

        Parameters:
            ctx_type: The type of the coroutine context.

        Returns:
            The coroutine context.
        """
        # Any substitute context type must match _CoroutineContext's size.
        constrained[
            sizeof[_CoroutineContext]() == sizeof[ctx_type](),
            "context size must be 16 bytes",
        ]()
        return __mlir_op.`co.get_callback_ptr`[
            _type = __mlir_type[`!kgen.pointer<`, ctx_type, `>`]
        ](self._handle)

    @always_inline
    fn __init__(inout self, handle: AnyCoroutine):
        """Construct a coroutine object from a handle.

        Args:
            handle: The init handle.
        """
        self = Self {_handle: handle}

    @always_inline
    fn __del__(owned self):
        """Destroy the coroutine object."""
        __mlir_op.`co.destroy`(self._handle)

    @always_inline
    fn __await__(self) raises -> type:
        """Suspends the current coroutine until the coroutine is complete.

        Returns:
            The coroutine promise.
        """

        @always_inline
        @parameter
        fn await_body(parent_hdl: AnyCoroutine):
            # Install a context that resumes the awaiting (parent) coroutine
            # on completion, then start this coroutine.
            LegacyPointer(self._get_ctx[_CoroutineContext]().address).store(
                _CoroutineContext {
                    _resume_fn: _coro_resume_callback, _parent_hdl: parent_hdl
                }
            )
            __mlir_op.`co.resume`(self._handle)

        _suspend_async[await_body]()
        return self.get()
| mojo/stdlib/src/builtin/coroutine.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements a debug assert.
These are Mojo built-ins, so you don't need to import them.
"""
from os import abort
from sys._build import is_kernels_debug_build
from sys import triple_is_nvidia_cuda, is_defined
from builtin._location import __call_location, _SourceLocation
@always_inline
fn debug_assert[stringable: Stringable](cond: Bool, msg: stringable):
    """Asserts that the condition is true.

    The `debug_assert` is similar to `assert` in C++. It is a no-op in release
    builds unless MOJO_ENABLE_ASSERTIONS is defined.

    Right now, users of the mojo-sdk must explicitly specify `-D MOJO_ENABLE_ASSERTIONS`
    to enable assertions. It is not sufficient to compile programs with `-debug-level full`
    for enabling assertions in the library.

    Parameters:
        stringable: The type of the message.

    Args:
        cond: The bool value to assert.
        msg: The message to display on failure.
    """
    # Print an error and fail.
    alias err = is_kernels_debug_build() or is_defined[
        "MOJO_ENABLE_ASSERTIONS"
    ]()

    # Print a warning, but do not fail (useful for testing assert behavior).
    alias warn = is_defined["ASSERT_WARNING"]()

    @parameter
    if err or warn:
        if not cond:
            # Message formatting lives in a non-inlined helper so this
            # always-inlined function stays small on the passing path.
            _debug_assert_msg[err](msg, __call_location())
@no_inline
fn _debug_assert_msg[
    err: Bool, stringable: Stringable
](msg: stringable, loc: _SourceLocation):
    """Aborts with (or prints) the given message and location.

    Parameters:
        err: True to abort on failure; False to only print a warning.
        stringable: The type of the message.

    Args:
        msg: The message to display.
        loc: The source location of the failing assertion.

    Note that it's important that this function doesn't get inlined; otherwise,
    an indirect recursion of @always_inline functions is possible (e.g. because
    abort's implementation could use debug_assert)
    """

    @parameter
    if triple_is_nvidia_cuda():
        # On GPUs, assert shouldn't allocate.
        @parameter
        if err:
            abort()
        else:
            print("Assert Warning")
        return

    @parameter
    if err:
        abort(loc.prefix("Assert Error: " + str(msg)))
    else:
        print(loc.prefix("Assert Warning:"), str(msg))
| mojo/stdlib/src/builtin/debug_assert.mojo | false |
<filename>mojo/stdlib/src/builtin/dtype.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the DType class.
These are Mojo built-ins, so you don't need to import them.
"""
from collections import KeyElement
from sys import sizeof as _sizeof
from utils import unroll
# Bit masks used to classify dtypes. NOTE(review): semantics inferred from
# the names (signed / integer / float category flags); the masks are not used
# in this visible portion of the file — confirm against the kgen dtype
# encoding before relying on them.
alias _mIsSigned = UInt8(1)
alias _mIsInteger = UInt8(1 << 7)
alias _mIsFloat = UInt8(1 << 6)
@value
@register_passable("trivial")
struct DType(Stringable, Representable, KeyElement):
"""Represents DType and provides methods for working with it."""
alias type = __mlir_type.`!kgen.dtype`
var value: Self.type
"""The underlying storage for the DType value."""
alias invalid = DType(
__mlir_attr.`#kgen.dtype.constant<invalid> : !kgen.dtype`
)
"""Represents an invalid or unknown data type."""
alias bool = DType(__mlir_attr.`#kgen.dtype.constant<bool> : !kgen.dtype`)
"""Represents a boolean data type."""
alias int8 = DType(__mlir_attr.`#kgen.dtype.constant<si8> : !kgen.dtype`)
"""Represents a signed integer type whose bitwidth is 8."""
alias uint8 = DType(__mlir_attr.`#kgen.dtype.constant<ui8> : !kgen.dtype`)
"""Represents an unsigned integer type whose bitwidth is 8."""
alias int16 = DType(__mlir_attr.`#kgen.dtype.constant<si16> : !kgen.dtype`)
"""Represents a signed integer type whose bitwidth is 16."""
alias uint16 = DType(__mlir_attr.`#kgen.dtype.constant<ui16> : !kgen.dtype`)
"""Represents an unsigned integer type whose bitwidth is 16."""
alias int32 = DType(__mlir_attr.`#kgen.dtype.constant<si32> : !kgen.dtype`)
"""Represents a signed integer type whose bitwidth is 32."""
alias uint32 = DType(__mlir_attr.`#kgen.dtype.constant<ui32> : !kgen.dtype`)
"""Represents an unsigned integer type whose bitwidth is 32."""
alias int64 = DType(__mlir_attr.`#kgen.dtype.constant<si64> : !kgen.dtype`)
"""Represents a signed integer type whose bitwidth is 64."""
alias uint64 = DType(__mlir_attr.`#kgen.dtype.constant<ui64> : !kgen.dtype`)
"""Represents an unsigned integer type whose bitwidth is 64."""
alias bfloat16 = DType(
__mlir_attr.`#kgen.dtype.constant<bf16> : !kgen.dtype`
)
"""Represents a brain floating point value whose bitwidth is 16."""
alias float16 = DType(__mlir_attr.`#kgen.dtype.constant<f16> : !kgen.dtype`)
"""Represents an IEEE754-2008 `binary16` floating point value."""
alias float32 = DType(__mlir_attr.`#kgen.dtype.constant<f32> : !kgen.dtype`)
"""Represents an IEEE754-2008 `binary32` floating point value."""
alias tensor_float32 = DType(
__mlir_attr.`#kgen.dtype.constant<tf32> : !kgen.dtype`
)
"""Represents a special floating point format supported by NVIDIA Tensor
Cores, with the same range as float32 and reduced precision (>=10 bits).
Note that this type is only available on NVIDIA GPUs.
"""
alias float64 = DType(__mlir_attr.`#kgen.dtype.constant<f64> : !kgen.dtype`)
"""Represents an IEEE754-2008 `binary64` floating point value."""
alias index = DType(__mlir_attr.`#kgen.dtype.constant<index> : !kgen.dtype`)
"""Represents an integral type whose bitwidth is the maximum integral value
on the system."""
alias address = DType(
__mlir_attr.`#kgen.dtype.constant<address> : !kgen.dtype`
)
"""Represents a pointer type whose bitwidth is the same as the bitwidth
of the hardware's pointer type (32-bit on 32-bit machines and 64-bit on
64-bit machines)."""
@always_inline("nodebug")
fn __str__(self) -> String:
"""Gets the name of the DType.
Returns:
The name of the dtype.
"""
if self == DType.bool:
return "bool"
if self == DType.int8:
return "int8"
if self == DType.uint8:
return "uint8"
if self == DType.int16:
return "int16"
if self == DType.uint16:
return "uint16"
if self == DType.int32:
return "int32"
if self == DType.uint32:
return "uint32"
if self == DType.int64:
return "int64"
if self == DType.uint64:
return "uint64"
if self == DType.index:
return "index"
if self == DType.bfloat16:
return "bfloat16"
if self == DType.float16:
return "float16"
if self == DType.float32:
return "float32"
if self == DType.tensor_float32:
return "tensor_float32"
if self == DType.float64:
return "float64"
if self == DType.invalid:
return "invalid"
if self == DType.address:
return "address"
return "<<unknown>>"
    @always_inline("nodebug")
    fn __repr__(self) -> String:
        """Gets the representation of the DType e.g. `"DType.float32"`.

        Returns:
            The representation of the dtype.
        """
        return "DType." + str(self)

    @always_inline("nodebug")
    fn get_value(self) -> __mlir_type.`!kgen.dtype`:
        """Gets the associated internal kgen.dtype value.

        Returns:
            The kgen.dtype value.
        """
        # Exposes the raw MLIR-level dtype constant backing this DType.
        return self.value
    @staticmethod
    fn _from_ui8(ui8: __mlir_type.ui8) -> DType:
        # Decodes a DType from its `ui8` encoding (inverse of `_as_i8`).
        return __mlir_op.`pop.dtype.from_ui8`(ui8)

    @staticmethod
    fn _from_ui8(ui8: __mlir_type.`!pop.scalar<ui8>`) -> DType:
        # Overload taking the `!pop.scalar<ui8>` form: converts to the builtin
        # `ui8` type and defers to the overload above.
        return DType._from_ui8(
            __mlir_op.`pop.cast_to_builtin`[_type = __mlir_type.ui8](ui8)
        )

    @always_inline("nodebug")
    fn _as_i8(
        self,
    ) -> __mlir_type.`!pop.scalar<ui8>`:
        # Encodes this DType as a `!pop.scalar<ui8>` value; used below for
        # equality, hashing, and the `_mIs*` bitmask tests.
        var val = __mlir_op.`pop.dtype.to_ui8`(self.value)
        return __mlir_op.`pop.cast_from_builtin`[
            _type = __mlir_type.`!pop.scalar<ui8>`
        ](val)
    @always_inline("nodebug")
    fn __eq__(self, rhs: DType) -> Bool:
        """Compares one DType to another for equality.

        Args:
            rhs: The DType to compare against.

        Returns:
            True if the DTypes are the same and False otherwise.
        """
        # Equality is defined on the ui8 encoding of the dtype.
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred eq>`](
            self._as_i8(), rhs._as_i8()
        )

    @always_inline("nodebug")
    fn __ne__(self, rhs: DType) -> Bool:
        """Compares one DType to another for non-equality.

        Args:
            rhs: The DType to compare against.

        Returns:
            False if the DTypes are the same and True otherwise.
        """
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred ne>`](
            self._as_i8(), rhs._as_i8()
        )

    fn __hash__(self) -> Int:
        """Returns a hash of the DType.

        Returns:
            A hash value derived from the DType's ui8 encoding.
        """
        return hash(UInt8(self._as_i8()))

    @always_inline("nodebug")
    fn isa[other: DType](self) -> Bool:
        """Checks if this DType matches the other one, specified as a
        parameter.

        Parameters:
            other: The DType to compare against.

        Returns:
            True if the DTypes are the same and False otherwise.
        """
        return self == other
@always_inline("nodebug")
fn is_bool(self) -> Bool:
"""Checks if this DType is Bool.
Returns:
True if the DType is Bool and False otherwise.
"""
return self.isa[DType.bool]()
@always_inline("nodebug")
fn is_uint8(self) -> Bool:
"""Checks if this DType is UInt8.
Returns:
True if the DType is UInt8 and False otherwise.
"""
return self.isa[DType.uint8]()
@always_inline("nodebug")
fn is_int8(self) -> Bool:
"""Checks if this DType is Int8.
Returns:
True if the DType is Int8 and False otherwise.
"""
return self.isa[DType.int8]()
@always_inline("nodebug")
fn is_uint16(self) -> Bool:
"""Checks if this DType is UInt16.
Returns:
True if the DType is UInt16 and False otherwise.
"""
return self.isa[DType.uint16]()
@always_inline("nodebug")
fn is_int16(self) -> Bool:
"""Checks if this DType is Int16.
Returns:
True if the DType is Int16 and False otherwise.
"""
return self.isa[DType.int16]()
@always_inline("nodebug")
fn is_uint32(self) -> Bool:
"""Checks if this DType is UInt32.
Returns:
True if the DType is UInt32 and False otherwise.
"""
return self.isa[DType.uint32]()
@always_inline("nodebug")
fn is_int32(self) -> Bool:
"""Checks if this DType is Int32.
Returns:
True if the DType is Int32 and False otherwise.
"""
return self.isa[DType.int32]()
@always_inline("nodebug")
fn is_uint64(self) -> Bool:
"""Checks if this DType is UInt64.
Returns:
True if the DType is UInt64 and False otherwise.
"""
return self.isa[DType.uint64]()
@always_inline("nodebug")
fn is_int64(self) -> Bool:
"""Checks if this DType is Int64.
Returns:
True if the DType is Int64 and False otherwise.
"""
return self.isa[DType.int64]()
@always_inline("nodebug")
fn is_bfloat16(self) -> Bool:
"""Checks if this DType is BFloat16.
Returns:
True if the DType is BFloat16 and False otherwise.
"""
return self.isa[DType.bfloat16]()
@always_inline("nodebug")
fn is_float16(self) -> Bool:
"""Checks if this DType is Float16.
Returns:
True if the DType is Float16 and False otherwise.
"""
return self.isa[DType.float16]()
@always_inline("nodebug")
fn is_float32(self) -> Bool:
"""Checks if this DType is Float32.
Returns:
True if the DType is Float32 and False otherwise.
"""
return self.isa[DType.float32]()
@always_inline("nodebug")
fn is_tensor_float32(self) -> Bool:
"""Checks if this DType is Tensor Float32.
Returns:
True if the DType is Tensor Float32 and False otherwise.
"""
return self.isa[DType.tensor_float32]()
@always_inline("nodebug")
fn is_float64(self) -> Bool:
"""Checks if this DType is Float64.
Returns:
True if the DType is Float64 and False otherwise.
"""
return self.isa[DType.float64]()
@always_inline("nodebug")
fn is_index(self) -> Bool:
"""Checks if this DType is Index.
Returns:
True if the DType is Index and False otherwise.
"""
return self.isa[DType.index]()
@always_inline("nodebug")
fn is_index32(self) -> Bool:
"""Checks if this DType is Index and 32 bit.
Returns:
True if this DType is Index and 32 bit, False otherwise.
"""
return self.is_index() and (self.sizeof() == DType.int32.sizeof())
@always_inline("nodebug")
fn is_index64(self) -> Bool:
"""Checks if this DType is Index and 64 bit.
Returns:
True if this DType is Index and 64 bit, False otherwise.
"""
return self.is_index() and (self.sizeof() == DType.int64.sizeof())
@always_inline("nodebug")
fn is_address(self) -> Bool:
"""Checks if this DType is Address.
Returns:
True if the DType is Address and False otherwise.
"""
return self.isa[DType.address]()
    @always_inline("nodebug")
    fn is_unsigned(self) -> Bool:
        """Returns True if the type parameter is unsigned and False otherwise.

        Returns:
            Returns True if the input type parameter is unsigned.
        """
        # Only integral types can be unsigned; everything else is False.
        if not self.is_integral():
            return False
        # Unsigned iff the "signed" bit of the ui8 encoding is clear.
        # NOTE: relies on the module-level `_mIsSigned` bitmask (defined
        # outside this view) selecting that bit.
        return Bool(
            __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred eq>`](
                __mlir_op.`pop.and`(self._as_i8(), _mIsSigned.value),
                UInt8(0).value,
            )
        )

    @always_inline("nodebug")
    fn is_signed(self) -> Bool:
        """Returns True if the type parameter is signed and False otherwise.

        Returns:
            Returns True if the input type parameter is signed.
        """
        # Index and all floating-point types are treated as signed.
        if self.is_index() or self.is_floating_point():
            return True
        if not self.is_integral():
            return False
        # Signed iff the "signed" bit of the ui8 encoding is set.
        return Bool(
            __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred ne>`](
                __mlir_op.`pop.and`(self._as_i8(), _mIsSigned.value),
                UInt8(0).value,
            )
        )

    @always_inline("nodebug")
    fn is_integral(self) -> Bool:
        """Returns True if the type parameter is an integer and False otherwise.

        Returns:
            Returns True if the input type parameter is an integer.
        """
        # Index counts as integral even though its encoding is special-cased.
        if self.is_index():
            return True
        # Integral iff the "integer" bit of the ui8 encoding is set
        # (module-level `_mIsInteger` bitmask, defined outside this view).
        return Bool(
            __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred ne>`](
                __mlir_op.`pop.and`(self._as_i8(), _mIsInteger.value),
                UInt8(0).value,
            )
        )

    @always_inline("nodebug")
    fn is_floating_point(self) -> Bool:
        """Returns True if the type parameter is a floating-point and False
        otherwise.

        Returns:
            Returns True if the input type parameter is a floating-point.
        """
        # Integral types (including index) are excluded up front.
        if self.is_integral():
            return False
        # Floating-point iff the "float" bit of the ui8 encoding is set
        # (module-level `_mIsFloat` bitmask, defined outside this view).
        return Bool(
            __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred ne>`](
                __mlir_op.`pop.and`(self._as_i8(), _mIsFloat.value),
                UInt8(0).value,
            )
        )
@always_inline("nodebug")
fn is_half_float(self) -> Bool:
"""Returns True if the type is a half-precision floating point type,
e.g. either fp16 or bf16.
Returns:
True if the type is a half-precision float, false otherwise..
"""
return self.is_float16() or self.is_bfloat16()
@always_inline("nodebug")
fn is_numeric(self) -> Bool:
"""Returns True if the type parameter is numeric (i.e. you can perform
arithmetic operations on).
Returns:
Returns True if the input type parameter is either integral or
floating-point.
"""
return self.is_integral() or self.is_floating_point()
    @always_inline
    fn sizeof(self) -> Int:
        """Returns the size in bytes of the current DType.

        Returns:
            Returns the size in bytes of the current DType.
        """
        return __mlir_op.`pop.dtype.sizeof`(self.value)

    @always_inline
    fn bitwidth(self) -> Int:
        """Returns the size in bits of the current DType.

        Returns:
            Returns the size in bits of the current DType.
        """
        # Derived from the byte size: 8 bits per byte.
        var size_in_bytes = self.sizeof()
        return 8 * size_in_bytes
# ===----------------------------------------------------------------------===#
# dispatch_integral
# ===----------------------------------------------------------------------===#
@always_inline
fn dispatch_integral[
func: fn[type: DType] () capturing -> None
](self) raises:
"""Dispatches an integral function corresponding to the current DType.
Constraints:
DType must be integral.
Parameters:
func: A parametrized on dtype function to dispatch.
"""
if self.is_uint8():
func[DType.uint8]()
elif self.is_int8():
func[DType.int8]()
elif self.is_uint16():
func[DType.uint16]()
elif self.is_int16():
func[DType.int16]()
elif self.is_uint32():
func[DType.uint32]()
elif self.is_int32():
func[DType.int32]()
elif self.is_uint64():
func[DType.uint64.value]()
elif self.is_int64():
func[DType.int64]()
elif self.is_index():
func[DType.index]()
else:
raise Error("only integral types are supported")
# ===----------------------------------------------------------------------===#
# dispatch_floating
# ===----------------------------------------------------------------------===#
    @always_inline
    fn dispatch_floating[
        func: fn[type: DType] () capturing -> None
    ](self) raises:
        """Dispatches a floating-point function corresponding to the current DType.

        Constraints:
            DType must be floating-point (float16, float32, or float64;
            bfloat16 is not yet supported and raises).

        Parameters:
            func: A parametrized on dtype function to dispatch.
        """
        if self.is_float16():
            func[DType.float16]()
        # TODO(#15473): Enable after extending LLVM support
        # elif self.is_bfloat16():
        #     func[DType.bfloat16]()
        elif self.is_float32():
            func[DType.float32]()
        elif self.is_float64():
            func[DType.float64]()
        else:
            raise Error("only floating point types are supported")
    @always_inline
    fn _dispatch_bitwidth[
        func: fn[type: DType] () capturing -> None,
    ](self) raises:
        """Dispatches a function corresponding to the current DType's bitwidth.
        This should only be used if func only depends on the bitwidth of the dtype,
        and not other properties of the dtype.

        Parameters:
            func: A parametrized on dtype function to dispatch.

        Raises:
            Error if the bitwidth is not 8, 16, 32, or 64.
        """
        var bitwidth = self.bitwidth()
        # The unsigned dtypes serve as canonical stand-ins for each bitwidth.
        if bitwidth == 8:
            func[DType.uint8]()
        elif bitwidth == 16:
            func[DType.uint16]()
        elif bitwidth == 32:
            func[DType.uint32]()
        elif bitwidth == 64:
            func[DType.uint64]()
        else:
            raise Error(
                "bitwidth_dispatch only supports types with bitwidth [8, 16,"
                " 32, 64]"
            )
        return

    @always_inline
    fn _dispatch_custom[
        func: fn[type: DType] () capturing -> None, *dtypes: DType
    ](self) raises:
        """Dispatches a function corresponding to current DType if it matches
        any type in the dtypes parameter.

        Parameters:
            func: A parametrized on dtype function to dispatch.
            dtypes: A list of DTypes on which to do dispatch.

        Raises:
            Error if the runtime dtype matches none of `dtypes`.
        """
        alias dtype_var = VariadicList[DType](dtypes)

        # Compile-time unrolled scan over the candidate dtypes; the first
        # runtime match dispatches and returns.
        @parameter
        for idx in range(len(dtype_var)):
            alias dtype = dtype_var[idx]
            if self == dtype:
                return func[dtype]()

        raise Error(
            "dispatch_custom: dynamic_type does not match any dtype parameters"
        )
# ===----------------------------------------------------------------------===#
# dispatch_arithmetic
# ===----------------------------------------------------------------------===#
@always_inline
fn dispatch_arithmetic[
func: fn[type: DType] () capturing -> None
](self) raises:
"""Dispatches a function corresponding to the current DType.
Parameters:
func: A parametrized on dtype function to dispatch.
"""
if self.is_floating_point():
self.dispatch_floating[func]()
elif self.is_integral():
self.dispatch_integral[func]()
else:
raise Error("only arithmetic types are supported")
# ===-------------------------------------------------------------------===#
# integral_type_of
# ===-------------------------------------------------------------------===#
@always_inline("nodebug")
fn _integral_type_of[type: DType]() -> DType:
    """Gets the integral type which has the same bitwidth as the input type.

    Non-integral, non-float inputs (e.g. bool, invalid, address) fall through
    to `DType.invalid`.
    """

    @parameter
    if type.is_integral():
        return type

    @parameter
    if type == DType.bfloat16 or type == DType.float16:
        return DType.int16

    @parameter
    if type == DType.float32 or type == DType.tensor_float32:
        return DType.int32

    @parameter
    if type == DType.float64:
        return DType.int64

    # Consistency fix: access the static alias through the type name rather
    # than through the `type` parameter (was `type.invalid`), matching how
    # `DType.invalid` is referenced everywhere else in this file.
    return DType.invalid
fn _scientific_notation_digits[type: DType]() -> StringLiteral:
    """Get the number of digits as a StringLiteral for the scientific notation
    representation of a float.
    """
    # Compile-time guard: callers must pass a floating-point dtype.
    constrained[type.is_floating_point(), "expected floating point type"]()

    @parameter
    if type == DType.bfloat16 or type == DType.float16:
        return "4"
    elif type == DType.float32 or type == DType.tensor_float32:
        return "8"
    else:
        # Any remaining float dtype must be float64.
        constrained[type == DType.float64, "unknown floating point type"]()
        return "16"
# ===-------------------------------------------------------------------===#
# _uint_type_of_width
# ===-------------------------------------------------------------------===#
fn _uint_type_of_width[width: Int]() -> DType:
    """Returns the unsigned integer DType with the given bitwidth.

    Only widths 8, 16, 32, and 64 are supported; any other width fails the
    `constrained` check at compile time.
    """

    @parameter
    if width == 8:
        return DType.uint8
    elif width == 16:
        return DType.uint16
    elif width == 32:
        return DType.uint32
    else:
        constrained[width == 64]()
        return DType.uint64
# ===-------------------------------------------------------------------===#
# printf format
# ===-------------------------------------------------------------------===#
@always_inline
fn _index_printf_format() -> StringLiteral:
    # printf format specifier for the platform `Int`/index type: "%d" on
    # 32-bit targets; on 64-bit targets, "%lld" for Windows (LLP64, where
    # `long` is 32-bit) and "%ld" elsewhere (LP64).
    @parameter
    if bitwidthof[Int]() == 32:
        return "%d"
    elif os_is_windows():
        return "%lld"
    else:
        return "%ld"
@always_inline
fn _get_dtype_printf_format[type: DType]() -> StringLiteral:
    # Maps a DType to the printf format specifier used to print one scalar
    # of that type.
    @parameter
    if type == DType.bool:
        # Bools are printed as integers using the platform index format.
        return _index_printf_format()
    elif type == DType.uint8:
        return "%hhu"
    elif type == DType.int8:
        return "%hhi"
    elif type == DType.uint16:
        return "%hu"
    elif type == DType.int16:
        return "%hi"
    elif type == DType.uint32:
        return "%u"
    elif type == DType.int32:
        return "%i"
    elif type == DType.int64:

        @parameter
        if os_is_windows():
            return "%lld"
        else:
            return "%ld"

    elif type == DType.uint64:

        @parameter
        if os_is_windows():
            return "%llu"
        else:
            return "%lu"

    elif type == DType.index:
        return _index_printf_format()
    elif type == DType.address:
        return "%p"
    elif type.is_floating_point():
        # 17 significant digits suffice to round-trip an IEEE754 float64.
        return "%.17g"
    else:
        constrained[False, "invalid dtype"]()

    return ""
fn _get_runtime_dtype_size(type: DType) -> Int:
    """
    Get the size of the dynamic dtype.

    We cannot directly use type.sizeof(), since that only works with
    statically known dtypes. Instead, we have to perform a dispatch to
    determine the size of the dtype.
    """
    alias type_list = List[DType](
        DType.bool,
        DType.int8,
        DType.uint8,
        DType.int16,
        DType.uint16,
        DType.bfloat16,
        DType.float16,
        DType.int32,
        DType.uint32,
        DType.float32,
        DType.tensor_float32,
        DType.int64,
        DType.uint64,
        DType.float64,
        DType.index,
        DType.address,
    )

    # Compile-time unrolled: each iteration compares the runtime dtype
    # against one statically-known dtype, for which sizeof is available.
    @parameter
    for idx in range(len(type_list)):
        alias concrete_type = type_list[idx]
        if concrete_type == type:
            return sizeof[concrete_type]()

    abort("unable to get the dtype size of " + str(type))
    return -1  # unreachable after abort; satisfies the return-type checker
| mojo/stdlib/src/builtin/dtype.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
trait EqualityComparable:
    """A type which can be compared for equality with other instances of itself.

    Conforming types must implement both `__eq__` and `__ne__`; the two are
    expected to be consistent (`a != b` whenever not `a == b`).
    """

    fn __eq__(self, other: Self) -> Bool:
        """Define whether two instances of the object are equal to each other.

        Args:
            other: Another instance of the same type.

        Returns:
            True if the instances are equal according to the type's definition
            of equality, False otherwise.
        """
        pass

    fn __ne__(self, other: Self) -> Bool:
        """Define whether two instances of the object are not equal to each other.

        Args:
            other: Another instance of the same type.

        Returns:
            True if the instances are not equal according to the type's definition
            of equality, False otherwise.
        """
        pass
| mojo/stdlib/src/builtin/equality_comparable.mojo | false |
<filename>mojo/stdlib/src/builtin/error.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the Error class.
These are Mojo built-ins, so you don't need to import them.
"""
from sys import alignof, sizeof
from memory.memory import _free
from memory import memcmp, memcpy, UnsafePointer
# ===----------------------------------------------------------------------===#
# Error
# ===----------------------------------------------------------------------===#
@register_passable
struct Error(Stringable, Boolable):
    """This type represents an Error."""

    var data: UnsafePointer[UInt8]
    """A pointer to the beginning of the string data being referenced."""

    var loaded_length: Int
    """The length of the string being referenced.
    Error instances conditionally own their error message. To reduce
    the size of the error instance we use the sign bit of the length field
    to store the ownership value. When loaded_length is negative it indicates
    ownership and a free is executed in the destructor.
    """

    @always_inline("nodebug")
    fn __init__() -> Error:
        """Default constructor.

        Returns:
            The constructed Error object.
        """
        # Empty error: null data, zero length, nothing owned.
        return Error {data: UnsafePointer[UInt8](), loaded_length: 0}

    @always_inline("nodebug")
    fn __init__(value: StringLiteral) -> Error:
        """Construct an Error object with a given string literal.

        Args:
            value: The error message.

        Returns:
            The constructed Error object.
        """
        # String literals have static lifetime, so the message is borrowed:
        # the non-negative loaded_length means __del__ will not free it.
        return Error {
            # TODO: Remove cast once string UInt8 transition is complete.
            data: value.unsafe_ptr().bitcast[UInt8](),
            loaded_length: len(value),
        }

    @always_inline("nodebug")
    fn __init__(src: String) -> Error:
        """Construct an Error object with a given string.

        Args:
            src: The error message.

        Returns:
            The constructed Error object.
        """
        var length = len(src)
        var dest = UnsafePointer[UInt8].alloc(length + 1)
        memcpy(
            dest=dest,
            # TODO: Remove cast once string UInt8 transition is complete.
            src=src.unsafe_ptr().bitcast[UInt8](),
            count=length,
        )
        dest[length] = 0  # null-terminate the copied message
        # Negative loaded_length marks the buffer as owned (freed in __del__).
        return Error {data: dest, loaded_length: -length}

    @always_inline("nodebug")
    fn __init__(src: StringRef) -> Error:
        """Construct an Error object with a given string ref.

        Args:
            src: The error message.

        Returns:
            The constructed Error object.
        """
        var length = len(src)
        var dest = UnsafePointer[UInt8].alloc(length + 1)
        memcpy(
            dest=dest,
            src=src.unsafe_ptr(),
            count=length,
        )
        dest[length] = 0  # null-terminate the copied message
        # Negative loaded_length marks the buffer as owned (freed in __del__).
        return Error {data: dest, loaded_length: -length}

    fn __del__(owned self):
        """Releases memory if allocated."""
        # Only owned messages (negative loaded_length) are freed.
        if self.loaded_length < 0:
            self.data.free()

    @always_inline("nodebug")
    fn __copyinit__(existing: Self) -> Self:
        """Creates a deep copy of an existing error.

        Returns:
            The copy of the original error.
        """
        if existing.loaded_length < 0:
            # Owned message: make an independent null-terminated copy so both
            # instances can free their own buffer safely.
            var length = -existing.loaded_length
            var dest = UnsafePointer[UInt8].alloc(length + 1)
            memcpy(dest, existing.data, length)
            dest[length] = 0
            return Error {data: dest, loaded_length: existing.loaded_length}
        else:
            # Borrowed message (e.g. a string literal): share the pointer.
            return Error {
                data: existing.data, loaded_length: existing.loaded_length
            }

    fn __bool__(self) -> Bool:
        """Returns True if the error is set and false otherwise.

        Returns:
            True if the error object contains a value and False otherwise.
        """
        return self.data.__bool__()

    fn __str__(self) -> String:
        """Converts the Error to string representation.

        Returns:
            A String of the error message.
        """
        return self._message()

    fn __repr__(self) -> String:
        """Converts the Error to printable representation.

        Returns:
            A printable representation of the error message.
        """
        return str(self)

    @always_inline
    fn _message(self) -> String:
        """Converts the Error to string representation.

        Returns:
            A String of the error message.
        """
        if not self:
            return ""

        var length = self.loaded_length
        if length < 0:
            # Strip the ownership flag from the sign bit to recover the
            # actual message length.
            length = -length
        return String(StringRef(self.data, length))
| mojo/stdlib/src/builtin/error.mojo | false |
<filename>mojo/stdlib/src/builtin/file.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the file based methods.
These are Mojo built-ins, so you don't need to import them.
For example, here's how to read a file:
```mojo
var f = open("my_file.txt", "r")
print(f.read())
f.close()
```
Or use a `with` statement to close the file automatically:
```mojo
with open("my_file.txt", "r") as f:
print(f.read())
```
"""
from os import PathLike
from sys import external_call
from memory import AddressSpace, DTypePointer, Pointer
@register_passable
struct _OwnedStringRef(Boolable):
    # Small owned string buffer used to receive error messages from the
    # KGEN_CompilerRT_IO_* runtime calls; frees its buffer on destruction.
    var data: DTypePointer[DType.int8]
    var length: Int

    fn __init__() -> _OwnedStringRef:
        # Starts empty: null data, zero length.
        return Self {data: DTypePointer[DType.int8](), length: 0}

    fn __del__(owned self):
        if self.data:
            self.data.free()

    fn consume_as_error(owned self) -> Error:
        # Transfers ownership of the buffer into an Error, which will free it.
        var data = self.data
        # Don't free self.data in our dtor.
        self.data = DTypePointer[DType.int8]()
        var length = self.length
        return Error {
            data: UnsafePointer[UInt8]._from_dtype_ptr(
                # TODO: Remove cast once string UInt8 transition is complete.
                data.bitcast[DType.uint8]()
            ),
            # Negative loaded_length marks the Error as owning the buffer.
            loaded_length: -length,
        }

    fn __bool__(self) -> Bool:
        # True when a non-empty message has been written into the buffer.
        return self.length != 0
struct FileHandle:
    """File handle to an opened file."""

    var handle: DTypePointer[DType.invalid]
    """The underlying pointer to the file handle."""

    fn __init__(inout self):
        """Default constructor."""
        self.handle = DTypePointer[DType.invalid]()

    fn __init__(inout self, path: String, mode: String) raises:
        """Construct the FileHandle using the file path and mode.

        Args:
            path: The file path.
            mode: The mode to open the file in (the mode can be "r" or "w" or "rw").
        """
        self.__init__(path._strref_dangerous(), mode._strref_dangerous())

        # Keep the String objects alive until the dangerous StringRefs above
        # have been consumed.
        _ = path
        _ = mode

    fn __init__(inout self, path: StringRef, mode: StringRef) raises:
        """Construct the FileHandle using the file path and string.

        Args:
            path: The file path.
            mode: The mode to open the file in (the mode can be "r" or "w" or "rw").
        """
        var err_msg = _OwnedStringRef()
        var handle = external_call[
            "KGEN_CompilerRT_IO_FileOpen", DTypePointer[DType.invalid]
        ](path, mode, UnsafePointer.address_of(err_msg))

        if err_msg:
            self.handle = DTypePointer[DType.invalid]()
            raise (err_msg^).consume_as_error()

        self.handle = handle

    @always_inline
    fn __del__(owned self):
        """Closes the file handle."""
        try:
            self.close()
        except:
            # Destructors cannot raise; close errors are dropped here.
            pass

    fn close(inout self) raises:
        """Closes the file handle."""
        if not self.handle:
            return

        var err_msg = _OwnedStringRef()
        external_call["KGEN_CompilerRT_IO_FileClose", NoneType](
            self.handle, UnsafePointer.address_of(err_msg)
        )

        if err_msg:
            raise (err_msg^).consume_as_error()

        # Null out the handle so __del__ does not close it again.
        self.handle = DTypePointer[DType.invalid]()

    fn __moveinit__(inout self, owned existing: Self):
        """Moves constructor for the file handle.

        Args:
            existing: The existing file handle.
        """
        self.handle = existing.handle
        # Null out the source so its destructor does not close the file.
        existing.handle = DTypePointer[DType.invalid]()

    @always_inline
    fn read(self, size: Int64 = -1) raises -> String:
        """Reads data from a file and sets the file handle seek position. If
        size is left as the default of -1, it will read to the end of the file.
        Setting size to a number larger than what's in the file will set
        String.size to the total number of bytes, and read all the data.

        Args:
            size: Requested number of bytes to read (Default: -1 = EOF).

        Returns:
            The contents of the file.

        Raises:
            An error if this file handle is invalid, or if the file read
            returned a failure.

        Examples:

        Read the entire file into a String:

        ```mojo
        var file = open("/tmp/example.txt", "r")
        var string = file.read()
        print(string)
        ```

        Read the first 8 bytes, skip 2 bytes, and then read the next 8 bytes:

        ```mojo
        import os
        var file = open("/tmp/example.txt", "r")
        var word1 = file.read(8)
        print(word1)
        _ = file.seek(2, os.SEEK_CUR)
        var word2 = file.read(8)
        print(word2)
        ```

        Read the last 8 bytes in the file, then the first 8 bytes

        ```mojo
        _ = file.seek(-8, os.SEEK_END)
        var last_word = file.read(8)
        print(last_word)
        _ = file.seek(8, os.SEEK_SET) # os.SEEK_SET is the default start of file
        var first_word = file.read(8)
        print(first_word)
        ```
        .
        """
        if not self.handle:
            raise Error("invalid file handle")

        var size_copy: Int64 = size
        var err_msg = _OwnedStringRef()
        # The runtime updates size_copy to the number of bytes actually read.
        var buf = external_call[
            "KGEN_CompilerRT_IO_FileRead", UnsafePointer[UInt8]
        ](
            self.handle,
            UnsafePointer.address_of(size_copy),
            UnsafePointer.address_of(err_msg),
        )

        if err_msg:
            raise (err_msg^).consume_as_error()

        # +1 accounts for the null terminator in the String's buffer.
        return String(buf, int(size_copy) + 1)

    @always_inline
    fn read[
        type: DType
    ](self, ptr: DTypePointer[type], size: Int64 = -1) raises -> Int64:
        """Read data from the file into the pointer. Setting size will read up
        to `sizeof(type) * size`. The default value of `size` is -1 which
        will read to the end of the file. Starts reading from the file handle
        seek pointer, and after reading adds `sizeof(type) * size` bytes to the
        seek pointer.

        Parameters:
            type: The type that the data will be represented as.

        Args:
            ptr: The pointer where the data will be read to.
            size: Requested number of elements to read.

        Returns:
            The total amount of data that was read in bytes.

        Raises:
            An error if this file handle is invalid, or if the file read
            returned a failure.

        Examples:

        ```mojo
        import os
        alias file_name = "/tmp/example.txt"
        var file = open(file_name, "r")

        # Allocate and load 8 elements
        var ptr = DTypePointer[DType.float32].alloc(8)
        var bytes = file.read(ptr, 8)
        print("bytes read", bytes)

        var first_element = ptr.load(0)
        print(first_element)

        # Skip 2 elements
        _ = file.seek(2 * sizeof[DType.float32](), os.SEEK_CUR)

        # Allocate and load 8 more elements from file handle seek position
        var ptr2 = DTypePointer[DType.float32].alloc(8)
        var bytes2 = file.read(ptr2, 8)

        var eleventh_element = ptr2[0]
        var twelvth_element = ptr2[1]
        print(eleventh_element, twelvth_element)

        # Free the memory
        ptr.free()
        ptr2.free()
        ```
        .
        """
        if not self.handle:
            raise Error("invalid file handle")

        # Request size in bytes; the runtime rewrites it to the bytes read.
        var size_copy = size * sizeof[type]()
        var err_msg = _OwnedStringRef()
        external_call["KGEN_CompilerRT_IO_FileReadToAddress", NoneType](
            self.handle,
            ptr,
            UnsafePointer.address_of(size_copy),
            UnsafePointer.address_of(err_msg),
        )
        if err_msg:
            raise (err_msg^).consume_as_error()
        return size_copy

    fn read_bytes(self, size: Int64 = -1) raises -> List[UInt8]:
        """Reads data from a file and sets the file handle seek position. If
        size is left as default of -1, it will read to the end of the file.
        Setting size to a number larger than what's in the file will be handled
        and set the List.size to the total number of bytes in the file.

        Args:
            size: Requested number of bytes to read (Default: -1 = EOF).

        Returns:
            The contents of the file.

        Raises:
            An error if this file handle is invalid, or if the file read
            returned a failure.

        Examples:

        Reading the entire file into a List[UInt8]:

        ```mojo
        var file = open("/tmp/example.txt", "r")
        var bytes = file.read_bytes()
        ```

        Reading the first 8 bytes, skipping 2 bytes, and then reading the next
        8 bytes:

        ```mojo
        import os
        var file = open("/tmp/example.txt", "r")
        var list1 = file.read_bytes(8)
        _ = file.seek(2, os.SEEK_CUR)
        var list2 = file.read_bytes(8)
        ```

        Reading the last 8 bytes in the file, then the first 8 bytes:

        ```mojo
        import os
        var file = open("/tmp/example.txt", "r")
        _ = file.seek(-8, os.SEEK_END)
        var last_data = file.read_bytes(8)
        _ = file.seek(8, os.SEEK_SET) # os.SEEK_SET is the default start of file
        var first_data = file.read_bytes(8)
        ```
        .
        """
        if not self.handle:
            raise Error("invalid file handle")

        var size_copy: Int64 = size
        var err_msg = _OwnedStringRef()
        var buf = external_call[
            "KGEN_CompilerRT_IO_FileReadBytes", UnsafePointer[UInt8]
        ](
            self.handle,
            UnsafePointer.address_of(size_copy),
            UnsafePointer.address_of(err_msg),
        )

        if err_msg:
            raise (err_msg^).consume_as_error()

        # The List takes ownership of the runtime-allocated buffer.
        var list = List[UInt8](
            unsafe_pointer=buf, size=int(size_copy), capacity=int(size_copy)
        )

        return list

    fn seek(self, offset: UInt64, whence: UInt8 = os.SEEK_SET) raises -> UInt64:
        """Seeks to the given offset in the file.

        Args:
            offset: The byte offset to seek to.
            whence: The reference point for the offset:
                os.SEEK_SET = 0: start of file (Default).
                os.SEEK_CUR = 1: current position.
                os.SEEK_END = 2: end of file.

        Raises:
            An error if this file handle is invalid, or if file seek returned a
            failure.

        Returns:
            The resulting byte offset from the start of the file.

        Examples:

        Skip 32 bytes from the current read position:

        ```mojo
        import os
        var f = open("/tmp/example.txt", "r")
        _ = f.seek(32, os.SEEK_CUR)
        ```

        Start from 32 bytes from the end of the file:

        ```mojo
        import os
        var f = open("/tmp/example.txt", "r")
        _ = f.seek(-32, os.SEEK_END)
        ```
        .
        """
        if not self.handle:
            raise "invalid file handle"

        debug_assert(
            whence >= 0 and whence < 3,
            "Second argument to `seek` must be between 0 and 2.",
        )
        var err_msg = _OwnedStringRef()
        var pos = external_call["KGEN_CompilerRT_IO_FileSeek", UInt64](
            self.handle, offset, whence, UnsafePointer.address_of(err_msg)
        )

        if err_msg:
            raise (err_msg^).consume_as_error()

        return pos

    fn write(self, data: String) raises:
        """Write the data to the file.

        Args:
            data: The data to write to the file.
        """
        self._write(data.unsafe_ptr(), len(data))

    @always_inline
    fn write(self, data: StringRef) raises:
        """Write the data to the file.

        Args:
            data: The data to write to the file.
        """
        # TODO: Remove cast when transition to UInt8 strings is complete.
        self._write(data.unsafe_ptr().bitcast[Int8](), len(data))

    @always_inline
    fn _write[
        address_space: AddressSpace
    ](self, ptr: DTypePointer[DType.int8, address_space], len: Int) raises:
        """Write the data to the file.

        Parameters:
            address_space: The address space of the pointer.

        Args:
            ptr: The pointer to the data to write.
            len: The length of the pointer (in bytes).
        """
        if not self.handle:
            raise Error("invalid file handle")

        var err_msg = _OwnedStringRef()
        external_call["KGEN_CompilerRT_IO_FileWrite", NoneType](
            self.handle,
            ptr.address,
            len,
            UnsafePointer.address_of(err_msg),
        )

        if err_msg:
            raise (err_msg^).consume_as_error()

    fn __enter__(owned self) -> Self:
        """The function to call when entering the context.

        Returns:
            The file handle.
        """
        return self^

    fn _get_raw_fd(self) -> Int:
        # Queries the OS-level file descriptor backing this handle.
        var i64_res = external_call[
            "KGEN_CompilerRT_IO_GetFD",
            Int64,
        ](self.handle)
        return Int(i64_res.value)
fn open(path: String, mode: String) raises -> FileHandle:
    """Opens the file specified by path using the mode provided, returning a
    FileHandle.

    Args:
        path: The path to the file to open.
        mode: The mode to open the file in (the mode can be "r" or "w").

    Returns:
        A file handle.
    """
    return FileHandle(path, mode)


fn open[
    pathlike: os.PathLike
](path: pathlike, mode: String) raises -> FileHandle:
    """Opens the file specified by path using the mode provided, returning a
    FileHandle.

    Parameters:
        pathlike: A type conforming to the os.PathLike trait.

    Args:
        path: The path to the file to open.
        mode: The mode to open the file in (the mode can be "r" or "w").

    Returns:
        A file handle.
    """
    return FileHandle(path.__fspath__(), mode)
| mojo/stdlib/src/builtin/file.mojo | false |
<filename>mojo/stdlib/src/builtin/file_descriptor.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Higher level abstraction for file stream.
These are Mojo built-ins, so you don't need to import them.
For example, here's how to print to a file
```mojo
var f = open("my_file.txt", "r")
print("hello", file=f)
f.close()
```
"""
struct FileDescriptor:
    """File descriptor of a file."""
    var value: Int
    """The underlying value of the file descriptor."""
    fn __init__(inout self):
        """Default constructor to stdout."""
        # File descriptor 1 is stdout by POSIX convention.
        self.value = 1
    fn __init__(inout self, x: Int):
        """Constructs the file descriptor from an integer.
        Args:
            x: The integer.
        """
        self.value = x
    fn __init__(inout self, f: FileHandle):
        """Constructs the file descriptor from a file handle.
        Args:
            f: The file handle.
        """
        # Only the raw integer fd is stored here; the FileHandle keeps
        # ownership of the underlying file. NOTE(review): presumably the
        # FileHandle must outlive this descriptor -- confirm with callers.
        self.value = f._get_raw_fd()
| mojo/stdlib/src/builtin/file_descriptor.mojo | false |
<filename>mojo/stdlib/src/builtin/float_literal.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the FloatLiteral class.
These are Mojo built-ins, so you don't need to import them.
"""
from builtin._math import Ceilable, CeilDivable, Floorable, Truncable
# ===----------------------------------------------------------------------===#
# FloatLiteral
# ===----------------------------------------------------------------------===#
# FloatLiteral is a compile-time value: @nonmaterializable(Float64) means any
# use of it at run time materializes it as a Float64.
@value
@nonmaterializable(Float64)
@register_passable("trivial")
struct FloatLiteral(
    Absable,
    Boolable,
    Ceilable,
    CeilDivable,
    Comparable,
    Floorable,
    Intable,
    Roundable,
    Stringable,
    Truncable,
):
    """Mojo floating point literal type."""
    alias fp_type = __mlir_type.`!kgen.float_literal`
    var value: Self.fp_type
    """The underlying storage for the floating point value."""
    # ===------------------------------------------------------------------===#
    # Constructors
    # ===------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __init__(inout self, value: Self.fp_type):
        """Create a FloatLiteral value from a kgen.float_literal value.
        Args:
            value: The float value.
        """
        self.value = value
    @always_inline("nodebug")
    fn __init__(inout self, value: IntLiteral):
        """Convert an IntLiteral to a FloatLiteral value.
        Args:
            value: The IntLiteral value.
        """
        self.value = __mlir_op.`kgen.int_literal.to_float_literal`(value.value)
    # Special non-finite values; classified via the `isa` checks below.
    alias nan = Self(__mlir_attr.`#kgen.float_literal<nan>`)
    alias infinity = Self(__mlir_attr.`#kgen.float_literal<inf>`)
    alias negative_infinity = Self(__mlir_attr.`#kgen.float_literal<neg_inf>`)
    alias negative_zero = Self(__mlir_attr.`#kgen.float_literal<neg_zero>`)
    @always_inline("nodebug")
    fn is_nan(self) -> Bool:
        """Return whether the FloatLiteral is nan.
        Since `nan == nan` is False, this provides a way to check for nan-ness.
        Returns:
            True, if the value is nan, False otherwise.
        """
        return __mlir_op.`kgen.float_literal.isa`[
            special = __mlir_attr.`#kgen<float_literal.special_values nan>`
        ](self.value)
    @always_inline("nodebug")
    fn is_neg_zero(self) -> Bool:
        """Return whether the FloatLiteral is negative zero.
        Since `FloatLiteral.negative_zero == 0.0` is True, this provides a way
        to check if the FloatLiteral is negative zero.
        Returns:
            True, if the value is negative zero, False otherwise.
        """
        return __mlir_op.`kgen.float_literal.isa`[
            special = __mlir_attr.`#kgen<float_literal.special_values neg_zero>`
        ](self.value)
    @always_inline("nodebug")
    fn _is_normal(self) -> Bool:
        """Return whether the FloatLiteral is a normal (i.e. not special) value.
        Returns:
            True, if the value is a normal float, False otherwise.
        """
        return __mlir_op.`kgen.float_literal.isa`[
            special = __mlir_attr.`#kgen<float_literal.special_values normal>`
        ](self.value)
    # ===------------------------------------------------------------------===#
    # Conversion Operators
    # ===------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __str__(self) -> String:
        """Get the float as a string.
        Returns:
            A string representation.
        """
        # Materialize as Float64 and reuse its string conversion.
        return str(Float64(self))
    @always_inline("nodebug")
    fn __int_literal__(self) -> IntLiteral:
        """Casts the floating point value to an IntLiteral. If there is a
        fractional component, then the value is truncated towards zero.
        Eg. `(4.5).__int_literal__()` returns `4`, and `(-3.7).__int_literal__()`
        returns `-3`.
        Returns:
            The value as an integer.
        """
        return IntLiteral(
            __mlir_op.`kgen.float_literal.to_int_literal`(self.value)
        )
    @always_inline("nodebug")
    fn __int__(self) -> Int:
        """Converts the FloatLiteral value to an Int. If there is a fractional
        component, then the value is truncated towards zero.
        Eg. `(4.5).__int__()` returns `4`, and `(-3.7).__int__()` returns `-3`.
        Returns:
            The value as an integer.
        """
        return self.__int_literal__().__int__()
    # ===------------------------------------------------------------------===#
    # Unary Operators
    # ===------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __bool__(self) -> Bool:
        """A FloatLiteral value is true if it is non-zero.
        Returns:
            True if non-zero.
        """
        return self != 0.0
    @always_inline("nodebug")
    fn __neg__(self) -> FloatLiteral:
        """Return the negation of the FloatLiteral value.
        Returns:
            The negated FloatLiteral value.
        """
        # Negation is implemented as multiplication by -1.
        return self * Self(-1)
    @always_inline("nodebug")
    fn __abs__(self) -> Self:
        """Return the absolute value of the FloatLiteral.
        Returns:
            The absolute value.
        """
        if self > 0:
            return self
        return -self
    @always_inline("nodebug")
    fn __floor__(self) -> Self:
        """Return the floor value of the FloatLiteral.
        Returns:
            The floor value.
        """
        # Handle special values first.
        if not self._is_normal():
            return self
        # __int_literal__ rounds towards zero, so it's correct for integers and
        # positive values.
        var truncated: IntLiteral = self.__int_literal__()
        # Ensure this equality doesn't hit any implicit conversions.
        if self >= 0 or self.__eq__(Self(truncated)):
            return truncated
        # Negative non-integers truncated towards zero land one above floor.
        return truncated - 1
    @always_inline("nodebug")
    fn __ceil__(self) -> Self:
        """Return the ceiling value of the FloatLiteral.
        Returns:
            The ceiling value.
        """
        # Handle special values first.
        if not self._is_normal():
            return self
        # __int_literal__ rounds towards zero, so it's correct for integers and
        # negative values.
        var truncated: IntLiteral = self.__int_literal__()
        # Ensure this equality doesn't hit any implicit conversions.
        if self <= 0 or self.__eq__(Self(truncated)):
            return truncated
        return truncated + 1
    @always_inline("nodebug")
    fn __trunc__(self) -> Self:
        """Truncates the floating point literal. If there is a fractional
        component, then the value is truncated towards zero.
        For example, `(4.5).__trunc__()` returns `4.0`, and `(-3.7).__trunc__()`
        returns `-3.0`.
        Returns:
            The truncated FloatLiteral value.
        """
        # Handle special values first.
        if not self._is_normal():
            return self
        return Self(self.__int_literal__())
    # NOTE(review): unlike the other operators (and the ndigits overload
    # below), this overload is not marked @always_inline("nodebug") --
    # confirm whether that is intentional.
    fn __round__(self) -> Self:
        """Return the rounded value of the FloatLiteral.
        Ties (values exactly halfway between two integers) round to the
        nearest even integer.
        Returns:
            The rounded value.
        """
        # Handle special values first.
        if not self._is_normal():
            return self
        alias one = __mlir_attr.`#kgen.int_literal<1> : !kgen.int_literal`
        alias neg_one = __mlir_attr.`#kgen.int_literal<-1> : !kgen.int_literal`
        var truncated: IntLiteral = self.__int_literal__()
        var abs_diff = abs(self - truncated)
        # Step away from zero, matching the sign of self.
        var plus_one = one if self > 0 else neg_one
        if abs_diff == 0.5:
            # Round to the nearest even number.
            if truncated % 2 == 0:
                return Self(truncated)
            else:
                return Self(truncated + plus_one)
        elif abs_diff > 0.5:
            return Self(truncated + plus_one)
        else:
            return Self(truncated)
    @always_inline("nodebug")
    fn __round__(self, ndigits: Int) -> Self:
        """Return the rounded value of the FloatLiteral.
        Args:
            ndigits: The number of digits to round to. Defaults to 0.
        Returns:
            The rounded value.
        """
        # Handle special values first.
        if not self._is_normal():
            return self
        alias one = __mlir_attr.`#kgen.int_literal<1> : !kgen.int_literal`
        alias neg_one = __mlir_attr.`#kgen.int_literal<-1> : !kgen.int_literal`
        alias ten = __mlir_attr.`#kgen.int_literal<10> : !kgen.int_literal`
        # Scale by 10**abs(ndigits) so rounding happens on an integer
        # boundary; the result is scaled back at the end.
        var multiplier = one
        var target: Self = self
        # TODO: Use IntLiteral.__pow__() when it's implemented.
        for _ in range(abs(ndigits)):
            multiplier = __mlir_op.`kgen.int_literal.binop`[
                oper = __mlir_attr.`#kgen<int_literal.binop_kind mul>`
            ](multiplier, ten)
        if ndigits > 0:
            target *= Self(multiplier)
        elif ndigits < 0:
            target /= Self(multiplier)
        else:
            return self.__round__()
        var truncated: IntLiteral = target.__int_literal__()
        var result: Self
        var abs_diff = abs(target - truncated)
        var plus_one = one if self > 0 else neg_one
        if abs_diff == 0.5:
            # Round to the nearest even number.
            if truncated % 2 == 0:
                result = Self(truncated)
            else:
                result = Self(truncated + plus_one)
        # The == 0.5 case was handled above, so this is effectively < 0.5.
        elif abs_diff <= 0.5:
            result = Self(truncated)
        else:
            result = Self(truncated + plus_one)
        # ndigits == 0 returned early above, so exactly one branch rescales.
        if ndigits >= 0:
            result /= Self(multiplier)
        elif ndigits < 0:
            result *= Self(multiplier)
        return result
    # ===------------------------------------------------------------------===#
    # Arithmetic Operators
    # ===------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __add__(self, rhs: FloatLiteral) -> FloatLiteral:
        """Add two FloatLiterals.
        Args:
            rhs: The value to add.
        Returns:
            The sum of the two values.
        """
        return __mlir_op.`kgen.float_literal.binop`[
            oper = __mlir_attr.`#kgen<float_literal.binop_kind add>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __sub__(self, rhs: FloatLiteral) -> FloatLiteral:
        """Subtract two FloatLiterals.
        Args:
            rhs: The value to subtract.
        Returns:
            The difference of the two values.
        """
        return __mlir_op.`kgen.float_literal.binop`[
            oper = __mlir_attr.`#kgen<float_literal.binop_kind sub>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __mul__(self, rhs: FloatLiteral) -> FloatLiteral:
        """Multiply two FloatLiterals.
        Args:
            rhs: The value to multiply.
        Returns:
            The product of the two values.
        """
        return __mlir_op.`kgen.float_literal.binop`[
            oper = __mlir_attr.`#kgen<float_literal.binop_kind mul>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __truediv__(self, rhs: FloatLiteral) -> FloatLiteral:
        """Divide two FloatLiterals.
        Args:
            rhs: The value to divide.
        Returns:
            The quotient of the two values.
        """
        # TODO - Python raises an error on divide by 0.0 or -0.0
        return __mlir_op.`kgen.float_literal.binop`[
            oper = __mlir_attr.`#kgen<float_literal.binop_kind truediv>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __floordiv__(self, rhs: Self) -> Self:
        """Returns self divided by rhs, rounded down to the nearest integer.
        Args:
            rhs: The divisor value.
        Returns:
            `floor(self / rhs)` value.
        """
        return self.__truediv__(rhs).__floor__()
    @always_inline("nodebug")
    fn __mod__(self, rhs: Self) -> Self:
        """Return the remainder of self divided by rhs.
        Args:
            rhs: The value to divide on.
        Returns:
            The remainder of dividing self by rhs.
        """
        return self.__divmod__(rhs)[1]
    @always_inline("nodebug")
    fn __divmod__(self, rhs: Self) -> Tuple[Self, Self]:
        """Return a tuple with the quotient and the remainder of self divided by rhs.
        Args:
            rhs: The value to divide on.
        Returns:
            The tuple with the dividend and the remainder
        """
        var quotient: Self = self.__floordiv__(rhs)
        var remainder: Self = self - (quotient * rhs)
        return quotient, remainder
    fn __rfloordiv__(self, rhs: Self) -> Self:
        """Returns rhs divided by self, rounded down to the nearest integer.
        Args:
            rhs: The value to be divided by self.
        Returns:
            `floor(rhs / self)` value.
        """
        return rhs // self
    # TODO - maybe __pow__?
    # ===------------------------------------------------------------------===#
    # In-place Arithmetic Operators
    # ===------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __iadd__(inout self, rhs: FloatLiteral):
        """In-place addition operator.
        Args:
            rhs: The value to add.
        """
        self = self + rhs
    @always_inline("nodebug")
    fn __isub__(inout self, rhs: FloatLiteral):
        """In-place subtraction operator.
        Args:
            rhs: The value to subtract.
        """
        self = self - rhs
    @always_inline("nodebug")
    fn __imul__(inout self, rhs: FloatLiteral):
        """In-place multiplication operator.
        Args:
            rhs: The value to multiply.
        """
        self = self * rhs
    @always_inline("nodebug")
    fn __itruediv__(inout self, rhs: FloatLiteral):
        """In-place division.
        Args:
            rhs: The value to divide.
        """
        self = self / rhs
    # ===------------------------------------------------------------------===#
    # Reversed Operators
    # ===------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __radd__(self, rhs: FloatLiteral) -> FloatLiteral:
        """Reversed addition operator.
        Args:
            rhs: The value to add.
        Returns:
            The sum of this and the given value.
        """
        return rhs + self
    @always_inline("nodebug")
    fn __rsub__(self, rhs: FloatLiteral) -> FloatLiteral:
        """Reversed subtraction operator.
        Args:
            rhs: The value to subtract from.
        Returns:
            The result of subtracting this from the given value.
        """
        return rhs - self
    @always_inline("nodebug")
    fn __rmul__(self, rhs: FloatLiteral) -> FloatLiteral:
        """Reversed multiplication operator.
        Args:
            rhs: The value to multiply.
        Returns:
            The product of the given number and this.
        """
        return rhs * self
    @always_inline("nodebug")
    fn __rtruediv__(self, rhs: FloatLiteral) -> FloatLiteral:
        """Reversed division.
        Args:
            rhs: The value to be divided by this.
        Returns:
            The result of dividing the given value by this.
        """
        return rhs / self
    # ===------------------------------------------------------------------===#
    # Comparison Operators
    # ===------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __eq__(self, rhs: FloatLiteral) -> Bool:
        """Compare for equality.
        Args:
            rhs: The value to compare.
        Returns:
            True if they are equal.
        """
        return __mlir_op.`kgen.float_literal.cmp`[
            pred = __mlir_attr.`#kgen<float_literal.cmp_pred eq>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __ne__(self, rhs: FloatLiteral) -> Bool:
        """Compare for inequality.
        Args:
            rhs: The value to compare.
        Returns:
            True if they are not equal.
        """
        return __mlir_op.`kgen.float_literal.cmp`[
            pred = __mlir_attr.`#kgen<float_literal.cmp_pred ne>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __lt__(self, rhs: FloatLiteral) -> Bool:
        """Less than comparison.
        Args:
            rhs: The value to compare.
        Returns:
            True if this value is less than `rhs`.
        """
        return __mlir_op.`kgen.float_literal.cmp`[
            pred = __mlir_attr.`#kgen<float_literal.cmp_pred lt>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __le__(self, rhs: FloatLiteral) -> Bool:
        """Less than or equal to comparison.
        Args:
            rhs: The value to compare.
        Returns:
            True if this value is less than or equal to `rhs`.
        """
        return __mlir_op.`kgen.float_literal.cmp`[
            pred = __mlir_attr.`#kgen<float_literal.cmp_pred le>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __gt__(self, rhs: FloatLiteral) -> Bool:
        """Greater than comparison.
        Args:
            rhs: The value to compare.
        Returns:
            True if this value is greater than `rhs`.
        """
        return __mlir_op.`kgen.float_literal.cmp`[
            pred = __mlir_attr.`#kgen<float_literal.cmp_pred gt>`
        ](self.value, rhs.value)
    @always_inline("nodebug")
    fn __ge__(self, rhs: FloatLiteral) -> Bool:
        """Greater than or equal to comparison.
        Args:
            rhs: The value to compare.
        Returns:
            True if this value is greater than or equal to `rhs`.
        """
        return __mlir_op.`kgen.float_literal.cmp`[
            pred = __mlir_attr.`#kgen<float_literal.cmp_pred ge>`
        ](self.value, rhs.value)
| mojo/stdlib/src/builtin/float_literal.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provides the `hex` and `bin` functions.
These are Mojo built-ins, so you don't need to import them.
"""
from collections import Optional
from utils import InlineArray
alias _DEFAULT_DIGIT_CHARS = "0123456789abcdefghijklmnopqrstuvwxyz"
# ===----------------------------------------------------------------------===#
# bin
# ===----------------------------------------------------------------------===#
@always_inline
fn bin[
    type: DType
](num: Scalar[type], prefix: StringLiteral = "0b", /) -> String:
    """Returns the binary string representation of an integral value.

    ```mojo
    print(bin(123))
    print(bin(-123))
    ```

    ```plaintext
    '0b1111011'
    '-0b1111011'
    ```

    Parameters:
        type: The data type of the integral scalar.

    Args:
        num: An integral scalar value.
        prefix: The prefix of the formatted int.

    Returns:
        The binary string representation of num.
    """
    # Binary is just the generic integer formatter with radix 2.
    alias radix = 2
    return _try_format_int(num, radix, prefix=prefix)
# Need this until we have constraints to stop the compiler from matching this
# directly to bin[type: DType](num: Scalar[type]).
@always_inline("nodebug")
fn bin(b: Scalar[DType.bool], prefix: StringLiteral = "0b", /) -> String:
    """Returns the binary representation of a scalar bool.

    Args:
        b: A scalar bool value.
        prefix: The prefix of the formatted int.

    Returns:
        The binary string representation of b.
    """
    # Widen the bool to int8 (False -> 0, True -> 1) and reuse the integral
    # formatter. Fix: forward `prefix` instead of silently dropping it (the
    # previous code always used the default "0b").
    return bin(b.cast[DType.int8](), prefix)
@always_inline("nodebug")
fn bin[T: Indexer](num: T, prefix: StringLiteral = "0b", /) -> String:
    """Returns the binary representation of an indexer type.

    Parameters:
        T: The Indexer type.

    Args:
        num: An indexer value.
        prefix: The prefix of the formatted int.

    Returns:
        The binary string representation of num.
    """
    # Fix: forward `prefix` instead of silently dropping it (the previous
    # code always used the default "0b").
    return bin(Scalar[DType.index](index(num)), prefix)
# ===----------------------------------------------------------------------===#
# hex
# ===----------------------------------------------------------------------===#
@always_inline
fn hex[
    type: DType
](value: Scalar[type], prefix: StringLiteral = "0x", /) -> String:
    """Formats the given integer as a lowercase base-16 string.

    The returned string carries the given prefix (by default "0x") to mark
    the subsequent digits as hexadecimal.

    Parameters:
        type: The type of the Scalar to represent in hexadecimal.

    Args:
        value: The integer value to format.
        prefix: The prefix of the formatted int.

    Returns:
        A string containing the hex representation of the given integer.
    """
    # Hexadecimal is just the generic integer formatter with radix 16.
    alias radix = 16
    return _try_format_int(value, radix, prefix=prefix)
@always_inline
fn hex[T: Indexer](value: T, prefix: StringLiteral = "0x", /) -> String:
    """Formats the given indexer value as a lowercase base-16 string.

    The returned string carries the given prefix (by default "0x") to mark
    the subsequent digits as hexadecimal.

    Parameters:
        T: The indexer type to represent in hexadecimal.

    Args:
        value: The integer value to format.
        prefix: The prefix of the formatted int.

    Returns:
        A string containing the hex representation of the given integer.
    """
    # Normalize the indexer to an index scalar and delegate.
    var as_index = Scalar[DType.index](index(value))
    return hex(as_index, prefix)
@always_inline
fn hex(value: Scalar[DType.bool], prefix: StringLiteral = "0x", /) -> String:
    """Returns the hex string representation of the given scalar bool.

    The hexadecimal representation is a base-16 encoding of the bool.
    The returned string will be prefixed with "0x" to indicate that the
    subsequent digits are hex.

    Args:
        value: The bool value to format.
        prefix: The prefix of the formatted int.

    Returns:
        A string containing the hex representation of the given bool.
    """
    # Fix: forward `prefix` instead of silently dropping it (the previous
    # code always used the default "0x").
    return hex(value.cast[DType.int8](), prefix)
# ===----------------------------------------------------------------------===#
# Integer formatting utilities
# ===----------------------------------------------------------------------===#
fn _try_format_int[
    type: DType
](value: Scalar[type], radix: Int = 10, prefix: StringLiteral = "",) -> String:
    """Formats `value` in the given radix, aborting on (unreachable) errors.

    A non-raising convenience wrapper around `_format_int` for callers that
    pass a known-good radix and the default digit characters.
    """
    try:
        return _format_int(value, radix, prefix=prefix)
    except e:
        # This should not be reachable as _format_int only throws if we pass
        # incompatible radix and custom digit chars, which we aren't doing
        # above.
        # Fix: the old message claimed "hexadecimal", but this helper also
        # formats binary/decimal (e.g. bin() routes through it).
        return abort[String](
            "unexpected exception formatting value as string: " + str(e)
        )
fn _format_int[
    type: DType
](
    value: Scalar[type],
    radix: Int = 10,
    digit_chars: StringLiteral = _DEFAULT_DIGIT_CHARS,
    prefix: StringLiteral = "",
) raises -> String:
    """Returns `value` formatted as a string in the given radix.

    Raises:
        If the radix/digit_chars combination is rejected by `_try_write_int`.
    """
    var string = String()
    # The formatter writes directly into `string`'s buffer.
    var fmt = string._unsafe_to_formatter()
    _write_int(fmt, value, radix, digit_chars, prefix)
    return string^
@always_inline
fn _write_int[
    type: DType
](
    inout fmt: Formatter,
    value: Scalar[type],
    radix: Int = 10,
    digit_chars: StringLiteral = _DEFAULT_DIGIT_CHARS,
    prefix: StringLiteral = "",
) raises:
    """Writes `value` formatted in the given radix to `fmt`.

    Raises:
        The error reported by `_try_write_int`, if any.
    """
    var err = _try_write_int(fmt, value, radix, digit_chars, prefix)
    if err:
        raise err.value()[]
@always_inline
fn _try_write_int[
    type: DType
](
    inout fmt: Formatter,
    value: Scalar[type],
    radix: Int = 10,
    digit_chars: StringLiteral = _DEFAULT_DIGIT_CHARS,
    prefix: StringLiteral = "",
) -> Optional[Error]:
    """Writes a formatted string representation of the given integer using the specified radix.
    The maximum supported radix is 36 unless a custom `digit_chars` mapping is
    provided.
    Returns:
        An Error if the radix/digit_chars combination is invalid; otherwise
        None after the formatted value has been written to `fmt`.
    """
    #
    # Check that the radix and available digit characters are valid
    #
    constrained[type.is_integral(), "Expected integral"]()
    if radix < 2:
        return Error("Unable to format integer to string with radix < 2")
    if radix > len(digit_chars):
        return Error(
            "Unable to format integer to string when provided radix is larger "
            "than length of available digit value characters"
        )
    if not len(digit_chars) >= 2:
        return Error(
            "Unable to format integer to string when provided digit_chars"
            " mapping len is not >= 2"
        )
    #
    # Process the integer value into its corresponding digits
    #
    # TODO(#26444, Unicode support): Get an array of Character, not bytes.
    var digit_chars_array = digit_chars.unsafe_ptr()
    # Prefix a '-' if the original int was negative and make positive.
    if value < 0:
        fmt.write_str("-")
    # Add the custom number prefix, e.g. "0x" commonly used for hex numbers.
    # This comes *after* the minus sign, if present.
    fmt.write_str(prefix)
    if value == 0:
        # TODO: Replace with safe digit_chars[:1] syntax.
        # SAFETY:
        # This static lifetime is valid as long as we're using a
        # `StringLiteral` for `digit_chars`.
        var zero = StringSlice[False, ImmutableStaticLifetime](
            # TODO: Remove cast after transition to UInt8 strings is complete.
            unsafe_from_utf8_ptr=digit_chars_array.bitcast[UInt8](),
            len=1,
        )
        fmt.write_str(zero)
        # NOTE(review): bare `return` here yields an empty Optional (no
        # error), matching the `return None` at the end of this function.
        return
    #
    # Create a buffer to store the formatted value
    #
    # Stack allocate enough bytes to store any formatted 64-bit integer
    # TODO: use a dynamic size when #2194 is resolved
    alias CAPACITY: Int = 64
    var buf = InlineArray[Int8, CAPACITY](unsafe_uninitialized=True)
    # Start the buf pointer at the end. We will write the least-significant
    # digits later in the buffer, and then decrement the pointer to move
    # earlier in the buffer as we write the more-significant digits.
    var offset = CAPACITY - 1
    #
    # Write the digits of the number
    #
    var remaining_int = value
    @parameter
    fn process_digits[get_digit_value: fn () capturing -> Scalar[type]]():
        while remaining_int:
            var digit_value = get_digit_value()
            # Write the char representing the value of the least significant
            # digit.
            buf[offset] = digit_chars_array[int(digit_value)]
            # Position the offset to write the next digit.
            offset -= 1
            # Drop the least significant digit
            remaining_int /= radix
    if remaining_int >= 0:
        @parameter
        fn pos_digit_value() -> Scalar[type]:
            return remaining_int % radix
        process_digits[pos_digit_value]()
    else:
        # Negative values are processed directly (rather than negated first),
        # which avoids overflow for the minimum representable value.
        @parameter
        fn neg_digit_value() -> Scalar[type]:
            return abs(remaining_int % -radix)
        process_digits[neg_digit_value]()
    # Re-add +1 byte since the loop ended so we didn't write another char.
    offset += 1
    var buf_ptr = buf.unsafe_ptr() + offset
    # Calculate the length of the buffer we've filled. This is the number of
    # bytes from our final `buf_ptr` to the end of the buffer.
    var len = CAPACITY - offset
    # SAFETY:
    # Create a slice to only those bytes in `buf` that have been initialized.
    var str_slice = StringSlice[False, __lifetime_of(buf)](
        # TODO: Remove cast after transition to UInt8 strings is complete.
        unsafe_from_utf8_ptr=buf_ptr.bitcast[UInt8](),
        len=len,
    )
    fmt.write_str(str_slice)
    return None
| mojo/stdlib/src/builtin/format_int.mojo | false |
<filename>mojo/stdlib/src/builtin/hash.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the `Hashable` trait and `hash()` built-in function.
There are a few main tools in this module:
- `Hashable` trait for types implementing `__hash__(self) -> Int`
- `hash[T: Hashable](hashable: T) -> Int` built-in function.
- A `hash()` implementation for arbitrary byte strings,
`hash(data: DTypePointer[DType.int8], n: Int) -> Int`,
is the workhorse function, which implements efficient hashing via SIMD
vectors. See the documentation of this function for more details on the hash
implementation.
- `hash(SIMD)` and `hash(Int8)` implementations
These are useful helpers to specialize for the general bytes implementation.
"""
from builtin.dtype import _uint_type_of_width
import random
from sys.ffi import _get_global
from memory import memcpy, memset_zero, stack_allocation
# TODO remove this import once InlineArray is moved to collections
from utils import InlineArray
# ===----------------------------------------------------------------------=== #
# Implementation
# ===----------------------------------------------------------------------=== #
# This hash secret is XOR-ed with the final hash value for common hash functions.
# Doing so can help prevent DDOS attacks on data structures relying on these
# hash functions. See `hash(bytes, n)` documentation for more details.
# TODO(27659): This is always 0 right now
# var HASH_SECRET = int(random.random_ui64(0, UInt64.MAX))
fn _HASH_SECRET() -> Int:
    """Returns the process-wide hash secret, initializing it on first use."""
    var global_ptr = _get_global[
        "HASH_SECRET", _initialize_hash_secret, _destroy_hash_secret
    ]()
    return global_ptr.bitcast[Int]()[0]
fn _initialize_hash_secret(
    payload: UnsafePointer[NoneType],
) -> UnsafePointer[NoneType]:
    # Allocate storage for the secret and fill it with a random 64-bit value.
    var storage = UnsafePointer[Int].alloc(1)
    storage[] = int(random.random_ui64(0, UInt64.MAX))
    return storage.bitcast[NoneType]()
fn _destroy_hash_secret(p: UnsafePointer[NoneType]):
    """Frees the heap allocation made by `_initialize_hash_secret`."""
    p.free()
trait Hashable:
    """A trait for types which specify a function to hash their data.
    This hash function will be used for applications like hash maps, and
    don't need to be cryptographically secure. A good hash function will
    hash similar / common types to different values, and in particular
    the _low order bits_ of the hash, which are used in smaller dictionaries,
    should be sensitive to any changes in the data structure. If your type's
    hash function doesn't meet this criteria it will get poor performance in
    common hash map implementations.
    ```mojo
    @value
    struct Foo(Hashable):
        fn __hash__(self) -> Int:
            return 4 # chosen by fair random dice roll
    var foo = Foo()
    print(hash(foo))
    ```
    """
    fn __hash__(self) -> Int:
        """Return a 64-bit hash of the type's data.

        Returns:
            A 64-bit integer hash value.
        """
        ...
fn hash[T: Hashable](hashable: T) -> Int:
    """Hash a Hashable type using its underlying hash implementation.
    Parameters:
        T: Any Hashable type.
    Args:
        hashable: The input data to hash.
    Returns:
        A 64-bit integer hash based on the underlying implementation.
    """
    # Dispatch to the type's own __hash__ implementation.
    return hashable.__hash__()
fn _djbx33a_init[type: DType, size: Int]() -> SIMD[type, size]:
    """Returns the DJBX33A initial hash state, splatted across all lanes.

    Fix: the canonical initial value of Bernstein's DJBX33A/DJB2 hash is
    5381; the previous 5361 was a typo. (Hash values are only required to be
    stable within a single runtime instance, per this module's docs.)
    """
    return SIMD[type, size](5381)
fn _djbx33a_hash_update[
    type: DType, size: Int
](data: SIMD[type, size], next: SIMD[type, size]) -> SIMD[type, size]:
    """One DJBX33A step, applied lane-wise: hash = hash * 33 + next."""
    return data * 33 + next
# Based on the hash function used by ankerl::unordered_dense::hash
# https://martin.ankerl.com/2022/08/27/hashmap-bench-01/#ankerl__unordered_dense__hash
fn _ankerl_init[type: DType, size: Int]() -> SIMD[type, size]:
    """Returns the ankerl-style seed constant, splatted across all lanes."""
    alias int_type = _uint_type_of_width[type.bitwidth()]()
    # The 64-bit seed is cast to the element's width, then bitcast (not
    # value-converted) so the bit pattern is reinterpreted as `type`.
    alias init = Int64(-7046029254386353131).cast[int_type]()
    return SIMD[type, size](bitcast[type, 1](init))
fn _ankerl_hash_update[
    type: DType, size: Int
](data: SIMD[type, size], next: SIMD[type, size]) -> SIMD[type, size]:
    """One multiply-xor mixing step, computed on the unsigned bit patterns."""
    # Reinterpret both operands as unsigned integers of the same width so the
    # multiply wraps rather than overflowing.
    alias int_type = _uint_type_of_width[type.bitwidth()]()
    var state_bits = bitcast[int_type, size](data)
    var next_bits = bitcast[int_type, size](next)
    var mixed = (state_bits * next_bits) ^ next_bits
    return bitcast[type, size](mixed)
alias _HASH_INIT = _djbx33a_init
alias _HASH_UPDATE = _djbx33a_hash_update
# This is incrementally better than DJBX33A, in that it fixes some of the
# performance issue we've been seeing with Dict. It's still not ideal as
# a long-term hash function.
@always_inline
fn _hash_simd[type: DType, size: Int](data: SIMD[type, size]) -> Int:
    """Hash a SIMD vector using the ankerl multiply-xor mixing step.

    (The original docstring said "direct DJBX33A", but the body calls the
    `_ankerl_*` helpers.)
    See `hash(bytes, n)` documentation for more details.
    Parameters:
        type: The SIMD dtype of the input data.
        size: The SIMD width of the input data.
    Args:
        data: The input data to hash.
    Returns:
        A 64-bit integer hash. This hash is _not_ suitable for
        cryptographic purposes, but will have good low-bit
        hash collision statistical properties for common data structures.
    """
    @parameter
    if type == DType.bool:
        # Bool lanes are widened to int8 (False -> 0, True -> 1) first.
        return _hash_simd(data.cast[DType.int8]())
    # Mix the whole vector lane-wise with the seeded state.
    var hash_data = _ankerl_init[type, size]()
    hash_data = _ankerl_hash_update(hash_data, data)
    alias int_type = _uint_type_of_width[type.bitwidth()]()
    var final_data = bitcast[int_type, 1](hash_data[0]).cast[DType.uint64]()
    # Fold the remaining lanes into a single uint64 accumulator, one mixing
    # step per lane (loop unrolled at compile time).
    @parameter
    fn hash_value[i: Int]():
        final_data = _ankerl_hash_update(
            final_data,
            bitcast[int_type, 1](hash_data[i + 1]).cast[DType.uint64](),
        )
    unroll[hash_value, size - 1]()
    return int(final_data)
fn hash(bytes: DTypePointer[DType.uint8], n: Int) -> Int:
    """Hash a byte array using a SIMD-modified DJBX33A hash algorithm.

    Similar to `hash(bytes: DTypePointer[DType.int8], n: Int) -> Int` but
    takes a `DTypePointer[DType.uint8]` instead of `DTypePointer[DType.int8]`.
    See the overload for a complete description of the algorithm.

    Args:
        bytes: The byte array to hash.
        n: The length of the byte array.

    Returns:
        A 64-bit integer hash. This hash is _not_ suitable for
        cryptographic purposes, but will have good low-bit
        hash collision statistical properties for common data structures.
    """
    # Reinterpret the buffer as signed bytes and defer to the int8 overload.
    var signed_view = bytes.bitcast[DType.int8]()
    return hash(signed_view, n)
# TODO: Remove this overload once we have finished the transition to uint8
# for bytes. See https://github.com/modularml/mojo/issues/2317
fn hash(bytes: DTypePointer[DType.int8], n: Int) -> Int:
    """Hash a byte array using a SIMD-modified hash algorithm.

    _This hash function is not suitable for cryptographic purposes._ The
    algorithm is easy to reverse and produce deliberate hash collisions.
    The hash function is designed to have relatively good mixing and statistical
    properties for use in hash-based data structures.  We _do_ however initialize
    a random hash secret which is mixed into the final hash output. This can help
    prevent DDOS attacks on applications which make use of this function for
    dictionary hashing. As a consequence, hash values are deterministic within an
    individual runtime instance ie.  a value will always hash to the same thing,
    but in between runs this value will change based on the hash secret.

    We take advantage of Mojo's first-class SIMD support to create a
    SIMD-vectorized hash function, using some simple hash algorithm as a base.

    - Interpret those bytes as a SIMD vector, padded with zeros to align
        to the system SIMD width.
    - Apply the simple hash function parallelized across SIMD vectors.
    - Hash the final SIMD vector state to reduce to a single value.

    Python uses DJBX33A with a hash secret for smaller strings, and
    then the SipHash algorithm for longer strings. The arguments and tradeoffs
    are well documented in PEP 456. We should consider this and deeper
    performance/security tradeoffs as Mojo evolves.

    References:

    - [Wikipedia: Non-cryptographic hash function](https://en.wikipedia.org/wiki/Non-cryptographic_hash_function)
    - [Python PEP 456](https://peps.python.org/pep-0456/)
    - [PHP Hash algorithm and collisions](https://www.phpinternalsbook.com/php5/hashtables/hash_algorithm.html)

    ```mojo
    from random import rand
    var n = 64
    var rand_bytes = DTypePointer[DType.int8].alloc(n)
    rand(rand_bytes, n)
    hash(rand_bytes, n)
    ```

    Args:
        bytes: The byte array to hash.
        n: The length of the byte array.

    Returns:
        A 64-bit integer hash. This hash is _not_ suitable for
        cryptographic purposes, but will have good low-bit
        hash collision statistical properties for common data structures.
    """
    alias type = DType.uint64
    # Number of bytes in one uint64 lane.
    alias type_width = type.bitwidth() // DType.int8.bitwidth()
    alias simd_width = simdwidthof[type]()
    # stride is the byte length of the whole SIMD vector
    alias stride = type_width * simd_width

    # Compute our SIMD strides and tail length
    # n == k * stride + r
    var k = n // stride
    var r = n % stride
    debug_assert(n == k * stride + r, "wrong hash tail math")

    # 1. Reinterpret the underlying data as a larger int type
    var simd_data = bytes.bitcast[type]()

    # 2. Compute the hash, but strided across the SIMD vector width.
    # Each full-width chunk of the input updates the vector hash state.
    var hash_data = _HASH_INIT[type, simd_width]()
    for i in range(k):
        var update = simd_data.load[width=simd_width](i * simd_width)
        hash_data = _HASH_UPDATE(hash_data, update)

    # 3. Copy the tail data (smaller than the SIMD register) into
    # a final hash state update vector that's stack-allocated.
    # Zero-padding the tail keeps the last update well-defined.
    if r != 0:
        var remaining = InlineArray[Int8, stride](unsafe_uninitialized=True)
        var ptr = DTypePointer[DType.int8](
            UnsafePointer.address_of(remaining).bitcast[Int8]()
        )
        memcpy(ptr, bytes + k * stride, r)
        memset_zero(ptr + r, stride - r)  # set the rest to 0
        var last_value = ptr.bitcast[type]().load[width=simd_width]()
        hash_data = _HASH_UPDATE(hash_data, last_value)

    # Now finally, hash the final SIMD vector state.
    return _hash_simd(hash_data)
| mojo/stdlib/src/builtin/hash.mojo | false |
<filename>mojo/stdlib/src/builtin/int.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the Int class.
These are Mojo built-ins, so you don't need to import them.
"""
from collections import KeyElement
from builtin._math import Ceilable, CeilDivable, Floorable, Truncable
from builtin.hash import _hash_simd
from builtin.string import _calc_initial_buffer_size
from builtin.io import _snprintf
from builtin.format_int import _try_write_int
from builtin.simd import _format_scalar
from utils._visualizers import lldb_formatter_wrapping_type
from utils._format import Formattable, Formatter
from utils import InlineArray
# ===----------------------------------------------------------------------=== #
# Indexer
# ===----------------------------------------------------------------------=== #
trait Indexer:
    """This trait denotes a type that can be used to index a container that
    handles integral index values.

    This solves the issue of being able to index data structures such as `List`
    with the various integral types without being too broad and allowing types
    that are coercible to `Int` (e.g. floating point values that have `__int__`
    method). In contrast to `Intable`, types conforming to `Indexer` must be
    convertible to `Int` in a lossless way.

    Note that types conforming to `Indexer` are implicitly convertible to `Int`.
    """

    fn __index__(self) -> Int:
        """Return the index value.

        Returns:
            The index value of the object, losslessly converted to `Int`.
        """
        ...
# ===----------------------------------------------------------------------=== #
# index
# ===----------------------------------------------------------------------=== #
@always_inline("nodebug")
fn index[T: Indexer](idx: T, /) -> Int:
    """Converts the given value to an `Int` via its `__index__` method.

    Parameters:
        T: A type conforming to the `Indexer` trait.

    Args:
        idx: The value.

    Returns:
        An `Int` representing the index value.
    """
    var converted = idx.__index__()
    return converted
# ===----------------------------------------------------------------------=== #
# Intable
# ===----------------------------------------------------------------------=== #
trait Intable:
    """The `Intable` trait describes a type that can be converted to an Int.

    Any type that conforms to `Intable` or
    [`IntableRaising`](/mojo/stdlib/builtin/int/IntableRaising) works with
    the built-in [`int()`](/mojo/stdlib/builtin/int/int-function) function.

    This trait requires the type to implement the `__int__()` method. For
    example:

    ```mojo
    @value
    struct Foo(Intable):
        var i: Int

        fn __int__(self) -> Int:
            return self.i
    ```

    Now you can use the `int()` function to convert a `Foo` to an
    `Int`:

    ```mojo
    var foo = Foo(42)
    print(int(foo) == 42)
    ```

    ```plaintext
    True
    ```

    **Note:** If the `__int__()` method can raise an error, use the
    [`IntableRaising`](/mojo/stdlib/builtin/int/intableraising) trait
    instead.
    """

    fn __int__(self) -> Int:
        """Get the integral representation of the value.

        Returns:
            The integral representation of the value.
        """
        ...
trait IntableRaising:
    """
    The `IntableRaising` trait describes a type that can be converted to an
    Int, but the conversion might raise an error.

    Any type that conforms to [`Intable`](/mojo/stdlib/builtin/int/Intable)
    or `IntableRaising` works with the built-in
    [`int()`](/mojo/stdlib/builtin/int/int-function) function.

    This trait requires the type to implement the `__int__()` method, which can
    raise an error. For example:

    ```mojo
    @value
    struct Foo(IntableRaising):
        var i: Int

        fn __int__(self) raises -> Int:
            return self.i
    ```

    Now you can use the `int()` function to convert a `Foo` to an
    `Int`:

    ```mojo
    fn main() raises:
        var x = Foo(42)
        print(int(x) == 42)
    ```

    ```plaintext
    True
    ```
    """

    fn __int__(self) raises -> Int:
        """Get the integral representation of the value.

        Returns:
            The integral representation of the type.

        Raises:
            If the type does not have an integral representation.
        """
        ...
# ===----------------------------------------------------------------------=== #
# int
# ===----------------------------------------------------------------------=== #
@always_inline
fn int[T: Intable](value: T) -> Int:
    """Get the Int representation of the value.

    Parameters:
        T: The Intable type.

    Args:
        value: The object to get the integral representation of.

    Returns:
        The integral representation of the value.
    """
    var result = value.__int__()
    return result
@always_inline
fn int[T: IntableRaising](value: T) raises -> Int:
    """Get the Int representation of the value.

    Parameters:
        T: The IntableRaising type.

    Args:
        value: The object to get the integral representation of.

    Returns:
        The integral representation of the value.

    Raises:
        If the type does not have an integral representation.
    """
    return value.__int__()
fn int(value: String, base: Int = 10) raises -> Int:
    """Parses the given string as an integer in the given base and returns that value.

    For example, `int("19")` returns `19`. If the given string cannot be parsed
    as an integer value, an error is raised. For example, `int("hi")` raises an
    error.

    If base is 0, the string is parsed as an integer literal,
    see: https://docs.python.org/3/reference/lexical_analysis.html#integers

    Args:
        value: A string to be parsed as an integer in the given base.
        base: Base used for conversion, value must be between 2 and 36, or 0.

    Returns:
        An integer value that represents the string, or otherwise raises.
    """
    # Delegates to atol, which implements the base handling described above.
    return atol(value, base)
# ===----------------------------------------------------------------------=== #
# Int
# ===----------------------------------------------------------------------=== #
@lldb_formatter_wrapping_type
@value
@register_passable("trivial")
struct Int(
    Absable,
    Boolable,
    Ceilable,
    CeilDivable,
    Comparable,
    Floorable,
    Formattable,
    Indexer,
    Intable,
    KeyElement,
    Powable,
    Roundable,
    Stringable,
    Truncable,
):
    """This type represents an integer value."""

    var value: __mlir_type.index
    """The underlying storage for the integer value."""

    alias MAX = int(Scalar[DType.index].MAX)
    """Returns the maximum integer value."""

    alias MIN = int(Scalar[DType.index].MIN)
    """Returns the minimum value of type."""

    @always_inline("nodebug")
    fn __init__(inout self):
        """Default constructor that produces zero."""
        self.value = __mlir_op.`index.constant`[value = __mlir_attr.`0:index`]()

    @always_inline("nodebug")
    fn __init__(inout self, value: __mlir_type.index):
        """Construct Int from the given index value.

        Args:
            value: The init value.
        """
        self.value = value

    @always_inline("nodebug")
    fn __init__(inout self, value: __mlir_type.`!pop.scalar<si16>`):
        """Construct Int from the given Int16 value.

        Args:
            value: The init value.
        """
        self.value = __mlir_op.`pop.cast_to_builtin`[_type = __mlir_type.index](
            __mlir_op.`pop.cast`[_type = __mlir_type.`!pop.scalar<index>`](
                value
            )
        )

    @always_inline("nodebug")
    fn __init__(inout self, value: __mlir_type.`!pop.scalar<si32>`):
        """Construct Int from the given Int32 value.

        Args:
            value: The init value.
        """
        self.value = __mlir_op.`pop.cast_to_builtin`[_type = __mlir_type.index](
            __mlir_op.`pop.cast`[_type = __mlir_type.`!pop.scalar<index>`](
                value
            )
        )

    @always_inline("nodebug")
    fn __init__(inout self, value: __mlir_type.`!pop.scalar<si64>`):
        """Construct Int from the given Int64 value.

        Args:
            value: The init value.
        """
        self.value = __mlir_op.`pop.cast_to_builtin`[_type = __mlir_type.index](
            __mlir_op.`pop.cast`[_type = __mlir_type.`!pop.scalar<index>`](
                value
            )
        )

    @always_inline("nodebug")
    fn __init__(inout self, value: __mlir_type.`!pop.scalar<index>`):
        """Construct Int from the given Index value.

        Args:
            value: The init value.
        """
        self.value = __mlir_op.`pop.cast_to_builtin`[_type = __mlir_type.index](
            __mlir_op.`pop.cast`[_type = __mlir_type.`!pop.scalar<index>`](
                value
            )
        )

    @always_inline("nodebug")
    fn __init__(inout self, value: IntLiteral):
        """Construct Int from the given IntLiteral value.

        Args:
            value: The init value.
        """
        self = value.__int__()

    @always_inline("nodebug")
    fn __init__[IndexerTy: Indexer](inout self, value: IndexerTy):
        """Construct Int from the given Indexer value.

        Parameters:
            IndexerTy: A type conforming to Indexer.

        Args:
            value: The init value.
        """
        self = value.__index__()

    @always_inline("nodebug")
    fn __int__(self) -> Int:
        """Gets the integral value (this is an identity function for Int).

        Returns:
            The value as an integer.
        """
        return self

    fn __str__(self) -> String:
        """Get the integer as a string.

        Returns:
            A string representation.
        """
        return String.format_sequence(self)

    fn format_to(self, inout writer: Formatter):
        """
        Formats this integer to the provided formatter.

        Args:
            writer: The formatter to write to.
        """

        @parameter
        if triple_is_nvidia_cuda():
            var err = _try_write_int(writer, Int64(self))
            if err:
                abort(
                    "unreachable: unexpected write int failure condition: "
                    + str(err.value()[])
                )
        else:
            _format_scalar(writer, Int64(self))

    fn __repr__(self) -> String:
        """Get the integer as a string. Returns the same `String` as `__str__`.

        Returns:
            A string representation.
        """
        return str(self)

    @always_inline("nodebug")
    fn __mlir_index__(self) -> __mlir_type.index:
        """Convert to index.

        Returns:
            The corresponding __mlir_type.index value.
        """
        return self.value

    @always_inline("nodebug")
    fn __lt__(self, rhs: Int) -> Bool:
        """Compare this Int to the RHS using LT comparison.

        Args:
            rhs: The other Int to compare against.

        Returns:
            True if this Int is less-than the RHS Int and False otherwise.
        """
        return __mlir_op.`index.cmp`[
            pred = __mlir_attr.`#index<cmp_predicate slt>`
        ](self.value, rhs.value)

    @always_inline("nodebug")
    fn __le__(self, rhs: Int) -> Bool:
        """Compare this Int to the RHS using LE comparison.

        Args:
            rhs: The other Int to compare against.

        Returns:
            True if this Int is less-or-equal than the RHS Int and False
            otherwise.
        """
        return __mlir_op.`index.cmp`[
            pred = __mlir_attr.`#index<cmp_predicate sle>`
        ](self.value, rhs.value)

    @always_inline("nodebug")
    fn __eq__(self, rhs: Int) -> Bool:
        """Compare this Int to the RHS using EQ comparison.

        Args:
            rhs: The other Int to compare against.

        Returns:
            True if this Int is equal to the RHS Int and False otherwise.
        """
        return __mlir_op.`index.cmp`[
            pred = __mlir_attr.`#index<cmp_predicate eq>`
        ](self.value, rhs.value)

    @always_inline("nodebug")
    fn __ne__(self, rhs: Int) -> Bool:
        """Compare this Int to the RHS using NE comparison.

        Args:
            rhs: The other Int to compare against.

        Returns:
            True if this Int is non-equal to the RHS Int and False otherwise.
        """
        return __mlir_op.`index.cmp`[
            pred = __mlir_attr.`#index<cmp_predicate ne>`
        ](self.value, rhs.value)

    @always_inline("nodebug")
    fn __gt__(self, rhs: Int) -> Bool:
        """Compare this Int to the RHS using GT comparison.

        Args:
            rhs: The other Int to compare against.

        Returns:
            True if this Int is greater-than the RHS Int and False otherwise.
        """
        return __mlir_op.`index.cmp`[
            pred = __mlir_attr.`#index<cmp_predicate sgt>`
        ](self.value, rhs.value)

    @always_inline("nodebug")
    fn __ge__(self, rhs: Int) -> Bool:
        """Compare this Int to the RHS using GE comparison.

        Args:
            rhs: The other Int to compare against.

        Returns:
            True if this Int is greater-or-equal than the RHS Int and False
            otherwise.
        """
        return __mlir_op.`index.cmp`[
            pred = __mlir_attr.`#index<cmp_predicate sge>`
        ](self.value, rhs.value)

    @always_inline("nodebug")
    fn __bool__(self) -> Bool:
        """Convert this Int to Bool.

        Returns:
            False Bool value if the value is equal to 0 and True otherwise.
        """
        return self != 0

    @always_inline("nodebug")
    fn __index__(self) -> Int:
        """Return self converted to an integer, if self is suitable for use as
        an index into a list.

        For Int type this is simply the value.

        Returns:
            The corresponding Int value.
        """
        return self

    @always_inline("nodebug")
    fn __pos__(self) -> Int:
        """Return +self.

        Returns:
            The +self value.
        """
        return self

    @always_inline("nodebug")
    fn __neg__(self) -> Int:
        """Return -self.

        Returns:
            The -self value.
        """
        return __mlir_op.`index.mul`(
            self.value,
            __mlir_op.`index.constant`[value = __mlir_attr.`-1:index`](),
        )

    @always_inline("nodebug")
    fn __abs__(self) -> Self:
        """Return the absolute value of the Int value.

        Returns:
            The absolute value.
        """
        return -self if self < 0 else self

    @always_inline("nodebug")
    fn __ceil__(self) -> Self:
        """Return the ceiling of the Int value, which is itself.

        Returns:
            The Int value itself.
        """
        return self

    @always_inline("nodebug")
    fn __floor__(self) -> Self:
        """Return the floor of the Int value, which is itself.

        Returns:
            The Int value itself.
        """
        return self

    @always_inline("nodebug")
    fn __round__(self) -> Self:
        """Return the rounded value of the Int value, which is itself.

        Returns:
            The Int value itself.
        """
        return self

    @always_inline("nodebug")
    fn __round__(self, ndigits: Int) -> Self:
        """Return the Int value rounded to the given number of digits.

        Args:
            ndigits: The number of digits to round to.

        Returns:
            The Int value itself if ndigits >= 0, otherwise the value
            rounded to the nearest multiple of 10**(-ndigits), with ties
            rounded away from zero (consistent with `IntLiteral.__round__`).
        """
        if ndigits >= 0:
            return self
        # Round to the *nearest* multiple of 10**(-ndigits) rather than
        # truncating toward negative infinity; mirrors the half-adjustment
        # performed by IntLiteral.__round__.
        var multiplier = 10 ** -(ndigits)
        var mod = self % multiplier
        if mod * 2 >= multiplier:
            mod -= multiplier
        return self - mod

    @always_inline("nodebug")
    fn __trunc__(self) -> Self:
        """Return the truncated Int value, which is itself.

        Returns:
            The Int value itself.
        """
        return self

    @always_inline("nodebug")
    fn __invert__(self) -> Int:
        """Return ~self.

        Returns:
            The ~self value.
        """
        return self ^ -1

    @always_inline("nodebug")
    fn __add__(self, rhs: Int) -> Int:
        """Return `self + rhs`.

        Args:
            rhs: The value to add.

        Returns:
            `self + rhs` value.
        """
        return __mlir_op.`index.add`(self.value, rhs.value)

    @always_inline("nodebug")
    fn __sub__(self, rhs: Int) -> Int:
        """Return `self - rhs`.

        Args:
            rhs: The value to subtract.

        Returns:
            `self - rhs` value.
        """
        return __mlir_op.`index.sub`(self.value, rhs.value)

    @always_inline("nodebug")
    fn __mul__(self, rhs: Int) -> Int:
        """Return `self * rhs`.

        Args:
            rhs: The value to multiply with.

        Returns:
            `self * rhs` value.
        """
        return __mlir_op.`index.mul`(self.value, rhs.value)

    fn __truediv__(self, rhs: Int) -> Float64:
        """Return the floating point division of `self` and `rhs`.

        Args:
            rhs: The value to divide on.

        Returns:
            `float(self)/float(rhs)` value.
        """
        return Float64(self) / Float64(rhs)

    @always_inline("nodebug")
    fn _positive_div(self, rhs: Int) -> Int:
        """Return the division of `self` and `rhs` assuming that the arguments
        are both positive.

        Args:
            rhs: The value to divide on.

        Returns:
            The integer division of `self` and `rhs` .
        """
        return __mlir_op.`index.divs`(self.value, rhs.value)

    @always_inline("nodebug")
    fn _positive_rem(self, rhs: Int) -> Int:
        """Return the modulus of `self` and `rhs` assuming that the arguments
        are both positive.

        Args:
            rhs: The value to divide on.

        Returns:
            The integer modulus of `self` and `rhs` .
        """
        return __mlir_op.`index.rems`(self.value, rhs.value)

    @always_inline("nodebug")
    fn __floordiv__(self, rhs: Int) -> Int:
        """Return the division of `self` and `rhs` rounded down to the nearest
        integer.

        Args:
            rhs: The value to divide on.

        Returns:
            `floor(self/rhs)` value.
        """
        if rhs == 0:
            # this should raise an exception.
            return 0
        var div: Int = self._positive_div(rhs)
        if self > 0 and rhs > 0:
            return div
        var mod = self - div * rhs
        if ((rhs < 0) ^ (self < 0)) and mod:
            return div - 1
        return div

    @always_inline("nodebug")
    fn __mod__(self, rhs: Int) -> Int:
        """Return the remainder of self divided by rhs.

        Args:
            rhs: The value to divide on.

        Returns:
            The remainder of dividing self by rhs.
        """
        if rhs == 0:
            # this should raise an exception.
            return 0
        if rhs > 0 and self > 0:
            return self._positive_rem(rhs)
        var div: Int = self._positive_div(rhs)
        var mod = self - div * rhs
        if ((rhs < 0) ^ (self < 0)) and mod:
            return mod + rhs
        return mod

    @always_inline("nodebug")
    fn __divmod__(self, rhs: Int) -> Tuple[Int, Int]:
        """Computes both the quotient and remainder using integer division.

        Args:
            rhs: The value to divide on.

        Returns:
            The quotient and remainder as a `Tuple(self // rhs, self % rhs)`.
        """
        if rhs == 0:
            return 0, 0
        var div: Int = self._positive_div(rhs)
        if rhs > 0 and self > 0:
            return div, self._positive_rem(rhs)
        var mod = self - div * rhs
        if ((rhs < 0) ^ (self < 0)) and mod:
            return div - 1, mod + rhs
        return div, mod

    @always_inline("nodebug")
    fn __pow__(self, exp: Self) -> Self:
        """Return the value raised to the power of the given exponent.

        Computes the power of an integer using the Russian Peasant Method.

        Args:
            exp: The exponent value.

        Returns:
            The value of `self` raised to the power of `exp`.
        """
        if exp < 0:
            # Not defined for Integers, this should raise an
            # exception.
            return 0
        var res: Int = 1
        var x = self
        var n = exp
        while n > 0:
            if n & 1 != 0:
                res *= x
            x *= x
            n >>= 1
        return res

    @always_inline("nodebug")
    fn __lshift__(self, rhs: Int) -> Int:
        """Return `self << rhs`.

        Args:
            rhs: The value to shift with.

        Returns:
            `self << rhs`.
        """
        if rhs < 0:
            # this should raise an exception.
            return 0
        return __mlir_op.`index.shl`(self.value, rhs.value)

    @always_inline("nodebug")
    fn __rshift__(self, rhs: Int) -> Int:
        """Return `self >> rhs`.

        Args:
            rhs: The value to shift with.

        Returns:
            `self >> rhs`.
        """
        if rhs < 0:
            # this should raise an exception.
            return 0
        return __mlir_op.`index.shrs`(self.value, rhs.value)

    @always_inline("nodebug")
    fn __and__(self, rhs: Int) -> Int:
        """Return `self & rhs`.

        Args:
            rhs: The RHS value.

        Returns:
            `self & rhs`.
        """
        return __mlir_op.`index.and`(self.value, rhs.value)

    @always_inline("nodebug")
    fn __xor__(self, rhs: Int) -> Int:
        """Return `self ^ rhs`.

        Args:
            rhs: The RHS value.

        Returns:
            `self ^ rhs`.
        """
        return __mlir_op.`index.xor`(self.value, rhs.value)

    @always_inline("nodebug")
    fn __or__(self, rhs: Int) -> Int:
        """Return `self | rhs`.

        Args:
            rhs: The RHS value.

        Returns:
            `self | rhs`.
        """
        return __mlir_op.`index.or`(self.value, rhs.value)

    # ===----------------------------------------------------------------------===#
    # In place operations.
    # ===----------------------------------------------------------------------===#

    @always_inline("nodebug")
    fn __iadd__(inout self, rhs: Int):
        """Compute `self + rhs` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self + rhs

    @always_inline("nodebug")
    fn __isub__(inout self, rhs: Int):
        """Compute `self - rhs` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self - rhs

    @always_inline("nodebug")
    fn __imul__(inout self, rhs: Int):
        """Compute self*rhs and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self * rhs

    fn __itruediv__(inout self, rhs: Int):
        """Compute `self / rhs`, convert to int, and save the result in self.

        Since `floor(self / rhs)` is equivalent to `self // rhs`, this yields
        the same as `__ifloordiv__`.

        Args:
            rhs: The RHS value.
        """
        self = self // rhs

    @always_inline("nodebug")
    fn __ifloordiv__(inout self, rhs: Int):
        """Compute `self // rhs` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self // rhs

    fn __imod__(inout self, rhs: Int):
        """Compute `self % rhs` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self % rhs

    @always_inline("nodebug")
    fn __ipow__(inout self, rhs: Int):
        """Compute `pow(self, rhs)` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self**rhs

    @always_inline("nodebug")
    fn __ilshift__(inout self, rhs: Int):
        """Compute `self << rhs` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self << rhs

    @always_inline("nodebug")
    fn __irshift__(inout self, rhs: Int):
        """Compute `self >> rhs` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self >> rhs

    @always_inline("nodebug")
    fn __iand__(inout self, rhs: Int):
        """Compute `self & rhs` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self & rhs

    @always_inline("nodebug")
    fn __ixor__(inout self, rhs: Int):
        """Compute `self ^ rhs` and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self ^ rhs

    @always_inline("nodebug")
    fn __ior__(inout self, rhs: Int):
        """Compute self|rhs and save the result in self.

        Args:
            rhs: The RHS value.
        """
        self = self | rhs

    # ===----------------------------------------------------------------------===#
    # Reversed operations
    # ===----------------------------------------------------------------------===#

    @always_inline("nodebug")
    fn __radd__(self, value: Int) -> Int:
        """Return `value + self`.

        Args:
            value: The other value.

        Returns:
            `value + self`.
        """
        return self + value

    @always_inline("nodebug")
    fn __rsub__(self, value: Int) -> Int:
        """Return `value - self`.

        Args:
            value: The other value.

        Returns:
            `value - self`.
        """
        return value - self

    @always_inline("nodebug")
    fn __rmul__(self, value: Int) -> Int:
        """Return `value * self`.

        Args:
            value: The other value.

        Returns:
            `value * self`.
        """
        return self * value

    @always_inline("nodebug")
    fn __rfloordiv__(self, value: Int) -> Int:
        """Return `value // self`.

        Args:
            value: The other value.

        Returns:
            `value // self`.
        """
        return value // self

    @always_inline("nodebug")
    fn __rmod__(self, value: Int) -> Int:
        """Return `value % self`.

        Args:
            value: The other value.

        Returns:
            `value % self`.
        """
        return value % self

    @always_inline("nodebug")
    fn __rpow__(self, value: Int) -> Int:
        """Return `pow(value,self)`.

        Args:
            value: The other value.

        Returns:
            `pow(value,self)`.
        """
        return value**self

    @always_inline("nodebug")
    fn __rlshift__(self, value: Int) -> Int:
        """Return `value << self`.

        Args:
            value: The other value.

        Returns:
            `value << self`.
        """
        return value << self

    @always_inline("nodebug")
    fn __rrshift__(self, value: Int) -> Int:
        """Return `value >> self`.

        Args:
            value: The other value.

        Returns:
            `value >> self`.
        """
        return value >> self

    @always_inline("nodebug")
    fn __rand__(self, value: Int) -> Int:
        """Return `value & self`.

        Args:
            value: The other value.

        Returns:
            `value & self`.
        """
        return value & self

    @always_inline("nodebug")
    fn __ror__(self, value: Int) -> Int:
        """Return `value | self`.

        Args:
            value: The other value.

        Returns:
            `value | self`.
        """
        return value | self

    @always_inline("nodebug")
    fn __rxor__(self, value: Int) -> Int:
        """Return `value ^ self`.

        Args:
            value: The other value.

        Returns:
            `value ^ self`.
        """
        return value ^ self

    fn __hash__(self) -> Int:
        """Hash the int using builtin hash.

        Returns:
            A 64-bit hash value. This value is _not_ suitable for cryptographic
            uses. Its intended usage is for data structures. See the `hash`
            builtin documentation for more details.
        """
        # TODO(MOCO-636): switch to DType.index
        return _hash_simd(Scalar[DType.int64](self))
| mojo/stdlib/src/builtin/int.mojo | false |
<filename>mojo/stdlib/src/builtin/int_literal.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the IntLiteral class."""
from builtin._math import Ceilable, CeilDivable, Floorable, Truncable
@value
@nonmaterializable(Int)
@register_passable("trivial")
struct IntLiteral(
Absable,
Boolable,
Ceilable,
CeilDivable,
Comparable,
Floorable,
Intable,
Roundable,
Stringable,
Truncable,
Indexer,
):
"""This type represents a static integer literal value with
infinite precision. They can't be materialized at runtime and
must be lowered to other integer types (like Int), but allow for
compile-time operations that would overflow on Int and other fixed
precision integer types.
"""
alias _mlir_type = __mlir_type.`!kgen.int_literal`
var value: Self._mlir_type
"""The underlying storage for the integer value."""
alias _one = IntLiteral(
__mlir_attr.`#kgen.int_literal<1> : !kgen.int_literal`
)
@always_inline("nodebug")
fn __init__(inout self):
"""Default constructor."""
self.value = __mlir_attr.`#kgen.int_literal<0> : !kgen.int_literal`
@always_inline("nodebug")
fn __init__(inout self, value: __mlir_type.`!kgen.int_literal`):
"""Construct IntLiteral from the given mlir !kgen.int_literal value.
Args:
value: The init value.
"""
self.value = value
@always_inline("nodebug")
fn __int__(self) -> Int:
"""Convert from IntLiteral to Int.
Returns:
The value as an integer.
"""
return Int(self.__as_mlir_index())
@always_inline("nodebug")
fn _bit_width(self) -> IntLiteral:
"""Get the (signed) bit width of the IntLiteral.
Returns:
The bit width.
"""
return __mlir_op.`kgen.int_literal.bit_width`(self.value)
@always_inline
fn __str__(self) -> String:
"""Convert from IntLiteral to String.
Returns:
The value as a string.
"""
return str(Int(self))
@always_inline("nodebug")
fn __as_mlir_index(self) -> __mlir_type.index:
"""Convert from IntLiteral to index.
Returns:
The corresponding __mlir_type.index value.
"""
return __mlir_op.`kgen.int_literal.convert`[_type = __mlir_type.index](
self.value
)
@always_inline("nodebug")
fn __lt__(self, rhs: Self) -> Bool:
"""Compare this IntLiteral to the RHS using LT comparison.
Args:
rhs: The other IntLiteral to compare against.
Returns:
True if this IntLiteral is less-than the RHS IntLiteral and False otherwise.
"""
return __mlir_op.`kgen.int_literal.cmp`[
pred = __mlir_attr.`#kgen<int_literal.cmp_pred lt>`
](self.value, rhs.value)
@always_inline("nodebug")
fn __le__(self, rhs: Self) -> Bool:
"""Compare this IntLiteral to the RHS using LE comparison.
Args:
rhs: The other IntLiteral to compare against.
Returns:
True if this IntLiteral is less-or-equal than the RHS IntLiteral and False
otherwise.
"""
return __mlir_op.`kgen.int_literal.cmp`[
pred = __mlir_attr.`#kgen<int_literal.cmp_pred le>`
](self.value, rhs.value)
@always_inline("nodebug")
fn __eq__(self, rhs: Self) -> Bool:
"""Compare this IntLiteral to the RHS using EQ comparison.
Args:
rhs: The other IntLiteral to compare against.
Returns:
True if this IntLiteral is equal to the RHS IntLiteral and False otherwise.
"""
return __mlir_op.`kgen.int_literal.cmp`[
pred = __mlir_attr.`#kgen<int_literal.cmp_pred eq>`
](self.value, rhs.value)
@always_inline("nodebug")
fn __ne__(self, rhs: Self) -> Bool:
"""Compare this IntLiteral to the RHS using NE comparison.
Args:
rhs: The other IntLiteral to compare against.
Returns:
True if this IntLiteral is non-equal to the RHS IntLiteral and False otherwise.
"""
return __mlir_op.`kgen.int_literal.cmp`[
pred = __mlir_attr.`#kgen<int_literal.cmp_pred ne>`
](self.value, rhs.value)
@always_inline("nodebug")
fn __gt__(self, rhs: Self) -> Bool:
"""Compare this IntLiteral to the RHS using GT comparison.
Args:
rhs: The other IntLiteral to compare against.
Returns:
True if this IntLiteral is greater-than the RHS IntLiteral and False otherwise.
"""
return __mlir_op.`kgen.int_literal.cmp`[
pred = __mlir_attr.`#kgen<int_literal.cmp_pred gt>`
](self.value, rhs.value)
@always_inline("nodebug")
fn __ge__(self, rhs: Self) -> Bool:
"""Compare this IntLiteral to the RHS using GE comparison.
Args:
rhs: The other IntLiteral to compare against.
Returns:
True if this IntLiteral is greater-or-equal than the RHS IntLiteral and False
otherwise.
"""
return __mlir_op.`kgen.int_literal.cmp`[
pred = __mlir_attr.`#kgen<int_literal.cmp_pred ge>`
](self.value, rhs.value)
@always_inline("nodebug")
fn __bool__(self) -> Bool:
"""Convert this IntLiteral to Bool.
Returns:
False Bool value if the value is equal to 0 and True otherwise.
"""
return self != Self()
@always_inline("nodebug")
fn __index__(self) -> Int:
"""Return self converted to an integer, if self is suitable for use as
an index into a list.
Returns:
The corresponding Int value.
"""
return self.__int__()
@always_inline("nodebug")
fn __pos__(self) -> Self:
"""Return +self.
Returns:
The +self value.
"""
return self
@always_inline("nodebug")
fn __neg__(self) -> Self:
"""Return -self.
Returns:
The -self value.
"""
return Self() - self
@always_inline("nodebug")
fn __abs__(self) -> Self:
"""Return the absolute value of the IntLiteral value.
Returns:
The absolute value.
"""
if self >= 0:
return self
return -self
@always_inline("nodebug")
fn __ceil__(self) -> Self:
"""Return the ceiling of the IntLiteral value, which is itself.
Returns:
The IntLiteral value itself.
"""
return self
@always_inline("nodebug")
fn __floor__(self) -> Self:
"""Return the floor of the IntLiteral value, which is itself.
Returns:
The IntLiteral value itself.
"""
return self
@always_inline("nodebug")
fn __round__(self) -> Self:
"""Return the rounded value of the IntLiteral value, which is itself.
Returns:
The IntLiteral value itself.
"""
return self
@always_inline("nodebug")
fn __trunc__(self) -> Self:
"""Return the truncated of the IntLiteral value, which is itself.
Returns:
The IntLiteral value itself.
"""
return self
@always_inline("nodebug")
fn __divmod__(self, rhs: Self) -> Tuple[Self, Self]:
"""Return the quotient and remainder of the division of self by rhs.
Args:
rhs: The value to divide on.
Returns:
The quotient and remainder of the division.
"""
var quotient: Self = self.__floordiv__(rhs)
var remainder: Self = self - (quotient * rhs)
return quotient, remainder
@always_inline("nodebug")
fn __round__(self, ndigits: Int) -> Self:
"""Return the rounded value of the IntLiteral value, which is itself.
Args:
ndigits: The number of digits to round to.
Returns:
The IntLiteral value itself if ndigits >= 0 else the rounded value.
"""
if ndigits >= 0:
return self
alias one = __mlir_attr.`#kgen.int_literal<1> : !kgen.int_literal`
alias ten = __mlir_attr.`#kgen.int_literal<10> : !kgen.int_literal`
var multiplier = one
# TODO: Use IntLiteral.__pow__() when it's implemented.
for _ in range(-ndigits):
multiplier = __mlir_op.`kgen.int_literal.binop`[
oper = __mlir_attr.`#kgen<int_literal.binop_kind mul>`
](multiplier, ten)
alias Pair = Tuple[Self, Self]
var mod: IntLiteral = self % Self(multiplier)
if mod * 2 >= multiplier:
mod -= multiplier
return self - mod
    @always_inline("nodebug")
    fn __invert__(self) -> Self:
        """Return ~self.

        Returns:
            The ~self value.
        """
        # Two's-complement identity: ~x == x ^ -1, where -1 (all bits set)
        # is built as zero minus one.
        return self ^ (Self() - Self._one)
    @always_inline("nodebug")
    fn __add__(self, rhs: Self) -> Self:
        """Return `self + rhs`.

        Args:
            rhs: The value to add.

        Returns:
            `self + rhs` value.
        """
        # Delegates to the compiler's literal arithmetic via the
        # `kgen.int_literal.binop` op with the `add` kind.
        return Self(
            __mlir_op.`kgen.int_literal.binop`[
                oper = __mlir_attr.`#kgen<int_literal.binop_kind add>`
            ](self.value, rhs.value)
        )
@always_inline("nodebug")
fn __sub__(self, rhs: Self) -> Self:
"""Return `self - rhs`.
Args:
rhs: The value to subtract.
Returns:
`self - rhs` value.
"""
return Self(
__mlir_op.`kgen.int_literal.binop`[
oper = __mlir_attr.`#kgen<int_literal.binop_kind sub>`
](self.value, rhs.value)
)
@always_inline("nodebug")
fn __mul__(self, rhs: Self) -> Self:
"""Return `self * rhs`.
Args:
rhs: The value to multiply with.
Returns:
`self * rhs` value.
"""
return Self(
__mlir_op.`kgen.int_literal.binop`[
oper = __mlir_attr.`#kgen<int_literal.binop_kind mul>`
](self.value, rhs.value)
)
# TODO: implement __pow__
    @always_inline("nodebug")
    fn __floordiv__(self, rhs: Self) -> Self:
        """Return `self // rhs`.

        Args:
            rhs: The value to divide with.

        Returns:
            `self // rhs` value, or 0 if `rhs` is 0.
        """
        if rhs == Self():
            # TODO: this should raise an exception; this function cannot
            # raise, so division by zero currently yields 0.
            return Self()
        return Self(
            __mlir_op.`kgen.int_literal.binop`[
                oper = __mlir_attr.`#kgen<int_literal.binop_kind floordiv>`
            ](self.value, rhs.value)
        )
    @always_inline("nodebug")
    fn __mod__(self, rhs: Self) -> Self:
        """Return the remainder of self divided by rhs.

        Args:
            rhs: The value to divide on.

        Returns:
            The remainder of dividing self by rhs, or 0 if `rhs` is 0.
        """
        if rhs == Self():
            # TODO: this should raise an exception; this function cannot
            # raise, so a zero divisor currently yields 0.
            return Self()
        return Self(
            __mlir_op.`kgen.int_literal.binop`[
                oper = __mlir_attr.`#kgen<int_literal.binop_kind mod>`
            ](self.value, rhs.value)
        )
    @always_inline("nodebug")
    fn __lshift__(self, rhs: Self) -> Self:
        """Return `self << rhs`.

        Args:
            rhs: The value to shift with.

        Returns:
            `self << rhs`, or 0 if `rhs` is negative.
        """
        if rhs < Self():
            # TODO: this should raise an exception; negative shift amounts
            # currently yield 0 since this function cannot raise.
            return Self()
        return Self(
            __mlir_op.`kgen.int_literal.binop`[
                oper = __mlir_attr.`#kgen<int_literal.binop_kind lshift>`
            ](self.value, rhs.value)
        )
@always_inline("nodebug")
fn __rshift__(self, rhs: Self) -> Self:
"""Return `self >> rhs`.
Args:
rhs: The value to shift with.
Returns:
`self >> rhs`.
"""
if rhs < Self():
# this should raise an exception.
return Self()
return Self(
__mlir_op.`kgen.int_literal.binop`[
oper = __mlir_attr.`#kgen<int_literal.binop_kind rshift>`
](self.value, rhs.value)
)
@always_inline("nodebug")
fn __and__(self, rhs: Self) -> Self:
"""Return `self & rhs`.
Args:
rhs: The RHS value.
Returns:
`self & rhs`.
"""
return Self(
__mlir_op.`kgen.int_literal.binop`[
oper = __mlir_attr.`#kgen<int_literal.binop_kind and>`
](self.value, rhs.value)
)
@always_inline("nodebug")
fn __xor__(self, rhs: Self) -> Self:
"""Return `self ^ rhs`.
Args:
rhs: The RHS value.
Returns:
`self ^ rhs`.
"""
return Self(
__mlir_op.`kgen.int_literal.binop`[
oper = __mlir_attr.`#kgen<int_literal.binop_kind xor>`
](self.value, rhs.value)
)
@always_inline("nodebug")
fn __or__(self, rhs: Self) -> Self:
"""Return `self | rhs`.
Args:
rhs: The RHS value.
Returns:
`self | rhs`.
"""
return Self(
__mlir_op.`kgen.int_literal.binop`[
oper = __mlir_attr.`#kgen<int_literal.binop_kind or>`
](self.value, rhs.value)
)
# ===----------------------------------------------------------------------===#
# In place operations.
# ===----------------------------------------------------------------------===#
@always_inline("nodebug")
fn __iadd__(inout self, rhs: Self):
"""Compute `self + rhs` and save the result in self.
Args:
rhs: The RHS value.
"""
self = self + rhs
@always_inline("nodebug")
fn __isub__(inout self, rhs: Self):
"""Compute `self - rhs` and save the result in self.
Args:
rhs: The RHS value.
"""
self = self - rhs
@always_inline("nodebug")
fn __imul__(inout self, rhs: Self):
"""Compute self*rhs and save the result in self.
Args:
rhs: The RHS value.
"""
self = self * rhs
@always_inline("nodebug")
fn __ifloordiv__(inout self, rhs: Self):
"""Compute self//rhs and save the result in self.
Args:
rhs: The RHS value.
"""
self = self // rhs
@always_inline("nodebug")
fn __ilshift__(inout self, rhs: Self):
"""Compute `self << rhs` and save the result in self.
Args:
rhs: The RHS value.
"""
self = self << rhs
@always_inline("nodebug")
fn __irshift__(inout self, rhs: Self):
"""Compute `self >> rhs` and save the result in self.
Args:
rhs: The RHS value.
"""
self = self >> rhs
@always_inline("nodebug")
fn __iand__(inout self, rhs: Self):
"""Compute `self & rhs` and save the result in self.
Args:
rhs: The RHS value.
"""
self = self & rhs
@always_inline("nodebug")
fn __ixor__(inout self, rhs: Self):
"""Compute `self ^ rhs` and save the result in self.
Args:
rhs: The RHS value.
"""
self = self ^ rhs
@always_inline("nodebug")
fn __ior__(inout self, rhs: Self):
"""Compute self|rhs and save the result in self.
Args:
rhs: The RHS value.
"""
self = self | rhs
# ===----------------------------------------------------------------------===#
# Reversed operations
# ===----------------------------------------------------------------------===#
@always_inline("nodebug")
fn __radd__(self, value: Self) -> Self:
"""Return `value + self`.
Args:
value: The other value.
Returns:
`value + self`.
"""
return self + value
@always_inline("nodebug")
fn __rsub__(self, value: Self) -> Self:
"""Return `value - self`.
Args:
value: The other value.
Returns:
`value - self`.
"""
return value - self
@always_inline("nodebug")
fn __rmul__(self, value: Self) -> Self:
"""Return `value * self`.
Args:
value: The other value.
Returns:
`value * self`.
"""
return self * value
@always_inline("nodebug")
fn __rfloordiv__(self, value: Self) -> Self:
"""Return `value // self`.
Args:
value: The other value.
Returns:
`value // self`.
"""
return value // self
@always_inline("nodebug")
fn __rlshift__(self, value: Self) -> Self:
"""Return `value << self`.
Args:
value: The other value.
Returns:
`value << self`.
"""
return value << self
@always_inline("nodebug")
fn __rrshift__(self, value: Self) -> Self:
"""Return `value >> self`.
Args:
value: The other value.
Returns:
`value >> self`.
"""
return value >> self
@always_inline("nodebug")
fn __rand__(self, value: Self) -> Self:
"""Return `value & self`.
Args:
value: The other value.
Returns:
`value & self`.
"""
return value & self
@always_inline("nodebug")
fn __ror__(self, value: Self) -> Self:
"""Return `value | self`.
Args:
value: The other value.
Returns:
`value | self`.
"""
return value | self
@always_inline("nodebug")
fn __rxor__(self, value: Self) -> Self:
"""Return `value ^ self`.
Args:
value: The other value.
Returns:
`value ^ self`.
"""
return value ^ self
| mojo/stdlib/src/builtin/int_literal.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provides utilities for working with input/output.
These are Mojo built-ins, so you don't need to import them.
"""
from sys import (
bitwidthof,
os_is_windows,
triple_is_nvidia_cuda,
external_call,
stdout,
)
from builtin.dtype import _get_dtype_printf_format
from builtin.builtin_list import _LITRefPackHelper
from builtin.file_descriptor import FileDescriptor
from memory import UnsafePointer
from utils import StringRef, unroll
from utils._format import Formattable, Formatter, write_to
# ===----------------------------------------------------------------------=== #
# _file_handle
# ===----------------------------------------------------------------------=== #
fn _dup(fd: Int32) -> Int32:
    """Duplicates the given file descriptor via the C `dup` call
    (`_dup` on Windows) and returns the new descriptor."""

    @parameter
    if os_is_windows():
        return external_call["_dup", Int32](fd)
    else:
        return external_call["dup", Int32](fd)
@value
@register_passable("trivial")
struct _fdopen:
    """Context-manager wrapper that opens a buffered C `FILE*` stream over a
    duplicated file descriptor (stdout/stderr)."""

    alias STDOUT = 1
    alias STDERR = 2
    # Opaque C `FILE*` handle returned by fdopen/_fdopen.
    var handle: UnsafePointer[NoneType]

    fn __init__(inout self, stream_id: FileDescriptor):
        """Creates a file handle to the stdout/stderr stream.

        Args:
            stream_id: The stream id.
        """
        # Open in append mode. The descriptor is dup'd first so closing this
        # stream in __exit__ does not close the caller's original descriptor.
        alias mode = "a"
        var handle: UnsafePointer[NoneType]

        @parameter
        if os_is_windows():
            handle = external_call["_fdopen", UnsafePointer[NoneType]](
                _dup(stream_id.value), mode.unsafe_ptr()
            )
        else:
            handle = external_call["fdopen", UnsafePointer[NoneType]](
                _dup(stream_id.value), mode.unsafe_ptr()
            )
        self.handle = handle

    fn __enter__(self) -> Self:
        """Returns self for use in a `with` statement."""
        return self

    fn __exit__(self):
        """Closes the file handle."""
        _ = external_call["fclose", Int32](self.handle)
# ===----------------------------------------------------------------------=== #
# _flush
# ===----------------------------------------------------------------------=== #
@no_inline
fn _flush(file: FileDescriptor = stdout):
    """Flushes any buffered output on the given stream (stdout by default)."""
    with _fdopen(file) as fd:
        _ = external_call["fflush", Int32](fd)
# ===----------------------------------------------------------------------=== #
# _printf
# ===----------------------------------------------------------------------=== #
@no_inline
fn _printf[
    fmt: StringLiteral, *types: AnyType
](*arguments: *types, file: FileDescriptor = stdout):
    """Formats `arguments` with the C-style format string `fmt` and writes
    the result to `file`.

    Parameters:
        fmt: A printf-style format string literal.
        types: The types of the interpolated arguments.

    Args:
        arguments: Arguments interpolated into the format string.
        file: The output stream (ignored on CUDA, which uses device printf).
    """
    # The argument pack will contain references for each value in the pack,
    # but we want to pass their values directly into the C snprintf call. Load
    # all the members of the pack.
    var kgen_pack = _LITRefPackHelper(arguments._value).get_as_kgen_pack()

    # FIXME(37129): Cannot use get_loaded_kgen_pack because vtables on types
    # aren't stripped off correctly.
    var loaded_pack = __mlir_op.`kgen.pack.load`(kgen_pack)

    @parameter
    if triple_is_nvidia_cuda():
        # Device-side printing goes through CUDA's vprintf.
        _ = external_call["vprintf", Int32](
            fmt.unsafe_ptr(), UnsafePointer.address_of(loaded_pack)
        )
    else:
        with _fdopen(file) as fd:
            # Call the runtime's fprintf shim with the loaded argument pack.
            _ = __mlir_op.`pop.external_call`[
                func = "KGEN_CompilerRT_fprintf".value,
                variadicType = __mlir_attr[
                    `(`,
                    `!kgen.pointer<none>,`,
                    `!kgen.pointer<scalar<si8>>`,
                    `) -> !pop.scalar<si32>`,
                ],
                _type=Int32,
            ](fd, fmt.unsafe_ptr(), loaded_pack)
# ===----------------------------------------------------------------------=== #
# _snprintf
# ===----------------------------------------------------------------------=== #
@no_inline
fn _snprintf[
fmt: StringLiteral, *types: AnyType
](str: UnsafePointer[UInt8], size: Int, *arguments: *types) -> Int:
"""Writes a format string into an output pointer.
Parameters:
fmt: A format string.
types: The types of arguments interpolated into the format string.
Args:
str: A pointer into which the format string is written.
size: At most, `size - 1` bytes are written into the output string.
arguments: Arguments interpolated into the format string.
Returns:
The number of bytes written into the output string.
"""
# The argument pack will contain references for each value in the pack,
# but we want to pass their values directly into the C snprintf call. Load
# all the members of the pack.
var kgen_pack = _LITRefPackHelper(arguments._value).get_as_kgen_pack()
# FIXME(37129): Cannot use get_loaded_kgen_pack because vtables on types
# aren't stripped off correctly.
var loaded_pack = __mlir_op.`kgen.pack.load`(kgen_pack)
return int(
__mlir_op.`pop.external_call`[
func = "snprintf".value,
variadicType = __mlir_attr[
`(`,
`!kgen.pointer<scalar<si8>>,`,
`!pop.scalar<index>, `,
`!kgen.pointer<scalar<si8>>`,
`) -> !pop.scalar<si32>`,
],
_type=Int32,
](str, size, fmt.unsafe_ptr(), loaded_pack)
)
@no_inline
fn _snprintf_scalar[
    type: DType,
    float_format: StringLiteral = "%.17g",
](buffer: UnsafePointer[UInt8], size: Int, x: Scalar[type]) -> Int:
    """Formats a single scalar of any DType into `buffer`.

    Parameters:
        type: The DType of the scalar.
        float_format: The printf format used for floating point values.

    Args:
        buffer: Pointer to the output buffer.
        size: At most `size - 1` bytes are written.
        x: The scalar value to format.

    Returns:
        The number of bytes written, or 0 for unsupported dtypes.
    """

    @parameter
    if type == DType.bool:
        # Booleans render as Python-style "True"/"False".
        if x:
            return _snprintf["True"](buffer, size)
        else:
            return _snprintf["False"](buffer, size)
    elif type.is_integral() or type == DType.address:
        return _snprintf[_get_dtype_printf_format[type]()](buffer, size, x)
    elif (
        type == DType.float16 or type == DType.bfloat16 or type == DType.float32
    ):
        # We need to cast the value to float64 to print it.
        return _float_repr[float_format](buffer, size, x.cast[DType.float64]())
    elif type == DType.float64:
        return _float_repr[float_format](buffer, size, rebind[Float64](x))
    return 0
# ===----------------------------------------------------------------------=== #
# Helper functions to print a single pop scalar without spacing or new line.
# ===----------------------------------------------------------------------=== #
@no_inline
fn _float_repr[
    fmt: StringLiteral = "%.17g"
](buffer: UnsafePointer[UInt8], size: Int, x: Float64) -> Int:
    """Formats a Float64 into `buffer`, appending ".0" when the formatted
    text contains no decimal point or exponent so it still reads as a float.

    Parameters:
        fmt: The printf format used for the value.

    Args:
        buffer: Pointer to the output buffer.
        size: At most `size - 1` bytes are written.
        x: The value to format.

    Returns:
        The number of bytes written.
    """
    # Using `%.17g` with decimal check is equivalent to CPython's fallback path
    # when its more complex dtoa library (forked from
    # https://github.com/dtolnay/dtoa) is not available.
    var n = _snprintf[fmt](buffer, size, x.value)
    # If the buffer isn't big enough to add anything, then just return.
    if n + 2 >= size:
        return n
    # Don't do anything fancy. Just insert ".0" if there is no decimal and this
    # is not in exponent form.
    var p = buffer
    alias minus = ord("-")
    alias dot = ord(".")
    # Skip an optional sign, then the run of leading digits.
    if p[] == minus:
        p += 1
    while p[] != 0 and isdigit(p[]):
        p += 1
    if p[]:
        # Hit a non-digit before NUL ('.' or an exponent marker): the text
        # already looks like a float.
        return n
    # Pure-integer output: append ".0" and re-terminate.
    p[] = dot
    p += 1
    p[] = ord("0")
    p += 1
    p[] = 0
    return n + 2
# ===----------------------------------------------------------------------=== #
# _put
# ===----------------------------------------------------------------------=== #
@no_inline
fn _put(x: Int, file: FileDescriptor = stdout):
"""Prints a scalar value.
Args:
x: The value to print.
file: The output stream.
"""
_printf[_get_dtype_printf_format[DType.index]()](x, file=file)
@no_inline
fn _put_simd_scalar[type: DType](x: Scalar[type]):
"""Prints a scalar value.
Parameters:
type: The DType of the value.
Args:
x: The value to print.
"""
alias format = _get_dtype_printf_format[type]()
@parameter
if type == DType.bool:
_put["True"]() if x else _put["False"]()
elif type.is_integral() or type == DType.address:
_printf[format](x)
elif type.is_floating_point():
@parameter
if triple_is_nvidia_cuda():
_printf[format](x.cast[DType.float64]())
else:
_put(str(x))
else:
constrained[False, "invalid dtype"]()
@no_inline
fn _put[type: DType, simd_width: Int](x: SIMD[type, simd_width]):
    """Prints a SIMD vector.

    Width-1 vectors print as plain scalars; wider integral vectors print as
    a bracketed, comma-separated list; anything else falls back to the
    String representation.

    Parameters:
        type: The DType of the value.
        simd_width: The SIMD width.

    Args:
        x: The value to print.
    """
    alias format = _get_dtype_printf_format[type]()

    @parameter
    if simd_width == 1:
        _put_simd_scalar(x[0])
    elif type.is_integral():
        _put["["]()

        # Unrolled at compile time over the vector lanes.
        @parameter
        for i in range(simd_width):
            _put_simd_scalar(x[i])
            if i != simd_width - 1:
                _put[", "]()
        _put["]"]()
    else:
        _put(str(x))
@no_inline
fn _put(x: String, file: FileDescriptor = stdout):
# 'x' is borrowed, so we know it will outlive the call to print.
_put(x._strref_dangerous(), file=file)
@no_inline
fn _put(x: StringRef, file: FileDescriptor = stdout):
    """Writes a string reference to the stream, chunking very large strings.

    Args:
        x: The string to print.
        file: The output stream.
    """
    # Avoid printing "(null)" for an empty/default constructed `String`
    var str_len = len(x)

    if not str_len:
        return

    @parameter
    if triple_is_nvidia_cuda():
        # Device-side path: pass the string itself as the vprintf format with
        # an empty argument pack; `file` is ignored here.
        # NOTE(review): a string containing '%' would be interpreted as
        # format specifiers by vprintf — confirm this is acceptable.
        var tmp = 0
        var arg_ptr = UnsafePointer.address_of(tmp)
        _ = external_call["vprintf", Int32](
            x.data, arg_ptr.bitcast[UnsafePointer[NoneType]]()
        )
    else:
        alias MAX_STR_LEN = 0x1000_0000

        # The string can be printed, so that's fine.
        if str_len < MAX_STR_LEN:
            # `%.*s` prints exactly `length` bytes, so no NUL terminator is
            # required.
            _printf["%.*s"](x.length, x.data, file=file)
            return

        # The string is large, then we need to chunk it.
        var p = x.data
        while str_len:
            var ll = min(str_len, MAX_STR_LEN)
            _printf["%.*s"](ll, p, file=file)
            str_len -= ll
            p += ll
@no_inline
fn _put[x: StringLiteral](file: FileDescriptor = stdout):
_put(StringRef(x), file=file)
@no_inline
fn _put(x: DType, file: FileDescriptor = stdout):
_put(str(x), file=file)
# ===----------------------------------------------------------------------=== #
# print
# ===----------------------------------------------------------------------=== #
@no_inline
fn print[
*Ts: Stringable
](*values: *Ts, flush: Bool = False, file: FileDescriptor = stdout):
"""Prints elements to the text stream. Each element is separated by a
whitespace and followed by a newline character.
Parameters:
Ts: The elements types.
Args:
values: The elements to print.
flush: If set to true, then the stream is forcibly flushed.
file: The output stream.
"""
_print(values, sep=" ", end="\n", flush=flush, file=file)
@no_inline
fn print[
*Ts: Stringable, EndTy: Stringable
](
*values: *Ts,
end: EndTy,
flush: Bool = False,
file: FileDescriptor = stdout,
):
"""Prints elements to the text stream. Each element is separated by a
whitespace and followed by `end`.
Parameters:
Ts: The elements types.
EndTy: The type of end argument.
Args:
values: The elements to print.
end: The String to write after printing the elements.
flush: If set to true, then the stream is forcibly flushed.
file: The output stream.
"""
_print(values, sep=" ", end=str(end), flush=flush, file=file)
@no_inline
fn print[
SepTy: Stringable, *Ts: Stringable
](*values: *Ts, sep: SepTy, flush: Bool = False, file: FileDescriptor = stdout):
"""Prints elements to the text stream. Each element is separated by `sep`
and followed by a newline character.
Parameters:
SepTy: The type of separator.
Ts: The elements types.
Args:
values: The elements to print.
sep: The separator used between elements.
flush: If set to true, then the stream is forcibly flushed.
file: The output stream.
"""
_print(values, sep=str(sep), end="\n", flush=flush, file=file)
@no_inline
fn print[
SepTy: Stringable, EndTy: Stringable, *Ts: Stringable
](
*values: *Ts,
sep: SepTy,
end: EndTy,
flush: Bool = False,
file: FileDescriptor = stdout,
):
"""Prints elements to the text stream. Each element is separated by `sep`
and followed by `end`.
Parameters:
SepTy: The type of separator.
EndTy: The type of end argument.
Ts: The elements types.
Args:
values: The elements to print.
sep: The separator used between elements.
end: The String to write after printing the elements.
flush: If set to true, then the stream is forcibly flushed.
file: The output stream.
"""
_print(values, sep=str(sep), end=str(end), flush=flush, file=file)
@no_inline
fn _print[
    *Ts: Stringable
](
    values: VariadicPack[_, _, Stringable, Ts],
    *,
    sep: String,
    end: String,
    flush: Bool,
    file: FileDescriptor,
):
    """Shared implementation for the `print` overloads: writes each element
    separated by `sep`, then writes `end`, optionally flushing the stream.

    Parameters:
        Ts: The element types.

    Args:
        values: The pack of elements to print.
        sep: The separator written between elements.
        end: The string written after the last element.
        flush: If True, the stream is flushed after printing.
        file: The output stream.
    """

    @parameter
    fn print_with_separator[i: Int, T: Stringable](value: T):
        _put(str(value), file=file)

        # Skip the separator after the final element.
        @parameter
        if i < values.__len__() - 1:
            _put(sep, file=file)

    values.each_idx[print_with_separator]()
    _put(end, file=file)
    if flush:
        _flush(file=file)
# ===----------------------------------------------------------------------=== #
# print_fmt
# ===----------------------------------------------------------------------=== #
# TODO:
# Finish transition to using non-allocating formatting abstractions by
# default, replace `print` with this function.
@no_inline
fn _print_fmt[
T: Formattable, *Ts: Formattable
](
first: T,
*rest: *Ts,
sep: StringLiteral = " ",
end: StringLiteral = "\n",
flush: Bool = False,
):
"""Prints elements to the text stream. Each element is separated by `sep`
and followed by `end`.
This print function does not perform unnecessary intermediate String
allocations during formatting.
Parameters:
T: The first element type.
Ts: The remaining element types.
Args:
first: The first element.
rest: The remaining elements.
sep: The separator used between elements.
end: The String to write after printing the elements.
flush: If set to true, then the stream is forcibly flushed.
"""
var writer = Formatter.stdout()
write_to(writer, first)
@parameter
fn print_elt[T: Formattable](a: T):
write_to(writer, sep, a)
rest.each[print_elt]()
write_to(writer, end)
# TODO: What is a flush function that works on CUDA?
@parameter
if not triple_is_nvidia_cuda():
if flush:
_flush()
| mojo/stdlib/src/builtin/io.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provides the `len()` function and its associated traits.
These are Mojo built-ins, so you don't need to import them.
"""
# ===----------------------------------------------------------------------=== #
# Sized
# ===----------------------------------------------------------------------=== #
trait Sized:
"""The `Sized` trait describes a type that has an integer length (such as a
string or array).
Any type that conforms to `Sized` or
[`SizedRaising`](/mojo/stdlib/builtin/len/SizedRaising) works with the
built-in [`len()`](/mojo/stdlib/builtin/len/len) function.
The `Sized` trait requires a type to implement the `__len__()`
method. For example:
```mojo
@value
struct Foo(Sized):
var length: Int
fn __len__(self) -> Int:
return self.length
```
You can pass an instance of `Foo` to the `len()` function to get its
length:
```mojo
var foo = Foo(42)
print(len(foo) == 42)
```
```plaintext
True
```
**Note:** If the `__len__()` method can raise an error, use the
[`SizedRaising`](/mojo/stdlib/builtin/len/SizedRaising) trait instead.
"""
fn __len__(self) -> Int:
"""Get the length of the type.
Returns:
The length of the type.
"""
...
trait SizedRaising:
"""The `SizedRaising` trait describes a type that has an integer length,
which might raise an error if the length can't be determined.
Any type that conforms to [`Sized`](/mojo/stdlib/builtin/len/Sized) or
`SizedRaising` works with the built-in
[`len()`](/mojo/stdlib/builtin/len/len) function.
The `SizedRaising` trait requires a type to implement the `__len__()`
method, which can raise an error. For example:
```mojo
@value
struct Foo(SizedRaising):
var length: Int
fn __len__(self) raises -> Int:
if self.length < 0:
raise Error("Length is negative")
return self.length
```
You can pass an instance of `Foo` to the `len()` function to get its
length:
```mojo
fn main() raises:
var foo = Foo(42)
print(len(foo) == 42)
```
```plaintext
True
```
"""
fn __len__(self) raises -> Int:
"""Get the length of the type.
Returns:
The length of the type.
Raises:
If the length cannot be computed.
"""
...
# ===----------------------------------------------------------------------=== #
# len
# ===----------------------------------------------------------------------=== #
@always_inline
fn len[T: Sized](value: T) -> Int:
"""Get the length of a value.
Parameters:
T: The Sized type.
Args:
value: The object to get the length of.
Returns:
The length of the object.
"""
return value.__len__()
@always_inline
fn len[T: SizedRaising](value: T) raises -> Int:
    """Get the length of a value.

    Parameters:
        T: The SizedRaising type.

    Args:
        value: The object to get the length of.

    Returns:
        The length of the object.

    Raises:
        If the length cannot be computed.
    """
    return value.__len__()
| mojo/stdlib/src/builtin/len.mojo | false |
<filename>mojo/stdlib/src/builtin/math.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines basic math functions for use in the open source parts of the standard
library since the `math` package is currently closed source and cannot be
depended on in the open source parts of the standard library.
These are Mojo built-ins, so you don't need to import them.
"""
# ===----------------------------------------------------------------------=== #
# abs
# ===----------------------------------------------------------------------=== #
trait Absable:
    """
    The `Absable` trait describes a type that defines an absolute value
    operation.

    Types that conform to `Absable` will work with the builtin `abs` function.
    The absolute value operation always returns the same type as the input.

    For example:
    ```mojo
    @value
    struct Point(Absable):
        var x: Float64
        var y: Float64

        fn __abs__(self) -> Self:
            return Self(abs(self.x), abs(self.y))
    ```
    """

    # TODO(MOCO-333): Reconsider the signature when we have parametric traits or
    # associated types.
    fn __abs__(self) -> Self:
        """Get the absolute value of this instance.

        Returns:
            The absolute value of the instance.
        """
        ...
@always_inline
fn abs[T: Absable](value: T) -> T:
"""Get the absolute value of the given object.
Parameters:
T: The type conforming to Absable.
Args:
value: The object to get the absolute value of.
Returns:
The absolute value of the object.
"""
return value.__abs__()
# TODO: https://github.com/modularml/modular/issues/38694
# TODO: Remove this
@always_inline
fn abs(value: IntLiteral) -> IntLiteral:
"""Get the absolute value of the given IntLiteral.
Args:
value: The IntLiteral to get the absolute value of.
Returns:
The absolute value of the IntLiteral.
"""
return value.__abs__()
# TODO: https://github.com/modularml/modular/issues/38694
# TODO: Remove this
@always_inline
fn abs(value: FloatLiteral) -> FloatLiteral:
"""Get the absolute value of the given FloatLiteral.
Args:
value: The FloatLiteral to get the absolute value of.
Returns:
The absolute value of the FloatLiteral.
"""
return value.__abs__()
# ===----------------------------------------------------------------------=== #
# divmod
# ===----------------------------------------------------------------------=== #
fn divmod(numerator: Int, denominator: Int) -> Tuple[Int, Int]:
"""Performs integer division and returns the quotient and the remainder.
Currently supported only for integers. Support for more standard library
types like Int8, Int16... is planned.
This method calls `a.__divmod__(b)`, thus, the actual implementation of
divmod should go in the `__divmod__` method of the struct of `a`.
Args:
numerator: The dividend.
denominator: The divisor.
Returns:
A `Tuple` containing the quotient and the remainder.
"""
return numerator.__divmod__(denominator)
# ===----------------------------------------------------------------------=== #
# max
# ===----------------------------------------------------------------------=== #
@always_inline
fn max(x: Int, y: Int) -> Int:
"""Gets the maximum of two integers.
Args:
x: Integer input to max.
y: Integer input to max.
Returns:
Maximum of x and y.
"""
return __mlir_op.`index.maxs`(x.value, y.value)
@always_inline
fn max[
type: DType, simd_width: Int
](x: SIMD[type, simd_width], y: SIMD[type, simd_width]) -> SIMD[
type, simd_width
]:
"""Performs elementwise maximum of x and y.
An element of the result SIMD vector will be the maximum of the
corresponding elements in x and y.
Parameters:
type: The `dtype` of the input and output SIMD vector.
simd_width: The width of the input and output SIMD vector.
Args:
x: First SIMD vector.
y: Second SIMD vector.
Returns:
A SIMD vector containing the elementwise maximum of x and y.
"""
return x.max(y)
# ===----------------------------------------------------------------------=== #
# min
# ===----------------------------------------------------------------------=== #
@always_inline
fn min(x: Int, y: Int) -> Int:
    """Gets the minimum of two integers.

    Args:
        x: Integer input to min.
        y: Integer input to min.

    Returns:
        Minimum of x and y.
    """
    return __mlir_op.`index.mins`(x.value, y.value)
@always_inline
fn min[
type: DType, simd_width: Int
](x: SIMD[type, simd_width], y: SIMD[type, simd_width]) -> SIMD[
type, simd_width
]:
"""Gets the elementwise minimum of x and y.
An element of the result SIMD vector will be the minimum of the
corresponding elements in x and y.
Parameters:
type: The `dtype` of the input and output SIMD vector.
simd_width: The width of the input and output SIMD vector.
Args:
x: First SIMD vector.
y: Second SIMD vector.
Returns:
A SIMD vector containing the elementwise minimum of x and y.
"""
return x.min(y)
# ===----------------------------------------------------------------------=== #
# pow
# ===----------------------------------------------------------------------=== #
trait Powable:
"""
The `Powable` trait describes a type that defines a power operation (i.e.
exponentiation) with the same base and exponent types.
Types that conform to `Powable` will work with the builtin `pow` function,
which will return the same type as the inputs.
TODO: add example
"""
# TODO(MOCO-333): Reconsider the signature when we have parametric traits or
# associated types.
fn __pow__(self, exp: Self) -> Self:
"""Return the value raised to the power of the given exponent.
Args:
exp: The exponent value.
Returns:
The value of `self` raised to the power of `exp`.
"""
...
fn pow[T: Powable](base: T, exp: T) -> T:
"""Computes the `base` raised to the power of the `exp`.
Parameters:
T: A type conforming to the `Powable` trait.
Args:
base: The base of the power operation.
exp: The exponent of the power operation.
Returns:
The `base` raised to the power of the `exp`.
"""
return base.__pow__(exp)
fn pow(base: SIMD, exp: Int) -> __type_of(base):
    """Computes elementwise value of a SIMD vector raised to the power of the
    given integer.

    Args:
        base: The vector whose elements are raised to the power.
        exp: The integer exponent.

    Returns:
        The `base` elementwise raised to the power of `exp`.
    """
    return base.__pow__(exp)
# ===----------------------------------------------------------------------=== #
# round
# ===----------------------------------------------------------------------=== #
trait Roundable:
    """
    The `Roundable` trait describes a type that defines a rounding operation.

    Types that conform to `Roundable` will work with the builtin `round`
    function. The round operation always returns the same type as the input.

    For example:
    ```mojo
    @value
    struct Complex(Roundable):
        var re: Float64
        var im: Float64

        fn __round__(self) -> Self:
            return Self(round(self.re), round(self.im))
    ```
    """

    # TODO(MOCO-333): Reconsider the signature when we have parametric traits or
    # associated types.
    fn __round__(self) -> Self:
        """Get the rounded value of this instance.

        Returns:
            The rounded value.
        """
        ...

    fn __round__(self, ndigits: Int) -> Self:
        """Get this value rounded to the given number of digits.

        Args:
            ndigits: The number of digits to round to.

        Returns:
            The rounded value.
        """
        ...
@always_inline
fn round[T: Roundable](value: T) -> T:
    """Get the rounded value of the given object.
    Delegates to the type's `__round__` trait method.
    Parameters:
        T: The type conforming to Roundable.
    Args:
        value: The object to get the rounded value of.
    Returns:
        The rounded value of the object.
    """
    return value.__round__()
@always_inline
fn round[T: Roundable](value: T, ndigits: Int) -> T:
    """Get the value of the given object rounded to a number of digits.
    Delegates to the type's two-argument `__round__` trait method.
    Parameters:
        T: The type conforming to Roundable.
    Args:
        value: The object to get the rounded value of.
        ndigits: The number of digits to round to.
    Returns:
        The rounded value of the object.
    """
    return value.__round__(ndigits)
| mojo/stdlib/src/builtin/math.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines the object type, which is used to represent untyped values.
These are Mojo built-ins, so you don't need to import them.
"""
from collections import Dict, List
from sys.intrinsics import _type_is_eq
from memory import memcmp, memcpy
from memory import Arc
from memory.unsafe_pointer import move_from_pointee
from utils import StringRef, unroll, Variant
# ===----------------------------------------------------------------------=== #
# _ObjectImpl
# ===----------------------------------------------------------------------=== #
@register_passable("trivial")
struct _NoneMarker:
    """This is a trivial class to indicate that an object is `None`.
    It carries no data; its presence in the `_ObjectImpl` variant is the
    entire encoding of the `None` state.
    """
    pass
@register_passable("trivial")
struct _ImmutableString:
    """Python strings are immutable. This class is marked as trivially register
    passable because its memory will be managed by `_ObjectImpl`. It is a
    pointer and integer pair. Memory will be dynamically allocated.
    """
    var data: UnsafePointer[Int8]
    """The pointer to the beginning of the string contents. It is not
    null-terminated."""
    var length: Int
    """The length of the string."""
    @always_inline
    fn __init__(inout self, data: UnsafePointer[Int8], length: Int):
        # Takes the pointer as-is; ownership/freeing is handled by
        # `_ObjectImpl.destroy()`, not by this struct.
        self.data = data.address
        self.length = length
    @always_inline
    fn string_compare(self, rhs: _ImmutableString) -> Int:
        # Three-way lexicographic compare: first memcmp the common prefix,
        # then break ties by length (shorter string sorts first).
        var res = memcmp(self.data, rhs.data, min(self.length, rhs.length))
        if res != 0:
            return -1 if res < 0 else 1
        if self.length == rhs.length:
            return 0
        return -1 if self.length < rhs.length else 1
struct _RefCountedList:
    """Python objects have the behavior that bool, int, float, and str are
    passed by value but lists and dictionaries are passed by reference. In order
    to model this behavior, lists and dictionaries are implemented as
    ref-counted data types.
    """
    var impl: Arc[List[_ObjectImpl]]
    """The list value."""
    fn __init__(inout self):
        # Starts with an empty list and a refcount of one (held by `impl`).
        self.impl = Arc[List[_ObjectImpl]](List[_ObjectImpl]())
@register_passable("trivial")
struct _RefCountedListRef:
    # FIXME(#3335): Use indirection to avoid a recursive struct definition.
    var lst: UnsafePointer[NoneType]
    """The reference to the list. Stored type-erased as `NoneType` to break
    the recursive dependency on `_ObjectImpl`; bitcast back on access."""
    @always_inline
    fn __init__(inout self):
        # Heap-allocate the ref-counted list and emplace it uninitialized;
        # the allocation is reclaimed via `release()`.
        var ptr = UnsafePointer[_RefCountedList].alloc(1)
        __get_address_as_uninit_lvalue(ptr.address) = _RefCountedList()
        self.lst = ptr.bitcast[NoneType]()
    @always_inline
    fn copy(self) -> Self:
        # NOTE(review): relies on the Arc copy/destroy of `impl` to adjust the
        # refcount for the duplicated reference — subtle; verify against the
        # Arc lifetime semantics before changing.
        _ = self.lst.bitcast[_RefCountedList]()[].impl
        return Self {lst: self.lst}
    fn release(self):
        # NOTE(review): drops one reference by materializing and destroying a
        # copy of the Arc; the backing `_RefCountedList` allocation itself is
        # presumably reclaimed when the count reaches zero — confirm.
        var ptr = self.lst.bitcast[_RefCountedList]()[].impl
struct _RefCountedAttrsDict:
    """This type contains the attribute dictionary for a dynamic object. The
    attribute dictionary is constructed once with a fixed number of elements.
    Those elements can be modified, but elements cannot be added or deleted
    after the dictionary is initialized. Because attributes are accessed
    directly with `x.attr`, the key will always be a `StringLiteral`.
    """
    var impl: Arc[Dict[StringLiteral, _ObjectImpl]]
    """The implementation of the map."""
    fn __init__(inout self):
        self.impl = Arc[Dict[StringLiteral, _ObjectImpl]](
            Dict[StringLiteral, _ObjectImpl]()
        )
    @always_inline
    fn set(inout self, key: StringLiteral, value: _ObjectImpl) raises:
        # Only existing keys may be assigned; the old value must be destroyed
        # explicitly since `_ObjectImpl` manages its payloads manually.
        if key in self.impl[]:
            self.impl[][key].destroy()
            self.impl[][key] = value
            return
        raise Error(
            "AttributeError: Object does not have an attribute of name '"
            + key
            + "'"
        )
    @always_inline
    fn get(self, key: StringLiteral) raises -> _ObjectImpl:
        # Returns the stored impl without copying; callers that need an owned
        # value copy it themselves (see `_ObjectImpl.get_obj_attr`).
        var iter = self.impl[].find(key)
        if iter:
            return iter.value()[]
        raise Error(
            "AttributeError: Object does not have an attribute of name '"
            + key
            + "'"
        )
struct Attr:
    """A generic object's attributes are set on construction, after which the
    attributes can be read and modified, but no attributes may be removed or
    added.
    """
    var key: StringLiteral
    """The name of the attribute."""
    var value: object
    """The value of the attribute."""
    @always_inline
    fn __init__(inout self, key: StringLiteral, owned value: object):
        """Initializes the attribute with a key and value.
        Args:
            key: The string literal key.
            value: The object value of the attribute.
        """
        self.key = key
        # Move rather than copy: avoids bumping refcounts / cloning strings.
        self.value = value^
@register_passable("trivial")
struct _RefCountedAttrsDictRef:
    # FIXME(#3335): Use indirection to avoid a recursive struct definition.
    # FIXME(#12604): Distinguish this type from _RefCountedListRef.
    var attrs: UnsafePointer[Int8]
    """The reference to the dictionary. Stored type-erased as `Int8` to break
    the recursive dependency on `_ObjectImpl`; bitcast back on access."""
    @always_inline
    fn __init__(inout self, values: VariadicListMem[Attr, _, _]):
        # Heap-allocate the ref-counted dict and emplace it uninitialized;
        # reclaimed via `release()`.
        var ptr = UnsafePointer[_RefCountedAttrsDict].alloc(1)
        __get_address_as_uninit_lvalue(ptr.address) = _RefCountedAttrsDict()
        # Elements can only be added on construction.
        for i in range(len(values)):
            # `_insert` bypasses the existence check in `set()`; each stored
            # value is a deep copy so the dict owns its payloads.
            ptr[].impl[]._insert(values[i].key, values[i].value._value.copy())
        self.attrs = ptr.bitcast[Int8]()
    @always_inline
    fn copy(self) -> Self:
        # NOTE(review): same subtle Arc refcount idiom as
        # `_RefCountedListRef.copy` — verify before changing.
        _ = self.attrs.bitcast[_RefCountedAttrsDict]()[].impl
        return Self {attrs: self.attrs}
    fn release(self):
        # NOTE(review): drops one reference via a materialized Arc copy; see
        # `_RefCountedListRef.release`.
        var ptr = self.attrs.bitcast[_RefCountedAttrsDict]()[].impl
@register_passable("trivial")
struct _Function:
    # The MLIR function type has two arguments:
    # 1. The self value, or the single argument.
    # 2. None, or an additional argument.
    var value: UnsafePointer[Int16]
    """The function pointer, stored type-erased. The arity is recovered at
    call time by bitcasting to one of the `fn0`..`fn3` signatures below."""
    @always_inline
    fn __init__[FnT: AnyTrivialRegType](inout self, value: FnT):
        # FIXME: No "pointer bitcast" for signature function pointers.
        # Smuggle the typed function pointer into the erased slot by writing
        # through a reinterpreted address.
        var f = UnsafePointer[Int16]()
        UnsafePointer.address_of(f).bitcast[FnT]()[] = value
        self.value = f
    alias fn0 = fn () raises -> object
    """Nullary function type."""
    alias fn1 = fn (object) raises -> object
    """Unary function type."""
    alias fn2 = fn (object, object) raises -> object
    """Binary function type."""
    alias fn3 = fn (object, object, object) raises -> object
    """Ternary function type."""
    # The `invoke` overloads reverse the erasure: each reinterprets the stored
    # pointer as the signature matching its argument count. The caller is
    # responsible for matching the arity of the original function.
    @always_inline
    fn invoke(owned self) raises -> object:
        return UnsafePointer.address_of(self.value).bitcast[Self.fn0]()[]()
    @always_inline
    fn invoke(owned self, arg0: object) raises -> object:
        return UnsafePointer.address_of(self.value).bitcast[Self.fn1]()[](arg0)
    @always_inline
    fn invoke(owned self, arg0: object, arg1: object) raises -> object:
        return UnsafePointer.address_of(self.value).bitcast[Self.fn2]()[](
            arg0, arg1
        )
    @always_inline
    fn invoke(
        owned self, arg0: object, arg1: object, arg2: object
    ) raises -> object:
        return UnsafePointer.address_of(self.value).bitcast[Self.fn3]()[](
            arg0, arg1, arg2
        )
struct _ObjectImpl(CollectionElement, Stringable):
    """This class is the underlying implementation of the value of an `object`.
    It is a variant of primitive types and pointers to implementations of more
    complex types.
    We choose Int64 and Float64 to store all integer and float values respectively.
    TODO: These should be BigInt and BigFloat one day.
    Lifetime discipline: copies made via `__copyinit__` are shallow; `copy()`
    performs the deep/refcounted duplication, and `destroy()` is the manual
    destructor. Callers (`object`) are responsible for pairing them.
    """
    alias type = Variant[
        _NoneMarker,
        Bool,
        Int64,
        Float64,
        _ImmutableString,
        _RefCountedListRef,
        _Function,
        _RefCountedAttrsDictRef,
    ]
    """The variant value type."""
    var value: Self.type
    """The value of the object. It is a variant of the possible object values
    kinds."""
    alias none: Int = 0
    """Type discriminator indicating none."""
    alias bool: Int = 1
    """Type discriminator indicating a bool."""
    alias int: Int = 2
    """Type discriminator indicating an int."""
    alias float: Int = 3
    """Type discriminator indicating a float."""
    alias str: Int = 4
    """Type discriminator indicating a string."""
    alias list: Int = 5
    """Type discriminator indicating a list."""
    alias dict: Int = 8  # TODO
    """Type discriminator indicating a dictionary."""
    alias function: Int = 6
    """Type discriminator indicating a function."""
    alias obj: Int = 7
    """Type discriminator indicating an object."""
    # ===------------------------------------------------------------------=== #
    # Constructors
    # ===------------------------------------------------------------------=== #
    @always_inline
    fn __init__(inout self, value: Self.type):
        self.value = value
    @always_inline
    fn __init__(inout self):
        # Default-constructed impl is the `None` value.
        self.value = Self.type(_NoneMarker {})
    @always_inline
    fn __init__(inout self, value: Bool):
        self.value = Self.type(value)
    @always_inline
    fn __init__[dt: DType](inout self, value: SIMD[dt, 1]):
        # NOTE(review): both branches are currently identical; presumably the
        # intent is to store integrals as Int64 and floats as Float64 (the only
        # numeric variants) — confirm whether explicit casts are needed here.
        @parameter
        if dt.is_integral():
            self.value = Self.type(value)
        else:
            self.value = Self.type(value)
    @always_inline
    fn __init__(inout self, value: _ImmutableString):
        self.value = Self.type(value)
    @always_inline
    fn __init__(inout self, value: _RefCountedListRef):
        self.value = Self.type(value)
    @always_inline
    fn __init__(inout self, value: _Function):
        self.value = Self.type(value)
    @always_inline
    fn __init__(inout self, value: _RefCountedAttrsDictRef):
        self.value = Self.type(value)
    @always_inline
    fn __copyinit__(inout self, existing: Self):
        # Shallow copy: pointers/refs are duplicated without adjusting
        # refcounts or cloning buffers. Use `copy()` for an owned duplicate.
        self = existing.value
    @always_inline
    fn __moveinit__(inout self, owned other: Self):
        self = other.value^
    @always_inline
    fn copy(self) -> Self:
        # Deep copy for owning kinds: strings get a fresh buffer, lists and
        # objects bump their refcount. Trivial kinds are returned as-is.
        if self.is_str():
            var str = self.get_as_string()
            var impl = _ImmutableString(
                UnsafePointer[Int8].alloc(str.length), str.length
            )
            memcpy(
                dest=impl.data,
                src=str.data,
                count=str.length,
            )
            return impl
        if self.is_list():
            return self.get_as_list().copy()
        if self.is_obj():
            return self.get_obj_attrs().copy()
        return self
    @always_inline
    fn destroy(self):
        # Manual destructor, paired with `copy()`. No-op for trivial kinds.
        if self.is_str():
            self.get_as_string().data.free()
        elif self.is_list():
            self.get_as_list().release()
        elif self.is_obj():
            self.get_obj_attrs().release()
    # ===------------------------------------------------------------------=== #
    # Value Query
    # ===------------------------------------------------------------------=== #
    @always_inline
    fn is_none(self) -> Bool:
        return self.value.isa[_NoneMarker]()
    @always_inline
    fn is_bool(self) -> Bool:
        return self.value.isa[Bool]()
    @always_inline
    fn is_int(self) -> Bool:
        return self.value.isa[Int64]()
    @always_inline
    fn is_float(self) -> Bool:
        return self.value.isa[Float64]()
    @always_inline
    fn is_str(self) -> Bool:
        return self.value.isa[_ImmutableString]()
    @always_inline
    fn is_list(self) -> Bool:
        return self.value.isa[_RefCountedListRef]()
    @always_inline
    fn is_dict(self) -> Bool:
        # Dictionaries are not implemented yet (see the `dict` alias TODO).
        return False
    @always_inline
    fn is_func(self) -> Bool:
        return self.value.isa[_Function]()
    @always_inline
    fn is_obj(self) -> Bool:
        return self.value.isa[_RefCountedAttrsDictRef]()
    # get a copy
    @always_inline
    fn get_as_bool(self) -> Bool:
        return self.value[Bool]
    @always_inline
    fn get_as_int(self) -> Int64:
        return self.value[Int64]
    @always_inline
    fn get_as_float(self) -> Float64:
        return self.value[Float64]
    @always_inline
    fn get_as_string(self) -> _ImmutableString:
        return self.value[_ImmutableString]
    @always_inline
    fn get_as_list(self) -> _RefCountedListRef:
        return self.value[_RefCountedListRef]
    @always_inline
    fn get_as_func(self) -> _Function:
        return self.value[_Function]
    @always_inline
    fn get_obj_attrs(self) -> _RefCountedAttrsDictRef:
        return self.value[_RefCountedAttrsDictRef]
    @always_inline
    fn get_type_id(self) -> Int:
        # Maps the variant's active member to the discriminator aliases above.
        # The ordering (bool < int < float) is relied on by
        # `coerce_comparison_type`.
        if self.is_none():
            return Self.none
        if self.is_bool():
            return Self.bool
        if self.is_int():
            return Self.int
        if self.is_float():
            return Self.float
        if self.is_str():
            return Self.str
        if self.is_list():
            return Self.list
        if self.is_func():
            return Self.function
        debug_assert(self.is_obj(), "expected a generic object")
        return Self.obj
    @always_inline
    fn _get_type_name(self) -> String:
        """Returns the name (in lowercase) of the specific object type."""
        if self.is_none():
            return "none"
        if self.is_bool():
            return "bool"
        if self.is_int():
            return "int"
        if self.is_float():
            return "float"
        if self.is_str():
            return "str"
        if self.is_list():
            return "list"
        if self.is_func():
            return "function"
        debug_assert(self.is_obj(), "expected a generic object")
        return "obj"
    # ===------------------------------------------------------------------=== #
    # Type Conversion
    # ===------------------------------------------------------------------=== #
    @always_inline
    fn convert_bool_to_float(self) -> Self:
        return Float64(1.0 if self.get_as_bool() else 0.0)
    @always_inline
    fn convert_bool_to_int(self) -> Self:
        return Int64(1 if self.get_as_bool() else 0)
    @always_inline
    fn convert_int_to_float(self) -> Self:
        return self.get_as_int().cast[DType.float64]()
    @staticmethod
    fn coerce_comparison_type(inout lhs: _ObjectImpl, inout rhs: _ObjectImpl):
        """Coerces two values of arithmetic type to the appropriate
        lowest-common denominator type for performing comparisons, in order of
        increasing priority: bool, int, and then float.
        """
        var lhsId = lhs.get_type_id()
        var rhsId = rhs.get_type_id()
        if lhsId == rhsId:
            return
        @parameter
        fn convert(inout value: _ObjectImpl, id: Int, to: Int):
            # `to` is always the higher-priority id; bool->int, bool->float,
            # or int->float depending on the pair.
            if to == Self.int:
                value = value.convert_bool_to_int()
            else:
                if id == Self.bool:
                    value = value.convert_bool_to_float()
                else:
                    value = value.convert_int_to_float()
        if lhsId > rhsId:
            convert(rhs, rhsId, lhsId)
        else:
            convert(lhs, lhsId, rhsId)
    @staticmethod
    fn coerce_arithmetic_type(inout lhs: _ObjectImpl, inout rhs: _ObjectImpl):
        """Coerces two values of arithmetic type to the appropriate
        lowest-common denominator type for performing arithmetic operations.
        Bools are always converted to integers, to match Python's behavior.
        """
        if lhs.is_bool():
            lhs = lhs.convert_bool_to_int()
        if rhs.is_bool():
            rhs = rhs.convert_bool_to_int()
        if lhs.is_float() == rhs.is_float():
            return
        if lhs.is_float():
            rhs = rhs.convert_int_to_float()
        else:
            lhs = lhs.convert_int_to_float()
    @staticmethod
    fn coerce_integral_type(inout lhs: _ObjectImpl, inout rhs: _ObjectImpl):
        """Coerces two values of integral type to the appropriate
        lowest-common denominator type for performing bitwise operations.
        """
        if lhs.is_int() == rhs.is_int():
            return
        if lhs.is_int():
            rhs = rhs.convert_bool_to_int()
        else:
            lhs = lhs.convert_bool_to_int()
    fn __str__(self) -> String:
        """Returns a Python-repr-like rendering of the value: strings are
        quoted, lists are bracketed element-wise, and generic objects print
        their attribute dictionary."""
        if self.is_none():
            return "None"
        if self.is_bool():
            return str(self.get_as_bool())
        if self.is_int():
            return str(self.get_as_int())
        if self.is_float():
            return str(self.get_as_float())
        if self.is_str():
            return (
                "'"
                + str(
                    StringRef(
                        self.get_as_string().data, self.get_as_string().length
                    )
                )
                + "'"
            )
        if self.is_func():
            return "Function at address " + hex(int(self.get_as_func().value))
        if self.is_list():
            var res = String("[")
            for j in range(self.get_list_length()):
                if j != 0:
                    res += ", "
                res += str(object(self.get_list_element(j)))
            res += "]"
            return res
        # Remaining kind: generic object with an attribute dict.
        var ptr = self.get_obj_attrs_ptr()
        var res = String("{")
        var print_sep = False
        for entry in ptr[].impl[].items():
            if print_sep:
                res += ", "
            res += (
                "'"
                + str(entry[].key)
                + "' = "
                + str(object(entry[].value.copy()))
            )
            print_sep = True
        res += "}"
        return res
    # ===------------------------------------------------------------------=== #
    # List Functions
    # ===------------------------------------------------------------------=== #
    @always_inline
    fn get_list_ptr(self) -> Arc[List[_ObjectImpl]]:
        return self.get_as_list().lst.bitcast[_RefCountedList]()[].impl
    @always_inline
    fn list_append(self, value: Self):
        # Takes ownership of `value`; the list element is destroyed in
        # `set_list_element` or when the list is released.
        var ptr = self.get_list_ptr()
        ptr[].append(value.value)
    @always_inline
    fn get_list_length(self) -> Int:
        var ptr = self.get_list_ptr()
        return len(ptr[])
    @always_inline
    fn get_list_element(self, i: Int) -> _ObjectImpl:
        # Returns an owned deep copy so the caller can destroy it freely.
        var ptr = self.get_list_ptr()
        return ptr[][i].copy()
    @always_inline
    fn set_list_element(self, i: Int, value: _ObjectImpl):
        # Destroy the old element before overwriting; elements are manually
        # managed (no implicit destructor on _ObjectImpl).
        var ptr = self.get_list_ptr()
        ptr[][i].destroy()
        ptr[][i] = value
    # ===------------------------------------------------------------------=== #
    # Object Attribute Functions
    # ===------------------------------------------------------------------=== #
    @always_inline
    fn get_obj_attrs_ptr(self) -> UnsafePointer[_RefCountedAttrsDict]:
        return self.get_obj_attrs().attrs.bitcast[_RefCountedAttrsDict]()
    @always_inline
    fn set_obj_attr(self, key: StringLiteral, value: _ObjectImpl) raises:
        self.get_obj_attrs_ptr()[].set(key, value)
    @always_inline
    fn get_obj_attr(self, key: StringLiteral) raises -> _ObjectImpl:
        # Copy so the caller owns the result independently of the dict.
        return self.get_obj_attrs_ptr()[].get(key).copy()
# ===----------------------------------------------------------------------=== #
# object
# ===----------------------------------------------------------------------=== #
struct object(IntableRaising, Boolable, Stringable):
"""Represents an object without a concrete type.
This is the type of arguments in `def` functions that do not have a type
annotation, such as the type of `x` in `def f(x): pass`. A value of any type
can be passed in as the `x` argument in this case, and so that value is
used to construct this `object` type.
"""
var _value: _ObjectImpl
"""The underlying value of the object."""
alias nullary_function = _Function.fn0
"""Nullary function type."""
alias unary_function = _Function.fn1
"""Unary function type."""
alias binary_function = _Function.fn2
"""Binary function type."""
alias ternary_function = _Function.fn3
"""Ternary function type."""
# ===------------------------------------------------------------------=== #
# Constructors
# ===------------------------------------------------------------------=== #
    @always_inline
    fn __init__(inout self):
        """Initializes the object with a `None` value."""
        # Default `_ObjectImpl` is the `None` marker.
        self._value = _ObjectImpl()
    @always_inline
    fn __init__(inout self, impl: _ObjectImpl):
        """Initializes the object with an implementation value. This is meant for
        internal use only.
        Args:
            impl: The object implementation.
        """
        # Takes the impl as-is (ownership transfer); no copy/refcount bump.
        self._value = impl
    @always_inline
    fn __init__(inout self, none: NoneType):
        """Initializes a none value object from a `None` literal.
        Args:
            none: None.
        """
        self._value = _ObjectImpl()
    @always_inline
    fn __init__(inout self, value: Int):
        """Initializes the object with an integer value.
        Args:
            value: The integer value.
        """
        # All integers are widened to Int64 inside the variant.
        self._value = Int64(value)
    @always_inline
    fn __init__(inout self, value: Float64):
        """Initializes the object with an floating-point value.
        Args:
            value: The float value.
        """
        self._value = value
    @always_inline
    fn __init__[dt: DType](inout self, value: SIMD[dt, 1]):
        """Initializes the object with a generic scalar value. If the scalar
        value type is bool, it is converted to a boolean. Otherwise, it is
        converted to the appropriate integer or floating point type.
        Parameters:
            dt: The scalar value type.
        Args:
            value: The scalar value.
        """
        # Bool scalars become the Bool variant member; everything else goes
        # through the _ObjectImpl SIMD constructor (Int64/Float64 storage).
        @parameter
        if dt == DType.bool:
            self._value = value.__bool__()
        else:
            self._value = value
    @always_inline
    fn __init__(inout self, value: Bool):
        """Initializes the object from a bool.
        Args:
            value: The boolean value.
        """
        self._value = value
    @always_inline
    fn __init__(inout self, value: StringLiteral):
        """Initializes the object from a string literal.
        Args:
            value: The string value.
        """
        # Delegates to the StringRef overload, which copies the bytes.
        self = object(StringRef(value))
    @always_inline
    fn __init__(inout self, value: StringRef):
        """Initializes the object from a string reference.
        Args:
            value: The string value.
        """
        # The object owns a freshly-allocated copy of the bytes; the buffer
        # is freed by `_ObjectImpl.destroy()` when the object dies.
        var impl = _ImmutableString(
            UnsafePointer[Int8].alloc(value.length), value.length
        )
        memcpy(
            impl.data,
            # TODO: Remove bitcast once transition to UInt8 strings is complete.
            value.unsafe_ptr().bitcast[Int8](),
            value.length,
        )
        self._value = impl
    @always_inline
    fn __init__[*Ts: Movable](inout self, value: ListLiteral[Ts]):
        """Initializes the object from a list literal.
        Parameters:
            Ts: The list element types.
        Args:
            value: The list value.
        """
        # Start with an empty ref-counted list, then append each literal
        # element, dispatching on its static type at compile time.
        self._value = _RefCountedListRef()
        @parameter
        @always_inline
        fn append[i: Int]():
            # We need to rebind the element to one we know how to convert from.
            # FIXME: This doesn't handle implicit conversions or nested lists.
            alias T = Ts[i]
            @parameter
            if _type_is_eq[T, Int]():
                self._append(value.get[i, Int]())
            elif _type_is_eq[T, Float64]():
                self._append(value.get[i, Float64]())
            elif _type_is_eq[T, Bool]():
                self._append(value.get[i, Bool]())
            elif _type_is_eq[T, StringRef]():
                self._append(value.get[i, StringRef]())
            elif _type_is_eq[T, StringLiteral]():
                self._append(value.get[i, StringLiteral]())
            else:
                constrained[
                    False, "cannot convert nested list element to object"
                ]()
        # Compile-time loop over the literal's element types.
        unroll[append, len(VariadicList(Ts))]()
    @always_inline
    fn __init__(inout self, func: Self.nullary_function):
        """Initializes an object from a function that takes no arguments.
        Args:
            func: The function.
        """
        # Arity is erased here and recovered by `_Function.invoke`.
        self._value = _Function(func)
    @always_inline
    fn __init__(inout self, func: Self.unary_function):
        """Initializes an object from a function that takes one argument.
        Args:
            func: The function.
        """
        self._value = _Function(func)
    @always_inline
    fn __init__(inout self, func: Self.binary_function):
        """Initializes an object from a function that takes two arguments.
        Args:
            func: The function.
        """
        self._value = _Function(func)
    @always_inline
    fn __init__(inout self, func: Self.ternary_function):
        """Initializes an object from a function that takes three arguments.
        Args:
            func: The function.
        """
        self._value = _Function(func)
    @always_inline
    fn __init__(inout self, *attrs: Attr):
        """Initializes the object with a sequence of zero or more attributes.
        The attribute set is fixed after construction; values may change but
        keys cannot be added or removed.
        Args:
            attrs: Zero or more attributes.
        """
        self._value = _RefCountedAttrsDictRef(attrs)
    @always_inline
    fn __moveinit__(inout self, owned existing: object):
        """Move the value of an object.
        Args:
            existing: The object to move.
        """
        self._value = existing._value
        # Reset the source to `None` so its destructor frees nothing.
        existing._value = _ObjectImpl()
    @always_inline
    fn __copyinit__(inout self, existing: object):
        """Copies the object. This clones the underlying string value and
        increases the refcount of lists or dictionaries.
        Args:
            existing: The object to copy.
        """
        # `copy()` is the deep/refcounted copy; plain impl assignment would
        # alias the same buffers and double-free on destruction.
        self._value = existing._value.copy()
    @always_inline
    fn __del__(owned self):
        """Delete the object and release any owned memory."""
        # Manual destroy: frees string buffers, drops list/dict refcounts.
        self._value.destroy()
# ===------------------------------------------------------------------=== #
# Conversion
# ===------------------------------------------------------------------=== #
    fn __bool__(self) -> Bool:
        """Performs conversion to bool according to Python semantics. Integers
        and floats are true if they are non-zero, and strings and lists are true
        if they are non-empty.
        Returns:
            Whether the object is considered true.
        """
        if self._value.is_bool():
            return self._value.get_as_bool()
        # Integers or floats are true if they are non-zero.
        if self._value.is_int():
            return (self._value.get_as_int() != 0).__bool__()
        if self._value.is_float():
            return (self._value.get_as_float() != 0.0).__bool__()
        if self._value.is_str():
            # Strings are true if they are non-empty.
            return self._value.get_as_string().length != 0
        # NOTE(review): `None`, functions, and generic objects also reach this
        # assert; only lists are handled here — confirm intended truthiness for
        # those kinds.
        debug_assert(self._value.is_list(), "expected a list")
        return self._value.get_list_length() != 0
fn __int__(self) raises -> Int:
"""Performs conversion to integer according to Python
semantics.
Returns:
The Int representation of the object.
"""
if self._value.is_bool():
return 1 if self._value.get_as_bool() else 0
if self._value.is_int():
return int(self._value.get_as_int())
if self._value.is_float():
return int(self._value.get_as_float())
raise "object type cannot be converted to an integer"
    @always_inline
    fn __str__(self) -> String:
        """Performs conversion to string according to Python
        semantics.
        Returns:
            The String representation of the object.
        """
        # All rendering logic lives in `_ObjectImpl.__str__`.
        return str(self._value)
# ===------------------------------------------------------------------=== #
# Comparison Operators
# ===------------------------------------------------------------------=== #
@always_inline
fn _comparison_type_check(self) raises:
"""Throws an error if the object cannot be arithmetically compared."""
if not (
self._value.is_bool()
or self._value.is_int()
or self._value.is_float()
):
raise Error("TypeError: not a valid comparison type")
    @staticmethod
    @always_inline
    fn _comparison_op[
        fp_func: fn (Float64, Float64) -> Scalar[DType.bool],
        int_func: fn (Int64, Int64) -> Scalar[DType.bool],
        bool_func: fn (Bool, Bool) -> Bool,
    ](lhs: object, rhs: object) raises -> object:
        """Dispatches comparison operator depending on the type.
        Both operands are first coerced to a common type (bool < int < float
        priority), then the matching comparator parameter is applied.
        Parameters:
            fp_func: Floating point comparator.
            int_func: Integer comparator.
            bool_func: Boolean comparator.
        Args:
            lhs: The left hand value.
            rhs: The right hand value.
        Returns:
            The comparison result.
        """
        lhs._comparison_type_check()
        rhs._comparison_type_check()
        # Work on copies so coercion does not mutate the caller's objects.
        var lhsValue = lhs._value
        var rhsValue = rhs._value
        _ObjectImpl.coerce_comparison_type(lhsValue, rhsValue)
        if lhsValue.is_float():
            return fp_func(lhsValue.get_as_float(), rhsValue.get_as_float())
        if lhsValue.is_int():
            return int_func(lhsValue.get_as_int(), rhsValue.get_as_int())
        debug_assert(lhsValue.is_bool(), "expected both values to be bool")
        return bool_func(lhsValue.get_as_bool(), rhsValue.get_as_bool())
    @always_inline
    fn _string_compare(self, rhs: object) -> Int:
        # Three-way lexicographic compare (-1/0/1); both sides must already be
        # known to hold strings (callers check `is_str` first).
        return self._value.get_as_string().string_compare(
            rhs._value.get_as_string()
        )
@always_inline
fn _list_compare(self, rhs: object) raises -> Int:
var llen = self._value.get_list_length()
var rlen = self._value.get_list_length()
var cmp_len = min(llen, rlen)
for i in range(cmp_len):
var lelt: object = self._value.get_list_element(i)
var relt: object = rhs._value.get_list_element(i)
if lelt < relt:
return -1
if lelt > relt:
return 1
if llen < rlen:
return -1
if llen > rlen:
return 1
return 0
    fn __lt__(self, rhs: object) raises -> object:
        """Less-than comparator. This lexicographically compares strings and
        lists.
        Args:
            rhs: Right hand value.
        Returns:
            True if the object is less than the right hard argument.
        """
        if self._value.is_str() and rhs._value.is_str():
            return self._string_compare(rhs) < 0
        if self._value.is_list() and rhs._value.is_list():
            return self._list_compare(rhs) < 0
        @always_inline
        fn bool_fn(lhs: Bool, rhs: Bool) -> Bool:
            # False < True, matching Python's bool ordering.
            return not lhs and rhs
        return Self._comparison_op[Float64.__lt__, Int64.__lt__, bool_fn](
            self, rhs
        )
    fn __le__(self, rhs: object) raises -> object:
        """Less-than-or-equal to comparator. This lexicographically
        compares strings and lists.
        Args:
            rhs: Right hand value.
        Returns:
            True if the object is less than or equal to the right hard argument.
        """
        if self._value.is_str() and rhs._value.is_str():
            return self._string_compare(rhs) <= 0
        if self._value.is_list() and rhs._value.is_list():
            return self._list_compare(rhs) <= 0
        @always_inline
        fn bool_fn(lhs: Bool, rhs: Bool) -> Bool:
            # lhs <= rhs for bools: equal, or lhs is False.
            return lhs == rhs or not lhs
        return Self._comparison_op[Float64.__le__, Int64.__le__, bool_fn](
            self, rhs
        )
    fn __eq__(self, rhs: object) raises -> object:
        """Equality comparator. This compares the elements of strings
        and lists.
        Args:
            rhs: Right hand value.
        Returns:
            True if the objects are equal.
        """
        if self._value.is_str() and rhs._value.is_str():
            return self._string_compare(rhs) == 0
        if self._value.is_list() and rhs._value.is_list():
            return self._list_compare(rhs) == 0
        @always_inline
        fn bool_fn(lhs: Bool, rhs: Bool) -> Bool:
            return lhs == rhs
        return Self._comparison_op[Float64.__eq__, Int64.__eq__, bool_fn](
            self, rhs
        )
    fn __ne__(self, rhs: object) raises -> object:
        """Inequality comparator. This compares the elements of strings
        and lists.
        Args:
            rhs: Right hand value.
        Returns:
            True if the objects are not equal.
        """
        if self._value.is_str() and rhs._value.is_str():
            return self._string_compare(rhs) != 0
        if self._value.is_list() and rhs._value.is_list():
            return self._list_compare(rhs) != 0
        @always_inline
        fn bool_fn(lhs: Bool, rhs: Bool) -> Bool:
            return lhs != rhs
        return Self._comparison_op[Float64.__ne__, Int64.__ne__, bool_fn](
            self, rhs
        )
    fn __gt__(self, rhs: object) raises -> object:
        """Greater-than comparator. This lexicographically compares the
        elements of strings and lists.
        Args:
            rhs: Right hand value.
        Returns:
            True if the left hand value is greater.
        """
        if self._value.is_str() and rhs._value.is_str():
            return self._string_compare(rhs) > 0
        if self._value.is_list() and rhs._value.is_list():
            return self._list_compare(rhs) > 0
        @always_inline
        fn bool_fn(lhs: Bool, rhs: Bool) -> Bool:
            # True > False, matching Python's bool ordering.
            return lhs and not rhs
        return Self._comparison_op[Float64.__gt__, Int64.__gt__, bool_fn](
            self, rhs
        )
    fn __ge__(self, rhs: object) raises -> object:
        """Greater-than-or-equal-to comparator. This lexicographically
        compares the elements of strings and lists.
        Args:
            rhs: Right hand value.
        Returns:
            True if the left hand value is greater than or equal to the right
            hand value.
        """
        if self._value.is_str() and rhs._value.is_str():
            return self._string_compare(rhs) >= 0
        if self._value.is_list() and rhs._value.is_list():
            return self._list_compare(rhs) >= 0
        @always_inline
        fn bool_fn(lhs: Bool, rhs: Bool) -> Bool:
            # lhs >= rhs for bools: equal, or lhs is True.
            return lhs == rhs or lhs
        return Self._comparison_op[Float64.__ge__, Int64.__ge__, bool_fn](
            self, rhs
        )
# ===------------------------------------------------------------------=== #
# Arithmetic Operators
# ===------------------------------------------------------------------=== #
@always_inline
fn _arithmetic_type_check(self) raises:
"""Throws an error if the object is not arithmetic."""
if not (
self._value.is_bool()
or self._value.is_int()
or self._value.is_float()
):
raise Error("TypeError: not a valid arithmetic type")
@always_inline
fn _arithmetic_integral_type_check(self) raises:
"""Throws an error if the object is not an integral type."""
if not (self._value.is_bool() or self._value.is_int()):
raise Error("TypeError: not a valid integral type")
    @staticmethod
    @always_inline
    fn _arithmetic_binary_op[
        fp_func: fn (Float64, Float64) -> Float64,
        int_func: fn (Int64, Int64) -> Int64,
    ](lhs: object, rhs: object) raises -> object:
        """Generic arithmetic operator. Bool values are treated as
        integers in arithmetic operators.
        Parameters:
            fp_func: Floating point operator.
            int_func: Integer operator.
        Args:
            lhs: The left hand value.
            rhs: The right hand value.
        Returns:
            The arithmetic operation result.
        """
        lhs._arithmetic_type_check()
        rhs._arithmetic_type_check()
        # Work on copies so coercion does not mutate the caller's objects.
        var lhsValue = lhs._value
        var rhsValue = rhs._value
        _ObjectImpl.coerce_arithmetic_type(lhsValue, rhsValue)
        # After coercion both sides are the same kind: float or int.
        if lhsValue.is_float():
            return fp_func(lhsValue.get_as_float(), rhsValue.get_as_float())
        return int_func(lhsValue.get_as_int(), rhsValue.get_as_int())
    @staticmethod
    @always_inline
    fn _arithmetic_bitwise_op[
        int_func: fn (Int64, Int64) -> Int64,
        bool_func: fn (Bool, Bool) -> Bool,
    ](lhs: object, rhs: object) raises -> object:
        """Generic bitwise operator.
        Parameters:
            int_func: Integer operator.
            bool_func: Boolean operator.
        Args:
            lhs: The left hand value.
            rhs: The right hand value.
        Returns:
            The bitwise operation result.
        """
        lhs._arithmetic_integral_type_check()
        rhs._arithmetic_integral_type_check()
        # Work on copies so coercion does not mutate the caller's objects.
        var lhsValue = lhs._value
        var rhsValue = rhs._value
        _ObjectImpl.coerce_integral_type(lhsValue, rhsValue)
        # After coercion both sides are the same kind: int or bool.
        if lhsValue.is_int():
            return int_func(lhsValue.get_as_int(), rhsValue.get_as_int())
        return bool_func(lhsValue.get_as_bool(), rhsValue.get_as_bool())
@always_inline
fn __neg__(self) raises -> object:
"""Negation operator. Only valid for bool, int, and float
types. Negation on any bool value converts it to an integer.
Returns:
The negative of the current value.
"""
if self._value.is_bool():
return -self._value.convert_bool_to_int().get_as_int()
if self._value.is_int():
return -self._value.get_as_int()
if self._value.is_float():
return -self._value.get_as_float()
raise Error("TypeError: cannot apply negation to this type")
@always_inline
fn __invert__(self) raises -> object:
"""Invert value operator. This is only valid for bool and int
values.
Returns:
The inverted value.
"""
if self._value.is_bool():
return ~self._value.get_as_bool()
if self._value.is_int():
return ~self._value.get_as_int()
raise Error("TypeError: cannot invert values of this type")
    @always_inline
    fn __add__(self, rhs: object) raises -> object:
        """Addition and concatenation operator. For arithmetic types, this
        function will compute the sum of the left and right hand values. For
        strings and lists, this function will concat the objects.

        Args:
            rhs: Right hand value.

        Returns:
            The sum or concatenated values.

        Raises:
            An error if the operands are not both strings, both lists, or
            both arithmetic types.
        """
        if self._value.is_str() and rhs._value.is_str():
            # String + string: allocate a buffer for both and copy each half.
            var lhsStr = self._value.get_as_string()
            var rhsStr = rhs._value.get_as_string()
            var length = lhsStr.length + rhsStr.length
            var impl = _ImmutableString(
                UnsafePointer[Int8].alloc(length), length
            )
            memcpy(impl.data, lhsStr.data, lhsStr.length)
            memcpy(impl.data + lhsStr.length, rhsStr.data, rhsStr.length)
            var result = object()
            result._value = impl
            return result
        if self._value.is_list() and rhs._value.is_list():
            # List + list: build a fresh list and append elements of both.
            var result2 = object([])
            for i in range(self.__len__()):
                result2.append(self[i])
            for j in range(rhs.__len__()):
                result2.append(rhs[j])
            return result2
        # Fall back to numeric addition (bools are treated as integers).
        return Self._arithmetic_binary_op[Float64.__add__, Int64.__add__](
            self, rhs
        )
    @always_inline
    fn __sub__(self, rhs: object) raises -> object:
        """Subtraction operator. Valid only for arithmetic types.

        Args:
            rhs: Right hand value.

        Returns:
            The difference.

        Raises:
            An error if either operand is not an arithmetic type.
        """
        return Self._arithmetic_binary_op[Float64.__sub__, Int64.__sub__](
            self, rhs
        )
    @always_inline
    fn __mul__(self, rhs: object) raises -> object:
        """Multiplication operator. Valid only for arithmetic types.

        Args:
            rhs: Right hand value.

        Returns:
            The product.

        Raises:
            An error if either operand is not an arithmetic type.
        """
        return Self._arithmetic_binary_op[Float64.__mul__, Int64.__mul__](
            self, rhs
        )
    @always_inline
    fn __pow__(self, exp: object) raises -> object:
        """Exponentiation operator. Valid only for arithmetic types.

        Args:
            exp: Exponent value.

        Returns:
            The left hand value raised to the power of the right hand value.

        Raises:
            An error if either operand is not an arithmetic type.
        """
        return Self._arithmetic_binary_op[Float64.__pow__, Int64.__pow__](
            self, exp
        )
    @always_inline
    fn __mod__(self, rhs: object) raises -> object:
        """Modulo operator. Valid only for arithmetic types.

        Args:
            rhs: Right hand value.

        Returns:
            The left hand value mod the right hand value.

        Raises:
            An error if either operand is not an arithmetic type.
        """
        return Self._arithmetic_binary_op[Float64.__mod__, Int64.__mod__](
            self, rhs
        )
    @always_inline
    fn __truediv__(self, rhs: object) raises -> object:
        """True division operator. Valid only for arithmetic types.

        Args:
            rhs: Right hand value.

        Returns:
            The left hand value true divide the right hand value.

        Raises:
            An error if either operand is not an arithmetic type.
        """
        return Self._arithmetic_binary_op[
            Float64.__truediv__, Int64.__truediv__
        ](self, rhs)
    @always_inline
    fn __floordiv__(self, rhs: object) raises -> object:
        """Floor division operator. Valid only for arithmetic types.

        Args:
            rhs: Right hand value.

        Returns:
            The left hand value floor divide the right hand value.

        Raises:
            An error if either operand is not an arithmetic type.
        """
        return Self._arithmetic_binary_op[
            Float64.__floordiv__, Int64.__floordiv__
        ](self, rhs)
    @always_inline
    fn __lshift__(self, rhs: object) raises -> object:
        """Left shift operator. Valid only for integral types (bool and int);
        both operands are read as integers.

        Args:
            rhs: Right hand value.

        Returns:
            The left hand value left shifted by the right hand value.

        Raises:
            An error if either operand is not an integral type.
        """
        self._arithmetic_integral_type_check()
        rhs._arithmetic_integral_type_check()
        return object(self._value.get_as_int() << rhs._value.get_as_int())
    @always_inline
    fn __rshift__(self, rhs: object) raises -> object:
        """Right shift operator. Valid only for integral types (bool and int);
        both operands are read as integers.

        Args:
            rhs: Right hand value.

        Returns:
            The left hand value right shifted by the right hand value.

        Raises:
            An error if either operand is not an integral type.
        """
        self._arithmetic_integral_type_check()
        rhs._arithmetic_integral_type_check()
        return object(self._value.get_as_int() >> rhs._value.get_as_int())
    @always_inline
    fn __and__(self, rhs: object) raises -> object:
        """Bitwise AND operator. Valid only for integral types (bool and int).

        Args:
            rhs: Right hand value.

        Returns:
            The bitwise AND of the two operands.

        Raises:
            An error if either operand is not an integral type.
        """
        return Self._arithmetic_bitwise_op[Int64.__and__, Bool.__and__](
            self, rhs
        )
    @always_inline
    fn __or__(self, rhs: object) raises -> object:
        """Bitwise OR operator. Valid only for integral types (bool and int).

        Args:
            rhs: Right hand value.

        Returns:
            The bitwise OR of the two operands.

        Raises:
            An error if either operand is not an integral type.
        """
        return Self._arithmetic_bitwise_op[Int64.__or__, Bool.__or__](self, rhs)
    @always_inline
    fn __xor__(self, rhs: object) raises -> object:
        """Bitwise XOR operator. Valid only for integral types (bool and int).

        Args:
            rhs: Right hand value.

        Returns:
            The bitwise XOR of the two operands.

        Raises:
            An error if either operand is not an integral type.
        """
        return Self._arithmetic_bitwise_op[Int64.__xor__, Bool.__xor__](
            self, rhs
        )
# ===------------------------------------------------------------------=== #
# In-Place Operators
# ===------------------------------------------------------------------=== #
@always_inline
fn __iadd__(inout self, rhs: object) raises:
"""In-place addition or concatenation operator.
Args:
rhs: Right hand value.
"""
self = self + rhs
@always_inline
fn __isub__(inout self, rhs: object) raises:
"""In-place subtraction operator.
Args:
rhs: Right hand value.
"""
self = self - rhs
@always_inline
fn __imul__(inout self, rhs: object) raises:
"""In-place multiplication operator.
Args:
rhs: Right hand value.
"""
self = self * rhs
@always_inline
fn __ipow__(inout self, rhs: object) raises:
"""In-place exponentiation operator.
Args:
rhs: Right hand value.
"""
self = self**rhs
@always_inline
fn __imod__(inout self, rhs: object) raises:
"""In-place modulo operator.
Args:
rhs: Right hand value.
"""
self = self % rhs
@always_inline
fn __itruediv__(inout self, rhs: object) raises:
"""In-place true division operator.
Args:
rhs: Right hand value.
"""
self = self / rhs
@always_inline
fn __ifloordiv__(inout self, rhs: object) raises:
"""In-place floor division operator.
Args:
rhs: Right hand value.
"""
self = self // rhs
@always_inline
fn __ilshift__(inout self, rhs: object) raises:
"""In-place left shift operator.
Args:
rhs: Right hand value.
"""
self = self << rhs
@always_inline
fn __irshift__(inout self, rhs: object) raises:
"""In-place right shift operator.
Args:
rhs: Right hand value.
"""
self = self >> rhs
@always_inline
fn __iand__(inout self, rhs: object) raises:
"""In-place AND operator.
Args:
rhs: Right hand value.
"""
self = self & rhs
@always_inline
fn __ior__(inout self, rhs: object) raises:
"""In-place OR operator.
Args:
rhs: Right hand value.
"""
self = self | rhs
@always_inline
fn __ixor__(inout self, rhs: object) raises:
"""In-place XOR operator.
Args:
rhs: Right hand value.
"""
self = self ^ rhs
# ===------------------------------------------------------------------=== #
# Reversed Operators
# ===------------------------------------------------------------------=== #
@always_inline
fn __radd__(self, lhs: object) raises -> object:
"""Reverse addition or concatenation operator.
Args:
lhs: Left hand value.
Returns:
The sum or concatenated value.
"""
return lhs + self
@always_inline
fn __rsub__(self, lhs: object) raises -> object:
"""Reverse subtraction operator.
Args:
lhs: Left hand value.
Returns:
The result of subtracting this from the left-hand-side value.
"""
return lhs - self
@always_inline
fn __rmul__(self, lhs: object) raises -> object:
"""Reverse multiplication operator.
Args:
lhs: Left hand value.
Returns:
The product.
"""
return lhs * self
@always_inline
fn __rpow__(self, lhs: object) raises -> object:
"""Reverse exponentiation operator.
Args:
lhs: Left hand value.
Returns:
The left hand value raised to the power of the right hand value.
"""
return lhs**self
@always_inline
fn __rmod__(self, lhs: object) raises -> object:
"""Reverse modulo operator.
Args:
lhs: Left hand value.
Returns:
The left hand value mod the right hand value.
"""
return lhs % self
@always_inline
fn __rtruediv__(self, lhs: object) raises -> object:
"""Reverse true division operator.
Args:
lhs: Left hand value.
Returns:
The left hand value divide the right hand value.
"""
return lhs / self
@always_inline
fn __rfloordiv__(self, lhs: object) raises -> object:
"""Reverse floor division operator.
Args:
lhs: Left hand value.
Returns:
The left hand value floor divide the right hand value.
"""
return lhs // self
@always_inline
fn __rlshift__(self, lhs: object) raises -> object:
"""Reverse left shift operator.
Args:
lhs: Left hand value.
Returns:
The left hand value left shifted by the right hand value.
"""
return lhs << self
@always_inline
fn __rrshift__(self, lhs: object) raises -> object:
"""Reverse right shift operator.
Args:
lhs: Left hand value.
Returns:
The left hand value right shifted by the right hand value.
"""
return lhs >> self
@always_inline
fn __rand__(self, lhs: object) raises -> object:
"""Reverse AND operator.
Args:
lhs: Left hand value.
Returns:
The bitwise AND of the left-hand-side value and this.
"""
return lhs & self
@always_inline
fn __ror__(self, lhs: object) raises -> object:
"""Reverse OR operator.
Args:
lhs: Left hand value.
Returns:
The bitwise OR of the left-hand-side value and this.
"""
return lhs | self
@always_inline
fn __rxor__(self, lhs: object) raises -> object:
"""Reverse XOR operator.
Args:
lhs: Left hand value.
Returns:
The bitwise XOR of the left-hand-side value and this.
"""
return lhs ^ self
# ===------------------------------------------------------------------=== #
# Interface
# ===------------------------------------------------------------------=== #
    @always_inline
    fn append(self, value: object) raises:
        """Appends a value to the list.

        Args:
            value: The value to append.

        Raises:
            An error if the object is not a list (and not a user-defined
            object providing its own `append`).
        """
        # User-defined objects delegate to their own `append` attribute.
        if self._value.is_obj():
            _ = object(self._value.get_obj_attr("append"))(self, value)
            return
        if not self._value.is_list():
            raise Error("TypeError: can only append to lists")
        self._append(value)
    @always_inline
    fn _append(self, value: object):
        # Unchecked append: caller must have verified this is a list. A copy
        # of the value's impl is stored so the argument remains usable.
        self._value.list_append(value._value.copy())
    @always_inline
    fn __len__(self) raises -> Int:
        """Returns the "length" of the object. Only strings and lists
        currently have lengths (the error below reflects this; dictionary
        support is not implemented here).

        Returns:
            The length of the string value or the number of elements in the
            list value.

        Raises:
            An error if the object is not a string or a list.
        """
        if self._value.is_str():
            return self._value.get_as_string().length
        if self._value.is_list():
            return self._value.get_list_length()
        raise Error("TypeError: only strings and lists have length")
    @staticmethod
    @always_inline
    fn _convert_index_to_int(i: object) raises -> Int:
        """Normalizes an index object to an Int: bools become 0/1, ints pass
        through, and anything else raises."""
        if i._value.is_bool():
            return i._value.convert_bool_to_int().get_as_int().value
        elif not i._value.is_int():
            raise Error("TypeError: string indices must be integers")
        return i._value.get_as_int().value
@always_inline
fn __getitem__(self, i: object) raises -> object:
"""Gets the i-th item from the object. This is only valid for strings,
lists, and dictionaries.
Args:
i: The string or list index, or dictionary key.
Returns:
The value at the index or key.
"""
if self._value.is_obj():
return object(self._value.get_obj_attr("__getitem__"))(self, i)
if not self._value.is_str() and not self._value.is_list():
raise Error("TypeError: can only index into lists and strings")
var index = Self._convert_index_to_int(i)
if self._value.is_str():
var impl = _ImmutableString(UnsafePointer[Int8].alloc(1), 1)
initialize_pointee_copy(
impl.data,
move_from_pointee(self._value.get_as_string().data + index),
)
return _ObjectImpl(impl)
return self._value.get_list_element(i._value.get_as_int().value)
    @always_inline
    fn __getitem__(self, *index: object) raises -> object:
        """Gets the i-th item from the object, where i is a tuple of indices.

        Args:
            index: A compound index.

        Returns:
            The value at the index.

        Raises:
            An error if any intermediate value is not indexable.
        """
        # Applies each index in turn: obj[a, b, c] behaves as obj[a][b][c].
        var value = self
        for i in index:
            value = value[i[]]
        return value
    @always_inline
    fn __setitem__(self, i: object, value: object) raises -> None:
        """Sets the i-th item in the object. This is only valid for strings,
        lists, and dictionaries.

        Args:
            i: The string or list index, or dictionary key.
            value: The value to set.

        Raises:
            An error if the object is a string (immutable) or not a list, or
            if the index is not an integer or bool.
        """
        # User-defined objects delegate to their own __setitem__ attribute.
        if self._value.is_obj():
            _ = object(self._value.get_obj_attr("__setitem__"))(self, i, value)
            return
        if self._value.is_str():
            raise Error(
                "TypeError: 'str' object does not support item assignment"
            )
        if not self._value.is_list():
            raise Error("TypeError: can only assign items in lists")
        # Normalizes bool indices to integers before assignment; a copy of
        # the value's impl is stored so the argument remains usable.
        var index = Self._convert_index_to_int(i)
        self._value.set_list_element(index.value, value._value.copy())
    @always_inline
    fn __setitem__(self, i: object, j: object, value: object) raises:
        """Sets the (i, j)-th element in the object.

        FIXME: We need this because `obj[i, j] = value` will attempt to invoke
        this method with 3 arguments, and we can only have variadics as the last
        argument.

        Args:
            i: The first index.
            j: The second index.
            value: The value to set.

        Raises:
            An error if either indexing step fails.
        """
        self[i][j] = value
    @always_inline
    fn __getattr__(self, key: StringLiteral) raises -> object:
        """Gets the named attribute of a user-defined object.

        Args:
            key: The attribute name.

        Returns:
            The attribute's value.

        Raises:
            An error if this value is not an attribute-bearing object.
        """
        if not self._value.is_obj():
            raise Error(
                "TypeError: Type '"
                + self._value._get_type_name()
                + "' does not have attribute '"
                + key
                + "'"
            )
        return self._value.get_obj_attr(key)
    @always_inline
    fn __setattr__(inout self, key: StringLiteral, value: object) raises:
        """Sets the named attribute of a user-defined object.

        Args:
            key: The attribute name.
            value: The value to assign. A copy of its impl is stored so the
                argument remains usable.

        Raises:
            An error if this value is not an attribute-bearing object.
        """
        if not self._value.is_obj():
            raise Error(
                "TypeError: Type '"
                + self._value._get_type_name()
                + "' does not have attribute '"
                + key
                + "'"
            )
        self._value.set_obj_attr(key, value._value.copy())
    @always_inline
    fn __call__(self) raises -> object:
        """Calls the object as a function with no arguments.

        Returns:
            The function call result.

        Raises:
            An error if the object is not a function.
        """
        if not self._value.is_func():
            raise Error("TypeError: Object is not a function")
        return self._value.get_as_func().invoke()
    @always_inline
    fn __call__(self, arg0: object) raises -> object:
        """Calls the object as a function with one argument.

        Args:
            arg0: The first argument.

        Returns:
            The function call result.

        Raises:
            An error if the object is not a function.
        """
        if not self._value.is_func():
            raise Error("TypeError: Object is not a function")
        return self._value.get_as_func().invoke(arg0)
    @always_inline
    fn __call__(self, arg0: object, arg1: object) raises -> object:
        """Calls the object as a function with two arguments.

        Args:
            arg0: The first argument.
            arg1: The second argument.

        Returns:
            The function call result.

        Raises:
            An error if the object is not a function.
        """
        if not self._value.is_func():
            raise Error("TypeError: Object is not a function")
        return self._value.get_as_func().invoke(arg0, arg1)
    @always_inline
    fn __call__(
        self, arg0: object, arg1: object, arg2: object
    ) raises -> object:
        """Calls the object as a function with three arguments.

        Args:
            arg0: The first argument.
            arg1: The second argument.
            arg2: The third argument.

        Returns:
            The function call result.

        Raises:
            An error if the object is not a function.
        """
        if not self._value.is_func():
            raise Error("TypeError: Object is not a function")
        return self._value.get_as_func().invoke(arg0, arg1, arg2)
| mojo/stdlib/src/builtin/object.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements a 'range' call.
These are Mojo built-ins, so you don't need to import them.
"""
from python import PythonObject
# FIXME(MOCO-658): Explicit conformance to these traits shouldn't be needed.
from builtin._stubs import _IntIterable, _StridedIterable
# ===----------------------------------------------------------------------=== #
# Utilities
# ===----------------------------------------------------------------------=== #
# TODO: use math.ceildiv when open sourced.
@always_inline
fn _div_ceil_positive(numerator: Int, denominator: Int) -> Int:
    """Divides an integer by another integer, and round up to the nearest
    integer.

    Constraints:
        In debug builds a zero denominator trips the `debug_assert` below
        (this is not a raised exception). Assumes that both inputs are
        positive.

    Args:
        numerator: The numerator.
        denominator: The denominator.

    Returns:
        The ceiling of numerator divided by denominator.
    """
    debug_assert(denominator != 0, "divide by zero")
    # Classic ceil-division trick for positive operands; _positive_div
    # avoids the signed-division overhead.
    return (numerator + denominator - 1)._positive_div(denominator)
@always_inline
fn _sign(x: Int) -> Int:
    """Returns the sign of x: 1 for positive, -1 for negative, 0 for zero."""
    if x == 0:
        return 0
    return 1 if x > 0 else -1
# ===----------------------------------------------------------------------=== #
# Range
# ===----------------------------------------------------------------------=== #
@register_passable("trivial")
struct _ZeroStartingRange(Sized, ReversibleRange, _IntIterable):
    """Iterator over [0, end). `curr` counts down the number of values left
    while `end` stores the clamped total, so each `__next__` yields
    `end - curr`."""

    # Number of values still to be produced (doubles as the loop counter).
    var curr: Int
    # The non-negative total length of the range.
    var end: Int

    @always_inline("nodebug")
    fn __init__(inout self, end: Int):
        # A negative end yields an empty range: both fields get max(0, end).
        self.curr = max(0, end)
        self.end = self.curr

    @always_inline("nodebug")
    fn __iter__(self) -> Self:
        return self

    @always_inline
    fn __next__(inout self) -> Int:
        # Decrement the remaining count and map it back to an ascending value.
        var curr = self.curr
        self.curr -= 1
        return self.end - curr

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        return self.curr

    @always_inline("nodebug")
    fn __getitem__(self, idx: Int) -> Int:
        # The i-th element of range(n) is i itself; idx is not bounds-checked.
        return index(idx)

    @always_inline("nodebug")
    fn __reversed__(self) -> _StridedRange:
        return range(self.end - 1, -1, -1)
@value
@register_passable("trivial")
struct _SequentialRange(Sized, ReversibleRange, _IntIterable):
    """Iterator over [start, end) with an implicit step of 1."""

    # The next value to be produced (advances as iteration proceeds).
    var start: Int
    # Exclusive upper bound.
    var end: Int

    @always_inline("nodebug")
    fn __iter__(self) -> Self:
        return self

    @always_inline
    fn __next__(inout self) -> Int:
        var start = self.start
        self.start += 1
        return start

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        # FIXME(#38392):
        # return max(0, self.end - self.start)
        return self.end - self.start if self.start < self.end else 0

    @always_inline("nodebug")
    fn __getitem__(self, idx: Int) -> Int:
        # idx is not bounds-checked.
        return self.start + index(idx)

    @always_inline("nodebug")
    fn __reversed__(self) -> _StridedRange:
        # Walk from the last produced value down to (but excluding) start - 1.
        return range(self.end - 1, self.start - 1, -1)
@value
@register_passable("trivial")
struct _StridedRangeIterator(Sized):
    """Iteration state for _StridedRange. Note that `__len__` returns the
    remaining *distance*, not the element count; the for-loop protocol only
    needs it to be positive while values remain and zero when exhausted."""

    var start: Int
    var end: Int
    var step: Int

    @always_inline
    fn __len__(self) -> Int:
        # Positive while start has not crossed end in the step direction;
        # covers both ascending (step > 0) and descending (step < 0) ranges.
        if self.step > 0 and self.start < self.end:
            return self.end - self.start
        elif self.step < 0 and self.start > self.end:
            return self.start - self.end
        else:
            return 0

    @always_inline
    fn __next__(inout self) -> Int:
        var result = self.start
        self.start += self.step
        return result
@value
@register_passable("trivial")
struct _StridedRange(Sized, ReversibleRange, _StridedIterable):
    """Range over [start, end) advancing by `step` (which may be negative)."""

    var start: Int
    var end: Int
    var step: Int

    @always_inline("nodebug")
    fn __init__(inout self, end: Int):
        self.start = 0
        self.end = end
        self.step = 1

    @always_inline("nodebug")
    fn __init__(inout self, start: Int, end: Int):
        self.start = start
        self.end = end
        self.step = 1

    @always_inline("nodebug")
    fn __iter__(self) -> _StridedRangeIterator:
        return _StridedRangeIterator(self.start, self.end, self.step)

    @always_inline
    fn __next__(inout self) -> Int:
        var result = self.start
        self.start += self.step
        return result

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        # FIXME(#38392)
        # if (self.step > 0) == (self.start > self.end):
        #     return 0
        # ceil(|end - start| / |step|); note the commented-out early-out for
        # sign-mismatched ranges is not applied, per the FIXME above.
        return _div_ceil_positive(abs(self.start - self.end), abs(self.step))

    @always_inline("nodebug")
    fn __getitem__(self, idx: Int) -> Int:
        # idx is not bounds-checked.
        return self.start + index(idx) * self.step

    @always_inline("nodebug")
    fn __reversed__(self) -> _StridedRange:
        # The reversed range starts from the last value this range actually
        # produces: shift end one unit toward start, then snap onto the
        # stride grid with the modulo. The new exclusive bound sits one step
        # before start, and the step flips sign.
        var shifted_end = self.end - _sign(self.step)
        var start = shifted_end - ((shifted_end - self.start) % self.step)
        var end = self.start - self.step
        var step = -self.step
        return range(start, end, step)
@always_inline("nodebug")
fn range[type: Intable](end: type) -> _ZeroStartingRange:
    """Constructs a [0; end) Range. A non-positive end yields an empty range.

    Parameters:
        type: The type of the end value.

    Args:
        end: The end of the range.

    Returns:
        The constructed range.
    """
    return _ZeroStartingRange(int(end))
@always_inline
fn range[type: IntableRaising](end: type) raises -> _ZeroStartingRange:
    """Constructs a [0; end) Range. A non-positive end yields an empty range.

    Parameters:
        type: The type of the end value.

    Args:
        end: The end of the range.

    Returns:
        The constructed range.

    Raises:
        An error if converting end to an Int raises.
    """
    return _ZeroStartingRange(int(end))
@always_inline("nodebug")
fn range[t0: Intable, t1: Intable](start: t0, end: t1) -> _SequentialRange:
    """Constructs a [start; end) Range with step 1. If end <= start the
    range is empty.

    Parameters:
        t0: The type of the start value.
        t1: The type of the end value.

    Args:
        start: The start of the range.
        end: The end of the range.

    Returns:
        The constructed range.
    """
    return _SequentialRange(int(start), int(end))
@always_inline("nodebug")
fn range[
    t0: IntableRaising, t1: IntableRaising
](start: t0, end: t1) raises -> _SequentialRange:
    """Constructs a [start; end) Range with step 1. If end <= start the
    range is empty.

    Parameters:
        t0: The type of the start value.
        t1: The type of the end value.

    Args:
        start: The start of the range.
        end: The end of the range.

    Returns:
        The constructed range.

    Raises:
        An error if converting start or end to an Int raises.
    """
    return _SequentialRange(int(start), int(end))
@always_inline
fn range[
    t0: Intable, t1: Intable, t2: Intable
](start: t0, end: t1, step: t2) -> _StridedRange:
    """Constructs a [start; end) Range with a given step. The step may be
    negative to iterate downward; it must not be zero.

    Parameters:
        t0: The type of the start value.
        t1: The type of the end value.
        t2: The type of the step value.

    Args:
        start: The start of the range.
        end: The end of the range.
        step: The step for the range.

    Returns:
        The constructed range.
    """
    return _StridedRange(int(start), int(end), int(step))
@always_inline
fn range[
    t0: IntableRaising, t1: IntableRaising, t2: IntableRaising
](start: t0, end: t1, step: t2) raises -> _StridedRange:
    """Constructs a [start; end) Range with a given step. The step may be
    negative to iterate downward; it must not be zero.

    Parameters:
        t0: The type of the start value.
        t1: The type of the end value.
        t2: The type of the step value.

    Args:
        start: The start of the range.
        end: The end of the range.
        step: The step for the range.

    Returns:
        The constructed range.

    Raises:
        An error if converting start, end, or step to an Int raises.
    """
    return _StridedRange(int(start), int(end), int(step))
| mojo/stdlib/src/builtin/range.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements type rebind.
These are Mojo built-ins, so you don't need to import them.
"""
@always_inline("nodebug")
fn rebind[
    dest_type: AnyTrivialRegType,
    src_type: AnyTrivialRegType,
](val: src_type) -> dest_type:
    """Statically assert that a parameter input type `src_type` resolves to the
    same type as a parameter result type `dest_type` after function
    instantiation and "rebind" the input to the result type.

    This function is meant to be used in uncommon cases where a parametric type
    depends on the value of a constrained parameter in order to manually refine
    the type with the constrained parameter value. If the two types do not
    match at instantiation time, compilation fails; there is no runtime cost.

    Parameters:
        dest_type: The type to rebind to.
        src_type: The original type.

    Args:
        val: The value to rebind.

    Returns:
        The rebound value of `dest_type`.
    """
    return __mlir_op.`kgen.rebind`[_type=dest_type](val)
| mojo/stdlib/src/builtin/rebind.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provide the `repr` function.
The functions and traits provided here are built-ins, so you don't need to import them.
"""
trait Representable:
    """A trait that describes a type that has a String representation.

    Any type that conforms to the `Representable` trait can be used with the
    `repr` function. Any conforming type must also implement the `__repr__` method.
    Here is an example:

    ```mojo
    @value
    struct Dog(Representable):
        var name: String
        var age: Int

        fn __repr__(self) -> String:
            return "Dog(name=" + repr(self.name) + ", age=" + repr(self.age) + ")"

    var dog = Dog("Rex", 5)
    print(repr(dog))
    # Dog(name='Rex', age=5)
    ```

    The method `__repr__` should compute the "official" string representation of a type.
    If at all possible, this should look like a valid Mojo expression
    that could be used to recreate a struct instance with the same
    value (given an appropriate environment).
    So a returned String of the form `module_name.SomeStruct(arg1=value1, arg2=value2)` is advised.
    If this is not possible, a string of the form `<...some useful description...>`
    should be returned.

    The return value must be a `String` instance.
    This is typically used for debugging, so it is important that the representation is information-rich and unambiguous.

    Note that when computing the string representation of a collection (`Dict`, `List`, `Set`, etc...),
    the `repr` function is called on each element, not the `str()` function.
    """

    fn __repr__(self) -> String:
        """Get the string representation of the type instance, if possible, compatible with Mojo syntax.

        Returns:
            The string representation of the instance.
        """
        pass
fn repr[T: Representable](value: T) -> String:
    """Returns the string representation of the given value, by delegating
    to its `__repr__` method.

    Args:
        value: The value to get the string representation of.

    Parameters:
        T: The type of `value`. Must implement the `Representable` trait.

    Returns:
        The string representation of the given value.
    """
    return value.__repr__()
fn repr(value: None) -> String:
    """Returns the string representation of `None`. Overload needed because
    `None` does not conform to `Representable`.

    Args:
        value: A `None` value.

    Returns:
        The string representation of `None`.
    """
    return "None"
| mojo/stdlib/src/builtin/repr.mojo | false |
<filename>mojo/stdlib/src/builtin/reversed.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provides the `reversed` function for reverse iteration over collections.
These are Mojo built-ins, so you don't need to import them.
"""
from .range import _StridedRange
from collections.list import _ListIter
from collections.dict import _DictKeyIter, _DictValueIter, _DictEntryIter
# ===----------------------------------------------------------------------=== #
# Reversible
# ===----------------------------------------------------------------------=== #
trait ReversibleRange:
    """
    The `ReversibleRange` trait describes a range that can be reversed.

    Any type that conforms to `ReversibleRange` works with the builtin
    [`reversed()`](/mojo/stdlib/builtin/reversed.html) functions.

    The `ReversibleRange` trait requires the type to define the `__reversed__()`
    method.

    **Note**: iterators are currently non-raising.
    """

    # TODO: general `Reversible` trait that returns an iterator.
    # iterators currently check __len__() instead of raising an exception
    # so there is no ReversibleRaising trait yet.

    fn __reversed__(self) -> _StridedRange:
        """Get a reversed iterator for the type.

        **Note**: iterators are currently non-raising.

        Returns:
            The reversed iterator of the type, as a strided range.
        """
        ...
# ===----------------------------------------------------------------------=== #
# reversed
# ===----------------------------------------------------------------------=== #
fn reversed[T: ReversibleRange](value: T) -> _StridedRange:
    """Get a reversed iterator of the input range, by delegating to its
    `__reversed__` method.

    **Note**: iterators are currently non-raising.

    Parameters:
        T: The type conforming to ReversibleRange.

    Args:
        value: The range to get the reversed iterator of.

    Returns:
        The reversed iterator of the range.
    """
    return value.__reversed__()
fn reversed[
    T: CollectionElement
](
    value: Reference[List[T], _, _],
) -> _ListIter[
    T, value.is_mutable, value.lifetime, False
]:
    """Get a reversed iterator of the input list. The iterator borrows the
    list (with the reference's mutability and lifetime); the `False`
    parameter selects backward iteration.

    **Note**: iterators are currently non-raising.

    Parameters:
        T: The type of the elements in the list.

    Args:
        value: The list to get the reversed iterator of.

    Returns:
        The reversed iterator of the list.
    """
    return value[].__reversed__()
fn reversed[
    K: KeyElement,
    V: CollectionElement,
](
    value: Reference[Dict[K, V], _, _],
) -> _DictKeyIter[
    K, V, value.is_mutable, value.lifetime, False
]:
    """Get a reversed iterator over the keys of the input dict. The iterator
    borrows the dict; the `False` parameter selects backward iteration.

    **Note**: iterators are currently non-raising.

    Parameters:
        K: The type of the keys in the dict.
        V: The type of the values in the dict.

    Args:
        value: The dict to get the reversed iterator of.

    Returns:
        The reversed iterator of the dict keys.
    """
    return value[].__reversed__()
fn reversed[
    mutability: Bool,
    self_life: AnyLifetime[mutability].type,
    K: KeyElement,
    V: CollectionElement,
    dict_mutability: Bool,
    dict_lifetime: AnyLifetime[dict_mutability].type,
](
    value: Reference[
        _DictValueIter[K, V, dict_mutability, dict_lifetime],
        mutability,
        self_life,
    ]._mlir_type,
    # NOTE: takes the raw MLIR reference type and re-wraps it below — this
    # plumbing mirrors the _DictEntryIter overload.
) -> _DictValueIter[K, V, dict_mutability, dict_lifetime, False]:
    """Get a reversed iterator of the input dict values.

    **Note**: iterators are currently non-raising.

    Parameters:
        mutability: Whether the reference to the dict is mutable.
        self_life: The lifetime of the dict.
        K: The type of the keys in the dict.
        V: The type of the values in the dict.
        dict_mutability: Whether the reference to the dict values is mutable.
        dict_lifetime: The lifetime of the dict values.

    Args:
        value: The dict values to get the reversed iterator of.

    Returns:
        The reversed iterator of the dict values.
    """
    return Reference(value)[].__reversed__[mutability, self_life]()
fn reversed[
    mutability: Bool,
    self_life: AnyLifetime[mutability].type,
    K: KeyElement,
    V: CollectionElement,
    dict_mutability: Bool,
    dict_lifetime: AnyLifetime[dict_mutability].type,
](
    value: Reference[
        _DictEntryIter[K, V, dict_mutability, dict_lifetime],
        mutability,
        self_life,
    ]._mlir_type,
) -> _DictEntryIter[K, V, dict_mutability, dict_lifetime, False]:
    """Get a reversed iterator of the input dict items.

    **Note**: iterators are currently non-raising.

    Parameters:
        mutability: Whether the reference to the dict is mutable.
        self_life: The lifetime of the dict.
        K: The type of the keys in the dict.
        V: The type of the values in the dict.
        dict_mutability: Whether the reference to the dict items is mutable.
        dict_lifetime: The lifetime of the dict items.

    Args:
        value: The dict items to get the reversed iterator of.

    Returns:
        The reversed iterator of the dict items.
    """
    var src = Reference(value)[].src
    # Start scanning from the last slot of the dict's backing storage down
    # to slot 0. NOTE(review): assumes the iterator itself skips unoccupied
    # slots — confirm against _DictEntryIter's implementation.
    return _DictEntryIter[K, V, dict_mutability, dict_lifetime, False](
        src[]._reserved - 1, 0, src
    )
| mojo/stdlib/src/builtin/reversed.mojo | false |
<filename>mojo/stdlib/src/builtin/simd.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements SIMD struct.
These are Mojo built-ins, so you don't need to import them.
"""
from bit import pop_count
from sys import (
llvm_intrinsic,
has_neon,
is_x86,
triple_is_nvidia_cuda,
simdwidthof,
_RegisterPackType,
)
from builtin._math import Ceilable, CeilDivable, Floorable, Truncable
from builtin.hash import _hash_simd
from memory import bitcast
from utils.numerics import (
FPUtils,
isnan as _isnan,
nan as _nan,
max_finite as _max_finite,
min_finite as _min_finite,
max_or_inf as _max_or_inf,
min_or_neg_inf as _min_or_neg_inf,
)
from utils._visualizers import lldb_formatter_wrapping_type
from utils import InlineArray, StringSlice
from .dtype import (
_integral_type_of,
_get_dtype_printf_format,
_scientific_notation_digits,
)
from .io import _snprintf_scalar, _printf, _print_fmt
from .string import _calc_initial_buffer_size, _calc_format_buffer_size
# ===----------------------------------------------------------------------=== #
# Type Aliases
# ===----------------------------------------------------------------------=== #
# Convenience aliases: `Scalar` is a one-element SIMD vector, and the sized
# aliases below name scalars of each common element dtype.
alias Scalar = SIMD[size=1]
"""Represents a scalar dtype."""

alias Int8 = Scalar[DType.int8]
"""Represents an 8-bit signed scalar integer."""
alias UInt8 = Scalar[DType.uint8]
"""Represents an 8-bit unsigned scalar integer."""
alias Int16 = Scalar[DType.int16]
"""Represents a 16-bit signed scalar integer."""
alias UInt16 = Scalar[DType.uint16]
"""Represents a 16-bit unsigned scalar integer."""
alias Int32 = Scalar[DType.int32]
"""Represents a 32-bit signed scalar integer."""
alias UInt32 = Scalar[DType.uint32]
"""Represents a 32-bit unsigned scalar integer."""
alias Int64 = Scalar[DType.int64]
"""Represents a 64-bit signed scalar integer."""
alias UInt64 = Scalar[DType.uint64]
"""Represents a 64-bit unsigned scalar integer."""

alias BFloat16 = Scalar[DType.bfloat16]
"""Represents a 16-bit brain floating point value."""
alias Float16 = Scalar[DType.float16]
"""Represents a 16-bit floating point value."""
alias Float32 = Scalar[DType.float32]
"""Represents a 32-bit floating point value."""
alias Float64 = Scalar[DType.float64]
"""Represents a 64-bit floating point value."""
# ===----------------------------------------------------------------------=== #
# Utilities
# ===----------------------------------------------------------------------=== #
@always_inline("nodebug")
fn _simd_construction_checks[type: DType, size: Int]():
    """Checks if the SIMD size is valid.

    The SIMD size is valid if it is a power of two and is positive.

    Parameters:
      type: The data type of SIMD vector elements.
      size: The number of elements in the SIMD vector.
    """
    constrained[type != DType.invalid, "simd type cannot be DType.invalid"]()
    constrained[size > 0, "simd width must be > 0"]()
    # Power-of-two check via the classic `n & (n - 1) == 0` trick.
    constrained[size & (size - 1) == 0, "simd width must be power of 2"]()
    # bf16 lowering is not available on ARM/neon targets; reject it early
    # with a clear message instead of failing deep inside codegen.
    constrained[
        type != DType.bfloat16 or not has_neon(),
        "bf16 is not supported for ARM architectures",
    ]()
@always_inline("nodebug")
fn _unchecked_zero[type: DType, size: Int]() -> SIMD[type, size]:
    """Returns an all-zero SIMD vector, skipping `_simd_construction_checks`.

    Used internally where the parameters are already known to be valid (or
    where the checks must be bypassed, e.g. the unsupported-cast fallback).
    """
    # Materialize a compile-time zero scalar of the element type...
    var zero = __mlir_op.`pop.cast`[
        _type = __mlir_type[`!pop.scalar<`, type.value, `>`]
    ](
        __mlir_op.`kgen.param.constant`[
            _type = __mlir_type[`!pop.scalar<index>`],
            value = __mlir_attr[`#pop.simd<0> : !pop.scalar<index>`],
        ]()
    )
    # ...and broadcast it across all lanes.
    return SIMD[type, size] {
        value: __mlir_op.`pop.simd.splat`[
            _type = __mlir_type[`!pop.simd<`, size.value, `, `, type.value, `>`]
        ](zero)
    }
# ===----------------------------------------------------------------------=== #
# SIMD
# ===----------------------------------------------------------------------=== #
@lldb_formatter_wrapping_type
@register_passable("trivial")
struct SIMD[type: DType, size: Int = simdwidthof[type]()](
Absable,
Boolable,
Ceilable,
CeilDivable,
CollectionElement,
Floorable,
Hashable,
Intable,
Powable,
Roundable,
Sized,
Stringable,
Truncable,
Representable,
):
"""Represents a small vector that is backed by a hardware vector element.
SIMD allows a single instruction to be executed across the multiple data
elements of the vector.
Constraints:
The size of the SIMD vector to be positive and a power of 2.
Parameters:
type: The data type of SIMD vector elements.
size: The size of the SIMD vector.
"""
alias _Mask = SIMD[DType.bool, size]
alias element_type = type
var value: __mlir_type[`!pop.simd<`, size.value, `, `, type.value, `>`]
"""The underlying storage for the vector."""
alias MAX = Self(_max_or_inf[type]())
"""Gets the maximum value for the SIMD value, potentially +inf."""
alias MIN = Self(_min_or_neg_inf[type]())
"""Gets the minimum value for the SIMD value, potentially -inf."""
alias MAX_FINITE = Self(_max_finite[type]())
"""Returns the maximum finite value of SIMD value."""
alias MIN_FINITE = Self(_min_finite[type]())
"""Returns the minimum (lowest) finite value of SIMD value."""
    @always_inline("nodebug")
    fn __init__(inout self):
        """Default initializer of the SIMD vector.

        By default the SIMD vectors are initialized to all zeros.
        """
        _simd_construction_checks[type, size]()
        # Zero-fill; the checks above already validated type/size.
        self = _unchecked_zero[type, size]()
    @always_inline("nodebug")
    fn __init__(inout self, value: SIMD[DType.float64, 1]):
        """Initializes the SIMD vector with a float.

        The value is splatted across all the elements of the SIMD
        vector.

        Args:
            value: The input value.
        """
        _simd_construction_checks[type, size]()
        # Convert the f64 scalar to the target element type, then broadcast.
        var casted = __mlir_op.`pop.cast`[
            _type = __mlir_type[`!pop.simd<1,`, type.value, `>`]
        ](value.value)
        var vec = __mlir_op.`pop.simd.splat`[
            _type = __mlir_type[`!pop.simd<`, size.value, `, `, type.value, `>`]
        ](casted)
        self.value = vec
    @always_inline("nodebug")
    fn __init__(inout self, value: Int):
        """Initializes the SIMD vector with an integer.

        The integer value is splatted across all the elements of the SIMD
        vector.

        Args:
            value: The input value.
        """
        _simd_construction_checks[type, size]()
        # Int -> index scalar -> element type, then broadcast to all lanes.
        var t0 = __mlir_op.`pop.cast_from_builtin`[
            _type = __mlir_type.`!pop.scalar<index>`
        ](value.value)
        var casted = __mlir_op.`pop.cast`[
            _type = __mlir_type[`!pop.simd<1,`, type.value, `>`]
        ](t0)
        self.value = __mlir_op.`pop.simd.splat`[
            _type = __mlir_type[`!pop.simd<`, size.value, `, `, type.value, `>`]
        ](casted)
    @always_inline("nodebug")
    fn __init__(inout self, value: IntLiteral):
        """Initializes the SIMD vector with an integer.

        The integer value is splatted across all the elements of the SIMD
        vector.

        Args:
            value: The input value.
        """
        _simd_construction_checks[type, size]()
        # The literal is routed through si128 so that arbitrarily wide
        # compile-time integers survive before narrowing to the element type.
        var tn1 = __mlir_op.`kgen.int_literal.convert`[
            _type = __mlir_type.si128
        ](value.value)
        var t0 = __mlir_op.`pop.cast_from_builtin`[
            _type = __mlir_type.`!pop.scalar<si128>`
        ](tn1)
        var casted = __mlir_op.`pop.cast`[
            _type = __mlir_type[`!pop.simd<1,`, type.value, `>`]
        ](t0)
        self.value = __mlir_op.`pop.simd.splat`[
            _type = __mlir_type[`!pop.simd<`, size.value, `, `, type.value, `>`]
        ](casted)
    @always_inline("nodebug")
    fn __init__(inout self, value: Bool):
        """Initializes the SIMD vector with a bool value.

        The bool value is splatted across all elements of the SIMD vector.

        Args:
            value: The bool value.
        """
        _simd_construction_checks[type, size]()
        # Convert the Bool to a scalar, cast to the element type, broadcast.
        var casted = __mlir_op.`pop.cast`[
            _type = __mlir_type[`!pop.simd<1,`, type.value, `>`]
        ](value._as_scalar_bool())
        self.value = __mlir_op.`pop.simd.splat`[
            _type = __mlir_type[`!pop.simd<`, size.value, `, `, type.value, `>`]
        ](casted)
    @always_inline("nodebug")
    fn __init__(
        inout self,
        value: __mlir_type[`!pop.simd<`, size.value, `, `, type.value, `>`],
    ):
        """Initializes the SIMD vector with the underlying mlir value.

        No conversion is performed; the value is stored as-is.

        Args:
            value: The input value.
        """
        _simd_construction_checks[type, size]()
        self.value = value
    # Construct via a variadic type which has the same number of elements as
    # the SIMD value.
    @always_inline("nodebug")
    fn __init__(inout self, *elems: Scalar[type]):
        """Constructs a SIMD vector via a variadic list of elements.

        If there is just one input value, then it is splatted to all elements
        of the SIMD vector. Otherwise, the input values are assigned to the
        corresponding elements of the SIMD vector.

        Constraints:
            The number of input values is 1 or equal to size of the SIMD
            vector.

        Args:
            elems: The variadic list of elements from which the SIMD vector is
                   constructed.
        """
        _simd_construction_checks[type, size]()
        var num_elements: Int = len(elems)
        if num_elements == 1:
            # Construct by broadcasting a scalar.
            self.value = __mlir_op.`pop.simd.splat`[
                _type = __mlir_type[
                    `!pop.simd<`,
                    size.value,
                    `, `,
                    type.value,
                    `>`,
                ]
            ](elems[0].value)
            return

        # TODO: Make this a compile-time check when possible.
        # NOTE: this is a debug-build assertion only; a mismatched element
        # count is not diagnosed in release builds.
        debug_assert(
            size == num_elements,
            (
                "mismatch in the number of elements in the SIMD variadic"
                " constructor"
            ),
        )

        # Zero-initialize, then fill lane by lane at compile-time-unrolled
        # positions.
        self = Self()

        @parameter
        for i in range(size):
            self[i] = elems[i]
    @always_inline("nodebug")
    fn __init__(inout self, value: FloatLiteral):
        """Initializes the SIMD vector with a float.

        The value is splatted across all the elements of the SIMD
        vector.

        Args:
            value: The input value.
        """
        _simd_construction_checks[type, size]()

        # TODO (#36686): This introduces unneeded casts here to work around
        # parameter if issues.
        # The four branches below are identical except for the intermediate
        # builtin float type the literal is converted through (f16 / bf16 /
        # f32 / f64); the final branch is the f64 fallback for all other
        # element types.
        @parameter
        if type == DType.float16:
            self = SIMD[type, size](
                __mlir_op.`pop.simd.splat`[
                    _type = __mlir_type[
                        `!pop.simd<`, size.value, `,`, type.value, `>`
                    ]
                ](
                    __mlir_op.`pop.cast`[
                        _type = __mlir_type[`!pop.scalar<`, type.value, `>`]
                    ](
                        __mlir_op.`pop.cast_from_builtin`[
                            _type = __mlir_type[`!pop.scalar<f16>`]
                        ](
                            __mlir_op.`kgen.float_literal.convert`[
                                _type = __mlir_type.f16
                            ](value.value)
                        )
                    )
                )
            )
        elif type == DType.bfloat16:
            self = Self(
                __mlir_op.`pop.simd.splat`[
                    _type = __mlir_type[
                        `!pop.simd<`, size.value, `,`, type.value, `>`
                    ]
                ](
                    __mlir_op.`pop.cast`[
                        _type = __mlir_type[`!pop.scalar<`, type.value, `>`]
                    ](
                        __mlir_op.`pop.cast_from_builtin`[
                            _type = __mlir_type[`!pop.scalar<bf16>`]
                        ](
                            __mlir_op.`kgen.float_literal.convert`[
                                _type = __mlir_type.bf16
                            ](value.value)
                        )
                    )
                )
            )
        elif type == DType.float32:
            self = Self(
                __mlir_op.`pop.simd.splat`[
                    _type = __mlir_type[
                        `!pop.simd<`, size.value, `,`, type.value, `>`
                    ]
                ](
                    __mlir_op.`pop.cast`[
                        _type = __mlir_type[`!pop.scalar<`, type.value, `>`]
                    ](
                        __mlir_op.`pop.cast_from_builtin`[
                            _type = __mlir_type[`!pop.scalar<f32>`]
                        ](
                            __mlir_op.`kgen.float_literal.convert`[
                                _type = __mlir_type.f32
                            ](value.value)
                        )
                    )
                )
            )
        else:
            self = Self(
                __mlir_op.`pop.simd.splat`[
                    _type = __mlir_type[
                        `!pop.simd<`, size.value, `,`, type.value, `>`
                    ]
                ](
                    __mlir_op.`pop.cast`[
                        _type = __mlir_type[`!pop.scalar<`, type.value, `>`]
                    ](
                        __mlir_op.`pop.cast_from_builtin`[
                            _type = __mlir_type[`!pop.scalar<f64>`]
                        ](
                            __mlir_op.`kgen.float_literal.convert`[
                                _type = __mlir_type.f64
                            ](value.value)
                        )
                    )
                )
            )
@always_inline("nodebug")
fn __len__(self) -> Int:
"""Gets the length of the SIMD vector.
Returns:
The length of the SIMD vector.
"""
return self.size
    @always_inline("nodebug")
    fn __bool__(self) -> Bool:
        """Converts the SIMD scalar into a boolean value.

        Constraints:
            The size of the SIMD vector must be 1.

        Returns:
            True if the SIMD scalar is non-zero and False otherwise.
        """
        # Restrict truthiness to scalars — a multi-lane vector has no single
        # truth value; callers must reduce with `any()`/`all()` first.
        constrained[
            size == 1,
            (
                "The truth value of a SIMD vector with more than one element is"
                " ambiguous. Use the builtin `any()` or `all()` functions"
                " instead."
            ),
        ]()
        return rebind[Scalar[DType.bool]](self.cast[DType.bool]()).value
    @staticmethod
    @always_inline("nodebug")
    fn splat(x: Scalar[type]) -> Self:
        """Splats (broadcasts) the element onto the vector.

        Args:
            x: The input scalar value.

        Returns:
            A new SIMD vector whose elements are the same as the input value.
        """
        _simd_construction_checks[type, size]()
        # Single broadcast instruction; no per-lane work.
        return Self {
            value: __mlir_op.`pop.simd.splat`[
                _type = __mlir_type[
                    `!pop.simd<`, size.value, `, `, type.value, `>`
                ]
            ](x.value)
        }
    @always_inline("nodebug")
    fn cast[target: DType](self) -> SIMD[target, size]:
        """Casts the elements of the SIMD vector to the target element type.

        Parameters:
            target: The target DType.

        Returns:
            A new SIMD vector whose elements have been casted to the target
            element type.
        """

        # Identity cast: nothing to do.
        @parameter
        if type == target:
            return rebind[SIMD[target, size]](self)

        @parameter
        if has_neon() and (type == DType.bfloat16 or target == DType.bfloat16):
            # BF16 support on neon systems is not supported.
            # NOTE(review): this silently returns zeros rather than erroring;
            # construction checks normally reject bf16 on neon earlier.
            return _unchecked_zero[target, size]()

        # Special-case the conversions that `pop.cast` does not model:
        # bool <-> numeric, bfloat16 (via f32), and address <-> integer.
        @parameter
        if type == DType.bool:
            return self.select(SIMD[target, size](1), SIMD[target, size](0))
        elif target == DType.bool:
            return rebind[SIMD[target, size]](self != 0)
        elif type == DType.bfloat16:
            var cast_result = _bfloat16_to_f32(
                rebind[SIMD[DType.bfloat16, size]](self)
            ).cast[target]()
            return rebind[SIMD[target, size]](cast_result)
        elif target == DType.bfloat16:
            return rebind[SIMD[target, size]](
                _f32_to_bfloat16(self.cast[DType.float32]())
            )
        elif target == DType.address:
            var index_val = __mlir_op.`pop.cast`[
                _type = __mlir_type[`!pop.simd<`, size.value, `, index>`]
            ](self.value)
            var tmp = SIMD[DType.address, size](
                __mlir_op.`pop.index_to_pointer`[
                    _type = __mlir_type[
                        `!pop.simd<`,
                        size.value,
                        `, address >`,
                    ]
                ](index_val)
            )
            return rebind[SIMD[target, size]](tmp)
        elif (type == DType.address) and target.is_integral():
            var index_tmp = SIMD[DType.index, size](
                __mlir_op.`pop.pointer_to_index`[
                    _type = __mlir_type[
                        `!pop.simd<`,
                        size.value,
                        `, `,
                        DType.index.value,
                        `>`,
                    ]
                ](
                    rebind[
                        __mlir_type[
                            `!pop.simd<`,
                            size.value,
                            `, address >`,
                        ]
                    ](self.value)
                )
            )
            return index_tmp.cast[target]()
        else:
            # General numeric-to-numeric conversion.
            return __mlir_op.`pop.cast`[
                _type = __mlir_type[
                    `!pop.simd<`,
                    size.value,
                    `, `,
                    target.value,
                    `>`,
                ]
            ](self.value)
    @always_inline("nodebug")
    fn __int__(self) -> Int:
        """Casts to the value to an Int. If there is a fractional component,
        then the fractional part is truncated.

        Constraints:
            The size of the SIMD vector must be 1.

        Returns:
            The value as an integer.
        """
        constrained[size == 1, "expected a scalar type"]()
        # Cast the single lane to the platform index type.
        return __mlir_op.`pop.cast`[_type = __mlir_type.`!pop.scalar<index>`](
            rebind[Scalar[type]](self).value
        )
    @always_inline
    fn __str__(self) -> String:
        """Get the SIMD as a string.

        Returns:
            A string representation.
        """
        # Delegates to format_to() via the Formattable machinery.
        return String.format_sequence(self)
    @always_inline
    fn __repr__(self) -> String:
        """Get the representation of the SIMD value e.g. "SIMD[DType.int8, 2](1, 2)".

        Returns:
            The representation of the SIMD value.
        """
        var output = String()
        var writer = output._unsafe_to_formatter()
        self.format_to[use_scientific_notation=True](writer)

        var values = output.as_string_slice()

        @parameter
        if size > 1:
            # Strip the surrounding `[` / `]` that format_to emits for
            # multi-element vectors, so the repr reads as a call expression.
            # TODO: Fix when slice indexing is implemented on StringSlice
            values = StringSlice(unsafe_from_utf8=output.as_bytes_slice()[1:-1])

        return (
            "SIMD[" + type.__repr__() + ", " + str(size) + "](" + values + ")"
        )
    @always_inline
    fn format_to(self, inout writer: Formatter):
        """
        Formats this SIMD value to the provided formatter.

        Args:
            writer: The formatter to write to.
        """
        self.format_to[use_scientific_notation=False](writer)

    # This overload keeps SIMD compliant with the Formattable trait; without
    # it, the call to `String.format_sequence(self)` in SIMD.__str__ would
    # fail to compile.
    fn format_to[use_scientific_notation: Bool](self, inout writer: Formatter):
        """
        Formats this SIMD value to the provided formatter.

        Parameters:
            use_scientific_notation: Whether floats should use scientific notation.
                This parameter does not apply to integer types.

        Args:
            writer: The formatter to write to.
        """

        # Print an opening `[`.
        @parameter
        if size > 1:
            writer.write_str("[")

        # Print each element.
        for i in range(size):
            var element = self[i]
            # Print separators between each element.
            if i != 0:
                writer.write_str(", ")

            @parameter
            if triple_is_nvidia_cuda():

                @parameter
                if type.is_floating_point():
                    # get_dtype_printf_format hardcodes 17 digits of precision.
                    _printf["%g"](element)
                else:
                    # FIXME(MSTDL-406):
                    #   This prints "out of band" with the `Formatter` passed
                    #   in, meaning this will only work if `Formatter` is an
                    #   unbuffered wrapper around printf (which Formatter.stdout
                    #   currently is by default).
                    #
                    #   This is a workaround to permit debug formatting of
                    #   floating-point values on GPU, where printing to stdout
                    #   is the only way the Formatter framework is currently
                    #   used.
                    _printf[_get_dtype_printf_format[type]()](element)
            else:

                @parameter
                if use_scientific_notation and type.is_floating_point():
                    alias float_format = "%." + _scientific_notation_digits[
                        type
                    ]() + "e"
                    _format_scalar[type, float_format](writer, element)
                else:
                    _format_scalar(writer, element)

        # Print a closing `]`.
        @parameter
        if size > 1:
            writer.write_str("]")
    @always_inline("nodebug")
    fn __add__(self, rhs: Self) -> Self:
        """Computes `self + rhs`.

        Args:
            rhs: The rhs value.

        Returns:
            A new vector whose element at position `i` is computed as
            `self[i] + rhs[i]`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        # Lane-wise addition in a single vector op.
        return __mlir_op.`pop.add`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __sub__(self, rhs: Self) -> Self:
        """Computes `self - rhs`.

        Args:
            rhs: The rhs value.

        Returns:
            A new vector whose element at position `i` is computed as
            `self[i] - rhs[i]`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        # Lane-wise subtraction in a single vector op.
        return __mlir_op.`pop.sub`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __mul__(self, rhs: Self) -> Self:
        """Computes `self * rhs`.

        Args:
            rhs: The rhs value.

        Returns:
            A new vector whose element at position `i` is computed as
            `self[i] * rhs[i]`.
        """

        # For bool vectors, multiplication is logical AND.
        @parameter
        if type == DType.bool:
            return (rebind[Self._Mask](self) & rebind[Self._Mask](rhs)).cast[
                type
            ]()
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        return __mlir_op.`pop.mul`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __truediv__(self, rhs: Self) -> Self:
        """Computes `self / rhs`.

        Args:
            rhs: The rhs value.

        Returns:
            A new vector whose element at position `i` is computed as
            `self[i] / rhs[i]`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        # For integral types this is a truncating division.
        return __mlir_op.`pop.div`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __floordiv__(self, rhs: Self) -> Self:
        """Returns the division of self and rhs rounded down to the nearest
        integer.

        Constraints:
            The element type of the SIMD vector must be numeric.

        Args:
            rhs: The value to divide with.

        Returns:
            `floor(self / rhs)` value.
        """
        constrained[type.is_numeric(), "the type must be numeric"]()

        # NOTE(review): only an all-zero divisor vector takes this early-out;
        # a divisor with some zero lanes still reaches `pop.div` — confirm
        # the intended per-lane behavior.
        if not any(rhs):
            # this should raise an exception.
            return 0

        var div = self / rhs

        @parameter
        if type.is_floating_point():
            return div.__floor__()
        elif type.is_unsigned():
            return div
        else:
            if all((self > 0) & (rhs > 0)):
                return div

            # Truncating division rounded toward zero; subtract 1 from lanes
            # where the operand signs differ and there is a nonzero remainder
            # to get floor semantics.
            var mod = self - div * rhs
            var mask = ((rhs < 0) ^ (self < 0)) & (mod != 0)
            return div - mask.cast[type]()
@always_inline("nodebug")
fn __rfloordiv__(self, rhs: Self) -> Self:
"""Returns the division of rhs and self rounded down to the nearest
integer.
Constraints:
The element type of the SIMD vector must be numeric.
Args:
rhs: The value to divide by self.
Returns:
`floor(rhs / self)` value.
"""
constrained[type.is_numeric(), "the type must be numeric"]()
return rhs // self
    @always_inline("nodebug")
    fn __mod__(self, rhs: Self) -> Self:
        """Returns the remainder of self divided by rhs.

        Args:
            rhs: The value to divide on.

        Returns:
            The remainder of dividing self by rhs.
        """
        constrained[type.is_numeric(), "the type must be numeric"]()

        # NOTE(review): only an all-zero divisor takes this early-out; lanes
        # that are individually zero are not special-cased.
        if not any(rhs):
            # this should raise an exception.
            return 0

        @parameter
        if type.is_unsigned():
            return __mlir_op.`pop.rem`(self.value, rhs.value)
        else:
            var div = self / rhs

            @parameter
            if type.is_floating_point():
                div = llvm_intrinsic["llvm.trunc", Self, has_side_effect=False](
                    div
                )

            # Truncated remainder; where the operand signs differ and the
            # remainder is nonzero, add rhs so the result takes the sign of
            # the divisor (floored-modulo semantics).
            var mod = self - div * rhs
            var mask = ((rhs < 0) ^ (self < 0)) & (mod != 0)
            return mod + mask.select(rhs, Self(0))
@always_inline("nodebug")
fn __rmod__(self, value: Self) -> Self:
"""Returns `value mod self`.
Args:
value: The other value.
Returns:
`value mod self`.
"""
constrained[type.is_numeric(), "the type must be numeric"]()
return value % self
    @always_inline("nodebug")
    fn __pow__(self, exp: Int) -> Self:
        """Computes the vector raised to the power of the input integer value.

        Args:
            exp: The exponent value.

        Returns:
            A SIMD vector where each element is raised to the power of the
            specified exponent value.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        # Same scalar exponent applied to every lane.
        return _pow[type, size, DType.index](self, exp)
    # TODO(#22771): remove this overload.
    @always_inline("nodebug")
    fn __pow__(self, exp: Self) -> Self:
        """Computes the vector raised elementwise to the right hand side power.

        Args:
            exp: The exponent value.

        Returns:
            A SIMD vector where each element is raised to the power of the
            specified exponent value.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        # Per-lane exponentiation: self[i] ** exp[i].
        return _pow(self, exp)
    @always_inline("nodebug")
    fn __lt__(self, rhs: Self) -> Self._Mask:
        """Compares two SIMD vectors using less-than comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            A new bool SIMD vector of the same size whose element at position
            `i` is True or False depending on the expression
            `self[i] < rhs[i]`.
        """
        # Lane-wise compare producing a bool mask of the same width.
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred lt>`](
            self.value, rhs.value
        )
    @always_inline("nodebug")
    fn __le__(self, rhs: Self) -> Self._Mask:
        """Compares two SIMD vectors using less-than-or-equal comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            A new bool SIMD vector of the same size whose element at position
            `i` is True or False depending on the expression
            `self[i] <= rhs[i]`.
        """
        # Lane-wise compare producing a bool mask of the same width.
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred le>`](
            self.value, rhs.value
        )
    @always_inline("nodebug")
    fn __eq__(self, rhs: Self) -> Self._Mask:
        """Compares two SIMD vectors using equal-to comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            A new bool SIMD vector of the same size whose element at position
            `i` is True or False depending on the expression
            `self[i] == rhs[i]`.
        """

        @parameter  # Because of #30525, we roll our own implementation for eq.
        if has_neon() and type == DType.bfloat16:
            # NOTE(review): this is a bitwise compare, so IEEE semantics for
            # NaN and -0.0 vs 0.0 differ from a true float compare — presumed
            # acceptable on neon where bf16 compares are unavailable; confirm.
            var int_self = bitcast[_integral_type_of[type](), size](self)
            var int_rhs = bitcast[_integral_type_of[type](), size](rhs)
            return int_self == int_rhs
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred eq>`](
            self.value, rhs.value
        )
    @always_inline("nodebug")
    fn __ne__(self, rhs: Self) -> Self._Mask:
        """Compares two SIMD vectors using not-equal comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            A new bool SIMD vector of the same size whose element at position
            `i` is True or False depending on the expression
            `self[i] != rhs[i]`.
        """

        @parameter  # Because of #30525, we roll our own implementation for ne.
        if has_neon() and type == DType.bfloat16:
            # NOTE(review): bitwise compare — NaN/-0.0 semantics differ from a
            # true float compare (see __eq__); confirm this is intended.
            var int_self = bitcast[_integral_type_of[type](), size](self)
            var int_rhs = bitcast[_integral_type_of[type](), size](rhs)
            return int_self != int_rhs
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred ne>`](
            self.value, rhs.value
        )
    @always_inline("nodebug")
    fn __gt__(self, rhs: Self) -> Self._Mask:
        """Compares two SIMD vectors using greater-than comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            A new bool SIMD vector of the same size whose element at position
            `i` is True or False depending on the expression
            `self[i] > rhs[i]`.
        """
        # Lane-wise compare producing a bool mask of the same width.
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred gt>`](
            self.value, rhs.value
        )
    @always_inline("nodebug")
    fn __ge__(self, rhs: Self) -> Self._Mask:
        """Compares two SIMD vectors using greater-than-or-equal comparison.

        Args:
            rhs: The rhs of the operation.

        Returns:
            A new bool SIMD vector of the same size whose element at position
            `i` is True or False depending on the expression
            `self[i] >= rhs[i]`.
        """
        # Lane-wise compare producing a bool mask of the same width.
        return __mlir_op.`pop.cmp`[pred = __mlir_attr.`#pop<cmp_pred ge>`](
            self.value, rhs.value
        )
# ===------------------------------------------------------------------=== #
# Unary operations.
# ===------------------------------------------------------------------=== #
    @always_inline("nodebug")
    fn __pos__(self) -> Self:
        """Defines the unary `+` operation.

        Returns:
            This SIMD vector.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        # Unary plus is the identity for numeric vectors.
        return self
    @always_inline("nodebug")
    fn __neg__(self) -> Self:
        """Defines the unary `-` operation.

        Returns:
            The negation of this SIMD vector.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        # Lane-wise negation in a single vector op.
        return __mlir_op.`pop.neg`(self.value)
    @always_inline
    fn _bits_to_float[dest_type: DType](self) -> SIMD[dest_type, size]:
        """Bitcasts the integer value to a floating-point value.

        Parameters:
            dest_type: DType to bitcast the input SIMD vector to.

        Returns:
            A floating-point representation of the integer value.
        """
        # Widen/narrow to the float type's same-width integer first so the
        # bitcast is between types of equal bit width.
        alias integral_type = FPUtils[type].integral_type
        return bitcast[dest_type, size](self.cast[integral_type]())
@always_inline
fn _float_to_bits[dest_type: DType](self) -> SIMD[dest_type, size]:
"""Bitcasts the floating-point value to an integer value.
Parameters:
dest_type: DType to bitcast the input SIMD vector to.
Returns:
An integer representation of the floating-point value.
"""
alias integral_type = FPUtils[type].integral_type
var v = bitcast[integral_type, size](self)
return v.cast[dest_type]()
    @always_inline
    fn __abs__(self) -> Self:
        """Defines the absolute value operation.

        Returns:
            The absolute value of this SIMD vector.
        """

        @parameter
        if type.is_unsigned() or type.is_bool():
            # Unsigned/bool values are their own absolute value.
            return self
        elif type.is_floating_point():
            # Clear the sign bit via integer masking — branch-free and
            # preserves NaN payloads.
            alias integral_type = FPUtils[type].integral_type
            var m = self._float_to_bits[integral_type]()
            return (m & (FPUtils[type].sign_mask() - 1))._bits_to_float[type]()
        else:
            # Signed integers: select the negated value for negative lanes.
            return (self < 0).select(-self, self)
    fn _floor_ceil_trunc_impl[intrinsic: StringLiteral](self) -> Self:
        """Shared implementation for floor/ceil/trunc: dispatches to the named
        LLVM rounding intrinsic.

        Bool/integral vectors are returned unchanged; bf16 on neon is routed
        through float32 since the bf16 intrinsics are unavailable there.

        Parameters:
            intrinsic: One of "llvm.floor", "llvm.ceil", "llvm.trunc".
        """
        constrained[
            intrinsic == "llvm.floor"
            or intrinsic == "llvm.ceil"
            or intrinsic == "llvm.trunc",
            "unsupported intrinsic",
        ]()

        @parameter
        if type.is_bool() or type.is_integral():
            return self

        @parameter
        if has_neon() and type == DType.bfloat16:
            return (
                self.cast[DType.float32]()
                ._floor_ceil_trunc_impl[intrinsic]()
                .cast[type]()
            )

        return llvm_intrinsic[intrinsic, Self, has_side_effect=False](self)
@always_inline("nodebug")
fn __floor__(self) -> Self:
"""Performs elementwise floor on the elements of a SIMD vector.
Returns:
The elementwise floor of this SIMD vector.
"""
return self._floor_ceil_trunc_impl["llvm.floor"]()
@always_inline("nodebug")
fn __ceil__(self) -> Self:
"""Performs elementwise ceiling on the elements of a SIMD vector.
Returns:
The elementwise ceiling of this SIMD vector.
"""
return self._floor_ceil_trunc_impl["llvm.ceil"]()
@always_inline("nodebug")
fn __trunc__(self) -> Self:
"""Performs elementwise truncation on the elements of a SIMD vector.
Returns:
The elementwise truncated values of this SIMD vector.
"""
return self._floor_ceil_trunc_impl["llvm.trunc"]()
fn clamp(self, lower_bound: Self, upper_bound: Self) -> Self:
"""Clamps the values in a SIMD vector to be in a certain range.
Clamp cuts values in the input SIMD vector off at the upper bound and
lower bound values. For example, SIMD vector `[0, 1, 2, 3]` clamped to
a lower bound of 1 and an upper bound of 2 would return `[1, 1, 2, 2]`.
Args:
lower_bound: Minimum of the range to clamp to.
upper_bound: Maximum of the range to clamp to.
Returns:
A new SIMD vector containing x clamped to be within lower_bound and
upper_bound.
"""
return self.min(upper_bound).max(lower_bound)
    @always_inline("nodebug")
    fn roundeven(self) -> Self:
        """Performs elementwise banker's rounding on the elements of a SIMD
        vector.

        This rounding goes to the nearest integer with ties toward the nearest
        even integer.

        Returns:
            The elementwise banker's rounding of this SIMD vector.
        """
        return llvm_intrinsic["llvm.roundeven", Self, has_side_effect=False](
            self
        )
    @always_inline("nodebug")
    fn __round__(self) -> Self:
        """Performs elementwise rounding on the elements of a SIMD vector.

        This rounding goes to the nearest integer with ties away from zero.

        Returns:
            The elementwise rounded value of this SIMD vector.
        """
        return llvm_intrinsic["llvm.round", Self, has_side_effect=False](self)
    @always_inline("nodebug")
    fn __round__(self, ndigits: Int) -> Self:
        """Performs elementwise rounding on the elements of a SIMD vector.

        This rounding goes to the nearest integer with ties away from zero.

        NOTE(review): `ndigits` is currently ignored — the value is rounded
        to the nearest integer regardless of the requested digit count.

        Args:
            ndigits: The number of digits to round to.

        Returns:
            The elementwise rounded value of this SIMD vector.
        """
        # TODO: see how can we implement this.
        return llvm_intrinsic["llvm.round", Self, has_side_effect=False](self)
# ===------------------------------------------------------------------=== #
# In place operations.
# ===------------------------------------------------------------------=== #
@always_inline("nodebug")
fn __iadd__(inout self, rhs: Self):
"""Performs in-place addition.
The vector is mutated where each element at position `i` is computed as
`self[i] + rhs[i]`.
Args:
rhs: The rhs of the addition operation.
"""
constrained[type.is_numeric(), "the SIMD type must be numeric"]()
self = self + rhs
@always_inline("nodebug")
fn __isub__(inout self, rhs: Self):
"""Performs in-place subtraction.
The vector is mutated where each element at position `i` is computed as
`self[i] - rhs[i]`.
Args:
rhs: The rhs of the operation.
"""
constrained[type.is_numeric(), "the SIMD type must be numeric"]()
self = self - rhs
@always_inline("nodebug")
fn __imul__(inout self, rhs: Self):
"""Performs in-place multiplication.
The vector is mutated where each element at position `i` is computed as
`self[i] * rhs[i]`.
Args:
rhs: The rhs of the operation.
"""
constrained[type.is_numeric(), "the SIMD type must be numeric"]()
self = self * rhs
@always_inline("nodebug")
fn __itruediv__(inout self, rhs: Self):
"""In-place true divide operator.
The vector is mutated where each element at position `i` is computed as
`self[i] / rhs[i]`.
Args:
rhs: The rhs of the operation.
"""
constrained[type.is_numeric(), "the SIMD type must be numeric"]()
self = self / rhs
@always_inline("nodebug")
fn __ifloordiv__(inout self, rhs: Self):
"""In-place flood div operator.
The vector is mutated where each element at position `i` is computed as
`self[i] // rhs[i]`.
Args:
rhs: The rhs of the operation.
"""
constrained[type.is_numeric(), "the SIMD type must be numeric"]()
self = self // rhs
@always_inline("nodebug")
fn __imod__(inout self, rhs: Self):
"""In-place mod operator.
The vector is mutated where each element at position `i` is computed as
`self[i] % rhs[i]`.
Args:
rhs: The rhs of the operation.
"""
constrained[type.is_numeric(), "the SIMD type must be numeric"]()
self = self.__mod__(rhs)
@always_inline("nodebug")
fn __ipow__(inout self, rhs: Int):
"""In-place pow operator.
The vector is mutated where each element at position `i` is computed as
`pow(self[i], rhs)`.
Args:
rhs: The rhs of the operation.
"""
constrained[type.is_numeric(), "the SIMD type must be numeric"]()
self = self.__pow__(rhs)
# ===------------------------------------------------------------------=== #
# Checked operations
# ===------------------------------------------------------------------=== #
@always_inline
fn add_with_overflow(self, rhs: Self) -> (Self, Self._Mask):
"""Computes `self + rhs` and a mask of which indices overflowed.
Args:
rhs: The rhs value.
Returns:
A tuple with the results of the operation and a mask for overflows.
The first is a new vector whose element at position `i` is computed
as `self[i] + rhs[i]`. The second item is a vector of booleans where
a `1` at position `i` represents `self[i] + rhs[i]` overflowed.
"""
constrained[type.is_integral()]()
@parameter
if type.is_signed():
var result = llvm_intrinsic[
"llvm.sadd.with.overflow",
_RegisterPackType[Self, Self._Mask],
Self,
Self,
](self, rhs)
return (result[0], result[1])
else:
var result = llvm_intrinsic[
"llvm.uadd.with.overflow",
_RegisterPackType[Self, Self._Mask],
Self,
Self,
](self, rhs)
return (result[0], result[1])
    @always_inline
    fn sub_with_overflow(self, rhs: Self) -> (Self, Self._Mask):
        """Computes `self - rhs` and a mask of which indices overflowed.
        Args:
            rhs: The rhs value.
        Returns:
            A tuple with the results of the operation and a mask for overflows.
            The first is a new vector whose element at position `i` is computed
            as `self[i] - rhs[i]`. The second item is a vector of booleans where
            a `1` at position `i` represents `self[i] - rhs[i]` overflowed.
        """
        constrained[type.is_integral()]()
        # Signedness picks between the ssub/usub LLVM intrinsics; both return
        # a pack of (wrapped difference, per-lane overflow mask).
        @parameter
        if type.is_signed():
            var result = llvm_intrinsic[
                "llvm.ssub.with.overflow",
                _RegisterPackType[Self, Self._Mask],
                Self,
                Self,
            ](self, rhs)
            return (result[0], result[1])
        else:
            var result = llvm_intrinsic[
                "llvm.usub.with.overflow",
                _RegisterPackType[Self, Self._Mask],
                Self,
                Self,
            ](self, rhs)
            return (result[0], result[1])
    @always_inline
    fn mul_with_overflow(self, rhs: Self) -> (Self, Self._Mask):
        """Computes `self * rhs` and a mask of which indices overflowed.
        Args:
            rhs: The rhs value.
        Returns:
            A tuple with the results of the operation and a mask for overflows.
            The first is a new vector whose element at position `i` is computed
            as `self[i] * rhs[i]`. The second item is a vector of booleans where
            a `1` at position `i` represents `self[i] * rhs[i]` overflowed.
        """
        constrained[type.is_integral()]()
        # Same structure as add/sub_with_overflow, using the smul/umul
        # overflow intrinsics.
        @parameter
        if type.is_signed():
            var result = llvm_intrinsic[
                "llvm.smul.with.overflow",
                _RegisterPackType[Self, Self._Mask],
                Self,
                Self,
            ](self, rhs)
            return (result[0], result[1])
        else:
            var result = llvm_intrinsic[
                "llvm.umul.with.overflow",
                _RegisterPackType[Self, Self._Mask],
                Self,
                Self,
            ](self, rhs)
            return (result[0], result[1])
    # ===------------------------------------------------------------------=== #
    # Reversed operations
    # ===------------------------------------------------------------------=== #
    # Each reflected operator simply re-invokes the forward operator with the
    # operand order swapped.
    @always_inline("nodebug")
    fn __radd__(self, value: Self) -> Self:
        """Returns `value + self`.
        Args:
            value: The other value.
        Returns:
            `value + self`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        return value + self
    @always_inline("nodebug")
    fn __rsub__(self, value: Self) -> Self:
        """Returns `value - self`.
        Args:
            value: The other value.
        Returns:
            `value - self`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        return value - self
    @always_inline("nodebug")
    fn __rmul__(self, value: Self) -> Self:
        """Returns `value * self`.
        Args:
            value: The other value.
        Returns:
            `value * self`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        return value * self
    @always_inline("nodebug")
    fn __rtruediv__(self, value: Self) -> Self:
        """Returns `value / self`.
        Args:
            value: The other value.
        Returns:
            `value / self`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        return value / self
    # TODO: Move to global function.
    @always_inline("nodebug")
    fn fma(self, multiplier: Self, accumulator: Self) -> Self:
        """Performs a fused multiply-add operation, i.e.
        `self*multiplier + accumulator`.
        Args:
            multiplier: The value to multiply.
            accumulator: The value to accumulate.
        Returns:
            A new vector whose element at position `i` is computed as
            `self[i]*multiplier[i] + accumulator[i]`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        # Lowers to a single pop.fma op rather than separate mul + add.
        return __mlir_op.`pop.fma`(
            self.value, multiplier.value, accumulator.value
        )
    # ===------------------------------------------------------------------=== #
    # Bitwise operations
    # ===------------------------------------------------------------------=== #
    # The base operators lower directly to pop.and / pop.xor / pop.or; the
    # in-place (`__iand__` etc.) and reflected (`__rand__` etc.) variants
    # delegate to the base operator.
    @always_inline("nodebug")
    fn __and__(self, rhs: Self) -> Self:
        """Returns `self & rhs`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            rhs: The RHS value.
        Returns:
            `self & rhs`.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        return __mlir_op.`pop.and`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __iand__(inout self, rhs: Self):
        """Computes `self & rhs` and save the result in `self`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            rhs: The RHS value.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        self = self & rhs
    @always_inline("nodebug")
    fn __rand__(self, value: Self) -> Self:
        """Returns `value & self`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            value: The other value.
        Returns:
            `value & self`.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        return value & self
    @always_inline("nodebug")
    fn __xor__(self, rhs: Self) -> Self:
        """Returns `self ^ rhs`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            rhs: The RHS value.
        Returns:
            `self ^ rhs`.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        return __mlir_op.`pop.xor`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __ixor__(inout self, rhs: Self):
        """Computes `self ^ rhs` and save the result in `self`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            rhs: The RHS value.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        self = self ^ rhs
    @always_inline("nodebug")
    fn __rxor__(self, value: Self) -> Self:
        """Returns `value ^ self`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            value: The other value.
        Returns:
            `value ^ self`.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        return value ^ self
    @always_inline("nodebug")
    fn __or__(self, rhs: Self) -> Self:
        """Returns `self | rhs`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            rhs: The RHS value.
        Returns:
            `self | rhs`.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        return __mlir_op.`pop.or`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __ior__(inout self, rhs: Self):
        """Computes `self | rhs` and save the result in `self`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            rhs: The RHS value.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        self = self | rhs
    @always_inline("nodebug")
    fn __ror__(self, value: Self) -> Self:
        """Returns `value | self`.
        Constraints:
            The element type of the SIMD vector must be bool or integral.
        Args:
            value: The other value.
        Returns:
            `value | self`.
        """
        constrained[
            type.is_integral() or type.is_bool(),
            "must be an integral or bool type",
        ]()
        return value | self
@always_inline("nodebug")
fn __invert__(self) -> Self:
"""Returns `~self`.
Constraints:
The element type of the SIMD vector must be boolean or integral.
Returns:
The `~self` value.
"""
constrained[
type.is_bool() or type.is_integral(),
"must be an bool or integral type",
]()
@parameter
if type.is_bool():
return self.select(Self(False), Self(True))
else:
return self ^ -1
    # ===------------------------------------------------------------------=== #
    # Shift operations
    # ===------------------------------------------------------------------=== #
    @always_inline("nodebug")
    fn __lshift__(self, rhs: Self) -> Self:
        """Returns `self << rhs`.
        Constraints:
            The element type of the SIMD vector must be integral.
        Args:
            rhs: The RHS value.
        Returns:
            `self << rhs`.
        """
        constrained[type.is_integral(), "must be an integral type"]()
        # Negative shift amounts are undefined; only checked in debug builds.
        debug_assert(all(rhs >= 0), "unhandled negative value")
        return __mlir_op.`pop.shl`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __rshift__(self, rhs: Self) -> Self:
        """Returns `self >> rhs`.
        Constraints:
            The element type of the SIMD vector must be integral.
        Args:
            rhs: The RHS value.
        Returns:
            `self >> rhs`.
        """
        constrained[type.is_integral(), "must be an integral type"]()
        # Negative shift amounts are undefined; only checked in debug builds.
        debug_assert(all(rhs >= 0), "unhandled negative value")
        return __mlir_op.`pop.shr`(self.value, rhs.value)
    @always_inline("nodebug")
    fn __ilshift__(inout self, rhs: Self):
        """Computes `self << rhs` and save the result in `self`.
        Constraints:
            The element type of the SIMD vector must be integral.
        Args:
            rhs: The RHS value.
        """
        constrained[type.is_integral(), "must be an integral type"]()
        self = self << rhs
    @always_inline("nodebug")
    fn __irshift__(inout self, rhs: Self):
        """Computes `self >> rhs` and save the result in `self`.
        Constraints:
            The element type of the SIMD vector must be integral.
        Args:
            rhs: The RHS value.
        """
        constrained[type.is_integral(), "must be an integral type"]()
        self = self >> rhs
    @always_inline("nodebug")
    fn __rlshift__(self, value: Self) -> Self:
        """Returns `value << self`.
        Constraints:
            The element type of the SIMD vector must be integral.
        Args:
            value: The other value.
        Returns:
            `value << self`.
        """
        constrained[type.is_integral(), "must be an integral type"]()
        return value << self
    @always_inline("nodebug")
    fn __rrshift__(self, value: Self) -> Self:
        """Returns `value >> self`.
        Constraints:
            The element type of the SIMD vector must be integral.
        Args:
            value: The other value.
        Returns:
            `value >> self`.
        """
        constrained[type.is_integral(), "must be an integral type"]()
        return value >> self
    # ===------------------------------------------------------------------=== #
    # Shuffle operations
    # ===------------------------------------------------------------------=== #
    @always_inline("nodebug")
    fn _shuffle_list[
        *mask: Int, output_size: Int = size
    ](self, other: Self) -> SIMD[type, output_size]:
        """Shuffles (also called blend) the values of the current vector with
        the `other` value using the specified mask (permutation). The mask
        values must be within `2 * len(self)`.
        Parameters:
            mask: The permutation to use in the shuffle.
            output_size: The size of the output vector.
        Args:
            other: The other vector to shuffle with.
        Returns:
            A new vector with the same length as the mask where the value at
            position `i` is `(self + other)[permutation[i]]`.
        """
        @parameter
        fn variadic_len[*mask: Int]() -> Int:
            # Number of indices supplied in the variadic mask.
            return __mlir_op.`pop.variadic.size`(mask)
        @parameter
        fn _convert_variadic_to_pop_array[
            *mask: Int
        ]() -> __mlir_type[`!pop.array<`, output_size.value, `, `, Int, `>`]:
            # Materialize the variadic mask into a !pop.array, validating each
            # index against the concatenated (self + other) range at compile
            # time.
            var array = __mlir_op.`kgen.undef`[
                _type = __mlir_type[
                    `!pop.array<`, output_size.value, `, `, Int, `>`
                ]
            ]()
            @parameter
            for idx in range(output_size):
                alias val = mask[idx]
                constrained[
                    0 <= val < 2 * size,
                    "invalid index in the shuffle operation",
                ]()
                var ptr = __mlir_op.`pop.array.gep`(
                    UnsafePointer.address_of(array).address, idx.value
                )
                __mlir_op.`pop.store`(val, ptr)
            return array
        alias length = variadic_len[mask]()
        constrained[
            output_size == length,
            "size of the mask must match the output SIMD size",
        ]()
        return __mlir_op.`pop.simd.shuffle`[
            mask = _convert_variadic_to_pop_array[mask](),
            _type = __mlir_type[
                `!pop.simd<`, output_size.value, `, `, type.value, `>`
            ],
        ](self.value, other.value)
    @always_inline("nodebug")
    fn _shuffle_list[
        output_size: Int, mask: StaticIntTuple[output_size]
    ](self, other: Self) -> SIMD[type, output_size]:
        """Shuffles (also called blend) the values of the current vector with
        the `other` value using the specified mask (permutation). The mask
        values must be within `2 * len(self)`.
        Parameters:
            output_size: The size of the output vector.
            mask: The permutation to use in the shuffle.
        Args:
            other: The other vector to shuffle with.
        Returns:
            A new vector with the same length as the mask where the value at
            position `i` is `(self + other)[permutation[i]]`.
        """
        # Same operation as the variadic overload, but the mask tuple can be
        # handed to pop.simd.shuffle directly.
        @parameter
        for i in range(output_size):
            constrained[
                0 <= mask[i] < 2 * size,
                "invalid index in the shuffle operation",
            ]()
        return __mlir_op.`pop.simd.shuffle`[
            mask = mask.data.array,
            _type = __mlir_type[
                `!pop.simd<`, output_size.value, `, `, type.value, `>`
            ],
        ](self.value, other.value)
    # Public shuffle overloads; all four forward to _shuffle_list. The
    # single-argument forms shuffle the vector with itself.
    @always_inline("nodebug")
    fn shuffle[*mask: Int](self) -> Self:
        """Shuffles (also called blend) the values of the current vector with
        the `other` value using the specified mask (permutation). The mask
        values must be within `2 * len(self)`.
        Parameters:
            mask: The permutation to use in the shuffle.
        Returns:
            A new vector with the same length as the mask where the value at
            position `i` is `(self)[permutation[i]]`.
        """
        return self._shuffle_list[mask](self)
    @always_inline("nodebug")
    fn shuffle[*mask: Int](self, other: Self) -> Self:
        """Shuffles (also called blend) the values of the current vector with
        the `other` value using the specified mask (permutation). The mask
        values must be within `2 * len(self)`.
        Parameters:
            mask: The permutation to use in the shuffle.
        Args:
            other: The other vector to shuffle with.
        Returns:
            A new vector with the same length as the mask where the value at
            position `i` is `(self + other)[permutation[i]]`.
        """
        return self._shuffle_list[mask](other)
    @always_inline("nodebug")
    fn shuffle[mask: StaticIntTuple[size]](self) -> Self:
        """Shuffles (also called blend) the values of the current vector with
        the `other` value using the specified mask (permutation). The mask
        values must be within `2 * len(self)`.
        Parameters:
            mask: The permutation to use in the shuffle.
        Returns:
            A new vector with the same length as the mask where the value at
            position `i` is `(self)[permutation[i]]`.
        """
        return self._shuffle_list[size, mask](self)
    @always_inline("nodebug")
    fn shuffle[mask: StaticIntTuple[size]](self, other: Self) -> Self:
        """Shuffles (also called blend) the values of the current vector with
        the `other` value using the specified mask (permutation). The mask
        values must be within `2 * len(self)`.
        Parameters:
            mask: The permutation to use in the shuffle.
        Args:
            other: The other vector to shuffle with.
        Returns:
            A new vector with the same length as the mask where the value at
            position `i` is `(self + other)[permutation[i]]`.
        """
        return self._shuffle_list[size, mask](other)
    # ===------------------------------------------------------------------=== #
    # Indexing operations
    # ===------------------------------------------------------------------=== #
    @always_inline("nodebug")
    fn __getitem__(self, idx: Int) -> Scalar[type]:
        """Gets an element from the vector.
        Args:
            idx: The element index.
        Returns:
            The value at position `idx`.
        """
        return __mlir_op.`pop.simd.extractelement`[
            _type = __mlir_type[`!pop.scalar<`, type.value, `>`]
        ](self.value, index(idx).value)
    @always_inline("nodebug")
    fn __setitem__(inout self, idx: Int, val: Scalar[type]):
        """Sets an element in the vector.
        Args:
            idx: The index to set.
            val: The value to set.
        """
        self.value = __mlir_op.`pop.simd.insertelement`(
            self.value, val.value, index(idx).value
        )
    @always_inline("nodebug")
    fn __setitem__(
        inout self, idx: Int, val: __mlir_type[`!pop.scalar<`, type.value, `>`]
    ):
        """Sets an element in the vector.
        Args:
            idx: The index to set.
            val: The value to set.
        """
        # Overload taking a raw !pop.scalar value directly, avoiding a wrap
        # into Scalar[type].
        self.value = __mlir_op.`pop.simd.insertelement`(
            self.value, val, index(idx).value
        )
    fn __hash__(self) -> Int:
        """Hash the value using builtin hash.
        Returns:
            A 64-bit hash value. This value is _not_ suitable for cryptographic
            uses. Its intended usage is for data structures. See the `hash`
            builtin documentation for more details.
        """
        return _hash_simd(self)
    @always_inline("nodebug")
    fn slice[
        output_width: Int, /, *, offset: Int = 0
    ](self) -> SIMD[type, output_width]:
        """Returns a slice of the vector of the specified width with the given
        offset.
        Constraints:
            `output_width + offset` must not exceed the size of this SIMD
            vector.
        Parameters:
            output_width: The output SIMD vector size.
            offset: The given offset for the slice.
        Returns:
            A new vector whose elements map to
            `self[offset:offset+output_width]`.
        """
        constrained[
            0 < output_width + offset <= size,
            "output width must be a positive integer less than simd size",
        ]()
        @parameter
        if output_width == 1:
            return self[offset]
        # llvm.vector.extract requires the offset to be aligned to the native
        # SIMD width; otherwise fall back to an element-by-element copy.
        @parameter
        if offset % simdwidthof[type]():
            var tmp = SIMD[type, output_width]()
            @parameter
            for i in range(output_width):
                tmp[i] = self[i + offset]
            return tmp
        return llvm_intrinsic[
            "llvm.vector.extract",
            SIMD[type, output_width],
            has_side_effect=False,
        ](self, offset)
    @always_inline("nodebug")
    fn insert[*, offset: Int = 0](self, value: SIMD[type, _]) -> Self:
        """Returns a the vector where the elements between `offset` and
        `offset + input_width` have been replaced with the elements in `value`.
        Parameters:
            offset: The offset to insert at.
        Args:
            value: The value to be inserted.
        Returns:
            A new vector whose elements at `self[offset:offset+input_width]`
            contain the values of `value`.
        """
        alias input_width = value.size
        constrained[
            0 < input_width + offset <= size,
            "insertion position must not exceed the size of the vector",
        ]()
        @parameter
        if size == 1:
            constrained[
                input_width == 1, "the input width must be 1 if the size is 1"
            ]()
            return rebind[Self](value)
        # You cannot insert into a SIMD value at positions that are not a
        # multiple of the SIMD width via the `llvm.vector.insert` intrinsic,
        # so resort to a for loop. Note that this can be made more intelligent
        # by dividing the problem into the offset, offset+val, val+input_width
        # where val is a value to align the offset to the simdwidth.
        @parameter
        if offset % simdwidthof[type]():
            var tmp = self
            @parameter
            for i in range(input_width):
                tmp[i + offset] = value[i]
            return tmp
        return llvm_intrinsic[
            "llvm.vector.insert", Self, has_side_effect=False
        ](self, value, offset)
    @always_inline("nodebug")
    fn join(self, other: Self) -> SIMD[type, 2 * size]:
        """Concatenates the two vectors together.
        Args:
            other: The other SIMD vector.
        Returns:
            A new vector `self_0, self_1, ..., self_n, other_0, ..., other_n`.
        """
        @always_inline
        @parameter
        fn build_indices() -> StaticIntTuple[2 * size]:
            # Identity permutation 0..2*size-1 over the concatenated pair:
            # a shuffle with it yields plain concatenation.
            var indices = StaticIntTuple[2 * size]()
            @parameter
            for i in range(2 * size):
                indices[i] = i
            return indices
        return self._shuffle_list[2 * size, build_indices()](other)
    @always_inline("nodebug")
    fn interleave(self, other: Self) -> SIMD[type, 2 * size]:
        """Constructs a vector by interleaving two input vectors.
        Args:
            other: The other SIMD vector.
        Returns:
            A new vector `self_0, other_0, ..., self_n, other_n`.
        """
        # Scalars have nothing to interleave lane-wise; just pair them.
        @parameter
        if size == 1:
            return SIMD[type, 2 * size](self[0], other[0])
        return llvm_intrinsic[
            "llvm.vector.interleave2",
            SIMD[type, 2 * size],
            has_side_effect=False,
        ](self, other)
    # Convenience alias for the half-width vector returned by deinterleave.
    alias _SIMDHalfType = SIMD[type, size // 2]
    @always_inline("nodebug")
    fn deinterleave(
        self,
    ) -> (Self._SIMDHalfType, Self._SIMDHalfType):
        """Constructs two vectors by deinterleaving the even and odd lanes of
        the vector.
        Constraints:
            The vector size must be greater than 1.
        Returns:
            Two vectors the first of the form `self_0, self_2, ..., self_{n-2}`
            and the other being `self_1, self_3, ..., self_{n-1}`.
        """
        constrained[size > 1, "the vector size must be greater than 1."]()
        # For size 2 the halves are single lanes; avoid the intrinsic.
        @parameter
        if size == 2:
            return (
                rebind[Self._SIMDHalfType](self[0]),
                rebind[Self._SIMDHalfType](self[1]),
            )
        var res = llvm_intrinsic[
            "llvm.vector.deinterleave2",
            _RegisterPackType[Self._SIMDHalfType, Self._SIMDHalfType],
            has_side_effect=False,
        ](self)
        return (
            rebind[Self._SIMDHalfType](res[0]),
            rebind[Self._SIMDHalfType](res[1]),
        )
    # ===------------------------------------------------------------------=== #
    # Binary operations
    # ===------------------------------------------------------------------=== #
    @always_inline("nodebug")
    fn min(self, other: Self) -> Self:
        """Computes the elementwise minimum between the two vectors.
        Args:
            other: The other SIMD vector.
        Returns:
            A new SIMD vector where each element at position `i` is
            `min(self[i], other[i])`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        return __mlir_op.`pop.min`(self.value, other.value)
    @always_inline("nodebug")
    fn max(self, other: Self) -> Self:
        """Computes the elementwise maximum between the two vectors.
        Args:
            other: The other SIMD vector.
        Returns:
            A new SIMD vector where each element at position `i` is
            `max(self[i], other[i])`.
        """
        constrained[type.is_numeric(), "the SIMD type must be numeric"]()
        return __mlir_op.`pop.max`(self.value, other.value)
    # ===------------------------------------------------------------------=== #
    # Reduce operations
    # ===------------------------------------------------------------------=== #
    @always_inline
    fn reduce[
        func: fn[type: DType, width: Int] (
            SIMD[type, width], SIMD[type, width]
        ) capturing -> SIMD[type, width],
        size_out: Int = 1,
    ](self) -> SIMD[type, size_out]:
        """Reduces the vector using a provided reduce operator.
        Parameters:
            func: The reduce function to apply to elements in this SIMD.
            size_out: The width of the reduction.
        Constraints:
            `size_out` must not exceed width of the vector.
        Returns:
            A new scalar which is the reduction of all vector elements.
        """
        constrained[size_out <= size, "reduction cannot increase simd width"]()
        # Tree reduction: recursively combine the low and high halves with
        # `func` until the width reaches size_out.
        @parameter
        if size == size_out:
            return rebind[SIMD[type, size_out]](self)
        else:
            alias half_size = size // 2
            var lhs = self.slice[half_size, offset=0]()
            var rhs = self.slice[half_size, offset=half_size]()
            return func[type, half_size](lhs, rhs).reduce[func, size_out]()
    @always_inline("nodebug")
    fn reduce_max[size_out: Int = 1](self) -> SIMD[type, size_out]:
        """Reduces the vector using the `max` operator.
        Parameters:
            size_out: The width of the reduction.
        Constraints:
            `size_out` must not exceed width of the vector.
            The element type of the vector must be integer or FP.
        Returns:
            The maximum element of the vector.
        """
        @parameter
        if size == 1:
            return self[0]
        # On x86, or when a vector (size_out > 1) result is requested, use the
        # generic tree reduction instead of the LLVM reduce intrinsics.
        @parameter
        if is_x86() or size_out > 1:
            @always_inline
            @parameter
            fn max_reduce_body[
                type: DType, width: Int
            ](v1: SIMD[type, width], v2: SIMD[type, width]) -> SIMD[
                type, width
            ]:
                return v1.max(v2)
            return self.reduce[max_reduce_body, size_out]()
        # Scalar result path: pick the reduce intrinsic by type class
        # (float / unsigned int / signed int).
        @parameter
        if type.is_floating_point():
            return rebind[SIMD[type, size_out]](
                llvm_intrinsic[
                    "llvm.vector.reduce.fmax",
                    Scalar[type],
                    has_side_effect=False,
                ](self)
            )
        @parameter
        if type.is_unsigned():
            return rebind[SIMD[type, size_out]](
                llvm_intrinsic[
                    "llvm.vector.reduce.umax",
                    Scalar[type],
                    has_side_effect=False,
                ](self)
            )
        return rebind[SIMD[type, size_out]](
            llvm_intrinsic[
                "llvm.vector.reduce.smax", Scalar[type], has_side_effect=False
            ](self)
        )
    @always_inline("nodebug")
    fn reduce_min[size_out: Int = 1](self) -> SIMD[type, size_out]:
        """Reduces the vector using the `min` operator.
        Parameters:
            size_out: The width of the reduction.
        Constraints:
            `size_out` must not exceed width of the vector.
            The element type of the vector must be integer or FP.
        Returns:
            The minimum element of the vector.
        """
        # Mirror image of reduce_max using the min intrinsics.
        @parameter
        if size == 1:
            return self[0]
        @parameter
        if is_x86() or size_out > 1:
            @always_inline
            @parameter
            fn min_reduce_body[
                type: DType, width: Int
            ](v1: SIMD[type, width], v2: SIMD[type, width]) -> SIMD[
                type, width
            ]:
                return v1.min(v2)
            return self.reduce[min_reduce_body, size_out]()
        @parameter
        if type.is_floating_point():
            return rebind[SIMD[type, size_out]](
                llvm_intrinsic[
                    "llvm.vector.reduce.fmin",
                    Scalar[type],
                    has_side_effect=False,
                ](self)
            )
        @parameter
        if type.is_unsigned():
            return rebind[SIMD[type, size_out]](
                llvm_intrinsic[
                    "llvm.vector.reduce.umin",
                    Scalar[type],
                    has_side_effect=False,
                ](self)
            )
        return rebind[SIMD[type, size_out]](
            llvm_intrinsic[
                "llvm.vector.reduce.smin", Scalar[type], has_side_effect=False
            ](self)
        )
    @always_inline
    fn reduce_add[size_out: Int = 1](self) -> SIMD[type, size_out]:
        """Reduces the vector using the `add` operator.
        Parameters:
            size_out: The width of the reduction.
        Constraints:
            `size_out` must not exceed width of the vector.
        Returns:
            The sum of all vector elements.
        """
        @always_inline
        @parameter
        fn add_reduce_body[
            type: DType, width: Int
        ](v1: SIMD[type, width], v2: SIMD[type, width]) -> SIMD[type, width]:
            return v1 + v2
        # Delegates to the generic tree reduction with `+` as the combiner.
        return self.reduce[add_reduce_body, size_out]()
    @always_inline
    fn reduce_mul[size_out: Int = 1](self) -> SIMD[type, size_out]:
        """Reduces the vector using the `mul` operator.
        Parameters:
            size_out: The width of the reduction.
        Constraints:
            `size_out` must not exceed width of the vector.
            The element type of the vector must be integer or FP.
        Returns:
            The product of all vector elements.
        """
        @always_inline
        @parameter
        fn mul_reduce_body[
            type: DType, width: Int
        ](v1: SIMD[type, width], v2: SIMD[type, width]) -> SIMD[type, width]:
            return v1 * v2
        # Delegates to the generic tree reduction with `*` as the combiner.
        return self.reduce[mul_reduce_body, size_out]()
    @always_inline
    fn reduce_and[size_out: Int = 1](self) -> SIMD[type, size_out]:
        """Reduces the vector using the bitwise `&` operator.
        Parameters:
            size_out: The width of the reduction.
        Constraints:
            `size_out` must not exceed width of the vector.
            The element type of the vector must be integer or boolean.
        Returns:
            The reduced vector.
        """
        constrained[
            size_out <= size, "`size_out` must not exceed width of the vector."
        ]()
        constrained[
            type.is_integral() or type.is_bool(),
            "The element type of the vector must be integer or boolean.",
        ]()
        # Vector-width results go through the generic tree reduction.
        @parameter
        if size_out > 1:
            @always_inline
            @parameter
            fn and_reduce_body[
                type: DType, width: Int
            ](v1: SIMD[type, width], v2: SIMD[type, width]) -> SIMD[
                type, width
            ]:
                return v1 & v2
            return self.reduce[and_reduce_body, size_out]()
        @parameter
        if size == 1:
            return rebind[SIMD[type, size_out]](self)
        return llvm_intrinsic[
            "llvm.vector.reduce.and",
            SIMD[type, size_out],
            has_side_effect=False,
        ](self)
    @always_inline
    fn reduce_or[size_out: Int = 1](self) -> SIMD[type, size_out]:
        """Reduces the vector using the bitwise `|` operator.
        Parameters:
            size_out: The width of the reduction.
        Constraints:
            `size_out` must not exceed width of the vector.
            The element type of the vector must be integer or boolean.
        Returns:
            The reduced vector.
        """
        # Mirror image of reduce_and using `|` / llvm.vector.reduce.or.
        constrained[
            size_out <= size, "`size_out` must not exceed width of the vector."
        ]()
        constrained[
            type.is_integral() or type.is_bool(),
            "The element type of the vector must be integer or boolean.",
        ]()
        @parameter
        if size_out > 1:
            @always_inline
            @parameter
            fn or_reduce_body[
                type: DType, width: Int
            ](v1: SIMD[type, width], v2: SIMD[type, width]) -> SIMD[
                type, width
            ]:
                return v1 | v2
            return self.reduce[or_reduce_body, size_out]()
        @parameter
        if size == 1:
            return rebind[SIMD[type, size_out]](self)
        return llvm_intrinsic[
            "llvm.vector.reduce.or", SIMD[type, size_out], has_side_effect=False
        ](self)
    @always_inline
    fn reduce_bit_count(self) -> Int:
        """Returns the total number of bits set in the SIMD vector.
        Constraints:
            Must be either an integral or a boolean type.
        Returns:
            Count of set bits across all elements of the vector.
        """
        @parameter
        if type.is_bool():
            # Each True lane contributes one bit; widen to uint8 and sum.
            return int(self.cast[DType.uint8]().reduce_add())
        else:
            constrained[
                type.is_integral(), "Expected either integral or bool type"
            ]()
            # Per-lane popcount, then sum across lanes.
            return int(pop_count(self).reduce_add())
    # ===------------------------------------------------------------------=== #
    # select
    # ===------------------------------------------------------------------=== #
    # TODO (7748): always_inline required to WAR LLVM codegen bug
    @always_inline("nodebug")
    fn select[
        result_type: DType
    ](
        self,
        true_case: SIMD[result_type, size],
        false_case: SIMD[result_type, size],
    ) -> SIMD[result_type, size]:
        """Selects the values of the `true_case` or the `false_case` based on
        the current boolean values of the SIMD vector.
        Parameters:
            result_type: The element type of the input and output SIMD vectors.
        Args:
            true_case: The values selected if the positional value is True.
            false_case: The values selected if the positional value is False.
        Constraints:
            The element type of the vector must be boolean.
        Returns:
            A new vector of the form
            `[true_case[i] if elem else false_case[i] for i, elem in enumerate(self)]`.
        """
        constrained[type.is_bool(), "the simd dtype must be bool"]()
        # rebind to _Mask reconciles Self (known bool here) with the mask type
        # expected by pop.simd.select.
        return __mlir_op.`pop.simd.select`(
            rebind[Self._Mask](self).value,
            true_case.value,
            false_case.value,
        )
    # ===------------------------------------------------------------------=== #
    # Rotation operations
    # ===------------------------------------------------------------------=== #
    @always_inline
    fn rotate_left[shift: Int](self) -> Self:
        """Shifts the elements of a SIMD vector to the left by `shift`
        elements (with wrap-around).
        Constraints:
            `-size <= shift < size`
        Parameters:
            shift: The number of positions by which to rotate the elements of
                   SIMD vector to the left (with wrap-around).
        Returns:
            The SIMD vector rotated to the left by `shift` elements
            (with wrap-around).
        """
        constrained[
            shift >= -size and shift < size,
            "Constraint: -size <= shift < size",
        ]()
        @parameter
        if size == 1:
            constrained[shift == 0, "for scalars the shift must be 0"]()
            return self
        # Splicing the vector with itself yields a wrap-around rotation.
        return llvm_intrinsic[
            "llvm.vector.splice", Self, has_side_effect=False
        ](self, self, Int32(shift))
    @always_inline
    fn rotate_right[shift: Int](self) -> Self:
        """Shifts the elements of a SIMD vector to the right by `shift`
        elements (with wrap-around).
        Constraints:
            `-size < shift <= size`
        Parameters:
            shift: The number of positions by which to rotate the elements of
                   SIMD vector to the right (with wrap-around).
        Returns:
            The SIMD vector rotated to the right by `shift` elements
            (with wrap-around).
        """
        constrained[
            shift > -size and shift <= size,
            "Constraint: -size < shift <= size",
        ]()
        @parameter
        if size == 1:
            constrained[shift == 0, "for scalars the shift must be 0"]()
            return self
        # A right rotation is a left rotation by the negated amount.
        return self.rotate_left[-shift]()
    # ===------------------------------------------------------------------=== #
    # Shift operations
    # ===------------------------------------------------------------------=== #
    @always_inline
    fn shift_left[shift: Int](self) -> Self:
        """Shifts the elements of a SIMD vector to the left by `shift`
        elements (no wrap-around, fill with zero).
        Constraints:
            `0 <= shift <= size`
        Parameters:
            shift: The number of positions by which to rotate the elements of
                   SIMD vector to the left (no wrap-around, fill with zero).
        Returns:
            The SIMD vector rotated to the left by `shift` elements (no
            wrap-around, fill with zero).
        """
        constrained[
            0 <= shift <= size,
            (
                "shift must be greater than or equal to 0 and less than equal"
                " to the size"
            ),
        ]()
        @parameter
        if shift == 0:
            return self
        elif shift == size:
            # Everything is shifted out; result is all zeros.
            return 0
        # Splice against a zero vector so vacated lanes are zero-filled.
        alias zero_simd = Self()
        return llvm_intrinsic[
            "llvm.vector.splice", Self, has_side_effect=False
        ](self, zero_simd, Int32(shift))
    @always_inline
    fn shift_right[shift: Int](self) -> Self:
        """Shifts the elements of a SIMD vector to the right by `shift`
        elements (no wrap-around, fill with zero).
        Constraints:
            `0 <= shift <= size`
        Parameters:
            shift: The number of positions by which to rotate the elements of
                   SIMD vector to the right (no wrap-around, fill with zero).
        Returns:
            The SIMD vector rotated to the right by `shift` elements (no
            wrap-around, fill with zero).
        """
        # Note the order of the llvm_intrinsic arguments below differ from
        # shift_left(), so we cannot directly reuse it here.
        constrained[
            0 <= shift <= size,
            (
                "shift must be greater than or equal to 0 and less than equal"
                " to the size"
            ),
        ]()
        @parameter
        if shift == 0:
            return self
        elif shift == size:
            return 0
        alias zero_simd = Self()
        return llvm_intrinsic[
            "llvm.vector.splice", Self, has_side_effect=False
        ](zero_simd, self, Int32(-shift))
# ===----------------------------------------------------------------------=== #
# _pow
# ===----------------------------------------------------------------------=== #
@always_inline
fn _pow[
    BaseTy: DType, simd_width: Int, ExpTy: DType
](base: SIMD[BaseTy, simd_width], exp: SIMD[ExpTy, simd_width]) -> __type_of(
    base
):
    """Computes the power of the elements of a SIMD vector raised to the
    corresponding elements of another SIMD vector.
    Parameters:
        BaseTy: The `dtype` of the `base` SIMD vector.
        simd_width: The width of the input and output SIMD vectors.
        ExpTy: The `dtype` of the `exp` SIMD vector.
    Args:
        base: Base of the power operation.
        exp: Exponent of the power operation.
    Returns:
        A vector containing elementwise `base` raised to the power of `exp`.
    """
    @parameter
    if ExpTy.is_floating_point() and BaseTy == ExpTy:
        # If every float exponent is a non-negative whole number, reroute to
        # the cheaper integer-exponent path.
        var rhs_quotient = exp.__floor__()
        if all((exp >= 0) & (rhs_quotient == exp)):
            return _pow(base, rhs_quotient.cast[_integral_type_of[ExpTy]()]())
        var result = __type_of(base)()
        @parameter
        if triple_is_nvidia_cuda():
            # No llvm.pow lowering on the GPU target; abort at runtime.
            _print_fmt(
                "ABORT: pow with two floating point operands is not supported"
                " on GPU"
            )
            abort()
        else:
            # Scalar llvm.pow per lane; there is no vector form used here.
            @parameter
            for i in range(simd_width):
                result[i] = llvm_intrinsic[
                    "llvm.pow", Scalar[BaseTy], has_side_effect=False
                ](base[i], exp[i])
        return result
    elif ExpTy.is_integral():
        # Common cases
        if all(exp == 2):
            return base * base
        if all(exp == 3):
            return base * base * base
        var result = __type_of(base)()
        @parameter
        for i in range(simd_width):
            result[i] = _powi(base[i], exp[i].cast[DType.int32]())
        return result
    else:
        constrained[False, "unsupported type combination"]()
        return __type_of(base)()
@always_inline
fn _powi[type: DType](base: Scalar[type], exp: Int32) -> __type_of(base):
    ## Scalar integer-exponent power via binary exponentiation
    ## (square-and-multiply). For floats a negative exponent yields the
    ## reciprocal; for integers it is undefined and returns 0.
    if type.is_integral() and exp < 0:
        # Not defined for Integers, this should raise an
        # exception.
        debug_assert(False, "exponent < 0 is undefined for integers")
        return 0
    var a = base
    # For floats iterate on |exp| and invert at the end; for ints exp >= 0
    # is already guaranteed by the early return above.
    var b = abs(exp) if type.is_floating_point() else exp
    var res: Scalar[type] = 1
    while b > 0:
        if b & 1:
            res *= a
        a *= a
        b >>= 1
    @parameter
    if type.is_floating_point():
        if exp < 0:
            return 1 / res
    return res
# ===----------------------------------------------------------------------=== #
# bfloat16
# ===----------------------------------------------------------------------=== #
# Number of extra mantissa bits float32 has over bfloat16 (the two formats
# share sign and exponent widths, so conversion is a mantissa shift).
alias _fp32_bf16_mantissa_diff = FPUtils[
    DType.float32
].mantissa_width() - FPUtils[DType.bfloat16].mantissa_width()
@always_inline
fn _bfloat16_to_f32_scalar(
    val: Scalar[DType.bfloat16],
) -> Scalar[DType.float32]:
    @parameter
    if has_neon():
        # BF16 support on neon systems is not supported.
        return _unchecked_zero[DType.float32, 1]()
    # Widening is exact: place the bf16 bits in the high half of the f32
    # bit pattern.
    var bfloat_bits = FPUtils[DType.bfloat16].bitcast_to_integer(val)
    return FPUtils[DType.float32].bitcast_from_integer(
        bfloat_bits << _fp32_bf16_mantissa_diff
    )
@always_inline
fn _bfloat16_to_f32[
    size: Int
](val: SIMD[DType.bfloat16, size]) -> SIMD[DType.float32, size]:
    ## Vector bf16 -> f32 widening; applies the scalar conversion lane-wise
    ## through _simd_apply.
    @parameter
    if has_neon():
        # BF16 support on neon systems is not supported.
        return _unchecked_zero[DType.float32, size]()
    @always_inline
    @parameter
    fn wrapper_fn[
        input_type: DType, result_type: DType
    ](val: Scalar[input_type]) capturing -> Scalar[result_type]:
        return rebind[Scalar[result_type]](
            _bfloat16_to_f32_scalar(rebind[Scalar[DType.bfloat16]](val))
        )
    return _simd_apply[wrapper_fn, DType.float32, size](val)
@always_inline
fn _f32_to_bfloat16_scalar(
    val: Scalar[DType.float32],
) -> Scalar[DType.bfloat16]:
    """Narrows one float32 to bfloat16 with round-to-nearest, ties-to-even
    (the 0x7FFF + lsb bias below implements the tie-break on the kept LSB).
    NaN inputs map to a bf16 NaN with the input's sign preserved. On Neon
    targets returns an uninitialized zero placeholder (bf16 unsupported).
    """
    @parameter
    if has_neon():
        # BF16 support on neon systems is not supported.
        return _unchecked_zero[DType.bfloat16, 1]()
    if _isnan(val):
        return -_nan[DType.bfloat16]() if FPUtils[DType.float32].get_sign(
            val
        ) else _nan[DType.bfloat16]()
    var float_bits = FPUtils[DType.float32].bitcast_to_integer(val)
    # LSB of the bits that survive truncation; makes ties round to even.
    var lsb = (float_bits >> _fp32_bf16_mantissa_diff) & 1
    var rounding_bias = 0x7FFF + lsb
    float_bits += rounding_bias
    var bfloat_bits = float_bits >> _fp32_bf16_mantissa_diff
    return FPUtils[DType.bfloat16].bitcast_from_integer(bfloat_bits)
@always_inline
fn _f32_to_bfloat16[
    size: Int
](val: SIMD[DType.float32, size]) -> SIMD[DType.bfloat16, size]:
    """Narrows a float32 SIMD vector to bfloat16, element-wise, via
    `_f32_to_bfloat16_scalar`. On Neon targets returns an uninitialized
    zero placeholder (bf16 unsupported there).
    """
    @parameter
    if has_neon():
        # BF16 support on neon systems is not supported.
        return _unchecked_zero[DType.bfloat16, size]()
    # Adapter so the scalar conversion fits the _simd_apply signature.
    @always_inline
    @parameter
    fn wrapper_fn[
        input_type: DType, result_type: DType
    ](val: Scalar[input_type]) capturing -> Scalar[result_type]:
        return rebind[Scalar[result_type]](
            _f32_to_bfloat16_scalar(rebind[Scalar[DType.float32]](val))
        )
    return _simd_apply[wrapper_fn, DType.bfloat16, size](val)
# ===----------------------------------------------------------------------=== #
# _simd_apply
# ===----------------------------------------------------------------------=== #
@always_inline
fn _simd_apply[
    func: fn[input_type: DType, result_type: DType] (
        Scalar[input_type]
    ) capturing -> Scalar[result_type],
    result_type: DType,
    simd_width: Int,
](x: SIMD[_, simd_width]) -> SIMD[result_type, simd_width]:
    """Returns a value whose elements correspond to applying `func` to each
    element in the vector.

    Parameters:
        func: Function to apply to each element of the SIMD vector.
        result_type: Result type of func.
        simd_width: Width of the input and output SIMD vectors.

    Args:
        x: The input value.

    Returns:
        A SIMD vector whose element at index `i` is `func(x[i])`.
    """
    var result = SIMD[result_type, simd_width]()
    @parameter
    for i in range(simd_width):
        result[i] = func[x.type, result_type](x[i])
    return result
@always_inline
fn _simd_apply[
    func: fn[lhs_type: DType, rhs_type: DType, result_type: DType] (
        Scalar[lhs_type], Scalar[rhs_type]
    ) capturing -> Scalar[result_type],
    result_type: DType,
    simd_width: Int,
](x: SIMD[_, simd_width], y: SIMD[_, simd_width]) -> SIMD[
    result_type, simd_width
]:
    """Returns a value whose elements correspond to applying `func` to each
    pair of elements in the two input vectors.

    Parameters:
        func: Binary function to apply to each pair of elements.
        result_type: Result type of func.
        simd_width: Width of the input and output SIMD vectors.

    Args:
        x: The lhs input value.
        y: The rhs input value.

    Returns:
        A SIMD vector whose element at index `i` is `func(x[i], y[i])`.
    """
    var result = SIMD[result_type, simd_width]()
    @parameter
    for i in range(simd_width):
        result[i] = func[x.type, y.type, result_type](x[i], y[i])
    return result
# ===----------------------------------------------------------------------=== #
# _format_scalar
# ===----------------------------------------------------------------------=== #
fn _format_scalar[
    dtype: DType,
    float_format: StringLiteral = "%.17g",
](inout writer: Formatter, value: Formatter[dtype] if False else Scalar[dtype]):
    """Formats a scalar value into `writer` using snprintf-style formatting.

    Parameters:
        dtype: The dtype of the scalar being formatted.
        float_format: printf-style format used for floating-point dtypes.
    """
    # Stack allocate enough bytes to store any formatted Scalar value of any
    # type.
    alias size: Int = _calc_format_buffer_size[dtype]()
    var buf = InlineArray[UInt8, size](fill=0)
    # `wrote` is the number of bytes _snprintf_scalar actually produced.
    var wrote = _snprintf_scalar[dtype, float_format](
        buf.unsafe_ptr(),
        size,
        value,
    )
    # SAFETY:
    #   Create a slice to only those bytes in `buf` that have been initialized.
    var str_slice = StringSlice[False, __lifetime_of(buf)](
        unsafe_from_utf8_ptr=buf.unsafe_ptr(), len=wrote
    )
    writer.write_str(str_slice)
| mojo/stdlib/src/builtin/simd.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the built-in `sort` function.
These are Mojo built-ins, so you don't need to import them.
"""
from bit import countl_zero
from collections import List
from memory import Pointer, UnsafePointer
from sys import bitwidthof
# ===----------------------------------------------------------------------===#
# sort
# ===----------------------------------------------------------------------===#
# Comparator signature used throughout this module: returns True when `lhs`
# should be ordered before (or equal to) `rhs`.
alias _cmp_fn_type = fn[type: AnyTrivialRegType] (type, type) capturing -> Bool
fn _insertion_sort[
    type: AnyTrivialRegType, cmp_fn: _cmp_fn_type
](array: Pointer[type], start: Int, end: Int):
    """Sorts `array[start:end]` in place with insertion sort, ordered by
    `cmp_fn`. The prefix `array[start:i]` is kept sorted as `i` advances.
    """
    for i in range(start + 1, end):
        var current = array[i]
        var pos = i
        # Shift elements that should come after `current` one slot to the
        # right until its insertion point is reached.
        while pos > start and not cmp_fn[type](array[pos - 1], current):
            array[pos] = array[pos - 1]
            pos -= 1
        array[pos] = current
fn _insertion_sort[
    type: CollectionElement, cmp_fn: fn (type, type) capturing -> Bool
](array: UnsafePointer[type], start: Int, end: Int):
    """Sorts `array[start:end]` in place with insertion sort, ordered by
    `cmp_fn`. The prefix `array[start:i]` is kept sorted as `i` advances.
    """
    for i in range(start + 1, end):
        var current = array[i]
        var pos = i
        # Shift elements that should come after `current` one slot to the
        # right until its insertion point is reached.
        while pos > start and not cmp_fn(array[pos - 1], current):
            array[pos] = array[pos - 1]
            pos -= 1
        array[pos] = current
@always_inline
fn _partition[
    type: AnyTrivialRegType, cmp_fn: _cmp_fn_type
](array: Pointer[type], start: Int, end: Int) -> Int:
    """Partitions `array[start:end]` around the middle element and returns the
    pivot's final index. After the call, elements left of the returned index
    satisfy `cmp_fn(elem, pivot)` and elements right of it do not.
    """
    if start == end:
        return end
    # Middle element as pivot; parked at end-1 while the two-pointer scan runs.
    var pivot = start + (end - start) // 2
    var pivot_value = array[pivot]
    var left = start
    var right = end - 2
    swap(array[pivot], array[end - 1])
    while left < right:
        if cmp_fn[type](array[left], pivot_value):
            left += 1
        elif not cmp_fn[type](array[right], pivot_value):
            right -= 1
        else:
            swap(array[left], array[right])
    # `right` may stop one short of the boundary; adjust before restoring the
    # pivot into its final slot.
    if cmp_fn[type](array[right], pivot_value):
        right += 1
    swap(array[end - 1], array[right])
    return right
@always_inline
fn _partition[
    type: CollectionElement, cmp_fn: fn (type, type) capturing -> Bool
](array: UnsafePointer[type], start: Int, end: Int) -> Int:
    """Partitions `array[start:end]` around the middle element and returns the
    pivot's final index. Same scheme as the AnyTrivialRegType overload above,
    but for CollectionElement types behind an UnsafePointer.
    """
    if start == end:
        return end
    # Middle element as pivot; parked at end-1 while the two-pointer scan runs.
    var pivot = start + (end - start) // 2
    var pivot_value = array[pivot]
    var left = start
    var right = end - 2
    swap(array[pivot], array[end - 1])
    while left < right:
        if cmp_fn(array[left], pivot_value):
            left += 1
        elif not cmp_fn(array[right], pivot_value):
            right -= 1
        else:
            swap(array[left], array[right])
    # `right` may stop one short of the boundary; adjust before restoring the
    # pivot into its final slot.
    if cmp_fn(array[right], pivot_value):
        right += 1
    swap(array[end - 1], array[right])
    return right
fn _estimate_initial_height(size: Int) -> Int:
    """Initial capacity estimate for the quicksort work stack, based on log2
    of `size` (minimum 2). Only a capacity hint — correctness does not depend
    on it.
    """
    # Compute the log2 of the size rounded upward.
    # NOTE(review): `(bitwidth-1) ^ countl_zero(x)` is the floor of log2, not
    # the ceiling — harmless here since this is only a capacity hint; confirm.
    var log2 = int((bitwidthof[DType.index]() - 1) ^ countl_zero(size | 1))
    return max(2, log2)
fn _quicksort[
    type: AnyTrivialRegType, cmp_fn: _cmp_fn_type
](array: Pointer[type], size: Int):
    """Iterative quicksort of `array[0:size]` ordered by `cmp_fn`.

    Uses an explicit stack of [start, end) ranges instead of recursion.
    Ranges of length 2-5 are dispatched to fixed sorting networks
    (`_small_sort`); ranges shorter than 32 fall back to insertion sort.
    """
    if size == 0:
        return
    var stack = List[Int](capacity=_estimate_initial_height(size))
    stack.append(0)
    stack.append(size)
    while len(stack) > 0:
        # Ranges are pushed as (start, end), so they pop in reverse order.
        var end = stack.pop()
        var start = stack.pop()
        var len = end - start
        if len < 2:
            continue
        if len == 2:
            _small_sort[2, type, cmp_fn](array + start)
            continue
        if len == 3:
            _small_sort[3, type, cmp_fn](array + start)
            continue
        if len == 4:
            _small_sort[4, type, cmp_fn](array + start)
            continue
        if len == 5:
            _small_sort[5, type, cmp_fn](array + start)
            continue
        if len < 32:
            _insertion_sort[type, cmp_fn](array, start, end)
            continue
        # Partition; the pivot lands at its final index, then both halves
        # (excluding the pivot) are pushed for later processing.
        var pivot = _partition[type, cmp_fn](array, start, end)
        stack.append(pivot + 1)
        stack.append(end)
        stack.append(start)
        stack.append(pivot)
fn _quicksort[
    type: CollectionElement, cmp_fn: fn (type, type) capturing -> Bool
](array: UnsafePointer[type], size: Int):
    """Iterative quicksort of `array[0:size]` ordered by `cmp_fn`, for
    CollectionElement types. Unlike the AnyTrivialRegType overload, small
    ranges (len < 8) use insertion sort only — no sorting networks.
    """
    if size == 0:
        return
    var stack = List[Int](capacity=_estimate_initial_height(size))
    stack.append(0)
    stack.append(size)
    while len(stack) > 0:
        # Ranges are pushed as (start, end), so they pop in reverse order.
        var end = stack.pop()
        var start = stack.pop()
        var len = end - start
        if len < 2:
            continue
        if len < 8:
            _insertion_sort[type, cmp_fn](array, start, end)
            continue
        # Partition; the pivot lands at its final index, then both halves
        # (excluding the pivot) are pushed for later processing.
        var pivot = _partition[type, cmp_fn](array, start, end)
        stack.append(pivot + 1)
        stack.append(end)
        stack.append(start)
        stack.append(pivot)
# ===----------------------------------------------------------------------===#
# partition
# ===----------------------------------------------------------------------===#
fn partition[
    type: AnyTrivialRegType, cmp_fn: _cmp_fn_type
](buff: Pointer[type], k: Int, size: Int):
    """Partition the input vector inplace such that first k elements are the
    largest (or smallest if cmp_fn is <= operator) elements.
    The ordering of the first k elements is undefined.

    Parameters:
        type: Type of the underlying data.
        cmp_fn: Comparison functor of `fn[type](type, type) capturing -> Bool` type.

    Args:
        buff: Input buffer.
        k: Index of the partition element.
        size: The length of the buffer.
    """
    # Quickselect: repeatedly partition, narrowing to the half containing
    # index k, until the pivot lands exactly at k.
    var stack = List[Int](capacity=_estimate_initial_height(size))
    stack.append(0)
    stack.append(size)
    while len(stack) > 0:
        var end = stack.pop()
        var start = stack.pop()
        var pivot = _partition[type, cmp_fn](buff, start, end)
        if pivot == k:
            break
        elif k < pivot:
            stack.append(start)
            stack.append(pivot)
        else:
            stack.append(pivot + 1)
            stack.append(end)
# ===----------------------------------------------------------------------===#
# sort
# ===----------------------------------------------------------------------===#
fn sort(inout buff: Pointer[Int], len: Int):
    """Sort the vector inplace.
    The function doesn't return anything, the vector is updated inplace.

    Args:
        buff: Input buffer.
        len: The length of the buffer.
    """
    # Ascending order via a <= comparator over Int.
    @parameter
    fn _less_than_equal[type: AnyTrivialRegType](lhs: type, rhs: type) -> Bool:
        return rebind[Int](lhs) <= rebind[Int](rhs)
    _quicksort[Int, _less_than_equal](buff, len)
fn sort[type: DType](inout buff: Pointer[Scalar[type]], len: Int):
    """Sort the vector inplace.
    The function doesn't return anything, the vector is updated inplace.

    Parameters:
        type: DType of the underlying data.

    Args:
        buff: Input buffer.
        len: The length of the buffer.
    """
    # Ascending order via a <= comparator over Scalar[type].
    @parameter
    fn _less_than_equal[ty: AnyTrivialRegType](lhs: ty, rhs: ty) -> Bool:
        return rebind[Scalar[type]](lhs) <= rebind[Scalar[type]](rhs)
    _quicksort[Scalar[type], _less_than_equal](buff, len)
fn sort(inout v: List[Int]):
    """Sort the vector inplace.
    The function doesn't return anything, the vector is updated inplace.

    Args:
        v: Input integer vector to sort.
    """
    # Downcast any pointer to register-passable pointer.
    var ptr = rebind[Pointer[Int]](v.data)
    sort(ptr, len(v))
fn sort[type: DType](inout v: List[Scalar[type]]):
    """Sort the vector inplace.
    The function doesn't return anything, the vector is updated inplace.

    Parameters:
        type: DType of the underlying data.

    Args:
        v: Input vector to sort.
    """
    # Downcast the list's storage to a register-passable pointer and delegate.
    var ptr = rebind[Pointer[Scalar[type]]](v.data)
    sort[type](ptr, len(v))
fn sort[
    type: CollectionElement,
    cmp_fn: fn (type, type) capturing -> Bool,
](inout v: List[type]):
    """Sort the vector inplace.
    The function doesn't return anything, the vector is updated inplace.

    Parameters:
        type: The type of the elements being sorted.
        cmp_fn: The comparison function.

    Args:
        v: Input vector to sort.
    """
    _quicksort[type, cmp_fn](v.data, len(v))
# ===----------------------------------------------------------------------===#
# sort networks
# ===----------------------------------------------------------------------===#
@always_inline
fn _sort2[
    type: AnyTrivialRegType, cmp_fn: _cmp_fn_type
](array: Pointer[type], offset0: Int, offset1: Int):
    """Orders the pair `array[offset0]`, `array[offset1]` per `cmp_fn`."""
    var first = array[offset0]
    var second = array[offset1]
    # Swap the two elements if they are out of order.
    if not cmp_fn[type](first, second):
        array[offset0] = second
        array[offset1] = first
@always_inline
fn _sort_partial_3[
    type: AnyTrivialRegType, cmp_fn: _cmp_fn_type
](array: Pointer[type], offset0: Int, offset1: Int, offset2: Int):
    """Three-element ordering step used inside the sorting networks.

    NOTE(review): call sites in `_small_sort` always run `_sort2` on the
    last two offsets first, so this appears to assume `array[offset1]` and
    `array[offset2]` are already ordered — confirm before reusing elsewhere.
    """
    var a = array[offset0]
    var b = array[offset1]
    var c = array[offset2]
    # r: whether c must come before a; t is the smaller of the two.
    var r = cmp_fn[type](c, a)
    var t = c if r else a
    if r:
        array[offset2] = a
    if cmp_fn[type](b, t):
        array[offset0] = b
        array[offset1] = t
    elif r:
        array[offset0] = t
@always_inline
fn _small_sort[
    n: Int, type: AnyTrivialRegType, cmp_fn: _cmp_fn_type
](array: Pointer[type]):
    """Sorts exactly `n` elements (n in 2..5) with a fixed sequence of
    compare-and-swap operations (a sorting network), branching chosen at
    compile time via `@parameter if`.
    """
    @parameter
    if n == 2:
        _sort2[type, cmp_fn](array, 0, 1)
        return
    @parameter
    if n == 3:
        _sort2[type, cmp_fn](array, 1, 2)
        _sort_partial_3[type, cmp_fn](array, 0, 1, 2)
        return
    @parameter
    if n == 4:
        _sort2[type, cmp_fn](array, 0, 2)
        _sort2[type, cmp_fn](array, 1, 3)
        _sort2[type, cmp_fn](array, 0, 1)
        _sort2[type, cmp_fn](array, 2, 3)
        _sort2[type, cmp_fn](array, 1, 2)
        return
    @parameter
    if n == 5:
        _sort2[type, cmp_fn](array, 0, 1)
        _sort2[type, cmp_fn](array, 3, 4)
        _sort_partial_3[type, cmp_fn](array, 2, 3, 4)
        _sort2[type, cmp_fn](array, 1, 4)
        _sort_partial_3[type, cmp_fn](array, 0, 2, 3)
        _sort_partial_3[type, cmp_fn](array, 1, 2, 3)
        return
# ===----------------------------------------------------------------------=== #
# Comparable elements list sorting
# ===----------------------------------------------------------------------=== #
@always_inline
fn insertion_sort[type: ComparableCollectionElement](inout list: List[type]):
    """Sort list of the order comparable elements in-place with insertion sort algorithm.

    Parameters:
        type: The order comparable collection element type.

    Args:
        list: The list of the order comparable elements which will be sorted in-place.
    """
    for idx in range(1, len(list)):
        var item = list[idx]
        var insert_at = idx - 1
        # Shift every element greater than `item` one position right, then
        # drop `item` into the gap.
        while insert_at >= 0 and item < list[insert_at]:
            list[insert_at + 1] = list[insert_at]
            insert_at -= 1
        list[insert_at + 1] = item
fn _quick_sort[
    type: ComparableCollectionElement
](inout list: List[type], low: Int, high: Int):
    """Sort section of the list, between low and high, with quick sort algorithm in-place.

    Recursive quicksort using a Lomuto-style partition with `list[high]` as
    the pivot. `high` is an inclusive index.

    Parameters:
        type: The order comparable collection element type.

    Args:
        list: The list of the order comparable elements which will be sorted in-place.
        low: Int value identifying the lowest index of the list section to be sorted.
        high: Int value identifying the highest index of the list section to be sorted.
    """
    @always_inline
    @parameter
    fn _partition(low: Int, high: Int) -> Int:
        # Lomuto partition: i tracks the end of the <= pivot region.
        var pivot = list[high]
        var i = low - 1
        for j in range(low, high):
            if list[j] <= pivot:
                i += 1
                list[j], list[i] = list[i], list[j]
        # Place the pivot just past the <= region; return its final index.
        list[i + 1], list[high] = list[high], list[i + 1]
        return i + 1
    if low < high:
        var pi = _partition(low, high)
        _quick_sort(list, low, pi - 1)
        _quick_sort(list, pi + 1, high)
@always_inline
fn quick_sort[type: ComparableCollectionElement](inout list: List[type]):
    """Sort list of the order comparable elements in-place with quick sort algorithm.

    Parameters:
        type: The order comparable collection element type.

    Args:
        list: The list of the order comparable elements which will be sorted in-place.
    """
    # Empty lists are handled naturally: high = -1, so the recursion no-ops.
    _quick_sort(list, 0, len(list) - 1)
fn sort[
    type: ComparableCollectionElement, slist_ub: Int = 64
](inout list: List[type]):
    """Sort list of the order comparable elements in-place. This function picks the best algorithm based on the list length.

    Parameters:
        type: The order comparable collection element type.
        slist_ub: The upper bound for a list size which is considered small.

    Args:
        list: The list of the scalars which will be sorted in-place.
    """
    var count = len(list)
    if count <= slist_ub:
        insertion_sort(list)  # small lists are best sorted with insertion sort
    else:
        quick_sort(list)  # others are best sorted with quick sort
| mojo/stdlib/src/builtin/sort.mojo | false |
<filename>mojo/stdlib/src/builtin/str.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provides the `str` function.
These are Mojo built-ins, so you don't need to import them.
"""
# ===----------------------------------------------------------------------=== #
# Stringable
# ===----------------------------------------------------------------------=== #
trait Stringable:
    """
    The `Stringable` trait describes a type that can be converted to a
    [`String`](/mojo/stdlib/builtin/string/String).

    Any type that conforms to `Stringable` or
    [`StringableRaising`](/mojo/stdlib/builtin/str/StringableRaising) works
    with the built-in [`print()`](/mojo/stdlib/builtin/io/print) and
    [`str()`](/mojo/stdlib/builtin/str/str) functions.

    The `Stringable` trait requires the type to define the `__str__()` method.
    For example:

    ```mojo
    @value
    struct Foo(Stringable):
        var s: String
        fn __str__(self) -> String:
            return self.s
    ```

    Now you can pass an instance of `Foo` to the `str()` function to get back a
    `String`:

    ```mojo
    var foo = Foo("test")
    print(str(foo) == "test")
    ```

    ```plaintext
    True
    ```

    **Note:** If the `__str__()` method might raise an error, use the
    [`StringableRaising`](/mojo/stdlib/builtin/str/StringableRaising)
    trait, instead.

    About the difference between `__repr__()` and `__str__()`:
    The method `__repr__()` computes the "official" string representation of
    an object while `__str__()` computes the "informal" or nicely printable
    string representation of an object.
    This method differs from `__repr__()` in that there is no expectation that `__str__()`
    return a valid Mojo expression: a more convenient or concise representation can be used.
    """

    fn __str__(self) -> String:
        """Get the string representation of the type.

        Returns:
            The string representation of the type.
        """
        ...
trait StringableRaising:
    """The StringableRaising trait describes a type that can be converted to a
    [`String`](/mojo/stdlib/builtin/string/String).

    Any type that conforms to
    [`Stringable`](/mojo/stdlib/builtin/str/Stringable) or
    `StringableRaising` works with the built-in
    [`print()`](/mojo/stdlib/builtin/io/print) and
    [`str()`](/mojo/stdlib/builtin/str/str) functions.

    The `StringableRaising` trait requires the type to define the `__str__()`
    method, which can raise an error. For example:

    ```mojo
    @value
    struct Foo(StringableRaising):
        var s: String
        fn __str__(self) raises -> String:
            if self.s == "":
                raise Error("Empty String")
            return self.s
    ```

    Now you can pass an instance of `Foo` to the `str()` function to get back a
    `String`:

    ```mojo
    fn main() raises:
        var foo = Foo("test")
        print(str(foo) == "test")
    ```

    ```plaintext
    True
    ```
    """

    fn __str__(self) raises -> String:
        """Get the string representation of the type.

        Returns:
            The string representation of the type.

        Raises:
            If there is an error when computing the string representation of the type.
        """
        ...
# ===----------------------------------------------------------------------=== #
# str
# ===----------------------------------------------------------------------=== #
@always_inline
fn str[T: Stringable](value: T) -> String:
    """Get the string representation of a value.

    Parameters:
        T: The type conforming to Stringable.

    Args:
        value: The object to get the string representation of.

    Returns:
        The string representation of the object.
    """
    return value.__str__()
@always_inline
fn str(value: None) -> String:
    """Get the string representation of the `None` type.

    Args:
        value: The object to get the string representation of.

    Returns:
        The string representation of the object (always `"None"`).
    """
    return "None"
@always_inline
fn str[T: StringableRaising](value: T) raises -> String:
    """Get the string representation of a value.

    Parameters:
        T: The type conforming to StringableRaising.

    Args:
        value: The object to get the string representation of.

    Returns:
        The string representation of the object.

    Raises:
        If there is an error when computing the string representation of the type.
    """
    return value.__str__()
| mojo/stdlib/src/builtin/str.mojo | false |
<filename>mojo/stdlib/src/builtin/string.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements basic object methods for working with strings.
These are Mojo built-ins, so you don't need to import them.
"""
from bit import countl_zero
from collections import List, KeyElement
from sys import llvm_intrinsic, bitwidthof
from memory import DTypePointer, LegacyPointer, UnsafePointer, memcmp, memcpy
from utils import StringRef, StaticIntTuple, Span, StringSlice
from utils._format import Formattable, Formatter, ToFormatter
# ===----------------------------------------------------------------------=== #
# ord
# ===----------------------------------------------------------------------=== #
fn ord(s: String) -> Int:
    """Returns an integer that represents the given one-character string.

    Given a string representing one character, return an integer
    representing the code point of that character. For example, `ord("a")`
    returns the integer `97`. This is the inverse of the `chr()` function.

    Args:
        s: The input string, which must contain only a single character.

    Returns:
        An integer representing the code point of the given character.
    """
    # UTF-8 to Unicode conversion:              (represented as UInt32 BE)
    # 1: 0aaaaaaa                            -> 00000000 00000000 00000000 0aaaaaaa     a
    # 2: 110aaaaa 10bbbbbb                   -> 00000000 00000000 00000aaa aabbbbbb     a << 6  | b
    # 3: 1110aaaa 10bbbbbb 10cccccc          -> 00000000 00000000 aaaabbbb bbcccccc     a << 12 | b << 6  | c
    # 4: 11110aaa 10bbbbbb 10cccccc 10dddddd -> 00000000 000aaabb bbbbcccc ccdddddd     a << 18 | b << 12 | c << 6 | d
    var p = s.unsafe_ptr().bitcast[UInt8]()
    var b1 = p[]
    if (b1 >> 7) == 0:  # This is 1 byte ASCII char
        debug_assert(len(s) == 1, "input string length must be 1")
        return int(b1)
    # Leading ones in the first byte encode the sequence length.
    var num_bytes = countl_zero(~b1)
    debug_assert(len(s) == int(num_bytes), "input string must be one character")
    var shift = int((6 * (num_bytes - 1)))
    # Mask off the length marker to keep only the payload bits of byte 1.
    var b1_mask = 0b11111111 >> (num_bytes + 1)
    var result = int(b1 & b1_mask) << shift
    # Fold in 6 payload bits from each continuation byte.
    for _ in range(1, num_bytes):
        p += 1
        shift -= 6
        result |= int(p[] & 0b00111111) << shift
    return result
# ===----------------------------------------------------------------------=== #
# chr
# ===----------------------------------------------------------------------=== #
fn chr(c: Int) -> String:
    """Returns a string based on the given Unicode code point.

    Returns the string representing a character whose code point is the integer
    `c`. For example, `chr(97)` returns the string `"a"`. This is the inverse of
    the `ord()` function.

    Args:
        c: An integer that represents a code point.

    Returns:
        A string containing a single character based on the given code point.
    """
    # Unicode (represented as UInt32 BE) to UTF-8 conversion :
    # 1: 00000000 00000000 00000000 0aaaaaaa -> 0aaaaaaa                                a
    # 2: 00000000 00000000 00000aaa aabbbbbb -> 110aaaaa 10bbbbbb                       a >> 6  | 0b11000000, b       | 0b10000000
    # 3: 00000000 00000000 aaaabbbb bbcccccc -> 1110aaaa 10bbbbbb 10cccccc              a >> 12 | 0b11100000, b >> 6  | 0b10000000, c      | 0b10000000
    # 4: 00000000 000aaabb bbbbcccc ccdddddd -> 11110aaa 10bbbbbb 10cccccc 10dddddd     a >> 18 | 0b11110000, b >> 12 | 0b10000000, c >> 6 | 0b10000000, d | 0b10000000
    if (c >> 7) == 0:  # This is 1 byte ASCII char
        return _chr_ascii(c)
    # Number of UTF-8 bytes needed, found by comparing against the payload
    # capacity of 1-, 2- and 3-byte sequences simultaneously.
    @always_inline
    fn _utf8_len(val: Int) -> Int:
        debug_assert(
            0 <= val <= 0x10FFFF, "Value is not a valid Unicode code point"
        )
        alias sizes = SIMD[DType.int32, 4](
            0, 0b1111_111, 0b1111_1111_111, 0b1111_1111_1111_1111
        )
        var values = SIMD[DType.int32, 4](val)
        var mask = values > sizes
        return int(mask.cast[DType.uint8]().reduce_add())
    var num_bytes = _utf8_len(c)
    # +1 for the NUL terminator expected by the String constructor.
    var p = DTypePointer[DType.uint8].alloc(num_bytes + 1)
    var shift = 6 * (num_bytes - 1)
    var mask = UInt8(0xFF) >> (num_bytes + 1)
    var num_bytes_marker = UInt8(0xFF) << (8 - num_bytes)
    p.store(((c >> shift) & mask) | num_bytes_marker)
    for i in range(1, num_bytes):
        shift -= 6
        p.store(i, ((c >> shift) & 0b00111111) | 0b10000000)
    p.store(num_bytes, 0)
    # NOTE(review): the String constructor presumably takes ownership of the
    # allocation (no explicit free here) — confirm against String's API.
    return String(p.bitcast[DType.uint8](), num_bytes + 1)
# ===----------------------------------------------------------------------=== #
# ascii
# ===----------------------------------------------------------------------=== #
@always_inline("nodebug")
fn _chr_ascii(c: UInt8) -> String:
    """Returns a string based on the given ASCII code point.

    Args:
        c: An integer that represents a code point.

    Returns:
        A string containing a single character based on the given code point.
    """
    # Build the backing buffer directly: one byte plus a NUL terminator.
    return String(String._buffer_type(c, 0))
@always_inline("nodebug")
fn _repr_ascii(c: UInt8) -> String:
    """Returns a printable representation of the given ASCII code point.

    Printable characters are returned as-is; backslash, tab, newline and
    carriage return become their escape sequences; everything else becomes a
    `\\xNN` hex escape.

    Args:
        c: An integer that represents a code point.

    Returns:
        A string containing a representation of the given code point.
    """
    alias ord_tab = ord("\t")
    alias ord_new_line = ord("\n")
    alias ord_carriage_return = ord("\r")
    alias ord_back_slash = ord("\\")
    if c == ord_back_slash:
        return r"\\"
    elif isprintable(c):
        return _chr_ascii(c)
    elif c == ord_tab:
        return r"\t"
    elif c == ord_new_line:
        return r"\n"
    elif c == ord_carriage_return:
        return r"\r"
    else:
        # NOTE(review): `c` is already UInt8, so this cast looks like a no-op
        # kept for clarity — confirm.
        var uc = c.cast[DType.uint8]()
        if uc < 16:
            # Pad single hex digits so the escape is always two digits wide.
            return hex(uc, r"\x0")
        else:
            return hex(uc, r"\x")
# TODO: This is currently the same as repr, should change with unicode strings
@always_inline("nodebug")
fn ascii(value: String) -> String:
"""Get the ASCII representation of the object.
Args:
value: The object to get the ASCII representation of.
Returns:
A string containing the ASCII representation of the object.
"""
return value.__repr__()
# ===----------------------------------------------------------------------=== #
# strtol
# ===----------------------------------------------------------------------=== #
@always_inline
fn _atol(str_ref: StringRef, base: Int = 10) raises -> Int:
    """Implementation of `atol` for StringRef inputs.

    Accepts optional leading whitespace, an optional sign, an optional base
    prefix (0b/0o/0x when the base matches, or auto-detected when base == 0),
    digits possibly separated by single underscores, and optional trailing
    whitespace. Raises on any other input.

    Please see its docstring for details.
    """
    if (base != 0) and (base < 2 or base > 36):
        raise Error("Base must be >= 2 and <= 36, or 0.")
    if not str_ref:
        raise Error(_atol_error(base, str_ref))
    var real_base: Int
    var ord_num_max: Int
    var ord_letter_max = (-1, -1)
    var result = 0
    var is_negative: Bool = False
    var start: Int = 0
    var str_len = len(str_ref)
    var buff = str_ref.unsafe_ptr()
    # Skip leading whitespace, then consume at most one sign character.
    for pos in range(start, str_len):
        if _isspace(buff[pos]):
            continue
        if str_ref[pos] == "-":
            is_negative = True
            start = pos + 1
        elif str_ref[pos] == "+":
            start = pos + 1
        else:
            start = pos
        break
    # Consume an explicit base prefix (0b/0o/0x) when it matches `base`.
    if str_ref[start] == "0" and start + 1 < str_len:
        if base == 2 and (
            str_ref[start + 1] == "b" or str_ref[start + 1] == "B"
        ):
            start += 2
        elif base == 8 and (
            str_ref[start + 1] == "o" or str_ref[start + 1] == "O"
        ):
            start += 2
        elif base == 16 and (
            str_ref[start + 1] == "x" or str_ref[start + 1] == "X"
        ):
            start += 2
    alias ord_0 = ord("0")
    # FIXME:
    #   Change this to `alias` after fixing support for __getitem__ of alias.
    var ord_letter_min = (ord("a"), ord("A"))
    alias ord_underscore = ord("_")
    if base == 0:
        # Base 0: infer the base from the prefix (Python literal rules).
        var real_base_new_start = _identify_base(str_ref, start)
        real_base = real_base_new_start[0]
        start = real_base_new_start[1]
        if real_base == -1:
            raise Error(_atol_error(base, str_ref))
    else:
        real_base = base
    # Compute the highest digit/letter ordinals valid for this base.
    if real_base <= 10:
        ord_num_max = ord(str(real_base - 1))
    else:
        ord_num_max = ord("9")
        ord_letter_max = (
            ord("a") + (real_base - 11),
            ord("A") + (real_base - 11),
        )
    var found_valid_chars_after_start = False
    var has_space_after_number = False
    # single underscores are only allowed between digits
    # starting "was_last_digit_undescore" to true such that
    # if the first digit is an undesrcore an error is raised
    var was_last_digit_undescore = True
    for pos in range(start, str_len):
        var ord_current = int(buff[pos])
        if ord_current == ord_underscore:
            if was_last_digit_undescore:
                raise Error(_atol_error(base, str_ref))
            else:
                was_last_digit_undescore = True
            continue
        else:
            was_last_digit_undescore = False
        # Accumulate: the digit is added first; the multiply by the base
        # happens below only when another digit follows.
        if ord_0 <= ord_current <= ord_num_max:
            result += ord_current - ord_0
            found_valid_chars_after_start = True
        elif ord_letter_min[0] <= ord_current <= ord_letter_max[0]:
            result += ord_current - ord_letter_min[0] + 10
            found_valid_chars_after_start = True
        elif ord_letter_min[1] <= ord_current <= ord_letter_max[1]:
            result += ord_current - ord_letter_min[1] + 10
            found_valid_chars_after_start = True
        elif _isspace(ord_current):
            has_space_after_number = True
            start = pos + 1
            break
        else:
            raise Error(_atol_error(base, str_ref))
        if pos + 1 < str_len and not _isspace(buff[pos + 1]):
            var nextresult = result * real_base
            # Wraparound after the multiply signals Int overflow.
            if nextresult < result:
                raise Error(
                    _atol_error(base, str_ref)
                    + " String expresses an integer too large to store in Int."
                )
            result = nextresult
    if was_last_digit_undescore or (not found_valid_chars_after_start):
        raise Error(_atol_error(base, str_ref))
    # Only whitespace may follow the number.
    if has_space_after_number:
        for pos in range(start, str_len):
            if not _isspace(buff[pos]):
                raise Error(_atol_error(base, str_ref))
    if is_negative:
        result = -result
    return result
fn _atol_error(base: Int, str_ref: StringRef) -> String:
    """Builds the error message for a string that failed integer parsing."""
    var message: String = "String is not convertible to integer with base "
    message += str(base)
    message += ": '"
    message += str(str_ref)
    message += "'"
    return message
fn _identify_base(str_ref: StringRef, start: Int) -> Tuple[Int, Int]:
    """Infers the numeric base from a literal prefix at `start`.

    Returns (base, index of the first digit). Recognizes 0b/0B -> 2,
    0o/0O -> 8, 0x/0X -> 16; "0" followed only by zeros/underscores -> 10;
    a leading 1-9 -> 10. Returns (-1, -1) for anything invalid (mirroring
    Python's integer-literal rules, where e.g. "0123" is not allowed).
    """
    var length = len(str_ref)
    # just 1 digit, assume base 10
    if start == (length - 1):
        return 10, start
    if str_ref[start] == "0":
        var second_digit = str_ref[start + 1]
        if second_digit == "b" or second_digit == "B":
            return 2, start + 2
        if second_digit == "o" or second_digit == "O":
            return 8, start + 2
        if second_digit == "x" or second_digit == "X":
            return 16, start + 2
        # checking for special case of all "0", "_" are also allowed
        var was_last_character_underscore = False
        for i in range(start + 1, length):
            if str_ref[i] == "_":
                if was_last_character_underscore:
                    return -1, -1
                else:
                    was_last_character_underscore = True
                    continue
            else:
                was_last_character_underscore = False
            if str_ref[i] != "0":
                return -1, -1
    elif ord("1") <= ord(str_ref[start]) <= ord("9"):
        return 10, start
    else:
        return -1, -1
    return 10, start
fn atol(str: String, base: Int = 10) raises -> Int:
    """Parses and returns the given string as an integer in the given base.

    For example, `atol("19")` returns `19`. If base is 0 the string is
    parsed as an Integer literal, see: https://docs.python.org/3/reference/lexical_analysis.html#integers.

    Raises:
        If the given string cannot be parsed as an integer value. For example in
        `atol("hi")`.

    Args:
        str: A string to be parsed as an integer in the given base.
        base: Base used for conversion, value must be between 2 and 36, or 0.

    Returns:
        An integer value that represents the string, or otherwise raises.
    """
    return _atol(str._strref_dangerous(), base)
fn _atof_error(str_ref: StringRef) -> Error:
    """Builds the error raised when a string cannot be parsed as a float."""
    return Error("String is not convertible to float: '" + str(str_ref) + "'")
@always_inline
fn _atof(str_ref: StringRef) raises -> Float64:
    """Implementation of `atof` for StringRef inputs.

    Parses: optional sign, "nan"/"inf", a decimal mantissa with an optional
    fractional part, an optional e/E exponent, and an optional trailing f/F.
    Raises unless the (whitespace-stripped) input is fully consumed.

    Please see the docstring of `atof` for details.
    """
    if not str_ref:
        raise _atof_error(str_ref)
    var result: Float64 = 0.0
    var exponent: Int = 0
    var sign: Int = 1
    alias ord_0 = UInt8(ord("0"))
    alias ord_9 = UInt8(ord("9"))
    alias ord_dot = UInt8(ord("."))
    alias ord_plus = UInt8(ord("+"))
    alias ord_minus = UInt8(ord("-"))
    alias ord_f = UInt8(ord("f"))
    alias ord_F = UInt8(ord("F"))
    alias ord_e = UInt8(ord("e"))
    alias ord_E = UInt8(ord("E"))
    var start: Int = 0
    var str_ref_strip = str_ref.strip()
    var str_len = len(str_ref_strip)
    var buff = str_ref_strip.unsafe_ptr()
    # FIX: a whitespace-only input strips to "" and previously read past the
    # buffer and then parsed as 0.0; reject it explicitly.
    if str_len == 0:
        raise _atof_error(str_ref)
    # check sign, inf, nan
    if buff[start] == ord_plus:
        start += 1
    elif buff[start] == ord_minus:
        start += 1
        sign = -1
    if (str_len - start) >= 3:
        if StringRef(buff + start, 3) == "nan":
            return FloatLiteral.nan
        if StringRef(buff + start, 3) == "inf":
            return FloatLiteral.infinity * sign
    # read before dot
    for pos in range(start, str_len):
        if ord_0 <= buff[pos] <= ord_9:
            result = result * 10.0 + int(buff[pos] - ord_0)
            start += 1
        else:
            break
    # if dot -> read after dot
    # NOTE: the `start < str_len` guards below fix out-of-bounds reads of one
    # byte past the stripped substring when the input ends after the digits.
    if start < str_len and buff[start] == ord_dot:
        start += 1
        for pos in range(start, str_len):
            if ord_0 <= buff[pos] <= ord_9:
                result = result * 10.0 + int(buff[pos] - ord_0)
                exponent -= 1
            else:
                break
            start += 1
    # if e/E -> read scientific notation
    if start < str_len and (buff[start] == ord_e or buff[start] == ord_E):
        start += 1
        var exp_sign: Int = 1
        var shift: Int = 0
        var has_number: Bool = False
        # FIX: a sign is only valid as the first character of the exponent.
        # Previously "+"/"-" were accepted between digits, so e.g. "1e1+2"
        # silently parsed as 1e12; such inputs now raise. (The inner variable
        # is also renamed so it no longer shadows the mantissa `sign`.)
        if start < str_len and buff[start] == ord_plus:
            start += 1
        elif start < str_len and buff[start] == ord_minus:
            exp_sign = -1
            start += 1
        for pos in range(start, str_len):
            if ord_0 <= buff[pos] <= ord_9:
                has_number = True
                shift = shift * 10 + int(buff[pos] - ord_0)
                start += 1
            else:
                break
        exponent += exp_sign * shift
        if not has_number:
            raise _atof_error(str_ref)
    # check for f/F at the end
    if start < str_len and (buff[start] == ord_f or buff[start] == ord_F):
        start += 1
    # check if string got fully parsed
    if start != str_len:
        raise _atof_error(str_ref)
    # apply shift
    # NOTE: Instead of `var result *= 10.0 ** exponent`, we calculate a positive
    # integer factor as shift and multiply or divide by it based on the shift
    # direction. This allows for better precision.
    # TODO: investigate if there is a floating point arithmethic problem.
    var shift: Int = 10 ** abs(exponent)
    if exponent > 0:
        result *= shift
    if exponent < 0:
        result /= shift
    # apply sign
    return result * sign
fn atof(str: String) raises -> Float64:
    """Parses the given string as a floating point and returns that value.

    For example, `atof("2.25")` returns `2.25`.

    Raises:
        If the given string cannot be parsed as a floating point value, for
        example in `atof("hi")`.

    Args:
        str: A string to be parsed as a floating point.

    Returns:
        A floating point value that represents the string, or otherwise raises.
    """
    # Delegates to the StringRef implementation; the String outlives the call.
    return _atof(str._strref_dangerous())
# ===----------------------------------------------------------------------=== #
# isdigit
# ===----------------------------------------------------------------------=== #
fn isdigit(c: UInt8) -> Bool:
    """Determines whether the given character is a digit [0-9].

    Args:
        c: The character to check.

    Returns:
        True if the character is a digit.
    """
    # ASCII digits occupy the contiguous range "0".."9".
    return UInt8(ord("0")) <= c <= UInt8(ord("9"))
# ===----------------------------------------------------------------------=== #
# isupper
# ===----------------------------------------------------------------------=== #
fn isupper(c: UInt8) -> Bool:
    """Determines whether the given character is an uppercase character.

    This currently only respects the default "C" locale, i.e. returns True iff
    the character specified is one of "ABCDEFGHIJKLMNOPQRSTUVWXYZ".

    Args:
        c: The character to check.

    Returns:
        True if the character is uppercase.
    """
    # Thin public wrapper over the ASCII-only helper.
    return _is_ascii_uppercase(c)
fn _is_ascii_uppercase(c: UInt8) -> Bool:
    # ASCII uppercase letters occupy the contiguous range "A".."Z".
    return UInt8(ord("A")) <= c <= UInt8(ord("Z"))
# ===----------------------------------------------------------------------=== #
# islower
# ===----------------------------------------------------------------------=== #
fn islower(c: UInt8) -> Bool:
    """Determines whether the given character is an lowercase character.

    This currently only respects the default "C" locale, i.e. returns True iff
    the character specified is one of "abcdefghijklmnopqrstuvwxyz".

    Args:
        c: The character to check.

    Returns:
        True if the character is lowercase.
    """
    # Thin public wrapper over the ASCII-only helper.
    return _is_ascii_lowercase(c)
fn _is_ascii_lowercase(c: UInt8) -> Bool:
    # ASCII lowercase letters occupy the contiguous range "a".."z".
    return UInt8(ord("a")) <= c <= UInt8(ord("z"))
# ===----------------------------------------------------------------------=== #
# _isspace
# ===----------------------------------------------------------------------=== #
fn _isspace(c: UInt8) -> Bool:
    """Determines whether the given character is a whitespace character.

    This only respects the default "C" locale, i.e. returns True only if the
    character specified is one of " \\t\\n\\r\\f\\v". For semantics similar
    to Python, use `String.isspace()`.

    Args:
        c: The character to check.

    Returns:
        True iff the character is one of the whitespace characters listed above.
    """
    # NOTE: a global LUT doesn't work at compile time so we can't use it here.
    alias ord_space = UInt8(ord(" "))
    alias ord_tab = UInt8(ord("\t"))
    alias ord_newline = UInt8(ord("\n"))
    alias ord_carriage_return = UInt8(ord("\r"))
    alias ord_form_feed = UInt8(ord("\f"))
    alias ord_vertical_tab = UInt8(ord("\v"))

    # This compiles to something very clever that's even faster than a LUT.
    return (
        c == ord_space
        or c == ord_tab
        or c == ord_newline
        or c == ord_carriage_return
        or c == ord_form_feed
        or c == ord_vertical_tab
    )
# ===----------------------------------------------------------------------=== #
# _isnewline
# ===----------------------------------------------------------------------=== #
fn _isnewline(s: String) -> Bool:
    # Returns True iff `s` is a single-character line-break string:
    # one of \n, \r, \f, \v, \x1c, \x1d, \x1e, or \x85.
    # A 2-byte buffer holds exactly one character plus the null terminator.
    if len(s._buffer) != 2:
        return False

    # TODO: add \u2028 and \u2029 when they are properly parsed
    # FIXME: \x85 is parsed but not encoded in utf-8
    if s == "\x85":
        return True

    # NOTE: a global LUT doesn't work at compile time so we can't use it here.
    alias `\n` = UInt8(ord("\n"))
    alias `\r` = UInt8(ord("\r"))
    alias `\f` = UInt8(ord("\f"))
    alias `\v` = UInt8(ord("\v"))
    alias `\x1c` = UInt8(ord("\x1c"))
    alias `\x1d` = UInt8(ord("\x1d"))
    alias `\x1e` = UInt8(ord("\x1e"))
    var c = UInt8(ord(s))
    return (
        c == `\n`
        or c == `\r`
        or c == `\f`
        or c == `\v`
        or c == `\x1c`
        or c == `\x1d`
        or c == `\x1e`
    )
# ===----------------------------------------------------------------------=== #
# isprintable
# ===----------------------------------------------------------------------=== #
fn isprintable(c: UInt8) -> Bool:
    """Determines whether the given character is a printable character.

    Args:
        c: The character to check.

    Returns:
        True if the character is a printable character, otherwise False.
    """
    # Printable ASCII is the contiguous range from " " (0x20) to "~" (0x7E).
    return UInt8(ord(" ")) <= c <= UInt8(ord("~"))
# ===----------------------------------------------------------------------=== #
# String
# ===----------------------------------------------------------------------=== #
struct String(
    Sized,
    Stringable,
    Representable,
    IntableRaising,
    KeyElement,
    Comparable,
    Boolable,
    Formattable,
    ToFormatter,
):
    """Represents a mutable string."""

    # Fields
    alias _buffer_type = List[UInt8]
    var _buffer: Self._buffer_type
    """The underlying storage for the string."""
    # NOTE: when initialized, the buffer always ends with a null terminator
    # byte (see the constructors); `__len__` subtracts it from the length.

    """ Useful string aliases. """
    # Compile-time character-class constants for parsing and validation.
    alias ASCII_LOWERCASE = String("abcdefghijklmnopqrstuvwxyz")
    alias ASCII_UPPERCASE = String("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    alias ASCII_LETTERS = String.ASCII_LOWERCASE + String.ASCII_UPPERCASE
    alias DIGITS = String("0123456789")
    alias HEX_DIGITS = String.DIGITS + String("abcdef") + String("ABCDEF")
    alias OCT_DIGITS = String("01234567")
    alias PUNCTUATION = String("""!"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~""")
    alias PRINTABLE = (
        String.DIGITS
        + String.ASCII_LETTERS
        + String.PUNCTUATION
        + " \t\n\r\v\f"  # single byte utf8 whitespaces
    )
# ===------------------------------------------------------------------=== #
# Life cycle methods
# ===------------------------------------------------------------------=== #
    @always_inline
    fn __init__(inout self, owned impl: List[UInt8]):
        """Construct a string from a buffer of bytes.

        The buffer must be terminated with a null byte:

        ```mojo
        var buf = List[UInt8]()
        buf.append(ord('H'))
        buf.append(ord('i'))
        buf.append(0)
        var hi = String(buf)
        ```

        Args:
            impl: The buffer.
        """
        # Ownership of the buffer is taken as-is; only the terminator is
        # validated (and only in debug builds).
        debug_assert(
            impl[-1] == 0,
            "expected last element of String buffer to be null terminator",
        )
        self._buffer = impl^
    @always_inline
    fn __init__(inout self):
        """Construct an uninitialized string."""
        # Empty buffer; no allocation and no null terminator yet
        # (`__len__` special-cases the uninitialized pointer).
        self._buffer = Self._buffer_type()
    @always_inline
    fn __init__(inout self, str: StringRef):
        """Construct a string from a StringRef object.

        Args:
            str: The StringRef from which to construct this string object.
        """
        var length = len(str)
        var buffer = Self._buffer_type()
        # +1 for null terminator, initialized to 0
        buffer.resize(length + 1, 0)
        # Copy the referenced bytes; the terminator stays from the resize.
        memcpy(dest=buffer.data, src=str.data, count=length)
        self = Self(buffer^)
    @always_inline
    fn __init__(inout self, str_slice: StringSlice):
        """Construct a string from a string slice.

        This will allocate a new string that copies the string contents from
        the provided string slice `str_slice`.

        Args:
            str_slice: The string slice from which to construct this string.
        """

        # Calculate length in bytes
        var length: Int = len(str_slice.as_bytes_slice())
        var buffer = Self._buffer_type()
        # +1 for null terminator, initialized to 0
        buffer.resize(length + 1, 0)
        memcpy(
            dest=buffer.data,
            src=str_slice.as_bytes_slice().unsafe_ptr(),
            count=length,
        )
        self = Self(buffer^)
    @always_inline
    fn __init__(inout self, literal: StringLiteral):
        """Constructs a String value given a constant string.

        Args:
            literal: The input constant string.
        """
        # StringLiteral.__str__ performs the copy into heap storage.
        self = literal.__str__()
    @always_inline
    fn __init__(inout self, ptr: UnsafePointer[UInt8], len: Int):
        """Creates a string from the buffer. Note that the string now owns
        the buffer.

        The buffer must be terminated with a null byte.

        Args:
            ptr: The pointer to the buffer.
            len: The length of the buffer, including the null terminator.
        """
        # we don't know the capacity of ptr, but we'll assume it's the same or
        # larger than len
        self = Self(
            Self._buffer_type(
                unsafe_pointer=ptr.bitcast[UInt8](), size=len, capacity=len
            )
        )
    @always_inline
    fn __init__(inout self, ptr: LegacyPointer[UInt8], len: Int):
        """Creates a string from the buffer. Note that the string now owns
        the buffer.

        The buffer must be terminated with a null byte.

        Args:
            ptr: The pointer to the buffer.
            len: The length of the buffer, including the null terminator.
        """
        # Adapt the legacy pointer to UnsafePointer and take ownership.
        self = Self(
            Self._buffer_type(
                unsafe_pointer=UnsafePointer(ptr.address),
                size=len,
                capacity=len,
            )
        )
    @always_inline
    fn __init__(inout self, ptr: DTypePointer[DType.uint8], len: Int):
        """Creates a string from the buffer. Note that the string now owns
        the buffer.

        The buffer must be terminated with a null byte.

        Args:
            ptr: The pointer to the buffer.
            len: The length of the buffer, including the null terminator.
        """
        # Forward to the LegacyPointer overload via the underlying address.
        self = String(ptr.address, len)
    @always_inline
    fn __init__(inout self, obj: PythonObject):
        """Creates a string from a python object.

        Args:
            obj: A python object.
        """
        # Uses the object's Stringable conformance (Python str()).
        self = str(obj)
    @always_inline
    fn __copyinit__(inout self, existing: Self):
        """Creates a deep copy of an existing string.

        Args:
            existing: The string to copy.
        """
        # List's copy semantics duplicate the byte buffer.
        self._buffer = existing._buffer
    @always_inline
    fn __moveinit__(inout self, owned existing: String):
        """Move the value of a string.

        Args:
            existing: The string to move.
        """
        # Transfer the buffer; no bytes are copied.
        self._buffer = existing._buffer^
# ===------------------------------------------------------------------=== #
# Factory dunders
# ===------------------------------------------------------------------=== #
    @staticmethod
    fn format_sequence[*Ts: Formattable](*args: *Ts) -> Self:
        """
        Construct a string by concatenating a sequence of formattable arguments.

        Args:
            args: A sequence of formattable arguments.

        Parameters:
            Ts: The types of the arguments to format. Each type must be satisfy
                `Formattable`.

        Returns:
            A string formed by formatting the argument sequence.
        """

        var output = String()
        # The formatter appends directly into `output`; it must not outlive it.
        var writer = output._unsafe_to_formatter()

        @parameter
        fn write_arg[T: Formattable](arg: T):
            arg.format_to(writer)

        args.each[write_arg]()

        return output^
    @staticmethod
    @always_inline
    fn _from_bytes(owned buff: DTypePointer[DType.uint8]) -> String:
        """Construct a string from a sequence of bytes.

        This does no validation that the given bytes are valid in any specific
        String encoding.

        Args:
            buff: The buffer. This should have an existing terminator.
        """
        # Length is measured by scanning for the existing null terminator;
        # +1 so the terminator is included in the owned buffer.
        return String(buff, len(StringRef(buff)) + 1)
    @staticmethod
    fn _from_bytes(owned buff: Self._buffer_type) -> String:
        """Construct a string from a sequence of bytes.

        This does no validation that the given bytes are valid in any specific
        String encoding.

        Args:
            buff: The buffer.
        """

        # If a terminator does not already exist, then add it.
        if buff[-1]:
            buff.append(0)

        return String(buff^)
# ===------------------------------------------------------------------=== #
# Operator dunders
# ===------------------------------------------------------------------=== #
    fn __getitem__(self, idx: Int) -> String:
        """Gets the character at the specified position.

        Args:
            idx: The index value.

        Returns:
            A new string containing the character at the specified position.
        """
        # Normalize a negative index by recursing once with it wrapped.
        if idx < 0:
            return self.__getitem__(len(self) + idx)

        debug_assert(0 <= idx < len(self), "index must be in range")
        var buf = Self._buffer_type(capacity=1)
        buf.append(self._buffer[idx])
        buf.append(0)
        return String(buf^)
@always_inline
fn __getitem__(self, span: Slice) -> String:
"""Gets the sequence of characters at the specified positions.
Args:
span: A slice that specifies positions of the new substring.
Returns:
A new string containing the string at the specified positions.
"""
var adjusted_span = self._adjust_span(span)
var adjusted_span_len = adjusted_span.unsafe_indices()
if adjusted_span.step == 1:
return StringRef(self._buffer.data + span.start, adjusted_span_len)
var buffer = Self._buffer_type()
buffer.resize(adjusted_span_len + 1, 0)
var ptr = self.unsafe_uint8_ptr()
for i in range(adjusted_span_len):
buffer[i] = ptr[adjusted_span[i]]
buffer[adjusted_span_len] = 0
return Self(buffer^)
    @always_inline
    fn __eq__(self, other: String) -> Bool:
        """Compares two Strings if they have the same values.

        Args:
            other: The rhs of the operation.

        Returns:
            True if the Strings are equal and False otherwise.
        """
        # Defined as the negation of __ne__, which compares the bytes.
        return not (self != other)
    @always_inline
    fn __ne__(self, other: String) -> Bool:
        """Compares two Strings if they do not have the same values.

        Args:
            other: The rhs of the operation.

        Returns:
            True if the Strings are not equal and False otherwise.
        """
        # Delegates to StringRef's byte-wise comparison.
        return self._strref_dangerous() != other._strref_dangerous()
    @always_inline
    fn __lt__(self, rhs: String) -> Bool:
        """Compare this String to the RHS using LT comparison.

        Args:
            rhs: The other String to compare against.

        Returns:
            True if this String is strictly less than the RHS String and False otherwise.
        """
        # Delegates to StringRef's ordering over the underlying bytes.
        return self._strref_dangerous() < rhs._strref_dangerous()
    @always_inline
    fn __le__(self, rhs: String) -> Bool:
        """Compare this String to the RHS using LE comparison.

        Args:
            rhs: The other String to compare against.

        Returns:
            True iff this String is less than or equal to the RHS String.
        """
        # a <= b  <=>  not (b < a)
        return not (rhs < self)
    @always_inline
    fn __gt__(self, rhs: String) -> Bool:
        """Compare this String to the RHS using GT comparison.

        Args:
            rhs: The other String to compare against.

        Returns:
            True iff this String is strictly greater than the RHS String.
        """
        # a > b  <=>  b < a
        return rhs < self
    @always_inline
    fn __ge__(self, rhs: String) -> Bool:
        """Compare this String to the RHS using GE comparison.

        Args:
            rhs: The other String to compare against.

        Returns:
            True iff this String is greater than or equal to the RHS String.
        """
        # a >= b  <=>  not (a < b)
        return not (self < rhs)
    @always_inline
    fn __add__(self, other: String) -> String:
        """Creates a string by appending another string at the end.

        Args:
            other: The string to append.

        Returns:
            The new constructed string.
        """
        # Fast paths: concatenating with an empty operand returns a copy of
        # the other operand.
        if not self:
            return other
        if not other:
            return self
        var self_len = len(self)
        var other_len = len(other)
        var total_len = self_len + other_len
        var buffer = Self._buffer_type()
        buffer.resize(total_len + 1, 0)
        memcpy(
            DTypePointer(buffer.data),
            self.unsafe_uint8_ptr(),
            self_len,
        )
        memcpy(
            DTypePointer(buffer.data + self_len),
            other.unsafe_uint8_ptr(),
            other_len + 1,  # Also copy the terminator
        )
        return Self(buffer^)
@always_inline
fn __radd__(self, other: String) -> String:
"""Creates a string by prepending another string to the start.
Args:
other: The string to prepend.
Returns:
The new constructed string.
"""
return other + self
    @always_inline
    fn __iadd__(inout self, other: String):
        """Appends another string to this string.

        Args:
            other: The string to append.
        """
        # Fast paths for an empty destination or an empty source.
        if not self:
            self = other
            return
        if not other:
            return
        var self_len = len(self)
        var other_len = len(other)
        var total_len = self_len + other_len
        # Grow in place, then append `other`'s bytes after our own.
        self._buffer.resize(total_len + 1, 0)
        # Copy the data alongside the terminator.
        memcpy(
            dest=self.unsafe_ptr() + self_len,
            src=other.unsafe_ptr(),
            count=other_len + 1,
        )
# ===------------------------------------------------------------------=== #
# Trait implementations
# ===------------------------------------------------------------------=== #
@always_inline
fn __bool__(self) -> Bool:
"""Checks if the string is not empty.
Returns:
True if the string length is greater than zero, and False otherwise.
"""
return len(self) > 0
    @always_inline
    fn __len__(self) -> Int:
        """Returns the string length.

        Returns:
            The string length.
        """
        # Avoid returning -1 if the buffer is not initialized
        if not self.unsafe_ptr():
            return 0

        # The negative 1 is to account for the terminator.
        return len(self._buffer) - 1
    @always_inline
    fn __str__(self) -> String:
        """Returns this string. Required by the `Stringable` trait.

        Returns:
            This string value.
        """
        return self
    @always_inline
    fn __repr__(self) -> String:
        """Return a Mojo-compatible representation of the `String` instance.

        Returns:
            A new representation of the string.
        """
        alias ord_squote = ord("'")
        var result = String()
        var use_dquote = False

        # Escape each byte; quote with " iff the content contains '.
        for idx in range(len(self._buffer) - 1):
            var char = self._buffer[idx]
            result += _repr_ascii(char)
            use_dquote = use_dquote or (char == ord_squote)

        if use_dquote:
            return '"' + result + '"'
        else:
            return "'" + result + "'"
# ===------------------------------------------------------------------=== #
# Methods
# ===------------------------------------------------------------------=== #
    @always_inline
    fn _adjust_span(self, span: Slice) -> Slice:
        """Adjusts the span based on the string length."""
        var adjusted_span = span

        # Normalize negative or missing bounds against the string length.
        if adjusted_span.start < 0:
            adjusted_span.start = len(self) + adjusted_span.start

        if not adjusted_span._has_end():
            adjusted_span.end = len(self)
        elif adjusted_span.end < 0:
            adjusted_span.end = len(self) + adjusted_span.end

        if span.step < 0:
            # Swap and shift the bounds so a negative step walks backwards
            # over the same elements.
            var tmp = adjusted_span.end
            adjusted_span.end = adjusted_span.start - 1
            adjusted_span.start = tmp - 1

        return adjusted_span
    fn format_to(self, inout writer: Formatter):
        """
        Formats this string to the provided formatter.

        Args:
            writer: The formatter to write to.
        """
        # Write the bytes as a string slice; no copy of the data is made here.
        writer.write_str(self.as_string_slice())
    fn _unsafe_to_formatter(inout self) -> Formatter:
        """
        Constructs a formatter that will write to this mutable string.

        Safety:
            The returned `Formatter` holds a mutable pointer to this `String`
            value. This `String` MUST outlive the `Formatter` instance.

        Returns:
            A `Formatter` whose writes append to this string.
        """

        # Type-erased callback: recover the String pointer and append.
        fn write_to_string(ptr0: UnsafePointer[NoneType], strref: StringRef):
            var ptr: UnsafePointer[String] = ptr0.bitcast[String]()

            # FIXME:
            #   String.__iadd__ currently only accepts a String, meaning this
            #   RHS will allocate unnecessarily.
            ptr[] += strref

        return Formatter(
            write_to_string,
            # Arg data
            UnsafePointer.address_of(self).bitcast[NoneType](),
        )
    fn join(self, *elems: Int) -> String:
        """Joins the elements from the tuple using the current string as a
        delimiter.

        Args:
            elems: The input tuple.

        Returns:
            The joined string.
        """
        if len(elems) == 0:
            return ""
        # Start from the first element; prepend the delimiter to the rest.
        var curr = str(elems[0])
        for i in range(1, len(elems)):
            curr += self + str(elems[i])
        return curr
    fn join[*Types: Stringable](self, *elems: *Types) -> String:
        """Joins string elements using the current string as a delimiter.

        Parameters:
            Types: The types of the elements.

        Args:
            elems: The input values.

        Returns:
            The joined string.
        """

        var result: String = ""
        # Only insert the delimiter before the 2nd and later elements.
        var is_first = True

        @parameter
        fn add_elt[T: Stringable](a: T):
            if is_first:
                is_first = False
            else:
                result += self
            result += str(a)

        elems.each[add_elt]()
        return result
    fn _strref_dangerous(self) -> StringRef:
        """
        Returns an inner pointer to the string as a StringRef.

        This functionality is extremely dangerous because Mojo eagerly releases
        strings.  Using this requires the use of the _strref_keepalive() method
        to keep the underlying string alive long enough.
        """
        # Excludes the null terminator (len(self) does not count it).
        return StringRef(self.unsafe_ptr(), len(self))
    fn _strref_keepalive(self):
        """
        A noop that keeps `self` alive through the call.  This
        can be carefully used with `_strref_dangerous()` to wield inner pointers
        without the string getting deallocated early.
        """
        # Intentionally empty: taking `self` extends its lifetime to here.
        pass
    # TODO: Remove this method when #2317 is done
    fn unsafe_ptr(self) -> UnsafePointer[Int8]:
        """Retrieves a pointer to the underlying memory.

        Note that you should use `unsafe_uint8_ptr()` if you need to access the
        pointer as we are now storing the bytes as UInt8.

        See https://github.com/modularml/mojo/issues/2317 for more information.

        Returns:
            The pointer to the underlying memory.
        """
        # Legacy Int8 view of the UInt8 storage; same address, different type.
        return self._buffer.data.bitcast[Int8]()
    fn unsafe_uint8_ptr(self) -> UnsafePointer[UInt8]:
        """Retrieves a pointer to the underlying memory.

        Returns:
            The pointer to the underlying memory.
        """
        # The bitcast is a no-op: the buffer already stores UInt8.
        return self._buffer.data.bitcast[UInt8]()
    fn as_bytes(self) -> List[UInt8]:
        """Retrieves the underlying byte sequence encoding the characters in
        this string.

        This does not include the trailing null terminator.

        Returns:
            A sequence containing the encoded characters stored in this string.
        """

        # TODO(lifetimes): Return a reference rather than a copy
        var copy = self._buffer
        # Remove the null terminator from the returned copy.
        var last = copy.pop()
        debug_assert(
            last == 0,
            "expected last element of String buffer to be null terminator",
        )

        return copy
    @always_inline
    fn as_bytes_slice(
        self: Reference[Self, _, _]
    ) -> Span[UInt8, self.is_mutable, self.lifetime]:
        """
        Returns a contiguous slice of the bytes owned by this string.

        This does not include the trailing null terminator.

        Returns:
            A contiguous slice pointing to the bytes owned by this string.
        """
        # Non-owning view; valid only while this string is alive.
        return Span[UInt8, self.is_mutable, self.lifetime](
            unsafe_ptr=self[]._buffer.unsafe_ptr(),
            # Does NOT include the NUL terminator.
            len=self[]._byte_length(),
        )
    @always_inline
    fn as_string_slice(
        self: Reference[Self, _, _]
    ) -> StringSlice[self.is_mutable, self.lifetime]:
        """Returns a string slice of the data owned by this string.

        Returns:
            A string slice pointing to the data owned by this string.
        """
        var bytes = self[].as_bytes_slice()

        # FIXME(MSTDL-160):
        #   Enforce UTF-8 encoding in String so this is actually
        #   guaranteed to be valid.
        return StringSlice[self.is_mutable, self.lifetime](
            unsafe_from_utf8=bytes
        )
fn _byte_length(self) -> Int:
"""Get the string length in bytes.
This does not include the trailing null terminator in the count.
Returns:
The length of this StringLiteral in bytes, excluding null terminator.
"""
var buffer_len = len(self._buffer)
if buffer_len > 0:
return buffer_len - 1
else:
return buffer_len
    fn _steal_ptr(inout self) -> DTypePointer[DType.int8]:
        """Transfer ownership of pointer to the underlying memory.
        The caller is responsible for freeing up the memory.

        Returns:
            The pointer to the underlying memory.
        """
        var ptr = self.unsafe_ptr()
        # Detach the buffer so this string's destructor won't free the memory.
        self._buffer.data = UnsafePointer[UInt8]()
        self._buffer.size = 0
        self._buffer.capacity = 0
        return ptr
    fn count(self, substr: String) -> Int:
        """Return the number of non-overlapping occurrences of substring
        `substr` in the string.

        If sub is empty, returns the number of empty strings between characters
        which is the length of the string plus one.

        Args:
          substr: The substring to count.

        Returns:
          The number of occurrences of `substr`.
        """
        if not substr:
            return len(self) + 1

        var res = 0
        var offset = 0

        # Scan left to right, skipping past each full match (non-overlapping).
        while True:
            var pos = self.find(substr, offset)
            if pos == -1:
                break
            res += 1

            offset = pos + len(substr)

        return res
    fn __contains__(self, substr: String) -> Bool:
        """Returns True if the substring is contained within the current string.

        Args:
          substr: The substring to check.

        Returns:
          True if the string contains the substring.
        """
        # Delegates to StringRef's substring containment.
        return substr._strref_dangerous() in self._strref_dangerous()
    fn find(self, substr: String, start: Int = 0) -> Int:
        """Finds the offset of the first occurrence of `substr` starting at
        `start`. If not found, returns -1.

        Args:
          substr: The substring to find.
          start: The offset from which to find.

        Returns:
          The offset of `substr` relative to the beginning of the string.
        """
        # Delegates to StringRef; the returned offset is byte-based.
        return self._strref_dangerous().find(
            substr._strref_dangerous(), start=start
        )
    fn rfind(self, substr: String, start: Int = 0) -> Int:
        """Finds the offset of the last occurrence of `substr` starting at
        `start`. If not found, returns -1.

        Args:
          substr: The substring to find.
          start: The offset from which to find.

        Returns:
          The offset of `substr` relative to the beginning of the string.
        """
        # Delegates to StringRef; the returned offset is byte-based.
        return self._strref_dangerous().rfind(
            substr._strref_dangerous(), start=start
        )
    fn isspace(self) -> Bool:
        """Determines whether the given String is a python
        whitespace String. This corresponds to Python's
        [universal separators](
            https://docs.python.org/3/library/stdtypes.html#str.splitlines)
        `" \\t\\n\\r\\f\\v\\x1c\\x1e\\x85\\u2028\\u2029"`.

        Returns:
            True if the String is one of the whitespace characters
                listed above, otherwise False."""
        # TODO add line and paragraph separator as stringliteral
        # once unicode escape secuences are accepted
        # 0 is to build a String with null terminator
        # NOTE(review): per the TODOs below, these byte lists spell out the
        # *escape text* (e.g. the 4 chars '\', 'x', '1', 'c'), not the actual
        # control characters — placeholders until unicode escapes are parsed.
        alias information_sep_four = List[UInt8](0x5C, 0x78, 0x31, 0x63, 0)
        """TODO: \\x1c"""
        alias information_sep_two = List[UInt8](0x5C, 0x78, 0x31, 0x65, 0)
        """TODO: \\x1e"""
        alias next_line = List[UInt8](0x78, 0x38, 0x35, 0)
        """TODO: \\x85"""
        alias unicode_line_sep = List[UInt8](
            0x20, 0x5C, 0x75, 0x32, 0x30, 0x32, 0x38, 0
        )
        """TODO: \\u2028"""
        alias unicode_paragraph_sep = List[UInt8](
            0x20, 0x5C, 0x75, 0x32, 0x30, 0x32, 0x39, 0
        )
        """TODO: \\u2029"""

        @always_inline
        fn compare(item1: List[UInt8], item2: List[UInt8], amnt: Int) -> Bool:
            var ptr1 = DTypePointer(item1.unsafe_ptr())
            var ptr2 = DTypePointer(item2.unsafe_ptr())
            return memcmp(ptr1, ptr2, amnt) == 0

        # Dispatch on length: 1 = single-byte C whitespace, longer lengths
        # match the placeholder escape-text sequences above.
        if len(self) == 1:
            return _isspace(self._buffer.unsafe_get(0)[])
        elif len(self) == 3:
            return compare(self._buffer, next_line, 3)
        elif len(self) == 4:
            return compare(self._buffer, information_sep_four, 4) or compare(
                self._buffer, information_sep_two, 4
            )
        elif len(self) == 7:
            return compare(self._buffer, unicode_line_sep, 7) or compare(
                self._buffer, unicode_paragraph_sep, 7
            )
        return False
fn split(self, sep: String, maxsplit: Int = -1) raises -> List[String]:
"""Split the string by a separator.
Args:
sep: The string to split on.
maxsplit: The maximum amount of items to split from String.
Defaults to unlimited.
Returns:
A List of Strings containing the input split by the separator.
Examples:
```mojo
# Splitting a space
_ = String("hello world").split(" ") # ["hello", "world"]
# Splitting adjacent separators
_ = String("hello,,world").split(",") # ["hello", "", "world"]
# Splitting with maxsplit
_ = String("1,2,3").split(",", 1) # ['1', '2,3']
```
.
"""
var output = List[String]()
var str_iter_len = len(self) - 1
var lhs = 0
var rhs = 0
var items = 0
var sep_len = len(sep)
if sep_len == 0:
raise Error("ValueError: empty separator")
while lhs <= str_iter_len:
rhs = self.find(sep, lhs)
if rhs == -1:
output.append(self[lhs:])
break
if maxsplit > -1:
if items == maxsplit:
output.append(self[lhs:])
break
items += 1
output.append(self[lhs:rhs])
lhs = rhs + sep_len
if self.endswith(sep):
output.append("")
return output
    fn split(self, *, maxsplit: Int = -1) -> List[String]:
        """Split the string by every Whitespace separator.

        Currently only uses C style separators.

        Args:
            maxsplit: The maximum amount of items to split from String. Defaults
                to unlimited.

        Returns:
            A List of Strings containing the input split by the separator.

        Examples:

        ```mojo
        # Splitting an empty string or filled with whitespaces
        _ = String("      ").split() # []
        _ = String("").split() # []

        # Splitting a string with leading, trailing, and middle whitespaces
        _ = String("      hello    world     ").split() # ["hello", "world"]
        ```
        .
        """
        # TODO: implement and document splitting adjacent universal newlines:
        # _ = String(
        #     "hello \\t\\n\\r\\f\\v\\x1c\\x1e\\x85\\u2028\\u2029world"
        # ).split()  # ["hello", "world"]
        var output = List[String]()

        var str_iter_len = len(self) - 1
        var lhs = 0
        var rhs = 0
        var items = 0
        # FIXME: this should iterate and build unicode strings
        # and use self.isspace()
        while lhs <= str_iter_len:
            # Python adds all "whitespace chars" as one separator
            # if no separator was specified
            while lhs <= str_iter_len:
                if not _isspace(self._buffer.unsafe_get(lhs)[]):
                    break
                lhs += 1
            # if it went until the end of the String, then
            # it should be sliced up until the original
            # start of the whitespace which was already appended
            if lhs - 1 == str_iter_len:
                break
            elif lhs == str_iter_len:
                # if the last char is not whitespace
                output.append(self[str_iter_len])
                break
            # `rhs` advances to the end of the current non-whitespace run.
            rhs = lhs + 1
            while rhs <= str_iter_len:
                if _isspace(self._buffer.unsafe_get(rhs)[]):
                    break
                rhs += 1

            if maxsplit > -1:
                if items == maxsplit:
                    output.append(self[lhs:])
                    break
                items += 1

            output.append(self[lhs:rhs])
            lhs = rhs
        return output
    fn splitlines(self, keepends: Bool = False) -> List[String]:
        """Split the string at line boundaries.

        Args:
            keepends: If True, line breaks are kept in the resulting strings.

        Returns:
            A List of Strings containing the input split by line boundaries.
        """
        var output = List[String]()
        var length = len(self)
        var current_offset = 0

        while current_offset < length:
            var loc = -1
            var eol_length = 1

            # Find the next line-break character from the current offset.
            for i in range(current_offset, length):
                var char = self[i]
                var next_char = self[i + 1] if i + 1 < length else ""

                if _isnewline(char):
                    loc = i
                    # Treat "\r\n" as a single two-character line break.
                    if char == "\r" and next_char == "\n":
                        eol_length = 2
                    break
            else:
                # for/else: no line break was found — the remainder is the
                # last line; break out of the enclosing while.
                output.append(self[current_offset:])
                break

            if keepends:
                output.append(self[current_offset : loc + eol_length])
            else:
                output.append(self[current_offset:loc])

            current_offset = loc + eol_length

        return output
fn replace(self, old: String, new: String) -> String:
"""Return a copy of the string with all occurrences of substring `old`
if replaced by `new`.
Args:
old: The substring to replace.
new: The substring to replace with.
Returns:
The string where all occurrences of `old` are replaced with `new`.
"""
if not old:
return self._interleave(new)
var occurrences = self.count(old)
if occurrences == -1:
return self
var self_start = self.unsafe_uint8_ptr()
var self_ptr = self.unsafe_uint8_ptr()
var new_ptr = new.unsafe_uint8_ptr()
var self_len = len(self)
var old_len = len(old)
var new_len = len(new)
var res = List[UInt8]()
res.reserve(self_len + (old_len - new_len) * occurrences + 1)
for _ in range(occurrences):
var curr_offset = int(self_ptr) - int(self_start)
var idx = self.find(old, curr_offset)
debug_assert(idx >= 0, "expected to find occurrence during find")
# Copy preceding unchanged chars
for _ in range(curr_offset, idx):
res.append(self_ptr[])
self_ptr += 1
# Insert a copy of the new replacement string
for i in range(new_len):
res.append(new_ptr[i])
self_ptr += old_len
while True:
var val = self_ptr[]
if val == 0:
break
res.append(self_ptr[])
self_ptr += 1
res.append(0)
return String(res^)
fn strip(self, chars: String) -> String:
"""Return a copy of the string with leading and trailing characters
removed.
Args:
chars: A set of characters to be removed. Defaults to whitespace.
Returns:
A copy of the string with no leading or trailing characters.
"""
return self.lstrip(chars).rstrip(chars)
fn strip(self) -> String:
"""Return a copy of the string with leading and trailing whitespaces
removed.
Returns:
A copy of the string with no leading or trailing whitespaces.
"""
return self.lstrip().rstrip()
    fn rstrip(self, chars: String) -> String:
        """Return a copy of the string with trailing characters removed.

        Args:
            chars: A set of characters to be removed. Defaults to whitespace.

        Returns:
            A copy of the string with no trailing characters.
        """

        # self[r_idx - 1] yields a one-character String; `in` is a
        # substring-membership test against `chars`.
        var r_idx = len(self)
        while r_idx > 0 and self[r_idx - 1] in chars:
            r_idx -= 1

        return self[:r_idx]
fn rstrip(self) -> String:
    """Return a copy of the string with trailing whitespace removed.

    Returns:
        A copy of the string with no trailing whitespace.
    """
    # TODO: should use self.__iter__ and self.isspace()
    var end = len(self)
    # Walk backwards while the last remaining byte is whitespace.
    while end > 0:
        if not _isspace(self._buffer.unsafe_get(end - 1)[]):
            break
        end -= 1
    return self[:end]
fn lstrip(self, chars: String) -> String:
    """Return a copy of the string with leading characters from `chars`
    removed.

    Args:
        chars: A set of characters to be removed.

    Returns:
        A copy of the string with no leading characters from `chars`.
    """
    var begin = 0
    var size = len(self)
    # Advance past every leading character that occurs in `chars`.
    while begin < size:
        if self[begin] not in chars:
            break
        begin += 1
    return self[begin:]
fn lstrip(self) -> String:
    """Return a copy of the string with leading whitespace removed.

    Returns:
        A copy of the string with no leading whitespace.
    """
    # TODO: should use self.__iter__ and self.isspace()
    var begin = 0
    var size = len(self)
    # Advance past every leading whitespace byte.
    while begin < size:
        if not _isspace(self._buffer.unsafe_get(begin)[]):
            break
        begin += 1
    return self[begin:]
fn __hash__(self) -> Int:
    """Hash the underlying buffer using builtin hash.

    Returns:
        A 64-bit hash value. This value is _not_ suitable for cryptographic
        uses. Its intended usage is for data structures. See the `hash`
        builtin documentation for more details.
    """
    # NOTE(review): hashes through a temporary StringRef view of the
    # internal buffer; the view must not outlive `self`.
    return hash(self._strref_dangerous())
fn _interleave(self, val: String) -> String:
    """Return a new string with `val` inserted before every character of
    `self` (helper used by `replace` when the search substring is empty).

    Args:
        val: The string inserted before each character of `self`.

    Returns:
        The interleaved, null-terminated string.
    """
    var res = List[UInt8]()
    var val_ptr = val.unsafe_uint8_ptr()
    var self_ptr = self.unsafe_uint8_ptr()
    # NOTE(review): this reserve is only a lower bound — the result holds
    # len(self) * (len(val) + 1) + 1 bytes; the list grows as needed.
    res.reserve(len(val) * len(self) + 1)
    for i in range(len(self)):
        # Emit the full separator, then the original character.
        for j in range(len(val)):
            res.append(val_ptr[j])
        res.append(self_ptr[i])
    res.append(0)  # trailing null terminator
    return String(res^)
fn lower(self) -> String:
    """Returns a copy of the string with all ASCII cased characters
    converted to lowercase.

    Returns:
        A new string where cased letters have been converted to lowercase.
    """
    # TODO(#26444):
    # Support the Unicode standard casing behavior to handle cased letters
    # outside of the standard ASCII letters.
    # Flip the case bit of every byte that is currently ASCII uppercase.
    return self._toggle_ascii_case[_is_ascii_uppercase]()
fn upper(self) -> String:
    """Returns a copy of the string with all ASCII cased characters
    converted to uppercase.

    Returns:
        A new string where cased letters have been converted to uppercase.
    """
    # TODO(#26444):
    # Support the Unicode standard casing behavior to handle cased letters
    # outside of the standard ASCII letters.
    # Flip the case bit of every byte that is currently ASCII lowercase.
    return self._toggle_ascii_case[_is_ascii_lowercase]()
@always_inline
fn _toggle_ascii_case[check_case: fn (UInt8) -> Bool](self) -> String:
    """Return a copy of `self` in which every byte matching `check_case`
    has its ASCII case bit flipped.

    Parameters:
        check_case: Predicate selecting which bytes get toggled.

    Returns:
        The case-toggled copy of the string.
    """
    var toggled: String = self
    var byte_ptr = toggled.unsafe_uint8_ptr()
    for idx in range(len(self)):
        var b = byte_ptr[idx]
        if check_case(b):
            byte_ptr[idx] = _toggle_ascii_case(b)
    return toggled
fn startswith(self, prefix: String, start: Int = 0, end: Int = -1) -> Bool:
    """Checks if the string starts with the specified prefix between start
    and end positions. Returns True if found and False otherwise.

    Args:
        prefix: The prefix to check.
        start: The start offset from which to check.
        end: The end offset from which to check (-1 means the string end).

    Returns:
        True if the self[start:end] is prefixed by the input prefix.
    """
    # Normalize the -1 sentinel to the string length, then perform a
    # single StringRef comparison over the selected window.
    var stop = len(self) if end == -1 else end
    return StringRef(self.unsafe_ptr() + start, stop - start).startswith(
        prefix._strref_dangerous()
    )
fn endswith(self, suffix: String, start: Int = 0, end: Int = -1) -> Bool:
    """Checks if the string ends with the specified suffix between start
    and end positions. Returns True if found and False otherwise.

    Args:
        suffix: The suffix to check.
        start: The start offset from which to check.
        end: The end offset from which to check (-1 means the string end).

    Returns:
        True if the self[start:end] is suffixed by the input suffix.
    """
    # Normalize the -1 sentinel to the string length, then perform a
    # single StringRef comparison over the selected window.
    var stop = len(self) if end == -1 else end
    return StringRef(self.unsafe_ptr() + start, stop - start).endswith(
        suffix._strref_dangerous()
    )
fn removeprefix(self, prefix: String, /) -> String:
    """Returns a new string with the prefix removed if it was present.

    For example:

    ```mojo
    print(String('TestHook').removeprefix('Test'))
    # 'Hook'
    print(String('BaseTestCase').removeprefix('Test'))
    # 'BaseTestCase'
    ```

    Args:
        prefix: The prefix to remove from the string.

    Returns:
        `string[len(prefix):]` if the string starts with the prefix string,
        or a copy of the original string otherwise.
    """
    # Guard-clause form: bail out early when the prefix does not match.
    if not self.startswith(prefix):
        return self
    return self[len(prefix) :]
fn removesuffix(self, suffix: String, /) -> String:
    """Returns a new string with the suffix removed if it was present.

    For example:

    ```mojo
    print(String('TestHook').removesuffix('Hook'))
    # 'Test'
    print(String('BaseTestCase').removesuffix('Test'))
    # 'BaseTestCase'
    ```

    Args:
        suffix: The suffix to remove from the string.

    Returns:
        `string[:-len(suffix)]` if the string ends with the suffix string,
        or a copy of the original string otherwise.
    """
    # Guard against the empty suffix: `endswith("")` is True, but slicing
    # with `-len("") == 0` would return `self[:0]` (the empty string)
    # instead of an unchanged copy.
    if len(suffix) > 0 and self.endswith(suffix):
        return self[: -len(suffix)]
    return self
fn __int__(self) raises -> Int:
    """Parses the given string as a base-10 integer and returns that value.

    For example, `int("19")` returns `19`. If the given string cannot be
    parsed as an integer value, an error is raised. For example, `int("hi")`
    raises an error.

    Returns:
        An integer value that represents the string, or otherwise raises.
    """
    # All parsing and validation is delegated to the atol() utility.
    return atol(self)
fn __mul__(self, n: Int) -> String:
    """Concatenates the string `n` times.

    Args:
        n : The number of times to concatenate the string.

    Returns:
        The string concatenated `n` times.
    """
    if n <= 0:
        return ""
    var chunk = len(self)
    # Allocate room for all copies plus the trailing null terminator,
    # which is already present because resize() zero-fills.
    var total = chunk * n + 1
    var buf = Self._buffer_type(capacity=total)
    buf.resize(total, 0)
    var offset = 0
    for _ in range(n):
        memcpy(
            dest=buf.data + offset,
            src=self.unsafe_uint8_ptr(),
            count=chunk,
        )
        offset += chunk
    return String(buf^)
# ===----------------------------------------------------------------------=== #
# Utilities
# ===----------------------------------------------------------------------=== #
fn _toggle_ascii_case(char: UInt8) -> UInt8:
    """Assuming char is a cased ASCII character, this function will return the
    opposite-cased letter.

    Note that the XOR is unconditional: a byte that is not a cased ASCII
    letter simply gets bit 0x20 flipped, so callers must pre-filter.
    """

    # ASCII defines A-Z and a-z as differing only in their 6th bit,
    # so converting is as easy as a bit flip.
    return char ^ (1 << 5)
fn _calc_initial_buffer_size_int32(n0: Int) -> Int:
    """Return the number of base-10 digits of `n0`.

    Assumes `n0` is non-negative and fits in 32 bits (the `UInt32` cast
    below truncates anything wider); callers pass `abs(...)` values.
    """
    # See https://commaok.xyz/post/lookup_tables/ and
    # https://lemire.me/blog/2021/06/03/computing-the-number-of-digits-of-an-integer-even-faster/
    # for a description.
    alias lookup_table = VariadicList[Int](
        4294967296,
        8589934582,
        8589934582,
        8589934582,
        12884901788,
        12884901788,
        12884901788,
        17179868184,
        17179868184,
        17179868184,
        21474826480,
        21474826480,
        21474826480,
        21474826480,
        25769703776,
        25769703776,
        25769703776,
        30063771072,
        30063771072,
        30063771072,
        34349738368,
        34349738368,
        34349738368,
        34349738368,
        38554705664,
        38554705664,
        38554705664,
        41949672960,
        41949672960,
        41949672960,
        42949672960,
        42949672960,
    )
    # log2 = floor(log2(n | 1)); `| 1` avoids countl_zero on zero and makes
    # the result 0 for n == 0 (which still counts as one digit).
    var n = UInt32(n0)
    var log2 = int((bitwidthof[DType.uint32]() - 1) ^ countl_zero(n | 1))
    # The table entry folds the log2->log10 correction into a single
    # add-and-shift.
    return (n0 + lookup_table[int(log2)]) >> 32
fn _calc_initial_buffer_size_int64(n0: UInt64) -> Int:
    """Return the number of base-10 digits of `n0` (at least 1, so 0 -> 1)."""
    var digits: Int = 1
    var remaining = n0
    # Strip four digits per iteration until at most four remain.
    while remaining >= 10_000:
        remaining //= 10_000
        digits += 4
    # At most four digits left; resolve them with descending threshold checks.
    if remaining >= 1_000:
        return digits + 3
    if remaining >= 100:
        return digits + 2
    if remaining >= 10:
        return digits + 1
    return digits
@always_inline
fn _calc_initial_buffer_size(n0: Int) -> Int:
    """Size in bytes of a buffer large enough to format `n0`, including a
    possible '-' sign and the trailing null terminator.
    """
    var n = abs(n0)
    # NOTE(review): n0 == 0 also reserves a sign byte; a harmless
    # over-estimate for a buffer-size hint.
    var sign = 0 if n0 > 0 else 1
    alias is_32bit_system = bitwidthof[DType.index]() == 32

    # Add 1 for the terminator
    @parameter
    if is_32bit_system:
        return sign + _calc_initial_buffer_size_int32(n) + 1

    # The value only has low-bits.
    if n >> 32 == 0:
        return sign + _calc_initial_buffer_size_int32(n) + 1
    return sign + _calc_initial_buffer_size_int64(n) + 1
fn _calc_initial_buffer_size(n: Float64) -> Int:
    """Fixed, conservative buffer size for formatting any Float64."""
    return 128 + 1  # Add 1 for the terminator
fn _calc_initial_buffer_size[type: DType](n0: Scalar[type]) -> Int:
    """Buffer size estimate for formatting `n0`: exact digit count (plus
    sign byte and terminator) for integral types, a fixed 129 bytes for
    everything else.
    """

    @parameter
    if type.is_integral():
        var n = abs(n0)
        # NOTE(review): like the Int overload, n0 == 0 reserves a sign byte.
        var sign = 0 if n0 > 0 else 1
        alias is_32bit_system = bitwidthof[DType.index]() == 32

        # Narrow values can use the cheaper 32-bit digit counter.
        @parameter
        if is_32bit_system or bitwidthof[type]() <= 32:
            return sign + _calc_initial_buffer_size_int32(int(n)) + 1
        else:
            return (
                sign
                + _calc_initial_buffer_size_int64(n.cast[DType.uint64]())
                + 1
            )

    return 128 + 1  # Add 1 for the terminator
fn _calc_format_buffer_size[type: DType]() -> Int:
    """
    Returns a buffer size in bytes that is large enough to store a formatted
    number of the specified type.

    Parameters:
        type: The DType of the number that will be formatted.
    """

    # TODO:
    #   Use a smaller size based on the `dtype`, e.g. we don't need as much
    #   space to store a formatted int8 as a float64.
    @parameter
    if type.is_integral():
        return 64 + 1
    else:
        return 128 + 1  # Add 1 for the terminator
| mojo/stdlib/src/builtin/string.mojo | false |
<filename>mojo/stdlib/src/builtin/string_literal.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the StringLiteral class.
These are Mojo built-ins, so you don't need to import them.
"""
from memory import DTypePointer
from utils import StringRef
from utils._visualizers import lldb_formatter_wrapping_type
from utils._format import Formattable, Formatter
from .string import _atol
# ===----------------------------------------------------------------------===#
# StringLiteral
# ===----------------------------------------------------------------------===#
@lldb_formatter_wrapping_type
@register_passable("trivial")
struct StringLiteral(
    Sized,
    IntableRaising,
    Stringable,
    Representable,
    KeyElement,
    Boolable,
    Formattable,
    Comparable,
):
    """This type represents a string literal.

    String literals are all null-terminated for compatibility with C APIs, but
    this is subject to change. String literals store their length as an integer,
    and this does not include the null terminator.
    """

    alias type = __mlir_type.`!kgen.string`

    var value: Self.type
    """The underlying storage for the string literal."""

    @always_inline("nodebug")
    fn __init__(inout self, value: Self.type):
        """Create a string literal from a builtin string type.

        Args:
            value: The string value.
        """
        self.value = value

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        """Get the string length.

        Returns:
            The length of this StringLiteral.
        """
        # TODO(MSTDL-160):
        # Properly count Unicode codepoints instead of returning this length
        # in bytes.
        return self._byte_length()

    @always_inline
    fn _byte_length(self) -> Int:
        """Get the string length in bytes.

        Returns:
            The length of this StringLiteral in bytes.
        """
        return __mlir_op.`pop.string.size`(self.value)

    @always_inline("nodebug")
    fn unsafe_ptr(self) -> UnsafePointer[Int8]:
        """Get raw pointer to the underlying data.

        Returns:
            The raw pointer to the data.
        """
        var ptr = DTypePointer[DType.int8](
            __mlir_op.`pop.string.address`(self.value)
        )
        return UnsafePointer[Int8]._from_dtype_ptr(ptr)

    @always_inline("nodebug")
    fn unsafe_uint8_ptr(self) -> UnsafePointer[UInt8]:
        """Get raw pointer to the underlying data.

        Returns:
            The raw pointer to the data.
        """
        return self.unsafe_ptr().bitcast[UInt8]()

    @always_inline("nodebug")
    fn as_uint8_ptr(self) -> DTypePointer[DType.uint8]:
        """Get raw pointer to the underlying data.

        Returns:
            The raw pointer to the data.
        """
        # NOTE(review): same body as `unsafe_uint8_ptr`; presumably the
        # UnsafePointer result converts to DTypePointer — verify.
        return self.unsafe_ptr().bitcast[UInt8]()

    @always_inline("nodebug")
    fn __bool__(self) -> Bool:
        """Convert the string to a bool value.

        Returns:
            True if the string is not empty.
        """
        return len(self) != 0

    @always_inline("nodebug")
    fn __add__(self, rhs: StringLiteral) -> StringLiteral:
        """Concatenate two string literals.

        Args:
            rhs: The string to concat.

        Returns:
            The concatenated string.
        """
        return __mlir_op.`pop.string.concat`(self.value, rhs.value)

    @always_inline("nodebug")
    fn __eq__(self, rhs: StringLiteral) -> Bool:
        """Compare two string literals for equality.

        Args:
            rhs: The string to compare.

        Returns:
            True if they are equal.
        """
        # Defined in terms of __ne__ so there is a single comparison path.
        return not (self != rhs)

    @always_inline("nodebug")
    fn __ne__(self, rhs: StringLiteral) -> Bool:
        """Compare two string literals for inequality.

        Args:
            rhs: The string to compare.

        Returns:
            True if they are not equal.
        """
        # Byte-wise comparison through the StringRef views.
        return StringRef(self) != StringRef(rhs)

    @always_inline("nodebug")
    fn __lt__(self, rhs: StringLiteral) -> Bool:
        """Compare this StringLiteral to the RHS using LT comparison.

        Args:
            rhs: The other StringLiteral to compare against.

        Returns:
            True if this StringLiteral is strictly less than the RHS StringLiteral and False otherwise.
        """
        return StringRef(self) < StringRef(rhs)

    @always_inline("nodebug")
    fn __le__(self, rhs: StringLiteral) -> Bool:
        """Compare this StringLiteral to the RHS using LE comparison.

        Args:
            rhs: The other StringLiteral to compare against.

        Returns:
            True if this StringLiteral is less than or equal to the RHS StringLiteral and False otherwise.
        """
        # a <= b  <=>  not (b < a)
        return not (rhs < self)

    @always_inline("nodebug")
    fn __gt__(self, rhs: StringLiteral) -> Bool:
        """Compare this StringLiteral to the RHS using GT comparison.

        Args:
            rhs: The other StringLiteral to compare against.

        Returns:
            True if this StringLiteral is strictly greater than the RHS StringLiteral and False otherwise.
        """
        # a > b  <=>  b < a
        return rhs < self

    @always_inline("nodebug")
    fn __ge__(self, rhs: StringLiteral) -> Bool:
        """Compare this StringLiteral to the RHS using GE comparison.

        Args:
            rhs: The other StringLiteral to compare against.

        Returns:
            True if this StringLiteral is greater than or equal to the RHS StringLiteral and False otherwise.
        """
        # a >= b  <=>  not (a < b)
        return not (self < rhs)

    fn __hash__(self) -> Int:
        """Hash the underlying buffer using builtin hash.

        Returns:
            A 64-bit hash value. This value is _not_ suitable for cryptographic
            uses. Its intended usage is for data structures. See the `hash`
            builtin documentation for more details.
        """
        return hash(self.unsafe_ptr(), len(self))

    fn __str__(self) -> String:
        """Convert the string literal to a string.

        Returns:
            A new string.
        """
        var string = String()
        var length: Int = __mlir_op.`pop.string.size`(self.value)
        var buffer = String._buffer_type()
        # Allocate length + 1 bytes so the copy below can be null-terminated.
        var new_capacity = length + 1
        buffer._realloc(new_capacity)
        buffer.size = new_capacity
        var uint8Ptr = __mlir_op.`pop.pointer.bitcast`[
            _type = __mlir_type.`!kgen.pointer<scalar<ui8>>`
        ](__mlir_op.`pop.string.address`(self.value))
        var data: DTypePointer[DType.uint8] = DTypePointer[DType.uint8](
            uint8Ptr
        )
        # Copy the literal's bytes, then write the trailing 0 terminator.
        memcpy(DTypePointer(buffer.data), data, length)
        initialize_pointee_move(buffer.data + length, 0)
        string._buffer = buffer^
        return string

    fn __repr__(self) -> String:
        """Return a representation of the `StringLiteral` instance.

        You don't need to call this method directly, use `repr("...")` instead.

        Returns:
            A new representation of the string.
        """
        return self.__str__().__repr__()

    @always_inline
    fn as_string_slice(self) -> StringSlice[False, ImmutableStaticLifetime]:
        """Returns a string slice of this static string literal.

        Returns:
            A string slice pointing to this static string literal.
        """
        var bytes = self.as_bytes_slice()

        # FIXME(MSTDL-160):
        #   Enforce UTF-8 encoding in StringLiteral so this is actually
        #   guaranteed to be valid.
        return StringSlice[False, ImmutableStaticLifetime](
            unsafe_from_utf8=bytes
        )

    @always_inline
    fn as_bytes_slice(self) -> Span[UInt8, False, ImmutableStaticLifetime]:
        """
        Returns a contiguous slice of the bytes owned by this string.

        Returns:
            A contiguous slice pointing to the bytes owned by this string.
        """
        var ptr = self.unsafe_uint8_ptr()

        # Literal data is static, hence the ImmutableStaticLifetime.
        return Span[UInt8, False, ImmutableStaticLifetime](
            unsafe_ptr=ptr,
            len=self._byte_length(),
        )

    fn format_to(self, inout writer: Formatter):
        """
        Formats this string literal to the provided formatter.

        Args:
            writer: The formatter to write to.
        """
        writer.write_str(self.as_string_slice())

    fn __contains__(self, substr: StringLiteral) -> Bool:
        """Returns True if the substring is contained within the current string.

        Args:
            substr: The substring to check.

        Returns:
            True if the string contains the substring.
        """
        return substr in StringRef(self)

    fn find(self, substr: StringLiteral, start: Int = 0) -> Int:
        """Finds the offset of the first occurrence of `substr` starting at
        `start`. If not found, returns -1.

        Args:
            substr: The substring to find.
            start: The offset from which to find.

        Returns:
            The offset of `substr` relative to the beginning of the string.
        """
        return StringRef(self).find(substr, start=start)

    fn rfind(self, substr: StringLiteral, start: Int = 0) -> Int:
        """Finds the offset of the last occurrence of `substr` starting at
        `start`. If not found, returns -1.

        Args:
            substr: The substring to find.
            start: The offset from which to find.

        Returns:
            The offset of `substr` relative to the beginning of the string.
        """
        return StringRef(self).rfind(substr, start=start)

    fn __int__(self) raises -> Int:
        """Parses the given string as a base-10 integer and returns that value.

        For example, `int("19")` returns `19`. If the given string cannot be parsed
        as an integer value, an error is raised. For example, `int("hi")` raises an
        error.

        Returns:
            An integer value that represents the string, or otherwise raises.
        """
        return _atol(self)
| mojo/stdlib/src/builtin/string_literal.mojo | false |
<filename>mojo/stdlib/src/builtin/swap.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the built-in `swap` function.
These are Mojo built-ins, so you don't need to import them.
"""
@always_inline
fn swap[T: Movable](inout lhs: T, inout rhs: T):
    """Swaps the two given arguments.

    Parameters:
        T: Constrained to Movable types.

    Args:
        lhs: Argument value swapped with rhs.
        rhs: Argument value swapped with lhs.
    """
    # Three-move rotation; no copies are made, so T only needs __moveinit__.
    var tmp = lhs^
    lhs = rhs^
    rhs = tmp^
| mojo/stdlib/src/builtin/swap.mojo | false |
<filename>mojo/stdlib/src/builtin/tuple.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the Tuple type.
These are Mojo built-ins, so you don't need to import them.
"""
from utils._visualizers import lldb_formatter_wrapping_type
from memory.unsafe_pointer import (
initialize_pointee_move,
initialize_pointee_copy,
move_pointee,
)
from sys.intrinsics import _type_is_eq
# ===----------------------------------------------------------------------===#
# Tuple
# ===----------------------------------------------------------------------===#
@lldb_formatter_wrapping_type
struct Tuple[*element_types: Movable](Sized, Movable):
    """The type of a literal tuple expression.

    A tuple consists of zero or more values, separated by commas.

    Parameters:
        element_types: The elements type.
    """

    alias _mlir_type = __mlir_type[
        `!kgen.pack<:!kgen.variadic<`,
        Movable,
        `> `,
        +element_types,
        `>`,
    ]

    var storage: Self._mlir_type
    """The underlying storage for the tuple."""

    @always_inline("nodebug")
    fn __init__(inout self, owned *args: *element_types):
        """Construct the tuple.

        Args:
            args: Initial values.
        """
        self = Self(storage=args^)

    @always_inline("nodebug")
    fn __init__(
        inout self,
        *,
        owned storage: VariadicPack[_, _, Movable, element_types],
    ):
        """Construct the tuple from a low-level internal representation.

        Args:
            storage: The variadic pack storage to construct from.
        """

        # Mark 'self.storage' as being initialized so we can work on it.
        __mlir_op.`lit.ownership.mark_initialized`(
            __get_mvalue_as_litref(self.storage)
        )

        @parameter
        fn initialize_elt[idx: Int]():
            move_pointee(
                dst=UnsafePointer(self[idx]),
                src=UnsafePointer(storage[idx]),
            )

        # Move each element into the tuple storage.
        unroll[initialize_elt, Self.__len__()]()

        # Mark the elements as already destroyed.
        storage._is_owned = False

    fn __del__(owned self):
        """Destructor that destroys all of the elements."""

        # Run the destructor on each member, the destructor of !kgen.pack is
        # trivial and won't do anything.
        @parameter
        fn destroy_elt[idx: Int]():
            destroy_pointee(UnsafePointer(self[idx]))

        unroll[destroy_elt, Self.__len__()]()

    @always_inline("nodebug")
    fn __moveinit__(inout self, owned existing: Self):
        """Move construct the tuple.

        Args:
            existing: The value to move from.
        """

        # Mark 'storage' as being initialized so we can work on it.
        __mlir_op.`lit.ownership.mark_initialized`(
            __get_mvalue_as_litref(self.storage)
        )

        @parameter
        fn initialize_elt[idx: Int]():
            # Move each element of `existing` into our freshly marked storage.
            move_pointee(
                src=UnsafePointer(existing[idx]), dst=UnsafePointer(self[idx])
            )

        unroll[initialize_elt, Self.__len__()]()

    @always_inline
    @staticmethod
    fn __len__() -> Int:
        """Return the number of elements in the tuple.

        Returns:
            The tuple length.
        """

        @parameter
        fn variadic_size(
            x: __mlir_type[`!kgen.variadic<`, Movable, `>`]
        ) -> Int:
            return __mlir_op.`pop.variadic.size`(x)

        alias result = variadic_size(element_types)
        return result

    @always_inline("nodebug")
    fn __len__(self) -> Int:
        """Get the number of elements in the tuple.

        Returns:
            The tuple length.
        """
        return Self.__len__()

    @always_inline("nodebug")
    fn __getitem__[
        idx: Int
    ](self: Reference[Self, _, _]) -> ref [self.lifetime] element_types[
        idx.value
    ]:
        """Get a reference to an element in the tuple.

        Parameters:
            idx: The element to return.

        Returns:
            A reference to the specified element.
        """
        # Return a reference to an element at the specified index, propagating
        # mutability of self.
        var storage_kgen_ptr = UnsafePointer.address_of(self[].storage).address

        # KGenPointer to the element.
        var elt_kgen_ptr = __mlir_op.`kgen.pack.gep`[index = idx.value](
            storage_kgen_ptr
        )
        # Use an immortal mut reference, which converts to self's lifetime.
        return UnsafePointer(elt_kgen_ptr)[]

    # TODO(#38268): Remove this method when references and parameter expressions
    # cooperate better. We can't handle the use in test_simd without this.
    @always_inline("nodebug")
    fn get[i: Int, T: Movable](self) -> ref [__lifetime_of(self)] T:
        """Get a tuple element and rebind to the specified type.

        Parameters:
            i: The element index.
            T: The element type.

        Returns:
            The tuple element at the requested index.
        """
        return rebind[Reference[T, False, __lifetime_of(self)]](
            Reference(self[i])
        )[]

    @always_inline("nodebug")
    fn __contains__[T: EqualityComparable](self, value: T) -> Bool:
        """Verify if a given value is present in the tuple.

        ```mojo
        var x = Tuple(1,2,True)
        if 1 in x: print("x contains 1")
        ```

        Args:
            value: The value to find.

        Parameters:
            T: The type of the value argument. Must implement the
              trait `EqualityComparable`.

        Returns:
            True if the value is contained in the tuple, False otherwise.
        """

        # First check at compile time whether T appears among the element
        # types at all; if not, the value cannot possibly be present.
        @parameter
        fn T_in_ts() -> Bool:
            @parameter
            for i in range(len(VariadicList(element_types))):

                @parameter
                if _type_is_eq[element_types[i], T]():
                    return True
            return False

        @parameter
        if not T_in_ts():
            return False

        # Compare only against elements whose static type matches T.
        @parameter
        for i in range(len(VariadicList(element_types))):

            @parameter
            if _type_is_eq[T, element_types[i]]():
                var elt_ptr = UnsafePointer.address_of(self[i]).bitcast[T]()
                if elt_ptr[].__eq__(value):
                    return True
        return False
| mojo/stdlib/src/builtin/tuple.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines some type aliases.
These are Mojo built-ins, so you don't need to import them.
"""
alias AnyTrivialRegType = __mlir_type.`!kgen.type`
"""Represents any register passable Mojo data type."""

alias NoneType = __mlir_type.`!kgen.none`
"""Represents the absence of a value."""

# Lifetime types: the <0>/<1> parameter encodes mutability
# (0 = immutable, 1 = mutable), mirroring AnyLifetime's is_mutable.
alias ImmutableLifetime = __mlir_type.`!lit.lifetime<0>`
"""Immutable lifetime reference type."""

alias MutableLifetime = __mlir_type.`!lit.lifetime<1>`
"""Mutable lifetime reference type."""

alias ImmutableStaticLifetime = __mlir_attr.`#lit.lifetime<0>: !lit.lifetime<0>`
"""The immutable lifetime that lasts for the entire duration of program execution."""

alias MutableStaticLifetime = __mlir_attr.`#lit.lifetime<1>: !lit.lifetime<1>`
"""The mutable lifetime that lasts for the entire duration of program execution."""
# Helper to build !lit.lifetime type.
# TODO: Should be a parametric alias.
struct AnyLifetime[is_mutable: Bool]:
    """This represents a lifetime reference of potentially parametric type.

    TODO: This should be replaced with a parametric type alias.

    Parameters:
        is_mutable: Whether the lifetime reference is mutable.
    """

    # Splices the boolean parameter into the !lit.lifetime<...> type.
    alias type = __mlir_type[
        `!lit.lifetime<`,
        is_mutable.value,
        `>`,
    ]
| mojo/stdlib/src/builtin/type_aliases.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines core value traits.
These are Mojo built-ins, so you don't need to import them.
"""
trait Movable:
    """The Movable trait denotes a type whose value can be moved.

    Implement the `Movable` trait on `Foo` which requires the `__moveinit__`
    method:

    ```mojo
    struct Foo(Movable):
        fn __init__(inout self):
            pass

        fn __moveinit__(inout self, owned existing: Self):
            print("moving")
    ```

    You can now use the ^ suffix to move the object instead of copying
    it inside generic functions:

    ```mojo
    fn return_foo[T: Movable](owned foo: T) -> T:
        return foo^

    var foo = Foo()
    var res = return_foo(foo^)
    ```

    ```plaintext
    moving
    ```
    """

    # The trailing `/` makes `existing` positional-only.
    fn __moveinit__(inout self, owned existing: Self, /):
        """Create a new instance of the value by moving the value of another.

        Args:
            existing: The value to move.
        """
        ...
trait Copyable:
    """The Copyable trait denotes a type whose value can be copied.

    Example implementing the `Copyable` trait on `Foo` which requires the `__copyinit__`
    method:

    ```mojo
    struct Foo(Copyable):
        var s: String

        fn __init__(inout self, s: String):
            self.s = s

        fn __copyinit__(inout self, other: Self):
            print("copying value")
            self.s = other.s
    ```

    You can now copy objects inside a generic function:

    ```mojo
    fn copy_return[T: Copyable](foo: T) -> T:
        var copy = foo
        return copy

    var foo = Foo("test")
    var res = copy_return(foo)
    ```

    ```plaintext
    copying value
    ```
    """

    # The trailing `/` makes `existing` positional-only.
    fn __copyinit__(inout self, existing: Self, /):
        """Create a new instance of the value by copying an existing one.

        Args:
            existing: The value to copy.
        """
        ...
trait ExplicitlyCopyable:
    """The ExplicitlyCopyable trait denotes a type whose value can be copied
    explicitly.

    Unlike `Copyable`, which denotes types that are _implicitly_ copyable, an
    explicitly copyable type can only be copied when the explicit copy
    initializer is called intentionally by the programmer.

    An explicit copy initializer is just a normal `__init__` method that takes
    a `borrowed` argument of `Self`.

    Example implementing the `ExplicitlyCopyable` trait on `Foo` which requires
    the `__init__(.., Self)` method:

    ```mojo
    struct Foo(ExplicitlyCopyable):
        var s: String

        fn __init__(inout self, s: String):
            self.s = s

        fn __init__(inout self, copy: Self):
            print("explicitly copying value")
            self.s = copy.s
    ```

    You can now copy objects inside a generic function:

    ```mojo
    fn copy_return[T: ExplicitlyCopyable](foo: T) -> T:
        var copy = T(foo)
        return copy

    var foo = Foo("test")
    var res = copy_return(foo)
    ```

    ```plaintext
    explicitly copying value
    ```
    """

    # Note: a regular initializer taking `Self` by borrow — not `__copyinit__`,
    # so it never runs implicitly.
    fn __init__(inout self, other: Self):
        """Construct a deep copy of the provided value.

        Args:
            other: The value to copy.
        """
        ...
trait Defaultable:
    """The `Defaultable` trait describes a type with a default constructor.

    Implementing the `Defaultable` trait requires the type to define
    an `__init__` method with no arguments:

    ```mojo
    struct Foo(Defaultable):
        var s: String

        fn __init__(inout self):
            self.s = "default"
    ```

    You can now construct a generic `Defaultable` type:

    ```mojo
    fn default_init[T: Defaultable]() -> T:
        return T()

    var foo = default_init[Foo]()
    print(foo.s)
    ```

    ```plaintext
    default
    ```
    """

    # Conforming types provide a zero-argument `__init__`.
    fn __init__(inout self):
        """Create a default instance of the value."""
        ...
trait CollectionElement(Copyable, Movable):
    """The CollectionElement trait denotes a trait composition
    of the `Copyable` and `Movable` traits.

    This is useful to have as a named entity since Mojo does not
    currently support anonymous trait compositions to constrain
    on `Copyable & Movable` in the parameter.

    Conforming types must provide both `__copyinit__` and `__moveinit__`.
    """

    pass
trait StringableCollectionElement(CollectionElement, Stringable):
    """The StringableCollectionElement trait denotes a trait composition
    of the `CollectionElement` and `Stringable` traits.

    This is useful to have as a named entity since Mojo does not
    currently support anonymous trait compositions to constrain
    on `CollectionElement & Stringable` in the parameter.

    Conforming types must satisfy the requirements of both parent traits.
    """

    pass
trait ComparableCollectionElement(CollectionElement, Comparable):
    """
    This trait is a temporary solution to enable comparison of
    collection elements as utilized in the `index` and `count` methods of
    a list.

    This approach will be revised with the introduction of conditional trait
    conformances.

    Conforming types must satisfy the requirements of both parent traits.
    """

    pass
trait RepresentableCollectionElement(CollectionElement, Representable):
    """The RepresentableCollectionElement trait denotes a trait composition
    of the `CollectionElement` and `Representable` traits.

    This is useful to have as a named entity since Mojo does not
    currently support anonymous trait compositions to constrain
    on `CollectionElement & Representable` in the parameter.

    Conforming types must satisfy the requirements of both parent traits.
    """

    pass
trait BoolableCollectionElement(Boolable, CollectionElement):
    """The BoolableCollectionElement trait denotes a trait composition
    of the `Boolable` and `CollectionElement` traits.

    This is useful to have as a named entity since Mojo does not
    currently support anonymous trait compositions to constrain
    on `Boolable & CollectionElement` in the parameter.

    Conforming types must satisfy the requirements of both parent traits.
    """

    pass
trait BoolableKeyElement(Boolable, KeyElement):
    """The BoolableKeyElement trait denotes a trait composition
    of the `Boolable` and `KeyElement` traits.

    This is useful to have as a named entity since Mojo does not
    currently support anonymous trait compositions to constrain
    on `Boolable & KeyElement` in the parameter.
    """

    pass
| mojo/stdlib/src/builtin/value.mojo | false |
<filename>mojo/stdlib/src/builtin/_closure.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
@register_passable
struct __ParameterClosureCaptureList[
    fn_type: AnyTrivialRegType, fn_ref: fn_type
]:
    """Owns the heap-allocated capture list of the parameter closure `fn_ref`,
    managing its creation, copy, and destruction via compiler intrinsics."""

    # Opaque pointer to the compiler-allocated capture storage.
    var value: __mlir_type.`!kgen.pointer<none>`

    # Parameter closure invariant requires this function be marked 'capturing'.
    @parameter
    @always_inline
    fn __init__(inout self):
        """Allocate and populate the capture list for `fn_ref`."""
        self.value = __mlir_op.`kgen.capture_list.create`[callee=fn_ref]()

    @always_inline
    fn __copyinit__(inout self, existing: Self):
        """Copy the capture list from an existing instance."""
        self.value = __mlir_op.`kgen.capture_list.copy`[callee=fn_ref](
            existing.value
        )

    @always_inline
    fn __del__(owned self):
        """Free the capture-list storage."""
        __mlir_op.`pop.aligned_free`(self.value)

    @always_inline("nodebug")
    fn expand(self):
        """Expand the captured values at the use site."""
        __mlir_op.`kgen.capture_list.expand`(self.value)
fn __closure_wrapper_noop_dtor(
    owned self: __mlir_type.`!kgen.pointer<none>`, /
):
    """No-op destructor for closures whose captures require no cleanup."""
    pass
fn __closure_wrapper_noop_copy(
    owned other: __mlir_type.`!kgen.pointer<none>`, /
) -> __mlir_type.`!kgen.pointer<none>`:
    """Trivial copy for closures whose captures are bitwise-copyable;
    returns the pointer unchanged."""
    return other
| mojo/stdlib/src/builtin/_closure.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Provides decorators and utilities for interacting with Mojo documentation
generation and validation.
These are Mojo built-ins, so you don't need to import them.
"""
# ===-------------------------------------------------------------------===#
# doc_private
# ===-------------------------------------------------------------------===#
fn doc_private():
    """Indicate that the decorated declaration is private from the viewpoint
    of documentation generation.

    This decorator allows for hiding the documentation for a declaration during
    generation. This is often used to hide `__init__`, and other special
    methods, that are intended for internal consumption.

    For example:

    ```mojo
    struct Foo:
        @doc_private
        fn __init__(inout self):
            "This should not be called directly, prefer Foo.create instead."
            return

        @staticmethod
        fn create() -> Self:
            return Self()
    ```
    """
    # No runtime effect; the decorator is interpreted by documentation tooling.
    return
| mojo/stdlib/src/builtin/_documentation.mojo | false |
<filename>mojo/stdlib/src/builtin/_hasher.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
trait _HashableWithHasher:
    """A type that can feed its representation into a generic `_Hasher`."""

    fn __hash__[H: _Hasher](self, inout hasher: H):
        """Update `hasher` with this value's contents.

        Parameters:
            H: The hasher implementation type.

        Args:
            hasher: The hasher state to update.
        """
        ...
trait _Hasher:
    """An incremental hasher: construct, feed data in, then call `finish`
    to consume the state and obtain a 64-bit digest."""

    fn __init__(inout self):
        """Initialize an empty hasher state."""
        ...

    fn _update_with_bytes(
        inout self, data: DTypePointer[DType.uint8], length: Int
    ):
        """Feed `length` raw bytes starting at `data` into the state."""
        ...

    fn _update_with_simd(inout self, value: SIMD[_, _]):
        """Feed a SIMD value (any dtype and width) into the state."""
        ...

    fn update[T: _HashableWithHasher](inout self, value: T):
        """Feed a hashable value into the state."""
        ...

    fn finish(owned self) -> UInt64:
        """Consume the hasher and return the final 64-bit digest."""
        ...
fn _hash_with_hasher[
    HasherType: _Hasher, HashableType: _HashableWithHasher
](hashable: HashableType) -> UInt64:
    """Hash a single value with a freshly constructed hasher.

    Parameters:
        HasherType: The hasher implementation to use.
        HashableType: The type of the value being hashed.

    Args:
        hashable: The value to hash.

    Returns:
        The 64-bit hash of `hashable`.
    """
    var hasher = HasherType()
    hasher.update(hashable)
    # `finish` takes the hasher by ownership, so transfer it with `^`.
    var value = hasher^.finish()
    return value
| mojo/stdlib/src/builtin/_hasher.mojo | false |
<filename>mojo/stdlib/src/builtin/_location.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements utilities to capture and represent source code location.
"""
@value
@register_passable("trivial")
struct _SourceLocation(Stringable):
    """Type to carry file name, line, and column information."""

    var line: Int
    var col: Int
    var file_name: StringLiteral

    fn __str__(self) -> String:
        """Return the location formatted as `file:line:col`."""
        return str(self.file_name) + ":" + str(self.line) + ":" + str(self.col)

    fn prefix[T: Stringable](self, msg: T) -> String:
        """Return the given message prefixed with the pretty-printer location.

        Parameters:
            T: The type of the message.

        Args:
            msg: The message to attach the prefix to.

        Returns:
            The message prefixed with `At file:line:col: `.
        """
        return "At " + str(self) + ": " + str(msg)
@always_inline("nodebug")
fn __source_location() -> _SourceLocation:
    """Returns the location where it's called.

    This currently doesn't work when called in a parameter expression.

    Returns:
        The location information of the __source_location() call.
    """
    var line: __mlir_type.index
    var col: __mlir_type.index
    var file_name: __mlir_type.`!kgen.string`
    # inlineCount = 0: report the immediate call site of this intrinsic.
    line, col, file_name = __mlir_op.`kgen.source_loc`[
        _properties = __mlir_attr.`{inlineCount = 0 : i64}`,
        _type = (
            __mlir_type.index,
            __mlir_type.index,
            __mlir_type.`!kgen.string`,
        ),
    ]()
    return _SourceLocation(line, col, file_name)
@always_inline("nodebug")
fn __call_location() -> _SourceLocation:
    """Returns the location where the enclosing function is called.

    This should only be used in `@always_inline` or `@always_inline("nodebug")`
    functions so that it returns the source location of where the enclosing
    function is called at (even if inside another `@always_inline("nodebug")`
    function).

    This currently doesn't work when this or the enclosing function is called in
    a parameter expression.

    Returns:
        The location information of where the enclosing function (i.e. the
          function whose body __call_location() is used in) is called.
    """
    var line: __mlir_type.index
    var col: __mlir_type.index
    var file_name: __mlir_type.`!kgen.string`
    # inlineCount = 1: walk one level up the inlining stack, i.e. report the
    # caller of the enclosing (inlined) function rather than this intrinsic.
    line, col, file_name = __mlir_op.`kgen.source_loc`[
        _properties = __mlir_attr.`{inlineCount = 1 : i64}`,
        _type = (
            __mlir_type.index,
            __mlir_type.index,
            __mlir_type.`!kgen.string`,
        ),
    ]()
    return _SourceLocation(line, col, file_name)
| mojo/stdlib/src/builtin/_location.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Module to contain some components of the future math module.
This is needed to work around some circular dependencies; all elements of this
module should be exposed by the current `math` module. The contents of this
module should be eventually moved to the `math` module when it's open sourced.
"""
# ===----------------------------------------------------------------------=== #
# Ceilable
# ===----------------------------------------------------------------------=== #
trait Ceilable:
    """
    The `Ceilable` trait describes a type that defines a ceiling operation.

    Types that conform to `Ceilable` will work with the builtin `ceil`
    function. The ceiling operation always returns the same type as the input.

    For example:
    ```mojo
    from math import Ceilable, ceil

    @value
    struct Complex(Ceilable):
        var re: Float64
        var im: Float64

        fn __ceil__(self) -> Self:
            return Self(ceil(self.re), ceil(self.im))
    ```
    """

    # TODO(MOCO-333): Reconsider the signature when we have parametric traits or
    # associated types.
    fn __ceil__(self) -> Self:
        ...
# ===----------------------------------------------------------------------=== #
# Floorable
# ===----------------------------------------------------------------------=== #
trait Floorable:
    """
    The `Floorable` trait describes a type that defines a floor operation.

    Types that conform to `Floorable` will work with the builtin `floor`
    function. The floor operation always returns the same type as the input.

    For example:
    ```mojo
    from math import Floorable, floor

    @value
    struct Complex(Floorable):
        var re: Float64
        var im: Float64

        fn __floor__(self) -> Self:
            return Self(floor(self.re), floor(self.im))
    ```
    """

    # TODO(MOCO-333): Reconsider the signature when we have parametric traits or
    # associated types.
    fn __floor__(self) -> Self:
        ...
# ===----------------------------------------------------------------------=== #
# CeilDivable
# ===----------------------------------------------------------------------=== #
trait CeilDivable:
    """
    The `CeilDivable` trait describes a type that defines a ceil division
    operation.

    Types that conform to `CeilDivable` will work with the `math.ceildiv`
    function, which computes the ceil division as `-(-numerator // denominator)`
    using the three requirements below.

    For example:
    ```mojo
    from math import CeilDivable

    @value
    struct Foo(CeilDivable):
        var x: Float64

        fn __floordiv__(self, other: Self) -> Self:
            return self.x // other.x

        fn __rfloordiv__(self, other: Self) -> Self:
            return other // self

        fn __neg__(self) -> Self:
            return -self.x
    ```
    """

    # TODO(MOCO-333): Reconsider these signatures when we have parametric traits
    # or associated types.
    fn __floordiv__(self, other: Self) -> Self:
        ...

    fn __rfloordiv__(self, other: Self) -> Self:
        ...

    fn __neg__(self) -> Self:
        ...
trait CeilDivableRaising:
    """
    The `CeilDivableRaising` trait describes a type that defines a floor
    division and negation operation that can raise.

    Types that conform to `CeilDivableRaising` will work with the `//` operator
    as well as the `math.ceildiv` function.

    For example:
    ```mojo
    from math import CeilDivableRaising

    @value
    struct Foo(CeilDivableRaising):
        var x: object

        fn __floordiv__(self, other: Self) raises -> Self:
            return self.x // other.x

        fn __rfloordiv__(self, other: Self) raises -> Self:
            return other // self

        fn __neg__(self) raises -> Self:
            return -self.x
    ```
    """

    # TODO(MOCO-333): Reconsider these signatures when we have parametric traits
    # or associated types.
    fn __floordiv__(self, other: Self) raises -> Self:
        ...

    fn __rfloordiv__(self, other: Self) raises -> Self:
        ...

    fn __neg__(self) raises -> Self:
        ...
# ===----------------------------------------------------------------------=== #
# Truncable
# ===----------------------------------------------------------------------=== #
trait Truncable:
    """
    The `Truncable` trait describes a type that defines a truncation operation.

    Types that conform to `Truncable` will work with the builtin `trunc`
    function. The truncation operation always returns the same type as the
    input.

    For example:
    ```mojo
    from math import Truncable, trunc

    @value
    struct Complex(Truncable):
        var re: Float64
        var im: Float64

        fn __trunc__(self) -> Self:
            return Self(trunc(self.re), trunc(self.im))
    ```
    """

    # TODO(MOCO-333): Reconsider the signature when we have parametric traits or
    # associated types.
    fn __trunc__(self) -> Self:
        ...
# ===----------------------------------------------------------------------=== #
# gcd
# ===----------------------------------------------------------------------=== #
fn gcd(owned m: Int, owned n: Int, /) -> Int:
    """Compute the greatest common divisor of two integers.

    Uses the iterative Euclidean algorithm. The result is always
    non-negative, and `gcd(0, 0)` is 0.

    Args:
        m: The first integer.
        n: The second integer.

    Returns:
        The greatest common divisor of the two integers.
    """
    var a = m
    var b = n
    while b != 0:
        var remainder = a % b
        a = b
        b = remainder
    return abs(a)
fn gcd(s: Span[Int], /) -> Int:
    """Computes the greatest common divisor of a span of integers.

    Args:
        s: A span containing a collection of integers.

    Returns:
        The greatest common divisor of all the integers in the span,
        or 0 if the span is empty.
    """
    var count = len(s)
    if count == 0:
        return 0
    var acc = s[0]
    var i = 1
    # Stop as soon as the running gcd collapses to 1: no later element
    # can change the result.
    while i < count and acc != 1:
        acc = gcd(s[i], acc)
        i += 1
    return acc
@always_inline
fn gcd(l: List[Int], /) -> Int:
    """Computes the greatest common divisor of a list of integers.

    Delegates to the `Span` overload; an empty list yields 0.

    Args:
        l: A list containing a collection of integers.

    Returns:
        The greatest common divisor of all the integers in the list.
    """
    return gcd(Span(l))
fn gcd(*values: Int) -> Int:
    """Computes the greatest common divisor of a variadic number of integers.

    Args:
        values: A variadic list of integers.

    Returns:
        The greatest common divisor of the given integers, or 0 if no
        integers were given.
    """
    # TODO: Deduplicate when we can create a Span from VariadicList
    var count = len(values)
    if count == 0:
        return 0
    var acc = values[0]
    for i in range(1, count):
        # A running gcd of 1 can never change; bail out early.
        if acc == 1:
            break
        acc = gcd(values[i], acc)
    return acc
| mojo/stdlib/src/builtin/_math.mojo | false |
<filename>mojo/stdlib/src/builtin/_startup.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements functionality to start a mojo execution."""
from sys import external_call
@always_inline
fn _get_global[
    name: StringLiteral,
    init_fn: fn (UnsafePointer[NoneType]) -> UnsafePointer[NoneType],
    destroy_fn: fn (UnsafePointer[NoneType]) -> None,
](
    payload: UnsafePointer[NoneType] = UnsafePointer[NoneType]()
) -> UnsafePointer[NoneType]:
    """Return the process-wide singleton registered under `name`, creating it
    on first use via the compiler runtime.

    Parameters:
        name: Unique key identifying the global object.
        init_fn: Called with `payload` to create the global when absent.
        destroy_fn: Called to destroy the global when globals are torn down.

    Args:
        payload: Opaque argument forwarded to `init_fn`.

    Returns:
        A pointer to the (possibly newly created) global object.
    """
    return external_call[
        "KGEN_CompilerRT_GetGlobalOrCreate", UnsafePointer[NoneType]
    ](StringRef(name), payload, init_fn, destroy_fn)
fn _init_global_runtime(
    ignored: UnsafePointer[NoneType],
) -> UnsafePointer[NoneType]:
    """Initialize the global runtime. This is a singleton that handle the common
    case where the runtime has the same number of threads as the number of cores.

    Args:
        ignored: Unused payload required by the `_get_global` init signature.

    Returns:
        A pointer to the newly created runtime.
    """
    # The argument 0 selects the runtime's default thread count.
    return external_call[
        "KGEN_CompilerRT_LLCL_CreateRuntime", UnsafePointer[NoneType]
    ](0)
fn _destroy_global_runtime(ptr: UnsafePointer[NoneType]):
    """Destroy the global runtime if ever used.

    Args:
        ptr: Pointer to the runtime previously created by
          `_init_global_runtime`.
    """
    external_call["KGEN_CompilerRT_LLCL_DestroyRuntime", NoneType](ptr)
@always_inline
fn _get_current_or_global_runtime() -> UnsafePointer[NoneType]:
    """Returns the current runtime, or returns the Mojo singleton global
    runtime, creating it if it does not already exist. When Mojo is used within
    the Modular Execution Engine the current runtime will be that already
    constructed by the execution engine. If the user has already manually
    constructed a runtime and added tasks to it, the current runtime for those
    tasks will be that runtime. Otherwise, the singleton runtime is used, which
    is created with number of threads equal to the number of cores.

    Returns:
        A pointer to the current runtime, or to the lazily-created global
          singleton runtime.
    """
    var current_runtime = external_call[
        "KGEN_CompilerRT_LLCL_GetCurrentRuntime", UnsafePointer[NoneType]
    ]()
    # A non-null pointer means a runtime is already active for this context.
    if current_runtime:
        return current_runtime
    return _get_global[
        "Runtime", _init_global_runtime, _destroy_global_runtime
    ]()
fn __wrap_and_execute_main[
    main_func: fn () -> None
](
    argc: Int32,
    argv: __mlir_type[`!kgen.pointer<!kgen.pointer<scalar<ui8>>>`],
) -> Int32:
    """Define a C-ABI compatible entry point for non-raising main function.

    Returns:
        0 (success) in all cases, since `main_func` cannot raise.
    """
    # Initialize the global runtime.
    _ = _get_current_or_global_runtime()
    # Initialize the mojo argv with those provided.
    external_call["KGEN_CompilerRT_SetArgV", NoneType](argc, argv)
    # Call into the user main function.
    main_func()
    # Delete any globals we have allocated.
    external_call["KGEN_CompilerRT_DestroyGlobals", NoneType]()
    # Return OK.
    return 0
fn __wrap_and_execute_raising_main[
    main_func: fn () raises -> None
](
    argc: Int32,
    argv: __mlir_type[`!kgen.pointer<!kgen.pointer<scalar<ui8>>>`],
) -> Int32:
    """Define a C-ABI compatible entry point for a raising main function.

    Returns:
        0 on success, 1 if `main_func` raised an unhandled error.
    """
    # Initialize the global runtime.
    _ = _get_current_or_global_runtime()
    # Initialize the mojo argv with those provided.
    external_call["KGEN_CompilerRT_SetArgV", NoneType](argc, argv)
    # Call into the user main function.
    try:
        main_func()
    except e:
        print("Unhandled exception caught during execution:", e)
        # NOTE: globals are not destroyed on this early-exit path.
        return 1
    # Delete any globals we have allocated.
    external_call["KGEN_CompilerRT_DestroyGlobals", NoneType]()
    # Return OK.
    return 0
fn __wrap_and_execute_object_raising_main[
    main_func: fn () raises -> object
](
    argc: Int32,
    argv: __mlir_type[`!kgen.pointer<!kgen.pointer<scalar<ui8>>>`],
) -> Int32:
    """Define a C-ABI compatible entry point for a raising main function that
    returns an object. The returned object is discarded; only the exit status
    propagates."""

    # Adapter that drops the object result so the `-> None` wrapper applies.
    fn wrapped_main() raises:
        _ = main_func()

    return __wrap_and_execute_raising_main[wrapped_main](argc, argv)
# A prototype of the main entry point, used by the compiled when synthesizing
# main.
fn __mojo_main_prototype(
    argc: Int32, argv: __mlir_type[`!kgen.pointer<!kgen.pointer<scalar<ui8>>>`]
) -> Int32:
    """Prototype of the C-ABI main entry point; its signature is used by the
    compiler when synthesizing `main`."""
    return 0
| mojo/stdlib/src/builtin/_startup.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
from builtin.range import _StridedRangeIterator
# ===----------------------------------------------------------------------===#
# __MLIRType
# ===----------------------------------------------------------------------===#
@register_passable("trivial")
struct __MLIRType[T: AnyTrivialRegType](Movable, Copyable):
    """A trivially-copyable wrapper that gives a raw MLIR register type
    value semantics so it can satisfy `Movable`/`Copyable` constraints."""

    var value: T
# ===----------------------------------------------------------------------===#
# parameter_for
# ===----------------------------------------------------------------------===#
trait _IntNext(Copyable):
    """A copyable iterator-like type whose `__next__` yields `Int` values."""

    fn __next__(inout self) -> Int:
        ...
trait _IntIter(_IntNext):
    """An `Int` iterator that also reports how many elements remain."""

    fn __len__(self) -> Int:
        ...
trait _IntIterable(_IntIter):
    """An `Int` iterable whose `__iter__` returns the same type (self-iterating)."""

    fn __iter__(self) -> Self:
        ...
trait _StridedIterable(_IntIter):
    """An `Int` iterable whose `__iter__` yields a `_StridedRangeIterator`."""

    fn __iter__(self) -> _StridedRangeIterator:
        ...
struct _ParamForIterator[IteratorT: Copyable]:
    """Snapshot of `@parameter for` loop state: the advanced iterator, the
    current value, and whether iteration has finished.

    Parameters:
        IteratorT: The wrapped iterator type.
    """

    var next_it: IteratorT
    var value: Int
    var stop: Bool

    fn __init__(inout self, next_it: IteratorT, value: Int, stop: Bool):
        """Store the advanced iterator, current value, and stop flag."""
        self.next_it = next_it
        self.value = value
        self.stop = stop
fn declval[T: AnyType]() -> T:
    """Produce a value of type `T` for type-level use (inside `__type_of`)
    only; any real call is rejected at compile time by the `constrained`,
    and the infinite loop satisfies the checker that no return is needed.
    """
    constrained[False, "should only be used inside __type_of"]()
    while True:
        pass
fn parameter_for_generator[
    T: _IntIterable,
](range: T) -> _ParamForIterator[__type_of(declval[T]().__iter__())]:
    """Produce the initial `@parameter for` state for a self-iterating range."""
    return _generator(range.__iter__())
fn parameter_for_generator[
    T: _StridedIterable,
](range: T) -> _ParamForIterator[__type_of(declval[T]().__iter__())]:
    """Produce the initial `@parameter for` state for a strided range."""
    return _generator(range.__iter__())
fn _generator[
    IteratorT: _IntIter
](it: IteratorT) -> _ParamForIterator[IteratorT]:
    """Advance `it` one step, packaging the value and stop flag for
    `@parameter for` unrolling."""
    if it.__len__() == 0:
        # Exhausted: return a sentinel (unknown) iterator with stop=True so
        # the unroller terminates without reading the iterator again.
        return _ParamForIterator[IteratorT](
            __mlir_attr[`#kgen.unknown : !kgen.paramref<`, IteratorT, `>`],
            0,
            True,
        )
    var next_it = it
    var value = next_it.__next__()
    return _ParamForIterator(next_it, value, False)
| mojo/stdlib/src/builtin/_stubs.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the builtin package."""
| mojo/stdlib/src/builtin/__init__.mojo | false |
<filename>mojo/stdlib/src/collections/dict.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines `Dict`, a collection that stores key-value pairs.
Dict provides an efficient, O(1) amortized
average-time complexity for insert, lookup, and removal of dictionary elements.
Its implementation closely mirrors Python's `dict` implementation:
- Performance and size are heavily optimized for small dictionaries, but can
scale to large dictionaries.
- Insertion order is implicitly preserved. Iteration over keys, values, and
items have a deterministic order based on insertion.
Key elements must implement the `KeyElement` trait, which encompasses
Movable, Hashable, and EqualityComparable. It also includes CollectionElement
and Copyable until we push references through the standard library types.
Value elements must be CollectionElements for a similar reason. Both key and
value types must always be Movable so we can resize the dictionary as it grows.
See the `Dict` docs for more details.
"""
from builtin.value import StringableCollectionElement
from .optional import Optional
trait KeyElement(CollectionElement, Hashable, EqualityComparable):
    """A trait composition for types which implement all requirements of
    dictionary keys. Dict keys must minimally be Movable, Hashable,
    and EqualityComparable for a hash map. Until we have references
    they must also be copyable."""

    pass
trait RepresentableKeyElement(KeyElement, Representable):
    """A trait composition for types which implement all requirements of
    dictionary keys and `Representable`."""

    pass
@value
struct _DictEntryIter[
    K: KeyElement,
    V: CollectionElement,
    dict_mutability: Bool,
    dict_lifetime: AnyLifetime[dict_mutability].type,
    forward: Bool = True,
]:
    """Iterator over immutable DictEntry references.

    Parameters:
        K: The key type of the elements in the dictionary.
        V: The value type of the elements in the dictionary.
        dict_mutability: Whether the reference to the dictionary is mutable.
        dict_lifetime: The lifetime of the Dict.
        forward: The iteration direction. `False` is backwards.
    """

    # Immutable cast of the dict lifetime: entries are yielded read-only.
    alias imm_dict_lifetime = __mlir_attr[
        `#lit.lifetime.mutcast<`, dict_lifetime, `> : !lit.lifetime<1>`
    ]
    alias ref_type = Reference[DictEntry[K, V], False, Self.imm_dict_lifetime]

    var index: Int
    var seen: Int
    var src: Reference[Dict[K, V], dict_mutability, dict_lifetime]

    fn __iter__(self) -> Self:
        return self

    @always_inline
    fn __next__(inout self) -> Self.ref_type:
        # Scan the dense entry array, skipping holes left by removals
        # (entries whose Optional slot is empty).
        while True:
            var opt_entry_ref = self.src[]._entries.__get_ref(self.index)
            if opt_entry_ref[]:
                @parameter
                if forward:
                    self.index += 1
                else:
                    self.index -= 1
                self.seen += 1
                return opt_entry_ref[].value()[]

            @parameter
            if forward:
                self.index += 1
            else:
                self.index -= 1

    fn __len__(self) -> Int:
        # Remaining live entries, not remaining slots.
        return len(self.src[]) - self.seen
@value
struct _DictKeyIter[
    K: KeyElement,
    V: CollectionElement,
    dict_mutability: Bool,
    dict_lifetime: AnyLifetime[dict_mutability].type,
    forward: Bool = True,
]:
    """Iterator over immutable Dict key references.

    Parameters:
        K: The key type of the elements in the dictionary.
        V: The value type of the elements in the dictionary.
        dict_mutability: Whether the reference to the dictionary is mutable.
        dict_lifetime: The lifetime of the Dict.
        forward: The iteration direction. `False` is backwards.
    """

    alias imm_dict_lifetime = __mlir_attr[
        `#lit.lifetime.mutcast<`, dict_lifetime, `> : !lit.lifetime<1>`
    ]
    alias ref_type = Reference[K, False, Self.imm_dict_lifetime]

    alias dict_entry_iter = _DictEntryIter[
        K, V, dict_mutability, dict_lifetime, forward
    ]

    # Thin adapter over the entry iterator: yields only the key field.
    var iter: Self.dict_entry_iter

    fn __iter__(self) -> Self:
        return self

    fn __next__(inout self) -> Self.ref_type:
        return self.iter.__next__()[].key

    fn __len__(self) -> Int:
        return self.iter.__len__()
@value
struct _DictValueIter[
    K: KeyElement,
    V: CollectionElement,
    dict_mutability: Bool,
    dict_lifetime: AnyLifetime[dict_mutability].type,
    forward: Bool = True,
]:
    """Iterator over Dict value references. These are mutable if the dict
    is mutable.

    Parameters:
        K: The key type of the elements in the dictionary.
        V: The value type of the elements in the dictionary.
        dict_mutability: Whether the reference to the dictionary is mutable.
        dict_lifetime: The lifetime of the Dict.
        forward: The iteration direction. `False` is backwards.
    """

    alias ref_type = Reference[V, dict_mutability, dict_lifetime]

    var iter: _DictEntryIter[K, V, dict_mutability, dict_lifetime, forward]

    fn __iter__(self) -> Self:
        return self

    fn __reversed__[
        mutability: Bool, self_life: AnyLifetime[mutability].type
    ](self) -> _DictValueIter[K, V, dict_mutability, dict_lifetime, False]:
        """Return a backwards iterator starting at the last entry slot."""
        var src = self.iter.src
        return _DictValueIter(
            _DictEntryIter[K, V, dict_mutability, dict_lifetime, False](
                src[]._reserved - 1, 0, src
            )
        )

    fn __next__(inout self) -> Self.ref_type:
        var entry_ref = self.iter.__next__()
        # Cast through a pointer to grant additional mutability because
        # _DictEntryIter.next erases it.
        return UnsafePointer.address_of(entry_ref[].value)[]

    fn __len__(self) -> Int:
        return self.iter.__len__()
@value
struct DictEntry[K: KeyElement, V: CollectionElement](CollectionElement):
    """Store a key-value pair entry inside a dictionary.

    Parameters:
        K: The key type of the dict. Must be Hashable+EqualityComparable.
        V: The value type of the dict.
    """

    var hash: Int
    """`key.__hash__()`, stored so hashing isn't re-computed during dict lookup."""
    var key: K
    """The unique key for the entry."""
    var value: V
    """The value associated with the key."""

    fn __init__(inout self, owned key: K, owned value: V):
        """Create an entry from a key and value, computing the hash.

        Args:
            key: The key of the entry.
            value: The value of the entry.
        """
        # Hash once at construction; lookups compare cached hashes before keys.
        self.hash = hash(key)
        self.key = key^
        self.value = value^
# Sentinel slot values for `_DictIndex`: `_EMPTY` marks a never-used slot;
# `_REMOVED` marks a tombstone left by a deletion, kept so probe sequences
# remain correct.
alias _EMPTY = -1
alias _REMOVED = -2
struct _DictIndex:
    """A compact dict-index type. Small dict indices are compressed
    to smaller integer types to use less memory.

    _DictIndex doesn't store its own size, so the size must be passed in to
    its indexing methods.

    Ideally this could be type-parameterized so that the size checks don't
    need to be performed at runtime, but I couldn't find a way to express
    this in the current type system.
    """

    # Type-erased storage; each accessor re-derives the element width
    # (int8/int16/int32/int64) from the `reserved` argument.
    var data: DTypePointer[DType.invalid]

    @always_inline
    fn __init__(inout self, reserved: Int):
        """Allocate `reserved` slots, all initialized to the `_EMPTY` sentinel,
        using the narrowest signed element type that fits `reserved` indices
        plus the negative sentinels."""
        if reserved <= 128:
            # Indices 0..127 and the negative sentinels fit in int8.
            var data = DTypePointer[DType.int8].alloc(reserved)
            for i in range(reserved):
                data[i] = _EMPTY
            self.data = data.bitcast[DType.invalid]()
        elif reserved <= 2**16 - 2:
            var data = DTypePointer[DType.int16].alloc(reserved)
            for i in range(reserved):
                data[i] = _EMPTY
            self.data = data.bitcast[DType.invalid]()
        elif reserved <= 2**32 - 2:
            var data = DTypePointer[DType.int32].alloc(reserved)
            for i in range(reserved):
                data[i] = _EMPTY
            self.data = data.bitcast[DType.invalid]()
        else:
            var data = DTypePointer[DType.int64].alloc(reserved)
            for i in range(reserved):
                data[i] = _EMPTY
            self.data = data.bitcast[DType.invalid]()

    fn copy(self, reserved: Int) -> Self:
        """Return a deep copy of the first `reserved` slots.

        `reserved` must match the size this index was allocated with so the
        same element width is selected.
        """
        var index = Self(reserved)
        if reserved <= 128:
            var data = self.data.bitcast[DType.int8]()
            var new_data = index.data.bitcast[DType.int8]()
            memcpy(new_data, data, reserved)
        elif reserved <= 2**16 - 2:
            var data = self.data.bitcast[DType.int16]()
            var new_data = index.data.bitcast[DType.int16]()
            memcpy(new_data, data, reserved)
        elif reserved <= 2**32 - 2:
            var data = self.data.bitcast[DType.int32]()
            var new_data = index.data.bitcast[DType.int32]()
            memcpy(new_data, data, reserved)
        else:
            var data = self.data.bitcast[DType.int64]()
            var new_data = index.data.bitcast[DType.int64]()
            memcpy(new_data, data, reserved)
        return index^

    fn __moveinit__(inout self, owned existing: Self):
        """Take ownership of the existing allocation."""
        self.data = existing.data

    fn get_index(self, reserved: Int, slot: Int) -> Int:
        """Return the entry index stored at `slot % reserved`; may be the
        `_EMPTY` or `_REMOVED` sentinel."""
        if reserved <= 128:
            var data = self.data.bitcast[DType.int8]()
            return int(data.load(slot % reserved))
        elif reserved <= 2**16 - 2:
            var data = self.data.bitcast[DType.int16]()
            return int(data.load(slot % reserved))
        elif reserved <= 2**32 - 2:
            var data = self.data.bitcast[DType.int32]()
            return int(data.load(slot % reserved))
        else:
            var data = self.data.bitcast[DType.int64]()
            return int(data.load(slot % reserved))

    fn set_index(inout self, reserved: Int, slot: Int, value: Int):
        """Store `value` (an entry index or sentinel) at `slot % reserved`."""
        if reserved <= 128:
            var data = self.data.bitcast[DType.int8]()
            return data.store(slot % reserved, value)
        elif reserved <= 2**16 - 2:
            var data = self.data.bitcast[DType.int16]()
            return data.store(slot % reserved, value)
        elif reserved <= 2**32 - 2:
            var data = self.data.bitcast[DType.int32]()
            return data.store(slot % reserved, value)
        else:
            var data = self.data.bitcast[DType.int64]()
            return data.store(slot % reserved, value)

    fn __del__(owned self):
        """Free the slot storage."""
        self.data.free()
struct Dict[K: KeyElement, V: CollectionElement](
    Sized, CollectionElement, Boolable
):
    """A container that stores key-value pairs.
    The key type and value type must be specified statically, unlike a Python
    dictionary, which can accept arbitrary key and value types.
    The key type must implement the `KeyElement` trait, which encompasses
    `Movable`, `Hashable`, and `EqualityComparable`. It also includes
    `CollectionElement` and `Copyable` until we have references.
    The value type must implement the `CollectionElement` trait.
    Usage:
    ```mojo
    from collections import Dict
    var d = Dict[String, Int]()
    d["a"] = 1
    d["b"] = 2
    print(len(d)) # prints 2
    print(d["a"]) # prints 1
    print(d.pop("b")) # prints 2
    print(len(d)) # prints 1
    ```
    Parameters:
        K: The type of the dictionary key. Must be Hashable and EqualityComparable
            so we can find the key in the map.
        V: The value type of the dictionary. Currently must be CollectionElement.
    """
    # Implementation:
    #
    # `Dict` provides an efficient, O(1) amortized average-time complexity for
    # insert, lookup, and removal of dictionary elements.
    #
    # Its implementation closely mirrors Python's `dict` implementation:
    #
    # - Performance and size are heavily optimized for small dictionaries, but can
    #   scale to large dictionaries.
    # - Insertion order is implicitly preserved. Once `__iter__` is implemented
    #   it will return a deterministic order based on insertion.
    # - To achieve this, elements are stored in a dense array. Inserting a new
    #   element will append it to the entry list, and then that index will be stored
    #   in the dict's index hash map. Removing an element updates that index to
    #   a special `REMOVED` value for correctness of the probing sequence, and
    #   the entry in the entry list is marked as removed and the relevant data is freed.
    #   The entry can be re-used to insert a new element, but it can't be reset to
    #   `EMPTY` without compacting or resizing the dictionary.
    # - The index probe sequence is taken directly from Python's dict implementation:
    #
    #     ```mojo
    #     var slot = hash(key) % self._reserved
    #     var perturb = hash(key)
    #     while True:
    #         check_slot(slot)
    #         alias PERTURB_SHIFT = 5
    #         perturb >>= PERTURB_SHIFT
    #         slot = ((5 * slot) + perturb + 1) % self._reserved
    #     ```
    #
    # - Similarly to Python, we aim for a maximum load of 2/3, after which we resize
    #   to a larger dictionary.
    # - In the case where many entries are being added and removed, the dictionary
    #   can fill up with `REMOVED` entries without being resized. In this case
    #   we will eventually "compact" the dictionary and shift entries towards
    #   the beginning to free new space while retaining insertion order.
    #
    # Key elements must implement the `KeyElement` trait, which encompasses
    # Movable, Hashable, and EqualityComparable. It also includes CollectionElement
    # and Copyable until we have references.
    #
    # Value elements must be CollectionElements for a similar reason. Both key and
    # value types must always be Movable so we can resize the dictionary as it grows.
    #
    # Without conditional trait conformance, making a `__str__` representation for
    # Dict is tricky. We'd need to add `Stringable` to the requirements for keys
    # and values. This may be worth it.
    #
    # Invariants:
    #
    # - size = 2^k for integer k:
    #     This allows for faster entry slot lookups, since modulo can be
    #     optimized to a bit shift for powers of 2.
    #
    # - size <= 2/3 * _reserved
    #     If size exceeds this invariant, we double the size of the dictionary.
    #     This is the maximal "load factor" for the dict. Higher load factors
    #     trade off higher memory utilization for more frequent worst-case lookup
    #     performance. Lookup is O(n) in the worst case and O(1) in average case.
    #
    # - _n_entries <= 3/4 * _reserved
    #     If _n_entries exceeds this invariant, we compact the dictionary, retaining
    #     the insertion order while resetting _n_entries = size.
    #     As elements are removed, they retain marker entries for the probe sequence.
    #     The average case miss lookup (ie. `contains` check on a key not in the dict)
    #     is O(_reserved  / (1 + _reserved - _n_entries)). At `(k-1)/k` this
    #     approaches `k` and is therefore O(1) average case. However, we want it to
    #     be _larger_ than the load factor: since `compact` is O(n), we don't
    #     churn and compact on repeated insert/delete, and instead amortize
    #     compaction cost to O(1) amortized cost.
    # Fields
    alias EMPTY = _EMPTY
    alias REMOVED = _REMOVED
    alias _initial_reservation = 8
    var size: Int
    """The number of elements currently stored in the dict."""
    var _n_entries: Int
    """The number of entries currently allocated."""
    var _reserved: Int
    """The current reserved size of the dictionary."""
    var _index: _DictIndex
    var _entries: List[Optional[DictEntry[K, V]]]
    # ===-------------------------------------------------------------------===#
    # Life cycle methods
    # ===-------------------------------------------------------------------===#
    @always_inline
    fn __init__(inout self):
        """Initialize an empty dictionary."""
        self.size = 0
        self._n_entries = 0
        self._reserved = Self._initial_reservation
        self._index = _DictIndex(self._reserved)
        self._entries = Self._new_entries(self._reserved)
    @always_inline
    fn __init__(inout self, existing: Self):
        """Copy an existing dictionary.
        Args:
            existing: The existing dict.
        """
        self.size = existing.size
        self._n_entries = existing._n_entries
        self._reserved = existing._reserved
        self._index = existing._index.copy(existing._reserved)
        self._entries = existing._entries
    @staticmethod
    fn fromkeys(keys: List[K], value: V) -> Self:
        """Create a new dictionary with keys from list and values set to value.
        Args:
            keys: The keys to set.
            value: The value to set.
        Returns:
            The new dictionary.
        """
        var dict = Dict[K, V]()
        for key in keys:
            dict[key[]] = value
        return dict
    @staticmethod
    fn fromkeys(
        keys: List[K], value: Optional[V] = None
    ) -> Dict[K, Optional[V]]:
        """Create a new dictionary with keys from list and values set to value.
        Args:
            keys: The keys to set.
            value: The value to set.
        Returns:
            The new dictionary.
        """
        var dict = Dict[K, Optional[V]]()
        for key in keys:
            dict[key[]] = value
        return dict
    fn __copyinit__(inout self, existing: Self):
        """Copy an existing dictionary.
        Args:
            existing: The existing dict.
        """
        self.size = existing.size
        self._n_entries = existing._n_entries
        self._reserved = existing._reserved
        self._index = existing._index.copy(existing._reserved)
        self._entries = existing._entries
    fn __moveinit__(inout self, owned existing: Self):
        """Move data of an existing dict into a new one.
        Args:
            existing: The existing dict.
        """
        self.size = existing.size
        self._n_entries = existing._n_entries
        self._reserved = existing._reserved
        self._index = existing._index^
        self._entries = existing._entries^
    # ===-------------------------------------------------------------------===#
    # Operator dunders
    # ===-------------------------------------------------------------------===#
    fn __getitem__(self, key: K) raises -> V:
        """Retrieve a value out of the dictionary.
        Args:
            key: The key to retrieve.
        Returns:
            The value associated with the key, if it's present.
        Raises:
            "KeyError" if the key isn't present.
        """
        return self._find_ref(key)[]
    # TODO(MSTDL-452): rename to __getitem__ returning a reference
    fn __get_ref(
        self: Reference[Self, _, _], key: K
    ) raises -> Reference[V, self.is_mutable, self.lifetime]:
        """Retrieve a value out of the dictionary.
        Args:
            key: The key to retrieve.
        Returns:
            The value associated with the key, if it's present.
        Raises:
            "KeyError" if the key isn't present.
        """
        return self[]._find_ref(key)
    fn __setitem__(inout self, owned key: K, owned value: V):
        """Set a value in the dictionary by key.
        Args:
            key: The key to associate with the specified value.
            value: The data to store in the dictionary.
        """
        self._insert(key^, value^)
    fn __contains__(self, key: K) -> Bool:
        """Check if a given key is in the dictionary or not.
        Args:
            key: The key to check.
        Returns:
            True if the key exists in the dictionary, False otherwise.
        """
        return self.find(key).__bool__()
    fn __iter__(
        self: Reference[Self, _, _],
    ) -> _DictKeyIter[K, V, self.is_mutable, self.lifetime]:
        """Iterate over the dict's keys as immutable references.
        Returns:
            An iterator of immutable references to the dictionary keys.
        """
        return _DictKeyIter(_DictEntryIter(0, 0, self))
    fn __reversed__(
        self: Reference[Self, _, _]
    ) -> _DictKeyIter[K, V, self.is_mutable, self.lifetime, False]:
        """Iterate backwards over the dict keys, returning immutable references.
        Returns:
            A reversed iterator of immutable references to the dict keys.
        """
        return _DictKeyIter(
            _DictEntryIter[forward=False](self[]._reserved - 1, 0, self)
        )
    fn __or__(self, other: Self) -> Self:
        """Merge self with other and return the result as a new dict.
        Args:
            other: The dictionary to merge with.
        Returns:
            The result of the merge.
        """
        var result = Dict(self)
        result.update(other)
        return result^
    fn __ior__(inout self, other: Self):
        """Merge self with other in place.
        Args:
            other: The dictionary to merge with.
        """
        self.update(other)
    # ===-------------------------------------------------------------------===#
    # Trait implementations
    # ===-------------------------------------------------------------------===#
    fn __len__(self) -> Int:
        """The number of elements currently stored in the dictionary."""
        return self.size
    fn __bool__(self) -> Bool:
        """Check if the dictionary is empty or not.
        Returns:
            `False` if the dictionary is empty, `True` if there is at least one element.
        """
        return len(self).__bool__()
    fn __str__[
        T: RepresentableKeyElement, U: RepresentableCollectionElement
    ](self: Dict[T, U]) -> String:
        """Returns a string representation of a `Dict`.
        Note that since we can't condition methods on a trait yet,
        the way to call this method is a bit special. Here is an example below:
        ```mojo
        var my_dict = Dict[Int, Float64]()
        my_dict[1] = 1.1
        my_dict[2] = 2.2
        dict_as_string = my_dict.__str__()
        print(dict_as_string)
        # prints "{1: 1.1, 2: 2.2}"
        ```
        When the compiler supports conditional methods, then a simple `str(my_dict)` will
        be enough.
        Note that both the keys and values' types must implement the `__repr__()` method
        for this to work. See the `Representable` trait for more information.
        Parameters:
            T: The type of the keys in the Dict. Must implement the
                traits `Representable` and `KeyElement`.
            U: The type of the values in the Dict. Must implement the
                traits `Representable` and `CollectionElement`.
        Returns:
            A string representation of the Dict.
        """
        var minimum_capacity = self._minimum_size_of_string_representation()
        var string_buffer = List[UInt8](capacity=minimum_capacity)
        string_buffer.append(0)  # Null terminator
        var result = String(string_buffer^)
        result += "{"
        var i = 0
        for key_value in self.items():
            result += repr(key_value[].key) + ": " + repr(key_value[].value)
            if i < len(self) - 1:
                result += ", "
            i += 1
        result += "}"
        return result
    # ===-------------------------------------------------------------------===#
    # Methods
    # ===-------------------------------------------------------------------===#
    fn _minimum_size_of_string_representation(self) -> Int:
        # we do a rough estimation of the minimum number of chars that we'll see
        # in the string representation, we assume that str(key) and str(value)
        # will be both at least one char.
        return (
            2  # '{' and '}'
            + len(self) * 6  # str(key), str(value) ": " and ", "
            - 2  # remove the last ", "
        )
    fn find(self, key: K) -> Optional[V]:
        """Find a value in the dictionary by key.
        Args:
            key: The key to search for in the dictionary.
        Returns:
            An optional value containing a copy of the value if it was present,
            otherwise an empty Optional.
        """
        try:  # TODO(MOCO-604): push usage through
            return self._find_ref(key)[]
        except:
            return None
    # TODO(MOCO-604): Return Optional[Reference] instead of raising
    fn _find_ref(
        self: Reference[Self, _, _], key: K
    ) raises -> Reference[V, self.is_mutable, self.lifetime]:
        """Find a value in the dictionary by key.
        Args:
            key: The key to search for in the dictionary.
        Returns:
            An optional value containing a reference to the value if it is
            present, otherwise an empty Optional.
        """
        var hash = hash(key)
        var found: Bool
        var slot: Int
        var index: Int
        found, slot, index = self[]._find_index(hash, key)
        if found:
            var entry = self[]._entries.__get_ref(index)
            debug_assert(entry[].__bool__(), "entry in index must be full")
            return Reference(entry[].value()[].value)
        raise "KeyError"
    fn get(self, key: K) -> Optional[V]:
        """Get a value from the dictionary by key.
        Args:
            key: The key to search for in the dictionary.
        Returns:
            An optional value containing a copy of the value if it was present,
            otherwise an empty Optional.
        """
        return self.find(key)
    fn get(self, key: K, default: V) -> V:
        """Get a value from the dictionary by key.
        Args:
            key: The key to search for in the dictionary.
            default: Default value to return.
        Returns:
            A copy of the value if it was present, otherwise default.
        """
        return self.find(key).or_else(default)
    fn pop(inout self, key: K, owned default: Optional[V] = None) raises -> V:
        """Remove a value from the dictionary by key.
        Args:
            key: The key to remove from the dictionary.
            default: Optionally provide a default value to return if the key
                was not found instead of raising.
        Returns:
            The value associated with the key, if it was in the dictionary.
            If it wasn't, return the provided default value instead.
        Raises:
            "KeyError" if the key was not present in the dictionary and no
            default value was provided.
        """
        var hash = hash(key)
        var found: Bool
        var slot: Int
        var index: Int
        found, slot, index = self._find_index(hash, key)
        if found:
            # Leave a REMOVED tombstone in the index so probe sequences that
            # passed through this slot still find their entries.
            self._set_index(slot, Self.REMOVED)
            var entry = self._entries.__get_ref(index)
            debug_assert(entry[].__bool__(), "entry in index must be full")
            var entry_value = entry[].unsafe_take()
            entry[] = None
            self.size -= 1
            return entry_value.value^
        elif default:
            return default.value()[]
        raise "KeyError"
    fn keys(
        self: Reference[Self, _, _]
    ) -> _DictKeyIter[K, V, self.is_mutable, self.lifetime]:
        """Iterate over the dict's keys as immutable references.
        Returns:
            An iterator of immutable references to the dictionary keys.
        """
        return Self.__iter__(self)
    fn values(
        self: Reference[Self, _, _]
    ) -> _DictValueIter[K, V, self.is_mutable, self.lifetime]:
        """Iterate over the dict's values as references.
        Returns:
            An iterator of references to the dictionary values.
        """
        return _DictValueIter(_DictEntryIter(0, 0, self))
    fn items(
        self: Reference[Self, _, _]
    ) -> _DictEntryIter[K, V, self.is_mutable, self.lifetime]:
        """Iterate over the dict's entries as immutable references.
        These can't yet be unpacked like Python dict items, but you can
        access the key and value as attributes ie.
        ```mojo
        for e in dict.items():
            print(e[].key, e[].value)
        ```
        Returns:
            An iterator of immutable references to the dictionary entries.
        """
        return _DictEntryIter(0, 0, self)
    fn update(inout self, other: Self, /):
        """Update the dictionary with the key/value pairs from other, overwriting existing keys.
        The argument must be positional only.
        Args:
            other: The dictionary to update from.
        """
        for entry in other.items():
            self[entry[].key] = entry[].value
    fn clear(inout self):
        """Remove all elements from the dictionary."""
        # Reassignment drops the old index and entries, shrinking storage
        # back to the initial reservation.
        self.size = 0
        self._n_entries = 0
        self._reserved = Self._initial_reservation
        self._index = _DictIndex(self._reserved)
        self._entries = Self._new_entries(self._reserved)
    @staticmethod
    @always_inline
    fn _new_entries(reserved: Int) -> List[Optional[DictEntry[K, V]]]:
        # Dense entry storage, pre-filled with None so slots can be assigned
        # by index without growing the list.
        var entries = List[Optional[DictEntry[K, V]]](capacity=reserved)
        for i in range(reserved):
            entries.append(None)
        return entries
    fn _insert(inout self, owned key: K, owned value: V):
        # Convenience overload: wrap key/value into a DictEntry and insert.
        self._insert(DictEntry[K, V](key^, value^))
    fn _insert(inout self, owned entry: DictEntry[K, V]):
        # Insert or overwrite. When the key is already present, `index` points
        # at the existing entry and only the entry is replaced; otherwise the
        # new index slot is claimed and the counters grow.
        self._maybe_resize()
        var found: Bool
        var slot: Int
        var index: Int
        found, slot, index = self._find_index(entry.hash, entry.key)
        self._entries[index] = entry^
        if not found:
            self._set_index(slot, index)
            self.size += 1
            self._n_entries += 1
    fn _get_index(self, slot: Int) -> Int:
        # Read the entry index stored at `slot` in the hash index.
        return self._index.get_index(self._reserved, slot)
    fn _set_index(inout self, slot: Int, index: Int):
        # Write `index` (or an EMPTY/REMOVED sentinel) at `slot`.
        return self._index.set_index(self._reserved, slot, index)
    fn _next_index_slot(self, inout slot: Int, inout perturb: UInt64):
        # CPython-style probe step: mixes high hash bits in via `perturb` so
        # collisions on the low bits eventually diverge.
        alias PERTURB_SHIFT = 5
        perturb >>= PERTURB_SHIFT
        slot = ((5 * slot) + int(perturb + 1)) % self._reserved
    fn _find_empty_index(self, hash: Int) -> Int:
        # Probe for the first EMPTY slot; used by _compact, where every key is
        # known to be absent from the freshly-reset index.
        var slot = hash % self._reserved
        var perturb = bitcast[DType.uint64](Int64(hash))
        while True:
            var index = self._get_index(slot)
            if index == Self.EMPTY:
                return slot
            self._next_index_slot(slot, perturb)
    fn _find_index(self, hash: Int, key: K) -> (Bool, Int, Int):
        # Return (found, slot, index)
        # On a miss, `index` is `_n_entries`: the dense slot a new entry
        # would occupy. REMOVED tombstones are skipped, not reused.
        var slot = hash % self._reserved
        var perturb = bitcast[DType.uint64](Int64(hash))
        while True:
            var index = self._get_index(slot)
            if index == Self.EMPTY:
                return (False, slot, self._n_entries)
            elif index == Self.REMOVED:
                pass
            else:
                var entry = self._entries.__get_ref(index)
                debug_assert(entry[].__bool__(), "entry in index must be full")
                if (
                    hash == entry[].value()[].hash
                    and key == entry[].value()[].key
                ):
                    return (True, slot, index)
            self._next_index_slot(slot, perturb)
    fn _over_load_factor(self) -> Bool:
        # True when live elements exceed 2/3 of capacity -> resize.
        return 3 * self.size > 2 * self._reserved
    fn _over_compact_factor(self) -> Bool:
        # True when live + tombstoned entries exceed 3/4 of capacity -> compact.
        return 4 * self._n_entries > 3 * self._reserved
    fn _maybe_resize(inout self):
        # Double capacity when over the load factor, re-inserting every live
        # entry; otherwise compact if tombstones have accumulated.
        if not self._over_load_factor():
            if self._over_compact_factor():
                self._compact()
            return
        self._reserved *= 2
        self.size = 0
        self._n_entries = 0
        self._index = _DictIndex(self._reserved)
        var old_entries = self._entries^
        self._entries = self._new_entries(self._reserved)
        for i in range(len(old_entries)):
            var entry = old_entries.__get_ref(i)
            if entry[]:
                self._insert(entry[].unsafe_take())
    fn _compact(inout self):
        # Rebuild the index and shift live entries left over tombstones,
        # preserving insertion order; resets _n_entries to size.
        self._index = _DictIndex(self._reserved)
        var right = 0
        for left in range(self.size):
            while not self._entries.__get_ref(right)[]:
                right += 1
                debug_assert(right < self._reserved, "Invalid dict state")
            var entry = self._entries.__get_ref(right)
            debug_assert(entry[].__bool__(), "Logic error")
            var slot = self._find_empty_index(entry[].value()[].hash)
            self._set_index(slot, left)
            if left != right:
                self._entries[left] = entry[].unsafe_take()
                entry[] = None
            right += 1
        self._n_entries = self.size
struct OwnedKwargsDict[V: CollectionElement](Sized, CollectionElement):
    """Container used to pass owned variadic keyword arguments to functions.
    This type mimics the interface of a dictionary with `String` keys, and
    should be usable more-or-less like a dictionary. Notably, however, this type
    should not be instantiated directly by users.
    Parameters:
        V: The value type of the dictionary. Currently must be CollectionElement.
    """
    # Fields
    alias key_type = String
    # Thin wrapper: all operations delegate to this backing Dict.
    var _dict: Dict[Self.key_type, V]
    # ===-------------------------------------------------------------------===#
    # Life cycle methods
    # ===-------------------------------------------------------------------===#
    fn __init__(inout self):
        """Initialize an empty keyword dictionary."""
        self._dict = Dict[Self.key_type, V]()
    fn __copyinit__(inout self, existing: Self):
        """Copy an existing keyword dictionary.
        Args:
            existing: The existing keyword dictionary.
        """
        self._dict = existing._dict
    fn __moveinit__(inout self, owned existing: Self):
        """Move data of an existing keyword dictionary into a new one.
        Args:
            existing: The existing keyword dictionary.
        """
        self._dict = existing._dict^
    # ===-------------------------------------------------------------------===#
    # Operator dunders
    # ===-------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __getitem__(self, key: Self.key_type) raises -> V:
        """Retrieve a value out of the keyword dictionary.
        Args:
            key: The key to retrieve.
        Returns:
            The value associated with the key, if it's present.
        Raises:
            "KeyError" if the key isn't present.
        """
        return self._dict[key]
    @always_inline("nodebug")
    fn __setitem__(inout self, key: Self.key_type, value: V):
        """Set a value in the keyword dictionary by key.
        Args:
            key: The key to associate with the specified value.
            value: The data to store in the dictionary.
        """
        self._dict[key] = value
    # ===-------------------------------------------------------------------===#
    # Trait implementations
    # ===-------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn __contains__(self, key: Self.key_type) -> Bool:
        """Check if a given key is in the keyword dictionary or not.
        Args:
            key: The key to check.
        Returns:
            True if the key exists in the keyword dictionary, False
            otherwise.
        """
        return key in self._dict
    @always_inline("nodebug")
    fn __len__(self) -> Int:
        """The number of elements currently stored in the keyword dictionary."""
        return len(self._dict)
    # ===-------------------------------------------------------------------===#
    # Methods
    # ===-------------------------------------------------------------------===#
    @always_inline("nodebug")
    fn find(self, key: Self.key_type) -> Optional[V]:
        """Find a value in the keyword dictionary by key.
        Args:
            key: The key to search for in the dictionary.
        Returns:
            An optional value containing a copy of the value if it was present,
            otherwise an empty Optional.
        """
        return self._dict.find(key)
    @always_inline("nodebug")
    fn pop(
        inout self, key: self.key_type, owned default: Optional[V] = None
    ) raises -> V:
        """Remove a value from the keyword dictionary by key.
        Args:
            key: The key to remove from the dictionary.
            default: Optionally provide a default value to return if the key
                was not found instead of raising.
        Returns:
            The value associated with the key, if it was in the dictionary.
            If it wasn't, return the provided default value instead.
        Raises:
            "KeyError" if the key was not present in the dictionary and no
            default value was provided.
        """
        return self._dict.pop(key, default^)
    fn __iter__(
        self: Reference[Self, _, _]
    ) -> _DictKeyIter[Self.key_type, V, self.is_mutable, self.lifetime]:
        """Iterate over the keyword dict's keys as immutable references.
        Returns:
            An iterator of immutable references to the dictionary keys.
        """
        # TODO(#36448): Use this instead of the current workaround
        # return self._dict.__iter__()
        return _DictKeyIter(_DictEntryIter(0, 0, self[]._dict))
    fn keys(
        self: Reference[Self, _, _],
    ) -> _DictKeyIter[Self.key_type, V, self.is_mutable, self.lifetime]:
        """Iterate over the keyword dict's keys as immutable references.
        Returns:
            An iterator of immutable references to the dictionary keys.
        """
        # TODO(#36448): Use this instead of the current workaround
        # return self._dict.keys()
        return Self.__iter__(self)
    fn values(
        self: Reference[Self, _, _],
    ) -> _DictValueIter[Self.key_type, V, self.is_mutable, self.lifetime]:
        """Iterate over the keyword dict's values as references.
        Returns:
            An iterator of references to the dictionary values.
        """
        # TODO(#36448): Use this instead of the current workaround
        # return self._dict.values()
        return _DictValueIter(_DictEntryIter(0, 0, self[]._dict))
    fn items(
        self: Reference[Self, _, _]
    ) -> _DictEntryIter[Self.key_type, V, self.is_mutable, self.lifetime]:
        """Iterate over the keyword dictionary's entries as immutable references.
        These can't yet be unpacked like Python dict items, but you can
        access the key and value as attributes ie.
        ```mojo
        for e in dict.items():
            print(e[].key, e[].value)
        ```
        Returns:
            An iterator of immutable references to the dictionary entries.
        """
        # TODO(#36448): Use this instead of the current workaround
        # return self[]._dict.items()
        return _DictEntryIter(0, 0, self[]._dict)
    @always_inline("nodebug")
    fn _insert(inout self, owned key: Self.key_type, owned value: V):
        # Internal fast-path insert used by keyword-argument lowering.
        self._dict._insert(key^, value^)
    @always_inline("nodebug")
    fn _insert(inout self, key: StringLiteral, owned value: V):
        # Overload for literal keys: promote to String, then insert.
        self._insert(String(key), value^)
| mojo/stdlib/src/collections/dict.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines the `InlineList` type.
You can import these APIs from the `collections` package. For example:
```mojo
from collections import InlineList
```
"""
from utils import InlineArray
from sys.intrinsics import _type_is_eq
# ===----------------------------------------------------------------------===#
# InlineList
# ===----------------------------------------------------------------------===#
@value
struct _InlineListIter[
    T: CollectionElement,
    capacity: Int,
    list_mutability: Bool,
    list_lifetime: AnyLifetime[list_mutability].type,
    forward: Bool = True,
]:
    """Iterator for InlineList.
    Parameters:
        T: The type of the elements in the list.
        capacity: The maximum number of elements that the list can hold.
        list_mutability: Whether the reference to the list is mutable.
        list_lifetime: The lifetime of the List
        forward: The iteration direction. `False` is backwards.
    """
    alias list_type = InlineList[T, capacity]
    var index: Int
    var src: Reference[Self.list_type, list_mutability, list_lifetime]
    fn __iter__(self) -> Self:
        return self
    fn __next__(
        inout self,
    ) -> Reference[T, list_mutability, list_lifetime]:
        # Advance the cursor first, then return the element it passed over;
        # reversed iteration decrements instead and reads the new position.
        @parameter
        if forward:
            self.index += 1
            return self.src[][self.index - 1]
        else:
            self.index -= 1
            return self.src[][self.index]
    fn __len__(self) -> Int:
        # Remaining element count: distance to the end going forward, or the
        # current index itself going backward.
        @parameter
        if forward:
            return len(self.src[]) - self.index
        else:
            return self.index
# TODO: Provide a smarter default for the capacity.
struct InlineList[ElementType: CollectionElement, capacity: Int = 16](Sized):
    """A list allocated on the stack with a maximum size known at compile time.
    It is backed by an `InlineArray` and an `Int` to represent the size.
    This struct has the same API as a regular `List`, but it is not possible to change the
    capacity. In other words, it has a fixed maximum size.
    This is typically faster than a `List` as it is only stack-allocated and does not require
    any dynamic memory allocation.
    Parameters:
        ElementType: The type of the elements in the list.
        capacity: The maximum number of elements that the list can hold.
    """
    # Backing storage is uninitialized beyond `_size`; only the first `_size`
    # slots hold live values.
    var _array: InlineArray[ElementType, capacity]
    var _size: Int
    @always_inline
    fn __init__(inout self):
        """This constructor creates an empty InlineList."""
        self._array = InlineArray[ElementType, capacity](
            unsafe_uninitialized=True
        )
        self._size = 0
    # TODO: Avoid copying elements in once owned varargs
    # allow transfers.
    fn __init__(inout self, *values: ElementType):
        """Constructs a list from the given values.
        Args:
            values: The values to populate the list with.
        """
        debug_assert(len(values) <= capacity, "List is full.")
        self = Self()
        for value in values:
            self.append(value[])
    @always_inline
    fn __len__(self) -> Int:
        """Returns the length of the list."""
        return self._size
    @always_inline
    fn append(inout self, owned value: ElementType):
        """Appends a value to the list.
        Args:
            value: The value to append.
        """
        debug_assert(self._size < capacity, "List is full.")
        self._array[self._size] = value^
        self._size += 1
    @always_inline
    fn __getitem__(
        self: Reference[Self, _, _], owned idx: Int
    ) -> ref [self.lifetime] Self.ElementType:
        """Get a `Reference` to the element at the given index.
        Args:
            idx: The index of the item.
        Returns:
            A reference to the item at the given index.
        """
        debug_assert(
            -self[]._size <= idx < self[]._size, "Index must be within bounds."
        )
        # Negative indices count from the end, Python-style.
        if idx < 0:
            idx += len(self[])
        return self[]._array[idx]
    @always_inline
    fn __del__(owned self):
        """Destroy all the elements in the list."""
        # Only the first `_size` slots were ever initialized; destroy exactly
        # those. The array itself is stack storage, so nothing is freed.
        for i in range(self._size):
            destroy_pointee(UnsafePointer(self._array[i]))
    fn __iter__(
        self: Reference[Self, _, _],
    ) -> _InlineListIter[ElementType, capacity, self.is_mutable, self.lifetime]:
        """Iterate over elements of the list, returning immutable references.
        Returns:
            An iterator of immutable references to the list elements.
        """
        return _InlineListIter(0, self)
    @always_inline
    fn __contains__[
        C: ComparableCollectionElement
    ](self: Self, value: C) -> Bool:
        """Verify if a given value is present in the list.
        ```mojo
        var x = InlineList[Int](1,2,3)
        if 3 in x: print("x contains 3")
        ```
        Parameters:
            C: The type of the elements in the list. Must implement the
              traits `EqualityComparable` and `CollectionElement`.
        Args:
            value: The value to find.
        Returns:
            True if the value is contained in the list, False otherwise.
        """
        constrained[
            _type_is_eq[ElementType, C](), "value type is not self.ElementType"
        ]()
        for i in self:
            if value == rebind[Reference[C, False, __lifetime_of(self)]](i)[]:
                return True
        return False
    @always_inline
    fn count[C: ComparableCollectionElement](self: Self, value: C) -> Int:
        """Counts the number of occurrences of a value in the list.
        ```mojo
        var my_list = InlineList[Int](1, 2, 3)
        print(my_list.count(1))
        ```
        Parameters:
            C: The type of the elements in the list. Must implement the
              traits `EqualityComparable` and `CollectionElement`.
        Args:
            value: The value to count.
        Returns:
            The number of occurrences of the value in the list.
        """
        constrained[
            _type_is_eq[ElementType, C](), "value type is not self.ElementType"
        ]()
        var count = 0
        for elem in self:
            if (
                value
                == rebind[Reference[C, False, __lifetime_of(self)]](elem)[]
            ):
                count += 1
        return count
    @always_inline
    fn __bool__(self) -> Bool:
        """Checks whether the list has any elements or not.
        Returns:
            `False` if the list is empty, `True` if there is at least one element.
        """
        return len(self) > 0
| mojo/stdlib/src/collections/inline_list.mojo | false |
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines the List type.
You can import these APIs from the `collections` package. For example:
```mojo
from collections import List
```
"""
from memory import UnsafePointer, Reference
from memory.unsafe_pointer import move_pointee, move_from_pointee
from sys.intrinsics import _type_is_eq
from .optional import Optional
from utils import Span
# ===----------------------------------------------------------------------===#
# List
# ===----------------------------------------------------------------------===#
@value
struct _ListIter[
    T: CollectionElement,
    list_mutability: Bool,
    list_lifetime: AnyLifetime[list_mutability].type,
    forward: Bool = True,
]:
    """Iterator for List.
    Parameters:
        T: The type of the elements in the list.
        list_mutability: Whether the reference to the list is mutable.
        list_lifetime: The lifetime of the List
        forward: The iteration direction. `False` is backwards.
    """
    alias list_type = List[T]
    var index: Int
    var src: Reference[Self.list_type, list_mutability, list_lifetime]
    fn __iter__(self) -> Self:
        return self
    fn __next__(
        inout self,
    ) -> Reference[T, list_mutability, list_lifetime]:
        # Advance the cursor first, then return the element it passed over;
        # reversed iteration decrements instead and reads the new position.
        @parameter
        if forward:
            self.index += 1
            return self.src[].__get_ref(self.index - 1)
        else:
            self.index -= 1
            return self.src[].__get_ref(self.index)
    fn __len__(self) -> Int:
        # Remaining element count: distance to the end going forward, or the
        # current index itself going backward.
        @parameter
        if forward:
            return len(self.src[]) - self.index
        else:
            return self.index
struct List[T: CollectionElement](CollectionElement, Sized, Boolable):
"""The `List` type is a dynamically-allocated list.
It supports pushing and popping from the back resizing the underlying
storage as needed. When it is deallocated, it frees its memory.
Parameters:
T: The type of the elements.
"""
# Fields
var data: UnsafePointer[T]
"""The underlying storage for the list."""
var size: Int
"""The number of elements in the list."""
var capacity: Int
"""The amount of elements that can fit in the list without resizing it."""
# ===-------------------------------------------------------------------===#
# Life cycle methods
# ===-------------------------------------------------------------------===#
    fn __init__(inout self):
        """Constructs an empty list."""
        # Null pointer, no allocation until the first append/reserve.
        self.data = UnsafePointer[T]()
        self.size = 0
        self.capacity = 0
    fn __init__(inout self, existing: Self):
        """Creates a deep copy of the given list.
        Args:
            existing: The list to copy.
        """
        # Element-wise copy; reserves the source's capacity up front so no
        # reallocation happens during the appends.
        self.__init__(capacity=existing.capacity)
        for e in existing:
            self.append(e[])
fn __init__(inout self, *, capacity: Int):
"""Constructs a list with the given capacity.
Args:
capacity: The requested capacity of the list.
"""
self.data = UnsafePointer[T].alloc(capacity)
self.size = 0
self.capacity = capacity
# TODO: Avoid copying elements in once owned varargs
# allow transfers.
fn __init__(inout self, *values: T):
"""Constructs a list from the given values.
Args:
values: The values to populate the list with.
"""
self = Self(capacity=len(values))
for value in values:
self.append(value[])
fn __init__(inout self, span: Span[T]):
"""Constructs a list from the a Span of values.
Args:
span: The span of values to populate the list with.
"""
self = Self(capacity=len(span))
for value in span:
self.append(value[])
fn __init__(
inout self: Self,
*,
unsafe_pointer: UnsafePointer[T],
size: Int,
capacity: Int,
):
"""Constructs a list from a pointer, its size, and its capacity.
Args:
unsafe_pointer: The pointer to the data.
size: The number of elements in the list.
capacity: The capacity of the list.
"""
self.data = unsafe_pointer
self.size = size
self.capacity = capacity
fn __moveinit__(inout self, owned existing: Self):
"""Move data of an existing list into a new one.
Args:
existing: The existing list.
"""
self.data = existing.data
self.size = existing.size
self.capacity = existing.capacity
fn __copyinit__(inout self, existing: Self):
"""Creates a deepcopy of the given list.
Args:
existing: The list to copy.
"""
self = Self(capacity=existing.capacity)
for i in range(len(existing)):
self.append(existing[i])
@always_inline
fn __del__(owned self):
"""Destroy all elements in the list and free its memory."""
for i in range(self.size):
destroy_pointee(self.data + i)
if self.data:
self.data.free()
# ===-------------------------------------------------------------------===#
# Operator dunders
# ===-------------------------------------------------------------------===#
fn __setitem__(inout self, idx: Int, owned value: T):
"""Sets a list element at the given index.
Args:
idx: The index of the element.
value: The value to assign.
"""
var normalized_idx = idx
debug_assert(
-self.size <= normalized_idx < self.size,
"index must be within bounds",
)
if normalized_idx < 0:
normalized_idx += len(self)
destroy_pointee(self.data + normalized_idx)
initialize_pointee_move(self.data + normalized_idx, value^)
@always_inline
fn __contains__[
T2: ComparableCollectionElement
](self: List[T], value: T2) -> Bool:
"""Verify if a given value is present in the list.
```mojo
var x = List[Int](1,2,3)
if 3 in x: print("x contains 3")
```
Parameters:
T2: The type of the elements in the list. Must implement the
traits `EqualityComparable` and `CollectionElement`.
Args:
value: The value to find.
Returns:
True if the value is contained in the list, False otherwise.
"""
constrained[_type_is_eq[T, T2](), "value type is not self.T"]()
for i in self:
if rebind[Reference[T2, False, __lifetime_of(self)]](i)[] == value:
return True
return False
@always_inline("nodebug")
fn __mul__(self, x: Int) -> Self:
"""Multiplies the list by x and returns a new list.
Args:
x: The multiplier number.
Returns:
The new list.
"""
# avoid the copy since it would be cleared immediately anyways
if x == 0:
return Self()
var result = List(self)
result.__mul(x)
return result^
@always_inline("nodebug")
fn __imul__(inout self, x: Int):
"""Multiplies the list by x in place.
Args:
x: The multiplier number.
"""
self.__mul(x)
@always_inline("nodebug")
fn __add__(self, owned other: Self) -> Self:
"""Concatenates self with other and returns the result as a new list.
Args:
other: List whose elements will be combined with the elements of self.
Returns:
The newly created list.
"""
var result = List(self)
result.extend(other^)
return result^
@always_inline("nodebug")
fn __iadd__(inout self, owned other: Self):
"""Appends the elements of other into self.
Args:
other: List whose elements will be appended to self.
"""
self.extend(other^)
fn __iter__(
self: Reference[Self, _, _],
) -> _ListIter[T, self.is_mutable, self.lifetime]:
"""Iterate over elements of the list, returning immutable references.
Returns:
An iterator of immutable references to the list elements.
"""
return _ListIter(0, self)
fn __reversed__(
self: Reference[Self, _, _]
) -> _ListIter[T, self.is_mutable, self.lifetime, False]:
"""Iterate backwards over the list, returning immutable references.
Returns:
A reversed iterator of immutable references to the list elements.
"""
return _ListIter[forward=False](len(self[]), self)
# ===-------------------------------------------------------------------===#
# Trait implementations
# ===-------------------------------------------------------------------===#
fn __len__(self) -> Int:
"""Gets the number of elements in the list.
Returns:
The number of elements in the list.
"""
return self.size
fn __bool__(self) -> Bool:
"""Checks whether the list has any elements or not.
Returns:
`False` if the list is empty, `True` if there is at least one element.
"""
return len(self) > 0
fn __str__[U: RepresentableCollectionElement](self: List[U]) -> String:
"""Returns a string representation of a `List`.
Note that since we can't condition methods on a trait yet,
the way to call this method is a bit special. Here is an example below:
```mojo
var my_list = List[Int](1, 2, 3)
print(my_list.__str__())
```
When the compiler supports conditional methods, then a simple `str(my_list)` will
be enough.
The elements' type must implement the `__repr__()` for this to work.
Parameters:
U: The type of the elements in the list. Must implement the
traits `Representable` and `CollectionElement`.
Returns:
A string representation of the list.
"""
# we do a rough estimation of the number of chars that we'll see
# in the final string, we assume that str(x) will be at least one char.
var minimum_capacity = (
2 # '[' and ']'
+ len(self) * 3 # str(x) and ", "
- 2 # remove the last ", "
)
var string_buffer = List[UInt8](capacity=minimum_capacity)
string_buffer.append(0) # Null terminator
var result = String(string_buffer^)
result += "["
for i in range(len(self)):
result += repr(self[i])
if i < len(self) - 1:
result += ", "
result += "]"
return result
fn __repr__[U: RepresentableCollectionElement](self: List[U]) -> String:
"""Returns a string representation of a `List`.
Note that since we can't condition methods on a trait yet,
the way to call this method is a bit special. Here is an example below:
```mojo
var my_list = List[Int](1, 2, 3)
print(my_list.__repr__(my_list))
```
When the compiler supports conditional methods, then a simple `repr(my_list)` will
be enough.
The elements' type must implement the `__repr__()` for this to work.
Parameters:
U: The type of the elements in the list. Must implement the
traits `Representable` and `CollectionElement`.
Returns:
A string representation of the list.
"""
return self.__str__()
# ===-------------------------------------------------------------------===#
# Methods
# ===-------------------------------------------------------------------===#
@always_inline
fn _realloc(inout self, new_capacity: Int):
var new_data = UnsafePointer[T].alloc(new_capacity)
for i in range(self.size):
move_pointee(src=self.data + i, dst=new_data + i)
if self.data:
self.data.free()
self.data = new_data
self.capacity = new_capacity
@always_inline
fn append(inout self, owned value: T):
"""Appends a value to this list.
Args:
value: The value to append.
"""
if self.size >= self.capacity:
self._realloc(max(1, self.capacity * 2))
initialize_pointee_move(self.data + self.size, value^)
self.size += 1
@always_inline
fn insert(inout self, i: Int, owned value: T):
"""Inserts a value to the list at the given index.
`a.insert(len(a), value)` is equivalent to `a.append(value)`.
Args:
i: The index for the value.
value: The value to insert.
"""
debug_assert(i <= self.size, "insert index out of range")
var normalized_idx = i
if i < 0:
normalized_idx = max(0, len(self) + i)
var earlier_idx = len(self)
var later_idx = len(self) - 1
self.append(value^)
for _ in range(normalized_idx, len(self) - 1):
var earlier_ptr = self.data + earlier_idx
var later_ptr = self.data + later_idx
var tmp = move_from_pointee(earlier_ptr)
move_pointee(src=later_ptr, dst=earlier_ptr)
initialize_pointee_move(later_ptr, tmp^)
earlier_idx -= 1
later_idx -= 1
@always_inline
fn __mul(inout self, x: Int):
"""Appends the original elements of this list x-1 times.
```mojo
var a = List[Int](1, 2)
a.__mul(2) # a = [1, 2, 1, 2]
```
Args:
x: The multiplier number.
"""
if x == 0:
self.clear()
return
var orig = List(self)
self.reserve(len(self) * x)
for i in range(x - 1):
self.extend(orig)
@always_inline
fn extend(inout self, owned other: List[T]):
"""Extends this list by consuming the elements of `other`.
Args:
other: List whose elements will be added in order at the end of this list.
"""
var final_size = len(self) + len(other)
var other_original_size = len(other)
self.reserve(final_size)
# Defensively mark `other` as logically being empty, as we will be doing
# consuming moves out of `other`, and so we want to avoid leaving `other`
# in a partially valid state where some elements have been consumed
# but are still part of the valid `size` of the list.
#
# That invalid intermediate state of `other` could potentially be
# visible outside this function if a `__moveinit__()` constructor were
# to throw (not currently possible AFAIK though) part way through the
# logic below.
other.size = 0
var dest_ptr = self.data + len(self)
for i in range(other_original_size):
var src_ptr = other.data + i
# This (TODO: optimistically) moves an element directly from the
# `other` list into this list using a single `T.__moveinit()__`
# call, without moving into an intermediate temporary value
# (avoiding an extra redundant move constructor call).
move_pointee(src=src_ptr, dst=dest_ptr)
dest_ptr = dest_ptr + 1
# Update the size now that all new elements have been moved into this
# list.
self.size = final_size
@always_inline
fn pop(inout self, i: Int = -1) -> T:
"""Pops a value from the list at the given index.
Args:
i: The index of the value to pop.
Returns:
The popped value.
"""
debug_assert(-len(self) <= i < len(self), "pop index out of range")
var normalized_idx = i
if i < 0:
normalized_idx += len(self)
var ret_val = move_from_pointee(self.data + normalized_idx)
for j in range(normalized_idx + 1, self.size):
move_pointee(src=self.data + j, dst=self.data + j - 1)
self.size -= 1
if self.size * 4 < self.capacity:
if self.capacity > 1:
self._realloc(self.capacity // 2)
return ret_val^
@always_inline
fn reserve(inout self, new_capacity: Int):
"""Reserves the requested capacity.
If the current capacity is greater or equal, this is a no-op.
Otherwise, the storage is reallocated and the date is moved.
Args:
new_capacity: The new capacity.
"""
if self.capacity >= new_capacity:
return
self._realloc(new_capacity)
@always_inline
fn resize(inout self, new_size: Int, value: T):
"""Resizes the list to the given new size.
If the new size is smaller than the current one, elements at the end
are discarded. If the new size is larger than the current one, the
list is appended with new values elements up to the requested size.
Args:
new_size: The new size.
value: The value to use to populate new elements.
"""
if new_size <= self.size:
self.resize(new_size)
else:
self.reserve(new_size)
for i in range(new_size, self.size):
destroy_pointee(self.data + i)
for i in range(self.size, new_size):
initialize_pointee_copy(self.data + i, value)
self.size = new_size
@always_inline
fn resize(inout self, new_size: Int):
"""Resizes the list to the given new size.
With no new value provided, the new size must be smaller than or equal
to the current one. Elements at the end are discarded.
Args:
new_size: The new size.
"""
debug_assert(
new_size <= self.size,
(
"New size must be smaller than or equal to current size when no"
" new value is provided."
),
)
for i in range(new_size, self.size):
destroy_pointee(self.data + i)
self.size = new_size
self.reserve(new_size)
fn reverse(inout self):
"""Reverses the elements of the list."""
try:
self._reverse()
except:
abort("unreachable: default _reverse start unexpectedly fails")
# This method is private to avoid exposing the non-Pythonic `start` argument.
@always_inline
fn _reverse(inout self, start: Int = 0) raises:
"""Reverses the elements of the list at positions after `start`.
Args:
start: An integer indicating the position after which to reverse elements.
"""
var start_idx = start if start >= 0 else len(self) + start
if start_idx < 0 or start_idx > len(self):
raise "IndexError: start index out of range."
var earlier_idx = start_idx
var later_idx = len(self) - 1
var effective_len = len(self) - start_idx
var half_len = effective_len // 2
for _ in range(half_len):
var earlier_ptr = self.data + earlier_idx
var later_ptr = self.data + later_idx
var tmp = move_from_pointee(earlier_ptr)
move_pointee(src=later_ptr, dst=earlier_ptr)
initialize_pointee_move(later_ptr, tmp^)
earlier_idx += 1
later_idx -= 1
# TODO: Remove explicit self type when issue 1876 is resolved.
fn index[
C: ComparableCollectionElement
](
self: Reference[List[C]],
value: C,
start: Int = 0,
stop: Optional[Int] = None,
) raises -> Int:
"""
Returns the index of the first occurrence of a value in a list
restricted by the range given the start and stop bounds.
```mojo
var my_list = List[Int](1, 2, 3)
print(my_list.index(2)) # prints `1`
```
Args:
value: The value to search for.
start: The starting index of the search, treated as a slice index
(defaults to 0).
stop: The ending index of the search, treated as a slice index
(defaults to None, which means the end of the list).
Parameters:
C: The type of the elements in the list. Must implement the
`ComparableCollectionElement` trait.
Returns:
The index of the first occurrence of the value in the list.
Raises:
ValueError: If the value is not found in the list.
"""
var start_normalized = start
var stop_normalized: Int
if stop is None:
# Default end
stop_normalized = len(self[])
else:
stop_normalized = stop.value()[]
if start_normalized < 0:
start_normalized += len(self[])
if stop_normalized < 0:
stop_normalized += len(self[])
start_normalized = _clip(start_normalized, 0, len(self[]))
stop_normalized = _clip(stop_normalized, 0, len(self[]))
for i in range(start_normalized, stop_normalized):
if self[][i] == value:
return i
raise "ValueError: Given element is not in list"
fn clear(inout self):
"""Clears the elements in the list."""
for i in range(self.size):
destroy_pointee(self.data + i)
self.size = 0
fn steal_data(inout self) -> UnsafePointer[T]:
"""Take ownership of the underlying pointer from the list.
Returns:
The underlying data.
"""
var ptr = self.data
self.data = UnsafePointer[T]()
self.size = 0
self.capacity = 0
return ptr
@always_inline
fn _adjust_span(self, span: Slice) -> Slice:
"""Adjusts the span based on the list length."""
var adjusted_span = span
if adjusted_span.start < 0:
adjusted_span.start = len(self) + adjusted_span.start
if not adjusted_span._has_end():
adjusted_span.end = len(self)
elif adjusted_span.end < 0:
adjusted_span.end = len(self) + adjusted_span.end
if span.step < 0:
var tmp = adjusted_span.end
adjusted_span.end = adjusted_span.start - 1
adjusted_span.start = tmp - 1
return adjusted_span
@always_inline
fn __getitem__(self, span: Slice) -> Self:
"""Gets the sequence of elements at the specified positions.
Args:
span: A slice that specifies positions of the new list.
Returns:
A new list containing the list at the specified span.
"""
var adjusted_span = self._adjust_span(span)
var adjusted_span_len = adjusted_span.unsafe_indices()
if not adjusted_span_len:
return Self()
var res = Self(capacity=adjusted_span_len)
for i in range(adjusted_span_len):
res.append(self[adjusted_span[i]])
return res^
@always_inline
fn __getitem__(self, idx: Int) -> T:
"""Gets a copy of the list element at the given index.
FIXME(lifetimes): This should return a reference, not a copy!
Args:
idx: The index of the element.
Returns:
A copy of the element at the given index.
"""
var normalized_idx = idx
debug_assert(
-self.size <= normalized_idx < self.size,
"index must be within bounds",
)
if normalized_idx < 0:
normalized_idx += len(self)
return (self.data + normalized_idx)[]
# TODO(30737): Replace __getitem__ with this, but lots of places use it
fn __get_ref(
self: Reference[Self, _, _], i: Int
) -> Reference[T, self.is_mutable, self.lifetime]:
"""Gets a reference to the list element at the given index.
Args:
i: The index of the element.
Returns:
An immutable reference to the element at the given index.
"""
var normalized_idx = i
if i < 0:
normalized_idx += self[].size
return self[].unsafe_get(normalized_idx)
@always_inline
fn unsafe_get(
self: Reference[Self, _, _], idx: Int
) -> Reference[Self.T, self.is_mutable, self.lifetime]:
"""Get a reference to an element of self without checking index bounds.
Users should consider using `__getitem__` instead of this method as it
is unsafe. If an index is out of bounds, this method will not abort, it
will be considered undefined behavior.
Note that there is no wraparound for negative indices, caution is
advised. Using negative indices is considered undefined behavior. Never
use `my_list.unsafe_get(-1)` to get the last element of the list.
Instead, do `my_list.unsafe_get(len(my_list) - 1)`.
Args:
idx: The index of the element to get.
Returns:
A reference to the element at the given index.
"""
debug_assert(
0 <= idx < len(self[]),
(
"The index provided must be within the range [0, len(List) -1]"
" when using List.unsafe_get()"
),
)
return (self[].data + idx)[]
fn count[T: ComparableCollectionElement](self: List[T], value: T) -> Int:
"""Counts the number of occurrences of a value in the list.
Note that since we can't condition methods on a trait yet,
the way to call this method is a bit special. Here is an example below.
```mojo
var my_list = List[Int](1, 2, 3)
print(my_list.count(1))
```
When the compiler supports conditional methods, then a simple `my_list.count(1)` will
be enough.
Parameters:
T: The type of the elements in the list. Must implement the
traits `EqualityComparable` and `CollectionElement`.
Args:
value: The value to count.
Returns:
The number of occurrences of the value in the list.
"""
var count = 0
for elem in self:
if elem[] == value:
count += 1
return count
@always_inline
fn unsafe_ptr(self) -> UnsafePointer[T]:
"""Retrieves a pointer to the underlying memory.
Returns:
The UnsafePointer to the underlying memory.
"""
return self.data
fn _clip(value: Int, start: Int, end: Int) -> Int:
    # Clamp `value` to at most `end`, then to at least `start`
    # (equivalent to max(start, min(value, end))).
    var clipped = value
    if clipped > end:
        clipped = end
    if clipped < start:
        clipped = start
    return clipped
| mojo/stdlib/src/collections/list.mojo | false |
<filename>mojo/stdlib/src/collections/optional.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Defines Optional, a type modeling a value which may or may not be present.
Optional values can be thought of as a type-safe nullable pattern.
Your value can take on a value or `None`, and you need to check
and explicitly extract the value to get it out.
```mojo
from collections import Optional
var a = Optional(1)
var b = Optional[Int](None)
if a:
print(a.value()[]) # prints 1
if b: # bool(b) is False, so no print
print(b.value()[])
var c = a.or_else(2)
var d = b.or_else(2)
print(c) # prints 1
print(d) # prints 2
```
"""
from utils import Variant
# TODO(27780): NoneType can't currently conform to traits
@value
struct _NoneType(CollectionElement):
    """An empty placeholder stored in `Optional`'s variant when no value is
    present (stand-in until `NoneType` can conform to traits)."""

    pass
# ===----------------------------------------------------------------------===#
# Optional
# ===----------------------------------------------------------------------===#
@value
struct Optional[T: CollectionElement](CollectionElement, Boolable):
    """A type modeling a value which may or may not be present.

    Optional values can be thought of as a type-safe nullable pattern.
    Your value can take on a value or `None`, and you need to check
    and explicitly extract the value to get it out.

    Currently T is required to be a `CollectionElement` so we can implement
    copy/move for Optional and allow it to be used in collections itself.

    ```mojo
    from collections import Optional
    var a = Optional(1)
    var b = Optional[Int](None)
    if a:
        print(a.value()[])  # prints 1
    if b:  # bool(b) is False, so no print
        print(b.value()[])
    var c = a.or_else(2)
    var d = b.or_else(2)
    print(c)  # prints 1
    print(d)  # prints 2
    ```

    Parameters:
        T: The type of value stored in the Optional.
    """

    # Fields
    # _NoneType comes first so its index is 0.
    # This means that Optionals that are 0-initialized will be None.
    alias _type = Variant[_NoneType, T]
    var _value: Self._type

    # ===-------------------------------------------------------------------===#
    # Life cycle methods
    # ===-------------------------------------------------------------------===#

    fn __init__(inout self):
        """Construct an empty Optional."""
        self._value = Self._type(_NoneType())

    fn __init__(inout self, owned value: T):
        """Construct an Optional containing a value.

        Args:
            value: The value to store in the optional.
        """
        self._value = Self._type(value^)

    fn __init__(inout self, value: NoneType):
        """Construct an empty Optional.

        Args:
            value: Must be exactly `None`.
        """
        self = Self()

    # ===-------------------------------------------------------------------===#
    # Operator dunders
    # ===-------------------------------------------------------------------===#

    fn __is__(self, other: NoneType) -> Bool:
        """Return `True` if the Optional has no value.

        It allows you to use the following syntax: `if my_optional is None:`

        Args:
            other: The value to compare to (None).

        Returns:
            True if the Optional has no value and False otherwise.
        """
        return not self.__bool__()

    fn __isnot__(self, other: NoneType) -> Bool:
        """Return `True` if the Optional has a value.

        It allows you to use the following syntax: `if my_optional is not None:`.

        Args:
            other: The value to compare to (None).

        Returns:
            True if the Optional has a value and False otherwise.
        """
        return self.__bool__()

    # ===-------------------------------------------------------------------===#
    # Trait implementations
    # ===-------------------------------------------------------------------===#

    fn __bool__(self) -> Bool:
        """Return true if the Optional has a value.

        Returns:
            True if the optional has a value and False otherwise.
        """
        return not self._value.isa[_NoneType]()

    fn __invert__(self) -> Bool:
        """Return False if the optional has a value.

        Returns:
            False if the optional has a value and True otherwise.
        """
        return not self

    # ===-------------------------------------------------------------------===#
    # Methods
    # ===-------------------------------------------------------------------===#

    @always_inline
    fn value(
        self: Reference[Self, _, _]
    ) -> Reference[T, self.is_mutable, self.lifetime]:
        """Retrieve a reference to the value of the Optional.

        This checks that the optional actually contains a value.
        If you call this on an empty Optional — i.e. without first verifying
        it with `__bool__()` (e.g. by `if my_option:`) or otherwise knowing
        that it contains a value (for instance with `or_else`) — the program
        will abort.

        Returns:
            A reference to the contained data of the option as a Reference[T].
        """
        if not self[].__bool__():
            abort(".value() on empty Optional")

        return self[].unsafe_value()

    @always_inline
    fn unsafe_value(
        self: Reference[Self, _, _]
    ) -> Reference[T, self.is_mutable, self.lifetime]:
        """Unsafely retrieve a reference to the value of the Optional.

        This doesn't check to see if the optional contains a value.
        If you call this without first verifying the optional with __bool__()
        eg. by `if my_option:` or without otherwise knowing that it contains a
        value (for instance with `or_else`), you'll get garbage unsafe data out.
        (The `debug_assert` below only fires in debug builds.)

        Returns:
            A reference to the contained data of the option as a Reference[T].
        """
        debug_assert(self[].__bool__(), ".value() on empty Optional")
        return self[]._value[T]

    @always_inline
    fn _value_copy(self) -> T:
        """Unsafely retrieve the value out of the Optional.

        Note: only used for Optionals when used in a parameter context
        due to compiler bugs. In general, prefer using the public `Optional.value()`
        function that returns a `Reference[T]`.
        """

        debug_assert(self.__bool__(), ".value() on empty Optional")
        return self._value[T]

    fn take(inout self) -> T:
        """Move the value out of the Optional.

        The caller takes ownership over the new value, which is moved
        out of the Optional, and the Optional is left in an empty state.

        This checks that the optional actually contains a value.
        If you call this on an empty Optional — i.e. without first verifying
        it with `__bool__()` (e.g. by `if my_option:`) or otherwise knowing
        that it contains a value (for instance with `or_else`) — the program
        will abort.

        Returns:
            The contained data of the option as an owned T value.
        """
        if not self.__bool__():
            abort(".take() on empty Optional")
        return self.unsafe_take()

    fn unsafe_take(inout self) -> T:
        """Unsafely move the value out of the Optional.

        The caller takes ownership over the new value, which is moved
        out of the Optional, and the Optional is left in an empty state.

        This does NOT check that the optional contains a value (the
        `debug_assert` below only fires in debug builds).
        If you call this without first verifying the optional with __bool__()
        eg. by `if my_option:` or without otherwise knowing that it contains a
        value (for instance with `or_else`), you'll get garbage unsafe data out.

        Returns:
            The contained data of the option as an owned T value.
        """
        debug_assert(self.__bool__(), ".unsafe_take() on empty Optional")
        return self._value.unsafe_replace[_NoneType, T](_NoneType())

    fn or_else(self, default: T) -> T:
        """Return the underlying value contained in the Optional or a default value if the Optional's underlying value is not present.

        Args:
            default: The new value to use if no value was present.

        Returns:
            The underlying value contained in the Optional or a default value.
        """
        if self.__bool__():
            return self._value[T]
        return default
# ===----------------------------------------------------------------------===#
# OptionalReg
# ===----------------------------------------------------------------------===#
@register_passable("trivial")
struct OptionalReg[T: AnyTrivialRegType](Boolable):
    """A register-passable optional type.

    This struct optionally contains a value. It only works with trivial register
    passable types at the moment.

    Parameters:
        T: The type of value stored in the Optional.
    """

    # Fields
    # The underlying MLIR variant: index 0 holds a T, index 1 (an i1) marks
    # the "no value" state.
    alias _mlir_type = __mlir_type[`!kgen.variant<`, T, `, i1>`]
    var _value: Self._mlir_type

    # ===-------------------------------------------------------------------===#
    # Life cycle methods
    # ===-------------------------------------------------------------------===#

    fn __init__(inout self):
        """Create an optional with a value of None."""
        self = Self(None)

    fn __init__(inout self, value: T):
        """Create an optional with a value.

        Args:
            value: The value.
        """
        self._value = __mlir_op.`kgen.variant.create`[
            _type = Self._mlir_type, index = Int(0).value
        ](value)

    fn __init__(inout self, value: NoneType):
        """Create an optional without a value from a None literal.

        Args:
            value: The None value.
        """
        self._value = __mlir_op.`kgen.variant.create`[
            _type = Self._mlir_type, index = Int(1).value
        ](__mlir_attr.false)

    # ===-------------------------------------------------------------------===#
    # Operator dunders
    # ===-------------------------------------------------------------------===#

    fn __is__(self, other: NoneType) -> Bool:
        """Return `True` if the Optional has no value.

        It allows you to use the following syntax: `if my_optional is None:`

        Args:
            other: The value to compare to (None).

        Returns:
            True if the Optional has no value and False otherwise.
        """
        return not self.__bool__()

    fn __isnot__(self, other: NoneType) -> Bool:
        """Return `True` if the Optional has a value.

        It allows you to use the following syntax: `if my_optional is not None:`

        Args:
            other: The value to compare to (None).

        Returns:
            True if the Optional has a value and False otherwise.
        """
        return self.__bool__()

    # ===-------------------------------------------------------------------===#
    # Trait implementations
    # ===-------------------------------------------------------------------===#

    fn __bool__(self) -> Bool:
        """Return true if the optional has a value.

        Returns:
            True if the optional has a value and False otherwise.
        """
        return __mlir_op.`kgen.variant.is`[index = Int(0).value](self._value)

    # ===-------------------------------------------------------------------===#
    # Methods
    # ===-------------------------------------------------------------------===#

    @always_inline
    fn value(self) -> T:
        """Get the optional value.

        The variant is read unconditionally at index 0; calling this on an
        empty OptionalReg is undefined behavior.

        Returns:
            The contained value.
        """
        return __mlir_op.`kgen.variant.take`[index = Int(0).value](self._value)
| mojo/stdlib/src/collections/optional.mojo | false |
<filename>mojo/stdlib/src/collections/set.mojo
# ===----------------------------------------------------------------------=== #
# Copyright (c) 2024, Modular Inc. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions:
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===----------------------------------------------------------------------=== #
"""Implements the Set datatype."""
from .dict import Dict, KeyElement, _DictEntryIter, _DictKeyIter
struct Set[T: KeyElement](Sized, Comparable, Hashable, Boolable):
"""A set data type.
O(1) average-case amortized add, remove, and membership check.
```mojo
from collections import Set
var set = Set[Int](1, 2, 3)
print(len(set)) # 3
set.add(4)
for element in set:
print(element[])
set -= Set[Int](3, 4, 5)
print(set == Set[Int](1, 2)) # True
print(set | Set[Int](0, 1) == Set[Int](0, 1, 2)) # True
var element = set.pop()
print(len(set)) # 1
```
Parameters:
T: The element type of the set. Must implement KeyElement.
"""
# Fields
var _data: Dict[T, NoneType]
# ===-------------------------------------------------------------------===#
# Life cycle methods
# ===-------------------------------------------------------------------===#
    fn __init__(inout self, *ts: T):
        """Construct a set from initial elements.

        Duplicate arguments are deduplicated by the backing Dict.

        Args:
            ts: Variadic of elements to add to the set.
        """
        self._data = Dict[T, NoneType]()
        for t in ts:
            self.add(t[])
    fn __init__(inout self, elements: Self):
        """Explicitly copy another Set instance.

        Args:
            elements: An existing set to copy.
        """
        self.__init__()
        for e in elements:
            self.add(e[])
    fn __init__(inout self, elements: List[T]):
        """Construct a set from a List of elements.

        Duplicates in the list are collapsed to a single element.

        Args:
            elements: A vector of elements to add to the set.
        """
        self.__init__()
        for e in elements:
            self.add(e[])
    fn __moveinit__(inout self, owned other: Self):
        """Move constructor.

        Args:
            other: The existing Set instance to move from.
        """
        # Just steal the backing dict; no per-element work needed.
        self._data = other._data^
# ===-------------------------------------------------------------------===#
# Operator dunders
# ===-------------------------------------------------------------------===#
    fn __contains__(self, t: T) -> Bool:
        """Whether or not the set contains an element.

        O(1) average case, delegating to the backing Dict lookup.

        Args:
            t: The element to check membership in the set.

        Returns:
            Whether or not the set contains the element.
        """
        return t in self._data
fn __eq__(self, other: Self) -> Bool:
"""Set equality.
Args:
other: Another Set instance to check equality against.
Returns:
True if the sets contain the same elements and False otherwise.
"""
if len(self) != len(other):
return False
for e in self:
if e[] not in other:
return False
return True
    fn __ne__(self, other: Self) -> Bool:
        """Set inequality.

        The logical negation of `__eq__`.

        Args:
            other: Another Set instance to check equality against.

        Returns:
            True if the sets are different and False otherwise.
        """
        return not (self == other)
    fn __and__(self, other: Self) -> Self:
        """The set intersection operator. Delegates to `intersection`.

        Args:
            other: Another Set instance to intersect with this one.

        Returns:
            A new set containing only the elements which appear in both
            this set and the `other` set.
        """
        return self.intersection(other)
    fn __iand__(inout self, other: Self):
        """In-place set intersection. Delegates to `intersection_update`.

        Updates the set to contain only the elements which are already in
        the set and are also contained in the `other` set.

        Args:
            other: Another Set instance to intersect with this one.
        """
        self.intersection_update(other)
    fn __or__(self, other: Self) -> Self:
        """The set union operator. Delegates to `union`.

        Args:
            other: Another Set instance to union with this one.

        Returns:
            A new set containing any elements which appear in either
            this set or the `other` set.
        """
        return self.union(other)
    fn __ior__(inout self, other: Self):
        """In-place set union. Delegates to `update`.

        Updates the set to contain all elements in the `other` set
        as well as keeping all elements it already contained.

        Args:
            other: Another Set instance to union with this one.
        """
        self.update(other)
    fn __sub__(self, other: Self) -> Self:
        """Set subtraction. Delegates to `difference`.

        Args:
            other: Another Set instance to subtract from this one.

        Returns:
            A new set containing elements of this set, but not containing
            any elements which were in the `other` set.
        """
        return self.difference(other)
    fn __isub__(inout self, other: Self):
        """In-place set subtraction. Delegates to `difference_update`.

        Updates the set to remove any elements from the `other` set.

        Args:
            other: Another Set instance to subtract from this one.
        """
        self.difference_update(other)
    fn __le__(self, other: Self) -> Bool:
        """Overloads the <= operator for sets. Works like the `issubset` method.

        Args:
            other: Another Set instance to check against.

        Returns:
            True if this set is a subset of the `other` set, False otherwise.
        """
        return self.issubset(other)
    fn __ge__(self, other: Self) -> Bool:
        """Overloads the >= operator for sets. Works like the `issuperset` method.

        Args:
            other: Another Set instance to check against.

        Returns:
            True if this set is a superset of the `other` set, False otherwise.
        """
        return self.issuperset(other)
    fn __gt__(self, other: Self) -> Bool:
        """Overloads the > operator for strict superset comparison of sets.

        A strict superset contains every element of `other` and at least
        one additional element.

        Args:
            other: The set to compare against for the strict superset relationship.

        Returns:
            True if the set is a strict superset of the `other` set, False otherwise.
        """
        return self >= other and self != other
    fn __lt__(self, other: Self) -> Bool:
        """Overloads the < operator for strict subset comparison of sets.

        A strict subset is contained in `other`, which has at least one
        additional element.

        Args:
            other: The set to compare against for the strict subset relationship.

        Returns:
            True if the set is a strict subset of the `other` set, False otherwise.
        """
        return self <= other and self != other
    fn __xor__(self, other: Self) -> Self:
        """Overloads the ^ operator for sets. Works like the `symmetric_difference` method.

        Args:
            other: The set to find the symmetric difference with.

        Returns:
            A new set containing the symmetric difference of the two sets.
        """
        return self.symmetric_difference(other)
    fn __ixor__(inout self, other: Self):
        """Overloads the ^= operator. Works like the `symmetric_difference_update` method.

        Updates the set with the symmetric difference of itself and another set.

        Args:
            other: The set to find the symmetric difference with.
        """
        self.symmetric_difference_update(other)
# ===-------------------------------------------------------------------===#
# Trait implementations
# ===-------------------------------------------------------------------===#
fn __bool__(self) -> Bool:
"""Whether the set is non-empty or not.
Returns:
True if the set is non-empty, False if it is empty.
"""
return len(self).__bool__()
    fn __len__(self) -> Int:
        """The size of the set.

        Delegates to the backing dictionary's length.

        Returns:
            The number of elements in the set.
        """
        return len(self._data)
    fn __hash__(self) -> Int:
        """A hash value of the elements in the set.

        The hash value is order independent, so s1 == s2 -> hash(s1) == hash(s2).

        Returns:
            A hash value of the set suitable for non-cryptographic purposes.
        """
        var hash_value = 0
        # XOR is commutative and associative, so the combined hash does
        # not depend on the set's iteration (insertion) order.
        for e in self:
            hash_value ^= hash(e[])
        return hash_value
# ===-------------------------------------------------------------------===#
# Methods
# ===-------------------------------------------------------------------===#
    fn __iter__(
        self: Reference[Self, _, _],
    ) -> _DictKeyIter[T, NoneType, self.is_mutable, self.lifetime]:
        """Iterate over elements of the set, returning immutable references.

        Elements are yielded in insertion order, matching the underlying
        dictionary's key iteration.

        Returns:
            An iterator of immutable references to the set elements.
        """
        # here we rely on Set being a trivial wrapper of a Dict, so the
        # dict's key iterator can be constructed directly over _data
        return _DictKeyIter(_DictEntryIter(0, 0, self[]._data))
    fn add(inout self, t: T):
        """Add an element to the set.

        A no-op (aside from overwriting the dict slot) if the element is
        already present.

        Args:
            t: The element to add to the set.
        """
        self._data[t] = None
    fn remove(inout self, t: T) raises:
        """Remove an element from the set.

        Use `discard` instead if a missing element should not be an error.

        Args:
            t: The element to remove from the set.

        Raises:
            If the element isn't in the set to remove.
        """
        self._data.pop(t)
    fn pop(inout self) raises -> T:
        """Remove any one item from the set, and return it.

        As an implementation detail this will remove the first item
        according to insertion order. This is practically useful
        for breadth-first search implementations.

        Returns:
            The element which was removed from the set.

        Raises:
            If the set is empty.
        """
        if not self:
            raise "Pop on empty set"
        # Copy the first element out before removing it, since removal
        # invalidates the iterator's reference.
        var iter = self.__iter__()
        var first = iter.__next__()[]
        self.remove(first)
        return first
fn union(self, other: Self) -> Self:
"""Set union.
Args:
other: Another Set instance to union with this one.
Returns:
A new set containing any elements which appear in either
this set or the `other` set.
"""
var result = Set(self)
for o in other:
result.add(o[])
return result^
fn intersection(self, other: Self) -> Self:
"""Set intersection.
Args:
other: Another Set instance to intersect with this one.
Returns:
A new set containing only the elements which appear in both
this set and the `other` set.
"""
var result = Set[T]()
for v in self:
if v[] in other:
result.add(v[])
return result^
fn difference(self, other: Self) -> Self:
"""Set difference.
Args:
other: Another Set instance to find the difference with this one.
Returns:
A new set containing elements that are in this set but not in
the `other` set.
"""
var result = Set[T]()
for e in self:
if e[] not in other:
result.add(e[])
return result^
fn update(inout self, other: Self):
"""In-place set update.
Updates the set to contain all elements in the `other` set
as well as keeping all elements it already contained.
Args:
other: Another Set instance to union with this one.
"""
for e in other:
self.add(e[])
    fn intersection_update(inout self, other: Self):
        """In-place set intersection update.

        Updates the set by retaining only elements found in both this set and the `other` set,
        removing all other elements. The result is the intersection of this set with `other`.

        Args:
            other: Another Set instance to intersect with this one.
        """
        # Possible to do this without an extra allocation, but need to be
        # careful about concurrent iteration + mutation: removing from
        # self while iterating it would invalidate the iteration, so we
        # first materialize (self - other) and then subtract it.
        self.difference_update(self - other)
fn difference_update(inout self, other: Self):
"""In-place set subtraction.
Updates the set by removing all elements found in the `other` set,
effectively keeping only elements that are unique to this set.
Args:
other: Another Set instance to subtract from this one.
"""
for o in other:
try:
self.remove(o[])
except:
pass
fn issubset(self, other: Self) -> Bool:
"""Check if this set is a subset of another set.
Args:
other: Another Set instance to check against.
Returns:
True if this set is a subset of the `other` set, False otherwise.
"""
if len(self) > len(other):
return False
for element in self:
if element[] not in other:
return False
return True
fn isdisjoint(self, other: Self) -> Bool:
"""Check if this set is disjoint with another set.
Args:
other: Another Set instance to check against.
Returns:
True if this set is disjoint with the `other` set, False otherwise.
"""
for element in self:
if element[] in other:
return False
return True
fn issuperset(self, other: Self) -> Bool:
"""Check if this set is a superset of another set.
Args:
other: Another Set instance to check against.
Returns:
True if this set is a superset of the `other` set, False otherwise.
"""
if len(self) < len(other):
return False
for element in other:
if element[] not in self:
return False
return True
fn symmetric_difference(self, other: Self) -> Self:
"""Returns the symmetric difference of two sets.
Args:
other: The set to find the symmetric difference with.
Returns:
A new set containing the symmetric difference of the two sets.
"""
var result = Set[T]()
for element in self:
if element[] not in other:
result.add(element[])
for element in other:
if element[] not in self:
result.add(element[])
return result^
    fn symmetric_difference_update(inout self, other: Self):
        """Updates the set with the symmetric difference of itself and another set.

        Implemented by building a fresh set and replacing self, which
        avoids mutating the set while iterating it.

        Args:
            other: The set to find the symmetric difference with.
        """
        self = self.symmetric_difference(other)
    fn discard(inout self, value: T):
        """Remove a value from the set if it exists. Pass otherwise.

        Non-raising counterpart of `remove`.

        Args:
            value: The element to remove from the set.
        """
        try:
            # pop raises only when the key is absent, which we ignore.
            self._data.pop(value)
        except:
            pass
fn clear(inout self) raises:
"""Removes all elements from the set.
This method modifies the set in-place, removing all of its elements.
After calling this method, the set will be empty.
"""
for _ in range(len(self)):
var a = self.pop()
#! This code below (without using range function) won't pass tests
#! It leaves set with one remaining item. Is this a bug?
# for _ in self:
# var a = self.pop()
| mojo/stdlib/src/collections/set.mojo | false |