diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..a2dc007fe08564243fe4f97ca6eb19b5dd499440 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+docs/gpt-oss-120b.svg filter=lfs diff=lfs merge=lfs -text
+docs/gpt-oss-20b.svg filter=lfs diff=lfs merge=lfs -text
+docs/gpt-oss.svg filter=lfs diff=lfs merge=lfs -text
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4ff82a62382577aa332f9ee4a882aaad7f47fea7
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,23 @@
+cmake_minimum_required(VERSION 3.26)
+project(gpt_oss LANGUAGES C CXX)
+
+# If not defined externally, auto-detect
+if(NOT DEFINED GPTOSS_BUILD_METAL)
+ if(APPLE AND CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
+ message(STATUS "Apple Silicon detected → enabling GPTOSS_BUILD_METAL")
+ set(GPTOSS_BUILD_METAL ON)
+ else()
+ message(STATUS "Non-Apple Silicon → disabling GPTOSS_BUILD_METAL")
+ set(GPTOSS_BUILD_METAL OFF)
+ endif()
+else()
+ message(STATUS "GPTOSS_BUILD_METAL manually set to: ${GPTOSS_BUILD_METAL}")
+endif()
+
+# Now declare it as a cache variable (respects user-provided value)
+set(GPTOSS_BUILD_METAL "${GPTOSS_BUILD_METAL}" CACHE BOOL "Enable Metal backend")
+
+if(GPTOSS_BUILD_METAL)
+ enable_language(OBJC)
+ add_subdirectory(gpt_oss/metal)
+endif()
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..7bd37930ef005512c15be7731240ecb70da663aa
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+recursive-include _build *
\ No newline at end of file
diff --git a/README.md b/README.md
index d3571cc81704ecf4af144edeacc82b9ad142d32c..0104cec4c8f42625442989de5742c41c017d16b6 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,595 @@
----
-license: apache-2.0
----
\ No newline at end of file
+
+Try gpt-oss · Guides · Model card · OpenAI blog
+
+Download gpt-oss-120b and gpt-oss-20b on Hugging Face
+
+Welcome to the gpt-oss series, [OpenAI's open-weight models](https://openai.com/open-models/) designed for powerful reasoning, agentic tasks, and versatile developer use cases.
+
+We're releasing two flavors of these open models:
+
+- `gpt-oss-120b` — for production, general purpose, high reasoning use cases that fit into a single 80GB GPU (like NVIDIA H100 or AMD MI300X) (117B parameters with 5.1B active parameters)
+- `gpt-oss-20b` — for lower latency, and local or specialized use cases (21B parameters with 3.6B active parameters)
+
+Both models were trained using our [harmony response format][harmony] and should only be used with this format; otherwise, they will not work correctly.
+
+## Table of Contents
+- [Highlights](#highlights)
+- [Inference examples](#inference-examples)
+- [About this repository](#about-this-repository)
+- [Setup](#setup)
+- [Download the model](#download-the-model)
+- [Reference PyTorch implementation](#reference-pytorch-implementation)
+- [Reference Triton implementation (single GPU)](#reference-triton-implementation-single-gpu)
+- [Reference Metal implementation](#reference-metal-implementation)
+- [Harmony format & tools](#harmony-format--tools)
+- [Clients](#clients)
+- [Tools](#tools)
+- [Other details](#other-details)
+- [Contributing](#contributing)
+
+### Highlights
+
+- **Permissive Apache 2.0 license:** Build freely without copyleft restrictions or patent risk—ideal for experimentation, customization, and commercial deployment.
+- **Configurable reasoning effort:** Easily adjust the reasoning effort (low, medium, high) based on your specific use case and latency needs.
+- **Full chain-of-thought:** Provides complete access to the model's reasoning process, facilitating easier debugging and greater trust in outputs. This information is not intended to be shown to end users.
+- **Fine-tunable:** Fully customize models to your specific use case through parameter fine-tuning.
+- **Agentic capabilities:** Use the models' native capabilities for function calling, [web browsing](#browser), [Python code execution](#python), and Structured Outputs.
+- **MXFP4 quantization:** The models were post-trained with MXFP4 quantization of the MoE weights, making `gpt-oss-120b` run on a single 80GB GPU (like NVIDIA H100 or AMD MI300X) and the `gpt-oss-20b` model run within 16GB of memory. All evals were performed with the same MXFP4 quantization.
+
+### Inference examples
+
+#### Transformers
+
+You can use `gpt-oss-120b` and `gpt-oss-20b` with the Transformers library. If you use Transformers' chat template, it will automatically apply the [harmony response format][harmony]. If you use `model.generate` directly, you need to apply the harmony format manually using the chat template or use our [`openai-harmony`][harmony] package.
+
+```python
+from transformers import pipeline
+import torch
+
+model_id = "openai/gpt-oss-120b"
+
+pipe = pipeline(
+ "text-generation",
+ model=model_id,
+ torch_dtype="auto",
+ device_map="auto",
+)
+
+messages = [
+ {"role": "user", "content": "Explain quantum mechanics clearly and concisely."},
+]
+
+outputs = pipe(
+ messages,
+ max_new_tokens=256,
+)
+print(outputs[0]["generated_text"][-1])
+```
+
+[Learn more about how to use gpt-oss with Transformers.](https://cookbook.openai.com/articles/gpt-oss/run-transformers)
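+
+If you prefer to call `model.generate` directly, the snippet below is a minimal sketch of applying the harmony format via the chat template. The model id and generation settings here are illustrative, not prescriptive.
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_id = "openai/gpt-oss-20b"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id, torch_dtype="auto", device_map="auto"
+)
+
+messages = [
+    {"role": "user", "content": "Explain quantum mechanics clearly and concisely."},
+]
+
+# The chat template renders the conversation in the harmony response format.
+input_ids = tokenizer.apply_chat_template(
+    messages, add_generation_prompt=True, return_tensors="pt"
+).to(model.device)
+
+output_ids = model.generate(input_ids, max_new_tokens=256)
+print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:]))
+```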
+
+#### vLLM
+
+vLLM recommends using [`uv`](https://docs.astral.sh/uv/) for Python dependency management. You can use vLLM to spin up an OpenAI-compatible web server. The following command will automatically download the model and start the server.
+
+```bash
+uv pip install --pre vllm==0.10.1+gptoss \
+ --extra-index-url https://wheels.vllm.ai/gpt-oss/ \
+ --extra-index-url https://download.pytorch.org/whl/nightly/cu128 \
+ --index-strategy unsafe-best-match
+
+vllm serve openai/gpt-oss-20b
+```
+
+[Learn more about how to use gpt-oss with vLLM.](https://cookbook.openai.com/articles/gpt-oss/run-vllm)
+
+You can also run vLLM offline, without starting a server. Run the code below after installing the libraries described above, plus the harmony package (`uv pip install openai-harmony`):
+```python
+# source .oss/bin/activate
+
+import os
+os.environ["VLLM_USE_FLASHINFER_SAMPLER"] = "0"
+
+import json
+from openai_harmony import (
+ HarmonyEncodingName,
+ load_harmony_encoding,
+ Conversation,
+ Message,
+ Role,
+ SystemContent,
+ DeveloperContent,
+)
+
+from vllm import LLM, SamplingParams
+
+# --- 1) Render the prefill with Harmony ---
+encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
+
+convo = Conversation.from_messages(
+ [
+ Message.from_role_and_content(Role.SYSTEM, SystemContent.new()),
+ Message.from_role_and_content(
+ Role.DEVELOPER,
+ DeveloperContent.new().with_instructions("Always respond in riddles"),
+ ),
+ Message.from_role_and_content(Role.USER, "What is the weather like in SF?"),
+ ]
+)
+
+prefill_ids = encoding.render_conversation_for_completion(convo, Role.ASSISTANT)
+
+# Harmony stop tokens (pass to sampler so they won't be included in output)
+stop_token_ids = encoding.stop_tokens_for_assistant_actions()
+
+# --- 2) Run vLLM with prefill ---
+llm = LLM(
+ model="openai/gpt-oss-20b",
+ trust_remote_code=True,
+    gpu_memory_utilization=0.95,
+ max_num_batched_tokens=4096,
+ max_model_len=5000,
+ tensor_parallel_size=1
+)
+
+sampling = SamplingParams(
+ max_tokens=128,
+ temperature=1,
+ stop_token_ids=stop_token_ids,
+)
+
+outputs = llm.generate(
+ prompt_token_ids=[prefill_ids], # batch of size 1
+ sampling_params=sampling,
+)
+
+# vLLM gives you both text and token IDs
+gen = outputs[0].outputs[0]
+text = gen.text
+output_tokens = gen.token_ids # <-- these are the completion token IDs (no prefill)
+
+# --- 3) Parse the completion token IDs back into structured Harmony messages ---
+entries = encoding.parse_messages_from_completion_tokens(output_tokens, Role.ASSISTANT)
+
+# 'entries' is a sequence of structured conversation entries (assistant messages, tool calls, etc.).
+for message in entries:
+ print(f"{json.dumps(message.to_dict())}")
+```
+
+#### PyTorch / Triton / Metal
+
+These implementations are largely reference implementations for educational purposes and are not expected to be run in production.
+
+[Learn more below.](#reference-pytorch-implementation)
+
+#### Ollama
+
+If you are trying to run `gpt-oss` on consumer hardware, you can use Ollama by running the following commands after [installing Ollama](https://ollama.com/download).
+
+```bash
+# gpt-oss-20b
+ollama pull gpt-oss:20b
+ollama run gpt-oss:20b
+
+# gpt-oss-120b
+ollama pull gpt-oss:120b
+ollama run gpt-oss:120b
+```
+
+[Learn more about how to use gpt-oss with Ollama.](https://cookbook.openai.com/articles/gpt-oss/run-locally-ollama)
+
+#### LM Studio
+
+If you are using [LM Studio](https://lmstudio.ai/) you can use the following commands to download the models.
+
+```bash
+# gpt-oss-20b
+lms get openai/gpt-oss-20b
+# gpt-oss-120b
+lms get openai/gpt-oss-120b
+```
+
+Check out our [awesome list](./awesome-gpt-oss.md) for a broader collection of gpt-oss resources and inference partners.
+
+## About this repository
+
+This repository provides a collection of reference implementations:
+
+- **Inference:**
+ - [`torch`](#reference-pytorch-implementation) — a non-optimized [PyTorch](https://pytorch.org/) implementation for educational purposes only. Requires at least 4× H100 GPUs due to lack of optimization.
+ - [`triton`](#reference-triton-implementation-single-gpu) — a more optimized implementation using [PyTorch](https://pytorch.org/) & [Triton](https://github.com/triton-lang/triton) incl. using CUDA graphs and basic caching
+ - [`metal`](#reference-metal-implementation) — a Metal-specific implementation for running the models on Apple Silicon hardware
+- **Tools:**
+ - [`browser`](#browser) — a reference implementation of the browser tool the models got trained on
+ - [`python`](#python) — a stateless reference implementation of the python tool the model got trained on
+- **Client examples:**
+ - [`chat`](#terminal-chat) — a basic terminal chat application that uses the PyTorch or Triton implementations for inference along with the python and browser tools
+ - [`responses_api`](#responses-api) — an example Responses API compatible server that implements the browser tool along with other Responses-compatible functionality
+
+## Setup
+
+### Requirements
+
+- Python 3.12
+- On macOS: Install the Xcode CLI tools with `xcode-select --install`
+- On Linux: These reference implementations require CUDA
+- On Windows: These reference implementations have not been tested on Windows. Try using solutions like Ollama if you are trying to run the model locally.
+
+### Installation
+
+If you want to try any of the code, you can install it directly from [PyPI](https://pypi.org/project/gpt-oss/):
+
+```shell
+# if you just need the tools
+pip install gpt-oss
+# if you want to try the torch implementation
+pip install gpt-oss[torch]
+# if you want to try the triton implementation
+pip install gpt-oss[triton]
+```
+
+If you want to modify the code or try the Metal implementation, set the project up locally:
+
+```shell
+git clone https://github.com/openai/gpt-oss.git
+GPTOSS_BUILD_METAL=1 pip install -e ".[metal]"
+```
+
+## Download the model
+
+You can download the model weights from the [Hugging Face Hub](https://huggingface.co/collections/openai/gpt-oss-68911959590a1634ba11c7a4) using the Hugging Face CLI:
+
+```shell
+# gpt-oss-120b
+hf download openai/gpt-oss-120b --include "original/*" --local-dir gpt-oss-120b/
+
+# gpt-oss-20b
+hf download openai/gpt-oss-20b --include "original/*" --local-dir gpt-oss-20b/
+```
+
+## Reference PyTorch implementation
+
+We include an inefficient reference PyTorch implementation in [gpt_oss/torch/model.py](gpt_oss/torch/model.py). This code uses basic PyTorch operators to show the exact model architecture, with a small addition of supporting tensor parallelism in MoE so that the larger model can run with this code (e.g., on 4xH100 or 2xH200). In this implementation, we upcast all weights to BF16 and run the model in BF16.
+
+To run the reference implementation, install the dependencies:
+
+```shell
+pip install -e ".[torch]"
+```
+
+And then run:
+
+```shell
+# On 4xH100:
+torchrun --nproc-per-node=4 -m gpt_oss.generate gpt-oss-120b/original/
+```
+
+## Reference Triton implementation (single GPU)
+
+We also include an optimized reference implementation that uses [an optimized Triton MoE kernel](https://github.com/triton-lang/triton/tree/main/python/triton_kernels/triton_kernels) with MXFP4 support, along with some optimizations in the attention code to reduce memory cost. Setting up this implementation will install nightly versions of Triton and PyTorch. With it, `gpt-oss-120b` can run on a single 80GB GPU.
+
+To install the reference Triton implementation, run:
+
+```shell
+# You need to install triton from source to use the triton implementation
+git clone https://github.com/triton-lang/triton
+cd triton/
+pip install -r python/requirements.txt
+pip install -e . --verbose --no-build-isolation
+pip install -e python/triton_kernels
+
+# Install the gpt-oss triton implementation
+pip install -e ".[triton]"
+```
+
+And then run:
+
+```shell
+# On 1xH100
+export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
+python -m gpt_oss.generate --backend triton gpt-oss-120b/original/
+```
+
+If you encounter `torch.OutOfMemoryError`, make sure to turn on the expandable allocator to avoid crashes when loading weights from the checkpoint.
+
+## Reference Metal implementation
+
+Additionally, we provide a reference Metal implementation for Apple Silicon. It is not production-ready but matches the output of the PyTorch implementation.
+
+The implementation is compiled automatically when you install the `[metal]` extra on an Apple Silicon device:
+
+```shell
+GPTOSS_BUILD_METAL=1 pip install -e ".[metal]"
+```
+
+To perform inference, you'll first need to convert the SafeTensors weights from Hugging Face into the right format using:
+
+```shell
+python gpt_oss/metal/scripts/create-local-model.py -s <model_dir> -d <output_file>
+```
+
+Or download the pre-converted weights:
+
+```shell
+hf download openai/gpt-oss-120b --include "metal/*" --local-dir gpt-oss-120b/metal/
+hf download openai/gpt-oss-20b --include "metal/*" --local-dir gpt-oss-20b/metal/
+```
+
+To test it you can run:
+
+```shell
+python gpt_oss/metal/examples/generate.py gpt-oss-20b/metal/model.bin -p "why did the chicken cross the road?"
+```
+
+## Harmony format & tools
+
+Along with the model, we are also releasing a new chat format library `harmony` to interact with the model. Check [this guide](https://cookbook.openai.com/articles/openai-harmony) for more info about harmony.
+
+We also include two system tools for the model: browsing and python container. Check [gpt_oss/tools](gpt_oss/tools) for the tool implementation.
+
+## Clients
+
+### Terminal Chat
+
+The terminal chat application is a basic example of how to use the harmony format together with the PyTorch, Triton, and vLLM implementations. It also exposes both the python and browser tool as optional tools that can be used.
+
+```bash
+usage: python -m gpt_oss.chat [-h] [-r REASONING_EFFORT] [-a] [-b] [--show-browser-results] [-p] [--developer-message DEVELOPER_MESSAGE] [-c CONTEXT] [--raw] [--backend {triton,torch,vllm}] FILE
+
+Chat example
+
+positional arguments:
+ FILE Path to the SafeTensors checkpoint
+
+options:
+ -h, --help show this help message and exit
+ -r REASONING_EFFORT, --reasoning-effort REASONING_EFFORT
+ Reasoning effort (default: low)
+ -a, --apply-patch Make apply_patch tool available to the model (default: False)
+ -b, --browser Use browser tool (default: False)
+ --show-browser-results
+ Show browser results (default: False)
+ -p, --python Use python tool (default: False)
+ --developer-message DEVELOPER_MESSAGE
+ Developer message (default: )
+ -c CONTEXT, --context CONTEXT
+ Max context length (default: 8192)
+ --raw Raw mode (does not render Harmony encoding) (default: False)
+ --backend {triton,torch,vllm}
+ Inference backend (default: triton)
+```
+
+> [!NOTE]
+> The torch and triton implementations require the original checkpoint under `gpt-oss-120b/original/` and `gpt-oss-20b/original/`, respectively, while vLLM uses the Hugging Face converted checkpoint under the `gpt-oss-120b/` and `gpt-oss-20b/` root directories.
+
+### Responses API
+
+We also include an example Responses API server. This server does not implement every feature and event of the Responses API but should be compatible with most of the basic use cases and serve as inspiration for anyone building their own server. Some of our inference partners are also offering their own Responses API.
+
+You can start this server with the following inference backends:
+
+- `triton` — uses the triton implementation
+- `metal` — uses the metal implementation on Apple Silicon only
+- `ollama` — uses the Ollama /api/generate API as an inference solution
+- `vllm` — uses your installed vllm version to perform inference
+- `transformers` — uses your installed transformers version to perform local inference
+
+```bash
+usage: python -m gpt_oss.responses_api.serve [-h] [--checkpoint FILE] [--port PORT] [--inference-backend BACKEND]
+
+Responses API server
+
+options:
+ -h, --help show this help message and exit
+ --checkpoint FILE Path to the SafeTensors checkpoint
+ --port PORT Port to run the server on
+ --inference-backend BACKEND Inference backend to use
+```
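+
+Because the server is Responses API compatible, you can point the standard OpenAI Python client at it. The sketch below is an illustration only: the base URL, port, path prefix, and model name are placeholders for however you started your local server, not documented defaults.
+
+```python
+from openai import OpenAI
+
+# base_url, port, and api_key here are placeholders; adjust them to your local server.
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")
+
+response = client.responses.create(
+    model="gpt-oss-120b",
+    input="Explain what MXFP4 quantization is in one paragraph.",
+)
+print(response.output_text)
+```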
+
+### Codex
+
+We support [codex](https://github.com/openai/codex) as a client for gpt-oss. To run the 20b version, add this to `~/.codex/config.toml`:
+
+```
+disable_response_storage = true
+show_reasoning_content = true
+
+[model_providers.local]
+name = "local"
+base_url = "http://localhost:11434/v1"
+
+[profiles.oss]
+model = "gpt-oss:20b"
+model_provider = "local"
+```
+
+This will work with any Chat Completions API compatible server listening on port 11434, such as Ollama. Start the server and point codex to the oss model:
+
+```
+ollama run gpt-oss:20b
+codex -p oss
+```
+
+## Tools
+
+### Browser
+
+> [!WARNING]
+> This implementation is purely for educational purposes and should not be used in production. You should implement your own equivalent of the [`YouComBackend`](gpt_oss/tools/simple_browser/backend.py) class with your own browsing environment. `YouComBackend` and `ExaBackend` are currently available.
+
+Both gpt-oss models were trained with the capability to browse using the `browser` tool that exposes the following three methods:
+
+- `search` to search for key phrases
+- `open` to open a particular page
+- `find` to look for contents on a page
+
+#### Usage
+
+To enable the browser tool, you'll have to place the definition into the `system` message of your harmony formatted prompt. You can either use the `with_browser_tool()` method if your tool implements the full interface or modify the definition using `with_tools()`. For example:
+
+```python
+import datetime
+from gpt_oss.tools.simple_browser import SimpleBrowserTool
+from gpt_oss.tools.simple_browser.backend import YouComBackend
+from openai_harmony import SystemContent, Message, Conversation, Role, load_harmony_encoding, HarmonyEncodingName
+
+encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
+
+# Depending on the browser backend you choose, you need the corresponding environment variables set up:
+# the You.com backend requires the YDC_API_KEY environment variable,
+# while the Exa backend requires the EXA_API_KEY environment variable.
+backend = YouComBackend(
+ source="web",
+)
+# backend = ExaBackend(
+# source="web",
+# )
+browser_tool = SimpleBrowserTool(backend=backend)
+
+# create a basic system prompt
+system_message_content = SystemContent.new().with_conversation_start_date(
+ datetime.datetime.now().strftime("%Y-%m-%d")
+)
+
+# if you want to use the browser tool
+if use_browser_tool:
+    # enables the tool
+    system_message_content = system_message_content.with_tools(browser_tool.tool_config)
+    # alternatively, if your tool implements the full stateful interface, you could use:
+    # system_message_content = system_message_content.with_browser_tool()
+
+# construct the system message
+system_message = Message.from_role_and_content(Role.SYSTEM, system_message_content)
+
+# create the overall prompt
+messages = [system_message, Message.from_role_and_content(Role.USER, "What's the weather in SF?")]
+conversation = Conversation.from_messages(messages)
+
+# convert to tokens
+token_ids = encoding.render_conversation_for_completion(conversation, Role.ASSISTANT)
+
+# perform inference
+# ...
+
+# parse the output
+messages = encoding.parse_messages_from_completion_tokens(output_tokens, Role.ASSISTANT)
+last_message = messages[-1]
+if last_message.recipient.startswith("browser"):
+ # perform browser call
+ response_messages = await browser_tool.process(last_message)
+
+ # extend the current messages and run inference again
+ messages.extend(response_messages)
+```
+
+#### Details
+
+To control the context window size, this tool uses a scrollable window of text that the model can interact with. For example, it might fetch the first 50 lines of a page and then scroll to the next 20 lines after that. The model has also been trained to use citations from this tool in its answers.
+
+To improve performance, the tool caches requests so that the model can revisit a different part of a page without having to reload it. For that reason, you should create a new browser instance for every request.
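+
+Because cached pages live on the tool instance, a minimal per-request setup could look like this (reusing only the names from the usage example above):
+
+```python
+from gpt_oss.tools.simple_browser import SimpleBrowserTool
+from gpt_oss.tools.simple_browser.backend import YouComBackend
+
+def make_browser_tool() -> SimpleBrowserTool:
+    # create a fresh instance per request so cached pages don't leak across requests
+    return SimpleBrowserTool(backend=YouComBackend(source="web"))
+```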
+
+### Python
+
+The model was trained to use a python tool to perform calculations and other actions as part of its chain-of-thought. During training the model used a stateful tool, which makes running tools between CoT loops easier. This reference implementation, however, uses a stateless mode. As a result, the PythonTool defines its own tool description to override the definition in [`openai-harmony`][harmony].
+
+> [!WARNING]
+> This implementation runs in a permissive Docker container, which could be problematic in cases like prompt injection. It serves as an example; you should implement your own container restrictions in production.
+
+#### Usage
+
+To enable the python tool, you'll have to place the definition into the `system` message of your harmony formatted prompt. You can either use the `with_python()` method if your tool implements the full interface or modify the definition using `with_tools()`. For example:
+
+```python
+import datetime
+from gpt_oss.tools.python_docker.docker_tool import PythonTool
+from openai_harmony import SystemContent, Message, Conversation, Role, load_harmony_encoding, HarmonyEncodingName
+
+encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
+
+python_tool = PythonTool()
+
+# create a basic system prompt
+system_message_content = SystemContent.new().with_conversation_start_date(
+ datetime.datetime.now().strftime("%Y-%m-%d")
+)
+
+# if you want to use the python tool
+if use_python_tool:
+    # enables the tool, making sure the prompt gets set with the stateless tool description
+    system_message_content = system_message_content.with_tools(python_tool.tool_config)
+    # alternatively, if your tool is not stateless, you could use:
+    # system_message_content = system_message_content.with_python()
+
+# construct the system message
+system_message = Message.from_role_and_content(Role.SYSTEM, system_message_content)
+
+# create the overall prompt
+messages = [system_message, Message.from_role_and_content(Role.USER, "What's the square root of 9001?")]
+conversation = Conversation.from_messages(messages)
+
+# convert to tokens
+token_ids = encoding.render_conversation_for_completion(conversation, Role.ASSISTANT)
+
+# perform inference
+# ...
+
+# parse the output
+messages = encoding.parse_messages_from_completion_tokens(output_tokens, Role.ASSISTANT)
+last_message = messages[-1]
+if last_message.recipient == "python":
+ # perform python call
+ response_messages = await python_tool.process(last_message)
+
+ # extend the current messages and run inference again
+ messages.extend(response_messages)
+```
+
+### Apply Patch
+
+`apply_patch` can be used to create, update or delete files locally.
+
+## Other details
+
+### Precision format
+
+We released the models with native quantization support. Specifically, we use [MXFP4](https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf) for the linear projection weights in the MoE layer. We store the MoE tensor in two parts:
+
+- `tensor.blocks` stores the actual fp4 values. We pack every two values in one `uint8` value.
+- `tensor.scales` stores the block scale. The block scaling is done among the last dimension for all MXFP4 tensors.
+
+All other tensors will be in BF16. We also recommend using BF16 as the activation precision for the model.
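+
+For illustration, here is a minimal sketch of how such a packed tensor could be expanded back to floating point. The FP4 (E2M1) value table, low-nibble-first packing, 32-value block size, and E8M0 scale bias used below are assumptions made for the example, not a specification of the checkpoint format.
+
+```python
+import torch
+
+# The 16 representable FP4 (E2M1) values, indexed by the 4-bit code (assumed encoding).
+FP4_VALUES = torch.tensor(
+    [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0,
+     -0.0, -0.5, -1.0, -1.5, -2.0, -3.0, -4.0, -6.0]
+)
+
+def dequantize_mxfp4(blocks: torch.Tensor, scales: torch.Tensor) -> torch.Tensor:
+    """blocks: uint8 tensor with two fp4 values per byte; scales: one exponent per 32-value block."""
+    lo = blocks & 0x0F                                  # first value in each byte (assumed low nibble first)
+    hi = (blocks >> 4) & 0x0F                           # second value in each byte
+    codes = torch.stack([lo, hi], dim=-1).flatten(-2)   # interleave back to [..., n_values]
+    values = FP4_VALUES[codes.long()]
+    # Each scale is a power-of-two (E8M0) exponent applied to a block of 32 values
+    # along the last dimension (bias of 127 assumed).
+    scale = torch.pow(2.0, scales.float() - 127.0).repeat_interleave(32, dim=-1)
+    return (values * scale).to(torch.bfloat16)
+```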
+
+### Recommended Sampling Parameters
+
+We recommend sampling with `temperature=1.0` and `top_p=1.0`.
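+
+For example, with the vLLM `SamplingParams` used earlier (the token limit here is an arbitrary example value):
+
+```python
+from vllm import SamplingParams
+
+# Recommended sampling settings for gpt-oss.
+sampling = SamplingParams(temperature=1.0, top_p=1.0, max_tokens=256)
+```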
+
+## Contributing
+
+The reference implementations in this repository are meant as a starting point and inspiration. Outside of bug fixes we do not intend to accept new feature contributions. If you build implementations based on this code such as new tool implementations you are welcome to contribute them to the [`awesome-gpt-oss.md`](./awesome-gpt-oss.md) file.
+
+[harmony]: https://github.com/openai/harmony
+
+## Citation
+
+```bibtex
+@misc{openai2025gptoss120bgptoss20bmodel,
+ title={gpt-oss-120b & gpt-oss-20b Model Card},
+ author={OpenAI},
+ year={2025},
+ eprint={2508.10925},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL},
+ url={https://arxiv.org/abs/2508.10925},
+}
+```
diff --git a/USAGE_POLICY b/USAGE_POLICY
new file mode 100644
index 0000000000000000000000000000000000000000..e5128f735bd6a7074da4489b46f6784ccfc06c09
--- /dev/null
+++ b/USAGE_POLICY
@@ -0,0 +1 @@
+We aim for our tools to be used safely, responsibly, and democratically, while maximizing your control over how you use them. By using OpenAI gpt-oss-120b and gpt-oss-20b, you agree to comply with all applicable law.
\ No newline at end of file
diff --git a/_build/gpt_oss_build_backend/__init__.py b/_build/gpt_oss_build_backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f46b29df858772d30fe3aa69a8ab283cbce5c1f
--- /dev/null
+++ b/_build/gpt_oss_build_backend/__init__.py
@@ -0,0 +1 @@
+"""In-tree PEP 517 backend package for gpt-oss."""
\ No newline at end of file
diff --git a/_build/gpt_oss_build_backend/backend.py b/_build/gpt_oss_build_backend/backend.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cd76bdf6648b1ebbf9dcbe5d09084476502e93e
--- /dev/null
+++ b/_build/gpt_oss_build_backend/backend.py
@@ -0,0 +1,140 @@
+"""
+Build backend for gpt-oss that supports two modes:
+
+1) Default (pure wheel for PyPI)
+ - Delegates to setuptools.build_meta.
+ - Produces a py3-none-any wheel so PyPI accepts it (no linux_x86_64 tag).
+
+2) Optional Metal/C extension build (local only)
+ - If the environment variable GPTOSS_BUILD_METAL is set to a truthy value
+ (1/true/on/yes), delegates to scikit_build_core.build.
+ - Dynamically injects build requirements (scikit-build-core, cmake, ninja,
+ pybind11) only for this mode.
+
+Why this is needed
+- PyPI rejects Linux wheels tagged linux_x86_64; manylinux/musllinux is required
+ for binary wheels. We ship a pure wheel by default, but still allow developers
+ to build/install the native Metal backend locally when needed.
+
+Typical usage
+- Publish pure wheel: `python -m build` (do not set GPTOSS_BUILD_METAL).
+- Local Metal dev: `GPTOSS_BUILD_METAL=1 pip install -e ".[metal]"`.
+- CI: keep GPTOSS_BUILD_METAL unset for releases; set it in internal jobs that
+ exercise the extension.
+
+Notes
+- The base package remains importable without the extension. The Metal backend
+ is only used when `gpt_oss.metal` is explicitly imported.
+- This file is discovered via `backend-path = ["_build"]` and
+ `build-backend = "gpt_oss_build_backend.backend"` in pyproject.toml.
+"""
+import os
+from importlib import import_module
+from typing import Any, Mapping, Sequence
+
+
+TRUE_VALUES = {"1", "true", "TRUE", "on", "ON", "yes", "YES"}
+
+
+def _use_metal_backend() -> bool:
+ return str(os.environ.get("GPTOSS_BUILD_METAL", "")).strip() in TRUE_VALUES
+
+
+def _setuptools_backend():
+ from setuptools import build_meta as _bm # type: ignore
+
+ return _bm
+
+
+def _scikit_build_backend():
+ return import_module("scikit_build_core.build")
+
+
+def _backend():
+ return _scikit_build_backend() if _use_metal_backend() else _setuptools_backend()
+
+
+# Required PEP 517 hooks
+
+def build_wheel(
+ wheel_directory: str,
+ config_settings: Mapping[str, Any] | None = None,
+ metadata_directory: str | None = None,
+) -> str:
+ return _backend().build_wheel(wheel_directory, config_settings, metadata_directory)
+
+
+def build_sdist(
+ sdist_directory: str, config_settings: Mapping[str, Any] | None = None
+) -> str:
+ return _backend().build_sdist(sdist_directory, config_settings)
+
+
+def prepare_metadata_for_build_wheel(
+ metadata_directory: str, config_settings: Mapping[str, Any] | None = None
+) -> str:
+ # Fallback if backend doesn't implement it
+ be = _backend()
+ fn = getattr(be, "prepare_metadata_for_build_wheel", None)
+ if fn is None:
+ # setuptools exposes it; scikit-build-core may not. Defer to building a wheel for metadata.
+ return _setuptools_backend().prepare_metadata_for_build_wheel(
+ metadata_directory, config_settings
+ )
+ return fn(metadata_directory, config_settings)
+
+
+# Optional hooks
+
+def build_editable(
+ editable_directory: str, config_settings: Mapping[str, Any] | None = None, metadata_directory: str | None = None
+) -> str:
+ be = _backend()
+ fn = getattr(be, "build_editable", None)
+ if fn is None:
+ # setuptools implements build_editable; if not available, raise the standard error
+ raise RuntimeError("Editable installs not supported by the selected backend")
+    return fn(editable_directory, config_settings, metadata_directory)
+
+
+def get_requires_for_build_wheel(
+ config_settings: Mapping[str, Any] | None = None,
+) -> Sequence[str]:
+ if _use_metal_backend():
+ # Add dynamic build requirements only when building the Metal backend
+ return [
+ "scikit-build-core>=0.10",
+ "pybind11>=2.12",
+ "cmake>=3.26",
+ "ninja",
+ ]
+ # setuptools usually returns []
+ return list(_setuptools_backend().get_requires_for_build_wheel(config_settings))
+
+
+def get_requires_for_build_sdist(
+ config_settings: Mapping[str, Any] | None = None,
+) -> Sequence[str]:
+ # No special requirements for SDist
+ be = _backend()
+ fn = getattr(be, "get_requires_for_build_sdist", None)
+ if fn is None:
+ return []
+ return list(fn(config_settings))
+
+
+def get_requires_for_build_editable(
+ config_settings: Mapping[str, Any] | None = None,
+) -> Sequence[str]:
+ if _use_metal_backend():
+ return [
+ "scikit-build-core>=0.10",
+ "pybind11>=2.12",
+ "cmake>=3.26",
+ "ninja",
+ ]
+ be = _setuptools_backend()
+ fn = getattr(be, "get_requires_for_build_editable", None)
+ if fn is None:
+ return []
+ return list(fn(config_settings))
\ No newline at end of file
diff --git a/awesome-gpt-oss.md b/awesome-gpt-oss.md
new file mode 100644
index 0000000000000000000000000000000000000000..15d78f7ac8adf924a3cf54680b3daf68beb6829a
--- /dev/null
+++ b/awesome-gpt-oss.md
@@ -0,0 +1,90 @@
+
+
+# Awesome gpt-oss
+
+This is a list of guides and resources to help you get started with the gpt-oss models.
+
+- [Inference](#inference)
+ - [Local](#local)
+ - [Server](#server)
+ - [Cloud](#cloud)
+- [Examples & Tutorials](#examples--tutorials)
+- [Tools](#tools)
+- [Training](#training)
+
+## Inference
+
+### Local
+
+- Ollama
+ - [How to run gpt-oss locally with Ollama](https://cookbook.openai.com/articles/gpt-oss/run-locally-ollama)
+ - [Ollama & gpt-oss launch blog](https://ollama.com/blog/gpt-oss)
+  - [Check out the models on Ollama](https://ollama.com/library/gpt-oss)
+- LM Studio
+ - [LM Studio & gpt-oss launch blog](https://lmstudio.ai/blog/gpt-oss)
+ - [Use gpt-oss-20b with LM Studio](https://lmstudio.ai/models/openai/gpt-oss-20b)
+ - [Use gpt-oss-120b with LM Studio](https://lmstudio.ai/models/openai/gpt-oss-120b)
+- Hugging Face & Transformers
+ - [How to run gpt-oss with Transformers](https://cookbook.openai.com/articles/gpt-oss/run-transformers)
+ - [Hugging Face & gpt-oss launch blog](https://huggingface.co/blog/welcome-openai-gpt-oss)
+ - [Collection of Hugging Face examples](https://github.com/huggingface/gpt-oss-recipes)
+- NVIDIA
+ - [gpt-oss on RTX](https://blogs.nvidia.com/blog/rtx-ai-garage-openai-oss)
+- AMD
+ - [Running gpt-oss models on AMD Ryzen AI Processors and Radeon Graphics Cards](https://www.amd.com/en/blogs/2025/how-to-run-openai-gpt-oss-20b-120b-models-on-amd-ryzen-ai-radeon.html)
+ - [Running gpt-oss on STX Halo and Radeon dGPUs using Lemonade](https://lemonade-server.ai/news/gpt-oss.html)
+- llama.cpp
+ - [Running gpt-oss with llama.cpp](https://github.com/ggml-org/llama.cpp/discussions/15396)
+
+### Server
+
+- vLLM
+ - [How to run gpt-oss with vLLM](https://cookbook.openai.com/articles/gpt-oss/run-vllm)
+  - [vLLM & gpt-oss recipes](https://docs.vllm.ai/projects/recipes/en/latest/OpenAI/GPT-OSS.html)
+- NVIDIA
+ - [Optimizing gpt-oss with NVIDIA TensorRT-LLM](https://cookbook.openai.com/articles/run-nvidia)
+ - [Deploying gpt-oss on TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM/blob/main/docs/source/blogs/tech_blog/blog9_Deploying_GPT_OSS_on_TRTLLM.md)
+- AMD
+ - [Running the Latest Open Models from OpenAI on AMD AI Hardware](https://rocm.blogs.amd.com/ecosystems-and-partners/openai-day-0/README.html)
+
+### Cloud
+
+- Groq
+ - [Groq & gpt-oss launch blog](https://groq.com/blog/day-zero-support-for-openai-open-models)
+ - [gpt-oss-120b model on the GroqCloud Playground](https://console.groq.com/playground?model=openai/gpt-oss-120b)
+ - [gpt-oss-20b model on the GroqCloud Playground](https://console.groq.com/playground?model=openai/gpt-oss-20b)
+ - [gpt-oss with built-in web search on GroqCloud](https://console.groq.com/docs/browser-search)
+ - [gpt-oss with built-in code execution on GroqCloud](https://console.groq.com/docs/code-execution)
+ - [Responses API on Groq](https://console.groq.com/docs/responses-api)
+- NVIDIA
+ - [NVIDIA launch blog post](https://blogs.nvidia.com/blog/openai-gpt-oss/)
+ - [NVIDIA & gpt-oss developer launch blog post](https://developer.nvidia.com/blog/delivering-1-5-m-tps-inference-on-nvidia-gb200-nvl72-nvidia-accelerates-openai-gpt-oss-models-from-cloud-to-edge/)
+ - Use [gpt-oss-120b](https://build.nvidia.com/openai/gpt-oss-120b) and [gpt-oss-20b](https://build.nvidia.com/openai/gpt-oss-20b) on NVIDIA's Cloud
+- Cloudflare
+ - [Cloudflare & gpt-oss launch blog post](https://blog.cloudflare.com/openai-gpt-oss-on-workers-ai)
+ - [gpt-oss-120b on Cloudflare Workers AI](https://developers.cloudflare.com/workers-ai/models/gpt-oss-120b)
+ - [gpt-oss-20b on Cloudflare Workers AI](https://developers.cloudflare.com/workers-ai/models/gpt-oss-20b)
+- AMD
+ - [gpt-oss-120B on AMD MI300X](https://huggingface.co/spaces/amd/gpt-oss-120b-chatbot)
+- AWS
+ - Deploy via Tensorfuse: [Deploy gpt-oss for both 20b and 120b models on AWS EKS](https://tensorfuse.io/docs/guides/modality/text/openai_oss)
+ - [AWS launch blog post](https://aws.amazon.com/blogs/aws/openai-open-weight-models-now-available-on-aws/)
+
+## Examples & Tutorials
+
+- [OpenAI harmony response format](https://cookbook.openai.com/articles/openai-harmony)
+
+## Tools
+
+- [Example `python` tool for gpt-oss](./gpt_oss/tools/python_docker/)
+- [Example `browser` tool for gpt-oss](./gpt_oss/tools/simple_browser/)
+
+## Training
+
+- [Hugging Face TRL examples](https://github.com/huggingface/gpt-oss-recipes)
+- [LlamaFactory examples](https://llamafactory.readthedocs.io/en/latest/advanced/best_practice/gpt-oss.html)
+- [Unsloth examples](https://docs.unsloth.ai/basics/gpt-oss-how-to-run-and-fine-tune)
+
+## Contributing
+
+Feel free to open a PR to add your own guides and resources on how to run gpt-oss. We will try to review it and add it here.
diff --git a/compatibility-test/.gitignore b/compatibility-test/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..2ba323b06e471fd3400fc72e5ea1a75f97f2b28e
--- /dev/null
+++ b/compatibility-test/.gitignore
@@ -0,0 +1,142 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+web_modules/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional stylelint cache
+.stylelintcache
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variable files
+.env
+.env.*
+!.env.example
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+.parcel-cache
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build / generate output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+# Comment in the public line in if your project uses Gatsby and not Next.js
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# public
+
+# vuepress build output
+.vuepress/dist
+
+# vuepress v2.x temp and cache directory
+.temp
+.cache
+
+# Sveltekit cache directory
+.svelte-kit/
+
+# vitepress build output
+**/.vitepress/dist
+
+# vitepress cache directory
+**/.vitepress/cache
+
+# Docusaurus cache and generated files
+.docusaurus
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# Firebase cache directory
+.firebase/
+
+# TernJS port file
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+.vscode-test
+
+# yarn v3
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/sdks
+!.yarn/versions
+
+# Vite logs files
+vite.config.js.timestamp-*
+vite.config.ts.timestamp-*
+
+rollout_*.jsonl
+analysis_*.json
\ No newline at end of file
diff --git a/compatibility-test/README.md b/compatibility-test/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..22e0007f1551f3385c472f7ec9c40aed73eb1351
--- /dev/null
+++ b/compatibility-test/README.md
@@ -0,0 +1,29 @@
+# API Compatibility Test
+
+This script uses the Agents SDK in TypeScript and the underlying OpenAI client to verify both the shape of the API responses and whether the API performs tool calling correctly.
+
+## What it tests
+
+1. Whether the API responses match the expected Chat Completions / Responses API shapes
+2. Whether the API correctly performs tool calling, including passing the right arguments
+
+## How to run
+
+0. Run `npm install` in this directory.
+1. Update `providers.ts` to create an entry for the API to test. Change `vllm` to the provider name of your choice. Use `chat` for Chat Completions tests and `responses` for Responses API tests.
+2. Run an initial quick test to make sure things work. This will only run one test
+
+```
+npm start -- --provider <provider> -n 1 -k 1
+```
+
+3. Run the full test (runs each test 5 times to test consistency)
+
+```
+npm start -- --provider <provider> -k 5
+```
+
+## Considerations
+
+1. The tests will fail if the API shape does not match the expected behavior
+2. Events in the chat API are currently not tested
+3. If schema validation succeeds but the arguments are wrong, the test will still pass. That's because it is likely more of a prompt engineering or validator issue than an API issue, since the tool call itself was well-formed
diff --git a/compatibility-test/analysis.ts b/compatibility-test/analysis.ts
new file mode 100644
index 0000000000000000000000000000000000000000..9c5cf97d5c2431a8048618480eb91e8418de431b
--- /dev/null
+++ b/compatibility-test/analysis.ts
@@ -0,0 +1,142 @@
+export function analyze(caseResults: any[], tries: number) {
+ // Group results by unique task: test_case + apiType
+ type TaskKey = string;
+ const taskKeyFor = (r: any): TaskKey =>
+ `${r.test_case}::${r.result?.apiType}`;
+
+  const successesByTask: Map<TaskKey, Map<number, boolean>> = new Map();
+
+ // Count wrong-input tool calls (schema correct but incorrect arguments)
+ let wrongInputToolCalls = 0;
+
+ // Count invalid response shapes per API type
+  const totalByApiType: Record<string, number> = {};
+  const invalidByApiType: Record<string, number> = {};
+
+ for (const r of caseResults) {
+ if (!r?.result || typeof r.result.apiType !== "string") continue;
+
+ // Parse attempt index from run_id `${i}_${k}` safely
+ let attemptIndex: number | undefined;
+ if (typeof r.run_id === "string") {
+ const parts = r.run_id.split("_");
+ const k = Number(parts[1]);
+ if (Number.isFinite(k)) attemptIndex = k;
+ }
+
+ const key = taskKeyFor(r);
+ if (!successesByTask.has(key)) successesByTask.set(key, new Map());
+ if (attemptIndex != null) {
+ successesByTask.get(key)!.set(attemptIndex, Boolean(r.success));
+ }
+
+ const d = r.result.toolCallingDetails ?? {};
+ const calledToolAtLeastOnce = Boolean(d.calledToolAtLeastOnce);
+ const calledToolWithRightSchema = Boolean(d.calledToolWithRightSchema);
+ const calledToolWithRightArguments = Boolean(
+ d.calledToolWithRightArguments
+ );
+ if (
+ calledToolAtLeastOnce &&
+ calledToolWithRightSchema &&
+ !calledToolWithRightArguments
+ ) {
+ wrongInputToolCalls++;
+ }
+
+ // Track invalid/total per apiType for response shape
+ const apiType = r.result.apiType as string;
+ totalByApiType[apiType] = (totalByApiType[apiType] ?? 0) + 1;
+ const isValidResponse = r.result.validResponse === true;
+ if (!isValidResponse) {
+ invalidByApiType[apiType] = (invalidByApiType[apiType] ?? 0) + 1;
+ }
+ }
+
+ const totalTasks = successesByTask.size;
+
+ // Compute pass@k and pass^k for k = 1..tries
+ const passAtKByK: number[] = [];
+ const passHatKByK: number[] = [];
+
+ for (let k = 1; k <= tries; k++) {
+ let tasksSuccessfulK = 0; // any success in first k attempts
+ let tasksAllSuccessfulK = 0; // all success in first k attempts
+
+ for (const [, attemptsMap] of successesByTask) {
+ let anySuccess = false;
+ let allSuccess = true;
+ for (let i = 0; i < k; i++) {
+ const v = attemptsMap.get(i) === true;
+ anySuccess = anySuccess || v;
+ if (!v) allSuccess = false;
+ }
+ if (anySuccess) tasksSuccessfulK++;
+ if (allSuccess) tasksAllSuccessfulK++;
+ }
+
+ const passAtK = totalTasks > 0 ? tasksSuccessfulK / totalTasks : 0;
+ const passHatK = totalTasks > 0 ? tasksAllSuccessfulK / totalTasks : 0;
+ passAtKByK.push(passAtK);
+ passHatKByK.push(passHatK);
+ }
+
+ // Convenience: final k=tries values
+ const passAtK = passAtKByK[tries - 1] ?? 0;
+ const passHatK = passHatKByK[tries - 1] ?? 0;
+
+ return {
+ totalTasks,
+ passAtKByK,
+ passHatKByK,
+ passAtK,
+ passHatK,
+ wrongInputToolCalls,
+ // New stats for invalid response shapes per API
+ invalidByApiType,
+ totalByApiType,
+ };
+}
+
+export function printAnalysis(
+ stats: ReturnType,
+ caseResults: any[],
+ provider: string,
+ selectedLines: string[],
+ tries: number,
+ skipped: number,
+ analysisFile: string
+) {
+ const formatPerK = (arr: number[]) =>
+ Array.from({ length: tries }, (_, i) => {
+ const v = arr[i] ?? 0;
+ return `${i + 1}=${v.toFixed(3)}`;
+ }).join(", ");
+
+ console.log("Summary:");
+ console.log(` Provider: ${provider}`);
+ console.log(` Total input cases: ${selectedLines.length}`);
+ console.log(` Tries: ${tries}`);
+ console.log(` Total tasks: ${stats.totalTasks}`);
+ console.log(` Total runs: ${caseResults.length}`);
+ // Conditionally print invalid response shape stats per API type
+ if ((stats.totalByApiType["responses"] ?? 0) > 0) {
+ const bad = stats.invalidByApiType["responses"] ?? 0;
+ const tot = stats.totalByApiType["responses"] ?? 0;
+ console.log(` Invalid Responses API responses: ${bad} (out of ${tot})`);
+ }
+ if ((stats.totalByApiType["chat"] ?? 0) > 0) {
+ const bad = stats.invalidByApiType["chat"] ?? 0;
+ const tot = stats.totalByApiType["chat"] ?? 0;
+ console.log(
+ ` Invalid Chat Completions API responses: ${bad} (out of ${tot})`
+ );
+ }
+ console.log(` pass@k (k=1..${tries}): ${formatPerK(stats.passAtKByK)}`);
+ console.log(` pass^k (k=1..${tries}): ${formatPerK(stats.passHatKByK)}`);
+ console.log(` pass@k (k=${tries}): ${stats.passAtK.toFixed(3)}`);
+ console.log(` pass^k (k=${tries}): ${stats.passHatK.toFixed(3)}`);
+ console.log(` Wrong-input tool calls: ${stats.wrongInputToolCalls}`);
+ console.log(` Invalid cases.jsonl lines: ${skipped}`);
+ console.log(` Analysis written to ${analysisFile}`);
+}
diff --git a/compatibility-test/cases.jsonl b/compatibility-test/cases.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..29e7d4e863d0246aee0a18b854e4501a40b307c6
--- /dev/null
+++ b/compatibility-test/cases.jsonl
@@ -0,0 +1,30 @@
+{"tool_name":"get_system_health","input":"Hey, quick check: is everything up and running?","expected_arguments":"{}"}
+{"tool_name":"get_system_health","input":"Status report please.","expected_arguments":"{}"}
+{"tool_name":"get_system_health","input":"Can you confirm the LLM health before we start?","expected_arguments":"{}"}
+{"tool_name":"get_system_health","input":"Need a health snapshot.","expected_arguments":"{}"}
+{"tool_name":"get_system_health","input":"Hi, what's the current system health?","expected_arguments":"{}"}
+{"tool_name":"markdown_to_html","input":"Convert this markdown to HTML:\n\n# Title\n\nSome *italic* text.","expected_arguments":"{\"markdown\":\"# Title\\n\\nSome *italic* text.\"}"}
+{"tool_name":"markdown_to_html","input":"Hey, could you turn `## Docs` into HTML?","expected_arguments":"{\"markdown\":\"## Docs\"}"}
+{"tool_name":"markdown_to_html","input":"Please render the following markdown:\n\n- item 1\n- item 2","expected_arguments":"{\"markdown\":\"- item 1\\n- item 2\"}"}
+{"tool_name":"markdown_to_html","input":"I have `**bold**` markdown; give me HTML.","expected_arguments":"{\"markdown\":\"**bold**\"}"}
+{"tool_name":"markdown_to_html","input":"Markdown to HTML: > quote","expected_arguments":"{\"markdown\":\"> quote\"}"}
+{"tool_name":"detect_language","input":"Hey, what language is this: 'Buenos días, ¿cómo estás?'","expected_arguments":"{\"text\":\"Buenos días, ¿cómo estás?\"}"}
+{"tool_name":"detect_language","input":"Identify the language: \"Guten Morgen\"","expected_arguments":"{\"text\":\"Guten Morgen\"}"}
+{"tool_name":"detect_language","input":"Language detection needed: こんにちは、お元気ですか?","expected_arguments":"{\"text\":\"こんにちは、お元気ですか?\"}"}
+{"tool_name":"detect_language","input":"Detect language for: 'Привет, как дела?'","expected_arguments":"{\"text\":\"Привет, как дела?\"}"}
+{"tool_name":"detect_language","input":"What language is 'Bonjour tout le monde'?","expected_arguments":"{\"text\":\"Bonjour tout le monde\"}"}
+{"tool_name":"generate_chart","input":"Plot a simple line chart for these points: (1,2),(2,4),(3,9).","expected_arguments":"{\"data\":[[1,2],[2,4],[3,9]],\"chart_type\":\"line\"}"}
+{"tool_name":"generate_chart","input":"Hey, can I get a bar chart of my sales: 10, 20, 30 across Q1–Q3?","expected_arguments":"{\"data\":[[1,10],[2,20],[3,30]],\"chart_type\":\"bar\",\"title\":\"Quarterly Sales\"}"}
+{"tool_name":"generate_chart","input":"Make a scatter chart titled 'Experiment' with x label Time and y label Value for data [ [0,1], [1,1.5], [2,2.2] ].","expected_arguments":"{\"data\":[[0,1],[1,1.5],[2,2.2]],\"chart_type\":\"scatter\",\"title\":\"Experiment\",\"x_label\":\"Time\",\"y_label\":\"Value\"}"}
+{"tool_name":"generate_chart","input":"Create a line chart of temperatures 70,72,68,65 over 4 days, label x as 'Day'.","expected_arguments":"{\"data\":[[1,70],[2,72],[3,68],[4,65]],\"chart_type\":\"line\",\"x_label\":\"Day\"}"}
+{"tool_name":"generate_chart","input":"Visualize visits per day with a bar chart; numbers: 100,150,120.","expected_arguments":"{\"data\":[[1,100],[2,150],[3,120]],\"chart_type\":\"bar\",\"title\":\"Daily Visits\",\"y_label\":\"Visitors\"}"}
+{"tool_name":"query_database","input":"Give me the ids and emails from users table, limit 5.","expected_arguments":"{\"table\":\"users\",\"columns\":[\"id\",\"email\"],\"limit\":5}"}
+{"tool_name":"query_database","input":"Hey, fetch order_id and amount from orders where status is 'shipped'.","expected_arguments":"{\"table\":\"orders\",\"columns\":[\"order_id\",\"amount\"],\"filters\":\"status = 'shipped'\"}"}
+{"tool_name":"query_database","input":"Retrieve name and price from products ordered by price descending, top 10 please.","expected_arguments":"{\"table\":\"products\",\"columns\":[\"name\",\"price\"],\"limit\":10,\"order_by\":\"price DESC\"}"}
+{"tool_name":"query_database","input":"I need the first 3 log entries from audit_log table.","expected_arguments":"{\"table\":\"audit_log\",\"columns\":[\"id\",\"timestamp\",\"action\"],\"limit\":3}"}
+{"tool_name":"query_database","input":"Query the customers table for name, city where city = 'Berlin'.","expected_arguments":"{\"table\":\"customers\",\"columns\":[\"name\",\"city\"],\"filters\":\"city = 'Berlin'\"}"}
+{"tool_name":"get_weather","input":"What's the weather in San Francisco right now?","expected_arguments":"{\"location\":\"San Francisco\"}"}
+{"tool_name":"get_weather","input":"Weather for Tokyo, please.","expected_arguments":"{\"location\":\"Tokyo\"}"}
+{"tool_name":"get_weather","input":"Get me the current weather for 10001.","expected_arguments":"{\"location\":\"10001\"}"}
+{"tool_name":"get_weather","input":"How's the weather in Paris today?","expected_arguments":"{\"location\":\"Paris\"}"}
+{"tool_name":"get_weather","input":"Check the weather for Sydney.","expected_arguments":"{\"location\":\"Sydney\"}"}
diff --git a/compatibility-test/index.ts b/compatibility-test/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..ca6b03dcae67c42cddb8e38fd38de3ee45c94ab0
--- /dev/null
+++ b/compatibility-test/index.ts
@@ -0,0 +1,196 @@
+import { parseArgs } from "node:util";
+import { createWriteStream } from "node:fs";
+import { readFile, writeFile } from "node:fs/promises";
+import path from "node:path";
+import process from "node:process";
+import { runCase, RunCaseSummary } from "./runCase";
+import { Listr, ListrTaskWrapper } from "listr2";
+import { analyze, printAnalysis } from "./analysis";
+
+function formatTimestamp(d: Date): string {
+ const pad = (n: number) => String(n).padStart(2, "0");
+ const yyyy = d.getFullYear();
+ const mm = pad(d.getMonth() + 1);
+ const dd = pad(d.getDate());
+ const hh = pad(d.getHours());
+ const mi = pad(d.getMinutes());
+ const ss = pad(d.getSeconds());
+ return `${yyyy}${mm}${dd}_${hh}${mi}${ss}`;
+}
+
+async function main() {
+ const args = parseArgs({
+ options: {
+ cases: { type: "string", short: "c", default: "cases.jsonl" },
+ provider: { type: "string", short: "p", default: "openai" },
+ streaming: { type: "boolean", short: "s", default: false },
+ maxTurns: { type: "string", short: "t", default: "10" },
+ n: { type: "string", short: "n" },
+      strict: { type: "boolean", default: false },
+ tries: { type: "string", short: "k", default: "1" },
+ },
+ });
+ const casesPathArg = args.values.cases;
+ const provider = args.values.provider as string;
+ const streaming = Boolean(args.values.streaming);
+ const maxTurns = Number(args.values.maxTurns ?? 10);
+ const nRaw = args.values.n as string | undefined;
+ const triesRaw = args.values.tries as string | undefined;
+ const tries = triesRaw != null ? Number(triesRaw) : 1;
+ const limit = nRaw != null ? Number(nRaw) : undefined;
+  if (limit != null && (!Number.isInteger(limit) || limit <= 0)) {
+ console.error("--n must be a positive integer");
+ process.exitCode = 1;
+ return;
+ }
+
+ if (!casesPathArg) {
+ console.error("--cases is required (path to JSONL file)");
+ process.exitCode = 1;
+ return;
+ }
+
+ const casesPath = path.isAbsolute(casesPathArg)
+ ? casesPathArg
+ : path.join(process.cwd(), casesPathArg);
+
+ const timestamp = formatTimestamp(new Date());
+ const defaultFilename = `rollout_${provider}_${timestamp}.jsonl`;
+ const outputFile = path.join(process.cwd(), defaultFilename);
+ const analysisFile = path.join(
+ process.cwd(),
+ `analysis_${provider}_${timestamp}.json`
+ );
+
+ let fileContent: string;
+ try {
+ fileContent = await readFile(casesPath, "utf8");
+ } catch (err: any) {
+ console.error(
+ `Failed to read cases file at ${casesPath}: ${err?.message ?? err}`
+ );
+ process.exitCode = 1;
+ return;
+ }
+
+ const lines = fileContent
+ .split(/\r?\n/)
+ .map((l) => l.trim())
+ .filter((l) => l.length > 0);
+
+ const selectedLines =
+ typeof limit === "number" ? lines.slice(0, limit) : lines;
+
+ const out = createWriteStream(outputFile, { flags: "w", encoding: "utf8" });
+
+ const writeLine = (obj: any) =>
+    new Promise<void>((resolve, reject) => {
+ const str = JSON.stringify(obj) + "\n";
+ out.write(str, (err) => (err ? reject(err) : resolve()));
+ });
+
+ // Accumulators for post-run analysis
+ let skipped = 0; // invalid JSON lines
+ const caseResults: Array<{
+ run_id: string;
+ success: boolean;
+ provider: string;
+ test_case: number;
+ tool_name: string;
+ input: string;
+ result: RunCaseSummary;
+ }> = [];
+
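+  // Runs one (case index, attempt) pair: parses the JSONL line, executes it via runCase,
+  // and appends one record per returned summary to the rollout file.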
+ async function processIndex(
+ i: number,
+ k: number,
+    task: ListrTaskWrapper<any, any, any>
+ ) {
+ const line = selectedLines[i];
+ let caseObj: any;
+ try {
+ caseObj = JSON.parse(line);
+ } catch (err: any) {
+ console.error(
+ `Skipping invalid JSON on line ${i + 1}: ${err?.message ?? err}`
+ );
+ skipped++;
+ return;
+ }
+
+ try {
+ const summaries = await runCase(provider, caseObj, {
+ maxTurns,
+ streaming,
+ strict: args.values.strict,
+ });
+
+ for (const summary of summaries) {
+ const record = {
+ run_id: `${i}_${k}`,
+ success: summary.success,
+ provider,
+ test_case: i,
+ tool_name: caseObj.tool_name,
+ input: caseObj.input,
+ result: summary,
+ };
+ task.output = `Case ${i} (attempt ${k + 1}): ${
+ summary.success ? "Success" : "Failed"
+ } ${summary.toolCallingDetails.warning || ""}`;
+ caseResults.push(record);
+ await writeLine(record);
+ }
+ } catch (err: any) {
+ const record = {
+ provider,
+ test_case: i,
+ tool_name: caseObj?.tool_name,
+ input: caseObj?.input,
+ expected_output: caseObj?.expected_output,
+ instructions: caseObj?.instructions,
+ error: String(err?.message ?? err),
+ };
+ await writeLine(record);
+ task.output = `Case ${i} failed: ${err?.message ?? err}`;
+ }
+ }
+
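+  // One Listr task per (case, attempt) pair; tasks run with a concurrency of 5.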
+ const listr = new Listr<{
+ output: string;
+ }>(
+ selectedLines.flatMap((line, index) => {
+ return Array.from({ length: tries }, (_, attempt) => ({
+ title: `Processing case ${index} (attempt ${attempt + 1})`,
+ task: async (_, task) => {
+ await processIndex(index, attempt, task);
+ },
+ rendererOptions: { persistentOutput: true },
+ }));
+ }),
+ {
+ concurrent: 5,
+ }
+ );
+
+ await listr.run();
+
+ await new Promise((resolve) => out.end(resolve));
+ console.log(`Results written to ${outputFile}`);
+ const stats = analyze(caseResults, tries);
+ await writeFile(analysisFile, JSON.stringify(stats, null, 2), "utf8");
+ printAnalysis(
+ stats,
+ caseResults,
+ provider,
+ selectedLines,
+ tries,
+ skipped,
+ analysisFile
+ );
+}
+
+main().catch((err) => {
+ console.error(err);
+ process.exitCode = 1;
+});
diff --git a/compatibility-test/package-lock.json b/compatibility-test/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..89b6a5e806428e73c005631cb5f1792e69cd16b3
--- /dev/null
+++ b/compatibility-test/package-lock.json
@@ -0,0 +1,1633 @@
+{
+ "name": "compatibility-test",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "dependencies": {
+ "@openai/agents": "^0.0.15",
+ "ajv": "^8.17.1",
+ "listr2": "^9.0.1"
+ }
+ },
+ "node_modules/@modelcontextprotocol/sdk": {
+ "version": "1.17.1",
+ "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.17.1.tgz",
+ "integrity": "sha512-CPle1OQehbWqd25La9Ack5B07StKIxh4+Bf19qnpZKJC1oI22Y0czZHbifjw1UoczIfKBwBDAp/dFxvHG13B5A==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "ajv": "^6.12.6",
+ "content-type": "^1.0.5",
+ "cors": "^2.8.5",
+ "cross-spawn": "^7.0.5",
+ "eventsource": "^3.0.2",
+ "eventsource-parser": "^3.0.0",
+ "express": "^5.0.1",
+ "express-rate-limit": "^7.5.0",
+ "pkce-challenge": "^5.0.0",
+ "raw-body": "^3.0.0",
+ "zod": "^3.23.8",
+ "zod-to-json-schema": "^3.24.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@modelcontextprotocol/sdk/node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/@modelcontextprotocol/sdk/node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/@openai/agents": {
+ "version": "0.0.15",
+ "resolved": "https://registry.npmjs.org/@openai/agents/-/agents-0.0.15.tgz",
+ "integrity": "sha512-B8y+WyWOeHowflPx09pyCfcqikC4OYWK27HTyNGt1oraXv93CzuamSr76iAaU1nWQ1MPbUwl6LHPX4BPUikVkQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@openai/agents-core": "0.0.15",
+ "@openai/agents-openai": "0.0.15",
+ "@openai/agents-realtime": "0.0.15",
+ "debug": "^4.4.0",
+ "openai": "^5.10.1"
+ }
+ },
+ "node_modules/@openai/agents-core": {
+ "version": "0.0.15",
+ "resolved": "https://registry.npmjs.org/@openai/agents-core/-/agents-core-0.0.15.tgz",
+ "integrity": "sha512-ODTqttjW0s0ejBe5PKnYRlFbJSZH2IO6OtUlRhIKmWiWrX6pGRxvpKjTSOXy8DEtpRHBj6Nhky0UoSlO6eOkDQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@openai/zod": "npm:zod@3.25.40 - 3.25.67",
+ "debug": "^4.4.0",
+ "openai": "^5.10.1"
+ },
+ "optionalDependencies": {
+ "@modelcontextprotocol/sdk": "^1.12.0"
+ },
+ "peerDependencies": {
+ "zod": "3.25.40 - 3.25.67"
+ },
+ "peerDependenciesMeta": {
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@openai/agents-openai": {
+ "version": "0.0.15",
+ "resolved": "https://registry.npmjs.org/@openai/agents-openai/-/agents-openai-0.0.15.tgz",
+ "integrity": "sha512-YIX3n98HdmmWKkb/71OB+DCQUYyGEpqfzPjejzdtNLUvAEs3jvXf7nkC8oTISsuCwrirgBz0rQEefeo0oUlyFQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@openai/agents-core": "0.0.15",
+ "@openai/zod": "npm:zod@3.25.40 - 3.25.67",
+ "debug": "^4.4.0",
+ "openai": "^5.10.1"
+ }
+ },
+ "node_modules/@openai/agents-realtime": {
+ "version": "0.0.15",
+ "resolved": "https://registry.npmjs.org/@openai/agents-realtime/-/agents-realtime-0.0.15.tgz",
+ "integrity": "sha512-kSZzMyij9Xt3BpMb/9snuVnu7a5qKZLyhtN/kWMA+wmfETvWz23BBz6tbO5xOmurAt9//OktkB+94e0T0RBtlA==",
+ "license": "MIT",
+ "dependencies": {
+ "@openai/agents-core": "0.0.15",
+ "@openai/zod": "npm:zod@3.25.40 - 3.25.67",
+ "@types/ws": "^8.18.1",
+ "debug": "^4.4.0",
+ "ws": "^8.18.1"
+ }
+ },
+ "node_modules/@openai/zod": {
+ "name": "zod",
+ "version": "3.25.67",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz",
+ "integrity": "sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "24.2.0",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-24.2.0.tgz",
+ "integrity": "sha512-3xyG3pMCq3oYCNg7/ZP+E1ooTaGB4cG8JWRsqqOYQdbWNY4zbaV0Ennrd7stjiJEFZCaybcIgpTjJWHRfBSIDw==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~7.10.0"
+ }
+ },
+ "node_modules/@types/ws": {
+ "version": "8.18.1",
+ "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz",
+ "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/accepts": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz",
+ "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "mime-types": "^3.0.0",
+ "negotiator": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ajv": {
+ "version": "8.17.1",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz",
+ "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==",
+ "license": "MIT",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.3",
+ "fast-uri": "^3.0.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/ansi-escapes": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.0.0.tgz",
+ "integrity": "sha512-GdYO7a61mR0fOlAsvC9/rIHf7L96sBc6dEWzeOu+KAea5bZyQRPIpojrVoI4AXGJS/ycu/fBTdLrUkA4ODrvjw==",
+ "license": "MIT",
+ "dependencies": {
+ "environment": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz",
+ "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
+ "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/body-parser": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.0.tgz",
+ "integrity": "sha512-02qvAaxv8tp7fBa/mw1ga98OGm+eCbqzJOKoRt70sLmfEEi+jyBYVTDGfCL/k06/4EMk/z01gCe7HoCH/f2LTg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "bytes": "^3.1.2",
+ "content-type": "^1.0.5",
+ "debug": "^4.4.0",
+ "http-errors": "^2.0.0",
+ "iconv-lite": "^0.6.3",
+ "on-finished": "^2.4.1",
+ "qs": "^6.14.0",
+ "raw-body": "^3.0.0",
+ "type-is": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/call-bound": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
+ "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "get-intrinsic": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/cli-cursor": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz",
+ "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==",
+ "license": "MIT",
+ "dependencies": {
+ "restore-cursor": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/cli-truncate": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-4.0.0.tgz",
+ "integrity": "sha512-nPdaFdQ0h/GEigbPClz11D0v/ZJEwxmeVZGeMo3Z5StPtUTkA9o1lD6QwoirYiSDzbcwn2XcjwmCp68W1IS4TA==",
+ "license": "MIT",
+ "dependencies": {
+ "slice-ansi": "^5.0.0",
+ "string-width": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/colorette": {
+ "version": "2.0.20",
+ "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz",
+ "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==",
+ "license": "MIT"
+ },
+ "node_modules/content-disposition": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.0.tgz",
+ "integrity": "sha512-Au9nRL8VNUut/XSzbQA38+M78dzP4D+eqg3gfJHMIHHYa3bg067xj1KxMUWj+VULbiZMowKngFFbKczUrNJ1mg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "safe-buffer": "5.2.1"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/content-type": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie": {
+ "version": "0.7.2",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
+ "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz",
+ "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=6.6.0"
+ }
+ },
+ "node_modules/cors": {
+ "version": "2.8.5",
+ "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
+ "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "object-assign": "^4",
+ "vary": "^1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
+ "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/emoji-regex": {
+ "version": "10.4.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz",
+ "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==",
+ "license": "MIT"
+ },
+ "node_modules/encodeurl": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
+ "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/environment": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/environment/-/environment-1.1.0.tgz",
+ "integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/eventemitter3": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz",
+ "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==",
+ "license": "MIT"
+ },
+ "node_modules/eventsource": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-3.0.7.tgz",
+ "integrity": "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "eventsource-parser": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ }
+ },
+ "node_modules/eventsource-parser": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.3.tgz",
+ "integrity": "sha512-nVpZkTMM9rF6AQ9gPJpFsNAMt48wIzB5TQgiTLdHiuO8XEDhUgZEhqKlZWXbIzo9VmJ/HvysHqEaVeD5v9TPvA==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=20.0.0"
+ }
+ },
+ "node_modules/express": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/express/-/express-5.1.0.tgz",
+ "integrity": "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "accepts": "^2.0.0",
+ "body-parser": "^2.2.0",
+ "content-disposition": "^1.0.0",
+ "content-type": "^1.0.5",
+ "cookie": "^0.7.1",
+ "cookie-signature": "^1.2.1",
+ "debug": "^4.4.0",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "etag": "^1.8.1",
+ "finalhandler": "^2.1.0",
+ "fresh": "^2.0.0",
+ "http-errors": "^2.0.0",
+ "merge-descriptors": "^2.0.0",
+ "mime-types": "^3.0.0",
+ "on-finished": "^2.4.1",
+ "once": "^1.4.0",
+ "parseurl": "^1.3.3",
+ "proxy-addr": "^2.0.7",
+ "qs": "^6.14.0",
+ "range-parser": "^1.2.1",
+ "router": "^2.2.0",
+ "send": "^1.1.0",
+ "serve-static": "^2.2.0",
+ "statuses": "^2.0.1",
+ "type-is": "^2.0.1",
+ "vary": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/express-rate-limit": {
+ "version": "7.5.1",
+ "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-7.5.1.tgz",
+ "integrity": "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/express-rate-limit"
+ },
+ "peerDependencies": {
+ "express": ">= 4.11"
+ }
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+ "license": "MIT"
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/fast-uri": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.6.tgz",
+ "integrity": "sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/fastify"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/fastify"
+ }
+ ],
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/finalhandler": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.0.tgz",
+ "integrity": "sha512-/t88Ty3d5JWQbWYgaOGCCYfXRwV1+be02WqYYlL6h0lEiUAMPM8o8qKGO01YIkOHzka2up08wvgYD0mDiI+q3Q==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "debug": "^4.4.0",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "on-finished": "^2.4.1",
+ "parseurl": "^1.3.3",
+ "statuses": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz",
+ "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "optional": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-east-asian-width": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.3.0.tgz",
+ "integrity": "sha512-vpeMIQKxczTD/0s2CdEWHcb0eeJe6TFjxb+J5xgX7hScxqrGuyjmv4c1D4A/gelKfyox0gJJwIHF+fLjeaM8kQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/http-errors/node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz",
+ "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-promise": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz",
+ "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
+ "license": "MIT"
+ },
+ "node_modules/listr2": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/listr2/-/listr2-9.0.1.tgz",
+ "integrity": "sha512-SL0JY3DaxylDuo/MecFeiC+7pedM0zia33zl0vcjgwcq1q1FWWF1To9EIauPbl8GbMCU0R2e0uJ8bZunhYKD2g==",
+ "license": "MIT",
+ "dependencies": {
+ "cli-truncate": "^4.0.0",
+ "colorette": "^2.0.20",
+ "eventemitter3": "^5.0.1",
+ "log-update": "^6.1.0",
+ "rfdc": "^1.4.1",
+ "wrap-ansi": "^9.0.0"
+ },
+ "engines": {
+ "node": ">=20.0.0"
+ }
+ },
+ "node_modules/log-update": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/log-update/-/log-update-6.1.0.tgz",
+ "integrity": "sha512-9ie8ItPR6tjY5uYJh8K/Zrv/RMZ5VOlOWvtZdEHYSTFKZfIBPQa9tOAEeAWhd+AnIneLJ22w5fjOYtoutpWq5w==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-escapes": "^7.0.0",
+ "cli-cursor": "^5.0.0",
+ "slice-ansi": "^7.1.0",
+ "strip-ansi": "^7.1.0",
+ "wrap-ansi": "^9.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/log-update/node_modules/is-fullwidth-code-point": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.0.0.tgz",
+ "integrity": "sha512-OVa3u9kkBbw7b8Xw5F9P+D/T9X+Z4+JruYVNapTjPYZYUznQ5YfWeFkOj606XYYW8yugTfC8Pj0hYqvi4ryAhA==",
+ "license": "MIT",
+ "dependencies": {
+ "get-east-asian-width": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/log-update/node_modules/slice-ansi": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.0.tgz",
+ "integrity": "sha512-bSiSngZ/jWeX93BqeIAbImyTbEihizcwNjFoRUIY/T1wWQsfsm2Vw1agPKylXvQTU7iASGdHhyqRlqQzfz+Htg==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^6.2.1",
+ "is-fullwidth-code-point": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/slice-ansi?sponsor=1"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/media-typer": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz",
+ "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz",
+ "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.54.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz",
+ "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.1.tgz",
+ "integrity": "sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "mime-db": "^1.54.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mimic-function": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz",
+ "integrity": "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/negotiator": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz",
+ "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.4",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
+ "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "license": "ISC",
+ "optional": true,
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/onetime": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz",
+ "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==",
+ "license": "MIT",
+ "dependencies": {
+ "mimic-function": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/openai": {
+ "version": "5.12.0",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-5.12.0.tgz",
+ "integrity": "sha512-vUdt02xiWgOHiYUmW0Hj1Qu9OKAiVQu5Bd547ktVCiMKC1BkB5L3ImeEnCyq3WpRKR6ZTaPgekzqdozwdPs7Lg==",
+ "license": "Apache-2.0",
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.23.8"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-to-regexp": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.2.0.tgz",
+ "integrity": "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/pkce-challenge": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/pkce-challenge/-/pkce-challenge-5.0.0.tgz",
+ "integrity": "sha512-ueGLflrrnvwB3xuo/uGob5pd5FN7l0MsLf0Z87o/UQmRtwjvfylfc9MurIxRAWywCYTgrvpXBcqjV4OfCYGCIQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=16.20.0"
+ }
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/punycode": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
+ "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/qs": {
+ "version": "6.14.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz",
+ "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==",
+ "license": "BSD-3-Clause",
+ "optional": true,
+ "dependencies": {
+ "side-channel": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.0.tgz",
+ "integrity": "sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.6.3",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/require-from-string": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
+ "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/restore-cursor": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz",
+ "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==",
+ "license": "MIT",
+ "dependencies": {
+ "onetime": "^7.0.0",
+ "signal-exit": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/rfdc": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.4.1.tgz",
+ "integrity": "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==",
+ "license": "MIT"
+ },
+ "node_modules/router": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz",
+ "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "debug": "^4.4.0",
+ "depd": "^2.0.0",
+ "is-promise": "^4.0.0",
+ "parseurl": "^1.3.3",
+ "path-to-regexp": "^8.0.0"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/send": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-1.2.0.tgz",
+ "integrity": "sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "debug": "^4.3.5",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "etag": "^1.8.1",
+ "fresh": "^2.0.0",
+ "http-errors": "^2.0.0",
+ "mime-types": "^3.0.1",
+ "ms": "^2.1.3",
+ "on-finished": "^2.4.1",
+ "range-parser": "^1.2.1",
+ "statuses": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
+ "node_modules/serve-static": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.0.tgz",
+ "integrity": "sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "parseurl": "^1.3.3",
+ "send": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/side-channel": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
+ "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3",
+ "side-channel-list": "^1.0.0",
+ "side-channel-map": "^1.0.1",
+ "side-channel-weakmap": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-list": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
+ "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-map": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
+ "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-weakmap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
+ "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3",
+ "side-channel-map": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
+ "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/slice-ansi": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-5.0.0.tgz",
+ "integrity": "sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^6.0.0",
+ "is-fullwidth-code-point": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/slice-ansi?sponsor=1"
+ }
+ },
+ "node_modules/statuses": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
+ "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
+ "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
+ "license": "MIT",
+ "dependencies": {
+ "emoji-regex": "^10.3.0",
+ "get-east-asian-width": "^1.0.0",
+ "strip-ansi": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
+ "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-regex": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ }
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/type-is": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz",
+ "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "content-type": "^1.0.5",
+ "media-typer": "^1.1.0",
+ "mime-types": "^3.0.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "7.10.0",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.10.0.tgz",
+ "integrity": "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag==",
+ "license": "MIT"
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "license": "BSD-2-Clause",
+ "optional": true,
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "license": "ISC",
+ "optional": true,
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/wrap-ansi": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.0.tgz",
+ "integrity": "sha512-G8ura3S+3Z2G+mkgNRq8dqaFZAuxfsxpBB8OCTGRTCtp+l/v9nbFNmCUP1BZMts3G1142MsZfn6eeUKrr4PD1Q==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^6.2.1",
+ "string-width": "^7.0.0",
+ "strip-ansi": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/ws": {
+ "version": "8.18.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
+ "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/zod": {
+ "version": "3.25.67",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz",
+ "integrity": "sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==",
+ "license": "MIT",
+ "optional": true,
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ },
+ "node_modules/zod-to-json-schema": {
+ "version": "3.24.6",
+ "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.6.tgz",
+ "integrity": "sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==",
+ "license": "ISC",
+ "optional": true,
+ "peerDependencies": {
+ "zod": "^3.24.1"
+ }
+ }
+ }
+}
diff --git a/compatibility-test/package.json b/compatibility-test/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..66d514391c8e0d23d0b32e3d431152f363007f82
--- /dev/null
+++ b/compatibility-test/package.json
@@ -0,0 +1,11 @@
+{
+ "type": "module",
+ "dependencies": {
+ "@openai/agents": "^0.0.15",
+ "ajv": "^8.17.1",
+ "listr2": "^9.0.1"
+ },
+ "scripts": {
+ "start": "tsx index.ts"
+ }
+}
diff --git a/compatibility-test/providers.ts b/compatibility-test/providers.ts
new file mode 100644
index 0000000000000000000000000000000000000000..91f58e0f28af1370b2ac1f8f53da65de86eb6a12
--- /dev/null
+++ b/compatibility-test/providers.ts
@@ -0,0 +1,15 @@
+export const PROVIDERS = {
+ vllm: {
+ apiBaseUrl: "http://localhost:8000/v1",
+ apiKey: "vllm",
+ apiType: ["responses", "chat"], // choose from responses, chat, or both
+ modelName: "openai/gpt-oss-120b",
+ providerDetails: {
+ // Add any provider-specific details here; they will be passed as part of every request.
+ // For example, to pin the provider on OpenRouter, you can do:
+ // provider: {
+ // only: ["example"],
+ // },
+ },
+ },
+};
diff --git a/compatibility-test/runCase.ts b/compatibility-test/runCase.ts
new file mode 100644
index 0000000000000000000000000000000000000000..fd066c0c5d1914a60d8223a1d8c3b79015d4295a
--- /dev/null
+++ b/compatibility-test/runCase.ts
@@ -0,0 +1,331 @@
+import {
+ Agent,
+ Runner,
+ OpenAIResponsesModel,
+ OpenAIChatCompletionsModel,
+ RunResult,
+ StreamedRunResult,
+ FunctionTool,
+ setTracingDisabled,
+} from "@openai/agents";
+import { Ajv } from "ajv";
+import { OpenAI } from "openai";
+import { PROVIDERS } from "./providers";
+import { TOOLS_MAP } from "./tools";
+
+setTracingDisabled(true);
+
+const ajv = new Ajv();
+
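+// A single test case: the tool the model is expected to call, the user input that should
+// trigger it, the expected arguments (as a JSON string), and optional system instructions.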
+export type Case = {
+ tool_name: string;
+ input: string;
+ expected_arguments: string;
+ instructions?: string;
+};
+
+// Summary shape for each apiType
+export type RunCaseSummary = {
+ apiType: string;
+ success: boolean;
+ validResponse: boolean;
+ validEvents?: boolean;
+ details: Record<string, any>;
+ history: any[];
+ successToolCall: boolean;
+ toolCallingDetails: Record<string, any>;
+};
+
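+// Runs one case against every apiType configured for the provider and returns one summary per apiType.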
+export async function runCase(
+ provider: string,
+ caseData: Case,
+ {
+ maxTurns,
+ streaming,
+ strict,
+ }: { maxTurns: number; streaming: boolean; strict: boolean }
+): Promise<RunCaseSummary[]> {
+ const config = PROVIDERS[provider];
+ if (!config) {
+ throw new Error(
+ `Provider ${provider} not found. Valid providers are: ${Object.keys(
+ PROVIDERS
+ ).join(", ")}`
+ );
+ }
+
+ const agent = new Agent({
+ name: caseData.tool_name,
+ instructions: caseData.instructions,
+ tools: [TOOLS_MAP[caseData.tool_name]],
+ });
+
+ const client = new OpenAI({
+ apiKey: config.apiKey,
+ baseURL: config.apiBaseUrl,
+ });
+
+ const summaries: RunCaseSummary[] = [];
+
+ for (const apiType of config.apiType) {
+ const runner = new Runner({
+ model:
+ apiType === "responses"
+ ? new OpenAIResponsesModel(client, config.modelName)
+ : new OpenAIChatCompletionsModel(client, config.modelName),
+ modelSettings: {
+ providerData: config.providerDetails ?? {},
+ },
+ });
+
+ let result: RunResult | StreamedRunResult;
+ let streamedEvents: any[] | undefined = undefined;
+ if (streaming) {
+ result = await runner.run(agent, caseData.input, {
+ stream: streaming,
+ maxTurns: maxTurns,
+ });
+ if (result instanceof StreamedRunResult) {
+ // Collect streaming events if applicable
+ streamedEvents = [];
+ for await (const event of result) {
+ if (event.type === "raw_model_stream_event") {
+ if (event.data.type === "model") {
+ streamedEvents.push(event.data.event);
+ }
+ }
+ }
+ await result.completed;
+ }
+ } else {
+ result = await runner.run(agent, caseData.input, {
+ maxTurns: maxTurns,
+ });
+ }
+
+ const { success: successToolCall, details: toolCallingDetails } =
+ testToolCall(apiType, caseData, result, strict);
+
+ const { validResponse, details } = testOutputData(
+ apiType,
+ result.rawResponses,
+ streaming
+ );
+
+ const { validEvents, details: eventsDetails } = streaming
+ ? testEvents(apiType, streamedEvents)
+ : { validEvents: true, details: {} };
+
+ let success = successToolCall && validResponse;
+ if (streaming) {
+ success = success && validEvents;
+ }
+ const summary: RunCaseSummary = {
+ apiType,
+ success,
+ validResponse,
+ validEvents,
+ details: {
+ ...details,
+ ...eventsDetails,
+ },
+ history: result?.rawResponses.map((entry) => entry.providerData) ?? [],
+ successToolCall,
+ toolCallingDetails,
+ };
+
+ summaries.push(summary);
+ }
+
+ return summaries;
+}
+
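+// Verifies that the expected tool was called at least once with arguments that validate against
+// its JSON schema; in strict mode the arguments must also match the expected arguments exactly.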
+function testToolCall(apiType, caseData, result, strict) {
+ let details: Record<string, any> = {};
+ result.newItems.forEach((item) => {
+ // For now this test only cares whether the tool was called at least once
+ if (details.calledToolAtLeastOnce) {
+ return;
+ }
+
+ const isToolCall = item.type === "tool_call_item";
+ if (isToolCall) {
+ if (item.rawItem.type === "function_call") {
+ if (item.rawItem.name === caseData.tool_name) {
+ const validate = ajv.compile(
+ (TOOLS_MAP[caseData.tool_name] as FunctionTool).parameters
+ );
+ const valid = validate(JSON.parse(item.rawItem.arguments));
+ details.calledToolWithRightSchema = valid;
+ details.calledToolAtLeastOnce = true;
+
+ if (details.calledToolWithRightSchema) {
+ const parsedArguments = JSON.parse(item.rawItem.arguments);
+ const expectedArguments = JSON.parse(caseData.expected_arguments);
+ details.calledToolWithRightArguments = deepEqual(
+ parsedArguments,
+ expectedArguments
+ );
+ if (!details.calledToolWithRightArguments) {
+ if (details.calledToolWithRightSchema) {
+ details.warning = `Tool call with wrong arguments but correct schema. Check logs for full details. Not failing this test. Parsed: ${JSON.stringify(
+ parsedArguments
+ )} Expected: ${JSON.stringify(expectedArguments)}`;
+ }
+ details.actualArguments = parsedArguments;
+ details.expectedArguments = expectedArguments;
+ }
+ }
+ }
+ }
+ }
+ });
+
+ return {
+ success:
+ !!details.calledToolAtLeastOnce &&
+ !!details.calledToolWithRightSchema &&
+ (!strict || !!details.calledToolWithRightArguments),
+ details,
+ };
+}
+
+function testEvents(apiType, events) {
+ // Ideally we would validate every event, reconstruct the final response from the stream,
+ // and compare it against the final response in the response.completed event.
+ // For now we only check that certain events are present.
+
+ let details: Record<string, any> = {};
+ let validEvents: boolean = false;
+
+ if (apiType === "chat") {
+ let hasReasoningDeltas = false;
+ for (const event of events) {
+ hasReasoningDeltas =
+ hasReasoningDeltas ||
+ (typeof event.choices[0].delta.reasoning === "string" &&
+ event.choices[0].delta.reasoning.length > 0);
+ }
+ details.hasReasoningDeltas = hasReasoningDeltas;
+ validEvents = hasReasoningDeltas;
+ }
+
+ if (apiType === "responses") {
+ let hasReasoningDeltaEvents = false;
+ let hasReasoningDoneEvents = false;
+ for (const event of events) {
+ // streamedEvents already contains the unwrapped model events collected in runCase
+ if (event.type === "response.reasoning_text.delta") {
+ hasReasoningDeltaEvents = true;
+ }
+ if (event.type === "response.reasoning_text.done") {
+ hasReasoningDoneEvents = true;
+ }
+ }
+
+ details.hasReasoningDeltaEvents = hasReasoningDeltaEvents;
+ details.hasReasoningDoneEvents = hasReasoningDoneEvents;
+ validEvents =
+ details.hasReasoningDeltaEvents && details.hasReasoningDoneEvents;
+ }
+
+ return {
+ validEvents,
+ details,
+ };
+}
+
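+// Checks the raw provider responses for reasoning output: a reasoning/reasoning_content string on
+// Chat Completions messages, or reasoning output items with reasoning_text content on Responses API output.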
+function testOutputData(apiType, rawResponses, streaming) {
+ let details: Record<string, any> = {};
+ let validResponse: boolean = false;
+
+ if (apiType === "chat") {
+ for (const response of rawResponses) {
+ if (streaming && !response.providerData) {
+ // with streaming Chat Completions there is no native final response object, so we skip this test
+ return {
+ validResponse: true,
+ details: {
+ skippedBecauseStreaming: true,
+ },
+ };
+ }
+
+ // this is the actual HTTP response from the provider
+ // Since it's not guaranteed that every response has a reasoning field, we check if it's present
+ // at least once across all responses
+ const data = response.providerData;
+ const message = data.choices[0].message;
+ if (message.role === "assistant" && !message.refusal) {
+ details.hasReasoningField =
+ details.hasReasoningField ||
+ ("reasoning" in message && typeof message.reasoning === "string");
+ details.hasReasoningContentField =
+ details.hasReasoningContentField ||
+ ("reasoning_content" in message &&
+ typeof message.reasoning_content === "string");
+
+ validResponse =
+ validResponse ||
+ (details.hasReasoningField && message.reasoning.length > 0);
+ }
+ }
+ } else if (apiType === "responses") {
+ // this is the actual HTTP response from the provider
+ const data = rawResponses[0].providerData;
+ for (const item of data.output) {
+ // Since it's not guaranteed that every response has a reasoning field, we check if it's present
+ // at least once across all responses
+
+ if (item.type === "reasoning") {
+ details.hasReasoningContentArray = Array.isArray(item.content);
+ details.hasReasoningContentArrayLength = item.content.length > 0;
+ details.hasReasoningContentArrayItemType = item.content.every(
+ (item) => item.type === "reasoning_text"
+ );
+ details.hasReasoningContentArrayItemText = item.content.every(
+ (item) => item.text.length > 0
+ );
+
+ validResponse =
+ details.hasReasoningContentArray &&
+ details.hasReasoningContentArrayLength &&
+ details.hasReasoningContentArrayItemType &&
+ details.hasReasoningContentArrayItemText;
+ }
+ }
+ }
+
+ return {
+ validResponse,
+ details,
+ };
+}
+
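+// Structural deep-equality check used to compare parsed tool-call arguments with the expected arguments.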
+function deepEqual(a: any, b: any): boolean {
+ if (a === b) return true;
+ if (typeof a !== typeof b) return false;
+ if (a && b && typeof a === "object") {
+ if (Array.isArray(a) !== Array.isArray(b)) return false;
+ if (Array.isArray(a)) {
+ if (a.length !== b.length) return false;
+ for (let i = 0; i < a.length; i++) {
+ if (!deepEqual(a[i], b[i])) return false;
+ }
+ return true;
+ } else {
+ const aKeys = Object.keys(a);
+ const bKeys = Object.keys(b);
+ if (aKeys.length !== bKeys.length) return false;
+ for (const key of aKeys) {
+ if (!b.hasOwnProperty(key)) return false;
+ if (!deepEqual(a[key], b[key])) return false;
+ }
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/compatibility-test/tools.ts b/compatibility-test/tools.ts
new file mode 100644
index 0000000000000000000000000000000000000000..d2d4db6e762ee12773c29c42fd980c25f0fba076
--- /dev/null
+++ b/compatibility-test/tools.ts
@@ -0,0 +1,156 @@
+import { Tool, tool } from "@openai/agents";
+
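+// Wraps a static tool definition into an @openai/agents tool whose execute() simply returns the canned output string.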
+function convertToTool(toolData: any) {
+ return tool({
+ name: toolData.name,
+ description: toolData.description,
+ parameters: toolData.parameters,
+ execute: async (parameters) => {
+ return toolData.output;
+ },
+ strict: false,
+ });
+}
+
+export const TOOLS = [
+ {
+ type: "function",
+ name: "get_weather",
+ description: "Get the weather for a given location",
+ parameters: {
+ type: "object",
+ properties: {
+ location: {
+ type: "string",
+ description: "The location to get the weather for",
+ },
+ },
+ required: ["location"],
+ additionalProperties: false,
+ },
+ output: '{"weather":"sunny"}',
+ },
+ {
+ type: "function",
+ name: "get_system_health",
+ description:
+ "Returns the current health status of the LLM runtime—use before critical operations to verify the service is live.",
+ parameters: { type: "object", properties: {} },
+ output: '{"status":"ok","uptime_seconds":372045}',
+ },
+ {
+ type: "function",
+ name: "markdown_to_html",
+ description:
+ "Converts a Markdown string to sanitized HTML—use when you need browser-renderable output.",
+ parameters: {
+ type: "object",
+ properties: {
+ markdown: { type: "string", description: "Raw Markdown content" },
+ },
+ required: ["markdown"],
+ additionalProperties: false,
+ },
+ output: '{"html":"Hello World
This is great.
"}',
+ },
+ {
+ type: "function",
+ name: "detect_language",
+ description:
+ "Identifies the ISO language code of the supplied text—use for routing text to language-specific models.",
+ parameters: {
+ type: "object",
+ properties: {
+ text: {
+ type: "string",
+ description: "Text whose language should be detected",
+ },
+ },
+ required: ["text"],
+ additionalProperties: false,
+ },
+ output: '{"language":"de","confidence":0.98}',
+ },
+ {
+ type: "function",
+ name: "generate_chart",
+ description:
+ "Creates a base64-encoded PNG chart from tabular data—use for quick visualizations inside chat.",
+ parameters: {
+ type: "object",
+ properties: {
+ data: {
+ type: "array",
+ items: { type: "array", items: { type: "number" } },
+ description: "2-D numeric data matrix",
+ },
+ chart_type: {
+ type: "string",
+ enum: ["line", "bar", "scatter"],
+ description: "Type of chart to generate",
+ },
+ title: {
+ type: "string",
+ description: "Chart title",
+ default: "",
+ },
+ x_label: {
+ type: "string",
+ description: "Label for the x-axis",
+ default: "",
+ },
+ y_label: {
+ type: "string",
+ description: "Label for the y-axis",
+ default: "",
+ },
+ },
+ required: ["data", "chart_type"],
+ additionalProperties: false,
+ },
+ output: '{"image_png_base64":"iVBORw0KGgoAAAANSUhEUgAA..."}',
+ },
+ {
+ type: "function",
+ name: "query_database",
+ description:
+ "Runs a parameterized SQL SELECT on the internal analytics DB—use for lightweight data look-ups.",
+ parameters: {
+ type: "object",
+ properties: {
+ table: { type: "string", description: "Table name to query" },
+ columns: {
+ type: "array",
+ items: { type: "string" },
+ description: "Columns to return",
+ },
+ filters: {
+ type: "string",
+ description: "SQL WHERE clause without the word WHERE",
+ default: "",
+ },
+ limit: {
+ type: "integer",
+ minimum: 1,
+ maximum: 10000,
+ description: "Max rows to return",
+ default: 100,
+ },
+ order_by: {
+ type: "string",
+ description: "Column to order by (optional)",
+ default: "",
+ },
+ },
+ required: ["table", "columns"],
+ additionalProperties: false,
+ },
+ output:
+ '{"rows":[{"id":1,"email":"user@example.com"},{"id":2,"email":"foo@bar.com"}],"row_count":2}',
+ },
+];
+
+export const TOOLS_MAP = TOOLS.reduce((acc, tool) => {
+ acc[tool.name] = convertToTool(tool);
+ return acc;
+}, {} as Record<string, Tool>);
diff --git a/docs/gpt-oss-120b.svg b/docs/gpt-oss-120b.svg
new file mode 100644
index 0000000000000000000000000000000000000000..4bc19224233f0cc021eea5f5ea582d2c15bbe035
--- /dev/null
+++ b/docs/gpt-oss-120b.svg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96497944d1e24900f95c1b4c2657e15a37079cf04052b77e0dad11ce715eec35
+size 14411325
diff --git a/docs/gpt-oss-20b.svg b/docs/gpt-oss-20b.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2f51658a8cba70c830c87554bcfad029ca000cde
--- /dev/null
+++ b/docs/gpt-oss-20b.svg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:245e7778763416b3024bac818e351f58c4498b8db7b5d75167325fccd20bb785
+size 14411250
diff --git a/docs/gpt-oss.svg b/docs/gpt-oss.svg
new file mode 100644
index 0000000000000000000000000000000000000000..98e94b02f68ec15aff10aa17e32d5f3c2d766268
--- /dev/null
+++ b/docs/gpt-oss.svg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d621976d2d4570b7663f55a356cfa2db9f20c4aa8658b54bf4e4a5a4bd17172
+size 14408569
diff --git a/examples/agents-sdk-js/index.ts b/examples/agents-sdk-js/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..27cc8543276cdfa553a5fd3203168b0903b04c99
--- /dev/null
+++ b/examples/agents-sdk-js/index.ts
@@ -0,0 +1,90 @@
+import { OpenAI } from "openai";
+import {
+ Agent,
+ run,
+ setDefaultOpenAIClient,
+ setOpenAIAPI,
+ setTracingDisabled,
+ tool,
+ MCPServerStdio,
+} from "@openai/agents";
+import { z } from "zod";
+import path from "node:path";
+import process from "node:process";
+import { styleText } from "node:util";
+import { createInterface } from "node:readline/promises";
+
+async function prompt(question: string) {
+ const rl = createInterface({
+ input: process.stdin,
+ output: process.stdout,
+ });
+ const answer = await rl.question(question);
+ rl.close();
+ return answer;
+}
+
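+// OpenAI client pointed at a local OpenAI-compatible endpoint (port 11434 is Ollama's default).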
+const openai = new OpenAI({
+ apiKey: "local",
+ baseURL: "http://localhost:11434/v1",
+});
+
+const samplesDir = path.join(process.cwd());
+
+const mcpServer = new MCPServerStdio({
+ name: "Filesystem MCP Server, via npx",
+ fullCommand: `npx -y @modelcontextprotocol/server-filesystem ${samplesDir}`,
+});
+
+await mcpServer.connect();
+
+setTracingDisabled(true);
+setDefaultOpenAIClient(openai);
+setOpenAIAPI("chat_completions");
+
+const searchTool = tool({
+ name: "get_current_weather",
+ description: "Get the current weather in a given location",
+ parameters: z.object({
+ location: z.string(),
+ }),
+ execute: async ({ location }) => {
+ return `The weather in ${location} is sunny.`;
+ },
+});
+
+const agent = new Agent({
+ name: "My Agent",
+ instructions: "You are a helpful assistant.",
+ tools: [searchTool],
+ model: "gpt-oss:20b-test",
+ mcpServers: [mcpServer],
+});
+
+const input = await prompt("> ");
+
+const result = await run(agent, input, {
+ stream: true,
+});
+
+for await (const event of result) {
+ if (event.type === "raw_model_stream_event" && event.data.type === "model") {
+ if (event.data.event.choices[0].delta.content) {
+ process.stdout.write(event.data.event.choices[0].delta.content);
+ } else if (event.data.event.choices[0].delta.reasoning) {
+ process.stdout.write(event.data.event.choices[0].delta.reasoning);
+ }
+ } else if (
+ event.type === "run_item_stream_event" &&
+ event.item.type === "tool_call_item" &&
+ event.item.rawItem.type === "function_call"
+ ) {
+ console.log(
+ `\nCalling ${event.item.rawItem.name} with: ${event.item.rawItem.arguments}`
+ );
+ }
+}
+
+console.log("\n");
+await result.completed;
+await mcpServer.close();
diff --git a/examples/agents-sdk-js/package-lock.json b/examples/agents-sdk-js/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..3ef168a08a1f9085c52d392317fda3428d9f3708
--- /dev/null
+++ b/examples/agents-sdk-js/package-lock.json
@@ -0,0 +1,1798 @@
+{
+ "name": "agents-sdk",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "agents-sdk",
+ "version": "1.0.0",
+ "license": "ISC",
+ "dependencies": {
+ "@openai/agents": "^0.0.14",
+ "tsx": "^4.20.3",
+ "typescript": "^5.8.3",
+ "zod": "^3.25.67"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.8.tgz",
+ "integrity": "sha512-urAvrUedIqEiFR3FYSLTWQgLu5tb+m0qZw0NBEasUeo6wuqatkMDaRT+1uABiGXEu5vqgPd7FGE1BhsAIy9QVA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.8.tgz",
+ "integrity": "sha512-RONsAvGCz5oWyePVnLdZY/HHwA++nxYWIX1atInlaW6SEkwq6XkP3+cb825EUcRs5Vss/lGh/2YxAb5xqc07Uw==",
+ "cpu": [
+ "arm"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.8.tgz",
+ "integrity": "sha512-OD3p7LYzWpLhZEyATcTSJ67qB5D+20vbtr6vHlHWSQYhKtzUYrETuWThmzFpZtFsBIxRvhO07+UgVA9m0i/O1w==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.8.tgz",
+ "integrity": "sha512-yJAVPklM5+4+9dTeKwHOaA+LQkmrKFX96BM0A/2zQrbS6ENCmxc4OVoBs5dPkCCak2roAD+jKCdnmOqKszPkjA==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.8.tgz",
+ "integrity": "sha512-Jw0mxgIaYX6R8ODrdkLLPwBqHTtYHJSmzzd+QeytSugzQ0Vg4c5rDky5VgkoowbZQahCbsv1rT1KW72MPIkevw==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.8.tgz",
+ "integrity": "sha512-Vh2gLxxHnuoQ+GjPNvDSDRpoBCUzY4Pu0kBqMBDlK4fuWbKgGtmDIeEC081xi26PPjn+1tct+Bh8FjyLlw1Zlg==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.8.tgz",
+ "integrity": "sha512-YPJ7hDQ9DnNe5vxOm6jaie9QsTwcKedPvizTVlqWG9GBSq+BuyWEDazlGaDTC5NGU4QJd666V0yqCBL2oWKPfA==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.8.tgz",
+ "integrity": "sha512-MmaEXxQRdXNFsRN/KcIimLnSJrk2r5H8v+WVafRWz5xdSVmWLoITZQXcgehI2ZE6gioE6HirAEToM/RvFBeuhw==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.8.tgz",
+ "integrity": "sha512-FuzEP9BixzZohl1kLf76KEVOsxtIBFwCaLupVuk4eFVnOZfU+Wsn+x5Ryam7nILV2pkq2TqQM9EZPsOBuMC+kg==",
+ "cpu": [
+ "arm"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.8.tgz",
+ "integrity": "sha512-WIgg00ARWv/uYLU7lsuDK00d/hHSfES5BzdWAdAig1ioV5kaFNrtK8EqGcUBJhYqotlUByUKz5Qo6u8tt7iD/w==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.8.tgz",
+ "integrity": "sha512-A1D9YzRX1i+1AJZuFFUMP1E9fMaYY+GnSQil9Tlw05utlE86EKTUA7RjwHDkEitmLYiFsRd9HwKBPEftNdBfjg==",
+ "cpu": [
+ "ia32"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.8.tgz",
+ "integrity": "sha512-O7k1J/dwHkY1RMVvglFHl1HzutGEFFZ3kNiDMSOyUrB7WcoHGf96Sh+64nTRT26l3GMbCW01Ekh/ThKM5iI7hQ==",
+ "cpu": [
+ "loong64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.8.tgz",
+ "integrity": "sha512-uv+dqfRazte3BzfMp8PAQXmdGHQt2oC/y2ovwpTteqrMx2lwaksiFZ/bdkXJC19ttTvNXBuWH53zy/aTj1FgGw==",
+ "cpu": [
+ "mips64el"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.8.tgz",
+ "integrity": "sha512-GyG0KcMi1GBavP5JgAkkstMGyMholMDybAf8wF5A70CALlDM2p/f7YFE7H92eDeH/VBtFJA5MT4nRPDGg4JuzQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.8.tgz",
+ "integrity": "sha512-rAqDYFv3yzMrq7GIcen3XP7TUEG/4LK86LUPMIz6RT8A6pRIDn0sDcvjudVZBiiTcZCY9y2SgYX2lgK3AF+1eg==",
+ "cpu": [
+ "riscv64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.8.tgz",
+ "integrity": "sha512-Xutvh6VjlbcHpsIIbwY8GVRbwoviWT19tFhgdA7DlenLGC/mbc3lBoVb7jxj9Z+eyGqvcnSyIltYUrkKzWqSvg==",
+ "cpu": [
+ "s390x"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.8.tgz",
+ "integrity": "sha512-ASFQhgY4ElXh3nDcOMTkQero4b1lgubskNlhIfJrsH5OKZXDpUAKBlNS0Kx81jwOBp+HCeZqmoJuihTv57/jvQ==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-arm64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.8.tgz",
+ "integrity": "sha512-d1KfruIeohqAi6SA+gENMuObDbEjn22olAR7egqnkCD9DGBG0wsEARotkLgXDu6c4ncgWTZJtN5vcgxzWRMzcw==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.8.tgz",
+ "integrity": "sha512-nVDCkrvx2ua+XQNyfrujIG38+YGyuy2Ru9kKVNyh5jAys6n+l44tTtToqHjino2My8VAY6Lw9H7RI73XFi66Cg==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-arm64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.8.tgz",
+ "integrity": "sha512-j8HgrDuSJFAujkivSMSfPQSAa5Fxbvk4rgNAS5i3K+r8s1X0p1uOO2Hl2xNsGFppOeHOLAVgYwDVlmxhq5h+SQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.8.tgz",
+ "integrity": "sha512-1h8MUAwa0VhNCDp6Af0HToI2TJFAn1uqT9Al6DJVzdIBAd21m/G0Yfc77KDM3uF3T/YaOgQq3qTJHPbTOInaIQ==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openharmony-arm64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.8.tgz",
+ "integrity": "sha512-r2nVa5SIK9tSWd0kJd9HCffnDHKchTGikb//9c7HX+r+wHYCpQrSgxhlY6KWV1nFo1l4KFbsMlHk+L6fekLsUg==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.8.tgz",
+ "integrity": "sha512-zUlaP2S12YhQ2UzUfcCuMDHQFJyKABkAjvO5YSndMiIkMimPmxA+BYSBikWgsRpvyxuRnow4nS5NPnf9fpv41w==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.8.tgz",
+ "integrity": "sha512-YEGFFWESlPva8hGL+zvj2z/SaK+pH0SwOM0Nc/d+rVnW7GSTFlLBGzZkuSU9kFIGIo8q9X3ucpZhu8PDN5A2sQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.8.tgz",
+ "integrity": "sha512-hiGgGC6KZ5LZz58OL/+qVVoZiuZlUYlYHNAmczOm7bs2oE1XriPFi5ZHHrS8ACpV5EjySrnoCKmcbQMN+ojnHg==",
+ "cpu": [
+ "ia32"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.8.tgz",
+ "integrity": "sha512-cn3Yr7+OaaZq1c+2pe+8yxC8E144SReCQjN6/2ynubzYjvyqZjTXfQJpAcQpsdJq3My7XADANiYGHoFC69pLQw==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@modelcontextprotocol/sdk": {
+ "version": "1.17.0",
+ "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.17.0.tgz",
+ "integrity": "sha512-qFfbWFA7r1Sd8D697L7GkTd36yqDuTkvz0KfOGkgXR8EUhQn3/EDNIR/qUdQNMT8IjmasBvHWuXeisxtXTQT2g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "ajv": "^6.12.6",
+ "content-type": "^1.0.5",
+ "cors": "^2.8.5",
+ "cross-spawn": "^7.0.5",
+ "eventsource": "^3.0.2",
+ "eventsource-parser": "^3.0.0",
+ "express": "^5.0.1",
+ "express-rate-limit": "^7.5.0",
+ "pkce-challenge": "^5.0.0",
+ "raw-body": "^3.0.0",
+ "zod": "^3.23.8",
+ "zod-to-json-schema": "^3.24.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@openai/agents": {
+ "version": "0.0.14",
+ "resolved": "https://registry.npmjs.org/@openai/agents/-/agents-0.0.14.tgz",
+ "integrity": "sha512-67FwkSxlid8/fFzIDMBuIvDQJ2Egf7PCpI7zp2JAlIlsz4UZVSlptNcN63RCG2xP6X2XqsdyjPke8ZDEKVrePw==",
+ "license": "MIT",
+ "dependencies": {
+ "@openai/agents-core": "0.0.14",
+ "@openai/agents-openai": "0.0.14",
+ "@openai/agents-realtime": "0.0.14",
+ "debug": "^4.4.0",
+ "openai": "^5.10.1"
+ }
+ },
+ "node_modules/@openai/agents-core": {
+ "version": "0.0.14",
+ "resolved": "https://registry.npmjs.org/@openai/agents-core/-/agents-core-0.0.14.tgz",
+ "integrity": "sha512-enCk5ucz+xxwPgh0zBQoJi5c1RukSc60neRUmlW4eQRgj9p5hVFQaBQNapZ4RysagHCLm2scYRwKgaP6nPDuNQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@openai/zod": "npm:zod@3.25.40 - 3.25.67",
+ "debug": "^4.4.0",
+ "openai": "^5.10.1"
+ },
+ "optionalDependencies": {
+ "@modelcontextprotocol/sdk": "^1.12.0"
+ },
+ "peerDependencies": {
+ "zod": "3.25.40 - 3.25.67"
+ },
+ "peerDependenciesMeta": {
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@openai/agents-openai": {
+ "version": "0.0.14",
+ "resolved": "https://registry.npmjs.org/@openai/agents-openai/-/agents-openai-0.0.14.tgz",
+ "integrity": "sha512-qSGBictwfJ3dMhC3QvqOLMm8RVZ/eIYNcFNLHps7hWeB1xeDGJFDZ/X7dDicejOeEXbi/nGe1ry6LbXDYSo3uQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@openai/agents-core": "0.0.14",
+ "@openai/zod": "npm:zod@3.25.40 - 3.25.67",
+ "debug": "^4.4.0",
+ "openai": "^5.10.1"
+ }
+ },
+ "node_modules/@openai/agents-realtime": {
+ "version": "0.0.14",
+ "resolved": "https://registry.npmjs.org/@openai/agents-realtime/-/agents-realtime-0.0.14.tgz",
+ "integrity": "sha512-gfSuWEDKZREWi0DJDf3F8fT/xvLL9R0cydfgriL0kPkWOlTMuZ0KZKI6D90pc2VAWIescA8BuqCcWkgWFq55Uw==",
+ "license": "MIT",
+ "dependencies": {
+ "@openai/agents-core": "0.0.14",
+ "@openai/zod": "npm:zod@3.25.40 - 3.25.67",
+ "@types/ws": "^8.18.1",
+ "debug": "^4.4.0",
+ "ws": "^8.18.1"
+ }
+ },
+ "node_modules/@openai/zod": {
+ "name": "zod",
+ "version": "3.25.67",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz",
+ "integrity": "sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "24.1.0",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-24.1.0.tgz",
+ "integrity": "sha512-ut5FthK5moxFKH2T1CUOC6ctR67rQRvvHdFLCD2Ql6KXmMuCrjsSsRI9UsLCm9M18BMwClv4pn327UvB7eeO1w==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~7.8.0"
+ }
+ },
+ "node_modules/@types/ws": {
+ "version": "8.18.1",
+ "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz",
+ "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/accepts": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz",
+ "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "mime-types": "^3.0.0",
+ "negotiator": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/body-parser": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.0.tgz",
+ "integrity": "sha512-02qvAaxv8tp7fBa/mw1ga98OGm+eCbqzJOKoRt70sLmfEEi+jyBYVTDGfCL/k06/4EMk/z01gCe7HoCH/f2LTg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "bytes": "^3.1.2",
+ "content-type": "^1.0.5",
+ "debug": "^4.4.0",
+ "http-errors": "^2.0.0",
+ "iconv-lite": "^0.6.3",
+ "on-finished": "^2.4.1",
+ "qs": "^6.14.0",
+ "raw-body": "^3.0.0",
+ "type-is": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/call-bound": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
+ "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "get-intrinsic": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/content-disposition": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.0.tgz",
+ "integrity": "sha512-Au9nRL8VNUut/XSzbQA38+M78dzP4D+eqg3gfJHMIHHYa3bg067xj1KxMUWj+VULbiZMowKngFFbKczUrNJ1mg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "safe-buffer": "5.2.1"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/content-type": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie": {
+ "version": "0.7.2",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
+ "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz",
+ "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=6.6.0"
+ }
+ },
+ "node_modules/cors": {
+ "version": "2.8.5",
+ "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
+ "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "object-assign": "^4",
+ "vary": "^1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
+ "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/encodeurl": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
+ "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/esbuild": {
+ "version": "0.25.8",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.8.tgz",
+ "integrity": "sha512-vVC0USHGtMi8+R4Kz8rt6JhEWLxsv9Rnu/lGYbPR8u47B+DCBksq9JarW0zOO7bs37hyOK1l2/oqtbciutL5+Q==",
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.25.8",
+ "@esbuild/android-arm": "0.25.8",
+ "@esbuild/android-arm64": "0.25.8",
+ "@esbuild/android-x64": "0.25.8",
+ "@esbuild/darwin-arm64": "0.25.8",
+ "@esbuild/darwin-x64": "0.25.8",
+ "@esbuild/freebsd-arm64": "0.25.8",
+ "@esbuild/freebsd-x64": "0.25.8",
+ "@esbuild/linux-arm": "0.25.8",
+ "@esbuild/linux-arm64": "0.25.8",
+ "@esbuild/linux-ia32": "0.25.8",
+ "@esbuild/linux-loong64": "0.25.8",
+ "@esbuild/linux-mips64el": "0.25.8",
+ "@esbuild/linux-ppc64": "0.25.8",
+ "@esbuild/linux-riscv64": "0.25.8",
+ "@esbuild/linux-s390x": "0.25.8",
+ "@esbuild/linux-x64": "0.25.8",
+ "@esbuild/netbsd-arm64": "0.25.8",
+ "@esbuild/netbsd-x64": "0.25.8",
+ "@esbuild/openbsd-arm64": "0.25.8",
+ "@esbuild/openbsd-x64": "0.25.8",
+ "@esbuild/openharmony-arm64": "0.25.8",
+ "@esbuild/sunos-x64": "0.25.8",
+ "@esbuild/win32-arm64": "0.25.8",
+ "@esbuild/win32-ia32": "0.25.8",
+ "@esbuild/win32-x64": "0.25.8"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/eventsource": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-3.0.7.tgz",
+ "integrity": "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "eventsource-parser": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ }
+ },
+ "node_modules/eventsource-parser": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.3.tgz",
+ "integrity": "sha512-nVpZkTMM9rF6AQ9gPJpFsNAMt48wIzB5TQgiTLdHiuO8XEDhUgZEhqKlZWXbIzo9VmJ/HvysHqEaVeD5v9TPvA==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=20.0.0"
+ }
+ },
+ "node_modules/express": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/express/-/express-5.1.0.tgz",
+ "integrity": "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "accepts": "^2.0.0",
+ "body-parser": "^2.2.0",
+ "content-disposition": "^1.0.0",
+ "content-type": "^1.0.5",
+ "cookie": "^0.7.1",
+ "cookie-signature": "^1.2.1",
+ "debug": "^4.4.0",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "etag": "^1.8.1",
+ "finalhandler": "^2.1.0",
+ "fresh": "^2.0.0",
+ "http-errors": "^2.0.0",
+ "merge-descriptors": "^2.0.0",
+ "mime-types": "^3.0.0",
+ "on-finished": "^2.4.1",
+ "once": "^1.4.0",
+ "parseurl": "^1.3.3",
+ "proxy-addr": "^2.0.7",
+ "qs": "^6.14.0",
+ "range-parser": "^1.2.1",
+ "router": "^2.2.0",
+ "send": "^1.1.0",
+ "serve-static": "^2.2.0",
+ "statuses": "^2.0.1",
+ "type-is": "^2.0.1",
+ "vary": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/express-rate-limit": {
+ "version": "7.5.1",
+ "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-7.5.1.tgz",
+ "integrity": "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/express-rate-limit"
+ },
+ "peerDependencies": {
+ "express": ">= 4.11"
+ }
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/finalhandler": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.0.tgz",
+ "integrity": "sha512-/t88Ty3d5JWQbWYgaOGCCYfXRwV1+be02WqYYlL6h0lEiUAMPM8o8qKGO01YIkOHzka2up08wvgYD0mDiI+q3Q==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "debug": "^4.4.0",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "on-finished": "^2.4.1",
+ "parseurl": "^1.3.3",
+ "statuses": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz",
+ "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "optional": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/get-tsconfig": {
+ "version": "4.10.1",
+ "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.10.1.tgz",
+ "integrity": "sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==",
+ "license": "MIT",
+ "dependencies": {
+ "resolve-pkg-maps": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/http-errors/node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/is-promise": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz",
+ "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/media-typer": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz",
+ "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz",
+ "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.54.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz",
+ "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.1.tgz",
+ "integrity": "sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "mime-db": "^1.54.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/negotiator": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz",
+ "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.4",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
+ "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "license": "ISC",
+ "optional": true,
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/openai": {
+ "version": "5.11.0",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-5.11.0.tgz",
+ "integrity": "sha512-+AuTc5pVjlnTuA9zvn8rA/k+1RluPIx9AD4eDcnutv6JNwHHZxIhkFy+tmMKCvmMFDQzfA/r1ujvPWB19DQkYg==",
+ "license": "Apache-2.0",
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.23.8"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-to-regexp": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.2.0.tgz",
+ "integrity": "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/pkce-challenge": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/pkce-challenge/-/pkce-challenge-5.0.0.tgz",
+ "integrity": "sha512-ueGLflrrnvwB3xuo/uGob5pd5FN7l0MsLf0Z87o/UQmRtwjvfylfc9MurIxRAWywCYTgrvpXBcqjV4OfCYGCIQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=16.20.0"
+ }
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/punycode": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
+ "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/qs": {
+ "version": "6.14.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz",
+ "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==",
+ "license": "BSD-3-Clause",
+ "optional": true,
+ "dependencies": {
+ "side-channel": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.0.tgz",
+ "integrity": "sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.6.3",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/resolve-pkg-maps": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz",
+ "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1"
+ }
+ },
+ "node_modules/router": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz",
+ "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "debug": "^4.4.0",
+ "depd": "^2.0.0",
+ "is-promise": "^4.0.0",
+ "parseurl": "^1.3.3",
+ "path-to-regexp": "^8.0.0"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "license": "MIT",
+ "optional": true
+ },
+ "node_modules/send": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-1.2.0.tgz",
+ "integrity": "sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "debug": "^4.3.5",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "etag": "^1.8.1",
+ "fresh": "^2.0.0",
+ "http-errors": "^2.0.0",
+ "mime-types": "^3.0.1",
+ "ms": "^2.1.3",
+ "on-finished": "^2.4.1",
+ "range-parser": "^1.2.1",
+ "statuses": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
+ "node_modules/serve-static": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.0.tgz",
+ "integrity": "sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "parseurl": "^1.3.3",
+ "send": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/side-channel": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
+ "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3",
+ "side-channel-list": "^1.0.0",
+ "side-channel-map": "^1.0.1",
+ "side-channel-weakmap": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-list": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
+ "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-map": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
+ "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-weakmap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
+ "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3",
+ "side-channel-map": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/statuses": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
+ "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/tsx": {
+ "version": "4.20.3",
+ "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.20.3.tgz",
+ "integrity": "sha512-qjbnuR9Tr+FJOMBqJCW5ehvIo/buZq7vH7qD7JziU98h6l3qGy0a/yPFjwO+y0/T7GFpNgNAvEcPPVfyT8rrPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "esbuild": "~0.25.0",
+ "get-tsconfig": "^4.7.5"
+ },
+ "bin": {
+ "tsx": "dist/cli.mjs"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ }
+ },
+ "node_modules/type-is": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz",
+ "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "content-type": "^1.0.5",
+ "media-typer": "^1.1.0",
+ "mime-types": "^3.0.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/typescript": {
+ "version": "5.8.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz",
+ "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
+ "license": "Apache-2.0",
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "7.8.0",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.8.0.tgz",
+ "integrity": "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw==",
+ "license": "MIT"
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "license": "BSD-2-Clause",
+ "optional": true,
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
+ "license": "MIT",
+ "optional": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "license": "ISC",
+ "optional": true,
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/ws": {
+ "version": "8.18.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
+ "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/zod": {
+ "version": "3.25.67",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz",
+ "integrity": "sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ },
+ "node_modules/zod-to-json-schema": {
+ "version": "3.24.6",
+ "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.6.tgz",
+ "integrity": "sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==",
+ "license": "ISC",
+ "optional": true,
+ "peerDependencies": {
+ "zod": "^3.24.1"
+ }
+ }
+ }
+}
diff --git a/examples/agents-sdk-js/package.json b/examples/agents-sdk-js/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..fcb2ac42aab690f127373250af16be4ddb0ac8a0
--- /dev/null
+++ b/examples/agents-sdk-js/package.json
@@ -0,0 +1,20 @@
+{
+ "type": "module",
+ "name": "agents-sdk",
+ "version": "1.0.0",
+ "main": "index.js",
+ "scripts": {
+ "start": "tsx index.ts",
+ "test": "echo \"Error: no test specified\" && exit 1"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "dependencies": {
+ "@openai/agents": "^0.0.14",
+ "tsx": "^4.20.3",
+ "typescript": "^5.8.3",
+ "zod": "^3.25.67"
+ }
+}
diff --git a/examples/agents-sdk-python/example.py b/examples/agents-sdk-python/example.py
new file mode 100644
index 0000000000000000000000000000000000000000..af0be603827db39ff7834c258bebb585b6a19b50
--- /dev/null
+++ b/examples/agents-sdk-python/example.py
@@ -0,0 +1,102 @@
+import asyncio
+from pathlib import Path
+import shutil
+
+from openai import AsyncOpenAI
+from agents import (
+ Agent,
+ ItemHelpers,
+ Runner,
+ set_default_openai_api,
+ set_default_openai_client,
+ set_tracing_disabled,
+ function_tool,
+)
+from agents.mcp import MCPServerStdio
+
+
+async def prompt_user(question: str) -> str:
+ """Async input prompt function"""
+ loop = asyncio.get_event_loop()
+ return await loop.run_in_executor(None, input, question)
+
+
+async def main():
+ # Set up OpenAI client for local server (e.g., Ollama)
+ openai_client = AsyncOpenAI(
+ api_key="local",
+ base_url="http://localhost:11434/v1",
+ )
+
+ # Get current working directory
+ samples_dir = str(Path.cwd())
+
+ # Create MCP server for filesystem operations
+ mcp_server = MCPServerStdio(
+ name="Filesystem MCP Server, via npx",
+ params={
+ "command": "npx",
+ "args": [
+ "-y",
+ "@modelcontextprotocol/server-filesystem",
+ samples_dir,
+ ],
+ },
+ )
+
+ # Connect to MCP server
+ await mcp_server.connect()
+
+ # Configure agents SDK
+ set_tracing_disabled(True)
+ set_default_openai_client(openai_client)
+ set_default_openai_api("chat_completions")
+
+ # Define weather tool
+ @function_tool
+ async def get_weather(location: str) -> str:
+ return f"The weather in {location} is sunny."
+
+ # Create agent
+ agent = Agent(
+ name="My Agent",
+ instructions="You are a helpful assistant.",
+ tools=[get_weather],
+ model="gpt-oss:20b-test",
+ mcp_servers=[mcp_server],
+ )
+
+ # Get user input
+ user_input = await prompt_user("> ")
+
+ # Run agent with streaming
+ result = Runner.run_streamed(agent, user_input)
+
+ # Process streaming results
+ async for event in result.stream_events():
+ if event.type == "raw_response_event":
+ continue
+ elif event.type == "agent_updated_stream_event":
+ print(f"Agent updated: {event.new_agent.name}")
+ elif event.type == "run_item_stream_event":
+ if event.item.type == "tool_call_item":
+ print("-- Tool was called")
+ elif event.item.type == "tool_call_output_item":
+ print(f"-- Tool output: {event.item.output}")
+ elif event.item.type == "message_output_item":
+ print(
+ f"-- Message output:\n {ItemHelpers.text_message_output(event.item)}"
+ )
+ else:
+ pass
+
+ print("=== Run complete ===")
+
+
+if __name__ == "__main__":
+
+ if not shutil.which("npx"):
+ raise RuntimeError(
+ "npx is not installed. Please install it with `npm install -g npx`."
+ )
+ asyncio.run(main())
diff --git a/examples/agents-sdk-python/pyproject.toml b/examples/agents-sdk-python/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..e8d24a81397703630b1b5c2ace5927fa5ac187cd
--- /dev/null
+++ b/examples/agents-sdk-python/pyproject.toml
@@ -0,0 +1,9 @@
+[project]
+name = "agents-sdk-python"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+ "openai-agents>=0.2.4",
+]
diff --git a/examples/gradio/gradio_chat.py b/examples/gradio/gradio_chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..da742bd3ad2d141a4d04c46a74fcf563f3cee90d
--- /dev/null
+++ b/examples/gradio/gradio_chat.py
@@ -0,0 +1,247 @@
+import json
+import requests
+import gradio as gr
+
+DEFAULT_FUNCTION_PROPERTIES = """
+{
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA"
+ }
+ },
+ "required": ["location"]
+}
+""".strip()
+
+def chat_with_model(message, history, model_choice, instructions, effort, use_functions,
+ function_name, function_description, function_parameters,
+ use_browser_search, temperature, max_output_tokens, debug_mode):
+
+    if not message.strip():
+        # Nothing to send; a plain return would be ignored because this function is a generator
+        yield history, ""
+        return
+
+ # Append user message and empty assistant placeholder (idiomatic Gradio pattern)
+ history = history + [[message, ""]]
+
+    # Convert history (excluding the empty assistant placeholder just appended) to the Responses API message format
+    messages = []
+    for user_msg, assistant_msg in history[:-1]:
+ if user_msg:
+ messages.append({
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": user_msg}]
+ })
+ if assistant_msg:
+ messages.append({
+ "type": "message",
+ "role": "assistant",
+ "content": [{"type": "output_text", "text": assistant_msg}]
+ })
+
+ # Add current user message
+ messages.append({
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": message}]
+ })
+
+ # Prepare tools
+ tools = []
+ if use_functions:
+ try:
+ tools.append({
+ "type": "function",
+ "name": function_name,
+ "description": function_description,
+ "parameters": json.loads(function_parameters),
+ })
+ except json.JSONDecodeError:
+ pass
+
+ if use_browser_search:
+ tools.append({"type": "browser_search"})
+
+ # Get URL based on model (matching streamlit logic)
+ options = ["large", "small"]
+ URL = ("http://localhost:8081/v1/responses" if model_choice == options[1]
+ else "http://localhost:8000/v1/responses")
+
+ try:
+ response = requests.post(
+ URL,
+ json={
+ "input": messages,
+ "stream": True,
+ "instructions": instructions,
+ "reasoning": {"effort": effort},
+ "metadata": {"__debug": debug_mode},
+ "tools": tools,
+ "temperature": temperature,
+ "max_output_tokens": max_output_tokens,
+ },
+ stream=True,
+ )
+
+ full_content = ""
+ text_delta = ""
+ current_output_index = 0
+ in_reasoning = False
+
+ for line in response.iter_lines(decode_unicode=True):
+ if not line or not line.startswith("data:"):
+ continue
+ data_str = line[len("data:"):].strip()
+ if not data_str:
+ continue
+
+ try:
+ data = json.loads(data_str)
+ except Exception:
+ continue
+
+ event_type = data.get("type", "")
+ output_index = data.get("output_index", 0)
+
+ if event_type == "response.output_item.added":
+ current_output_index = output_index
+ output_type = data.get("item", {}).get("type", "message")
+ text_delta = ""
+
+ if output_type == "reasoning":
+ if not in_reasoning:
+ full_content += "🤔 **Thinking...**\n"
+ in_reasoning = True
+ elif output_type == "message":
+ if in_reasoning:
+ full_content += "\n\n"
+ in_reasoning = False
+
+ elif event_type == "response.reasoning_text.delta":
+ delta = data.get("delta", "")
+ full_content += delta
+
+ # Update last assistant message (idiomatic Gradio pattern)
+ history[-1][1] = full_content
+ yield history, ""
+
+ elif event_type == "response.output_text.delta":
+ delta = data.get("delta", "")
+ full_content += delta
+
+ # Update last assistant message (idiomatic Gradio pattern)
+ history[-1][1] = full_content
+ yield history, ""
+
+ elif event_type == "response.output_item.done":
+ item = data.get("item", {})
+ if item.get("type") == "function_call":
+ function_call_text = f"\n\n🔨 Called `{item.get('name')}`\n**Arguments**\n```json\n{item.get('arguments', '')}\n```"
+ full_content += function_call_text
+
+ # Update last assistant message (idiomatic Gradio pattern)
+ history[-1][1] = full_content
+ yield history, ""
+
+ elif item.get("type") == "web_search_call":
+ web_search_text = f"\n\n🌐 **Web Search**\n```json\n{json.dumps(item.get('action', {}), indent=2)}\n```\n✅ Done"
+ full_content += web_search_text
+
+ # Update last assistant message (idiomatic Gradio pattern)
+ history[-1][1] = full_content
+ yield history, ""
+
+ elif event_type == "response.completed":
+ response_data = data.get("response", {})
+ if debug_mode:
+ debug_info = response_data.get("metadata", {}).get("__debug", "")
+ if debug_info:
+ full_content += f"\n\n**Debug**\n```\n{debug_info}\n```"
+
+ # Update last assistant message (idiomatic Gradio pattern)
+ history[-1][1] = full_content
+ yield history, ""
+ break
+
+ # Return final history and empty string to clear textbox
+ return history, ""
+
+ except Exception as e:
+ error_message = f"❌ Error: {str(e)}"
+ history[-1][1] = error_message
+        # chat_with_model is a generator, so the error update must be yielded to reach the UI
+        yield history, ""
+
+
+# Create the Gradio interface
+with gr.Blocks(title="💬 Chatbot") as demo:
+ gr.Markdown("# 💬 Chatbot")
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ chatbot = gr.Chatbot(height=500)
+
+ with gr.Row():
+ msg = gr.Textbox(placeholder="Type a message...", scale=4, show_label=False)
+ send_btn = gr.Button("Send", scale=1)
+
+ clear_btn = gr.Button("Clear Chat")
+
+ with gr.Column(scale=1):
+ model_choice = gr.Radio(["large", "small"], value="small", label="Model")
+
+ instructions = gr.Textbox(
+ label="Instructions",
+ value="You are a helpful assistant that can answer questions and help with tasks.",
+ lines=3
+ )
+
+ effort = gr.Radio(["low", "medium", "high"], value="medium", label="Reasoning effort")
+
+ gr.Markdown("#### Functions")
+ use_functions = gr.Checkbox(label="Use functions", value=False)
+
+ with gr.Column(visible=False) as function_group:
+ function_name = gr.Textbox(label="Function name", value="get_weather")
+ function_description = gr.Textbox(
+ label="Function description",
+ value="Get the weather for a given city"
+ )
+ function_parameters = gr.Textbox(
+ label="Function parameters",
+ value=DEFAULT_FUNCTION_PROPERTIES,
+ lines=6
+ )
+
+ # Conditional browser search (matching Streamlit logic)
+ # In Streamlit: if "show_browser" in st.query_params:
+ # For Gradio, we'll always show it (simplified)
+ gr.Markdown("#### Built-in Tools")
+ use_browser_search = gr.Checkbox(label="Use browser search", value=False)
+
+ temperature = gr.Slider(0.0, 1.0, value=1.0, step=0.01, label="Temperature")
+ max_output_tokens = gr.Slider(1000, 20000, value=1024, step=100, label="Max output tokens")
+
+ debug_mode = gr.Checkbox(label="Debug mode", value=False)
+
+ # Event handlers
+ def toggle_function_group(use_funcs):
+ return gr.update(visible=use_funcs)
+
+ use_functions.change(toggle_function_group, use_functions, function_group)
+
+ # Chat functionality
+ inputs = [msg, chatbot, model_choice, instructions, effort, use_functions,
+ function_name, function_description, function_parameters,
+ use_browser_search, temperature, max_output_tokens, debug_mode]
+
+ msg.submit(chat_with_model, inputs, [chatbot, msg])
+ send_btn.click(chat_with_model, inputs, [chatbot, msg])
+ clear_btn.click(lambda: [], outputs=chatbot)
+
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/examples/streamlit/streamlit_chat.py b/examples/streamlit/streamlit_chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc533fd689d667208d36bcac13283e36d4db8e90
--- /dev/null
+++ b/examples/streamlit/streamlit_chat.py
@@ -0,0 +1,354 @@
+import json
+
+import requests
+import streamlit as st
+
+DEFAULT_FUNCTION_PROPERTIES = """
+{
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA"
+ }
+ },
+ "required": ["location"]
+}
+""".strip()
+
+# Session state for chat
+if "messages" not in st.session_state:
+ st.session_state.messages = []
+
+st.title("💬 Chatbot")
+
+if "model" not in st.session_state:
+ if "model" in st.query_params:
+ st.session_state.model = st.query_params["model"]
+ else:
+ st.session_state.model = "small"
+
+options = ["large", "small"]
+selection = st.sidebar.segmented_control(
+ "Model", options, selection_mode="single", default=st.session_state.model
+)
+# st.session_state.model = selection
+st.query_params.update({"model": selection})
+
+instructions = st.sidebar.text_area(
+ "Instructions",
+ value="You are a helpful assistant that can answer questions and help with tasks.",
+)
+effort = st.sidebar.radio(
+ "Reasoning effort",
+ ["low", "medium", "high"],
+ index=1,
+)
+st.sidebar.divider()
+st.sidebar.subheader("Functions")
+use_functions = st.sidebar.toggle("Use functions", value=False)
+
+st.sidebar.subheader("Built-in Tools")
+# Built-in Tools section
+use_browser_search = st.sidebar.toggle("Use browser search", value=False)
+use_code_interpreter = st.sidebar.toggle("Use code interpreter", value=False)
+
+if use_functions:
+ function_name = st.sidebar.text_input("Function name", value="get_weather")
+ function_description = st.sidebar.text_area(
+ "Function description", value="Get the weather for a given city"
+ )
+ function_parameters = st.sidebar.text_area(
+ "Function parameters", value=DEFAULT_FUNCTION_PROPERTIES
+ )
+else:
+ function_name = None
+ function_description = None
+ function_parameters = None
+st.sidebar.divider()
+temperature = st.sidebar.slider(
+ "Temperature", min_value=0.0, max_value=1.0, value=1.0, step=0.01
+)
+max_output_tokens = st.sidebar.slider(
+ "Max output tokens", min_value=1, max_value=131072, value=30000, step=1000
+)
+st.sidebar.divider()
+debug_mode = st.sidebar.toggle("Debug mode", value=False)
+
+if debug_mode:
+ st.sidebar.divider()
+ st.sidebar.code(json.dumps(st.session_state.messages, indent=2), "json")
+
+render_input = True
+
+URL = (
+ "http://localhost:8081/v1/responses"
+ if selection == options[1]
+ else "http://localhost:8000/v1/responses"
+)
+
+
+def trigger_fake_tool(container):
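+    # Simulates the tool call: appends the user-supplied output for the pending function_call, then resumes the run.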
+ function_output = st.session_state.get("function_output", "It's sunny!")
+ last_call = st.session_state.messages[-1]
+ if last_call.get("type") == "function_call":
+ st.session_state.messages.append(
+ {
+ "type": "function_call_output",
+ "call_id": last_call.get("call_id"),
+ "output": function_output,
+ }
+ )
+ run(container)
+
+
+def run(container):
+ tools = []
+ if use_functions:
+ tools.append(
+ {
+ "type": "function",
+ "name": function_name,
+ "description": function_description,
+ "parameters": json.loads(function_parameters),
+ }
+ )
+ # Add browser_search tool if checkbox is checked
+ if use_browser_search:
+ tools.append({"type": "browser_search"})
+ if use_code_interpreter:
+ tools.append({"type": "code_interpreter"})
+ response = requests.post(
+ URL,
+ json={
+ "input": st.session_state.messages,
+ "stream": True,
+ "instructions": instructions,
+ "reasoning": {"effort": effort},
+ "metadata": {"__debug": debug_mode},
+ "tools": tools,
+ "temperature": temperature,
+ "max_output_tokens": max_output_tokens,
+ },
+ stream=True,
+ )
+
+ text_delta = ""
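+    # Per-call placeholders (status/code/outputs) so streamed code interpreter events update the right message.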
+ code_interpreter_sessions: dict[str, dict] = {}
+
+ _current_output_index = 0
+ for line in response.iter_lines(decode_unicode=True):
+ if not line or not line.startswith("data:"):
+ continue
+ data_str = line[len("data:") :].strip()
+ if not data_str:
+ continue
+ try:
+ data = json.loads(data_str)
+ except Exception:
+ continue
+
+ event_type = data.get("type", "")
+ output_index = data.get("output_index", 0)
+ if event_type == "response.output_item.added":
+ _current_output_index = output_index
+ output_type = data.get("item", {}).get("type", "message")
+ if output_type == "message":
+ output = container.chat_message("assistant")
+ placeholder = output.empty()
+ elif output_type == "reasoning":
+ output = container.chat_message("reasoning", avatar="🤔")
+ placeholder = output.empty()
+ elif output_type == "web_search_call":
+ output = container.chat_message("web_search_call", avatar="🌐")
+ output.code(
+ json.dumps(data.get("item", {}).get("action", {}), indent=4),
+ language="json",
+ )
+ placeholder = output.empty()
+ elif output_type == "code_interpreter_call":
+ item = data.get("item", {})
+ item_id = item.get("id")
+ message_container = container.chat_message(
+ "code_interpreter_call", avatar="🧪"
+ )
+ status_placeholder = message_container.empty()
+ code_placeholder = message_container.empty()
+ outputs_container = message_container.container()
+ code_text = item.get("code") or ""
+ if code_text:
+ code_placeholder.code(code_text, language="python")
+ code_interpreter_sessions[item_id] = {
+ "status": status_placeholder,
+ "code": code_placeholder,
+ "outputs": outputs_container,
+ "code_text": code_text,
+ "rendered_outputs": False,
+ }
+ placeholder = status_placeholder
+ text_delta = ""
+ elif event_type == "response.reasoning_text.delta":
+ output.avatar = "🤔"
+ text_delta += data.get("delta", "")
+ placeholder.markdown(text_delta)
+ elif event_type == "response.output_text.delta":
+ text_delta += data.get("delta", "")
+ placeholder.markdown(text_delta)
+ elif event_type == "response.output_item.done":
+ item = data.get("item", {})
+ if item.get("type") == "function_call":
+ with container.chat_message("function_call", avatar="🔨"):
+ st.markdown(f"Called `{item.get('name')}`")
+ st.caption("Arguments")
+ st.code(item.get("arguments", ""), language="json")
+ if item.get("type") == "web_search_call":
+ placeholder.markdown("✅ Done")
+ if item.get("type") == "code_interpreter_call":
+ item_id = item.get("id")
+ session = code_interpreter_sessions.get(item_id)
+ if session:
+ session["status"].markdown("✅ Done")
+ final_code = item.get("code") or session["code_text"]
+ if final_code:
+ session["code"].code(final_code, language="python")
+ session["code_text"] = final_code
+ outputs = item.get("outputs") or []
+ if outputs and not session["rendered_outputs"]:
+ with session["outputs"]:
+ st.markdown("**Outputs**")
+ for output_item in outputs:
+ output_type = output_item.get("type")
+ if output_type == "logs":
+ st.code(
+ output_item.get("logs", ""),
+ language="text",
+ )
+ elif output_type == "image":
+ st.image(
+ output_item.get("url", ""),
+ caption="Code interpreter image",
+ )
+ session["rendered_outputs"] = True
+ elif not outputs and not session["rendered_outputs"]:
+ with session["outputs"]:
+ st.caption("(No outputs)")
+ session["rendered_outputs"] = True
+ else:
+ placeholder.markdown("✅ Done")
+ elif event_type == "response.code_interpreter_call.in_progress":
+ item_id = data.get("item_id")
+ session = code_interpreter_sessions.get(item_id)
+ if session:
+ session["status"].markdown("⏳ Running")
+ else:
+ try:
+ placeholder.markdown("⏳ Running")
+ except Exception:
+ pass
+ elif event_type == "response.code_interpreter_call.interpreting":
+ item_id = data.get("item_id")
+ session = code_interpreter_sessions.get(item_id)
+ if session:
+ session["status"].markdown("🧮 Interpreting")
+ elif event_type == "response.code_interpreter_call.completed":
+ item_id = data.get("item_id")
+ session = code_interpreter_sessions.get(item_id)
+ if session:
+ session["status"].markdown("✅ Done")
+ else:
+ try:
+ placeholder.markdown("✅ Done")
+ except Exception:
+ pass
+ elif event_type == "response.code_interpreter_call_code.delta":
+ item_id = data.get("item_id")
+ session = code_interpreter_sessions.get(item_id)
+ if session:
+ session["code_text"] += data.get("delta", "")
+ if session["code_text"].strip():
+ session["code"].code(session["code_text"], language="python")
+ elif event_type == "response.code_interpreter_call_code.done":
+ item_id = data.get("item_id")
+ session = code_interpreter_sessions.get(item_id)
+ if session:
+ final_code = data.get("code") or session["code_text"]
+ session["code_text"] = final_code
+ if final_code:
+ session["code"].code(final_code, language="python")
+ elif event_type == "response.completed":
+ response = data.get("response", {})
+ if debug_mode:
+ container.expander("Debug", expanded=False).code(
+ response.get("metadata", {}).get("__debug", ""), language="text"
+ )
+ st.session_state.messages.extend(response.get("output", []))
+ if st.session_state.messages[-1].get("type") == "function_call":
+ with container.form("function_output_form"):
+ _function_output = st.text_input(
+ "Enter function output",
+ value=st.session_state.get("function_output", "It's sunny!"),
+ key="function_output",
+ )
+ st.form_submit_button(
+ "Submit function output",
+ on_click=trigger_fake_tool,
+ args=[container],
+ )
+ # Optionally handle other event types...
+
+
+# Chat display
+for msg in st.session_state.messages:
+ if msg.get("type") == "message":
+ with st.chat_message(msg["role"]):
+ for item in msg["content"]:
+ if (
+ item.get("type") == "text"
+ or item.get("type") == "output_text"
+ or item.get("type") == "input_text"
+ ):
+ st.markdown(item["text"])
+ if item.get("annotations"):
+ annotation_lines = "\n".join(
+ f"- {annotation.get('url')}"
+ for annotation in item["annotations"]
+ if annotation.get("url")
+ )
+ st.caption(f"**Annotations:**\n{annotation_lines}")
+ elif msg.get("type") == "reasoning":
+ with st.chat_message("reasoning", avatar="🤔"):
+ for item in msg["content"]:
+ if item.get("type") == "reasoning_text":
+ st.markdown(item["text"])
+ elif msg.get("type") == "function_call":
+ with st.chat_message("function_call", avatar="🔨"):
+ st.markdown(f"Called `{msg.get('name')}`")
+ st.caption("Arguments")
+ st.code(msg.get("arguments", ""), language="json")
+ elif msg.get("type") == "function_call_output":
+ with st.chat_message("function_call_output", avatar="✅"):
+ st.caption("Output")
+ st.code(msg.get("output", ""), language="text")
+ elif msg.get("type") == "web_search_call":
+ with st.chat_message("web_search_call", avatar="🌐"):
+ st.code(json.dumps(msg.get("action", {}), indent=4), language="json")
+ st.markdown("✅ Done")
+ elif msg.get("type") == "code_interpreter_call":
+ with st.chat_message("code_interpreter_call", avatar="🧪"):
+ st.markdown("✅ Done")
+
+if render_input:
+ # Input field
+ if prompt := st.chat_input("Type a message..."):
+ st.session_state.messages.append(
+ {
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": prompt}],
+ }
+ )
+
+ with st.chat_message("user"):
+ st.markdown(prompt)
+
+ run(st.container())
diff --git a/gpt-oss-mcp-server/README.md b/gpt-oss-mcp-server/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..10aedd5f0cbdadeaeccaa89db8860bf0ad122e94
--- /dev/null
+++ b/gpt-oss-mcp-server/README.md
@@ -0,0 +1,29 @@
+# MCP Servers for gpt-oss reference tools
+
+This directory contains MCP servers for the reference tools in the [gpt-oss](https://github.com/openai/gpt-oss) repository.
+You can set up these tools behind MCP servers and use them in your applications.
+If your inference service integrates with MCP, you can also use these servers as reference tools.
+
+In particular, this directory contains a `build-system-prompt.py` script that generates exactly the same system prompt as `reference-system-prompt.py`.
+The script showcases all the care needed to automatically discover the tools and construct the system prompt before feeding it into Harmony.
+
+## Usage
+
+```bash
+# Install the dependencies
+uv pip install -r requirements.txt
+```
+
+```bash
+# Assuming harmony and gpt-oss are already installed
+uv pip install "mcp[cli]"
+# Start the servers
+mcp run -t sse browser_server.py:mcp
+mcp run -t sse python_server.py:mcp
+```
+
+You can now use the MCP Inspector to play with the tools.
+Once opened, point the SSE transport at `http://localhost:8001/sse` (browser) and `http://localhost:8000/sse` (python), respectively.
+
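+If you do not already have the Inspector available, one way to launch it (package name taken from the MCP documentation; adjust to your setup) is:
+
+```bash
+npx @modelcontextprotocol/inspector
+```
+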
+To see how the system prompt is constructed via MCP service discovery, read `build-system-prompt.py`.
+Its output matches `reference-system-prompt.py` exactly.
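+
+For a quick consistency check (assuming the SSE servers above are running and any backend credentials the tools need are configured), you can diff the two outputs:
+
+```bash
+python reference-system-prompt.py > reference.txt
+python build-system-prompt.py > generated.txt
+diff reference.txt generated.txt && echo "Prompts match"
+```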
diff --git a/gpt-oss-mcp-server/browser_server.py b/gpt-oss-mcp-server/browser_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..b37a63a6a758654c2b28dcf0b4566d22c7256a42
--- /dev/null
+++ b/gpt-oss-mcp-server/browser_server.py
@@ -0,0 +1,120 @@
+import os
+from collections.abc import AsyncIterator
+from contextlib import asynccontextmanager
+from dataclasses import dataclass, field
+from typing import Union, Optional
+
+from mcp.server.fastmcp import Context, FastMCP
+from gpt_oss.tools.simple_browser import SimpleBrowserTool
+from gpt_oss.tools.simple_browser.backend import YouComBackend, ExaBackend
+
+@dataclass
+class AppContext:
+ browsers: dict[str, SimpleBrowserTool] = field(default_factory=dict)
+
+ def create_or_get_browser(self, session_id: str) -> SimpleBrowserTool:
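+        # One browser per client session keeps page state (cursors, history) isolated between clients.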
+ if session_id not in self.browsers:
+ tool_backend = os.getenv("BROWSER_BACKEND", "exa")
+ if tool_backend == "youcom":
+ backend = YouComBackend(source="web")
+ elif tool_backend == "exa":
+ backend = ExaBackend(source="web")
+ else:
+ raise ValueError(f"Invalid tool backend: {tool_backend}")
+ self.browsers[session_id] = SimpleBrowserTool(backend=backend)
+ return self.browsers[session_id]
+
+ def remove_browser(self, session_id: str) -> None:
+ self.browsers.pop(session_id, None)
+
+
+@asynccontextmanager
+async def app_lifespan(_server: FastMCP) -> AsyncIterator[AppContext]:
+ yield AppContext()
+
+
+# Pass lifespan to server
+mcp = FastMCP(
+ name="browser",
+ instructions=r"""
+Tool for browsing.
+The `cursor` appears in brackets before each browsing display: `[{cursor}]`.
+Cite information from the tool using the following format:
+`【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`.
+Do not quote more than 10 words directly from the tool output.
+sources=web
+""".strip(),
+ lifespan=app_lifespan,
+ port=8001,
+)
+
+
+@mcp.tool(
+ name="search",
+ title="Search for information",
+ description=
+ "Searches for information related to `query` and displays `topn` results.",
+)
+async def search(ctx: Context,
+ query: str,
+ topn: int = 10,
+ source: Optional[str] = None) -> str:
+ """Search for information related to a query"""
+ browser = ctx.request_context.lifespan_context.create_or_get_browser(
+ ctx.client_id)
+ messages = []
+ async for message in browser.search(query=query, topn=topn, source=source):
+ if message.content and hasattr(message.content[0], 'text'):
+ messages.append(message.content[0].text)
+ return "\n".join(messages)
+
+
+@mcp.tool(
+ name="open",
+ title="Open a link or page",
+ description="""
+Opens the link `id` from the page indicated by `cursor` starting at line number `loc`, showing `num_lines` lines.
+Valid link ids are displayed with the formatting: `【{id}†.*】`.
+If `cursor` is not provided, the most recent page is implied.
+If `id` is a string, it is treated as a fully qualified URL associated with `source`.
+If `loc` is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available.
+Use this function without `id` to scroll to a new location of an opened page.
+""".strip(),
+)
+async def open_link(ctx: Context,
+ id: Union[int, str] = -1,
+ cursor: int = -1,
+ loc: int = -1,
+ num_lines: int = -1,
+ view_source: bool = False,
+ source: Optional[str] = None) -> str:
+ """Open a link or navigate to a page location"""
+ browser = ctx.request_context.lifespan_context.create_or_get_browser(
+ ctx.client_id)
+ messages = []
+ async for message in browser.open(id=id,
+ cursor=cursor,
+ loc=loc,
+ num_lines=num_lines,
+ view_source=view_source,
+ source=source):
+ if message.content and hasattr(message.content[0], 'text'):
+ messages.append(message.content[0].text)
+ return "\n".join(messages)
+
+
+@mcp.tool(
+ name="find",
+ title="Find pattern in page",
+ description=
+ "Finds exact matches of `pattern` in the current page, or the page given by `cursor`.",
+)
+async def find_pattern(ctx: Context, pattern: str, cursor: int = -1) -> str:
+ """Find exact matches of a pattern in the current page"""
+ browser = ctx.request_context.lifespan_context.create_or_get_browser(
+ ctx.client_id)
+ messages = []
+ async for message in browser.find(pattern=pattern, cursor=cursor):
+ if message.content and hasattr(message.content[0], 'text'):
+ messages.append(message.content[0].text)
+ return "\n".join(messages)
diff --git a/gpt-oss-mcp-server/build-system-prompt.py b/gpt-oss-mcp-server/build-system-prompt.py
new file mode 100644
index 0000000000000000000000000000000000000000..1aca256a3e14af773cd7b30a4bf1d1ec65d50576
--- /dev/null
+++ b/gpt-oss-mcp-server/build-system-prompt.py
@@ -0,0 +1,116 @@
+import datetime
+import asyncio
+
+from gpt_oss.tokenizer import get_tokenizer
+
+from openai_harmony import (
+ Conversation,
+ DeveloperContent,
+ HarmonyEncodingName,
+ Message,
+ ReasoningEffort,
+ Role,
+ SystemContent,
+ ToolNamespaceConfig,
+ ToolDescription,
+ load_harmony_encoding,
+)
+
+from mcp import ClientSession
+from mcp.client.sse import sse_client
+from mcp.types import ListToolsResult
+
+
+async def list_server_and_tools(server_url: str):
+ async with sse_client(url=server_url) as streams, ClientSession(
+ *streams) as session:
+ initialize_response = await session.initialize()
+ list_tools_response = await session.list_tools()
+ return initialize_response, list_tools_response
+
+
+def trim_schema(schema: dict) -> dict:
+ # Turn JSON Schema from MCP generated into Harmony's variant.
+ if "title" in schema:
+ del schema["title"]
+ if "default" in schema and schema["default"] is None:
+ del schema["default"]
+ if "anyOf" in schema:
+ # Turn "anyOf": [{"type": "type-1"}, {"type": "type-2"}] into "type": ["type-1", "type-2"]
+ # if there's more than 1 types, also remove "null" type as Harmony will just ignore it
+ types = [
+ type_dict["type"] for type_dict in schema["anyOf"]
+ if type_dict["type"] != 'null'
+ ]
+ schema["type"] = types
+ del schema["anyOf"]
+ if "properties" in schema:
+ schema["properties"] = {
+ k: trim_schema(v)
+ for k, v in schema["properties"].items()
+ }
+ return schema
+
+
+def post_process_tools_description(
+ list_tools_result: ListToolsResult) -> ListToolsResult:
+ # Adapt the MCP tool result for Harmony
+ for tool in list_tools_result.tools:
+ tool.inputSchema = trim_schema(tool.inputSchema)
+
+ # Some tools schema don't need to be part of the prompt (e.g. simple text in text out for Python)
+ list_tools_result.tools = [
+ tool for tool in list_tools_result.tools
+ if getattr(tool.annotations, "include_in_prompt", True)
+ ]
+
+ return list_tools_result
+
+tokenizer = get_tokenizer()
+
+tools_urls = [
+ "http://localhost:8001/sse", # browser
+ "http://localhost:8000/sse", # python
+]
+harmony_tool_descriptions = []
+for tools_url in tools_urls:
+
+ initialize_response, list_tools_response = asyncio.run(
+ list_server_and_tools(tools_url))
+
+ list_tools_response = post_process_tools_description(list_tools_response)
+
+ tool_from_mcp = ToolNamespaceConfig(
+ name=initialize_response.serverInfo.name,
+ description=initialize_response.instructions,
+ tools=[
+ ToolDescription.new(name=tool.name,
+ description=tool.description,
+ parameters=tool.inputSchema)
+ for tool in list_tools_response.tools
+ ])
+ harmony_tool_descriptions.append(tool_from_mcp)
+
+encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
+
+system_message_content = (SystemContent.new().with_reasoning_effort(
+ ReasoningEffort.LOW).with_conversation_start_date(
+ datetime.datetime.now().strftime("%Y-%m-%d")))
+
+for tool_description in harmony_tool_descriptions:
+ system_message_content = system_message_content.with_tools(
+ tool_description)
+
+system_message = Message.from_role_and_content(Role.SYSTEM,
+ system_message_content)
+
+developer_message_content = DeveloperContent.new().with_instructions("")
+developer_message = Message.from_role_and_content(Role.DEVELOPER,
+ developer_message_content)
+
+messages = [system_message, developer_message]
+
+conversation = Conversation.from_messages(messages)
+tokens = encoding.render_conversation(conversation)
+system_message = tokenizer.decode(tokens)
+print(system_message)
diff --git a/gpt-oss-mcp-server/pyproject.toml b/gpt-oss-mcp-server/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..f7f7ed922732185368e2d41f966330a586d4f199
--- /dev/null
+++ b/gpt-oss-mcp-server/pyproject.toml
@@ -0,0 +1,8 @@
+[project]
+name = "gpt-oss-mcp-server"
+version = "0.1.0"
+requires-python = ">=3.10"
+dependencies = [
+ "mcp[cli]>=1.12.2",
+ # "gpt_oss"
+]
diff --git a/gpt-oss-mcp-server/python_server.py b/gpt-oss-mcp-server/python_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ec353088f13c115170c2782b200568bbd14bd9b
--- /dev/null
+++ b/gpt-oss-mcp-server/python_server.py
@@ -0,0 +1,33 @@
+from mcp.server.fastmcp import FastMCP
+from gpt_oss.tools.python_docker.docker_tool import PythonTool
+from openai_harmony import Message, TextContent, Author, Role
+
+# Pass lifespan to server
+mcp = FastMCP(
+ name="python",
+ instructions=r"""
+Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).
+When you send a message containing python code to python, it will be executed in a stateless docker container, and the stdout of that process will be returned to you.
+""".strip(),
+)
+
+
+@mcp.tool(
+ name="python",
+ title="Execute Python code",
+ description="""
+Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).
+When you send a message containing python code to python, it will be executed in a stateless docker container, and the stdout of that process will be returned to you.
+ """,
+ annotations={
+ # Harmony format don't want this schema to be part of it because it's simple text in text out
+ "include_in_prompt": False,
+ })
+async def python(code: str) -> str:
+ tool = PythonTool()
+ messages = []
+ async for message in tool.process(
+ Message(author=Author(role=Role.TOOL, name="python"),
+ content=[TextContent(text=code)])):
+ messages.append(message)
+ return "\n".join([message.content[0].text for message in messages])
diff --git a/gpt-oss-mcp-server/reference-system-prompt.py b/gpt-oss-mcp-server/reference-system-prompt.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ddbf7c99e4fc1380287309b67b0812d626cbf27
--- /dev/null
+++ b/gpt-oss-mcp-server/reference-system-prompt.py
@@ -0,0 +1,46 @@
+import datetime
+
+from gpt_oss.tools.simple_browser import SimpleBrowserTool
+from gpt_oss.tools.simple_browser.backend import YouComBackend
+from gpt_oss.tools.python_docker.docker_tool import PythonTool
+from gpt_oss.tokenizer import tokenizer
+
+from openai_harmony import (
+ Conversation,
+ DeveloperContent,
+ HarmonyEncodingName,
+ Message,
+ ReasoningEffort,
+ Role,
+ SystemContent,
+ load_harmony_encoding,
+)
+
+encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
+
+system_message_content = (SystemContent.new().with_reasoning_effort(
+ ReasoningEffort.LOW).with_conversation_start_date(
+ datetime.datetime.now().strftime("%Y-%m-%d")))
+
+backend = YouComBackend(source="web")
+browser_tool = SimpleBrowserTool(backend=backend)
+system_message_content = system_message_content.with_tools(
+ browser_tool.tool_config)
+
+python_tool = PythonTool()
+system_message_content = system_message_content.with_tools(
+ python_tool.tool_config)
+
+system_message = Message.from_role_and_content(Role.SYSTEM,
+ system_message_content)
+
+developer_message_content = DeveloperContent.new().with_instructions("")
+developer_message = Message.from_role_and_content(Role.DEVELOPER,
+ developer_message_content)
+
+messages = [system_message, developer_message]
+
+conversation = Conversation.from_messages(messages)
+tokens = encoding.render_conversation(conversation)
+system_message = tokenizer.decode(tokens)
+print(system_message)
diff --git a/gpt_oss/__init__.py b/gpt_oss/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_oss/chat.py b/gpt_oss/chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..4856a397813e6110bf3b50e712cdea8912bd21c7
--- /dev/null
+++ b/gpt_oss/chat.py
@@ -0,0 +1,369 @@
+"""
+Harmony chat with tools
+"""
+
+import atexit
+import argparse
+import asyncio
+import datetime
+import os
+from pathlib import Path
+
+try:
+ import gnureadline as readline
+except ImportError:
+ import readline
+
+import torch
+import termcolor
+
+from gpt_oss.tools import apply_patch
+from gpt_oss.tools.simple_browser import SimpleBrowserTool
+from gpt_oss.tools.simple_browser.backend import YouComBackend
+from gpt_oss.tools.python_docker.docker_tool import PythonTool
+
+from openai_harmony import (
+ Author,
+ Conversation,
+ DeveloperContent,
+ HarmonyEncodingName,
+ Message,
+ ReasoningEffort,
+ Role,
+ StreamableParser,
+ StreamState,
+ SystemContent,
+ TextContent,
+ ToolDescription,
+ load_harmony_encoding,
+)
+
+
+REASONING_EFFORT = {
+ "high": ReasoningEffort.HIGH,
+ "medium": ReasoningEffort.MEDIUM,
+ "low": ReasoningEffort.LOW,
+}
+
+
+def get_user_input():
+ rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
+ if rank == 0:
+ user_input = input()
+ else:
+ user_input = ""
+ user_input_list = [user_input]
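+    # Only rank 0 reads stdin; broadcast so every distributed rank sees the same prompt.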
+ if torch.distributed.is_initialized():
+ torch.distributed.broadcast_object_list(user_input_list, 0)
+ return user_input_list[0]
+
+
+def main(args):
+ match args.backend:
+ case "triton":
+ from gpt_oss.triton.model import TokenGenerator as TritonGenerator
+ from gpt_oss.torch.utils import init_distributed
+ device = init_distributed()
+ generator = TritonGenerator(args.checkpoint, args.context, device)
+ case "torch":
+ from gpt_oss.torch.model import TokenGenerator as TorchGenerator
+ from gpt_oss.torch.utils import init_distributed
+ device = init_distributed()
+ generator = TorchGenerator(args.checkpoint, device)
+ case "vllm":
+ from gpt_oss.vllm.token_generator import TokenGenerator as VLLMGenerator
+ generator = VLLMGenerator(args.checkpoint, tensor_parallel_size=2)
+ case _:
+ raise ValueError(f"Invalid backend: {args.backend}")
+
+ encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
+
+ system_message_content = (
+ SystemContent.new()
+ .with_reasoning_effort(REASONING_EFFORT[args.reasoning_effort])
+ .with_conversation_start_date(datetime.datetime.now().strftime("%Y-%m-%d"))
+ )
+
+ if args.browser:
+ backend = YouComBackend(
+ source="web",
+ )
+ browser_tool = SimpleBrowserTool(backend=backend)
+ system_message_content = system_message_content.with_tools(browser_tool.tool_config)
+
+ if args.python:
+ python_tool = PythonTool()
+ system_message_content = system_message_content.with_tools(python_tool.tool_config)
+
+ system_message = Message.from_role_and_content(Role.SYSTEM, system_message_content)
+ messages = [system_message]
+
+ if args.apply_patch:
+ apply_patch_instructions = Path(apply_patch.__file__).parent / "apply_patch.md"
+ developer_message = ""
+ if args.developer_message:
+ developer_message = args.developer_message + "\n"
+ developer_message += apply_patch_instructions.read_text()
+ developer_message_content = (
+ DeveloperContent.new()
+ .with_instructions(developer_message)
+ .with_function_tools([
+ ToolDescription.new(
+ "apply_patch",
+ "Patch a file",
+ parameters={
+ "type": "string",
+ "description": "Formatted patch code",
+ "default": "*** Begin Patch\n*** End Patch\n",
+ }
+ ),
+ ])
+ )
+ messages.append(Message.from_role_and_content(Role.DEVELOPER, developer_message_content))
+ elif args.developer_message:
+ developer_message_content = DeveloperContent.new().with_instructions(args.developer_message)
+ messages.append(Message.from_role_and_content(Role.DEVELOPER, developer_message_content))
+ else:
+ developer_message_content = None
+
+ if args.raw:
+ conversation = Conversation.from_messages(messages)
+ tokens = encoding.render_conversation(conversation)
+ system_message = encoding.decode(tokens)
+ print(system_message, flush=True, end="")
+ empty_user_message_tokens = encoding.render(Message.from_role_and_content(Role.USER, ""))
+ user_message_start = encoding.decode(empty_user_message_tokens[:-1])
+ user_message_end = encoding.decode(empty_user_message_tokens[-1:])
+ else:
+ # System message
+ print(termcolor.colored("System Message:", "cyan"), flush=True)
+ print(termcolor.colored("Model Identity:", "cyan"), system_message_content.model_identity, flush=True)
+ print(termcolor.colored("Reasoning Effort:", "cyan"), system_message_content.reasoning_effort, flush=True)
+ print(termcolor.colored("Conversation Start Date:", "cyan"), system_message_content.conversation_start_date, flush=True)
+ print(termcolor.colored("Knowledge Cutoff:", "cyan"), system_message_content.knowledge_cutoff, flush=True)
+ print(termcolor.colored("Browser Tool:", "cyan"), "Enabled" if args.browser else "Disabled", flush=True)
+ print(termcolor.colored("Python Tool:", "cyan"), "Enabled" if args.python else "Disabled", flush=True)
+ print(termcolor.colored("Apply Patch Function:", "cyan"), "Enabled" if args.apply_patch else "Disabled", flush=True)
+ if developer_message_content:
+ print(termcolor.colored("Developer Message:", "yellow"), flush=True)
+ print(developer_message_content.instructions, flush=True)
+
+    # Main interaction loop: user turns, tool/function calls, and assistant sampling
+ MESSAGE_PADDING = 12
+ while True:
+ last_message = messages[-1]
+ if last_message.recipient is None:
+ if args.raw:
+ print(user_message_start, end="", flush=True)
+ user_message = get_user_input()
+ print(user_message_end, flush=True, end="")
+ else:
+ print(termcolor.colored("User:".ljust(MESSAGE_PADDING), "red"), flush=True)
+ user_message = get_user_input()
+ user_message = Message.from_role_and_content(Role.USER, user_message)
+ messages.append(user_message)
+ else:
+ # Tool or function call
+ if last_message.recipient.startswith("browser."):
+ assert args.browser, "Browser tool is not enabled"
+ tool_name = "Search"
+ async def run_tool():
+ results = []
+ async for msg in browser_tool.process(last_message):
+ results.append(msg)
+ return results
+
+ result = asyncio.run(run_tool())
+ messages += result
+ elif last_message.recipient.startswith("python"):
+ assert args.python, "Python tool is not enabled"
+ tool_name = "Python"
+ async def run_tool():
+ results = []
+ async for msg in python_tool.process(last_message):
+ results.append(msg)
+ return results
+
+ result = asyncio.run(run_tool())
+ messages += result
+ elif last_message.recipient == "functions.apply_patch":
+ assert args.apply_patch, "Apply patch tool is not enabled"
+ tool_name = "Apply Patch"
+ text = last_message.content[0].text
+ tool_output = None
+
+ if text.startswith("{"):
+ # this is json, try to extract the patch from it
+ import json
+ try:
+ some_dict = json.loads(text)
+ _, text = some_dict.popitem()
+ except Exception as e:
+ tool_output = f"Error parsing JSON: {e}"
+
+ if tool_output is None:
+ try:
+ tool_output = apply_patch.apply_patch(text)
+ except Exception as e:
+ tool_output = f"Error applying patch: {e}"
+
+ message = (
+ Message(
+ author=Author.new(Role.TOOL, last_message.recipient),
+ content=[TextContent(text=tool_output)]
+ )
+ .with_recipient("assistant")
+ )
+ if last_message.channel:
+ message = message.with_channel(last_message.channel)
+
+ result = [message]
+ messages += result
+ else:
+ raise ValueError(f"Unknown tool or function call: {last_message.recipient}")
+ # Print the tool or function call result
+ if args.raw:
+ rendered_result = encoding.render_conversation(Conversation.from_messages(result))
+ print(encoding.decode(rendered_result), flush=True, end="")
+ else:
+ print(termcolor.colored(f"{tool_name} output:".ljust(MESSAGE_PADDING), "magenta"), flush=True)
+ if tool_name == "Search" and not args.show_browser_results:
+ print("[Search results fed to the model]")
+ else:
+ print(result[0].content[0].text)
+
+ conversation = Conversation.from_messages(messages)
+ tokens = encoding.render_conversation_for_completion(
+ conversation, Role.ASSISTANT
+ )
+
+ if args.raw:
+ # Print the last two tokens, which are the start of the assistant message
+ print(encoding.decode(tokens[-2:]), flush=True, end="")
+
+ parser = StreamableParser(encoding, role=Role.ASSISTANT)
+ field_created = False
+ current_output_text = ""
+ output_text_delta_buffer = ""
+ for predicted_token in generator.generate(tokens, encoding.stop_tokens_for_assistant_actions()):
+ parser.process(predicted_token)
+ if args.raw:
+ print(encoding.decode([predicted_token]), end="", flush=True)
+ continue
+
+ if parser.state == StreamState.EXPECT_START:
+ print("") # new line
+ field_created = False
+
+ if not parser.last_content_delta:
+ continue
+
+ if not field_created:
+ field_created = True
+ if parser.current_channel == "final":
+ print(termcolor.colored("Assistant:", "green"), flush=True)
+ elif parser.current_recipient is not None:
+ print(termcolor.colored(f"Tool call to {parser.current_recipient}:", "cyan"), flush=True)
+ else:
+ print(termcolor.colored("CoT:", "yellow"), flush=True)
+
+ should_send_output_text_delta = True
+ output_text_delta_buffer += parser.last_content_delta
+ if args.browser:
+ updated_output_text, _annotations, has_partial_citations = browser_tool.normalize_citations(current_output_text + output_text_delta_buffer)
+ output_text_delta_buffer = updated_output_text[len(current_output_text):]
+ if has_partial_citations:
+ should_send_output_text_delta = False
+ if should_send_output_text_delta:
+ print(output_text_delta_buffer, end="", flush=True)
+ current_output_text += output_text_delta_buffer
+ output_text_delta_buffer = ""
+
+ messages += parser.messages
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Chat example",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "checkpoint",
+ metavar="FILE",
+ type=str,
+ help="Path to the SafeTensors checkpoint",
+ )
+ parser.add_argument(
+ "-r",
+ "--reasoning-effort",
+ metavar="REASONING_EFFORT",
+ type=str,
+ default="low",
+ choices=["high", "medium", "low"],
+ help="Reasoning effort",
+ )
+ parser.add_argument(
+ "-a",
+ "--apply-patch",
+ action="store_true",
+ help="Make apply_patch function available to the model",
+ )
+ parser.add_argument(
+ "-b",
+ "--browser",
+ default=False,
+ action="store_true",
+ help="Use browser tool",
+ )
+ parser.add_argument(
+ "--show-browser-results",
+ default=False,
+ action="store_true",
+ help="Show browser results",
+ )
+ parser.add_argument(
+ "-p",
+ "--python",
+ default=False,
+ action="store_true",
+ help="Use python tool",
+ )
+ parser.add_argument(
+ "--developer-message",
+ default="",
+ help="Developer message",
+ )
+ parser.add_argument(
+ "-c",
+ "--context",
+ metavar="CONTEXT",
+ type=int,
+ default=8192,
+ help="Max context length",
+ )
+ parser.add_argument(
+ "--raw",
+ default=False,
+ action="store_true",
+ help="Raw mode (does not render Harmony encoding)",
+ )
+ parser.add_argument(
+ "--backend",
+ type=str,
+ default="triton",
+ choices=["triton", "torch", "vllm"],
+ help="Inference backend",
+ )
+ args = parser.parse_args()
+
+ if int(os.environ.get("WORLD_SIZE", 1)) == 1:
+ histfile = os.path.join(os.path.expanduser("~"), ".chat")
+ try:
+ readline.read_history_file(histfile)
+ readline.set_history_length(10000)
+ except FileNotFoundError:
+ pass
+
+ atexit.register(readline.write_history_file, histfile)
+
+ main(args)
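
Usage sketch for the chat CLI defined above (the checkpoint path is illustrative; flags follow the argparse definitions in `gpt_oss/chat.py`): `python -m gpt_oss.chat path/to/checkpoint --backend torch --reasoning-effort medium --apply-patch`. Passing `--raw` prints the Harmony-rendered conversation verbatim instead of the colored transcript.
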
diff --git a/gpt_oss/evals/README.md b/gpt_oss/evals/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f0713dcf0c0fc1b5171e6a4917d455dea5e0b7d8
--- /dev/null
+++ b/gpt_oss/evals/README.md
@@ -0,0 +1,4 @@
+# `gpt_oss.evals`
+
+This module is a reincarnation of [simple-evals](https://github.com/openai/simple-evals) adapted for gpt-oss. It lets you
+run GPQA, HealthBench, and AIME 2025 against a runtime that exposes the Responses API (by default at `http://localhost:8000/v1`,
+matching the `--base-url` default in `gpt_oss/evals/__main__.py`).
\ No newline at end of file
diff --git a/gpt_oss/evals/__init__.py b/gpt_oss/evals/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gpt_oss/evals/__main__.py b/gpt_oss/evals/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..40d56c1294a8484844c3258334fda3f6f4731080
--- /dev/null
+++ b/gpt_oss/evals/__main__.py
@@ -0,0 +1,211 @@
+import argparse
+import json
+from datetime import datetime
+
+from . import report
+from .basic_eval import BasicEval
+from .gpqa_eval import GPQAEval
+from .aime_eval import AIME25Eval
+from .healthbench_eval import HealthBenchEval
+from .chat_completions_sampler import (
+ OPENAI_SYSTEM_MESSAGE_API,
+ ChatCompletionsSampler,
+)
+from .responses_sampler import ResponsesSampler
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Evaluate the models.",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "--model",
+ type=str,
+ default="gpt-oss-120b,gpt-oss-20b",
+ help="Select a model by name. Accepts a comma-separated list.",
+ )
+ parser.add_argument(
+ "--reasoning-effort",
+ type=str,
+ default="low,medium,high",
+ help="Reasoning effort (low, medium, high). Accepts a comma-separated list.",
+ )
+ parser.add_argument(
+ "--sampler",
+ type=str,
+ choices=["responses", "chat_completions"],
+ default="responses",
+ help="Sampler backend to use for models.",
+ )
+ parser.add_argument(
+ "--base-url",
+ type=str,
+ default="http://localhost:8000/v1",
+ help="Base URL for the API.",
+ )
+ parser.add_argument(
+ "--eval",
+ type=str,
+ default="gpqa,healthbench,healthbench_hard,healthbench_consensus,aime25",
+ help="Select an eval by name. Accepts a comma-separated list.",
+ )
+ parser.add_argument(
+ "--temperature",
+ type=float,
+ default=1.0,
+ help="Sampling temperature",
+ )
+ parser.add_argument(
+ "--n-threads",
+ type=int,
+ default=1584,
+ help="Number of threads to run.",
+ )
+ parser.add_argument(
+ "--debug", action="store_true", help="Run in debug mode"
+ )
+ parser.add_argument(
+ "--examples", type=int, help="Number of examples to use (overrides default)"
+ )
+
+ args = parser.parse_args()
+
+ sampler_cls = ResponsesSampler if args.sampler == "responses" else ChatCompletionsSampler
+
+ models = {}
+ for model_name in args.model.split(","):
+ for reasoning_effort in args.reasoning_effort.split(","):
+ models[f"{model_name}-{reasoning_effort}"] = sampler_cls(
+ model=model_name,
+ reasoning_model=True,
+ reasoning_effort=reasoning_effort,
+ temperature=args.temperature,
+ base_url=args.base_url,
+ max_tokens=131_072,
+ )
+
+ print(f"Running with args {args}")
+
+ grading_sampler = ChatCompletionsSampler(
+ model="gpt-4.1-2025-04-14",
+ system_message=OPENAI_SYSTEM_MESSAGE_API,
+ max_tokens=2048,
+ base_url="https://api.openai.com/v1",
+ )
+
+ def get_evals(eval_name, debug_mode):
+ num_examples = (
+ args.examples if args.examples is not None else (5 if debug_mode else None)
+ )
+ # Set num_examples = None to reproduce full evals
+ match eval_name:
+ case "basic":
+ return BasicEval()
+ case "gpqa":
+ return GPQAEval(
+ n_repeats=1 if args.debug else 8,
+ num_examples=num_examples,
+ debug=debug_mode,
+ n_threads=args.n_threads or 1,
+ )
+ case "healthbench":
+ return HealthBenchEval(
+ grader_model=grading_sampler,
+ num_examples=10 if debug_mode else num_examples,
+ n_repeats=1,
+ n_threads=args.n_threads or 1,
+ subset_name=None,
+ )
+ case "healthbench_hard":
+ return HealthBenchEval(
+ grader_model=grading_sampler,
+ num_examples=10 if debug_mode else num_examples,
+ n_repeats=1,
+ n_threads=args.n_threads or 1,
+ subset_name="hard",
+ )
+ case "healthbench_consensus":
+ return HealthBenchEval(
+ grader_model=grading_sampler,
+ num_examples=10 if debug_mode else num_examples,
+ n_repeats=1,
+ n_threads=args.n_threads or 1,
+ subset_name="consensus",
+ )
+ case "aime25":
+ return AIME25Eval(
+ n_repeats=1 if args.debug else 8,
+ num_examples=num_examples,
+ n_threads=args.n_threads or 1,
+ )
+ case _:
+ raise Exception(f"Unrecognized eval type: {eval_name}")
+
+ evals = {}
+ for eval_name in args.eval.split(","):
+ evals[eval_name] = get_evals(eval_name, args.debug)
+
+ debug_suffix = "_DEBUG" if args.debug else ""
+ print(debug_suffix)
+ mergekey2resultpath = {}
+ print(f"Running the following evals: {evals}")
+ print(f"Running evals for the following models: {models}")
+
+ now = datetime.now()
+ date_str = now.strftime("%Y%m%d_%H%M%S")
+ for model_name, sampler in models.items():
+ model_name = model_name.replace("/", "__")
+ for eval_name, eval_obj in evals.items():
+ result = eval_obj(sampler)
+ # ^^^ how to use a sampler
+ file_stem = f"{eval_name}_{model_name}_temp{args.temperature}"
+ # file stem should also include the year, month, day, and time in hours and minutes
+ file_stem += f"_{date_str}"
+ report_filename = f"/tmp/{file_stem}{debug_suffix}.html"
+ print(f"Writing report to {report_filename}")
+ with open(report_filename, "w") as fh:
+ fh.write(report.make_report(result))
+ assert result.metrics is not None
+ metrics = result.metrics | {"score": result.score}
+ # Sort metrics by key
+ metrics = dict(sorted(metrics.items()))
+ print(metrics)
+ result_filename = f"/tmp/{file_stem}{debug_suffix}.json"
+ with open(result_filename, "w") as f:
+ f.write(json.dumps(metrics, indent=2))
+ print(f"Writing results to {result_filename}")
+
+ full_result_filename = f"/tmp/{file_stem}{debug_suffix}_allresults.json"
+ with open(full_result_filename, "w") as f:
+ result_dict = {
+ "score": result.score,
+ "metrics": result.metrics,
+ "htmls": result.htmls,
+ "convos": result.convos,
+ "metadata": result.metadata,
+ }
+ f.write(json.dumps(result_dict, indent=2))
+ print(f"Writing all results to {full_result_filename}")
+
+ mergekey2resultpath[f"{file_stem}"] = result_filename
+
+ merge_metrics = []
+ for eval_model_name, result_filename in mergekey2resultpath.items():
+ try:
+            with open(result_filename, "r") as f:
+                result = json.load(f)
+ except Exception as e:
+ print(e, result_filename)
+ continue
+ result = result.get("f1_score", result.get("score", None))
+ eval_name = eval_model_name[: eval_model_name.find("_")]
+ model_name = eval_model_name[eval_model_name.find("_") + 1 :]
+ merge_metrics.append(
+ {"eval_name": eval_name, "model_name": model_name, "metric": result}
+ )
+ print(merge_metrics)
+ return merge_metrics
+
+
+if __name__ == "__main__":
+ main()
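
For reference, a minimal programmatic sketch of what one `python -m gpt_oss.evals --eval=gpqa --model=gpt-oss-20b --reasoning-effort=low` sweep entry boils down to; the base URL and example counts below are illustrative assumptions, not defaults enforced by the module:

```python
from gpt_oss.evals.gpqa_eval import GPQAEval
from gpt_oss.evals.responses_sampler import ResponsesSampler

# One sampler per (model, reasoning effort) pair, as assembled in main() above.
sampler = ResponsesSampler(
    model="gpt-oss-20b",
    reasoning_model=True,
    reasoning_effort="low",
    base_url="http://localhost:8000/v1",
    max_tokens=131_072,
)
# Small debug-sized run; num_examples=None reproduces the full eval.
result = GPQAEval(n_repeats=1, num_examples=5, n_threads=4)(sampler)
print(result.score, result.metrics)
```
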
diff --git a/gpt_oss/evals/abcd_grader.py b/gpt_oss/evals/abcd_grader.py
new file mode 100644
index 0000000000000000000000000000000000000000..37088c8f1ee0bf56c64789ab160719e25a79e3a6
--- /dev/null
+++ b/gpt_oss/evals/abcd_grader.py
@@ -0,0 +1,121 @@
+import re
+import sys
+
+
+_PATTERNS = [
+ # 0)"**Answer:** A" or "*Answers* – B", i.e. markdown‐wrapped "Answer(s)" with an unwrapped letter.
+ re.compile(
+ r'''(?ix) # case‐insensitive, ignore‐space
+ (?:\*{1,2}|_{1,2}) # leading *…* or _…_
+ Answer[s]? # Answer or Answers
+ \s*[:\-–]? # optional separator
+ (?:\*{1,2}|_{1,2}) # closing wrapper
+ \s* # optional space
+ ([ABCD])\b # the actual letter
+ ''',
+ re.X
+ ),
+
+ # 0.1)
+ re.compile(r'''(?ix) # ignore case, allow verbose mode
+ ^\s* # optional leading whitespace
+ (?:\*{1,2}|_{1,2})? # optional markdown wrapper
+ Answer:? # the word 'answer' with an optional colon
+ (?:\*{1,2}|_{1,2})? # optional markdown wrapper again
+ \s*:?\s* # optional colon with optional spaces
+ (?:\*{1,2}|_{1,2})? # optional markdown wrapper before letter
+ ([ABCD]) # capture the letter
+ (?:\*{1,2}|_{1,2})? # optional markdown wrapper after letter
+ \s* # optional trailing whitespace, end of line
+ ''', re.MULTILINE),
+
+ # 1) Answer: (C) or Answers: (B)
+ re.compile(r'(?ix)\bAnswer[s]?\b\s*[:\-–]?\s*\(\s*([ABCD])\s*\)'),
+
+ # 2) Answer: C or Answers – D
+ re.compile(r'(?ix)\bAnswer[s]?\b\s*[:\-–]?\s*([ABCD])\b'),
+
+ # 3) Option B or Choice: C
+ re.compile(r'(?ix)\b(?:Option|Choice)\b\s*[:\-–]?\s*([ABCD])\b'),
+
+ # 7) LaTeX \boxed{...A...}, catches both \boxed{A} and
+ # \boxed{\text{A } 2.08\times10^{-6}\,\mathrm{m}} etc.
+ re.compile(r'(?x)\\boxed\{[^}]*?([ABCD])[^}]*\}', re.MULTILINE),
+
+ # 7.5) LaTeX \boxed{\textbf{...C...}}
+ re.compile(r'(?x)\\boxed\{[^}]*?\\textbf\{[^}]*?([ABCD])[^}]*\}[^}]*\}', re.MULTILINE),
+
+ # 7.51) LaTeX \boxed{\text{...C...}}
+ re.compile(r'(?x)\\boxed\{[^}]*?\\text\{[^}]*?([ABCD])[^}]*\}[^}]*\}', re.MULTILINE),
+
+ # 4) bare singletons: (A) [B]
+    re.compile(r'(?x)(?<![A-Za-z0-9])[\(\[]\s*([ABCD])\s*[\)\]](?![A-Za-z0-9])'),
+]
+
+
+def extract_abcd(text: str) -> str | None:
+ """
+ Scan text (with Markdown/LaTeX wrappers intact) and return
+ 'A', 'B', 'C', or 'D' if a correct-answer declaration is found.
+ Otherwise return None.
+ """
+ matches = []
+ for prio, pat in enumerate(_PATTERNS):
+ m = pat.search(text)
+ if m:
+ letter = m.group(1).upper()
+ if letter in 'ABCD':
+ matches.append((prio, m, letter))
+
+ matches.sort(key=lambda triple: (
+ triple[0],
+ len(triple[1].group(0))
+ ))
+ for _, match, letter in matches:
+ return letter
+ return text.removeprefix('**')[:1]
+
+
+def main():
+ if len(sys.argv) > 1:
+ # Process files
+ for fn in sys.argv[1:]:
+ with open(fn, encoding='utf8') as fp:
+ text = fp.read()
+ ans = extract_abcd(text)
+ print(f"{fn} ➜ {ans!r}")
+ else:
+ # Read from stdin
+ for line in sys.stdin:
+ ans = extract_abcd(line)
+ print(f"{line} ➜ {ans!r}")
+
+
+if __name__ == "__main__":
+ main()
+
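
A small sanity check of the extraction priority; the expected letters assume the pattern list as reconstructed above:

```python
from gpt_oss.evals.abcd_grader import extract_abcd

print(extract_abcd("**Answer:** C and some explanation"))  # 'C' via the markdown-wrapped Answer pattern
print(extract_abcd("The best option is (B)."))             # 'B' via the bare-singleton pattern
print(extract_abcd("no option letter at all"))             # no pattern matches: falls back to the first character, 'n'
```
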
diff --git a/gpt_oss/evals/aime_eval.py b/gpt_oss/evals/aime_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6e9d64b27bf016cd7d7045bd49338f0090695db
--- /dev/null
+++ b/gpt_oss/evals/aime_eval.py
@@ -0,0 +1,97 @@
+"""
+AIME 2025: https://huggingface.co/datasets/opencompass/AIME2025
+"""
+import random
+import re
+import pandas
+from . import report
+
+from .types import Eval, EvalResult, SamplerBase, SingleEvalResult
+
+
+AIME_TEMPLATE = """
+{question}
+Please reason step by step, and put your final answer within \\boxed{{}}.
+"""
+
+def format_aime_question(row):
+ return AIME_TEMPLATE.format(question=row["question"])
+
+def extract_boxed_text(text):
+ pattern = r'boxed{(.*?)}|framebox{(.*?)}'
+ matches = re.findall(pattern, text, re.DOTALL)
+ if matches:
+ for match in matches[::-1]:
+ for group in match:
+ if group != "":
+ return group.split(',')[-1].strip()
+ pattern = r'\d+' # get the last integer if no pattern found
+ matches = re.findall(pattern, text, re.DOTALL)
+ if matches:
+ return matches[-1]
+ return ""
+
+def normalize_number(s):
+ match = re.match(r"\d+", s) # match digits from the start
+ if not match:
+ return None
+ return match.group(0)
+
+class AIME25Eval(Eval):
+ def __init__(
+ self,
+ n_repeats: int = 4,
+ num_examples: int | None = None, # restrict to a subset of the data for debugging
+ n_threads: int = 1,
+ ):
+        path1 = "https://huggingface.co/datasets/opencompass/AIME2025/raw/main/aime2025-I.jsonl"
+        df1 = pandas.read_json(path1, lines=True)
+        path2 = "https://huggingface.co/datasets/opencompass/AIME2025/raw/main/aime2025-II.jsonl"
+        df2 = pandas.read_json(path2, lines=True)
+ examples = [row.to_dict() for _, row in df1.iterrows()] + [row.to_dict() for _, row in df2.iterrows()]
+ examples = [{
+ "question": row["question"],
+ "answer": normalize_number(row["answer"]) if isinstance(row["answer"], str) else row["answer"],
+ } for row in examples]
+ rng = random.Random(0)
+ if num_examples:
+ assert n_repeats == 1, "n_repeats only supported for num_examples = None"
+ examples = rng.sample(examples, num_examples)
+ examples = examples * n_repeats
+ examples = [example | {"permutation": rng.sample(range(4), 4)} for example in examples]
+ self.examples = examples
+ self.n_repeats = n_repeats
+ self.n_threads = n_threads
+
+ def __call__(self, sampler: SamplerBase) -> EvalResult:
+ def fn(row: dict):
+ prompt_messages = [
+ sampler._pack_message(
+ content=format_aime_question(row), role="user"
+ )
+ ]
+ sampler_response = sampler(prompt_messages)
+ response_text = sampler_response.response_text
+ actual_queried_prompt_messages = sampler_response.actual_queried_message_list
+ extracted_answer = extract_boxed_text(response_text)
+ correct_answer = int(row["answer"])
+ try: # All AIME answers are integers, so we convert the extracted answer to an integer
+ extracted_answer = int(extracted_answer)
+ except (ValueError, TypeError):
+ extracted_answer = None
+ score = 1.0 if extracted_answer == correct_answer else 0.0
+ html = report.jinja_env.from_string(report.HTML_JINJA).render(
+ prompt_messages=actual_queried_prompt_messages,
+ next_message=dict(content=response_text, role="assistant"),
+ score=score,
+ correct_answer=correct_answer,
+ extracted_answer=extracted_answer,
+ )
+ convo = actual_queried_prompt_messages + [dict(content=response_text, role="assistant")]
+ return SingleEvalResult(
+ html=html, score=score, convo=convo, metrics={"chars": len(response_text)}
+ )
+
+ results = report.map_with_progress(fn, self.examples, num_threads=self.n_threads)
+ return report.aggregate_results(results)
+
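
A quick illustration of the answer-extraction helpers above (inputs are made up):

```python
from gpt_oss.evals.aime_eval import extract_boxed_text, normalize_number

print(extract_boxed_text(r"Therefore the answer is \boxed{204}."))  # '204'
print(extract_boxed_text("I believe the result is 17"))             # no \boxed{}: falls back to the last integer, '17'
print(normalize_number("070"))                                       # '070' (digits matched from the start of the string)
```
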
diff --git a/gpt_oss/evals/basic_eval.py b/gpt_oss/evals/basic_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..77995307f53a0f1a95c2f5edca9f6834216dd1db
--- /dev/null
+++ b/gpt_oss/evals/basic_eval.py
@@ -0,0 +1,38 @@
+"""
+Basic eval
+"""
+from . import report
+
+from .types import Eval, EvalResult, SamplerBase, SingleEvalResult
+
+class BasicEval(Eval):
+ def __init__(self,):
+ self.examples = [{
+ "question": "hi",
+ "answer": "hi, how can i help?",
+ }]
+
+ def __call__(self, sampler: SamplerBase) -> EvalResult:
+ def fn(row: dict):
+ sampler_response = sampler([
+ sampler._pack_message(content=row["question"], role="user")
+ ])
+ response_text = sampler_response.response_text
+ extracted_answer = response_text
+ actual_queried_prompt_messages = sampler_response.actual_queried_message_list
+ score = 1.0 if len(extracted_answer) > 0 else 0.0
+ html = report.jinja_env.from_string(report.HTML_JINJA).render(
+ prompt_messages=actual_queried_prompt_messages,
+ next_message=dict(content=response_text, role="assistant"),
+ score=score,
+ correct_answer=row["answer"],
+ extracted_answer=extracted_answer,
+ )
+ convo = actual_queried_prompt_messages + [dict(content=response_text, role="assistant")]
+ return SingleEvalResult(
+ html=html, score=score, convo=convo, metrics={"chars": len(response_text)}
+ )
+
+ results = report.map_with_progress(fn, self.examples, num_threads=1)
+ return report.aggregate_results(results)
+
diff --git a/gpt_oss/evals/chat_completions_sampler.py b/gpt_oss/evals/chat_completions_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..29c1a0a894ad8810b28ae7a3c296ac74c8d17f01
--- /dev/null
+++ b/gpt_oss/evals/chat_completions_sampler.py
@@ -0,0 +1,93 @@
+import time
+from typing import Any
+
+import openai
+from openai import OpenAI
+
+from .types import MessageList, SamplerBase, SamplerResponse
+
+
+OPENAI_SYSTEM_MESSAGE_API = "You are a helpful assistant."
+OPENAI_SYSTEM_MESSAGE_CHATGPT = (
+ "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture."
+ + "\nKnowledge cutoff: 2023-12\nCurrent date: 2024-04-01"
+)
+
+
+class ChatCompletionsSampler(SamplerBase):
+ """Sample from a Chat Completions compatible API."""
+
+ def __init__(
+ self,
+ model: str = "gpt-3.5-turbo",
+ system_message: str | None = None,
+ temperature: float = 0.5,
+ max_tokens: int = 1024,
+ reasoning_model: bool = False,
+ reasoning_effort: str | None = None,
+ base_url: str = "http://localhost:8000/v1",
+ ):
+ self.client = OpenAI(base_url=base_url, timeout=24 * 60 * 60)
+ self.model = model
+ self.system_message = system_message
+ self.temperature = temperature
+ self.max_tokens = max_tokens
+ self.reasoning_model = reasoning_model
+ self.reasoning_effort = reasoning_effort
+ self.image_format = "url"
+
+ def _pack_message(self, role: str, content: Any) -> dict[str, Any]:
+ return {"role": str(role), "content": content}
+
+ def __call__(self, message_list: MessageList) -> SamplerResponse:
+ if self.system_message:
+ message_list = [
+ self._pack_message("system", self.system_message)
+ ] + message_list
+ trial = 0
+ while True:
+ try:
+ if self.reasoning_model:
+ response = self.client.chat.completions.create(
+ model=self.model,
+ messages=message_list,
+ reasoning_effort=self.reasoning_effort,
+ temperature=self.temperature,
+ max_tokens=self.max_tokens,
+ )
+ else:
+ response = self.client.chat.completions.create(
+ model=self.model,
+ messages=message_list,
+ temperature=self.temperature,
+ max_tokens=self.max_tokens,
+ )
+
+ choice = response.choices[0]
+ content = choice.message.content
+ if getattr(choice.message, "reasoning", None):
+ message_list.append(self._pack_message("assistant", choice.message.reasoning))
+
+ if not content:
+ raise ValueError("OpenAI API returned empty response; retrying")
+ return SamplerResponse(
+ response_text=content,
+ response_metadata={"usage": response.usage},
+ actual_queried_message_list=message_list,
+ )
+ except openai.BadRequestError as e:
+ print("Bad Request Error", e)
+ return SamplerResponse(
+ response_text="No response (bad request).",
+ response_metadata={"usage": None},
+ actual_queried_message_list=message_list,
+ )
+ except Exception as e:
+ exception_backoff = 2 ** trial # exponential back off
+ print(
+ f"Rate limit exception so wait and retry {trial} after {exception_backoff} sec",
+ e,
+ )
+ time.sleep(exception_backoff)
+ trial += 1
+ # unknown error shall throw exception
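
As a usage sketch, the same construction that `gpt_oss/evals/__main__.py` and the HealthBench grader use; the prompt is illustrative and an `OPENAI_API_KEY` is assumed to be set for the hosted endpoint:

```python
from gpt_oss.evals.chat_completions_sampler import (
    OPENAI_SYSTEM_MESSAGE_API,
    ChatCompletionsSampler,
)

grader = ChatCompletionsSampler(
    model="gpt-4.1-2025-04-14",
    system_message=OPENAI_SYSTEM_MESSAGE_API,
    max_tokens=2048,
    base_url="https://api.openai.com/v1",
)
reply = grader([grader._pack_message(role="user", content="Reply with the single word OK.")])
print(reply.response_text)
```
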
diff --git a/gpt_oss/evals/gpqa_eval.py b/gpt_oss/evals/gpqa_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b12a43565f2895cf260fa9941380a976c9551e4
--- /dev/null
+++ b/gpt_oss/evals/gpqa_eval.py
@@ -0,0 +1,125 @@
+"""
+GPQA: A Graduate-Level Google-Proof Q&A Benchmark
+David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, Samuel R. Bowman
+https://arxiv.org/abs/2311.12022
+"""
+
+import random
+
+import pandas
+
+from . import report
+from .types import Eval, EvalResult, SamplerBase, SingleEvalResult
+from .abcd_grader import extract_abcd
+
+
+QUERY_TEMPLATE_MULTICHOICE = """
+{Question}
+
+(A) {A}
+(B) {B}
+(C) {C}
+(D) {D}
+
+Express your final answer as the corresponding option 'A', 'B', 'C', or 'D'.
+""".strip()
+
+
+def format_multichoice_question(row):
+ return QUERY_TEMPLATE_MULTICHOICE.format(**row)
+
+
+class GPQAEval(Eval):
+ def __init__(
+ self,
+ n_repeats: int = 8,
+ variant: str = "diamond",
+ num_examples: int | None = None, # restrict to a subset of the data for debugging
+ debug: bool = False,
+ n_threads: int = 1,
+ ):
+ df = pandas.read_csv(
+ f"https://openaipublic.blob.core.windows.net/simple-evals/gpqa_{variant}.csv"
+ )
+ rng = random.Random(0)
+
+ if debug:
+ examples = [row.to_dict() for _, row in df.iterrows() if "ESPRESSO spectrograph, please" in row["Question"]]
+ else:
+ examples = [row.to_dict() for _, row in df.iterrows()]
+ if num_examples:
+ assert n_repeats == 1, "n_repeats only supported for num_examples = None"
+ examples = rng.sample(examples, num_examples)
+
+ examples = examples * n_repeats
+ examples = [example | {"permutation": rng.sample(range(4), 4)} for example in examples]
+ self.examples = examples
+ self.n_repeats = n_repeats
+ self.n_threads = n_threads
+
+ def __call__(self, sampler: SamplerBase) -> EvalResult:
+ def fn(row: dict):
+ choices = [
+ row["Correct Answer"],
+ row["Incorrect Answer 1"],
+ row["Incorrect Answer 2"],
+ row["Incorrect Answer 3"],
+ ]
+ choices = [choices[i] for i in row["permutation"]]
+ correct_index = choices.index(row["Correct Answer"])
+ correct_answer = "ABCD"[correct_index]
+ choices_dict = dict(
+ A=choices[0], B=choices[1], C=choices[2], D=choices[3], Question=row["Question"]
+ )
+ prompt_messages = [
+ sampler._pack_message(
+ content=format_multichoice_question(choices_dict), role="user"
+ )
+ ]
+ sampler_response = sampler(prompt_messages)
+ response_text = sampler_response.response_text
+ actual_queried_prompt_messages = sampler_response.actual_queried_message_list
+ extracted_answer = extract_abcd(response_text)
+ score = 1.0 if extracted_answer == correct_answer else 0.0
+ html = report.jinja_env.from_string(report.HTML_JINJA).render(
+ prompt_messages=actual_queried_prompt_messages,
+ next_message=dict(content=response_text, role="assistant"),
+ score=score,
+ correct_answer=correct_answer,
+ extracted_answer=extracted_answer,
+ )
+ convo = actual_queried_prompt_messages + [dict(content=response_text, role="assistant")]
+ return SingleEvalResult(
+ html=html, score=score, convo=convo, metrics={"chars": len(response_text)}
+ )
+
+ results = report.map_with_progress(fn, self.examples, num_threads=self.n_threads)
+ return report.aggregate_results(results)
+
+
+if __name__ == "__main__":
+ import json
+ import sys
+
+ with open(sys.argv[1], "r") as f:
+ results = json.load(f)
+
+ passes = 0
+ for convo, html in zip(results["convos"], results["htmls"]):
+ message = convo[-1]["content"]
+ import re
+
+        # the ground truth is in <p>Correct Answer: A</p> in the html
+        ground_truth = re.search(r"Correct Answer: (A|B|C|D)</p>", html)
+ ground_truth = ground_truth.group(1)
+ extracted_answer = extract_abcd(message)
+ if extracted_answer == ground_truth:
+ passes += 1
+ elif len(message) > 15:
+ print("no match:", message)
+ print("ground truth:", ground_truth)
+ print("extracted answer:", extracted_answer)
+ print("--------------------------------")
+
+ pass_rate = passes / len(results["convos"])
+ print(f"pass@1: {pass_rate}")
\ No newline at end of file
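
To make the shuffling logic in `GPQAEval.__call__` concrete, here is the choice-permutation step in isolation (values are invented):

```python
row = {
    "Correct Answer": "42",
    "Incorrect Answer 1": "41",
    "Incorrect Answer 2": "43",
    "Incorrect Answer 3": "44",
    "permutation": [2, 0, 3, 1],
}
choices = [
    row["Correct Answer"],
    row["Incorrect Answer 1"],
    row["Incorrect Answer 2"],
    row["Incorrect Answer 3"],
]
# Reorder by the per-example permutation, then recover the letter of the correct choice.
choices = [choices[i] for i in row["permutation"]]
correct_letter = "ABCD"[choices.index(row["Correct Answer"])]
print(choices, correct_letter)  # ['43', '42', '44', '41'] B
```
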
diff --git a/gpt_oss/evals/healthbench_eval.py b/gpt_oss/evals/healthbench_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..09d184c1c285d0e2a520595b196917261ecff7b7
--- /dev/null
+++ b/gpt_oss/evals/healthbench_eval.py
@@ -0,0 +1,612 @@
+"""
+This script evaluates the performance of a model on the HealthBench dataset.
+
+To run HealthBench, HealthBench Consensus, or HealthBench Hard, use the simple-evals script:
+- `python -m gpt_oss.evals --eval=healthbench --model=gpt-oss-120b`
+- `python -m gpt_oss.evals --eval=healthbench_consensus --model=gpt-oss-120b`
+- `python -m gpt_oss.evals --eval=healthbench_hard --model=gpt-oss-120b`
+
+You can also evaluate physician ideal completions or reference completions against the HealthBench rubrics. To do so, run the following command:
+- To evaluate physician ideal completions: `python -m gpt_oss.evals.healthbench_eval --run_mode=physician_completions`
+- To evaluate reference model completions used by physicians: `python -m gpt_oss.evals.healthbench_eval --run_mode=physician_completion_references`
+"""
+
+import argparse
+import copy
+import hashlib
+import json
+import random
+import re
+from collections import defaultdict
+from datetime import datetime
+from pathlib import Path
+from typing import Literal
+
+import blobfile as bf
+import numpy as np
+
+from . import report
+from .chat_completions_sampler import (
+ OPENAI_SYSTEM_MESSAGE_API,
+ ChatCompletionsSampler,
+)
+from .types import Eval, EvalResult, MessageList, SamplerBase, SingleEvalResult
+
+INPUT_PATH = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/2025-05-07-06-14-12_oss_eval.jsonl"
+INPUT_PATH_HARD = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/hard_2025-05-08-21-00-10.jsonl"
+INPUT_PATH_CONSENSUS = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/consensus_2025-05-09-20-00-46.jsonl"
+
+GRADER_TEMPLATE = """
+Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item.
+
+# Conversation
+<>
+
+# Rubric item
+<>
+
+# Instructions
+Return a json object with the following fields: "explanation" and "criteria_met".
+- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item.
+- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true if all of the criteria are met.
+- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria.
+
+# Example 1
+For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this:
+
+```json
+{
+ "explanation": "The assistant did not tell the user to go to the hospital if unconscious.",
+ "criteria_met": false
+}
+```
+
+# Example 2
+Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not.
+For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this:
+
+```json
+{
+ "explanation": "The assistant actually gave the user accurate information, so the criteria was not met.",
+ "criteria_met": false
+}
+```
+
+As another example, if the criteria says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this:
+
+```json
+{
+ "explanation": "The response is concise, so the criteria was not met.",
+ "criteria_met": false
+}
+```
+
+In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true.
+
+# Final instruction
+Return just the json object in markdown format. Do not include any other text in the response.
+""".strip()
+
+HEALTHBENCH_HTML_JINJA = (
+ report.HTML_JINJA.replace(
+        "<p>Correct Answer: {{ correct_answer }}</p>\n",
+        "",
+    )
+    + "<p>Rubrics with grades: {{ rubric_grades }}</p>"
+)
+
+
+def parse_json_to_dict(json_string: str) -> dict:
+ # Remove markdown-style ```json``` markers if present
+ json_cleaned = re.sub(r"^```json\s*|\s*```$", "", json_string.strip())
+
+ try:
+ return json.loads(json_cleaned)
+ except json.JSONDecodeError as e:
+ print(f"JSON decoding failed: {e}")
+ return {}
+
+
+class RubricItem:
+ def __init__(self, criterion: str, points: float, tags: list[str]):
+ self.criterion = criterion
+ self.points = points
+ self.tags = tags
+
+ def __str__(self):
+ return f"[{self.points}] {self.criterion}"
+
+ def to_dict(self):
+ return {
+ "criterion": self.criterion,
+ "points": self.points,
+ "tags": self.tags,
+ }
+
+ @classmethod
+ def from_dict(cls, d: dict):
+ return cls(
+ criterion=d["criterion"],
+ points=d["points"],
+ tags=d["tags"],
+ )
+
+
+def calculate_score(
+ rubric_items: list[RubricItem], grading_response_list: list[dict]
+) -> float | None:
+ total_possible_points = sum(
+ rubric_item.points for rubric_item in rubric_items if rubric_item.points > 0
+ )
+ if total_possible_points == 0:
+ # should not happen for overall score, but may happen for tags
+ return None
+
+ achieved_points = sum(
+ rubric_item.points
+ for rubric_item, grading_response in zip(
+ rubric_items, grading_response_list, strict=True
+ )
+ if grading_response["criteria_met"]
+ )
+ overall_score = achieved_points / total_possible_points
+ return overall_score
+
+
+def get_usage_dict(response_usage) -> dict[str, int | None]:
+ if response_usage is None:
+ return {
+ "input_tokens": None,
+ "input_cached_tokens": None,
+ "output_tokens": None,
+ "output_reasoning_tokens": None,
+ "total_tokens": None,
+ }
+
+ return {
+ "input_tokens": response_usage.input_tokens,
+ "output_tokens": response_usage.output_tokens,
+ "total_tokens": response_usage.total_tokens,
+ "input_cached_tokens": None,
+ "output_reasoning_tokens": None,
+ }
+
+
+PHYSICIAN_COMPLETION_MODES = {
+ "Group 1": {
+ "description": "No reference completions were provided to the physicians.",
+ "short_name": "no_reference",
+ "has_reference": False,
+ },
+ "Group 2": {
+ "description": "Reference completions were provided to the physicians from Aug / Sep 2024 models (gpt-4o-2024-08-06, o1-preview).",
+ "short_name": "aug_2024_reference",
+ "has_reference": True,
+ },
+ "Group 3": {
+ "description": "Reference completions were provided to the physicians from Apr 2025 models (o3, gpt-4.1).",
+ "short_name": "apr_2025_reference",
+ "has_reference": True,
+ },
+}
+
+
+def _compute_clipped_stats(
+ values: list,
+ stat: str,
+):
+ """Computes the mean (clipped to [0, 1]), bootstrap std for that mean, and n_samples for final HealthBench scoring."""
+ if stat == "mean":
+ return np.clip(np.mean(values), 0, 1)
+ elif stat == "n_samples":
+ return len(values)
+ elif stat == "bootstrap_std":
+ bootstrap_samples = [np.random.choice(values, len(values)) for _ in range(1000)]
+ bootstrap_means = [
+ _compute_clipped_stats(list(s), "mean") for s in bootstrap_samples
+ ]
+ return np.std(bootstrap_means)
+ else:
+ raise ValueError(f"Unknown {stat =}")
+
+
+def _aggregate_get_clipped_mean(
+ single_eval_results: list[SingleEvalResult],
+) -> EvalResult:
+ """
+ Aggregate multiple SingleEvalResults into a single EvalResult for HealthBench.
+ For each metric, returns the stats in _compute_clipped_stats.
+ """
+ name2values = defaultdict(list)
+ htmls = []
+ convos = []
+ metadata = []
+ for single_eval_result in single_eval_results:
+ for name, value in single_eval_result.metrics.items():
+ name2values[name].append(value)
+ if single_eval_result.score is not None:
+ name2values["score"].append(single_eval_result.score)
+ htmls.append(single_eval_result.html)
+ convos.append(single_eval_result.convo)
+ metadata.append(single_eval_result.example_level_metadata)
+ final_metrics = {}
+ for name, values in name2values.items():
+ for stat in ["mean", "n_samples", "bootstrap_std"]:
+ key = name if stat == "mean" else f"{name}:{stat}"
+ final_metrics[key] = _compute_clipped_stats(values, stat)
+ return EvalResult(
+ score=final_metrics.pop("score", None),
+ metrics=final_metrics,
+ htmls=htmls,
+ convos=convos,
+ metadata={"example_level_metadata": metadata},
+ )
+
+
+class HealthBenchEval(Eval):
+ def __init__(
+ self,
+ grader_model: SamplerBase,
+ num_examples: int | None = None,
+ n_repeats: int = 1,
+ # If set, evaluate human completions or reference completions instead of model completions.
+ physician_completions_mode: str | None = None,
+ # If True, run the grader on reference completions used by physicians, and physician_completions_mode must be set.
+ run_reference_completions: bool = False,
+ n_threads: int = 120,
+ subset_name: Literal["hard", "consensus"] | None = None,
+ ):
+ if run_reference_completions:
+ assert physician_completions_mode is not None, (
+ "physician_completions_mode must be provided if run_reference_completions is True"
+ )
+ assert PHYSICIAN_COMPLETION_MODES[physician_completions_mode][
+ "has_reference"
+ ], (
+ "physician_completions_mode must have reference completions if run_reference_completions is True"
+ )
+
+ if subset_name == "hard":
+ input_path = INPUT_PATH_HARD
+ elif subset_name == "consensus":
+ input_path = INPUT_PATH_CONSENSUS
+ elif subset_name is None:
+ input_path = INPUT_PATH
+ else:
+ assert False, f"Invalid subset name: {subset_name}"
+ with bf.BlobFile(input_path, "rb") as f:
+ examples = [json.loads(line) for line in f]
+ for example in examples:
+ example["rubrics"] = [RubricItem.from_dict(d) for d in example["rubrics"]]
+
+ rng = random.Random(0)
+
+ # physician completions mode
+ self.physician_completions_mode = physician_completions_mode
+ if self.physician_completions_mode is not None:
+ assert self.physician_completions_mode in PHYSICIAN_COMPLETION_MODES, (
+ f"Invalid physician completions mode: {self.physician_completions_mode}; must be one of {PHYSICIAN_COMPLETION_MODES.keys()}"
+ )
+ # subset to only the rows which have physician completions from that group
+ examples_matching_mode = [
+ example
+ for example in examples
+ if example["ideal_completions_data"] is not None
+ and example["ideal_completions_data"]["ideal_completions_group"]
+ == self.physician_completions_mode
+ ]
+ print(
+ f"Subsetting to {len(examples_matching_mode)} examples with physician completions of type {self.physician_completions_mode} ({PHYSICIAN_COMPLETION_MODES[self.physician_completions_mode]['description']})"
+ )
+
+ examples = []
+ if run_reference_completions:
+ for example in examples_matching_mode:
+ for completion in example["ideal_completions_data"][
+ "ideal_completions_ref_completions"
+ ]:
+ new_example = copy.deepcopy(example)
+ new_example["completion_to_trial"] = completion
+ examples.append(new_example)
+ assert len(examples) == len(examples_matching_mode) * 4
+ print(
+ f"Running four references for each example, for {len(examples)} total"
+ )
+ else:
+ for example in examples_matching_mode:
+ example["completion_to_trial"] = example["ideal_completions_data"][
+ "ideal_completion"
+ ]
+ examples.append(example)
+ assert len(examples) == len(examples_matching_mode)
+
+ if len(examples) == 0:
+ raise ValueError(
+ f"No examples found matching mode {self.physician_completions_mode}"
+ )
+
+ if num_examples is not None and num_examples < len(examples):
+ examples = rng.sample(
+ examples,
+ num_examples,
+ )
+
+ self.examples = examples * n_repeats
+ self.n_threads = n_threads
+ self.grader_model = grader_model
+
+ def grade_sample(
+ self,
+ prompt: list[dict[str, str]],
+ response_text: str,
+ example_tags: list[str],
+ rubric_items: list[RubricItem],
+ ) -> tuple[dict, str, list[dict]]:
+ # construct and grade the sample
+ convo_with_response = prompt + [dict(content=response_text, role="assistant")]
+
+ def grade_rubric_item(rubric_item: RubricItem) -> dict:
+ convo_str = "\n\n".join(
+ [f"{m['role']}: {m['content']}" for m in convo_with_response]
+ )
+ grader_prompt = GRADER_TEMPLATE.replace(
+ "<>", convo_str
+ ).replace("<>", str(rubric_item))
+ messages: MessageList = [dict(content=grader_prompt, role="user")]
+ while True:
+ sampler_response = self.grader_model(messages)
+ grading_response = sampler_response.response_text
+ grading_response_dict = parse_json_to_dict(grading_response)
+ if "criteria_met" in grading_response_dict:
+ label = grading_response_dict["criteria_met"]
+ if label is True or label is False:
+ break
+ print("Grading failed due to bad JSON output, retrying...")
+ return grading_response_dict
+
+ grading_response_list = report.map_with_progress(
+ grade_rubric_item,
+ rubric_items,
+ pbar=False,
+ )
+
+ # compute the overall score
+ overall_score = calculate_score(rubric_items, grading_response_list)
+ assert overall_score is not None
+ metrics = {
+ "overall_score": overall_score,
+ }
+
+        # compute scores for example-level tags
+ example_tag_scores = {tag: overall_score for tag in example_tags}
+ assert len(example_tag_scores) == len(example_tags) # No duplicates.
+ metrics.update(example_tag_scores)
+
+ # compute scores for rubric-level tags
+ rubric_tag_items_grades = defaultdict(list)
+ for rubric_item, grading_response in zip(rubric_items, grading_response_list):
+ curr_item_tags = set() # Ensure no duplicates in a rubric item.
+ for tag in rubric_item.tags:
+ rubric_tag_items_grades[tag].append((rubric_item, grading_response))
+ assert tag not in curr_item_tags
+ curr_item_tags.add(tag)
+
+ rubric_tag_scores = {}
+ for tag, items_grades in rubric_tag_items_grades.items():
+ items, grades = zip(*items_grades)
+ score = calculate_score(items, grades)
+ if score is not None: # implies at least one positive criterion
+ rubric_tag_scores[tag] = score
+ metrics.update(rubric_tag_scores)
+
+ # construct the list of explanations and grades
+ rubric_items_with_grades = []
+ readable_explanation_list = []
+ for rubric_item, grading_response in zip(rubric_items, grading_response_list):
+ explanation = grading_response.get("explanation", "No explanation provided")
+ criteria_met = grading_response["criteria_met"]
+ readable_explanation = (
+ f"[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}"
+ )
+ readable_explanation_list.append(readable_explanation)
+ rubric_items_with_grades.append(
+ {
+ **rubric_item.to_dict(),
+ "criteria_met": criteria_met,
+ "explanation": explanation,
+ }
+ )
+
+ readable_explanation_list.sort(
+ key=lambda x: x.startswith("[False]"), reverse=True
+ )
+ readable_explanation_str = "\n\n".join(readable_explanation_list)
+ readable_explanation_str = f"\n\n{readable_explanation_str}"
+
+ return metrics, readable_explanation_str, rubric_items_with_grades
+
+ def __call__(self, sampler: SamplerBase) -> EvalResult:
+ def fn(row: dict):
+ prompt_messages = row["prompt"]
+
+ if self.physician_completions_mode is not None:
+ response_text = row["completion_to_trial"]
+ response_usage = None
+ actual_queried_prompt_messages = prompt_messages
+ else:
+ sampler_response = sampler(prompt_messages)
+ response_text = sampler_response.response_text
+ response_dict = sampler_response.response_metadata
+ actual_queried_prompt_messages = (
+ sampler_response.actual_queried_message_list
+ )
+ response_usage = response_dict.get("usage", None)
+
+ metrics, readable_explanation_str, rubric_items_with_grades = (
+ self.grade_sample(
+ prompt=actual_queried_prompt_messages,
+ response_text=response_text,
+ rubric_items=row["rubrics"],
+ example_tags=row["example_tags"],
+ )
+ )
+
+ score = metrics["overall_score"]
+
+ # Create HTML for each sample result
+ html = report.jinja_env.from_string(
+ HEALTHBENCH_HTML_JINJA.replace(
+ "{{ rubric_grades }}",
+                    readable_explanation_str.replace("\n", "<br>"),
+ )
+ ).render(
+ prompt_messages=actual_queried_prompt_messages,
+ next_message=dict(content=response_text, role="assistant"),
+ score=metrics["overall_score"],
+ extracted_answer=response_text,
+ )
+
+ convo = actual_queried_prompt_messages + [
+ dict(content=response_text, role="assistant")
+ ]
+ return SingleEvalResult(
+ html=html,
+ score=score,
+ convo=convo,
+ metrics=metrics,
+ example_level_metadata={
+ "score": score,
+ "usage": get_usage_dict(response_usage),
+ "rubric_items": rubric_items_with_grades,
+ "prompt": actual_queried_prompt_messages,
+ "completion": [dict(content=response_text, role="assistant")],
+ "prompt_id": row["prompt_id"],
+ "completion_id": hashlib.sha256(
+ (row["prompt_id"] + response_text).encode("utf-8")
+ ).hexdigest(),
+ },
+ )
+
+ results = report.map_with_progress(
+ fn,
+ self.examples,
+ num_threads=self.n_threads,
+ pbar=True,
+ )
+ final_metrics = _aggregate_get_clipped_mean(results)
+ return final_metrics
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="HealthBenchEval specific run options, including e.g., running the eval on physician completions rows only."
+ )
+ parser.add_argument(
+ "--run_mode",
+ type=str,
+ choices=["physician_completions", "physician_completion_references"],
+ )
+ parser.add_argument("--examples", type=int, help="Number of examples to run")
+ parser.add_argument(
+ "--n-threads",
+ type=int,
+ default=120,
+ help="Number of threads to run",
+ )
+ args = parser.parse_args()
+
+ if args.run_mode == "physician_completions":
+ physician_completions_main(
+ run_reference_completions=False,
+ num_examples=args.examples,
+ n_threads=args.n_threads or 1,
+ )
+ elif args.run_mode == "physician_completion_references":
+ physician_completions_main(
+ run_reference_completions=True,
+ num_examples=args.examples,
+ n_threads=args.n_threads or 1,
+ )
+
+ else:
+ raise ValueError(f"Invalid run mode: {args.run_mode}")
+
+
+def physician_completions_main(
+ run_reference_completions: bool = False,
+ num_examples: int | None = None,
+ n_threads: int = 120,
+):
+ now = datetime.now()
+ date_str = now.strftime("%Y%m%d_%H%M")
+
+ grading_sampler = ChatCompletionsSampler(
+ model="gpt-4.1-2025-04-14",
+ system_message=OPENAI_SYSTEM_MESSAGE_API,
+ max_tokens=2048,
+ base_url="https://api.openai.com/v1",
+ )
+ dummy_sampler = SamplerBase()
+
+ merge_metrics = []
+ for pc_mode in PHYSICIAN_COMPLETION_MODES.keys():
+ if (
+ run_reference_completions
+ and not PHYSICIAN_COMPLETION_MODES[pc_mode]["has_reference"]
+ ):
+ continue
+
+ # run
+ eval = HealthBenchEval(
+ grader_model=grading_sampler,
+ physician_completions_mode=pc_mode,
+ run_reference_completions=run_reference_completions,
+ num_examples=num_examples,
+ n_threads=n_threads,
+ )
+ result = eval(dummy_sampler)
+
+ # report
+ parsable_mode = PHYSICIAN_COMPLETION_MODES[pc_mode]["short_name"]
+ if run_reference_completions:
+ file_stem = f"healthbench_{parsable_mode}_referencecompletions_{date_str}"
+ else:
+ file_stem = f"healthbench_{parsable_mode}_humanbaseline_{date_str}"
+ report_filename = Path(f"/tmp/{file_stem}.html")
+ report_filename.write_text(report.make_report(result))
+ print(f"Report saved to {report_filename}")
+
+ # metrics
+ assert result.metrics is not None
+ metrics = result.metrics
+ result_filename = Path(f"/tmp/{file_stem}.json")
+ result_filename.write_text(json.dumps(metrics))
+ print(f"Results saved to {result_filename}")
+
+ full_result_dict = {
+ "score": result.score,
+ "metrics": result.metrics,
+ "htmls": result.htmls,
+ "convos": result.convos,
+ "metadata": result.metadata,
+ }
+ full_result_filename = Path(f"/tmp/{file_stem}_allresults.json")
+ full_result_filename.write_text(json.dumps(full_result_dict, indent=2))
+ print(f"All results saved to {full_result_filename}")
+
+ # metrics df
+ merge_metrics.append(
+ {
+ "eval_name": "healthbench",
+ "model_name": f"{pc_mode} ({PHYSICIAN_COMPLETION_MODES[pc_mode]['description']})",
+ "metric": metrics.get("overall_score", None),
+ }
+ )
+
+ print("\nAll results: ")
+ print(merge_metrics)
+ return merge_metrics
+
+
+if __name__ == "__main__":
+ main()
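
A small sketch of how `calculate_score` treats negative rubric points (criteria and point values are invented):

```python
from gpt_oss.evals.healthbench_eval import RubricItem, calculate_score

rubric = [
    RubricItem(criterion="States the key safety risk", points=5, tags=[]),
    RubricItem(criterion="Is overly verbose", points=-2, tags=[]),
]
grades = [{"criteria_met": True}, {"criteria_met": True}]
# Only positive points enter the denominator, so meeting the undesirable
# criterion pulls the score down: (5 - 2) / 5 = 0.6.
print(calculate_score(rubric, grades))
```
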
diff --git a/gpt_oss/evals/report.py b/gpt_oss/evals/report.py
new file mode 100644
index 0000000000000000000000000000000000000000..787dd1fc47da319ad901e68c752d73a063a09c16
--- /dev/null
+++ b/gpt_oss/evals/report.py
@@ -0,0 +1,207 @@
+import os
+from collections import defaultdict
+from multiprocessing.pool import ThreadPool
+from typing import Any, Callable
+
+import jinja2
+import numpy as np
+from tqdm import tqdm
+
+from .types import EvalResult, Message, SingleEvalResult
+
+
+HTML_JINJA = """
+<h3>Prompt conversation</h3>
+{% for message in prompt_messages %}
+{{ message_to_html(message) | safe }}
+{% endfor %}
+<h3>Sampled message</h3>
+{{ message_to_html(next_message) | safe }}
+<h3>Results</h3>
+<p>Correct Answer: {{ correct_answer }}</p>
+<p>Extracted Answer: {{ extracted_answer }}</p>
+<p>Score: {{ score }}</p>
+"""
+
+
+def _compute_stat(values: list, stat: str):
+ if stat == "mean":
+ return np.mean(values)
+ elif stat == "std":
+ return np.std(values)
+ elif stat == "min":
+ return np.min(values)
+ elif stat == "max":
+ return np.max(values)
+ elif stat == "n_samples":
+ return len(values)
+ elif stat == "bootstrap_std":
+ return np.std(
+ [np.mean(np.random.choice(values, len(values))) for _ in range(1000)]
+ )
+ else:
+ raise ValueError(f"Unknown {stat =}")
+
+
+def aggregate_results(
+ single_eval_results: list[SingleEvalResult],
+ default_stats: tuple[str, ...] = ("mean", "std"),
+ name2stats: dict[str, tuple[str]] | None = None,
+) -> EvalResult:
+ """
+ Aggregate results from multiple evaluations into a single EvalResult.
+ """
+ name2stats = name2stats or {}
+ name2values = defaultdict(list)
+ htmls = []
+ convos = []
+ metadata = []
+ for single_eval_result in single_eval_results:
+ for name, value in single_eval_result.metrics.items():
+ name2values[name].append(value)
+ if single_eval_result.score is not None:
+ name2values["score"].append(single_eval_result.score)
+ htmls.append(single_eval_result.html)
+ convos.append(single_eval_result.convo)
+ metadata.append(single_eval_result.example_level_metadata)
+ final_metrics = {}
+ for name, values in name2values.items():
+ stats = name2stats.get(name, default_stats)
+ for stat in stats:
+ key = name if stat == "mean" else f"{name}:{stat}"
+ final_metrics[key] = _compute_stat(values, stat)
+ return EvalResult(
+ score=final_metrics.pop("score", None),
+ metrics=final_metrics,
+ htmls=htmls,
+ convos=convos,
+ metadata={"example_level_metadata": metadata},
+ )
+
+
+def map_with_progress(
+ f: Callable,
+ xs: list[Any],
+ num_threads: int = 128,
+ pbar: bool = True,
+):
+ """
+ Apply f to each element of xs, using a ThreadPool, and show progress.
+ """
+ pbar_fn = tqdm if pbar else lambda x, *args, **kwargs: x
+
+ if os.getenv("debug"):
+ return list(map(f, pbar_fn(xs, total=len(xs))))
+ else:
+ with ThreadPool(min(num_threads, len(xs))) as pool:
+ return list(pbar_fn(pool.imap_unordered(f, xs), total=len(xs)))
+
+
+jinja_env = jinja2.Environment(
+ loader=jinja2.BaseLoader(),
+ undefined=jinja2.StrictUndefined,
+ autoescape=jinja2.select_autoescape(["html", "xml"]),
+)
+_message_template = """
+<div class="message {{ role }}">
+    <div class="role">
+        {{ role }}
+        {% if variant %}<span class="variant">({{ variant }})</span>{% endif %}
+    </div>
+    <div class="content">
+        <pre>{{ content }}</pre>
+    </div>
+</div>
+"""
+
+
+def message_to_html(message: Message) -> str:
+ """
+    Generate HTML snippet (inside a <div>) for a message.
+ """
+ return jinja_env.from_string(_message_template).render(
+ role=message["role"],
+ content=message["content"],
+ variant=message.get("variant", None),
+ )
+
+
+jinja_env.globals["message_to_html"] = message_to_html
+
+
+_report_template = """<!DOCTYPE html>
+<html>
+    <head>
+        <style>
+            .message { padding: 8px 16px; margin-bottom: 8px; border-radius: 4px; }
+            .role { font-weight: bold; margin-bottom: 4px; }
+            .content { white-space: pre-wrap; }
+            table, th, td { border: 1px solid black; }
+            pre { white-space: pre-wrap; }
+        </style>
+    </head>
+    <body>
+    {% if metrics %}
+    <h1>Metrics</h1>
+    <table>
+    <tr>
+        <th>Metric</th>
+        <th>Value</th>
+    </tr>
+    <tr>
+        <td><b>Score</b></td>
+        <td>{{ score | float | round(3) }}</td>
+    </tr>
+    {% for name, value in metrics.items() %}
+    <tr>
+        <td>{{ name }}</td>
+        <td>{{ value }}</td>
+    </tr>
+    {% endfor %}
+    </table>
+    {% endif %}
+    <h1>Examples</h1>
+    {% for html in htmls %}
+    {{ html | safe }}
+    <hr>
+    {% endfor %}
+    </body>
+</html>
+"""
+
+
+def make_report(eval_result: EvalResult) -> str:
+ """
+ Create a standalone HTML report from an EvalResult.
+ """
+ return jinja_env.from_string(_report_template).render(
+ score=eval_result.score,
+ metrics=eval_result.metrics,
+ htmls=eval_result.htmls,
+ )
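
For illustration, the aggregation and report helpers can be exercised on hand-made results (the output path is arbitrary):

```python
from gpt_oss.evals.report import aggregate_results, make_report
from gpt_oss.evals.types import SingleEvalResult

single_results = [
    SingleEvalResult(score=1.0, metrics={"chars": 42}, html="<p>correct</p>", convo=[]),
    SingleEvalResult(score=0.0, metrics={"chars": 7}, html="<p>incorrect</p>", convo=[]),
]
eval_result = aggregate_results(single_results)
print(eval_result.score, eval_result.metrics)  # mean score plus chars / chars:std / score:std

with open("/tmp/example_report.html", "w") as fh:
    fh.write(make_report(eval_result))
```
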
diff --git a/gpt_oss/evals/responses_sampler.py b/gpt_oss/evals/responses_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..134303f5db715f7b6a0261e45cfcf8b5c902f762
--- /dev/null
+++ b/gpt_oss/evals/responses_sampler.py
@@ -0,0 +1,85 @@
+import time
+from typing import Any
+
+import openai
+from openai import OpenAI
+
+from .types import MessageList, SamplerBase, SamplerResponse
+
+
+class ResponsesSampler(SamplerBase):
+ """
+ Sample from OpenAI's responses API
+ """
+
+ def __init__(
+ self,
+ model: str,
+ developer_message: str | None = None,
+ temperature: float = 1.0,
+ max_tokens: int = 131_072,
+ reasoning_model: bool = False,
+ reasoning_effort: str | None = None,
+ base_url: str = "http://localhost:8000/v1",
+ ):
+ self.client = OpenAI(base_url=base_url, timeout=24*60*60)
+ self.model = model
+ self.developer_message = developer_message
+ self.temperature = temperature
+ self.max_tokens = max_tokens
+ self.image_format = "url"
+ self.reasoning_model = reasoning_model
+ self.reasoning_effort = reasoning_effort
+
+ def _pack_message(self, role: str, content: Any) -> dict[str, Any]:
+ return {"role": role, "content": content}
+
+ def __call__(self, message_list: MessageList) -> SamplerResponse:
+ if self.developer_message:
+ message_list = [
+ self._pack_message("developer", self.developer_message)
+ ] + message_list
+ trial = 0
+ while True:
+ try:
+ request_kwargs = {
+ "model": self.model,
+ "input": message_list,
+ "temperature": self.temperature,
+ "max_output_tokens": self.max_tokens,
+ }
+ if self.reasoning_model:
+ request_kwargs["reasoning"] = (
+ {"effort": self.reasoning_effort} if self.reasoning_effort else None
+ )
+ response = self.client.responses.create(**request_kwargs)
+
+                for output in response.output:
+                    if hasattr(output, "text"):
+                        message_list.append(self._pack_message(getattr(output, "role", "assistant"), output.text))
+                    elif hasattr(output, "content"):
+                        for c in output.content:
+                            if hasattr(c, "text"):
+                                message_list.append(self._pack_message(getattr(output, "role", "assistant"), c.text))
+
+ return SamplerResponse(
+ response_text=response.output_text,
+ response_metadata={"usage": response.usage},
+ actual_queried_message_list=message_list,
+ )
+ except openai.BadRequestError as e:
+ print("Bad Request Error", e)
+ return SamplerResponse(
+ response_text="",
+ response_metadata={"usage": None},
+ actual_queried_message_list=message_list,
+ )
+            except Exception as e:
+                exception_backoff = 2**trial  # exponential backoff
+                print(
+                    f"Exception on attempt {trial}, retrying after {exception_backoff} sec",
+                    e,
+                )
+                time.sleep(exception_backoff)
+                trial += 1
+        # unknown errors are retried indefinitely with exponential backoff
diff --git a/gpt_oss/evals/types.py b/gpt_oss/evals/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f2b42d75705d4717d3204254bb365ccb326013d
--- /dev/null
+++ b/gpt_oss/evals/types.py
@@ -0,0 +1,66 @@
+from dataclasses import dataclass, field
+from typing import Any, Literal, overload
+
+Message = dict[str, Any]  # keys: role, content
+MessageList = list[Message]
+
+
+@dataclass
+class SamplerResponse:
+ """
+ Response from a sampler.
+ """
+ response_text: str
+ actual_queried_message_list: MessageList
+ response_metadata: dict[str, Any]
+
+class SamplerBase:
+ """
+ Base class for defining a sampling model, which can be evaluated,
+ or used as part of the grading process.
+ """
+
+ def __call__(
+ self,
+ message_list: MessageList,
+ ) -> SamplerResponse:
+ raise NotImplementedError
+
+
+@dataclass
+class EvalResult:
+ """
+ Result of running an evaluation (usually consisting of many samples)
+ """
+
+ score: float | None # top-line metric
+ metrics: dict[str, float] | None # other metrics
+ htmls: list[str] # strings of valid HTML
+ convos: list[MessageList] # sampled conversations
+ metadata: dict[str, Any] | None # Extra data such as rubric scores or sollen
+
+
+@dataclass
+class SingleEvalResult:
+ """
+ Result of evaluating a single sample
+ """
+
+ score: float | None
+ metrics: dict[str, float] = field(default_factory=dict)
+ html: str | None = None
+ convo: MessageList | None = None # sampled conversation
+ example_level_metadata: dict[str, Any] | None = (
+ None # Extra data such as rubric scores or sollen
+ )
+
+
+class Eval:
+ """
+ Base class for defining an evaluation.
+ """
+
+ def __call__(self, sampler: SamplerBase) -> EvalResult:
+ raise NotImplementedError
+
diff --git a/gpt_oss/generate.py b/gpt_oss/generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0755805eb11063c3ce32158b596cb026fd145e5
--- /dev/null
+++ b/gpt_oss/generate.py
@@ -0,0 +1,95 @@
+# Model parallel inference
+# Note: This script is for demonstration purposes only. It is not designed for production use.
+# See gpt_oss.chat for a more complete example with the Harmony parser.
+# torchrun --nproc-per-node=4 -m gpt_oss.generate -p "why did the chicken cross the road?" model/
+
+import argparse
+
+from gpt_oss.tokenizer import get_tokenizer
+
+
+def main(args):
+ match args.backend:
+ case "torch":
+ from gpt_oss.torch.utils import init_distributed
+ from gpt_oss.torch.model import TokenGenerator as TorchGenerator
+ device = init_distributed()
+ generator = TorchGenerator(args.checkpoint, device=device)
+ case "triton":
+ from gpt_oss.torch.utils import init_distributed
+ from gpt_oss.triton.model import TokenGenerator as TritonGenerator
+ device = init_distributed()
+ generator = TritonGenerator(args.checkpoint, context=args.context_length, device=device)
+ case "vllm":
+ from gpt_oss.vllm.token_generator import TokenGenerator as VLLMGenerator
+ generator = VLLMGenerator(args.checkpoint, tensor_parallel_size=args.tensor_parallel_size)
+ case _:
+ raise ValueError(f"Invalid backend: {args.backend}")
+
+ tokenizer = get_tokenizer()
+ tokens = tokenizer.encode(args.prompt)
+ max_tokens = None if args.limit == 0 else args.limit
+ for token, logprob in generator.generate(tokens, stop_tokens=[tokenizer.eot_token], temperature=args.temperature, max_tokens=max_tokens, return_logprobs=True):
+ tokens.append(token)
+ token_text = tokenizer.decode([token])
+ print(
+ f"Generated token: {repr(token_text)}, logprob: {logprob}"
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Text generation example")
+ parser.add_argument(
+ "checkpoint",
+ metavar="FILE",
+ type=str,
+ help="Path to the SafeTensors checkpoint",
+ )
+ parser.add_argument(
+ "-p",
+ "--prompt",
+ metavar="PROMPT",
+ type=str,
+ default="How are you?",
+ help="LLM prompt",
+ )
+ parser.add_argument(
+ "-t",
+ "--temperature",
+ metavar="TEMP",
+ type=float,
+ default=0.0,
+ help="Sampling temperature",
+ )
+ parser.add_argument(
+ "-l",
+ "--limit",
+ metavar="LIMIT",
+ type=int,
+ default=0,
+ help="Limit on the number of tokens (0 to disable)",
+ )
+ parser.add_argument(
+ "-b",
+ "--backend",
+ metavar="BACKEND",
+ type=str,
+ default="torch",
+ choices=["triton", "torch", "vllm"],
+ help="Inference backend",
+ )
+ parser.add_argument(
+ "--tensor-parallel-size",
+ type=int,
+ default=2,
+ help="Tensor parallel size for vLLM backend",
+ )
+ parser.add_argument(
+ "--context-length",
+ type=int,
+ default=4096,
+ help="Context length for Triton backend",
+ )
+ args = parser.parse_args()
+
+ main(args)
diff --git a/gpt_oss/metal/CMakeLists.txt b/gpt_oss/metal/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..264696e1dce9f30c8c959fcd955a6e283e0e2c45
--- /dev/null
+++ b/gpt_oss/metal/CMakeLists.txt
@@ -0,0 +1,189 @@
+cmake_minimum_required(VERSION 3.24)
+project(GPTOSS
+ VERSION 1.0
+ DESCRIPTION "Local GPT-OSS inference"
+ LANGUAGES C CXX OBJC)
+
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_CXX_STANDARD 20)
+set(CMAKE_OBJC_STANDARD 11)
+set(CMAKE_OBJC_STANDARD_REQUIRED ON)
+
+find_library(FOUNDATION_FRAMEWORK Foundation REQUIRED)
+find_library(METAL_FRAMEWORK Metal REQUIRED)
+find_library(IOKIT_FRAMEWORK IOKit REQUIRED)
+
+set(METAL_SOURCES
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/accumulate.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/convert.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/embeddings.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/gather_and_accumulate.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/matmul.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/moematmul.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/random.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/rmsnorm.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/rope.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/sample.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/scatter.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/sdpa.metal
+ ${CMAKE_CURRENT_SOURCE_DIR}/source/topk.metal
+)
+set(METAL_LIB default.metallib)
+
+include_directories(BEFORE include source/include)
+
+add_custom_command(
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${METAL_LIB}
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_CURRENT_BINARY_DIR}/source/"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/accumulate.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/accumulate.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/convert.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/convert.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/embeddings.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/embeddings.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/matmul.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/matmul.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/moematmul.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/moematmul.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/gather_and_accumulate.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/gather_and_accumulate.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/random.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/random.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/rmsnorm.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/rmsnorm.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/rope.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/rope.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/sample.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/sample.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/scatter.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/scatter.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/sdpa.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/sdpa.air"
+ COMMAND xcrun -sdk macosx metal -g "-I${CMAKE_CURRENT_SOURCE_DIR}/source/include" -c "${CMAKE_CURRENT_SOURCE_DIR}/source/topk.metal" -o "${CMAKE_CURRENT_BINARY_DIR}/source/topk.air"
+ COMMAND xcrun -sdk macosx metallib "${CMAKE_CURRENT_BINARY_DIR}/source/accumulate.air" "${CMAKE_CURRENT_BINARY_DIR}/source/convert.air" "${CMAKE_CURRENT_BINARY_DIR}/source/embeddings.air" "${CMAKE_CURRENT_BINARY_DIR}/source/gather_and_accumulate.air" "${CMAKE_CURRENT_BINARY_DIR}/source/matmul.air" "${CMAKE_CURRENT_BINARY_DIR}/source/moematmul.air" "${CMAKE_CURRENT_BINARY_DIR}/source/random.air" "${CMAKE_CURRENT_BINARY_DIR}/source/rmsnorm.air" "${CMAKE_CURRENT_BINARY_DIR}/source/rope.air" "${CMAKE_CURRENT_BINARY_DIR}/source/sample.air" "${CMAKE_CURRENT_BINARY_DIR}/source/scatter.air" "${CMAKE_CURRENT_BINARY_DIR}/source/sdpa.air" "${CMAKE_CURRENT_BINARY_DIR}/source/topk.air" -o "${METAL_LIB}"
+ DEPENDS ${METAL_SOURCES}
+ COMMENT "Compiling Metal compute library"
+)
+
+add_custom_target(build_metallib ALL
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${METAL_LIB})
+
+add_library(log OBJECT source/log.c)
+
+add_library(metal-kernels STATIC source/metal.m source/metal-kernels.c)
+target_link_libraries(metal-kernels PRIVATE log)
+
+add_dependencies(metal-kernels build_metallib)
+add_custom_command(TARGET metal-kernels POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_BINARY_DIR}/${METAL_LIB}
+        $<TARGET_FILE_DIR:metal-kernels>)
+
+target_link_libraries(metal-kernels PRIVATE ${FOUNDATION_FRAMEWORK} ${METAL_FRAMEWORK} ${IOKIT_FRAMEWORK})
+
+add_library(gptoss STATIC source/model.c source/tokenizer.c source/context.c)
+target_link_libraries(gptoss PRIVATE log metal-kernels)
+
+add_executable(generate source/generate.c)
+target_link_libraries(generate gptoss)
+
+# --- [ Tests
+include(FetchContent)
+FetchContent_Declare(
+ googletest
+ URL https://github.com/google/googletest/archive/refs/tags/v1.17.0.zip
+ DOWNLOAD_EXTRACT_TIMESTAMP OFF
+)
+# For Windows: Prevent overriding the parent project's compiler/linker settings
+set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+set(INSTALL_GTEST OFF CACHE BOOL "" FORCE)
+FetchContent_MakeAvailable(googletest)
+
+enable_testing()
+
+add_executable(u32-random-test test/u32-random.cc)
+target_link_libraries(u32-random-test PRIVATE GTest::gtest_main metal-kernels)
+target_include_directories(u32-random-test PRIVATE source/include)
+add_test(NAME u32-random-test COMMAND u32-random-test)
+
+add_executable(f32-random-test test/f32-random.cc)
+target_link_libraries(f32-random-test PRIVATE GTest::gtest_main metal-kernels)
+target_include_directories(f32-random-test PRIVATE source/include)
+add_test(NAME f32-random-test COMMAND f32-random-test)
+
+add_executable(mf4-f32-convert-test test/mf4-f32-convert.cc)
+target_link_libraries(mf4-f32-convert-test PRIVATE GTest::gtest_main metal-kernels)
+target_include_directories(mf4-f32-convert-test PRIVATE source/include)
+add_test(NAME mf4-f32-convert-test COMMAND mf4-f32-convert-test)
+
+add_executable(bf16-f32-embeddings-test test/bf16-f32-embeddings.cc)
+target_link_libraries(bf16-f32-embeddings-test PRIVATE GTest::gtest_main metal-kernels)
+target_include_directories(bf16-f32-embeddings-test PRIVATE source/include)
+add_test(NAME bf16-f32-embeddings-test COMMAND bf16-f32-embeddings-test)
+
+add_executable(f32-bf16w-rmsnorm-test test/f32-bf16w-rmsnorm.cc)
+target_link_libraries(f32-bf16w-rmsnorm-test PRIVATE GTest::gtest_main metal-kernels)
+target_include_directories(f32-bf16w-rmsnorm-test PRIVATE source/include)
+add_test(NAME f32-bf16w-rmsnorm-test COMMAND f32-bf16w-rmsnorm-test)
+
+add_executable(f32-bf16w-matmul-test test/f32-bf16w-matmul.cc)
+target_link_libraries(f32-bf16w-matmul-test PRIVATE GTest::gtest_main metal-kernels)
+target_include_directories(f32-bf16w-matmul-test PRIVATE source/include)
+add_test(NAME f32-bf16w-matmul-test COMMAND f32-bf16w-matmul-test)
+
+add_executable(f32-rope-test test/f32-rope.cc)
+target_link_libraries(f32-rope-test PRIVATE GTest::gtest_main metal-kernels)
+target_include_directories(f32-rope-test PRIVATE source/include)
+add_test(NAME f32-rope-test COMMAND f32-rope-test)
+
+# --- [ Benchmarks
+include(FetchContent)
+set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "Disable self-tests in Google Benchmark" FORCE)
+set(BENCHMARK_ENABLE_INSTALL OFF CACHE BOOL "Disable installation of Google Benchmark" FORCE)
+FetchContent_Declare(
+ benchmark
+ URL https://github.com/google/benchmark/archive/refs/tags/v1.9.4.zip
+ DOWNLOAD_EXTRACT_TIMESTAMP OFF
+)
+FetchContent_MakeAvailable(benchmark)
+
+add_executable(f32-random-bench benchmark/f32-random.cc)
+target_link_libraries(f32-random-bench PRIVATE benchmark::benchmark metal-kernels)
+target_include_directories(f32-random-bench PRIVATE source/include)
+
+add_executable(u32-random-bench benchmark/u32-random.cc)
+target_link_libraries(u32-random-bench PRIVATE benchmark::benchmark metal-kernels)
+target_include_directories(u32-random-bench PRIVATE source/include)
+
+add_executable(mf4-f32-convert-bench benchmark/mf4-f32-convert.cc)
+target_link_libraries(mf4-f32-convert-bench PRIVATE benchmark::benchmark metal-kernels)
+target_include_directories(mf4-f32-convert-bench PRIVATE source/include)
+
+add_executable(f32-bf16w-rmsnorm-bench benchmark/f32-bf16w-rmsnorm.cc)
+target_link_libraries(f32-bf16w-rmsnorm-bench PRIVATE benchmark::benchmark metal-kernels)
+target_include_directories(f32-bf16w-rmsnorm-bench PRIVATE source/include)
+
+add_executable(end-to-end-bench benchmark/end-to-end.cc)
+target_link_libraries(end-to-end-bench PRIVATE benchmark::benchmark gptoss)
+target_include_directories(end-to-end-bench PRIVATE source/include)
+
+add_executable(end-to-end-threadgroup-bench benchmark/end-to-end-threadgroup.cc)
+target_link_libraries(end-to-end-threadgroup-bench PRIVATE benchmark::benchmark gptoss)
+target_include_directories(end-to-end-threadgroup-bench PRIVATE source/include)
+
+# --- [ Python extension ] -----------------------------------------------
+find_package(pybind11 CONFIG REQUIRED) # provides pybind11_add_module
+
+pybind11_add_module(_metal
+ python/module.c
+ python/context.c
+ python/model.c
+ python/tokenizer.c
+)
+set_target_properties(_metal PROPERTIES PREFIX "")
+
+target_link_libraries(_metal PRIVATE gptoss)
+add_dependencies(_metal build_metallib)
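+# Embed the compiled metallib into the extension binary as a __METAL,__shaders section,
+# so the shaders can be loaded from the module itself rather than only from a separate file.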
+target_link_options(_metal PRIVATE
+ LINKER:-sectcreate,__METAL,__shaders,${CMAKE_CURRENT_BINARY_DIR}/${METAL_LIB}
+)
+add_custom_command(TARGET _metal POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_BINARY_DIR}/${METAL_LIB}
+        $<TARGET_FILE_DIR:_metal>)
+
+# 1️⃣ install the extension module into the Python package
+install(TARGETS _metal LIBRARY DESTINATION gpt_oss/metal)
+
+# 2️⃣ make sure the Metal shader archive travels with it
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${METAL_LIB}
+ DESTINATION gpt_oss/metal)
+# ------------------------------------------------------------------------
\ No newline at end of file
diff --git a/gpt_oss/metal/__init__.py b/gpt_oss/metal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbddbe918a81bdc37793c013d0f268b6c273a9de
--- /dev/null
+++ b/gpt_oss/metal/__init__.py
@@ -0,0 +1,6 @@
+from importlib import import_module as _im
+
+# Load the compiled extension (gpt_oss.metal._metal)
+_ext = _im(f"{__name__}._metal")
+globals().update({k: v for k, v in _ext.__dict__.items() if not k.startswith("_")})
+del _im, _ext
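+# Illustrative usage: `from gpt_oss.metal import Model, Context` picks up the public
+# names re-exported above from the compiled _metal extension.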
diff --git a/gpt_oss/metal/benchmark/end-to-end-threadgroup.cc b/gpt_oss/metal/benchmark/end-to-end-threadgroup.cc
new file mode 100644
index 0000000000000000000000000000000000000000..82048cd1dece65d4ce72102f318854dd7a25114b
--- /dev/null
+++ b/gpt_oss/metal/benchmark/end-to-end-threadgroup.cc
@@ -0,0 +1,590 @@
+#include <gpt-oss.h>
+#include <internal/model.h>  // NOTE: internal header path assumed; provides the struct fields used below
+
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <format>
+#include <memory>
+#include <type_traits>
+
+#include <benchmark/benchmark.h>
+
+
+constexpr std::uint32_t kNumGeneratedTokens = 100;
+
+
+static void attn_qkv_tgsize(benchmark::State& state, const char* env_var_name) {
+ const char* model_path = getenv(env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set", env_var_name));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>, decltype(&gptoss_model_release)> model(model_ptr, gptoss_model_release);
+    model->attn_qkv_threadgroup_size = static_cast<std::size_t>(state.range(0));
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(), /*context_length=*/0, /*max_batch_tokens=*/0, &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>, decltype(&gptoss_context_release)> context(context_ptr, gptoss_context_release);
+
+ const char* prompt = "why did the chicken cross the road?";
+ std::size_t num_prompt_tokens = 0;
+ status = gptoss_context_append_chars(context.get(), prompt, strlen(prompt), &num_prompt_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to tokenize prompt \"{}\"", prompt));
+ return;
+ }
+
+ // Prefill
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ const std::size_t num_kvcache_tokens = context->num_kv_tokens;
+
+ std::uint64_t rng_seed = 0;
+ for (auto _ : state) {
+ const std::uint64_t current_rng_seed = rng_seed++;
+ context->num_kv_tokens = num_prompt_tokens;
+ context->num_tokens = num_prompt_tokens;
+
+        std::array<std::uint32_t, kNumGeneratedTokens> tokens;
+ std::size_t num_generated_tokens = 0;
+ do {
+ std::size_t num_current_generated_tokens = 0;
+ status = gptoss_context_sample(context.get(), /*temperature=*/1.0f, /*rng_state=*/current_rng_seed,
+ /*max_tokens=*/kNumGeneratedTokens - num_generated_tokens, tokens.data(), &num_current_generated_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to sample from the Context object");
+ return;
+ }
+ num_generated_tokens += num_current_generated_tokens;
+ } while (num_generated_tokens < kNumGeneratedTokens);
+ }
+
+ state.counters["generations"] =
+ benchmark::Counter(state.iterations(), benchmark::Counter::kIsRate);
+ state.counters["tokens"] =
+ benchmark::Counter(state.iterations() * kNumGeneratedTokens, benchmark::Counter::kIsRate);
+}
+
+static void AttnQKVThreadgroupSizeArguments(benchmark::internal::Benchmark* b) {
+ b->ArgNames({"tgsize"});
+ for (auto attn_qkv_threadgroup_size = 32; attn_qkv_threadgroup_size <= 1024; attn_qkv_threadgroup_size += 32) {
+ const auto num_simdgroups = attn_qkv_threadgroup_size / 32;
+ if (5120 % num_simdgroups != 0) {
+ // Skip incompatible threadgroup sizes
+ continue;
+ }
+ b->Args({attn_qkv_threadgroup_size});
+ }
+}
+
+BENCHMARK_CAPTURE(attn_qkv_tgsize, gpt_oss_20b, "GPT_OSS_20B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(AttnQKVThreadgroupSizeArguments);
+BENCHMARK_CAPTURE(attn_qkv_tgsize, gpt_oss_120b, "GPT_OSS_120B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(AttnQKVThreadgroupSizeArguments);
+
+static void attn_out_tgsize(benchmark::State& state, const char* env_var_name) {
+ const char* model_path = getenv(env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set", env_var_name));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>, decltype(&gptoss_model_release)> model(model_ptr, gptoss_model_release);
+    model->attn_out_threadgroup_size = static_cast<std::size_t>(state.range(0));
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(), /*context_length=*/0, /*max_batch_tokens=*/0, &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>, decltype(&gptoss_context_release)> context(context_ptr, gptoss_context_release);
+
+ const char* prompt = "why did the chicken cross the road?";
+ std::size_t num_prompt_tokens = 0;
+ status = gptoss_context_append_chars(context.get(), prompt, strlen(prompt), &num_prompt_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to tokenize prompt \"{}\"", prompt));
+ return;
+ }
+
+ // Prefill
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ const std::size_t num_kvcache_tokens = context->num_kv_tokens;
+
+ std::uint64_t rng_seed = 0;
+ for (auto _ : state) {
+ const std::uint64_t current_rng_seed = rng_seed++;
+ context->num_kv_tokens = num_prompt_tokens;
+ context->num_tokens = num_prompt_tokens;
+
+        std::array<std::uint32_t, kNumGeneratedTokens> tokens;
+ std::size_t num_generated_tokens = 0;
+ do {
+ std::size_t num_current_generated_tokens = 0;
+ status = gptoss_context_sample(context.get(), /*temperature=*/1.0f, /*rng_state=*/current_rng_seed,
+ /*max_tokens=*/kNumGeneratedTokens - num_generated_tokens, tokens.data(), &num_current_generated_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to sample from the Context object");
+ return;
+ }
+ num_generated_tokens += num_current_generated_tokens;
+ } while (num_generated_tokens < kNumGeneratedTokens);
+ }
+
+ state.counters["generations"] =
+ benchmark::Counter(state.iterations(), benchmark::Counter::kIsRate);
+ state.counters["tokens"] =
+ benchmark::Counter(state.iterations() * kNumGeneratedTokens, benchmark::Counter::kIsRate);
+}
+
+static void AttnOutThreadgroupSizeArguments(benchmark::internal::Benchmark* b) {
+ b->ArgNames({"tgsize"});
+ for (auto attn_out_threadgroup_size = 32; attn_out_threadgroup_size <= 1024; attn_out_threadgroup_size += 32) {
+ const auto num_simdgroups = attn_out_threadgroup_size / 32;
+ if (2880 % num_simdgroups != 0) {
+ // Skip incompatible threadgroup sizes
+ continue;
+ }
+ b->Args({attn_out_threadgroup_size});
+ }
+}
+
+BENCHMARK_CAPTURE(attn_out_tgsize, gpt_oss_20b, "GPT_OSS_20B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(AttnOutThreadgroupSizeArguments);
+BENCHMARK_CAPTURE(attn_out_tgsize, gpt_oss_120b, "GPT_OSS_120B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(AttnOutThreadgroupSizeArguments);
+
+static void mlp_gate_tgsize(benchmark::State& state, const char* env_var_name) {
+ const char* model_path = getenv(env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set", env_var_name));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>, decltype(&gptoss_model_release)> model(model_ptr, gptoss_model_release);
+    model->mlp_gate_threadgroup_size = static_cast<std::size_t>(state.range(0));
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(), /*context_length=*/0, /*max_batch_tokens=*/0, &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>, decltype(&gptoss_context_release)> context(context_ptr, gptoss_context_release);
+
+ const char* prompt = "why did the chicken cross the road?";
+ std::size_t num_prompt_tokens = 0;
+ status = gptoss_context_append_chars(context.get(), prompt, strlen(prompt), &num_prompt_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to tokenize prompt \"{}\"", prompt));
+ return;
+ }
+
+ // Prefill
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ const std::size_t num_kvcache_tokens = context->num_kv_tokens;
+
+ std::uint64_t rng_seed = 0;
+ for (auto _ : state) {
+ const std::uint64_t current_rng_seed = rng_seed++;
+ context->num_kv_tokens = num_prompt_tokens;
+ context->num_tokens = num_prompt_tokens;
+
+        std::array<std::uint32_t, kNumGeneratedTokens> tokens;
+ std::size_t num_generated_tokens = 0;
+ do {
+ std::size_t num_current_generated_tokens = 0;
+ status = gptoss_context_sample(context.get(), /*temperature=*/1.0f, /*rng_state=*/current_rng_seed,
+ /*max_tokens=*/kNumGeneratedTokens - num_generated_tokens, tokens.data(), &num_current_generated_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to sample from the Context object");
+ return;
+ }
+ num_generated_tokens += num_current_generated_tokens;
+ } while (num_generated_tokens < kNumGeneratedTokens);
+ }
+
+ state.counters["generations"] =
+ benchmark::Counter(state.iterations(), benchmark::Counter::kIsRate);
+ state.counters["tokens"] =
+ benchmark::Counter(state.iterations() * kNumGeneratedTokens, benchmark::Counter::kIsRate);
+}
+
+static void MlpGateThreadgroupSizeArguments(benchmark::internal::Benchmark* b) {
+ b->ArgNames({"tgsize"});
+ for (auto mlp_gate_threadgroup_size = 32; mlp_gate_threadgroup_size <= 1024; mlp_gate_threadgroup_size += 32) {
+ const auto num_simdgroups = mlp_gate_threadgroup_size / 32;
+ if (128 % num_simdgroups != 0) {
+ // Skip incompatible threadgroup sizes
+ continue;
+ }
+ b->Args({mlp_gate_threadgroup_size});
+ }
+}
+
+BENCHMARK_CAPTURE(mlp_gate_tgsize, gpt_oss_20b, "GPT_OSS_20B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(MlpGateThreadgroupSizeArguments);
+BENCHMARK_CAPTURE(mlp_gate_tgsize, gpt_oss_120b, "GPT_OSS_120B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(MlpGateThreadgroupSizeArguments);
+
+static void mlp_swiglu_tgsize(benchmark::State& state, const char* env_var_name) {
+ const char* model_path = getenv(env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set", env_var_name));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>, decltype(&gptoss_model_release)> model(model_ptr, gptoss_model_release);
+    model->mlp_swiglu_threadgroup_size = static_cast<std::size_t>(state.range(0));
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(), /*context_length=*/0, /*max_batch_tokens=*/0, &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>, decltype(&gptoss_context_release)> context(context_ptr, gptoss_context_release);
+
+ const char* prompt = "why did the chicken cross the road?";
+ std::size_t num_prompt_tokens = 0;
+ status = gptoss_context_append_chars(context.get(), prompt, strlen(prompt), &num_prompt_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to tokenize prompt \"{}\"", prompt));
+ return;
+ }
+
+ // Prefill
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ const std::size_t num_kvcache_tokens = context->num_kv_tokens;
+
+ std::uint64_t rng_seed = 0;
+ for (auto _ : state) {
+ const std::uint64_t current_rng_seed = rng_seed++;
+ context->num_kv_tokens = num_prompt_tokens;
+ context->num_tokens = num_prompt_tokens;
+
+        std::array<std::uint32_t, kNumGeneratedTokens> tokens;
+ std::size_t num_generated_tokens = 0;
+ do {
+ std::size_t num_current_generated_tokens = 0;
+ status = gptoss_context_sample(context.get(), /*temperature=*/1.0f, /*rng_state=*/current_rng_seed,
+ /*max_tokens=*/kNumGeneratedTokens - num_generated_tokens, tokens.data(), &num_current_generated_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to sample from the Context object");
+ return;
+ }
+ num_generated_tokens += num_current_generated_tokens;
+ } while (num_generated_tokens < kNumGeneratedTokens);
+ }
+
+ state.counters["generations"] =
+ benchmark::Counter(state.iterations(), benchmark::Counter::kIsRate);
+ state.counters["tokens"] =
+ benchmark::Counter(state.iterations() * kNumGeneratedTokens, benchmark::Counter::kIsRate);
+}
+
+static void MlpSwigluThreadgroupSizeArguments(benchmark::internal::Benchmark* b) {
+ b->ArgNames({"tgsize"});
+ for (auto threadgroup_size = 64; threadgroup_size <= 1024; threadgroup_size += 64) {
+ const auto num_simdgroups = threadgroup_size / 32;
+ if (5760 % num_simdgroups != 0) {
+ // Skip incompatible threadgroup sizes
+ continue;
+ }
+ b->Args({threadgroup_size});
+ }
+}
+
+BENCHMARK_CAPTURE(mlp_swiglu_tgsize, gpt_oss_20b, "GPT_OSS_20B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(MlpSwigluThreadgroupSizeArguments);
+BENCHMARK_CAPTURE(mlp_swiglu_tgsize, gpt_oss_120b, "GPT_OSS_120B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(MlpSwigluThreadgroupSizeArguments);
+
+static void mlp_out_tgsize(benchmark::State& state, const char* env_var_name) {
+ const char* model_path = getenv(env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set", env_var_name));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>, decltype(&gptoss_model_release)> model(model_ptr, gptoss_model_release);
+    model->mlp_out_threadgroup_size = static_cast<std::size_t>(state.range(0));
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(), /*context_length=*/0, /*max_batch_tokens=*/0, &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>, decltype(&gptoss_context_release)> context(context_ptr, gptoss_context_release);
+
+ const char* prompt = "why did the chicken cross the road?";
+ std::size_t num_prompt_tokens = 0;
+ status = gptoss_context_append_chars(context.get(), prompt, strlen(prompt), &num_prompt_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to tokenize prompt \"{}\"", prompt));
+ return;
+ }
+
+ // Prefill
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ const std::size_t num_kvcache_tokens = context->num_kv_tokens;
+
+ std::uint64_t rng_seed = 0;
+ for (auto _ : state) {
+ const std::uint64_t current_rng_seed = rng_seed++;
+ context->num_kv_tokens = num_prompt_tokens;
+ context->num_tokens = num_prompt_tokens;
+
+        std::array<std::uint32_t, kNumGeneratedTokens> tokens;
+ std::size_t num_generated_tokens = 0;
+ do {
+ std::size_t num_current_generated_tokens = 0;
+ status = gptoss_context_sample(context.get(), /*temperature=*/1.0f, /*rng_state=*/current_rng_seed,
+ /*max_tokens=*/kNumGeneratedTokens - num_generated_tokens, tokens.data(), &num_current_generated_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to sample from the Context object");
+ return;
+ }
+ num_generated_tokens += num_current_generated_tokens;
+ } while (num_generated_tokens < kNumGeneratedTokens);
+ }
+
+ state.counters["generations"] =
+ benchmark::Counter(state.iterations(), benchmark::Counter::kIsRate);
+ state.counters["tokens"] =
+ benchmark::Counter(state.iterations() * kNumGeneratedTokens, benchmark::Counter::kIsRate);
+}
+
+static void MlpOutThreadgroupSizeArguments(benchmark::internal::Benchmark* b) {
+ b->ArgNames({"tgsize"});
+ for (auto threadgroup_size = 64; threadgroup_size <= 1024; threadgroup_size += 64) {
+ const auto num_simdgroups = threadgroup_size / 32;
+ if (5760 % num_simdgroups != 0) {
+ // Skip incompatible threadgroup sizes
+ continue;
+ }
+ b->Args({threadgroup_size});
+ }
+}
+
+BENCHMARK_CAPTURE(mlp_out_tgsize, gpt_oss_20b, "GPT_OSS_20B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(MlpOutThreadgroupSizeArguments);
+BENCHMARK_CAPTURE(mlp_out_tgsize, gpt_oss_120b, "GPT_OSS_120B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(MlpOutThreadgroupSizeArguments);
+
+static void mlp_acc_tgsize(benchmark::State& state, const char* env_var_name) {
+ const char* model_path = getenv(env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set", env_var_name));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>, decltype(&gptoss_model_release)> model(model_ptr, gptoss_model_release);
+    model->mlp_acc_threadgroup_size = static_cast<std::size_t>(state.range(0));
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(), /*context_length=*/0, /*max_batch_tokens=*/0, &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>, decltype(&gptoss_context_release)> context(context_ptr, gptoss_context_release);
+
+ const char* prompt = "why did the chicken cross the road?";
+ std::size_t num_prompt_tokens = 0;
+ status = gptoss_context_append_chars(context.get(), prompt, strlen(prompt), &num_prompt_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to tokenize prompt \"{}\"", prompt));
+ return;
+ }
+
+ // Prefill
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ const std::size_t num_kvcache_tokens = context->num_kv_tokens;
+
+ std::uint64_t rng_seed = 0;
+ for (auto _ : state) {
+ const std::uint64_t current_rng_seed = rng_seed++;
+ context->num_kv_tokens = num_prompt_tokens;
+ context->num_tokens = num_prompt_tokens;
+
+        std::array<std::uint32_t, kNumGeneratedTokens> tokens;
+ std::size_t num_generated_tokens = 0;
+ do {
+ std::size_t num_current_generated_tokens = 0;
+ status = gptoss_context_sample(context.get(), /*temperature=*/1.0f, /*rng_state=*/current_rng_seed,
+ /*max_tokens=*/kNumGeneratedTokens - num_generated_tokens, tokens.data(), &num_current_generated_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to sample from the Context object");
+ return;
+ }
+ num_generated_tokens += num_current_generated_tokens;
+ } while (num_generated_tokens < kNumGeneratedTokens);
+ }
+
+ state.counters["generations"] =
+ benchmark::Counter(state.iterations(), benchmark::Counter::kIsRate);
+ state.counters["tokens"] =
+ benchmark::Counter(state.iterations() * kNumGeneratedTokens, benchmark::Counter::kIsRate);
+}
+
+static void MlpAccThreadgroupSizeArguments(benchmark::internal::Benchmark* b) {
+ b->ArgNames({"tgsize"});
+ for (auto threadgroup_size = 32; threadgroup_size <= 1024; threadgroup_size += 32) {
+ b->Args({threadgroup_size});
+ }
+}
+
+BENCHMARK_CAPTURE(mlp_acc_tgsize, gpt_oss_20b, "GPT_OSS_20B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(MlpAccThreadgroupSizeArguments);
+BENCHMARK_CAPTURE(mlp_acc_tgsize, gpt_oss_120b, "GPT_OSS_120B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(MlpAccThreadgroupSizeArguments);
+
+static void unembedding_tgsize(benchmark::State& state, const char* env_var_name) {
+ const char* model_path = getenv(env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set", env_var_name));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>, decltype(&gptoss_model_release)> model(model_ptr, gptoss_model_release);
+    model->unembedding_threadgroup_size = static_cast<std::size_t>(state.range(0));
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(), /*context_length=*/0, /*max_batch_tokens=*/0, &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>, decltype(&gptoss_context_release)> context(context_ptr, gptoss_context_release);
+
+ const char* prompt = "why did the chicken cross the road?";
+ std::size_t num_prompt_tokens = 0;
+ status = gptoss_context_append_chars(context.get(), prompt, strlen(prompt), &num_prompt_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to tokenize prompt \"{}\"", prompt));
+ return;
+ }
+
+ // Prefill
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ const std::size_t num_kvcache_tokens = context->num_kv_tokens;
+
+ std::uint64_t rng_seed = 0;
+ for (auto _ : state) {
+ const std::uint64_t current_rng_seed = rng_seed++;
+ context->num_kv_tokens = num_prompt_tokens;
+ context->num_tokens = num_prompt_tokens;
+
+        std::array<std::uint32_t, kNumGeneratedTokens> tokens;
+ std::size_t num_generated_tokens = 0;
+ do {
+ std::size_t num_current_generated_tokens = 0;
+ status = gptoss_context_sample(context.get(), /*temperature=*/1.0f, /*rng_state=*/current_rng_seed,
+ /*max_tokens=*/kNumGeneratedTokens - num_generated_tokens, tokens.data(), &num_current_generated_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to sample from the Context object");
+ return;
+ }
+ num_generated_tokens += num_current_generated_tokens;
+ } while (num_generated_tokens < kNumGeneratedTokens);
+ }
+
+ state.counters["generations"] =
+ benchmark::Counter(state.iterations(), benchmark::Counter::kIsRate);
+ state.counters["tokens"] =
+ benchmark::Counter(state.iterations() * kNumGeneratedTokens, benchmark::Counter::kIsRate);
+}
+
+static void UnembeddingThreadgroupSizeArguments(benchmark::internal::Benchmark* b) {
+ b->ArgNames({"tgsize"});
+ for (auto threadgroup_size = 32; threadgroup_size <= 1024; threadgroup_size += 32) {
+ b->Args({threadgroup_size});
+ }
+}
+
+BENCHMARK_CAPTURE(unembedding_tgsize, gpt_oss_20b, "GPT_OSS_20B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(UnembeddingThreadgroupSizeArguments);
+BENCHMARK_CAPTURE(unembedding_tgsize, gpt_oss_120b, "GPT_OSS_120B_PATH")
+ ->UseRealTime()->Unit(benchmark::kMillisecond)->Apply(UnembeddingThreadgroupSizeArguments);
+
+BENCHMARK_MAIN();
diff --git a/gpt_oss/metal/benchmark/end-to-end.cc b/gpt_oss/metal/benchmark/end-to-end.cc
new file mode 100644
index 0000000000000000000000000000000000000000..ba87e3af7928de00860af7a3dc4f9dec2c65d734
--- /dev/null
+++ b/gpt_oss/metal/benchmark/end-to-end.cc
@@ -0,0 +1,227 @@
+#include <gpt-oss.h>
+#include <internal/model.h>  // NOTE: internal header path assumed; provides the struct fields used below
+
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <format>
+#include <fstream>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include <benchmark/benchmark.h>
+
+constexpr std::uint32_t kNumGeneratedTokens = 100;
+
+static void end2end_decode(benchmark::State& state, const char* env_var_name) {
+ const char* model_path = getenv(env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set", env_var_name));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>, decltype(&gptoss_model_release)> model(model_ptr, gptoss_model_release);
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(), /*context_length=*/0, /*max_batch_tokens=*/0, &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>, decltype(&gptoss_context_release)> context(context_ptr, gptoss_context_release);
+
+ const char* prompt = "why did the chicken cross the road?";
+ std::size_t num_prompt_tokens = 0;
+ status = gptoss_context_append_chars(context.get(), prompt, strlen(prompt), &num_prompt_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format("failed to tokenize prompt \"{}\"", prompt));
+ return;
+ }
+
+ // Prefill
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ std::uint64_t rng_seed = 0;
+
+ for (auto _ : state) {
+ const std::uint64_t current_rng_seed = rng_seed++;
+ context->num_kv_tokens = num_prompt_tokens;
+ context->num_tokens = num_prompt_tokens;
+
+        std::array<std::uint32_t, kNumGeneratedTokens> tokens;
+ std::size_t num_generated_tokens = 0;
+ do {
+ std::size_t num_current_generated_tokens = 0;
+ status = gptoss_context_sample(context.get(), /*temperature=*/1.0f, /*rng_state=*/current_rng_seed,
+ /*max_tokens=*/kNumGeneratedTokens - num_generated_tokens, tokens.data(), &num_current_generated_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to sample from the Context object");
+ return;
+ }
+ num_generated_tokens += num_current_generated_tokens;
+ } while (num_generated_tokens < kNumGeneratedTokens);
+ }
+
+ state.counters["generations"] =
+ benchmark::Counter(state.iterations(), benchmark::Counter::kIsRate);
+ state.counters["tokens"] =
+ benchmark::Counter(state.iterations() * kNumGeneratedTokens, benchmark::Counter::kIsRate);
+}
+
+static void end2end_prefill(benchmark::State& state,
+ const char* model_path_env_var_name,
+ const char* prompt_env_var_name,
+ size_t context_length = 0) {
+ const char* model_path = getenv(model_path_env_var_name);
+ if (model_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set",
+ model_path_env_var_name));
+ return;
+ }
+
+ const char* prompt_file_path = getenv(prompt_env_var_name);
+ if (prompt_file_path == NULL) {
+ state.SkipWithError(std::format("environment variable {} is not set",
+ prompt_env_var_name));
+ return;
+ }
+
+ // Read prompt contents from file into a std::string
+ std::ifstream prompt_file(prompt_file_path,
+ std::ios::in | std::ios::binary);
+ if (!prompt_file) {
+ state.SkipWithError(
+ std::format("failed to open prompt file {}", prompt_file_path));
+ return;
+ }
+ std::string prompt_str;
+ prompt_file.seekg(0, std::ios::end);
+ std::streampos file_size = prompt_file.tellg();
+ if (file_size < 0) {
+ state.SkipWithError(std::format("failed to read prompt file size {}",
+ prompt_file_path));
+ return;
+ }
+    prompt_str.resize(static_cast<std::size_t>(file_size));
+ prompt_file.seekg(0, std::ios::beg);
+ if (file_size > 0) {
+ prompt_file.read(prompt_str.data(), file_size);
+ }
+ if (!prompt_file) {
+ state.SkipWithError(
+ std::format("failed to read prompt file {}", prompt_file_path));
+ return;
+ }
+
+ gptoss_model_t model_ptr = nullptr;
+ gptoss_status status = gptoss_model_create_from_file(model_path, &model_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(
+ std::format("failed to load model from file {}", model_path));
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_model_t>,
+                    decltype(&gptoss_model_release)>
+        model(model_ptr, gptoss_model_release);
+
+ gptoss_tokenizer_t tokenizer_ptr = nullptr;
+ status = gptoss_model_get_tokenizer(model.get(), &tokenizer_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to retrieve Tokenizer");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_tokenizer_t>,
+                    decltype(&gptoss_tokenizer_release)>
+        tokenizer(tokenizer_ptr, gptoss_tokenizer_release);
+
+ gptoss_context_t context_ptr = nullptr;
+ status = gptoss_context_create(model.get(),
+                                   /*context_length=*/0,
+ /*max_batch_tokens=*/1024,
+ &context_ptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to create Context object");
+ return;
+ }
+    std::unique_ptr<std::remove_pointer_t<gptoss_context_t>,
+                    decltype(&gptoss_context_release)>
+        context(context_ptr, gptoss_context_release);
+
+ const char* prompt = prompt_str.c_str();
+ status = gptoss_context_append_chars(context.get(), prompt,
+ prompt_str.size(), nullptr);
+ if (status != gptoss_status_success) {
+ state.SkipWithError(std::format(
+ "failed to tokenize prompt from file {}", prompt_file_path));
+ return;
+ }
+
+ size_t num_tokens;
+ status = gptoss_context_get_num_tokens(context.get(), &num_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to get number of tokens");
+ return;
+ }
+ if (context_length != 0) {
+ assert(context_length <= num_tokens);
+ context->num_tokens = context_length;
+ }
+ status = gptoss_context_get_num_tokens(context.get(), &num_tokens);
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to get number of tokens");
+ return;
+ }
+ // Prefill
+ for (auto _ : state) {
+ status = gptoss_context_process(context.get());
+ if (status != gptoss_status_success) {
+ state.SkipWithError("failed to prefill Context object");
+ return;
+ }
+ context->num_kv_tokens = 0;
+ }
+
+ state.counters["tokens"] = num_tokens;
+ state.counters["tokens/s"] = benchmark::Counter(
+ state.iterations() * num_tokens, benchmark::Counter::kIsRate);
+}
+
+// Decode end-to-end benchmark
+BENCHMARK_CAPTURE(end2end_decode, gpt_oss_20b_decode, "GPT_OSS_20B_PATH")
+ ->UseRealTime()
+ ->Unit(benchmark::kMillisecond);
+BENCHMARK_CAPTURE(end2end_decode, gpt_oss_120b_decode, "GPT_OSS_120B_PATH")
+ ->UseRealTime()
+ ->Unit(benchmark::kMillisecond);
+
+// Prefill end-to-end benchmark
+BENCHMARK_CAPTURE(end2end_prefill, gpt_oss_120b_prefill_1024,
+ "GPT_OSS_120B_PATH", "GPT_OSS_PROMPT_FILE_PATH", 1024)
+ ->UseRealTime()
+ ->Unit(benchmark::kMillisecond);
+BENCHMARK_CAPTURE(end2end_prefill, gpt_oss_20b_prefill_1024, "GPT_OSS_20B_PATH",
+ "GPT_OSS_PROMPT_FILE_PATH", 1024)
+ ->UseRealTime()
+ ->Unit(benchmark::kMillisecond);
+
+BENCHMARK_CAPTURE(end2end_prefill, gpt_oss_120b_prefill_3072,
+ "GPT_OSS_120B_PATH", "GPT_OSS_PROMPT_FILE_PATH", 3072)
+ ->UseRealTime()
+ ->Unit(benchmark::kMillisecond);
+BENCHMARK_CAPTURE(end2end_prefill, gpt_oss_20b_prefill_3072, "GPT_OSS_20B_PATH",
+ "GPT_OSS_PROMPT_FILE_PATH", 3072)
+ ->UseRealTime()
+ ->Unit(benchmark::kMillisecond);
+
+BENCHMARK_MAIN();
diff --git a/gpt_oss/metal/benchmark/f32-bf16w-rmsnorm.cc b/gpt_oss/metal/benchmark/f32-bf16w-rmsnorm.cc
new file mode 100644
index 0000000000000000000000000000000000000000..ee7551c252a0200e79d34d11c165b8718b3b2221
--- /dev/null
+++ b/gpt_oss/metal/benchmark/f32-bf16w-rmsnorm.cc
@@ -0,0 +1,99 @@
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include <benchmark/benchmark.h>
+
+// NOTE: internal header paths below are assumed (Metal wrapper, kernel launchers, shared types)
+#include <internal/metal.hpp>
+#include <internal/metal-kernels.h>
+#include <internal/kernel-args.h>
+
+using gptoss::Check;
+using namespace gptoss::metal;
+
+constexpr float kEpsilon = 1.0e-5f;
+constexpr uint64_t kSeed = UINT64_C(1019827666124465388);
+
+static void f32_bf16w_rmsnorm(benchmark::State& state) {
+ const size_t num_tokens = 1;
+ const size_t num_channels = state.range(0);
+
+ Device device;
+ CommandQueue command_queue{device};
+ Library library{device};
+ Function f32_fill_random_fn{library, "gptoss_f32_fill_random"};
+ Function bf16_fill_random_fn{library, "gptoss_bf16_fill_random"};
+ Function f32_bf16w_rmsnorm_fn{library, "gptoss_f32_bf16w_rmsnorm"};
+ Buffer input_buffer{device, num_tokens * num_channels * sizeof(float)};
+ Buffer weight_buffer{device, num_channels * sizeof(gptoss_bfloat16)};
+ Buffer output_buffer{device, num_tokens * num_channels * sizeof(float)};
+ Buffer control_buffer{device, sizeof(gptoss_control)};
+ std::memset(control_buffer.ptr(), 0, sizeof(gptoss_control));
+
+ {
+ CommandBuffer command_buffer{command_queue};
+
+ size_t offset = 0;
+ Check(gptoss_metal_command_buffer_encode_launch_f32_fill_random(
+ command_buffer.handle(),
+ f32_fill_random_fn.handle(),
+ /*threadgroup_size=*/0,
+ /*max_threadgroups=*/10,
+ /*output_buffer=*/input_buffer.handle(),
+ /*output_offset=*/0,
+ num_channels, kSeed, offset, /*min=*/-1.0f, /*max=*/1.0),
+ "gptoss_metal_command_buffer_encode_launch_f32_fill_random");
+ offset += num_channels;
+
+ Check(gptoss_metal_command_buffer_encode_launch_bf16_fill_random(
+ command_buffer.handle(),
+ bf16_fill_random_fn.handle(),
+ /*threadgroup_size=*/0,
+ /*max_threadgroups=*/10,
+ /*output_buffer=*/weight_buffer.handle(),
+ /*output_offset=*/0,
+ num_channels, kSeed, offset, /*min=*/-1.0f, /*max=*/1.0),
+ "gptoss_metal_command_buffer_encode_launch_bf16_fill_random");
+ offset += num_channels;
+
+ command_buffer.commit();
+ command_buffer.wait_completion();
+ }
+
+ for (auto _ : state) {
+ CommandBuffer command_buffer{command_queue};
+
+ Check(gptoss_metal_command_buffer_encode_launch_f32_bf16w_rmsnorm(
+ command_buffer.handle(),
+ f32_bf16w_rmsnorm_fn.handle(),
+ input_buffer.handle(),
+ /*input_offset=*/0,
+ weight_buffer.handle(),
+ /*weight_offset=*/0,
+ output_buffer.handle(),
+ /*output_offset=*/0,
+ control_buffer.handle(),
+ /*control_offset=*/0,
+ num_tokens,
+ num_channels,
+ kEpsilon),
+ "gptoss_metal_command_buffer_encode_launch_f32_bf16w_rmsnorm");
+
+ command_buffer.commit();
+ const double elapsed_seconds = command_buffer.wait_completion();
+ state.SetIterationTime(elapsed_seconds);
+ }
+
+ const size_t num_elements = num_tokens * num_channels;
+ state.counters["elements"] =
+ benchmark::Counter(state.iterations() * num_elements,
+ benchmark::Counter::kIsRate);
+
+ const int64_t bytes_per_iteration = input_buffer.size() + weight_buffer.size() + output_buffer.size();
+ state.counters["bytes"] =
+ benchmark::Counter(state.iterations() * bytes_per_iteration,
+ benchmark::Counter::kIsRate);
+}
+
+BENCHMARK(f32_bf16w_rmsnorm)->Arg(2880)->UseManualTime()->Unit(benchmark::kMicrosecond);
+
+BENCHMARK_MAIN();
diff --git a/gpt_oss/metal/benchmark/f32-random.cc b/gpt_oss/metal/benchmark/f32-random.cc
new file mode 100644
index 0000000000000000000000000000000000000000..62455e3460f0ec32b51bf5c8d4771d3de6cff73f
--- /dev/null
+++ b/gpt_oss/metal/benchmark/f32-random.cc
@@ -0,0 +1,55 @@
+#include <cstddef>
+#include <cstdint>
+
+#include <benchmark/benchmark.h>
+
+// NOTE: internal header paths below are assumed (Metal wrapper and kernel launchers)
+#include <internal/metal.hpp>
+#include <internal/metal-kernels.h>
+
+using gptoss::Check;
+using namespace gptoss::metal;
+
+static void f32_fill_random(benchmark::State& state) {
+ const size_t numel = state.range(0);
+
+ Device device;
+ CommandQueue command_queue{device};
+ Library library{device};
+ Function f32_fill_random_fn{library, "gptoss_f32_fill_random"};
+ Buffer buffer{device, numel * sizeof(float)};
+
+ constexpr uint64_t seed = UINT64_C(1019827666124465388);
+ constexpr uint64_t offset = UINT64_C(12345678901234567890);
+ const float min = -1.0f;
+ const float max = 7.0f;
+ for (auto _ : state) {
+ CommandBuffer command_buffer{command_queue};
+
+ Check(gptoss_metal_command_buffer_encode_launch_f32_fill_random(
+ command_buffer.handle(),
+ f32_fill_random_fn.handle(),
+ /*threadgroup_size=*/0,
+ /*max_threadgroups=*/120,
+ /*output_buffer=*/buffer.handle(),
+ /*output_offset=*/0,
+ numel, seed, offset, min, max),
+ "gptoss_metal_command_buffer_encode_launch_f32_fill_random");
+
+ command_buffer.commit();
+ const double elapsed_seconds = command_buffer.wait_completion();
+ state.SetIterationTime(elapsed_seconds);
+ }
+
+ const int64_t elements_per_iteration = numel;
+ state.counters["elements"] =
+ benchmark::Counter(state.iterations() * elements_per_iteration,
+ benchmark::Counter::kIsRate);
+
+ const int64_t bytes_per_iteration = numel * sizeof(float);
+ state.counters["bytes"] =
+ benchmark::Counter(state.iterations() * bytes_per_iteration,
+ benchmark::Counter::kIsRate);
+}
+
+constexpr int64_t giga = INT64_C(1073741824);
+BENCHMARK(f32_fill_random)->Arg(2 * giga)->UseManualTime()->Unit(benchmark::kMicrosecond);
+
+BENCHMARK_MAIN();
diff --git a/gpt_oss/metal/benchmark/mf4-f32-convert.cc b/gpt_oss/metal/benchmark/mf4-f32-convert.cc
new file mode 100644
index 0000000000000000000000000000000000000000..dff5871814abbb895773b570edb899ab84a611f8
--- /dev/null
+++ b/gpt_oss/metal/benchmark/mf4-f32-convert.cc
@@ -0,0 +1,65 @@
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include <benchmark/benchmark.h>
+
+// NOTE: internal header paths below are assumed (Metal wrapper, kernel launchers, shared types)
+#include <internal/metal.hpp>
+#include <internal/metal-kernels.h>
+#include <internal/datatype.h>
+
+using gptoss::Check;
+using namespace gptoss::metal;
+
+static void mf4_f32_convert(benchmark::State& state) {
+ const size_t num_blocks = state.range(0);
+ const size_t num_elements = num_blocks * 32;
+ const size_t num_bytes = num_elements / 2;
+
+ Device device;
+ CommandQueue command_queue{device};
+ Library library{device};
+ Function mf4_f32_convert_fn{library, "gptoss_mf4_f32_convert"};
+ Buffer block_buffer{device, num_bytes};
+ Buffer scale_buffer{device, num_blocks * sizeof(gptoss_float8ue8m0)};
+ Buffer output_buffer{device, num_elements * sizeof(float)};
+
+ std::memset(block_buffer.ptr(), 0x91, num_bytes); // force subnormals
+ std::memset(scale_buffer.ptr(), 128, num_blocks * sizeof(uint8_t)); // scale = 2.0
+
+ for (auto _ : state) {
+ CommandBuffer command_buffer{command_queue};
+
+ Check(gptoss_metal_command_buffer_encode_launch_mf4_f32_convert(
+ command_buffer.handle(),
+ mf4_f32_convert_fn.handle(),
+ /*threadgroup_size=*/0,
+ /*max_threadgroups=*/120,
+ block_buffer.handle(),
+ scale_buffer.handle(),
+ output_buffer.handle(),
+ num_elements),
+ "gptoss_metal_command_buffer_encode_launch_mf4_f32_convert");
+
+ command_buffer.commit();
+ const double elapsed_seconds = command_buffer.wait_completion();
+ state.SetIterationTime(elapsed_seconds);
+ }
+
+ state.counters["blocks"] =
+ benchmark::Counter(state.iterations() * num_blocks,
+ benchmark::Counter::kIsRate);
+
+ state.counters["elements"] =
+ benchmark::Counter(state.iterations() * num_elements,
+ benchmark::Counter::kIsRate);
+
+ const int64_t bytes_per_iteration = num_bytes + num_blocks + num_elements * sizeof(float);
+ state.counters["bytes"] =
+ benchmark::Counter(state.iterations() * bytes_per_iteration,
+ benchmark::Counter::kIsRate);
+}
+
+constexpr int64_t mega = INT64_C(1048576);
+BENCHMARK(mf4_f32_convert)->Arg(256 * mega)->UseManualTime()->Unit(benchmark::kMicrosecond);
+
+BENCHMARK_MAIN();
diff --git a/gpt_oss/metal/benchmark/u32-random.cc b/gpt_oss/metal/benchmark/u32-random.cc
new file mode 100644
index 0000000000000000000000000000000000000000..1d73e2210d44300330e6dac16c1ab706b9181384
--- /dev/null
+++ b/gpt_oss/metal/benchmark/u32-random.cc
@@ -0,0 +1,53 @@
+#include <cstddef>
+#include <cstdint>
+
+#include <benchmark/benchmark.h>
+
+// NOTE: internal header paths below are assumed (Metal wrapper and kernel launchers)
+#include <internal/metal.hpp>
+#include <internal/metal-kernels.h>
+
+using gptoss::Check;
+using namespace gptoss::metal;
+
+static void u32_fill_random(benchmark::State& state) {
+ const size_t numel = state.range(0);
+
+ Device device;
+ CommandQueue command_queue{device};
+ Library library{device};
+ Function u32_fill_random_fn{library, "gptoss_u32_fill_random"};
+ Buffer buffer{device, numel * sizeof(float)};
+
+ constexpr uint64_t seed = UINT64_C(1019827666124465388);
+ constexpr uint64_t offset = UINT64_C(12345678901234567890);
+ for (auto _ : state) {
+ CommandBuffer command_buffer{command_queue};
+
+ Check(gptoss_metal_command_buffer_encode_launch_u32_fill_random(
+ command_buffer.handle(),
+ u32_fill_random_fn.handle(),
+ /*threadgroup_size=*/0,
+ /*max_threadgroups=*/120,
+ /*output_buffer=*/buffer.handle(),
+ /*output_offset=*/0,
+ numel, seed, offset),
+ "gptoss_metal_command_buffer_encode_launch_u32_fill_random");
+
+ command_buffer.commit();
+ const double elapsed_seconds = command_buffer.wait_completion();
+ state.SetIterationTime(elapsed_seconds);
+ }
+
+ const int64_t elements_per_iteration = numel;
+ state.counters["elements"] =
+ benchmark::Counter(state.iterations() * elements_per_iteration,
+ benchmark::Counter::kIsRate);
+
+ const int64_t bytes_per_iteration = numel * sizeof(float);
+ state.counters["bytes"] =
+ benchmark::Counter(state.iterations() * bytes_per_iteration,
+ benchmark::Counter::kIsRate);
+}
+
+constexpr int64_t giga = INT64_C(1073741824);
+BENCHMARK(u32_fill_random)->Arg(2 * giga)->UseManualTime()->Unit(benchmark::kMicrosecond);
+
+BENCHMARK_MAIN();
diff --git a/gpt_oss/metal/examples/chat.py b/gpt_oss/metal/examples/chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..f29cec3b5989b533a6e8a1d884e6ab217026a568
--- /dev/null
+++ b/gpt_oss/metal/examples/chat.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+
+import argparse
+import sys
+
+from datetime import date
+from gpt_oss.metal import Context, Model
+
+
+DEFAULT_PROMPT = f"""You are ChatGPT, a large language model trained by OpenAI.
+Knowledge cutoff: 2024-06
+Current date: {date.today().isoformat()}
+
+reasoning effort high
+
+# Valid channels: analysis, final. Channel must be included for every message."""
+
+
+parser = argparse.ArgumentParser(description="Chat with gpt-oss", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument("model", metavar="PATH", type=str, help="Path to gpt-oss model in Metal inference format")
+parser.add_argument("--prompt", type=str, default=DEFAULT_PROMPT, help="System prompt")
+parser.add_argument(
+ "--context-length", type=int, default=0, help="The maximum context length"
+)
+parser.add_argument(
+ "--temperature", type=float, default=1.0, help="Sampling temperature"
+)
+parser.add_argument(
+ "--seed", type=int, default=0, help="Sampling seed"
+)
+
+
+GREY = "\33[90m"
+BOLD = "\33[1m"
+RESET = "\33[0m"
+
+
+def main(args):
+ options = parser.parse_args(args)
+ model = Model(options.model)
+ tokenizer = model.tokenizer
+ start_token = tokenizer.encode_special_token("<|start|>")
+ message_token = tokenizer.encode_special_token("<|message|>")
+ end_token = tokenizer.encode_special_token("<|end|>")
+ return_token = tokenizer.encode_special_token("<|return|>")
+ channel_token = tokenizer.encode_special_token("<|channel|>")
+
+ context = Context(model, context_length=options.context_length)
+ context.append(start_token)
+ context.append("system")
+ context.append(message_token)
+ context.append(options.prompt)
+ context.append(end_token)
+
+ while True:
+ context.append(start_token)
+ context.append("user")
+ context.append(message_token)
+ message = input(f"{BOLD}User:{RESET} ").rstrip()
+ context.append(message)
+ context.append(end_token)
+ print(f"{BOLD}Assistant:{RESET} {GREY}", end="", flush=True)
+ context.append(start_token)
+ context.append("assistant")
+ context.append(channel_token)
+
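+ # The assistant reply is expected in harmony format: <|channel|>NAME<|message|>TEXT,
+ # terminated by <|end|> (end of message) or <|return|> (end of turn). The flags
+ # below track whether decoded text is currently part of a role (after <|start|>)
+ # or a channel name (after <|channel|>), so the analysis channel can be greyed out.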
+ inside_start_block = True
+ inside_channel_block = True
+ role = "assistant"
+ channel = ""
+ while True:
+ token = context.sample(
+ temperature=options.temperature,
+ seed=options.seed,
+ )
+ context.append(token)
+ if token == return_token:
+ print(flush=True)
+ break
+ elif token == start_token:
+ inside_start_block = True
+ role = ""
+ channel = ""
+ elif token == message_token:
+ inside_start_block = False
+ inside_channel_block = False
+ if channel == "analysis":
+ print(f"{GREY}", end="", flush=True)
+ elif token == end_token:
+ print(f"{RESET}", flush=True)
+ elif token == channel_token:
+ inside_channel_block = True
+ elif token < tokenizer.num_text_tokens:
+ if inside_channel_block:
+ channel += str(tokenizer.decode(token), encoding="utf-8")
+ elif inside_start_block:
+ role += str(tokenizer.decode(token), encoding="utf-8")
+ else:
+ sys.stdout.buffer.write(tokenizer.decode(token))
+ sys.stdout.buffer.flush()
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/gpt_oss/metal/examples/generate.py b/gpt_oss/metal/examples/generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b78199994e66d8322a8cec980bed18b8098308c
--- /dev/null
+++ b/gpt_oss/metal/examples/generate.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+import argparse
+import sys
+
+from gpt_oss.metal import Context, Model
+
+
+parser = argparse.ArgumentParser(description='Generate text with gpt-oss', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument('model', metavar='PATH', type=str, help='Path to gpt-oss checkpoint')
+parser.add_argument('-p', '--prompt', type=str, required=True, help='Prompt')
+parser.add_argument('-l', '--limit', type=int, default=100, help='Number of tokens to generate')
+parser.add_argument('--context-length', type=int, default=0, help='The maximum context length')
+
+
+def main(args):
+ options = parser.parse_args(args)
+ model = Model(options.model)
+
+ context = Context(model, context_length=options.context_length)
+ context.append(options.prompt)
+ print(context.tokens)
+ prompt_tokens = context.num_tokens
+
+ tokenizer = model.tokenizer
+
+ while context.num_tokens - prompt_tokens < options.limit:
+ token = context.sample()
+ context.append(token)
+ print(str(tokenizer.decode(token), encoding="utf-8"), end='', flush=True)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/gpt_oss/metal/include/gpt-oss.h b/gpt_oss/metal/include/gpt-oss.h
new file mode 100644
index 0000000000000000000000000000000000000000..f24a7b1b34336b09bd64695f111321b519986cb5
--- /dev/null
+++ b/gpt_oss/metal/include/gpt-oss.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#include <gpt-oss/macros.h>
+#include <gpt-oss/types.h>
+#include <gpt-oss/functions.h>
diff --git a/gpt_oss/metal/include/gpt-oss/functions.h b/gpt_oss/metal/include/gpt-oss/functions.h
new file mode 100644
index 0000000000000000000000000000000000000000..758756c7a69dbc0bc22ad6c6f4a80d62be08fffb
--- /dev/null
+++ b/gpt_oss/metal/include/gpt-oss/functions.h
@@ -0,0 +1,401 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <gpt-oss/macros.h>
+#include <gpt-oss/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Creates a Model object from a file in the filesystem.
+ *
+ * @param path Path to the file containing the model in GPT-OSS format.
+ * @param model_out Pointer to the Model object that will be created. Must be released with gptoss_model_release.
+ *
+ * On success, returns gptoss_status_success and saves a pointer to the created Model in the model_out argument.
+ * On failure, returns an error code and stores null pointer in the model_out argument.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_model_create_from_file(
+ const char* path,
+ gptoss_model_t* model_out);
+
+/*
+ * Query the Tokenizer object associated with the Model.
+ *
+ * @param model Pointer to the Model object created by gptoss_model_create_from_file.
+ * @param tokenizer_out Pointer to the variable where the Tokenizer reference will be stored.
+ *
+ * On success, returns gptoss_status_success and stores reference to the Tokenizer object in the tokenizer_out argument.
+ * On failure, returns an error code and stores NULL in the tokenizer_out argument.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_model_get_tokenizer(
+ gptoss_model_t model,
+ gptoss_tokenizer_t* tokenizer_out);
+
+/*
+ * Query the maximum context length supported by the Model.
+ *
+ * @param model Pointer to the Model object created by gptoss_model_create_from_file.
+ * @param max_context_length_out Pointer to the variable where the maximum context length will be stored.
+ *
+ * On success, returns gptoss_status_success and stores maximum context length in the max_context_length_out argument.
+ * On failure, returns an error code and leaves the value specified by max_context_length_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_model_get_max_context_length(
+ gptoss_model_t model,
+ size_t* max_context_length_out);
+
+/*
+ * Increments a Model object's reference count.
+ *
+ * @param model Pointer to the Model object created by gptoss_model_create_from_file.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_model_retain(
+ gptoss_model_t model);
+
+/*
+ * Decrements a Model object's reference count and possibly releases associated resources.
+ *
+ * @param model Pointer to the Model object created by gptoss_model_create_from_file.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_model_release(
+ gptoss_model_t model);
+
+/*
+ * Query the token ID for a special token in the Tokenizer vocabulary.
+ *
+ * @param tokenizer Pointer to the Tokenizer object created by gptoss_model_get_tokenizer.
+ * @param token_type Type of the special token to query an ID for.
+ * @param token_id_out Pointer to the variable where the token ID will be stored.
+ *
+ * On success, returns gptoss_status_success and stores the token ID in the token_id_out argument.
+ * On failure, returns an error code and leaves the value specified by token_id_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_tokenizer_get_special_token_id(
+ gptoss_tokenizer_t tokenizer,
+ enum gptoss_special_token token_type,
+ uint32_t* token_id_out);
+
+/*
+ * Query the number of text tokens in the Tokenizer vocabulary.
+ *
+ * @param tokenizer Pointer to the Tokenizer object created by gptoss_model_get_tokenizer.
+ * @param num_text_tokens_out Pointer to the variable where the number of text tokens will be stored.
+ *
+ * On success, returns gptoss_status_success and stores the number of text tokens in the num_text_tokens_out argument.
+ * On failure, returns an error code and leaves the value specified by num_text_tokens_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_tokenizer_get_num_text_tokens(
+ gptoss_tokenizer_t tokenizer,
+ uint32_t* num_text_tokens_out);
+
+/*
+ * Query the number of special tokens in the Tokenizer vocabulary.
+ *
+ * @param tokenizer Pointer to the Tokenizer object created by gptoss_model_get_tokenizer.
+ * @param num_special_tokens_out Pointer to the variable where the number of special tokens will be stored.
+ *
+ * On success, returns gptoss_status_success and stores the number of special tokens in the num_special_tokens_out argument.
+ * On failure, returns an error code and leaves the value specified by num_special_tokens_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_tokenizer_get_num_special_tokens(
+ gptoss_tokenizer_t tokenizer,
+ uint32_t* num_special_tokens_out);
+
+/*
+ * Query the total number of tokens in the Tokenizer vocabulary.
+ *
+ * @param tokenizer Pointer to the Tokenizer object created by gptoss_model_get_tokenizer.
+ * @param num_tokens_out Pointer to the variable where the total number of tokens will be stored.
+ *
+ * On success, returns gptoss_status_success and stores the total number of tokens in the num_tokens_out argument.
+ * On failure, returns an error code and leaves the value specified by num_tokens_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_tokenizer_get_num_tokens(
+ gptoss_tokenizer_t tokenizer,
+ uint32_t* num_tokens_out);
+
+/*
+ * Converts a text token ID to its byte representation.
+ *
+ * @param tokenizer Pointer to the Tokenizer object returned by gptoss_model_get_tokenizer. The returned byte
+ * representation remains valid for the lifetime of this Tokenizer object.
+ * @param token_id ID of the text token to convert.
+ * @param token_ptr_out Pointer to the variable where the pointer to the byte representation of the token will be
+ * stored.
+ * @param token_size_out Pointer to the variable where the size of the byte representation of the token will be stored.
+ *
+ * On success, returns gptoss_status_success and stores pointer and size of the byte representation of the token in the
+ * token_ptr_out and token_size_out arguments.
+ * On failure, returns an error code and leaves the values specified in token_ptr_out and token_size_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_tokenizer_decode(
+ gptoss_tokenizer_t tokenizer,
+ uint32_t token_id,
+ const void** token_ptr_out,
+ size_t* token_size_out);
+
+/*
+ * Increments a Tokenizer object's reference count.
+ *
+ * @param tokenizer Pointer to the Tokenizer object returned by gptoss_model_get_tokenizer.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_tokenizer_retain(
+ gptoss_tokenizer_t tokenizer);
+
+/*
+ * Decrements a Tokenizer object's reference count and possibly releases associated resources.
+ *
+ * @param tokenizer Pointer to the Tokenizer object returned by gptoss_model_get_tokenizer.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_tokenizer_release(
+ gptoss_tokenizer_t tokenizer);
+
+/*
+ * Creates a Context object for use with the particular Model object.
+ *
+ * @param model Model object to create a context for.
+ * @param context_length Maximum number of tokens in the context.
+ * Specify 0 to use the maximum context length supported by the model.
+ * @param max_batch_tokens Maximum number of tokens that can be processed in a single batch.
+ * Larger values may improve prefill performance, but require more memory.
+ * Specify 0 to use the default value.
+ * @param context_out Pointer to the Context object that will be created.
+ * Must be released with gptoss_context_release.
+ *
+ * On success, returns gptoss_status_success and saves a pointer to the created Context in the context_out argument.
+ * On failure, returns an error code and stores null pointer in the context_out argument.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_create(
+ gptoss_model_t model,
+ size_t context_length,
+ size_t max_batch_tokens,
+ gptoss_context_t* context_out);
+
+/*
+ * Query the current number of tokens cached in the Context.
+ *
+ * @param context Pointer to the Context object created by gptoss_context_create.
+ * @param num_tokens_out Pointer to the variable where the current number of cached tokens will be stored.
+ *
+ * On success, returns gptoss_status_success and stores current number of cached tokens in the num_tokens_out argument.
+ * On failure, returns an error code and leaves the value specified by num_tokens_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_get_num_tokens(
+ gptoss_context_t context,
+ size_t* num_tokens_out);
+
+/*
+ * Query the maximum number of tokens cached in the Context.
+ *
+ * @param context Pointer to the Context object created by gptoss_context_create.
+ * @param max_tokens_out Pointer to the variable where the maximum number of cached tokens will be stored.
+ *
+ * On success, returns gptoss_status_success and stores maximum number of cached tokens in the max_tokens_out argument.
+ * On failure, returns an error code and leaves the value specified by max_tokens_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_get_max_tokens(
+ gptoss_context_t context,
+ size_t* max_tokens_out);
+
+/*
+ * Query the list of token IDs cached in the Context.
+ *
+ * @param context Pointer to the Context object created by gptoss_context_create.
+ * @param tokens_out Pointer to the array where up to max_tokens cached token IDs will be stored.
+ * @param max_tokens Maximum capacity of the buffer specified by tokens_out.
+ * @param num_tokens_out Pointer to the variable where the actual number of cached tokens will be stored.
+ * This value can exceed max_tokens if the buffer capacity is insufficient.
+ *
+ * On success, returns gptoss_status_success and stores cached token IDs in the tokens_out argument and the number of
+ * cached tokens in the num_tokens_out argument.
+ * On failure, returns an error code and leaves the values specified by tokens_out and num_tokens_out unchanged.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_get_tokens(
+ gptoss_context_t context,
+ uint32_t* tokens_out,
+ size_t max_tokens,
+ size_t* num_tokens_out);
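+
+/*
+ * Illustrative sketch (not part of the API contract, error handling omitted): the declaration above supports a
+ * two-call pattern, querying the required capacity first and fetching the cached token IDs afterwards:
+ *
+ *   size_t num_tokens = 0;
+ *   gptoss_context_get_tokens(context, NULL, 0, &num_tokens);
+ *   uint32_t* tokens = malloc(num_tokens * sizeof(uint32_t));
+ *   gptoss_context_get_tokens(context, tokens, num_tokens, &num_tokens);
+ */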
+
+/*
+ * Tokenizes and appends a character string to the Context object.
+ *
+ * @param context Context object created by gptoss_context_create.
+ * @param text Pointer to the character string to tokenize and append.
+ * @param text_length Length of the string, in chars.
+ * @param num_tokens_out Optional pointer to the variable where the number of appended tokens will be stored. Ignored if a null pointer is provided.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_append_chars(
+ gptoss_context_t context,
+ const char* text,
+ size_t text_length,
+ size_t* num_tokens_out);
+
+/*
+ * Appends a list of tokens to the context.
+ *
+ * @param context Context object created by gptoss_context_create.
+ * @param num_tokens Number of tokens to be appended.
+ * @param tokens Pointer to the array of tokens to be appended.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_append_tokens(
+ gptoss_context_t context,
+ size_t num_tokens,
+ const uint32_t* tokens);
+
+/*
+ * Resets the context, clearing its state.
+ *
+ * @param context Context object created by gptoss_context_create.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_reset(
+ gptoss_context_t context);
+
+/*
+ * Pre-processes the tokens in the Context and generates the probability distribution over the next token.
+ *
+ * @param context Context object created by gptoss_context_create.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_process(
+ gptoss_context_t context);
+
+/*
+ * Samples tokens from the probability distribution over the next token conditioned on the Context.
+ *
+ * @param context Context object created by gptoss_context_create.
+ * @param temperature Sampling temperature. Must be non-negative.
+ * @param seed Random number generator seed to use for sampling.
+ * @param max_tokens Maximum number of tokens to sample.
+ * @param tokens_out Pointer to the array where up to max_tokens sampled token IDs will be stored.
+ * @param num_tokens_out Pointer to the variable where the number of sampled tokens will be stored.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_sample(
+ gptoss_context_t context,
+ float temperature,
+ uint64_t seed,
+ size_t max_tokens,
+ uint32_t* tokens_out,
+ size_t* num_tokens_out);
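+
+/*
+ * Illustrative end-to-end sketch using only functions declared in this header (error handling omitted;
+ * "model.bin" is a placeholder path):
+ *
+ *   gptoss_model_t model = NULL;
+ *   gptoss_context_t context = NULL;
+ *   gptoss_model_create_from_file("model.bin", &model);
+ *   gptoss_context_create(model, 0, 0, &context);    (default context length and batch size)
+ *   gptoss_context_append_chars(context, "Hello", 5, NULL);
+ *   uint32_t token = 0;
+ *   size_t num_sampled = 0;
+ *   gptoss_context_sample(context, 1.0f, 0, 1, &token, &num_sampled);
+ *   gptoss_context_release(context);
+ *   gptoss_model_release(model);
+ */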
+
+/*
+ * Increments a Context object's reference count.
+ *
+ * @param context Pointer to the Context object created by gptoss_context_create.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_retain(
+ gptoss_context_t context);
+
+/*
+ * Decrements a Context object's reference count and possibly releases associated resources.
+ *
+ * @param context Pointer to the Context object created by gptoss_context_create.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_context_release(
+ gptoss_context_t context);
+
+/*
+ * Creates a Sampler object.
+ *
+ * @param sampler_out Pointer to the Sampler object that will be created.
+ * Must be released with gptoss_sampler_release.
+ *
+ * On success, returns gptoss_status_success and saves a pointer to the created Sampler in the sampler_out argument.
+ * On failure, returns an error code and stores a null pointer in the sampler_out argument.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_sampler_create(
+ gptoss_sampler_t* sampler_out);
+
+/*
+ * Sets the sampling temperature for the Sampler.
+ *
+ * @param sampler Sampler object created by gptoss_sampler_create.
+ * @param temperature Temperature value to be set. Must be in the [0.0, 1.0] range.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_sampler_set_temperature(
+ gptoss_sampler_t sampler,
+ float temperature);
+
+/*
+ * Sets the Top-P nucleus sampling parameter for the Sampler.
+ *
+ * @param sampler Sampler object created by gptoss_sampler_create.
+ * @param top_p Top-P value to be set. Must be in the (0.0, 1.0] range.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_sampler_set_top_p(
+ gptoss_sampler_t sampler,
+ float top_p);
+
+/*
+ * Sets the presence penalty for the Sampler.
+ *
+ * @param sampler Sampler object created by gptoss_sampler_create.
+ * @param presence_penalty Presence penalty value to be set. Must be in the [-2.0, 2.0] range.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_sampler_set_presence_penalty(
+ gptoss_sampler_t sampler,
+ float presence_penalty);
+
+/*
+ * Sets the frequency penalty for the Sampler.
+ *
+ * @param sampler Sampler object created by gptoss_sampler_create.
+ * @param frequency_penalty Frequency penalty value to be set. Must be in the [-2.0, 2.0] range.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_sampler_set_frequency_penalty(
+ gptoss_sampler_t sampler,
+ float frequency_penalty);
+
+/*
+ * Increments a Sampler object's reference count.
+ *
+ * @param sampler Pointer to the Sampler object created by gptoss_sampler_create.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_sampler_retain(
+ gptoss_sampler_t sampler);
+
+/*
+ * Decrements a Sampler object's reference count and possibly releases associated resources.
+ *
+ * @param sampler Pointer to the Sampler object created by gptoss_sampler_create.
+ *
+ * On success, returns gptoss_status_success, otherwise returns an error code.
+ */
+enum gptoss_status GPTOSS_ABI gptoss_sampler_release(
+ gptoss_sampler_t sampler);
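+
+/*
+ * Illustrative sketch of configuring a Sampler with the setters declared above (how a Sampler is consumed
+ * during sampling is not described in this header):
+ *
+ *   gptoss_sampler_t sampler = NULL;
+ *   gptoss_sampler_create(&sampler);
+ *   gptoss_sampler_set_temperature(sampler, 0.7f);
+ *   gptoss_sampler_set_top_p(sampler, 0.9f);
+ *   gptoss_sampler_set_presence_penalty(sampler, 0.0f);
+ *   gptoss_sampler_set_frequency_penalty(sampler, 0.0f);
+ *   gptoss_sampler_release(sampler);
+ */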
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
diff --git a/gpt_oss/metal/include/gpt-oss/macros.h b/gpt_oss/metal/include/gpt-oss/macros.h
new file mode 100644
index 0000000000000000000000000000000000000000..e1c7f34e10202b9537225c87ddbc48ea7ed37d21
--- /dev/null
+++ b/gpt_oss/metal/include/gpt-oss/macros.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#ifndef GPTOSS_ABI
+ #define GPTOSS_ABI
+#endif // GPTOSS_ABI
diff --git a/gpt_oss/metal/include/gpt-oss/types.h b/gpt_oss/metal/include/gpt-oss/types.h
new file mode 100644
index 0000000000000000000000000000000000000000..f60aed5b2170a2217b671783a5d5cf79ee247612
--- /dev/null
+++ b/gpt_oss/metal/include/gpt-oss/types.h
@@ -0,0 +1,62 @@
+#pragma once
+
+/*
+ * Status codes returned by GPT-OSS API functions.
+ */
+enum gptoss_status {
+ gptoss_status_success = 0,
+ gptoss_status_invalid_argument = 1,
+ gptoss_status_unsupported_argument = 2,
+ gptoss_status_invalid_state = 3,
+ gptoss_status_io_error = 4,
+ gptoss_status_insufficient_memory = 5,
+ gptoss_status_insufficient_resources = 6,
+ gptoss_status_unsupported_system = 7,
+ gptoss_status_context_overflow = 8,
+};
+
+enum gptoss_special_token {
+ gptoss_special_token_invalid = 0,
+ gptoss_special_token_return = 1,
+ gptoss_special_token_start = 2,
+ gptoss_special_token_message = 3,
+ gptoss_special_token_end = 4,
+ gptoss_special_token_refusal = 5,
+ gptoss_special_token_constrain = 6,
+ gptoss_special_token_channel = 7,
+ gptoss_special_token_call = 8,
+ gptoss_special_token_untrusted = 9,
+ gptoss_special_token_end_untrusted = 10,
+ gptoss_special_token_max,
+};
+
+/*
+ * Model object is an opaque container comprised of:
+ * - Weights
+ * - Temporary buffers required to run the model
+ * - Any other resources required to run the model
+ */
+typedef struct gptoss_model* gptoss_model_t;
+
+typedef struct gptoss_tokenizer* gptoss_tokenizer_t;
+
+/*
+ * Context is an opaque container comprised of:
+ * - Input tokens
+ * - Distribution over the output tokens
+ * - KV cache
+ *
+ * Multiple contexts can be created and used with the same model.
+ */
+typedef struct gptoss_context* gptoss_context_t;
+
+/*
+ * Sampler is an opaque container for sampling parameters:
+ * - Temperature
+ * - Top-p (nucleus sampling)
+ * - Frequency penalty
+ * - Presence penalty
+ *
+ * Multiple samplers can be created and used with the same context.
+ */
+typedef struct gptoss_sampler* gptoss_sampler_t;
diff --git a/gpt_oss/metal/python/context.c b/gpt_oss/metal/python/context.c
new file mode 100644
index 0000000000000000000000000000000000000000..0bf016006e60c7e962f4dbc8875c12947bf62320
--- /dev/null
+++ b/gpt_oss/metal/python/context.c
@@ -0,0 +1,295 @@
+#include <Python.h>
+
+#include <gpt-oss.h>
+
+#include "module.h"
+
+
+static int PyGPTOSSContext_init(PyGPTOSSContext* self, PyObject* args, PyObject* kwargs) {
+ static char *kwlist[] = {"model", "context_length", "max_batch_tokens", NULL};
+ PyObject* model = NULL;
+ Py_ssize_t context_length = 0; // Default to 0 if None
+ Py_ssize_t max_batch_tokens = 0; // Default to 0 if None
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|$nn", kwlist,
+ &PyGPTOSSModel_Type, &model,
+ &context_length, &max_batch_tokens))
+ {
+ return -1;
+ }
+ if (context_length < 0) {
+ PyErr_SetString(PyExc_ValueError, "context_length must be a positive integer");
+ return -1;
+ }
+ if (max_batch_tokens < 0) {
+ PyErr_SetString(PyExc_ValueError, "max_batch_tokens must be a positive integer");
+ return -1;
+ }
+
+ enum gptoss_status status = gptoss_context_create(
+ ((const PyGPTOSSModel*) model)->handle,
+ (size_t) context_length,
+ (size_t) max_batch_tokens,
+ &self->handle);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ goto error;
+ }
+
+ return 0;
+
+error:
+ gptoss_context_release(self->handle);
+ self->handle = NULL;
+ return -1;
+}
+
+static void PyGPTOSSContext_dealloc(PyGPTOSSContext* self) {
+ (void) gptoss_context_release(self->handle);
+ self->handle = NULL;
+ PyObject_Del((PyObject*) self);
+}
+
+static PyObject* PyGPTOSSContext_copy(PyGPTOSSContext *self) {
+ PyGPTOSSContext* copy = (PyGPTOSSContext*) PyObject_New(PyGPTOSSContext, Py_TYPE(self));
+ if (copy == NULL) {
+ return NULL;
+ }
+
+ (void) gptoss_context_retain(self->handle);
+ copy->handle = self->handle;
+ return (PyObject*) copy;
+}
+
+static PyObject* PyGPTOSSContext_append(PyGPTOSSContext* self, PyObject* arg) {
+ if (PyBytes_Check(arg)) {
+ char* string_ptr = NULL;
+ Py_ssize_t string_size = 0;
+ if (PyBytes_AsStringAndSize(arg, &string_ptr, &string_size) < 0) {
+ return NULL;
+ }
+
+ const enum gptoss_status status = gptoss_context_append_chars(
+ self->handle, string_ptr, string_size, /*num_tokens_out=*/NULL);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+ } else if (PyUnicode_Check(arg)) {
+ Py_ssize_t string_size = 0;
+ const char* string_ptr = PyUnicode_AsUTF8AndSize(arg, &string_size);
+ if (string_ptr == NULL) {
+ return NULL;
+ }
+
+ const enum gptoss_status status = gptoss_context_append_chars(
+ self->handle, string_ptr, string_size, /*num_tokens_out=*/NULL);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+ } else if (PyLong_Check(arg)) {
+ const unsigned long token_as_ulong = PyLong_AsUnsignedLong(arg);
+ if (token_as_ulong == (unsigned long) -1 && PyErr_Occurred()) {
+ return NULL;
+ }
+
+ const uint32_t token = (uint32_t) token_as_ulong;
+ const enum gptoss_status status = gptoss_context_append_tokens(
+ self->handle, /*num_tokens=*/1, &token);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "expected a bytes or integer argument");
+ return NULL;
+ }
+}
+
+static PyObject* PyGPTOSSContext_process(PyGPTOSSContext* self) {
+ const enum gptoss_status status = gptoss_context_process(self->handle);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static PyObject* PyGPTOSSContext_sample(PyGPTOSSContext* self, PyObject* args, PyObject* kwargs) {
+ static char *kwlist[] = {"max_output_tokens", "temperature", "seed", NULL};
+ PyObject* token_list_obj = NULL;
+ uint32_t* token_ptr = NULL;
+
+ unsigned int max_output_tokens = 0;
+ unsigned long long seed = 0;
+ float temperature = 1.0f;
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "I|$fK", kwlist,
+ &max_output_tokens, &temperature, &seed))
+ {
+ return NULL;
+ }
+
+ token_ptr = (uint32_t*) PyMem_Malloc(max_output_tokens * sizeof(uint32_t));
+ if (token_ptr == NULL) {
+ goto error;
+ }
+
+ size_t num_tokens = 0;
+ const enum gptoss_status status = gptoss_context_sample(
+ self->handle, temperature, (uint64_t) seed,
+ (size_t) max_output_tokens, token_ptr, &num_tokens);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ goto error;
+ }
+
+ token_list_obj = PyList_New((Py_ssize_t) num_tokens);
+ if (token_list_obj == NULL) {
+ goto error;
+ }
+
+ for (size_t t = 0; t < num_tokens; t++) {
+ PyObject* token_obj = PyLong_FromUnsignedLong((unsigned long) token_ptr[t]);
+ if (token_obj == NULL) {
+ goto error;
+ }
+
+ PyList_SET_ITEM(token_list_obj, (Py_ssize_t) t, token_obj);
+ }
+
+ PyMem_Free(token_ptr);
+ return token_list_obj;
+
+error:
+ PyMem_Free(token_ptr);
+ Py_XDECREF(token_list_obj);
+ return NULL;
+}
+
+static PyObject* PyGPTOSSContext_reset(PyGPTOSSContext* self) {
+ const enum gptoss_status status = gptoss_context_reset(self->handle);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static PyMethodDef PyGPTOSSContext_methods[] = {
+ {"__copy__", (PyCFunction) PyGPTOSSContext_copy, METH_NOARGS, "Create a copy of the Context"},
+ {"append", (PyCFunction) PyGPTOSSContext_append, METH_O, "Append bytes to the Context"},
+ {"process", (PyCFunction) PyGPTOSSContext_process, METH_NOARGS, "Process tokens in the Context"},
+ {"sample", (PyCFunction) PyGPTOSSContext_sample, METH_VARARGS | METH_KEYWORDS, "Sample token predictions from the Context"},
+ {"reset", (PyCFunction) PyGPTOSSContext_reset, METH_NOARGS, "Discard the content of the Context"},
+ {NULL},
+};
+
+static PyObject* PyGPTOSSContext_get_num_tokens(PyGPTOSSContext* self, void* closure) {
+ size_t num_tokens = 0;
+ const enum gptoss_status status = gptoss_context_get_num_tokens(self->handle, &num_tokens);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ return PyLong_FromSize_t(num_tokens);
+}
+
+static PyObject* PyGPTOSSContext_get_max_tokens(PyGPTOSSContext* self, void* closure) {
+ size_t max_tokens = 0;
+ const enum gptoss_status status = gptoss_context_get_max_tokens(self->handle, &max_tokens);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ return PyLong_FromSize_t(max_tokens);
+}
+
+static PyObject* PyGPTOSSContext_get_tokens(PyGPTOSSContext* self, void* closure) {
+ PyObject* token_list_obj = NULL;
+ uint32_t* token_ptr = NULL;
+
+ size_t num_tokens = 0;
+ gptoss_context_get_tokens(self->handle, /*tokens_out=*/NULL, /*max_tokens=*/0, &num_tokens);
+
+ if (num_tokens != 0) {
+ token_ptr = (uint32_t*) PyMem_Malloc(num_tokens * sizeof(uint32_t));
+ if (token_ptr == NULL) {
+ // TODO: set exception
+ goto error;
+ }
+
+ enum gptoss_status status = gptoss_context_get_tokens(self->handle, token_ptr, /*max_tokens=*/num_tokens, &num_tokens);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ goto error;
+ }
+ }
+
+ token_list_obj = PyList_New((Py_ssize_t) num_tokens);
+ if (token_list_obj == NULL) {
+ goto error;
+ }
+
+ for (size_t t = 0; t < num_tokens; t++) {
+ PyObject* token_obj = PyLong_FromUnsignedLong((unsigned long) token_ptr[t]);
+ if (token_obj == NULL) {
+ goto error;
+ }
+
+ PyList_SET_ITEM(token_list_obj, (Py_ssize_t) t, token_obj);
+ }
+
+ PyMem_Free(token_ptr);
+ return token_list_obj;
+
+error:
+ PyMem_Free(token_ptr);
+ Py_XDECREF(token_list_obj);
+ return NULL;
+}
+
+static PyGetSetDef PyGPTOSSContext_getseters[] = {
+ (PyGetSetDef) {
+ .name = "num_tokens",
+ .get = (getter) PyGPTOSSContext_get_num_tokens,
+ .doc = "Current number of tokens in the context",
+ },
+ (PyGetSetDef) {
+ .name = "max_tokens",
+ .get = (getter) PyGPTOSSContext_get_max_tokens,
+ .doc = "Maximum number of tokens in the context",
+ },
+ (PyGetSetDef) {
+ .name = "tokens",
+ .get = (getter) PyGPTOSSContext_get_tokens,
+ .doc = "List of token IDs in the context",
+ },
+ {NULL} /* Sentinel */
+};
+
+PyTypeObject PyGPTOSSContext_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "gptoss.Context",
+ .tp_basicsize = sizeof(PyGPTOSSContext),
+ .tp_flags = 0
+ | Py_TPFLAGS_DEFAULT
+ | Py_TPFLAGS_BASETYPE,
+ .tp_doc = "Context object",
+ .tp_methods = PyGPTOSSContext_methods,
+ .tp_getset = PyGPTOSSContext_getseters,
+ .tp_new = PyType_GenericNew,
+ .tp_init = (initproc) PyGPTOSSContext_init,
+ .tp_dealloc = (destructor) PyGPTOSSContext_dealloc,
+};
diff --git a/gpt_oss/metal/python/model.c b/gpt_oss/metal/python/model.c
new file mode 100644
index 0000000000000000000000000000000000000000..49202a2c5cbdd8e57f0f60a538fb8c296aa391e0
--- /dev/null
+++ b/gpt_oss/metal/python/model.c
@@ -0,0 +1,94 @@
+#include <Python.h>
+
+#include <gpt-oss.h>
+
+#include "module.h"
+
+
+static int PyGPTOSSModel_init(PyGPTOSSModel* self, PyObject* args, PyObject* kwargs) {
+ enum gptoss_status status;
+ const char* filepath;
+
+ if (!PyArg_ParseTuple(args, "s", &filepath)) {
+ return -1;
+ }
+ status = gptoss_model_create_from_file(filepath, &self->handle);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return -1;
+ }
+ return 0;
+}
+
+static void PyGPTOSSModel_dealloc(PyGPTOSSModel* self) {
+ (void) gptoss_model_release(self->handle);
+ self->handle = NULL;
+ PyObject_Del((PyObject*) self);
+}
+
+static PyObject* PyGPTOSSModel_copy(PyGPTOSSModel* self) {
+ PyGPTOSSModel* copy = (PyGPTOSSModel*) PyObject_New(PyGPTOSSModel, Py_TYPE(self));
+ if (copy == NULL) {
+ return NULL;
+ }
+
+ (void) gptoss_model_retain(self->handle);
+ copy->handle = self->handle;
+ return (PyObject*) copy;
+}
+
+static PyMethodDef PyGPTOSSModel_methods[] = {
+ {"__copy__", (PyCFunction) PyGPTOSSModel_copy, METH_NOARGS, "Create a copy of the Model"},
+ {NULL},
+};
+
+static PyObject *PyGPTOSSModel_get_max_context_length(PyGPTOSSModel* self, void* closure) {
+ size_t max_context_length = 0;
+ const enum gptoss_status status = gptoss_model_get_max_context_length(self->handle, &max_context_length);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ return PyLong_FromSize_t(max_context_length);
+}
+
+static PyObject *PyGPTOSSModel_get_tokenizer(PyGPTOSSModel* self, void* closure) {
+ PyObject* args = PyTuple_Pack(1, self);
+ if (args == NULL) {
+ return NULL;
+ }
+
+ PyObject* tokenizer = PyObject_CallObject((PyObject*) &PyGPTOSSTokenizer_Type, args);
+ Py_DECREF(args);
+ return tokenizer;
+}
+
+static PyGetSetDef PyGPTOSSModel_getseters[] = {
+ (PyGetSetDef) {
+ .name = "max_context_length",
+ .get = (getter) PyGPTOSSModel_get_max_context_length,
+ .doc = "Maximum context length supported by the model",
+ },
+ (PyGetSetDef) {
+ .name = "tokenizer",
+ .get = (getter) PyGPTOSSModel_get_tokenizer,
+ .doc = "Tokenizer object associated with the model",
+ },
+ {NULL} // Sentinel
+};
+
+PyTypeObject PyGPTOSSModel_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "gptoss.Model",
+ .tp_basicsize = sizeof(PyGPTOSSModel),
+ .tp_flags = 0
+ | Py_TPFLAGS_DEFAULT
+ | Py_TPFLAGS_BASETYPE,
+ .tp_doc = "Model object",
+ .tp_methods = PyGPTOSSModel_methods,
+ .tp_getset = PyGPTOSSModel_getseters,
+ .tp_new = PyType_GenericNew,
+ .tp_init = (initproc) PyGPTOSSModel_init,
+ .tp_dealloc = (destructor) PyGPTOSSModel_dealloc,
+};
diff --git a/gpt_oss/metal/python/module.c b/gpt_oss/metal/python/module.c
new file mode 100644
index 0000000000000000000000000000000000000000..2910c8f1d0280981b1d2a04a1d0438f6faaa8d7e
--- /dev/null
+++ b/gpt_oss/metal/python/module.c
@@ -0,0 +1,67 @@
+#include <Python.h>
+
+#include "module.h"
+
+
+static PyMethodDef module_methods[] = {
+ {NULL, NULL, 0, NULL}
+};
+
+static PyModuleDef metal_module = {
+ PyModuleDef_HEAD_INIT,
+ "_metal",
+ "Local GPT-OSS inference",
+ -1,
+ module_methods
+};
+
+PyMODINIT_FUNC PyInit__metal(void) {
+ PyObject* module = NULL;
+ PyObject* model_type = NULL;
+ PyObject* tokenizer_type = NULL;
+ PyObject* context_type = NULL;
+
+ if (PyType_Ready(&PyGPTOSSModel_Type) < 0) {
+ goto error;
+ }
+ model_type = (PyObject*) &PyGPTOSSModel_Type;
+ Py_INCREF(model_type);
+
+ if (PyType_Ready(&PyGPTOSSTokenizer_Type) < 0) {
+ goto error;
+ }
+ tokenizer_type = (PyObject*) &PyGPTOSSTokenizer_Type;
+ Py_INCREF(tokenizer_type);
+
+ if (PyType_Ready(&PyGPTOSSContext_Type) < 0) {
+ goto error;
+ }
+ context_type = (PyObject*) &PyGPTOSSContext_Type;
+ Py_INCREF(context_type);
+
+ module = PyModule_Create(&metal_module);
+ if (module == NULL) {
+ goto error;
+ }
+
+ if (PyModule_AddObject(module, "Model", model_type) < 0) {
+ goto error;
+ }
+
+ if (PyModule_AddObject(module, "Tokenizer", tokenizer_type) < 0) {
+ goto error;
+ }
+
+ if (PyModule_AddObject(module, "Context", context_type) < 0) {
+ goto error;
+ }
+
+ return module;
+
+error:
+ Py_XDECREF(context_type);
+ Py_XDECREF(tokenizer_type);
+ Py_XDECREF(model_type);
+ Py_XDECREF(module);
+ return NULL;
+}
diff --git a/gpt_oss/metal/python/module.h b/gpt_oss/metal/python/module.h
new file mode 100644
index 0000000000000000000000000000000000000000..a837eee07ddb2a34f65e6648015a3b29d1066fea
--- /dev/null
+++ b/gpt_oss/metal/python/module.h
@@ -0,0 +1,22 @@
+#include <Python.h>
+
+#include <gpt-oss.h>
+
+typedef struct {
+ PyObject_HEAD
+ gptoss_model_t handle;
+} PyGPTOSSModel;
+
+typedef struct {
+ PyObject_HEAD
+ gptoss_tokenizer_t handle;
+} PyGPTOSSTokenizer;
+
+typedef struct {
+ PyObject_HEAD
+ gptoss_context_t handle;
+} PyGPTOSSContext;
+
+extern PyTypeObject PyGPTOSSModel_Type;
+extern PyTypeObject PyGPTOSSTokenizer_Type;
+extern PyTypeObject PyGPTOSSContext_Type;
diff --git a/gpt_oss/metal/python/tokenizer.c b/gpt_oss/metal/python/tokenizer.c
new file mode 100644
index 0000000000000000000000000000000000000000..3107dc6db5882b5dfabe36678054db6c31aa7c75
--- /dev/null
+++ b/gpt_oss/metal/python/tokenizer.c
@@ -0,0 +1,185 @@
+#include <Python.h>
+
+#include <gpt-oss.h>
+
+#include "module.h"
+
+static PyObject* PyGPTOSSTokenizer_new(PyTypeObject* subtype, PyObject* args, PyObject* kwargs) {
+ static char *kwlist[] = {"model", NULL};
+ PyObject* model = NULL;
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyGPTOSSModel_Type, &model)) {
+ return NULL;
+ }
+
+ PyGPTOSSTokenizer* self = (PyGPTOSSTokenizer*) subtype->tp_alloc(subtype, 0);
+ if (self == NULL) {
+ return NULL;
+ }
+
+ const enum gptoss_status status = gptoss_model_get_tokenizer(
+ ((const PyGPTOSSModel*) model)->handle,
+ &self->handle);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ return (PyObject*) self;
+}
+
+static void PyGPTOSSTokenizer_dealloc(PyGPTOSSTokenizer* self) {
+ (void) gptoss_tokenizer_release(self->handle);
+ self->handle = NULL;
+ PyObject_Del((PyObject*) self);
+}
+
+static PyObject* PyGPTOSSTokenizer_copy(PyGPTOSSTokenizer* self) {
+ PyGPTOSSTokenizer* copy = (PyGPTOSSTokenizer*) PyObject_New(PyGPTOSSTokenizer, Py_TYPE(self));
+ if (copy == NULL) {
+ return NULL;
+ }
+
+ (void) gptoss_tokenizer_retain(self->handle);
+ copy->handle = self->handle;
+ return (PyObject*) copy;
+}
+
+static PyObject* PyGPTOSSTokenizer_encode_special_token(PyGPTOSSTokenizer* self, PyObject* arg) {
+ if (PyUnicode_Check(arg)) {
+ const char* string_ptr = PyUnicode_AsUTF8(arg);
+ if (string_ptr == NULL) {
+ return NULL;
+ }
+
+ enum gptoss_special_token token_type = gptoss_special_token_invalid;
+ if (strcmp(string_ptr, "<|return|>") == 0) {
+ token_type = gptoss_special_token_return;
+ } else if (strcmp(string_ptr, "<|start|>") == 0) {
+ token_type = gptoss_special_token_start;
+ } else if (strcmp(string_ptr, "<|message|>") == 0) {
+ token_type = gptoss_special_token_message;
+ } else if (strcmp(string_ptr, "<|end|>") == 0) {
+ token_type = gptoss_special_token_end;
+ } else if (strcmp(string_ptr, "<|refusal|>") == 0) {
+ token_type = gptoss_special_token_refusal;
+ } else if (strcmp(string_ptr, "<|constrain|>") == 0) {
+ token_type = gptoss_special_token_constrain;
+ } else if (strcmp(string_ptr, "<|channel|>") == 0) {
+ token_type = gptoss_special_token_channel;
+ } else if (strcmp(string_ptr, "<|call|>") == 0) {
+ token_type = gptoss_special_token_call;
+ } else if (strcmp(string_ptr, "<|untrusted|>") == 0) {
+ token_type = gptoss_special_token_untrusted;
+ } else if (strcmp(string_ptr, "<|end_untrusted|>") == 0) {
+ token_type = gptoss_special_token_end_untrusted;
+ } else {
+ PyErr_Format(PyExc_ValueError, "unrecognized special token: %s", string_ptr);
+ return NULL;
+ }
+
+ uint32_t token_id = UINT32_MAX;
+ const enum gptoss_status status = gptoss_tokenizer_get_special_token_id(
+ self->handle, token_type, &token_id);
+ if (status != gptoss_status_success || token_id == UINT32_MAX) {
+ PyErr_Format(PyExc_ValueError, "tokenizer does not support the %s token", string_ptr);
+ return NULL;
+ }
+
+ return PyLong_FromUnsignedLong((unsigned long) token_id);
+ } else {
+ PyErr_SetString(PyExc_TypeError, "string argument expected");
+ return NULL;
+ }
+}
+
+static PyObject* PyGPTOSSTokenizer_decode(PyGPTOSSTokenizer* self, PyObject* args, PyObject* kwargs) {
+ static char *kwlist[] = {"token", NULL};
+ unsigned int token = 0; // Default to 0 if None
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "I", kwlist, &token)) {
+ return NULL;
+ }
+
+ const void* token_ptr = NULL;
+ size_t token_size = 0;
+ const enum gptoss_status status = gptoss_tokenizer_decode(self->handle, (uint32_t) token, &token_ptr, &token_size);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ return PyBytes_FromStringAndSize((const char*) token_ptr, (Py_ssize_t) token_size);
+}
+
+static PyMethodDef PyGPTOSSTokenizer_methods[] = {
+ {"__copy__", (PyCFunction) PyGPTOSSTokenizer_copy, METH_NOARGS, "Create a copy of the Tokenizer"},
+ {"encode_special_token", (PyCFunction) PyGPTOSSTokenizer_encode_special_token, METH_O, "Query ID of a special token"},
+ {"decode", (PyCFunction) PyGPTOSSTokenizer_decode, METH_VARARGS | METH_KEYWORDS, "Convert text token ID to bytes"},
+ {NULL},
+};
+
+static PyObject* PyGPTOSSTokenizer_get_num_text_tokens(PyGPTOSSTokenizer* self, void* closure) {
+ uint32_t num_text_tokens = 0;
+ const enum gptoss_status status = gptoss_tokenizer_get_num_text_tokens(self->handle, &num_text_tokens);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ return PyLong_FromUnsignedLong((unsigned long) num_text_tokens);
+}
+
+static PyObject* PyGPTOSSTokenizer_get_num_special_tokens(PyGPTOSSTokenizer* self, void* closure) {
+ uint32_t num_special_tokens = 0;
+ const enum gptoss_status status = gptoss_tokenizer_get_num_special_tokens(self->handle, &num_special_tokens);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ return PyLong_FromUnsignedLong((unsigned long) num_special_tokens);
+}
+
+static PyObject* PyGPTOSSTokenizer_get_num_tokens(PyGPTOSSTokenizer* self, void* closure) {
+ uint32_t num_tokens = 0;
+ const enum gptoss_status status = gptoss_tokenizer_get_num_tokens(self->handle, &num_tokens);
+ if (status != gptoss_status_success) {
+ // TODO: set exception
+ return NULL;
+ }
+
+ return PyLong_FromUnsignedLong((unsigned long) num_tokens);
+}
+
+static PyGetSetDef PyGPTOSSTokenizer_getseters[] = {
+ (PyGetSetDef) {
+ .name = "num_tokens",
+ .get = (getter) PyGPTOSSTokenizer_get_num_tokens,
+ .doc = "Total number of tokens in the tokenizer dictionary",
+ },
+ (PyGetSetDef) {
+ .name = "num_text_tokens",
+ .get = (getter) PyGPTOSSTokenizer_get_num_text_tokens,
+ .doc = "Number of text tokens in the tokenizer dictionary",
+ },
+ (PyGetSetDef) {
+ .name = "num_special_tokens",
+ .get = (getter) PyGPTOSSTokenizer_get_num_special_tokens,
+ .doc = "Number of special tokens in the tokenizer dictionary",
+ },
+ {NULL} /* Sentinel */
+};
+
+PyTypeObject PyGPTOSSTokenizer_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "gptoss.Tokenizer",
+ .tp_basicsize = sizeof(PyGPTOSSTokenizer),
+ .tp_flags = 0
+ | Py_TPFLAGS_DEFAULT
+ | Py_TPFLAGS_BASETYPE,
+ .tp_doc = "Tokenizer object",
+ .tp_methods = PyGPTOSSTokenizer_methods,
+ .tp_getset = PyGPTOSSTokenizer_getseters,
+ .tp_new = PyGPTOSSTokenizer_new,
+ .tp_dealloc = (destructor) PyGPTOSSTokenizer_dealloc,
+};
diff --git a/gpt_oss/metal/scripts/create-local-model.py b/gpt_oss/metal/scripts/create-local-model.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f16ba9912fa91079c609301b7c1c0f818178f9d
--- /dev/null
+++ b/gpt_oss/metal/scripts/create-local-model.py
@@ -0,0 +1,358 @@
+import argparse
+import os
+import math
+import sys
+import json
+import itertools
+import struct
+from uuid import UUID
+
+import tiktoken
+
+import torch
+from safetensors import safe_open
+from tqdm import tqdm
+from openai_harmony import load_harmony_encoding, HarmonyEncodingName
+
+parser = argparse.ArgumentParser(prog='create-local-model.py', description='Convert a checkpoint directory to a local model file')
+parser.add_argument('-s', '--src', metavar='DIR', type=str, required=True, help='Path to the input checkpoint directory')
+parser.add_argument('-d', '--dst', metavar='FILE', type=str, required=True, help='Path to the output model file')
+
+
+o200k_base = tiktoken.get_encoding("o200k_base")
+harmony_encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
+
+o200k_gptoss = tiktoken.Encoding(
+ name="o200k_gptoss",
+ pat_str=o200k_base._pat_str,
+ mergeable_ranks=o200k_base._mergeable_ranks,
+ special_tokens={
+ "<|reversed199998|>": 199998, # unused
+ "<|endoftext|>": 199999,
+ "<|untrusted|>": 200000,
+ "<|endofuntrusted|>": 200001,
+ "<|return|>": 200002,
+ "<|constrain|>": 200003,
+ "<|reversed200004|>": 200004, # unused
+ "<|channel|>": 200005,
+ "<|start|>": 200006,
+ "<|end|>": 200007,
+ "<|message|>": 200008,
+ "<|reversed200008|>": 200008, # unused
+ "<|reversed200009|>": 200009, # unused
+ "<|reversed200010|>": 200010, # unused
+ "<|reversed200011|>": 200011, # unused
+ "<|call|>": 200012,
+ "<|refusal|>": 200013,
+ }
+)
+
+FILE_MAGIC = struct.pack('ccccccccccccI', b'G', b'P', b'T', b'-', b'O', b'S', b'S', b' ', b'v', b'1', b'.', b'0', 0)
+SPECIAL_TOKEN_UUID = {
+ '<|start|>': UUID('55a77c2f-8a01-4c54-8ac2-313bfc7e208d').bytes,
+ '<|message|>': UUID('16e40431-f47f-4b22-b59b-8b278fc30a54').bytes,
+ '<|end|>': UUID('fcac2f6d-4705-4f6b-b228-642accac7238').bytes,
+ '<|return|>': UUID('f799ff69-1992-43c4-a3d8-d831f475dc75').bytes,
+ '<|refusal|>': UUID('e15ba702-28c4-4292-ab8f-ffa434709128').bytes,
+ '<|constrain|>': UUID('c0bb14c7-6022-49da-ad08-792d67e8b470').bytes,
+ '<|channel|>': UUID('fd3dda11-c8ab-4033-876e-d93deb172c93').bytes,
+ '<|call|>': UUID('1220f796-e388-4de5-b487-fe2eb5fe03c0').bytes,
+ '<|untrusted|>': UUID('07d7da55-b346-4cff-8b37-7cefacf8a3e8').bytes,
+ '<|end_untrusted|>': UUID('f265bd9c-c717-469e-a447-920687d65d90').bytes,
+}
+
+INCLUDE_SPECIAL_TOKENS = [
+ "<|start|>",
+ "<|message|>",
+ "<|end|>",
+ "<|return|>",
+ "<|refusal|>",
+ "<|constrain|>",
+ "<|channel|>",
+ "<|call|>",
+ "<|untrusted|>",
+ "<|end_untrusted|>",
+]
+
+GPTOSS_MODEL_UUID = UUID('df52dc86-1789-4ed0-a295-66f10508145b').bytes
+APPLE_GPU_LAYOUT_UUID = UUID('229177a8-5775-4268-bfd8-d588b351c56d').bytes
+TIKTOKEN_TOKENIZER_UUID = UUID('7401aded-2a95-40cb-b782-9ccebaafe72b').bytes
+
+UE8_OFFSET = 14 # bias to MXFP4 block scales
+
+def write_file_header(f):
+ f.write(FILE_MAGIC)
+
+def write_tokenizer_header(f,
+ num_special_tokens: int,
+ num_text_tokens: int,
+ regex_size: int,
+ tokens_size: int):
+ f.write(TIKTOKEN_TOKENIZER_UUID)
+ f.write(struct.pack(' 0
+ tokens_size += len(token_bytes) + 2 # uint16_t string length + string data
+ num_text_tokens += 1
+ # Then add all special tokens
+ num_included_tokens = 200013 + 1
+ print(f"Tokenizer: {num_included_tokens} tokens")
+
+ # Read from all files ending with .safetensors in the checkpoint directory
+ safetensor_files = [
+ os.path.join(options.src, fname)
+ for fname in os.listdir(options.src)
+ if fname.endswith(".safetensors")
+ ]
+ # Build a mapping from tensor name to filepath
+ tensor_name_to_file = {}
+ for safetensor_file in safetensor_files:
+ with safe_open(safetensor_file, framework="pt", device="cpu") as src:
+ for key in src.keys():
+ tensor_name_to_file[key] = safetensor_file
+
+ def get_tensor(name):
+ with safe_open(tensor_name_to_file[name], framework="pt", device="cpu") as src:
+ return src.get_tensor(name)
+
+ with open(options.dst, "wb") as dst:
+ write_file_header(dst)
+
+ yarn_low = (
+ head_dim / 2
+ * math.log(initial_context_length / (rope_ntk_beta * 2 * math.pi))
+ / math.log(rope_theta)
+ )
+ yarn_high = (
+ head_dim / 2
+ * math.log(initial_context_length / (rope_ntk_alpha * 2 * math.pi))
+ / math.log(rope_theta)
+ )
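+
+ # The values below define a YaRN-style interpolation ramp over the rotary dimensions:
+ # yarn_offset and yarn_scale map the [yarn_low, yarn_high] range onto [0, 1], and
+ # yarn_multiplier is the attention scaling term 0.1 * ln(rope_scaling_factor) + 1.0.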
+
+ write_model_header(dst,
+ context_length=int(initial_context_length * rope_scaling_factor),
+ num_blocks=num_blocks,
+ num_experts=num_experts,
+ num_active_experts=num_active_experts,
+ embedding_dim=embedding_dim,
+ mlp_dim=mlp_dim,
+ swiglu_limit=swiglu_limit,
+ head_dim=head_dim,
+ num_heads=num_q_heads,
+ num_kv_heads=num_kv_heads,
+ attention_window=attention_window,
+ rope_theta=rope_theta,
+ interpolation_scale=1.0 / rope_scaling_factor,
+ yarn_offset=-yarn_low / (yarn_high - yarn_low),
+ yarn_scale=1.0 / (yarn_high - yarn_low),
+ yarn_multiplier=0.1 * math.log(rope_scaling_factor) + 1.0,
+ rmsnorm_epsilon=1.0e-5)
+
+ write_tokenizer_header(dst,
+ num_special_tokens=num_included_tokens - num_text_tokens,
+ num_text_tokens=num_text_tokens,
+ regex_size=len(o200k_gptoss._pat_str.encode("ascii")) + 1,
+ tokens_size=tokens_size)
+
+ ### Tokenizer
+ # Special tokens
+ for token_idx in range(num_text_tokens, num_included_tokens):
+ token = o200k_gptoss.decode_single_token_bytes(token_idx).decode('ascii')
+ if token in INCLUDE_SPECIAL_TOKENS:
+ dst.write(SPECIAL_TOKEN_UUID[token])
+ else:
+ dst.write(bytes(16))
+ # Regex
+ dst.write(o200k_gptoss._pat_str.encode("ascii"))
+ dst.write(struct.pack('B', 0))
+ # Text tokens
+ tokenizer_bytes_written = 0
+ for t in range(num_text_tokens):
+ token_bytes = o200k_gptoss.decode_single_token_bytes(t)
+ assert len(token_bytes) > 0
+ dst.write(struct.pack('
+#include
+
+#include
+
+#pragma METAL fp math_mode(safe)
+#pragma METAL fp contract(off)
+
+
+kernel void gptoss_f32_accumulate_e4(
+ constant gptoss_accumulate_args& args [[ buffer(0) ]],
+ const device float4* input [[ buffer(1) ]],
+ const device gptoss_expert_prediction* expert [[ buffer(2) ]],
+ device float4* output [[ buffer(3) ]],
+ const device gptoss_control* control [[ buffer(4) ]],
+ uint2 gid [[threadgroup_position_in_grid]],
+ uint tid [[thread_index_in_threadgroup]],
+ uint2 threadgroup_size [[ threads_per_threadgroup ]])
+{
+ const uint num_active_experts = 4;
+ if (control->abort != 0) {
+ return;
+ }
+
+ const uint num_vecs_per_threadgroup = args.num_vecs_per_threadgroup;
+ const uint threadgroup_start = gid.x * num_vecs_per_threadgroup;
+ const uint num_vecs = args.num_vecs;
+ const uint threadgroup_end = metal::min(threadgroup_start + num_vecs_per_threadgroup, num_vecs);
+ const uint thread_start = threadgroup_start + tid;
+ uint num_iter = static_cast<uint>((threadgroup_end - thread_start + (threadgroup_size.x - 1)) / threadgroup_size.x);
+
+ const uint num_vecs_per_expert = args.num_vecs_per_expert;
+ const float scale0 = expert[gid.y * num_active_experts + 0].score;
+ const device float4* input0 = input + gid.y * num_vecs + thread_start;
+ const float scale1 = expert[gid.y * num_active_experts + 1].score;
+ const device float4* input1 = input0 + num_vecs_per_expert;
+ const float scale2 = expert[gid.y * num_active_experts + 2].score;
+ const device float4* input2 = input1 + num_vecs_per_expert;
+ const float scale3 = expert[gid.y * num_active_experts + 3].score;
+ const device float4* input3 = input2 + num_vecs_per_expert;
+ output += gid.y * num_vecs + thread_start;
+ for (; num_iter != 0; num_iter--) {
+ float4 acc = *output;
+ const float4 val0 = *input0;
+ const float4 val1 = *input1;
+ const float4 val2 = *input2;
+ const float4 val3 = *input3;
+ input0 += threadgroup_size.x;
+ acc = metal::fma(val0, scale0, acc);
+ input1 += threadgroup_size.x;
+ acc = metal::fma(val1, scale1, acc);
+ input2 += threadgroup_size.x;
+ acc = metal::fma(val2, scale2, acc);
+ input3 += threadgroup_size.x;
+ acc = metal::fma(val3, scale3, acc);
+ *output = acc;
+ output += threadgroup_size.x;
+ }
+}
diff --git a/gpt_oss/metal/source/context.c b/gpt_oss/metal/source/context.c
new file mode 100644
index 0000000000000000000000000000000000000000..7e4b786db78af571f139aac0387748ca770b0a32
--- /dev/null
+++ b/gpt_oss/metal/source/context.c
@@ -0,0 +1,1154 @@
+#include <assert.h>
+#include <inttypes.h>
+#include <stdatomic.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <gpt-oss.h>
+
+#include "internal/datatype.h"
+#include "internal/model.h"
+#include "internal/metal.h"
+#include "internal/metal-kernels.h"
+#include "internal/log.h"
+#include "internal/rng.h"
+
+
+enum gptoss_status GPTOSS_ABI gptoss_context_create(
+ gptoss_model_t model,
+ size_t context_length,
+ size_t max_batch_tokens,
+ gptoss_context_t* context_out)
+{
+ *context_out = NULL;
+
+ enum gptoss_status status = gptoss_status_success;
+ struct gptoss_context* context = NULL;
+
+ // Validate context_length
+ if (context_length == 0) {
+ context_length = model->context_length;
+ } else if (context_length > model->context_length) {
+ GPTOSS_LOG_ERROR("requested context length %zu exceeds model context length %" PRIu32,
+ context_length, model->context_length);
+ status = gptoss_status_invalid_argument;
+ goto cleanup;
+ }
+ assert(context_length != 0);
+ assert(context_length <= model->context_length);
+
+ // Validate max_batch_tokens
+ if (max_batch_tokens == 0) {
+ max_batch_tokens = GPTOSS_DEFAULT_BATCH_SIZE;
+ } else if (max_batch_tokens > context_length) {
+ GPTOSS_LOG_ERROR("requested max batch tokens %zu exceeds context length %zu",
+ max_batch_tokens, context_length);
+ status = gptoss_status_invalid_argument;
+ goto cleanup;
+ }
+ assert(max_batch_tokens != 0);
+ assert(max_batch_tokens <= context_length);
+
+ context = malloc(sizeof(struct gptoss_context));
+ if (context == NULL) {
+ GPTOSS_LOG_ERROR("failed to allocate %zu bytes for Context object",
+ sizeof(struct gptoss_context));
+ status = gptoss_status_insufficient_memory;
+ goto cleanup;
+ }
+ memset(context, 0, sizeof(struct gptoss_context));
+
+ atomic_store_explicit(&context->ref_count, 1, memory_order_relaxed);
+ context->max_tokens = context_length;
+ context->max_batch_tokens = max_batch_tokens;
+
+ // Activation buffers
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->embedding_dim * sizeof(float), NULL, &context->residual_activation_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->embedding_dim * sizeof(float), NULL, &context->rmsnorm_activation_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->head_dim * (model->num_heads + 2 * model->num_kv_heads) * sizeof(float), NULL, &context->qkv_activation_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->head_dim * model->num_heads * sizeof(float), NULL, &context->sdpa_activation_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->num_experts * sizeof(float), NULL, &context->gate_activation_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->num_experts * sizeof(struct gptoss_expert_prediction), NULL, &context->expert_activation_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, model->num_experts * sizeof(uint32_t), NULL, &context->expert_offset_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->num_active_experts * sizeof(uint32_t), NULL, &context->token_to_expert_routing_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->num_active_experts * model->embedding_dim * sizeof(float), NULL, &context->swiglu_input_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->num_active_experts * model->mlp_dim * sizeof(float), NULL, &context->swiglu_activation_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->num_active_experts * model->embedding_dim * sizeof(float), NULL, &context->moe_activation_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+
+ // Input/output buffers
+ status = gptoss_metal_buffer_create(&model->device, sizeof(struct gptoss_control), NULL, &context->control_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, context_length * sizeof(uint32_t), NULL, &context->token_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->vocabulary_size * sizeof(float), NULL, &context->score_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->vocabulary_size * sizeof(float), NULL, &context->prob_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * model->max_threadgroups * sizeof(float), NULL, &context->sum_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, max_batch_tokens * sizeof(uint64_t), NULL, &context->argmax_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+ status = gptoss_metal_buffer_create(&model->device, model->num_blocks * context_length * 2 * model->num_kv_heads * model->head_dim * sizeof(float), NULL, &context->kvcache_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+
+ context->kvcache_size = context->kvcache_buffer.size;
+ context->allocation_size =
+ context->residual_activation_buffer.size + context->rmsnorm_activation_buffer.size +
+ context->qkv_activation_buffer.size + context->sdpa_activation_buffer.size +
+ context->gate_activation_buffer.size + context->expert_activation_buffer.size +
+ context->expert_offset_buffer.size + context->token_to_expert_routing_buffer.size + context->swiglu_input_buffer.size +
+ context->swiglu_activation_buffer.size + context->moe_activation_buffer.size +
+ context->token_buffer.size + context->kvcache_buffer.size + context->score_buffer.size + context->argmax_buffer.size;
+
+ context->model = model;
+ gptoss_model_retain(model);
+ *context_out = context;
+ context = NULL;
+
+cleanup:
+ gptoss_context_release(context);
+ return status;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_get_num_tokens(
+ gptoss_context_t context,
+ size_t* num_tokens_out)
+{
+ *num_tokens_out = context->num_tokens;
+ return gptoss_status_success;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_get_max_tokens(
+ gptoss_context_t context,
+ size_t* max_tokens_out)
+{
+ *max_tokens_out = context->max_tokens;
+ return gptoss_status_success;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_get_tokens(
+ gptoss_context_t context,
+ uint32_t* tokens_out,
+ size_t max_tokens,
+ size_t* num_tokens_out)
+{
+ *num_tokens_out = context->num_tokens;
+ if (max_tokens < context->num_tokens) {
+ return gptoss_status_insufficient_memory;
+ }
+
+ if (context->num_tokens != 0) {
+ memcpy(tokens_out, context->token_buffer.ptr, context->num_tokens * sizeof(uint32_t));
+ }
+ return gptoss_status_success;
+}
+
+// Prefill: input_tokens_offset = number of tokens in KV cache, num_input_tokens > 0, num_output_tokens = 0.
+// Sampling: input_tokens_offset = number of tokens in the context - 1, num_input_tokens = 1, num_output_tokens = 1.
+// Perplexity: input_tokens_offset = 0, num_input_tokens > 1, num_output_tokens = num_input_tokens.
+static enum gptoss_status process_tokens(
+ gptoss_context_t context,
+ struct gptoss_metal_command_buffer* command_buffer,
+ size_t input_tokens_offset,
+ size_t num_input_tokens,
+ size_t num_output_tokens)
+{
+ assert(num_input_tokens != 0);
+ assert(num_input_tokens <= context->max_batch_tokens);
+ assert(num_output_tokens <= context->max_batch_tokens);
+ assert(num_input_tokens >= num_output_tokens);
+ const size_t dense_matmul_kernel_token_multiple_constraint = 64;
+ const size_t min_tokens_for_dense_moe_kernels = 64;
+
+ enum gptoss_status status = gptoss_status_success;
+ const struct gptoss_model* model = context->model;
+
+ const size_t attn_qkv_dim = model->head_dim * (model->num_heads + 2 * model->num_kv_heads);
+
+ const size_t input_tokens_end = input_tokens_offset + num_input_tokens;
+ for (size_t input_batch_start = input_tokens_offset;
+ input_batch_start < input_tokens_end;
+ input_batch_start += context->max_batch_tokens)
+ {
+ const size_t input_batch_size = math_min(context->max_batch_tokens, input_tokens_end - input_batch_start);
+ const size_t input_batch_end = input_batch_start + input_batch_size;
+ const size_t output_batch_size = math_sub_sat(num_output_tokens, input_tokens_end - input_batch_end);
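+        // output_batch_size counts how many tokens of this batch fall within the last
+        // num_output_tokens positions of the whole input range; only those tokens produce
+        // logits (math_sub_sat clamps the count at zero for earlier batches).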
+
+ status = gptoss_metal_command_buffer_encode_launch_bf16_f32_embeddings(
+ command_buffer,
+ &model->bf16_f32_embeddings_fn,
+ model->embeddings_threadgroup_size,
+ &context->token_buffer,
+ input_batch_start * sizeof(uint32_t),
+ &model->shared_weight_buffer,
+ /*weight_offset=*/0,
+ &context->residual_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/input_batch_size,
+ /*num_channels=*/model->embedding_dim);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode bf16_f32_embeddings kernel launch");
+ return status;
+ }
+ for (uint32_t n = 0; n < model->num_blocks; n++) {
+ const bool last_block = n + 1 == model->num_blocks;
+ const size_t num_block_output_tokens = last_block ? output_batch_size : input_batch_size;
+
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_rmsnorm(
+ command_buffer,
+ &model->f32_bf16w_rmsnorm_fn,
+ &context->residual_activation_buffer,
+ /*input_offset=*/0,
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->attn_rmsnorm_gain_offset + model->per_block_shared_weights_size * n,
+ &context->rmsnorm_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/input_batch_size,
+ /*num_channels=*/model->embedding_dim,
+ model->rmsnorm_epsilon);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_rmsnorm kernel launch");
+ return status;
+ }
+
+ if (input_batch_size % dense_matmul_kernel_token_multiple_constraint == 0) {
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_dense_matmul_qkv(
+ command_buffer,
+ &model->f32_bf16w_dense_matmul_qkv_fn,
+ &context->rmsnorm_activation_buffer,
+ /*input_offset=*/0,
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->attn_qkv_weight_offset + model->per_block_shared_weights_size * n,
+ &model->shared_weight_buffer,
+ /*bias_offset=*/model->attn_qkv_bias_offset + model->per_block_shared_weights_size * n,
+ &context->qkv_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/input_batch_size,
+ /*num_cols=*/model->embedding_dim,
+ /*num_rows=*/attn_qkv_dim);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_dense_matmul_qkv kernel launch");
+ return status;
+ }
+
+ status = gptoss_metal_command_buffer_encode_launch_f32_rope(
+ command_buffer,
+ &model->f32_rope_fn,
+ /*threadgroup_size=*/32,
+ &context->qkv_activation_buffer,
+ /*input_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ model->rope_theta,
+ model->interpolation_scale,
+ model->yarn_offset,
+ model->yarn_scale,
+ model->yarn_multiplier,
+ input_batch_size,
+ model->num_heads,
+ model->num_kv_heads,
+ model->head_dim,
+ /*token_offset=*/input_batch_start);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_rope kernel launch");
+ return status;
+ }
+
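+                // The copy offsets below imply a KV cache layout of
+                // [block][kv_head][token][k|v][head_dim] floats: the key and value slices for
+                // the same token and head are stored back to back.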
+ for (uint32_t t = 0; t < input_batch_size; t++) {
+ for (uint32_t kv = 0; kv < 2; kv++) {
+ for (uint32_t h = 0; h < model->num_kv_heads; h++) {
+ status = gptoss_metal_command_buffer_encode_copy_buffer(
+ command_buffer,
+ &context->qkv_activation_buffer,
+ /*input_offset=*/(t * attn_qkv_dim + (model->num_heads + kv * model->num_kv_heads + h) * model->head_dim) * sizeof(float),
+ &context->kvcache_buffer,
+ /*output_offset=*/(((n * model->num_kv_heads + h) * context->max_tokens + input_batch_start + t) * 2 + kv) * model->head_dim * sizeof(float),
+ /*size=*/model->head_dim * sizeof(float));
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode copy of token %" PRIu32 " to KV cache", t);
+ return status;
+ }
+ }
+ }
+ }
+ } else {
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_matmul_qkv(
+ command_buffer,
+ &model->f32_bf16w_matmul_qkv_fn,
+ model->attn_qkv_threadgroup_size,
+ &context->rmsnorm_activation_buffer,
+ /*input_offset=*/0,
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->attn_qkv_weight_offset + model->per_block_shared_weights_size * n,
+ &model->shared_weight_buffer,
+ /*bias_offset=*/model->attn_qkv_bias_offset + model->per_block_shared_weights_size * n,
+ &context->qkv_activation_buffer,
+ /*output_offset=*/0,
+ &context->kvcache_buffer,
+ /*kv_offset=*/n * model->num_kv_heads * context->max_tokens * 2 * model->head_dim * sizeof(float),
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/input_batch_size,
+ /*num_cols=*/model->embedding_dim,
+ /*num_q_heads=*/model->num_heads,
+ /*num_kv_heads=*/model->num_kv_heads,
+ /*attn_head_dim=*/model->head_dim,
+ /*token_offset=*/input_batch_start,
+ /*max_tokens=*/context->max_tokens,
+ /*rope_base=*/model->rope_theta,
+ /*interpolation_scale=*/model->interpolation_scale,
+ /*yarn_offset=*/model->yarn_offset,
+ /*yarn_scale=*/model->yarn_scale,
+ /*yarn_multiplier=*/model->yarn_multiplier);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_matmul_qkv kernel launch");
+ return status;
+ }
+ }
+
+ if (num_block_output_tokens != 0) {
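+                // SDPA only runs for tokens that need outputs. Even-numbered blocks use a
+                // sliding attention window (model->attention_window); odd-numbered blocks
+                // attend over the full context (window = UINT32_MAX).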
+ status = gptoss_metal_command_buffer_encode_launch_f32_sdpa(
+ command_buffer,
+ &model->f32_sdpa_q8_d64_fn,
+ &context->qkv_activation_buffer,
+ /*q_offset=*/attn_qkv_dim * (input_batch_size - num_block_output_tokens) * sizeof(float),
+ &context->kvcache_buffer,
+ /*kv_offset=*/n * model->num_kv_heads * context->max_tokens * 2 * model->head_dim * sizeof(float),
+ &model->shared_weight_buffer,
+ /*s_offset=*/model->attn_sdpa_sink_offset + model->per_block_shared_weights_size * n,
+ &context->sdpa_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*window=*/n % 2 == 0 ? model->attention_window : UINT32_MAX,
+ /*kv_stride=*/2 * context->max_tokens * model->head_dim,
+ num_block_output_tokens,
+ input_batch_start + input_batch_size - num_block_output_tokens,
+ model->num_heads, model->num_kv_heads, model->head_dim);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_sdpa kernel launch");
+ return status;
+ }
+
+ if (input_batch_size % dense_matmul_kernel_token_multiple_constraint == 0) {
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_dense_matmul_attn_output(
+ command_buffer,
+ &model->f32_bf16w_dense_matmul_attn_output_fn,
+ &context->sdpa_activation_buffer,
+ /*input_offset=*/0,
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->attn_out_weight_offset + model->per_block_shared_weights_size * n,
+ &model->shared_weight_buffer,
+ /*bias_offset=*/model->attn_out_bias_offset + model->per_block_shared_weights_size * n,
+ &context->residual_activation_buffer,
+ /*output_offset=*/model->embedding_dim * (input_batch_size - num_block_output_tokens) * sizeof(float),
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/num_block_output_tokens,
+ /*num_cols=*/model->num_heads * model->head_dim,
+ /*num_rows=*/model->embedding_dim);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_dense_matmul_attn_output kernel launch");
+ return status;
+ }
+ } else {
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_matmul_add(
+ command_buffer,
+ &model->f32_bf16w_matmul_fn,
+ model->attn_out_threadgroup_size,
+ &context->sdpa_activation_buffer,
+ /*input_offset=*/0,
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->attn_out_weight_offset + model->per_block_shared_weights_size * n,
+ &model->shared_weight_buffer,
+ /*bias_offset=*/model->attn_out_bias_offset + model->per_block_shared_weights_size * n,
+ &context->residual_activation_buffer,
+ /*output_offset=*/model->embedding_dim * (input_batch_size - num_block_output_tokens) * sizeof(float),
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/num_block_output_tokens,
+ /*num_cols=*/model->num_heads * model->head_dim,
+ /*num_rows=*/model->embedding_dim);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_matmul_add kernel launch");
+ return status;
+ }
+ }
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_rmsnorm(
+ command_buffer,
+ &model->f32_bf16w_rmsnorm_fn,
+ &context->residual_activation_buffer,
+ /*input_offset=*/model->embedding_dim * (input_batch_size - num_block_output_tokens) * sizeof(float),
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->mlp_rmsnorm_gain_offset + model->per_block_shared_weights_size * n,
+ &context->rmsnorm_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ num_block_output_tokens,
+ model->embedding_dim,
+ model->rmsnorm_epsilon);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_rmsnorm kernel launch");
+ return status;
+ }
+ if (input_batch_size % dense_matmul_kernel_token_multiple_constraint == 0) {
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_dense_matmul_mlp_gate(
+ command_buffer,
+ &model->f32_bf16w_dense_matmul_mlp_gate_fn,
+ &context->rmsnorm_activation_buffer,
+ /*input_offset=*/0,
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->mlp_gate_weight_offset + model->per_block_shared_weights_size * n,
+ &model->shared_weight_buffer,
+ /*bias_offset=*/model->mlp_gate_bias_offset + model->per_block_shared_weights_size * n,
+ &context->gate_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ num_block_output_tokens,
+ model->embedding_dim,
+ model->num_experts);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_dense_matmul_mlp_gate kernel launch");
+ return status;
+ }
+ } else {
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_matmul(
+ command_buffer,
+ &model->f32_bf16w_matmul_fn,
+ model->mlp_gate_threadgroup_size,
+ &context->rmsnorm_activation_buffer,
+ /*input_offset=*/0,
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->mlp_gate_weight_offset + model->per_block_shared_weights_size * n,
+ &model->shared_weight_buffer,
+ /*bias_offset=*/model->mlp_gate_bias_offset + model->per_block_shared_weights_size * n,
+ &context->gate_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/num_block_output_tokens,
+ /*num_cols=*/model->embedding_dim,
+ /*num_rows=*/model->num_experts);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_matmul kernel launch");
+ return status;
+ }
+ }
+
+ const char* kernel_name = NULL;
+ switch (model->num_experts) {
+ case 32:
+ kernel_name = "f32_topk_softmax_e32_k4_fn";
+ status = gptoss_metal_command_buffer_encode_launch_f32_topk(
+ command_buffer,
+ &model->f32_topk_softmax_e32_k4_fn,
+ &context->gate_activation_buffer, /*input_offset=*/0,
+ &context->expert_activation_buffer, /*output_offset=*/0,
+ &context->control_buffer, /*control_offset=*/0,
+ num_block_output_tokens,
+ model->num_experts,
+ model->num_active_experts);
+ break;
+ case 128:
+ kernel_name = "f32_topk_softmax_e128_k4_fn";
+ status = gptoss_metal_command_buffer_encode_launch_f32_topk(
+ command_buffer,
+ &model->f32_topk_softmax_e128_k4_fn,
+ &context->gate_activation_buffer, /*input_offset=*/0,
+ &context->expert_activation_buffer, /*output_offset=*/0,
+ &context->control_buffer, /*control_offset=*/0,
+ num_block_output_tokens,
+ model->num_experts,
+ model->num_active_experts);
+ break;
+ default:
+ status = gptoss_status_unsupported_argument;
+ GPTOSS_LOG_ERROR("missing Top-K kernel for %" PRIu32 " experts", model->num_experts);
+ return status;
+ }
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode %s kernel launch", kernel_name);
+ return status;
+ }
+
+ // If we have enough tokens in prefill, we will pick the prefill-optimized kernels.
+ if (num_block_output_tokens >= min_tokens_for_dense_moe_kernels) {
+ // Commit and wait for the command buffer to complete.
+ // As we need topk output to compute routing metadata.
+ status = gptoss_metal_command_buffer_commit(command_buffer);
+ if (status != gptoss_status_success) {
+ return status;
+ }
+
+ status = gptoss_metal_command_buffer_wait_completion(command_buffer, NULL);
+ if (status != gptoss_status_success) {
+ return status;
+ }
+ gptoss_metal_command_buffer_release(command_buffer);
+
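+                    // The per-expert bincount and offsets are computed on the CPU below, so all GPU
+                    // work encoded so far (including Top-K) must finish before reading
+                    // expert_activation_buffer.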
+ const size_t E = model->num_experts;
+ const size_t T = num_block_output_tokens * model->num_active_experts;
+
+ const struct gptoss_expert_prediction* preds =
+ (const struct gptoss_expert_prediction*) context->expert_activation_buffer.ptr;
+
+ uint32_t* token_to_expert_routing = (uint32_t*) context->token_to_expert_routing_buffer.ptr;
+ uint32_t* expert_offset = (uint32_t*) context->expert_offset_buffer.ptr;
+ // Zero out the expert offset buffer.
+ memset(expert_offset, 0, E * sizeof(uint32_t));
+
+ for (size_t i = 0; i < T; i++) {
+ const uint32_t expert_id = preds[i].expert_id;
+ token_to_expert_routing[i] = expert_offset[expert_id];
+ expert_offset[expert_id]++;
+ }
+
+ uint32_t total = 0;
+ // Prefix sum.
+ for (size_t i = 0; i < model->num_experts; i++) {
+ const uint32_t bin_size = expert_offset[i];
+ expert_offset[i] = total;
+ total += bin_size;
+ }
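+                    // Illustrative example (assumed values): with 4 experts and per-expert counts
+                    // {2, 0, 3, 1}, the exclusive prefix sum gives expert_offset = {0, 2, 2, 5};
+                    // token_to_expert_routing[i] holds each prediction's slot within its expert's
+                    // contiguous range, so the scatter below packs rows expert by expert.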
+
+ // Create a new command buffer.
+ status = gptoss_metal_command_buffer_create(&context->model->command_queue, command_buffer);
+ if (status != gptoss_status_success) {
+ return status;
+ }
+ status = gptoss_metal_command_buffer_encode_launch_f32_scatter(
+ command_buffer,
+ &model->f32_scatter_e4_fn,
+ &context->rmsnorm_activation_buffer,
+ /*input_offset=*/0,
+ &context->expert_activation_buffer,
+ /*expert_predictions_offset=*/0,
+ &context->expert_offset_buffer,
+ /*expert_offsets_offset=*/0,
+ &context->token_to_expert_routing_buffer,
+ /*intra_expert_offsets_offset=*/0,
+ &context->swiglu_input_buffer,
+ /*output_offset=*/0,
+ model->embedding_dim,
+ num_block_output_tokens,
+ model->num_active_experts);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_scatter kernel launch");
+ return status;
+ }
+ // Dense MoE SwiGLU matmul -- iterate over all experts.
+ const size_t total_tokens = num_block_output_tokens * model->num_active_experts;
+ for (size_t e = 0; e < model->num_experts; e++) {
+ bool last_expert = e == model->num_experts - 1;
+ uint32_t expert_tokens = last_expert ? total_tokens - expert_offset[e] : expert_offset[e + 1] - expert_offset[e];
+ if (expert_tokens == 0) {
+ continue;
+ }
+ status = gptoss_metal_command_buffer_encode_launch_f32_mf4w_moe_dense_matmul_swiglu(
+ command_buffer,
+ &model->f32_mf4w_moe_dense_matmul_swiglu_fn,
+ &context->swiglu_input_buffer,
+ /*input_offset=*/0,
+ &model->block_weight_buffers[n],
+ /*weight_block_offset=*/0,
+ &model->block_weight_buffers[n],
+ /*weight_scale_offset=*/model->mlp_swiglu_scale_offset,
+ &model->block_weight_buffers[n],
+ /*bias_offset=*/model->mlp_swiglu_bias_offset,
+ &context->swiglu_activation_buffer,
+ /*output_offset=*/0,
+ model->swiglu_limit,
+ /*expert_stride_bytes=*/model->per_expert_block_weight_size,
+ expert_tokens,
+ expert_offset[e],
+ e,
+ model->embedding_dim,
+ 2 * model->mlp_dim);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_mf4w_moe_dense_matmul_swiglu kernel launch");
+ return status;
+ }
+ }
+ // Dense MoE proj matmul -- again iterate over all experts.
+ for (size_t e = 0; e < model->num_experts; e++) {
+ bool last_expert = e == model->num_experts - 1;
+ uint32_t expert_tokens = last_expert ? total_tokens - expert_offset[e] : expert_offset[e + 1] - expert_offset[e];
+ if (expert_tokens == 0) {
+ continue;
+ }
+ status = gptoss_metal_command_buffer_encode_launch_f32_mf4w_moe_dense_matmul(
+ command_buffer,
+ &model->f32_mf4w_moe_dense_matmul_fn,
+ &context->swiglu_activation_buffer,
+ /*input_offset=*/0,
+ &model->block_weight_buffers[n],
+ /*weight_block_offset=*/model->mlp_out_block_offset,
+ &model->block_weight_buffers[n],
+ /*weight_scale_offset=*/model->mlp_out_scale_offset,
+ &model->block_weight_buffers[n],
+ /*bias_offset=*/model->mlp_out_bias_offset,
+ &context->moe_activation_buffer,
+ /*output_offset=*/0,
+ /*expert_stride_bytes=*/model->per_expert_block_weight_size,
+ expert_tokens,
+ expert_offset[e],
+ e,
+ model->mlp_dim,
+ model->embedding_dim);
+ if (status != gptoss_status_success) {
+                            GPTOSS_LOG_ERROR("failed to encode f32_mf4w_moe_dense_matmul kernel launch");
+ return status;
+ }
+ }
+
+ status = gptoss_metal_command_buffer_encode_launch_f32_gather_and_accumulate_e4(
+ command_buffer,
+ &model->f32_gather_and_accumulate_e4_fn,
+ &context->moe_activation_buffer,
+ /*input_offset=*/0,
+ &context->expert_activation_buffer,
+ /*expert_predictions_offset=*/0,
+ &context->expert_offset_buffer,
+ /*expert_offsets_offset=*/0,
+ &context->token_to_expert_routing_buffer,
+ /*intra_expert_offsets_offset=*/0,
+ &context->residual_activation_buffer,
+ /*output_offset=*/model->embedding_dim * (input_batch_size - num_block_output_tokens) * sizeof(float),
+ model->embedding_dim,
+ num_block_output_tokens,
+ model->num_active_experts);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_gather_and_accumulate_e4 kernel launch");
+ return status;
+ }
+
+ } else {
+ status = gptoss_metal_command_buffer_encode_launch_f32_mf4w_moe_matmul_swiglu(
+ command_buffer,
+ &model->f32_mf4w_moe_matmul_swiglu_fn,
+ model->mlp_swiglu_threadgroup_size,
+ &context->rmsnorm_activation_buffer,
+ /*input_offset=*/0,
+ &context->expert_activation_buffer,
+ /*expert_offset=*/0,
+ &model->block_weight_buffers[n],
+ /*weight_block_offset=*/0,
+ &model->block_weight_buffers[n],
+ /*weight_scale_offset=*/model->mlp_swiglu_scale_offset,
+ &model->block_weight_buffers[n],
+ /*bias_offset=*/model->mlp_swiglu_bias_offset,
+ &context->swiglu_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ model->swiglu_limit,
+ model->per_expert_block_weight_size,
+ num_block_output_tokens,
+ model->num_active_experts,
+ model->embedding_dim,
+ model->mlp_dim);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_mf4w_moe_matmul_swiglu kernel launch");
+ return status;
+ }
+
+ status = gptoss_metal_command_buffer_encode_launch_f32_mf4w_moe_matmul(
+ command_buffer,
+ &model->f32_mf4w_moe_matmul_fn,
+ model->mlp_out_threadgroup_size,
+ &context->swiglu_activation_buffer,
+ /*input_offset=*/0,
+ &context->expert_activation_buffer,
+ /*expert_offset=*/0,
+ &model->block_weight_buffers[n],
+ /*weight_block_offset=*/model->mlp_out_block_offset,
+ &model->block_weight_buffers[n],
+ /*weight_scale_offset=*/model->mlp_out_scale_offset,
+ &model->block_weight_buffers[n],
+ /*bias_offset=*/model->mlp_out_bias_offset,
+ &context->moe_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ model->per_expert_block_weight_size,
+ num_block_output_tokens,
+ model->num_active_experts,
+ model->mlp_dim,
+ model->embedding_dim);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_mf4w_moe_matmul kernel launch");
+ return status;
+ }
+
+ status = gptoss_metal_command_buffer_encode_launch_f32_accumulate(
+ command_buffer,
+ &model->f32_accumulate_e4_fn,
+ model->mlp_acc_threadgroup_size,
+ model->max_threadgroups,
+ &context->moe_activation_buffer,
+ /*input_offset=*/0,
+ &context->expert_activation_buffer,
+ /*expert_offset=*/0,
+ &context->residual_activation_buffer,
+ /*output_offset=*/model->embedding_dim * (input_batch_size - num_block_output_tokens) * sizeof(float),
+ &context->control_buffer,
+ /*control_offset=*/0,
+ model->embedding_dim,
+ num_block_output_tokens,
+ model->num_active_experts);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_accumulate kernel launch");
+ return status;
+ }
+ }
+ }
+ }
+
+ if (output_batch_size != 0) {
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_rmsnorm(
+ command_buffer,
+ &model->f32_bf16w_rmsnorm_fn,
+ &context->residual_activation_buffer,
+ /*input_offset=*/model->embedding_dim * (input_batch_size - output_batch_size) * sizeof(float),
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->rmsnorm_weight_offset,
+ &context->rmsnorm_activation_buffer,
+ /*output_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/output_batch_size,
+ /*num_channels=*/model->embedding_dim,
+ model->rmsnorm_epsilon);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_rmsnorm kernel launch");
+ return status;
+ }
+
+ status = gptoss_metal_command_buffer_encode_fill_buffer(
+ command_buffer,
+ &context->argmax_buffer,
+ /*offset=*/0,
+ /*size=*/sizeof(uint64_t) * output_batch_size,
+ /*fill_value=*/0xFF);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode fill buffer command");
+ return status;
+ }
+
+ status = gptoss_metal_command_buffer_encode_launch_f32_bf16w_unembedding(
+ command_buffer,
+ &model->f32_bf16w_unembedding_fn,
+ model->unembedding_threadgroup_size,
+ model->max_threadgroups,
+ &context->rmsnorm_activation_buffer,
+ /*input_offset=*/0,
+ &model->shared_weight_buffer,
+ /*weight_offset=*/model->unembedding_weight_offset,
+ &context->score_buffer,
+ /*output_offset=*/0,
+ &context->argmax_buffer,
+ /*argmax_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*num_tokens=*/output_batch_size,
+ /*num_cols=*/model->embedding_dim,
+ /*num_rows=*/model->vocabulary_size);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_bf16w_unembedding kernel launch");
+ return status;
+ }
+ }
+ }
+ return gptoss_status_success;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_append_chars(
+ gptoss_context_t context,
+ const char* text,
+ size_t text_length,
+ size_t* num_tokens_out)
+{
+ enum gptoss_status status = gptoss_status_success;
+ const struct gptoss_model* model = context->model;
+ const struct gptoss_tokenizer* tokenizer = model->tokenizer;
+ size_t num_appended_tokens = 0;
+ while (text_length != 0) {
+ if (context->num_tokens == context->max_tokens) {
+ status = gptoss_status_context_overflow;
+ break;
+ }
+ const char* tokens = tokenizer->tokens_ptr;
+ uint32_t best_token = UINT32_MAX;
+ uint32_t best_token_length = 0;
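+        // Greedy longest-match tokenization: the flat token table stores each token as a
+        // uint16 byte length followed by the token bytes; pick the longest entry that
+        // prefixes the remaining text.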
+ for (size_t t = 0; t < tokenizer->num_text_tokens; t++) {
+ uint16_t token_length;
+ memcpy(&token_length, tokens, sizeof(uint16_t));
+ tokens += sizeof(uint16_t);
+            if (token_length <= text_length && token_length > best_token_length &&
+                memcmp(text, tokens, token_length) == 0)
+            {
+                best_token = (uint32_t) t;
+                best_token_length = token_length;
+            }
+ tokens += token_length;
+ }
+
+ if (best_token == UINT32_MAX) {
+ GPTOSS_LOG_ERROR("failed to tokenize text \"%.*s\"", (int) text_length, text);
+ return gptoss_status_invalid_argument;
+ }
+
+ uint32_t* input_tokens = (uint32_t*) context->token_buffer.ptr;
+ if (context->num_kv_tokens > context->num_tokens) {
+ if (input_tokens[context->num_tokens] != best_token) {
+ input_tokens[context->num_tokens] = best_token;
+
+ // Invalidate the KV cache starting with the newly added token.
+ context->num_kv_tokens = context->num_tokens;
+ }
+ context->num_tokens++;
+ } else {
+ input_tokens[context->num_tokens++] = best_token;
+ }
+ num_appended_tokens++;
+ text += best_token_length;
+ text_length -= best_token_length;
+ }
+ if (num_tokens_out != NULL) {
+ *num_tokens_out = num_appended_tokens;
+ }
+ return status;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_append_tokens(
+ gptoss_context_t context,
+ size_t num_tokens,
+ const uint32_t* tokens)
+{
+ const struct gptoss_model* model = context->model;
+
+ // Validate all tokens
+ for (size_t t = 0; t < num_tokens; t++) {
+ const uint32_t token = tokens[t];
+ if (token >= model->vocabulary_size) {
+ GPTOSS_LOG_ERROR("token %" PRIu32 " at index %zu is out of bounds for vocabulary size %" PRIu32,
+ token, t, context->model->vocabulary_size);
+ return gptoss_status_invalid_argument;
+ }
+ }
+
+ enum gptoss_status status = gptoss_status_success;
+ uint32_t* input_tokens = (uint32_t*) context->token_buffer.ptr;
+ while (num_tokens != 0) {
+ if (context->num_tokens == context->max_tokens) {
+ status = gptoss_status_context_overflow;
+ break;
+ }
+
+ if (context->num_kv_tokens > context->num_tokens) {
+ const size_t num_tokens_to_verify = math_min(context->num_kv_tokens - context->num_tokens, num_tokens);
+ size_t num_verified_tokens = 0;
+ for (; num_verified_tokens < num_tokens_to_verify; num_verified_tokens++) {
+ if (input_tokens[context->num_tokens + num_verified_tokens] != tokens[num_verified_tokens]) {
+ // Invalidate the KV cache starting with the newly added tokens.
+ context->num_kv_tokens = context->num_tokens + num_verified_tokens;
+ break;
+ }
+ }
+
+ context->num_tokens += num_verified_tokens;
+ tokens += num_verified_tokens;
+ num_tokens -= num_verified_tokens;
+ } else {
+ const size_t num_tokens_to_copy = math_min(context->max_tokens - context->num_tokens, num_tokens);
+ memcpy(input_tokens + context->num_tokens, tokens, num_tokens_to_copy * sizeof(uint32_t));
+ context->num_tokens += num_tokens_to_copy;
+ tokens += num_tokens_to_copy;
+ num_tokens -= num_tokens_to_copy;
+ }
+ }
+
+ return status;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_process(
+ gptoss_context_t context)
+{
+ if (context->num_tokens > context->num_kv_tokens) {
+ struct gptoss_metal_command_buffer command_buffer = {0};
+
+ enum gptoss_status status = gptoss_metal_command_buffer_create(&context->model->command_queue, &command_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+
+ struct gptoss_control* control = (struct gptoss_control*) context->control_buffer.ptr;
+ control->abort = 0;
+
+ status = process_tokens(
+ context,
+ &command_buffer,
+ /*input_tokens_offset=*/context->num_kv_tokens,
+ /*num_input_tokens=*/context->num_tokens - context->num_kv_tokens,
+ /*num_output_tokens=*/0);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+
+ status = gptoss_metal_command_buffer_commit(&command_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+
+ status = gptoss_metal_command_buffer_wait_completion(&command_buffer, NULL);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+
+ context->num_kv_tokens = context->num_tokens;
+
+cleanup:
+ gptoss_metal_command_buffer_release(&command_buffer);
+ return status;
+ }
+
+ return gptoss_status_success;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_sample(
+ gptoss_context_t context,
+ float temperature,
+ uint64_t seed,
+ size_t max_tokens,
+ uint32_t* tokens_out,
+ size_t* num_tokens_out)
+{
+ enum gptoss_status status = gptoss_status_success;
+ const struct gptoss_model* model = context->model;
+ struct gptoss_metal_command_buffer command_buffer = {0};
+
+ *num_tokens_out = 0;
+
+ const uint32_t num_original_tokens = context->num_tokens;
+
+ status = gptoss_metal_command_buffer_create(&context->model->command_queue, &command_buffer);
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+
+ struct gptoss_control* control = (struct gptoss_control*) context->control_buffer.ptr;
+ control->abort = 0;
+
+ for (size_t t = 0; t < max_tokens; t++) {
+ if (context->num_kv_tokens < context->num_tokens) {
+ status = process_tokens(
+ context,
+ &command_buffer,
+ /*input_tokens_offset=*/context->num_kv_tokens,
+ /*num_input_tokens=*/context->num_tokens - context->num_kv_tokens,
+ /*num_output_tokens=*/1);
+ context->num_kv_tokens = context->num_tokens;
+ } else {
+ status = process_tokens(
+ context,
+ &command_buffer,
+ /*input_tokens_offset=*/context->num_tokens - 1,
+ /*num_input_tokens=*/1,
+ /*num_output_tokens=*/1);
+ }
+ if (status != gptoss_status_success) {
+ goto cleanup;
+ }
+
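+        // With temperature > 0, softmax over the logits and sample on-GPU; with temperature == 0,
+        // the argmax computed during unembedding is copied out directly.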
+ if (temperature != 0.0f) {
+ assert(context->num_processed_tokens != 0);
+ uint32_t num_threadgroups = 0;
+ uint32_t num_dims_per_threadgroup = 0;
+ status = gptoss_metal_command_buffer_encode_launch_f32_softmax(
+ &command_buffer,
+ &model->f32_softmax_fn,
+ /*threadgroup_size=*/512,
+ model->max_threadgroups,
+ &context->score_buffer,
+ /*score_offset=*/0,
+ &context->argmax_buffer,
+ /*argmax_offset=*/0,
+ &context->prob_buffer,
+ /*prob_offset=*/0,
+ &context->sum_buffer,
+ /*sum_offset=*/0,
+ &context->control_buffer,
+ /*control_offset=*/0,
+ model->vocabulary_size,
+ /*num_tokens=*/1,
+ temperature,
+ &num_threadgroups,
+ &num_dims_per_threadgroup);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_softmax kernel launch");
+ goto cleanup;
+ }
+
+ status = gptoss_metal_command_buffer_encode_launch_f32_sample(
+ &command_buffer,
+ &model->f32_sample_fn,
+ /*min_threadgroup_size=*/512,
+ &context->prob_buffer,
+ /*prob_offset=*/0,
+ &context->sum_buffer,
+ /*sum_offset=*/0,
+ &context->token_buffer,
+ /*token_offset=*/context->num_tokens * sizeof(uint32_t),
+ &context->control_buffer,
+ /*control_offset=*/0,
+ /*rng_seed=*/seed + UINT64_C(0x123456789ABCDEF),
+ /*rng_offset=*/context->num_tokens,
+ /*num_blocks=*/num_threadgroups,
+ /*num_channels=*/model->vocabulary_size,
+ /*num_channels_per_block=*/num_dims_per_threadgroup);
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode f32_sample kernel launch");
+ goto cleanup;
+ }
+ } else {
+ status = gptoss_metal_command_buffer_encode_copy_buffer(
+ &command_buffer,
+ &context->argmax_buffer,
+ /*input_offset=*/0,
+ &context->token_buffer,
+ /*output_offset=*/context->num_tokens * sizeof(uint32_t),
+ /*size=*/sizeof(uint32_t));
+ if (status != gptoss_status_success) {
+ GPTOSS_LOG_ERROR("failed to encode copy buffer");
+ goto cleanup;
+ }
+ }
+ context->num_tokens += 1;
+ context->num_kv_tokens = context->num_tokens;
+ }
+
+ gptoss_metal_command_buffer_commit(&command_buffer);
+ gptoss_metal_command_buffer_wait_completion(&command_buffer, NULL);
+
+ const uint32_t* token_ptr = (const uint32_t*) context->token_buffer.ptr;
+ const uint32_t num_generated_tokens = context->num_tokens - num_original_tokens;
+ memcpy(tokens_out, token_ptr + num_original_tokens, num_generated_tokens * sizeof(uint32_t));
+ *num_tokens_out = num_generated_tokens;
+
+cleanup:
+ gptoss_metal_command_buffer_release(&command_buffer);
+ return status;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_reset(
+ gptoss_context_t context)
+{
+ context->num_tokens = 0;
+
+ // Note: context->num_kv_tokens is not reset and context->input_tokens_buffer is not cleared.
+ // If the subsequently added tokens match the tokens already in the KV cache, we reuse the KV cache.
+
+ return gptoss_status_success;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_retain(
+ gptoss_context_t context)
+{
+ atomic_fetch_add_explicit(&context->ref_count, 1, memory_order_relaxed);
+ return gptoss_status_success;
+}
+
+enum gptoss_status GPTOSS_ABI gptoss_context_release(
+ gptoss_context_t context)
+{
+ if (context != NULL) {
+ if (atomic_fetch_sub_explicit(&context->ref_count, 1, memory_order_acq_rel) == 1) {
+ // Activation buffers
+ gptoss_metal_buffer_release(&context->residual_activation_buffer);
+ gptoss_metal_buffer_release(&context->rmsnorm_activation_buffer);
+ gptoss_metal_buffer_release(&context->qkv_activation_buffer);
+ gptoss_metal_buffer_release(&context->sdpa_activation_buffer);
+ gptoss_metal_buffer_release(&context->gate_activation_buffer);
+ gptoss_metal_buffer_release(&context->expert_activation_buffer);
+ gptoss_metal_buffer_release(&context->swiglu_activation_buffer);
+ gptoss_metal_buffer_release(&context->moe_activation_buffer);
+ gptoss_metal_buffer_release(&context->expert_offset_buffer);
+ gptoss_metal_buffer_release(&context->token_to_expert_routing_buffer);
+ gptoss_metal_buffer_release(&context->swiglu_input_buffer);
+
+ // Input/output buffers
+ gptoss_metal_buffer_release(&context->control_buffer);
+ gptoss_metal_buffer_release(&context->token_buffer);
+ gptoss_metal_buffer_release(&context->score_buffer);
+ gptoss_metal_buffer_release(&context->prob_buffer);
+ gptoss_metal_buffer_release(&context->sum_buffer);
+ gptoss_metal_buffer_release(&context->argmax_buffer);
+ gptoss_metal_buffer_release(&context->kvcache_buffer);
+
+ gptoss_model_release(context->model);
+
+ memset(context, 0, sizeof(struct gptoss_context));
+ free(context);
+ }
+ }
+ return gptoss_status_success;
+}
diff --git a/gpt_oss/metal/source/convert.metal b/gpt_oss/metal/source/convert.metal
new file mode 100644
index 0000000000000000000000000000000000000000..1c44c39e0459438a92b27a480138743564c6db01
--- /dev/null
+++ b/gpt_oss/metal/source/convert.metal
@@ -0,0 +1,64 @@
+#include <metal_stdlib>
+
+#include <internal/kernel-args.h>
+
+#pragma METAL fp math_mode(safe)
+#pragma METAL fp contract(off)
+
+
+kernel void gptoss_mf4_f32_convert(
+ constant gptoss_convert_args& args [[ buffer(0) ]],
+ const device uint4* blocks [[ buffer(1) ]],
+ const device uchar* scales [[ buffer(2) ]],
+ device float4* output [[ buffer(3) ]],
+ uint gid [[threadgroup_position_in_grid]],
+ uint tid [[thread_position_in_threadgroup]],
+ uint threadgroup_size [[ threads_per_threadgroup ]])
+{
+ const ulong num_vecs_per_threadgroup = args.num_vecs_per_threadgroup;
+ const ulong threadgroup_start = gid * num_vecs_per_threadgroup;
+ const ulong threadgroup_end = metal::min(threadgroup_start + num_vecs_per_threadgroup, args.num_vecs);
+ const ulong thread_start = threadgroup_start + tid;
+    uint num_iter = static_cast<uint>((threadgroup_end - thread_start + (threadgroup_size - 1)) / threadgroup_size);
+
+ blocks += thread_start;
+ scales += thread_start;
+ output += 8 * thread_start;
+ for (; num_iter != 0; num_iter--) {
+ const uint4 block = *blocks;
+        const float scale = as_type<float>((static_cast<uint>(*scales) + 14) << 23);
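+        // Decode trick: each 4-bit e2m1 value is expanded into the high byte of a 16-bit lane so
+        // that, reinterpreted as an IEEE half, it equals the fp4 value scaled by 2^-14; the ue8m0
+        // scale above is biased by +14 before being placed into the float exponent field to
+        // compensate.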
+ uint4 block02468ACEGIKMOQSU = block + block;
+ uint4 block13579BDFHJLNPRTV = block >> 3;
+ block02468ACEGIKMOQSU &= 0x1E1E1E1Eu;
+ block13579BDFHJLNPRTV &= 0x1E1E1E1Eu;
+ block02468ACEGIKMOQSU += 0x70707070u;
+ block13579BDFHJLNPRTV += 0x70707070u;
+ block02468ACEGIKMOQSU &= 0x8E8E8E8Eu;
+ block13579BDFHJLNPRTV &= 0x8E8E8E8Eu;
+ const uint4 block26AEIMQU = block02468ACEGIKMOQSU & 0xFF00FF00u;
+ const uint4 block048CGKOS = (block02468ACEGIKMOQSU << 8) & 0xFF00FF00u;
+ const uint4 block37BFJNRV = block13579BDFHJLNPRTV & 0xFF00FF00u;
+ const uint4 block159DHLPT = (block13579BDFHJLNPRTV << 8) & 0xFF00FF00u;
+        const float4 block048C = static_cast<float4>(as_type<half4>(block048CGKOS.xy)) * scale;
+        const float4 blockGKOS = static_cast<float4>(as_type<half4>(block048CGKOS.zw)) * scale;
+        const float4 block26AE = static_cast<float4>(as_type<half4>(block26AEIMQU.xy)) * scale;
+        const float4 blockIMQU = static_cast<float4>(as_type<half4>(block26AEIMQU.zw)) * scale;
+        const float4 block159D = static_cast<float4>(as_type<half4>(block159DHLPT.xy)) * scale;
+        const float4 blockHLPT = static_cast<float4>(as_type<half4>(block159DHLPT.zw)) * scale;
+        const float4 block37BF = static_cast<float4>(as_type<half4>(block37BFJNRV.xy)) * scale;
+        const float4 blockJNRV = static_cast<float4>(as_type<half4>(block37BFJNRV.zw)) * scale;
+
+ output[0] = (float4) { block048C.x, block159D.x, block26AE.x, block37BF.x };
+ output[1] = (float4) { block048C.y, block159D.y, block26AE.y, block37BF.y };
+ output[2] = (float4) { block048C.z, block159D.z, block26AE.z, block37BF.z };
+ output[3] = (float4) { block048C.w, block159D.w, block26AE.w, block37BF.w };
+ output[4] = (float4) { blockGKOS.x, blockHLPT.x, blockIMQU.x, blockJNRV.x };
+ output[5] = (float4) { blockGKOS.y, blockHLPT.y, blockIMQU.y, blockJNRV.y };
+ output[6] = (float4) { blockGKOS.z, blockHLPT.z, blockIMQU.z, blockJNRV.z };
+ output[7] = (float4) { blockGKOS.w, blockHLPT.w, blockIMQU.w, blockJNRV.w };
+
+ blocks += threadgroup_size;
+ scales += threadgroup_size;
+ output += 8 * threadgroup_size;
+ }
+}
diff --git a/gpt_oss/metal/source/embeddings.metal b/gpt_oss/metal/source/embeddings.metal
new file mode 100644
index 0000000000000000000000000000000000000000..9cc7d1214fe100a897060081ddc0f8855b163b3e
--- /dev/null
+++ b/gpt_oss/metal/source/embeddings.metal
@@ -0,0 +1,29 @@
+#include <internal/kernel-args.h>
+
+#pragma METAL fp math_mode(safe)
+#pragma METAL fp contract(off)
+
+
+kernel void gptoss_bf16_f32_embeddings(
+ constant gptoss_embeddings_args& args [[ buffer(0) ]],
+ const device uint* tokens [[ buffer(1) ]],
+ const device bfloat4* weights [[ buffer(2) ]],
+ device float4* output [[ buffer(3) ]],
+ const device gptoss_control* control [[ buffer(4) ]],
+ uint gid [[threadgroup_position_in_grid]],
+ uint tid [[thread_position_in_threadgroup]],
+ uint threadgroup_size [[ threads_per_threadgroup ]])
+{
+ if (control->abort != 0) {
+ return;
+ }
+
+ const uint t = tokens[gid];
+
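+    // One threadgroup per token: gid selects the token id and the threads stride over the
+    // embedding row, upcasting bf16 weights to f32.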
+ weights += t * args.num_vecs;
+ output += gid * args.num_vecs;
+ for (uint i = tid; i < args.num_vecs; i += threadgroup_size) {
+ const bfloat4 w = weights[i];
+        output[i] = static_cast<float4>(w);
+ }
+}
diff --git a/gpt_oss/metal/source/gather_and_accumulate.metal b/gpt_oss/metal/source/gather_and_accumulate.metal
new file mode 100644
index 0000000000000000000000000000000000000000..fd70a94fec8571b871dc765c67897fa333b9f316
--- /dev/null
+++ b/gpt_oss/metal/source/gather_and_accumulate.metal
@@ -0,0 +1,74 @@
+#include <metal_integer>
+#include <metal_math>
+#include <metal_stdlib>
+
+#include <internal/kernel-args.h>
+
+// TODO(ibrahim): This is not optimal as each thread only gathers and accumulates a single float4. To amortize the
+// cost of reading the expert, offset and scales for a token, we should let each thread gather and accumulate several
+// float4s.
+kernel void gptoss_f32_gather_and_accumulate_e4(
+ constant gptoss_gather_args& args [[ buffer(0) ]],
+ const device float* in [[ buffer(1) ]],
+ const device gptoss_expert_prediction* __restrict__ expert_predictions [[ buffer(2) ]],
+ const device uint* expert_offsets [[ buffer(3) ]],
+ const device uint* intra_expert_offsets [[ buffer(4) ]],
+ device float* out [[ buffer(5) ]],
+ uint3 gid [[thread_position_in_grid]])
+{
+ const uint T = args.tokens;
+ const uint k = args.active_experts_per_token;
+ const uint D = args.token_stride;
+
+ assert((D & 3u) == 0);
+ assert(k == 4);
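+    // One thread per (token, float4 column): gid.y selects the output token and gid.x the
+    // 4-channel slice; each of the token's 4 expert outputs is gathered and accumulated below.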
+
+ const uint row = gid.y;
+ if (row >= T) {
+ return;
+ }
+
+ const uint col_vec4 = gid.x;
+ const uint col = col_vec4 * 4u;
+ if (col >= D) {
+ return;
+ }
+
+    device float4* dst4 = reinterpret_cast<device float4*>(out + row * D + col);
+
+ const uint base = row * k;
+ const gptoss_expert_prediction expert0 = expert_predictions[base];
+ const gptoss_expert_prediction expert1 = expert_predictions[base + 1];
+ const gptoss_expert_prediction expert2 = expert_predictions[base + 2];
+ const gptoss_expert_prediction expert3 = expert_predictions[base + 3];
+ const uint expert0_id = expert0.expert_id;
+ const uint expert1_id = expert1.expert_id;
+ const uint expert2_id = expert2.expert_id;
+ const uint expert3_id = expert3.expert_id;
+ const float scale0 = expert0.score;
+ const float scale1 = expert1.score;
+ const float scale2 = expert2.score;
+ const float scale3 = expert3.score;
+ const uint4 current_intra_expert_offsets =
+        *reinterpret_cast<const device uint4*>(&intra_expert_offsets[base]);
+ // Get the row indices for the current expert ids
+ const uint r0 = expert_offsets[expert0_id] + current_intra_expert_offsets.x;
+ const uint r1 = expert_offsets[expert1_id] + current_intra_expert_offsets.y;
+ const uint r2 = expert_offsets[expert2_id] + current_intra_expert_offsets.z;
+ const uint r3 = expert_offsets[expert3_id] + current_intra_expert_offsets.w;
+
+    const device float4* src0 =
+        reinterpret_cast<const device float4*>(in + r0 * D + col);
+    const device float4* src1 =
+        reinterpret_cast<const device float4*>(in + r1 * D + col);
+    const device float4* src2 =
+        reinterpret_cast<const device float4*>(in + r2 * D + col);
+    const device float4* src3 =
+        reinterpret_cast<const device float4*>(in + r3 * D + col);
+
+ float4 acc = *dst4;
+ acc = metal::fma(*src0, scale0, acc);
+ acc = metal::fma(*src1, scale1, acc);
+ acc = metal::fma(*src2, scale2, acc);
+ acc = metal::fma(*src3, scale3, acc);
+ *dst4 = acc;
+}
\ No newline at end of file
diff --git a/gpt_oss/metal/source/generate.c b/gpt_oss/metal/source/generate.c
new file mode 100644
index 0000000000000000000000000000000000000000..ae6e680d30cb0c8e3de524a450a2429d348916ee
--- /dev/null
+++ b/gpt_oss/metal/source/generate.c
@@ -0,0 +1,317 @@
+#include <assert.h>
+#include <inttypes.h>
+#include <math.h>
+#include <signal.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <mach/mach_time.h>
+
+#include <gpt-oss.h>
+
+#include "internal/model.h"
+
+struct {
+ atomic_uint_least64_t inference_bytes;
+ atomic_size_t num_prefill_tokens;
+ atomic_uint_least64_t prefill_microseconds;
+ atomic_size_t num_generated_tokens;
+ atomic_uint_least64_t generation_microseconds;
+} globals = {
+ .inference_bytes = 0,
+ .num_prefill_tokens = 0,
+ .prefill_microseconds = 0,
+ .num_generated_tokens = 0,
+ .generation_microseconds = 0,
+};
+
+struct options {
+ const char* model;
+ const char* prompt;
+ size_t context_length;
+ size_t max_tokens;
+ float temperature;
+ bool verbose;
+};
+
+static inline double mach_timestamp_diff_to_seconds(uint64_t start_timestamp, uint64_t end_timestamp) {
+ static mach_timebase_info_data_t timebase_info = {0};
+ if (timebase_info.denom == 0) {
+ mach_timebase_info(&timebase_info);
+ }
+ const uint64_t elapsed_mach_time = end_timestamp - start_timestamp;
+ return ((double) elapsed_mach_time * (double) timebase_info.numer) / ((double) timebase_info.denom * 1.0e+9);
+}
+
+static inline uint64_t mach_timestamp_diff_to_microseconds(uint64_t start_timestamp, uint64_t end_timestamp) {
+ static mach_timebase_info_data_t timebase_info = {0};
+ if (timebase_info.denom == 0) {
+ mach_timebase_info(&timebase_info);
+ }
+ const uint64_t elapsed_mach_time = end_timestamp - start_timestamp;
+ const uint64_t denominator = timebase_info.denom * UINT64_C(1000);
+ return (elapsed_mach_time * timebase_info.numer + denominator / 2) / denominator;
+}
+
+static void print_usage(const char* program_name) {
+    printf("Usage: %s <model-path> [-p <prompt>] [-n <max-tokens>]\n", program_name);
+}
+
+struct options parse_options(int argc, char** argv) {
+ struct options options = (struct options) {
+ .model = NULL,
+ .prompt = NULL,
+ .context_length = 0,
+ .max_tokens = 0,
+ .temperature = 0.0f,
+ .verbose = false,
+ };
+ if (argc < 2) {
+ fprintf(stderr, "Error: missing required command-line argument\n");
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "--help") == 0) {
+ print_usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ } else if (strcmp(argv[i], "-p") == 0 || strcmp(argv[i], "--prompt") == 0) {
+ if (i + 1 >= argc) {
+ fprintf(stderr, "Error: missing argument for %s\n", argv[i]);
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ options.prompt = argv[++i];
+ } else if (strcmp(argv[i], "--context-length") == 0) {
+ if (i + 1 >= argc) {
+ fprintf(stderr, "Error: missing argument for --context-length\n");
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ char* context_length_start = argv[++i];
+ char* context_length_end = context_length_start;
+ options.context_length = strtoul(context_length_start, &context_length_end, 10);
+ if (context_length_end == context_length_start || *context_length_end != 0) {
+ fprintf(stderr, "Error: failed to parse context length value \"%s\"\n", context_length_start);
+ exit(EXIT_FAILURE);
+ }
+ } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--max-tokens") == 0) {
+ if (i + 1 >= argc) {
+ fprintf(stderr, "Error: missing argument for %s\n", argv[i]);
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ char* max_tokens_start = argv[++i];
+ char* max_tokens_end = max_tokens_start;
+ options.max_tokens = strtoul(max_tokens_start, &max_tokens_end, 10);
+ if (max_tokens_end == max_tokens_start || *max_tokens_end != 0) {
+                fprintf(stderr, "Error: failed to parse max tokens value \"%s\"\n", max_tokens_start);
+ exit(EXIT_FAILURE);
+ }
+ if (options.max_tokens == 0) {
+ fprintf(stderr, "Error: invalid max tokens value %zu\n", options.max_tokens);
+ exit(EXIT_FAILURE);
+ }
+ } else if (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--temperature") == 0) {
+ if (i + 1 >= argc) {
+ fprintf(stderr, "Error: missing argument for %s\n", argv[i]);
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ char* temperature_start = argv[++i];
+ char* temperature_end = temperature_start;
+ options.temperature = strtof(temperature_start, &temperature_end);
+ if (temperature_end == temperature_start || *temperature_end != 0) {
+ fprintf(stderr, "Error: failed to parse temperature value \"%s\"\n", temperature_start);
+ exit(EXIT_FAILURE);
+ }
+ if (signbit(options.temperature) != 0 || !(options.temperature <= 2.0f)) {
+ fprintf(stderr, "Error: invalid temperature value %f\n", options.temperature);
+ exit(EXIT_FAILURE);
+ }
+ } else if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ options.verbose = true;
+ } else {
+ if (options.model == NULL) {
+ options.model = argv[i];
+ } else {
+ fprintf(stderr, "Error: unexpected command-line argument %s\n", argv[i]);
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+ if (options.model == NULL) {
+ fprintf(stderr, "Error: missing required model argument\n");
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ if (options.prompt == NULL) {
+ fprintf(stderr, "Error: missing required prompt argument\n");
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ return options;
+}
+
+
+static void print_profile() {
+ const size_t num_prefill_tokens = atomic_load(&globals.num_prefill_tokens);
+ const uint64_t prefill_microseconds = atomic_load(&globals.prefill_microseconds);
+ const size_t num_generated_tokens = atomic_load(&globals.num_generated_tokens);
+ const uint64_t generation_microseconds = atomic_load(&globals.generation_microseconds);
+ const uint64_t inference_bytes = atomic_load(&globals.inference_bytes);
+ if (num_prefill_tokens != 0 || num_generated_tokens != 0) {
+ printf("\n");
+ }
+ if (num_prefill_tokens != 0) {
+ printf("Prefill speed (%zu tokens): %.1f tokens/second\n",
+ num_prefill_tokens,
+ (double) num_prefill_tokens / (double) prefill_microseconds * 1.0e+6);
+ }
+ if (num_generated_tokens != 0) {
+ printf("Generation speed (%zu tokens): %.1f tokens/second\n",
+ num_generated_tokens,
+ (double) num_generated_tokens / (double) generation_microseconds * 1.0e+6);
+ }
+}
+
+static void ctrl_c_handler(int signum) {
+ print_profile();
+ exit(EXIT_SUCCESS);
+}
+
+int main(int argc, char *argv[]) {
+ enum gptoss_status status;
+ gptoss_model_t model = NULL;
+ gptoss_tokenizer_t tokenizer = NULL;
+ gptoss_context_t context = NULL;
+
+    struct sigaction act = { .sa_handler = ctrl_c_handler };
+    sigaction(SIGINT, &act, NULL);
+
+ setvbuf(stdout, NULL, _IONBF, 0);
+
+ struct options options = parse_options(argc, argv);
+
+ const uint64_t load_start_time = mach_continuous_time();
+ status = gptoss_model_create_from_file(options.model, &model);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to load model from file %s\n", options.model);
+ goto error;
+ }
+ size_t max_model_context_length = 0;
+ status = gptoss_model_get_max_context_length(model, &max_model_context_length);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to query maximum context length\n");
+ goto error;
+ }
+ assert(max_model_context_length != 0);
+ if (options.context_length == 0) {
+ options.context_length = max_model_context_length;
+ } else if (options.context_length > max_model_context_length) {
+ fprintf(stderr, "Error: context length %zu exceeds maximum context length %zu supported by the model\n", options.context_length, max_model_context_length);
+ goto error;
+ }
+
+ status = gptoss_model_get_tokenizer(model, &tokenizer);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to retrieve Tokenizer\n");
+ goto error;
+ }
+
+ uint32_t return_token_id = UINT32_MAX;
+ status = gptoss_tokenizer_get_special_token_id(tokenizer, gptoss_special_token_return, &return_token_id);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to query end-of-text token ID\n");
+ goto error;
+ }
+
+ status = gptoss_context_create(model, options.context_length, /*max_batch_tokens=*/0, &context);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to create Context object\n");
+ goto error;
+ }
+ if (options.verbose) {
+ printf("Model weights size: %.2lf MB\n", (double) model->weights_size * 0x1.0p-20);
+ printf("Model allocation size: %.2lf MB\n", (double) model->allocation_size * 0x1.0p-20);
+ printf("Context allocation size: %.2lf MB\n", (double) context->allocation_size * 0x1.0p-20);
+ printf(" Including KV cache: %.2lf MB\n", (double) context->kvcache_size * 0x1.0p-20);
+ }
+
+ const uint64_t load_end_time = mach_continuous_time();
+ const double load_elapsed_seconds = mach_timestamp_diff_to_seconds(load_start_time, load_end_time);
+ if (options.verbose) {
+ printf("Loaded model in %.3f seconds\n", load_elapsed_seconds);
+ }
+
+ const uint64_t prefill_start_time = mach_continuous_time();
+ size_t num_prefill_tokens = 0;
+ status = gptoss_context_append_chars(context, options.prompt, strlen(options.prompt), &num_prefill_tokens);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to tokenize prompt \"%s\"\n", options.prompt);
+ goto error;
+ }
+ atomic_store(&globals.num_prefill_tokens, num_prefill_tokens);
+ status = gptoss_context_process(context);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to process Context object\n");
+ goto error;
+ }
+ const uint64_t prefill_end_time = mach_continuous_time();
+
+ while (options.max_tokens == 0 || atomic_load(&globals.num_generated_tokens) < options.max_tokens) {
+
+ uint32_t predicted_token = UINT32_MAX;
+ size_t num_predicted_tokens = 0;
+ const uint64_t inference_start_timestamp = mach_continuous_time();
+        status = gptoss_context_sample(context, options.temperature, /*seed=*/0, /*max_tokens=*/1, &predicted_token, &num_predicted_tokens);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to sample from the Context object\n");
+ goto error;
+ }
+ const uint64_t inference_end_timestamp = mach_continuous_time();
+
+ if (predicted_token == return_token_id) {
+            // Return special token -> stop generation
+ break;
+ }
+
+ // Unembedding: detokenize
+ size_t token_size = 0;
+ const void* token_ptr = NULL;
+ status = gptoss_tokenizer_decode(tokenizer, predicted_token, &token_ptr, &token_size);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to detokenize predicted token %" PRIu32 "\n", predicted_token);
+ goto error;
+ }
+ const size_t previous_num_generated_tokens = atomic_fetch_add(&globals.num_generated_tokens, 1);
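+        // Prefill time is recorded once, when the first token is generated; the latency of every
+        // later token is attributed to generation, so the first sampled token's own latency is
+        // excluded from the generation rate.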
+ if (previous_num_generated_tokens == 0) {
+ atomic_fetch_add(&globals.prefill_microseconds, mach_timestamp_diff_to_microseconds(prefill_start_time, prefill_end_time));
+ } else {
+ atomic_fetch_add(&globals.generation_microseconds, mach_timestamp_diff_to_microseconds(inference_start_timestamp, inference_end_timestamp));
+ }
+ printf("%.*s", (int) token_size, (const char*) token_ptr);
+
+ status = gptoss_context_append_tokens(context, 1, &predicted_token);
+ if (status != gptoss_status_success) {
+ fprintf(stderr, "Error: failed to append predicted token %" PRIu32 " to context\n", predicted_token);
+ goto error;
+ }
+ }
+
+ print_profile();
+
+ return EXIT_SUCCESS;
+
+error:
+ gptoss_context_release(context);
+ gptoss_tokenizer_release(tokenizer);
+ gptoss_model_release(model);
+ return EXIT_FAILURE;
+}
diff --git a/gpt_oss/metal/source/include/internal/datatype.h b/gpt_oss/metal/source/include/internal/datatype.h
new file mode 100644
index 0000000000000000000000000000000000000000..27e6117c4907d93c661832ff7aaa864a060435b3
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/datatype.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include <stdint.h>
+
+#include <internal/macros.h>
+
+
+typedef struct GPTOSS_DENSELY_PACKED_STRUCTURE {
+ GPTOSS_ALIGN(2) uint16_t bits;
+} gptoss_bfloat16;
+static_assert(sizeof(gptoss_bfloat16) == 2, "bfloat16 size is not 2 bytes");
+
+
+typedef struct GPTOSS_DENSELY_PACKED_STRUCTURE {
+ GPTOSS_ALIGN(2) uint16_t bits;
+} gptoss_float16;
+static_assert(sizeof(gptoss_float16) == 2, "float16 size is not 2 bytes");
+
+
+typedef struct GPTOSS_DENSELY_PACKED_STRUCTURE {
+ GPTOSS_ALIGN(1) uint8_t bits;
+} gptoss_float8ue8m0;
+static_assert(sizeof(gptoss_float8ue8m0) == 1, "gptoss_float8ue8m0 size is not 1 byte");
+
+
+typedef struct GPTOSS_DENSELY_PACKED_STRUCTURE {
+ GPTOSS_ALIGN(1) uint8_t bits;
+} gptoss_float8e5m2;
+static_assert(sizeof(gptoss_float8e5m2) == 1, "float8e5m2 size is not 1 byte");
+
+
+typedef struct GPTOSS_DENSELY_PACKED_STRUCTURE {
+ GPTOSS_ALIGN(1) uint8_t bits;
+} gptoss_float8e4m3;
+static_assert(sizeof(gptoss_float8e4m3) == 1, "gptoss_float8e4m3 size is not 1 byte");
+
+
+typedef struct GPTOSS_DENSELY_PACKED_STRUCTURE {
+ GPTOSS_ALIGN(1) uint8_t bits;
+} gptoss_float4e2m1x2;
+static_assert(sizeof(gptoss_float4e2m1x2) == 1, "gptoss_float4e2m1x2 size is not 1 byte");
diff --git a/gpt_oss/metal/source/include/internal/datatype.hpp b/gpt_oss/metal/source/include/internal/datatype.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4093927056748a7de160546ff89e9067f0264b08
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/datatype.hpp
@@ -0,0 +1,87 @@
+#pragma once
+
+#include <bit>
+#include <cstdint>
+
+#include <internal/datatype.h>
+
+
+namespace gptoss {
+
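+// upcast<WideT>(value): widen a packed storage value to a larger float type; explicit specializations per source format follow.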
+template <typename WideT, typename NarrowT>
+WideT upcast(NarrowT);
+
+template <>
+inline float upcast<float>(gptoss_bfloat16 bf16_value) {
+ const uint32_t bits = static_cast<uint32_t>(bf16_value.bits) << 16;
+ return std::bit_cast<float>(bits);
+}
+
+template <>
+inline float upcast<float>(gptoss_float16 fp16_value) {
+ return static_cast<float>(std::bit_cast<_Float16>(fp16_value.bits));
+}
+
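+// FP8 E4M3 -> float via a 256-entry lookup table of bfloat16 bit patterns; the two NaN encodings (0x7F, 0xFF) map to bfloat16 NaN.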
+template <>
+inline float upcast<float>(gptoss_float8e4m3 fp8_value) {
+ static constexpr uint16_t fp8e4m3_to_fp32[256] = {
+ 0x0000, 0x3B00, 0x3B80, 0x3BC0, 0x3C00, 0x3C20, 0x3C40, 0x3C60,
+ 0x3C80, 0x3C90, 0x3CA0, 0x3CB0, 0x3CC0, 0x3CD0, 0x3CE0, 0x3CF0,
+ 0x3D00, 0x3D10, 0x3D20, 0x3D30, 0x3D40, 0x3D50, 0x3D60, 0x3D70,
+ 0x3D80, 0x3D90, 0x3DA0, 0x3DB0, 0x3DC0, 0x3DD0, 0x3DE0, 0x3DF0,
+ 0x3E00, 0x3E10, 0x3E20, 0x3E30, 0x3E40, 0x3E50, 0x3E60, 0x3E70,
+ 0x3E80, 0x3E90, 0x3EA0, 0x3EB0, 0x3EC0, 0x3ED0, 0x3EE0, 0x3EF0,
+ 0x3F00, 0x3F10, 0x3F20, 0x3F30, 0x3F40, 0x3F50, 0x3F60, 0x3F70,
+ 0x3F80, 0x3F90, 0x3FA0, 0x3FB0, 0x3FC0, 0x3FD0, 0x3FE0, 0x3FF0,
+ 0x4000, 0x4010, 0x4020, 0x4030, 0x4040, 0x4050, 0x4060, 0x4070,
+ 0x4080, 0x4090, 0x40A0, 0x40B0, 0x40C0, 0x40D0, 0x40E0, 0x40F0,
+ 0x4100, 0x4110, 0x4120, 0x4130, 0x4140, 0x4150, 0x4160, 0x4170,
+ 0x4180, 0x4190, 0x41A0, 0x41B0, 0x41C0, 0x41D0, 0x41E0, 0x41F0,
+ 0x4200, 0x4210, 0x4220, 0x4230, 0x4240, 0x4250, 0x4260, 0x4270,
+ 0x4280, 0x4290, 0x42A0, 0x42B0, 0x42C0, 0x42D0, 0x42E0, 0x42F0,
+ 0x4300, 0x4310, 0x4320, 0x4330, 0x4340, 0x4350, 0x4360, 0x4370,
+ 0x4380, 0x4390, 0x43A0, 0x43B0, 0x43C0, 0x43D0, 0x43E0, 0x7FF0,
+ 0x8000, 0xBB00, 0xBB80, 0xBBC0, 0xBC00, 0xBC20, 0xBC40, 0xBC60,
+ 0xBC80, 0xBC90, 0xBCA0, 0xBCB0, 0xBCC0, 0xBCD0, 0xBCE0, 0xBCF0,
+ 0xBD00, 0xBD10, 0xBD20, 0xBD30, 0xBD40, 0xBD50, 0xBD60, 0xBD70,
+ 0xBD80, 0xBD90, 0xBDA0, 0xBDB0, 0xBDC0, 0xBDD0, 0xBDE0, 0xBDF0,
+ 0xBE00, 0xBE10, 0xBE20, 0xBE30, 0xBE40, 0xBE50, 0xBE60, 0xBE70,
+ 0xBE80, 0xBE90, 0xBEA0, 0xBEB0, 0xBEC0, 0xBED0, 0xBEE0, 0xBEF0,
+ 0xBF00, 0xBF10, 0xBF20, 0xBF30, 0xBF40, 0xBF50, 0xBF60, 0xBF70,
+ 0xBF80, 0xBF90, 0xBFA0, 0xBFB0, 0xBFC0, 0xBFD0, 0xBFE0, 0xBFF0,
+ 0xC000, 0xC010, 0xC020, 0xC030, 0xC040, 0xC050, 0xC060, 0xC070,
+ 0xC080, 0xC090, 0xC0A0, 0xC0B0, 0xC0C0, 0xC0D0, 0xC0E0, 0xC0F0,
+ 0xC100, 0xC110, 0xC120, 0xC130, 0xC140, 0xC150, 0xC160, 0xC170,
+ 0xC180, 0xC190, 0xC1A0, 0xC1B0, 0xC1C0, 0xC1D0, 0xC1E0, 0xC1F0,
+ 0xC200, 0xC210, 0xC220, 0xC230, 0xC240, 0xC250, 0xC260, 0xC270,
+ 0xC280, 0xC290, 0xC2A0, 0xC2B0, 0xC2C0, 0xC2D0, 0xC2E0, 0xC2F0,
+ 0xC300, 0xC310, 0xC320, 0xC330, 0xC340, 0xC350, 0xC360, 0xC370,
+ 0xC380, 0xC390, 0xC3A0, 0xC3B0, 0xC3C0, 0xC3D0, 0xC3E0, 0xFFF0,
+ };
+ const gptoss_bfloat16 bf16_value{.bits = fp8e4m3_to_fp32[fp8_value.bits]};
+ return upcast<float>(bf16_value);
+}
+
+template <>
+inline double upcast<double>(float fp32_value) {
+ return static_cast<double>(fp32_value);
+}
+
+template <>
+inline double upcast<double>(gptoss_bfloat16 bf16_value) {
+ const float fp32_value = upcast<float>(bf16_value);
+ return upcast<double>(fp32_value);
+}
+
+template <>
+inline double upcast<double>(gptoss_float16 fp16_value) {
+ const float fp32_value = upcast<float>(fp16_value);
+ return upcast<double>(fp32_value);
+}
+
+template <>
+inline double upcast<double>(gptoss_float8e4m3 fp8_value) {
+ const float fp32_value = upcast<float>(fp8_value);
+ return upcast<double>(fp32_value);
+}
+
+} // namespace gptoss
diff --git a/gpt_oss/metal/source/include/internal/kernel-args.h b/gpt_oss/metal/source/include/internal/kernel-args.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d398b7a41e22b8911243afc12175b489dcfc2be
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/kernel-args.h
@@ -0,0 +1,202 @@
+#pragma once
+
+#if !defined(__METAL_VERSION__)
+#include <stdint.h>
+#endif
+
+// TODO(ibahmed): specialize using Metal function constants.
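+// Bm/Bn/Bk below are per-threadgroup tile dimensions for the dense matmul kernels; the Sg_* variants are the per-simdgroup sub-tiles.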
+#define QKV_Bm 64
+#define QKV_Bn 64
+#define QKV_Bk 32
+#define QKV_Sg_Bm 32
+#define QKV_Sg_Bn 32
+
+#define ATTN_OUTPUT_Bm 32
+#define ATTN_OUTPUT_Bn 64
+#define ATTN_OUTPUT_Bk 64
+#define ATTN_OUTPUT_Sg_Bm 32
+#define ATTN_OUTPUT_Sg_Bn 16
+
+#define MLP_GATE_Bm 64
+#define MLP_GATE_Bn 16
+#define MLP_GATE_Bk 64
+#define MLP_GATE_Sg_Bm 16
+#define MLP_GATE_Sg_Bn 16
+
+#define MOE_DENSE_MATMUL_SWIGLU_Bm 32
+#define MOE_DENSE_MATMUL_SWIGLU_Bn 64
+#define MOE_DENSE_MATMUL_SWIGLU_Bk 16
+#define MOE_DENSE_MATMUL_SWIGLU_Sg_Bm 32
+#define MOE_DENSE_MATMUL_SWIGLU_Sg_Bn 16
+
+#define MOE_DENSE_MATMUL_Bm 32
+#define MOE_DENSE_MATMUL_Bn 64
+#define MOE_DENSE_MATMUL_Bk 16
+#define MOE_DENSE_MATMUL_Sg_Bm 32
+#define MOE_DENSE_MATMUL_Sg_Bn 16
+
+struct gptoss_expert_prediction {
+ uint32_t expert_id;
+ float score;
+};
+
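+// Control word shared with every kernel; a nonzero abort flag is presumably used to cut in-flight work short.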
+struct gptoss_control {
+ uint32_t abort;
+};
+
+struct gptoss_topk_args {
+ uint32_t num_vecs_per_token;
+};
+
+struct gptoss_sdpa_args {
+ uint32_t qkv_dim;
+ uint32_t num_kv_tokens;
+ uint32_t kv_stride;
+ uint32_t window;
+};
+
+struct gptoss_u32_fill_random_args {
+ uint64_t num_vecs_per_threadgroup;
+ uint64_t num_vecs;
+ uint64_t offset;
+ uint64_t seed;
+};
+
+struct gptoss_f32_fill_random_args {
+ uint64_t num_vecs_per_threadgroup;
+ uint64_t num_vecs;
+ uint64_t offset;
+ uint64_t seed;
+ float scale;
+ float bias;
+};
+
+struct gptoss_accumulate_args {
+ uint32_t num_vecs_per_expert;
+ uint32_t num_vecs_per_threadgroup;
+ uint32_t num_vecs;
+};
+
+struct gptoss_convert_args {
+ uint64_t num_vecs_per_threadgroup;
+ uint64_t num_vecs;
+};
+
+struct gptoss_embeddings_args {
+ uint32_t num_vecs;
+};
+
+struct gptoss_rmsnorm_args {
+ uint32_t num_vecs;
+ float num_channels;
+ float epsilon;
+};
+
+struct gptoss_matmul_args {
+ uint32_t num_column_vecs;
+ uint32_t num_rows;
+ uint32_t add;
+};
+
+struct gptoss_dense_matmul_args {
+ uint32_t m;
+ uint32_t n;
+ uint32_t k;
+};
+
+struct gptoss_scatter_args {
+ uint32_t tokens;
+ uint32_t active_experts_per_token;
+ uint32_t token_stride;
+};
+
+struct gptoss_moe_dense_matmul_swiglu_args {
+ uint32_t expert_token_count;
+ uint32_t k;
+ uint32_t n;
+ uint32_t expert_id;
+ uint32_t expert_token_offset;
+ uint32_t weight_blocks_expert_stride_bytes;
+ uint32_t weight_scales_expert_stride_bytes;
+ uint32_t bias_expert_stride_bytes;
+ float swiglu_min;
+ float swiglu_max;
+};
+struct gptoss_moe_dense_matmul_args {
+ uint32_t expert_token_count;
+ uint32_t k;
+ uint32_t n;
+ uint32_t expert_id;
+ uint32_t expert_token_offset;
+ uint32_t weight_blocks_expert_stride_bytes;
+ uint32_t weight_scales_expert_stride_bytes;
+ uint32_t bias_expert_stride_bytes;
+};
+
+struct gptoss_gather_args {
+ uint32_t tokens;
+ uint32_t active_experts_per_token;
+ uint32_t token_stride;
+};
+
+struct gptoss_unembedding_args {
+ uint32_t num_column_vecs;
+ uint32_t num_rows_per_threadgroup;
+ uint32_t num_rows;
+};
+
+struct gptoss_moe_matmul_swiglu_args {
+ uint32_t num_column_vecs;
+ uint32_t num_rows;
+ uint32_t num_active_experts;
+ uint32_t weight_expert_stride; // in bytes
+ uint32_t output_expert_stride; // in elements
+ float swiglu_min;
+ float swiglu_max;
+};
+
+struct gptoss_moe_matmul_args {
+ uint32_t num_column_vecs;
+ uint32_t num_rows;
+ uint32_t num_active_experts;
+ uint32_t input_expert_stride; // in blocks of 32 elements
+ uint32_t weight_expert_stride; // in bytes
+ uint32_t output_expert_stride; // in elements
+};
+
+struct gptoss_rope_args {
+ uint32_t token_stride;
+ uint32_t token_offset;
+ float freq_scale;
+ float interpolation_scale;
+ float yarn_offset;
+ float yarn_scale;
+ float yarn_multiplier;
+};
+
+struct gptoss_qkv_args {
+ uint32_t num_column_vecs;
+ uint32_t num_rows;
+ uint32_t token_offset;
+ float freq_scale;
+ float interpolation_scale;
+ float yarn_offset;
+ float yarn_scale;
+ float yarn_multiplier;
+ uint32_t max_tokens;
+};
+
+struct gptoss_softmax_args {
+ uint32_t num_vecs;
+ uint32_t num_vecs_per_threadgroup;
+ uint32_t max_threadgroups;
+ float temperature;
+};
+
+struct gptoss_sample_args {
+ uint64_t rng_seed;
+ uint32_t rng_offset;
+ uint32_t num_blocks;
+ uint32_t num_dims;
+ uint32_t num_dims_per_block;
+};
diff --git a/gpt_oss/metal/source/include/internal/log.h b/gpt_oss/metal/source/include/internal/log.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e94d501350e8541b8ebb3ecbf1c3bf321f31fe1
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/log.h
@@ -0,0 +1,20 @@
+#pragma once
+
+#include <stdarg.h>
+
+
+void gptoss_format_log(const char* format, va_list args);
+
+__attribute__((__format__(__printf__, 1, 2)))
+inline static void gptoss_log(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ gptoss_format_log(format, args);
+ va_end(args);
+}
+
+#define GPTOSS_LOG_ERROR(message, ...) \
+ gptoss_log("Error: " message "\n", ##__VA_ARGS__)
+
+#define GPTOSS_LOG_WARNING(message, ...) \
+ gptoss_log("Warning: " message "\n", ##__VA_ARGS__)
diff --git a/gpt_oss/metal/source/include/internal/macros.h b/gpt_oss/metal/source/include/internal/macros.h
new file mode 100644
index 0000000000000000000000000000000000000000..90361df3076a3ee71ee202500347145e2c8b5c6d
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/macros.h
@@ -0,0 +1,107 @@
+#pragma once
+
+/***** Architecture detection macros *****/
+
+#ifdef GPTOSS_ARCH_X86_64
+ #if GPTOSS_ARCH_X86_64 != 0 && GPTOSS_ARCH_X86_64 != 1
+ #error "Invalid GPTOSS_ARCH_X86_64 value: must be either 0 or 1"
+ #endif
+#else
+ #if defined(__x86_64__) || defined(_M_X64) && !defined(_M_ARM64EC)
+ #define GPTOSS_ARCH_X86_64 1
+ #else
+ #define GPTOSS_ARCH_X86_64 0
+ #endif
+#endif
+
+#ifdef GPTOSS_ARCH_ARM64
+ #if GPTOSS_ARCH_ARM64 != 0 && GPTOSS_ARCH_ARM64 != 1
+ #error "Invalid GPTOSS_ARCH_ARM64 value: must be either 0 or 1"
+ #endif
+#else
+ #if defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
+ #define GPTOSS_ARCH_ARM64 1
+ #else
+ #define GPTOSS_ARCH_ARM64 0
+ #endif
+#endif
+
+#if GPTOSS_ARCH_X86_64 + GPTOSS_ARCH_ARM64 == 0
+ #error "Unsupported architecture: neither x86-64 nor ARM64 detected"
+#elif GPTOSS_ARCH_X86_64 + GPTOSS_ARCH_ARM64 != 1
+ #error "Inconsistent architecture detection: both x86-64 and ARM64 detection macros are specified"
+#endif
+
+/***** Compiler portability macros *****/
+
+#ifndef GPTOSS_LIKELY
+ #if defined(__GNUC__)
+ #define GPTOSS_LIKELY(condition) (__builtin_expect(!!(condition), 1))
+ #else
+ #define GPTOSS_LIKELY(condition) (!!(condition))
+ #endif
+#endif
+
+#ifndef GPTOSS_UNLIKELY
+ #if defined(__GNUC__)
+ #define GPTOSS_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
+ #else
+ #define GPTOSS_UNLIKELY(condition) (!!(condition))
+ #endif
+#endif
+
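+// GPTOSS_UNPREDICTABLE marks a branch whose outcome is close to 50/50 so the compiler avoids biased static prediction; it degrades to a plain condition where no suitable builtin exists.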
+#ifndef GPTOSS_UNPREDICTABLE
+ #if defined(__has_builtin)
+ #if __has_builtin(__builtin_unpredictable)
+ #define GPTOSS_UNPREDICTABLE(condition) (__builtin_unpredictable(!!(condition)))
+ #endif
+ #endif
+#endif
+#ifndef GPTOSS_UNPREDICTABLE
+ #if defined(__GNUC__) && (__GNUC__ >= 9) && !defined(__INTEL_COMPILER)
+ #define GPTOSS_UNPREDICTABLE(condition) (__builtin_expect_with_probability(!!(condition), 0, 0.5))
+ #else
+ #define GPTOSS_UNPREDICTABLE(condition) (!!(condition))
+ #endif
+#endif
+
+// Disable padding for structure members.
+#ifndef GPTOSS_DENSELY_PACKED_STRUCTURE
+ #if defined(__GNUC__)
+ #define GPTOSS_DENSELY_PACKED_STRUCTURE __attribute__((__packed__))
+ #else
+ #error "Compiler-specific implementation of GPTOSS_DENSELY_PACKED_STRUCTURE required"
+ #endif
+#endif
+
+#ifndef GPTOSS_ALIGN
+ #if defined(__GNUC__)
+ #define GPTOSS_ALIGN(alignment) __attribute__((__aligned__(alignment)))
+ #elif defined(_MSC_VER)
+ #define GPTOSS_ALIGN(alignment) __declspec(align(alignment))
+ #else
+ #error "Compiler-specific implementation of GPTOSS_ALIGN required"
+ #endif
+#endif
+
+#ifndef GPTOSS_FORCE_INLINE
+ #if defined(__GNUC__)
+ #define GPTOSS_FORCE_INLINE inline __attribute__((__always_inline__))
+ #elif defined(_MSC_VER)
+ #define GPTOSS_FORCE_INLINE __forceinline
+ #else
+ #define GPTOSS_FORCE_INLINE inline
+ #endif
+#endif
+
+/***** Symbol visibility macros *****/
+
+#ifndef GPTOSS_INTERNAL_SYMBOL
+ #if defined(__ELF__)
+ #define GPTOSS_INTERNAL_SYMBOL __attribute__((__visibility__("internal")))
+ #elif defined(__MACH__)
+ #define GPTOSS_INTERNAL_SYMBOL __attribute__((__visibility__("hidden")))
+ #else
+ #define GPTOSS_INTERNAL_SYMBOL
+ #endif
+#endif
diff --git a/gpt_oss/metal/source/include/internal/math.h b/gpt_oss/metal/source/include/internal/math.h
new file mode 100644
index 0000000000000000000000000000000000000000..06f2b1f1dfd049ed94e2c79d6de424319c654e6b
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/math.h
@@ -0,0 +1,40 @@
+#pragma once
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+inline static size_t math_ceil_div(size_t numer, size_t denom) {
+ return (numer + denom - 1) / denom;
+}
+
+inline static size_t math_max(size_t a, size_t b) {
+ return a >= b ? a : b;
+}
+
+inline static size_t math_min(size_t a, size_t b) {
+ return a < b ? a : b;
+}
+
+inline static size_t math_sub_sat(size_t a, size_t b) {
+ return a > b ? a - b : 0;
+}
+
+static size_t math_round_down_po2(size_t number, size_t multiple) {
+ assert(multiple != 0);
+ assert((multiple & (multiple - 1)) == 0);
+
+ return number & -multiple;
+}
+
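+// Round up to a multiple of a power of two: set all bits below the multiple, then add one to carry into the next multiple.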
+static size_t math_round_up_po2(size_t number, size_t multiple) {
+ assert(multiple != 0);
+ assert((multiple & (multiple - 1)) == 0);
+
+ const size_t multiple_mask = multiple - 1;
+ if ((number & multiple_mask) != 0) {
+ number |= multiple_mask;
+ number += 1;
+ }
+ return number;
+}
diff --git a/gpt_oss/metal/source/include/internal/metal-kernels.h b/gpt_oss/metal/source/include/internal/metal-kernels.h
new file mode 100644
index 0000000000000000000000000000000000000000..3addb3cc6803a32391237797bb724fd6ee2d9505
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/metal-kernels.h
@@ -0,0 +1,471 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <gpt-oss/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <internal/math.h>
+#include <internal/metal.h>
+
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_u32_fill_random(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* u32_fill_random_fn,
+ size_t threadgroup_size,
+ size_t max_threadgroups,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ uint64_t num_elements,
+ uint64_t rng_seed,
+ uint64_t rng_offset);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_fill_random(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_fill_random_fn,
+ size_t threadgroup_size,
+ size_t max_threadgroups,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ uint64_t num_elements,
+ uint64_t rng_seed,
+ uint64_t rng_offset,
+ float rng_min,
+ float rng_max);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_bf16_fill_random(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* bf16_fill_random_fn,
+ size_t threadgroup_size,
+ size_t max_threadgroups,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ uint64_t num_elements,
+ uint64_t rng_seed,
+ uint64_t rng_offset,
+ float rng_min,
+ float rng_max);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_mf4_f32_convert(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* mf4_f32_convert_fn,
+ size_t threadgroup_size,
+ size_t max_threadgroups,
+ const struct gptoss_metal_buffer* block_buffer,
+ const struct gptoss_metal_buffer* scale_buffer,
+ const struct gptoss_metal_buffer* output_buffer,
+ uint64_t num_elements);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_bf16_f32_embeddings(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* bf16_f32_embeddings_fn,
+ size_t threadgroup_size,
+ const struct gptoss_metal_buffer* token_buffer,
+ size_t token_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_channels);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_bf16w_rmsnorm(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_bf16w_rmsnorm_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_channels,
+ float epsilon);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_bf16w_matmul(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_bf16w_matmul_fn,
+ size_t threadgroup_size,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_bf16w_matmul_qkv(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_bf16w_matmul_qkv_fn,
+ size_t threadgroup_size,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* kv_buffer,
+ size_t kv_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_cols,
+ uint32_t num_q_heads,
+ uint32_t num_kv_heads,
+ uint32_t attn_head_dim,
+ uint32_t token_offset,
+ uint32_t max_tokens,
+ float rope_base,
+ float interpolation_scale,
+ float yarn_offset,
+ float yarn_scale,
+ float yarn_multiplier);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_bf16w_matmul_add(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_bf16w_matmul_fn,
+ size_t threadgroup_size,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status
+gptoss_metal_command_buffer_encode_launch_f32_bf16w_dense_matmul_qkv(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_bf16w_dense_matmul_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status
+gptoss_metal_command_buffer_encode_launch_f32_bf16w_dense_matmul_attn_output(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_bf16w_dense_matmul_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status
+gptoss_metal_command_buffer_encode_launch_f32_bf16w_dense_matmul_mlp_gate(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_bf16w_dense_matmul_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_bf16w_unembedding(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_bf16w_matmul_fn,
+ size_t threadgroup_size,
+ size_t max_threadgroups,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_buffer,
+ size_t weight_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* argmax_buffer,
+ size_t argmax_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_mf4w_moe_matmul_swiglu(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_mf4w_moe_matmul_swiglu_fn,
+ size_t threadgroup_size,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* expert_buffer,
+ size_t expert_offset,
+ const struct gptoss_metal_buffer* weight_block_buffer,
+ size_t weight_block_offset,
+ const struct gptoss_metal_buffer* weight_scale_buffer,
+ size_t weight_scale_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ float swiglu_limit,
+ uint32_t expert_stride,
+ uint32_t num_tokens,
+ uint32_t num_active_experts,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_mf4w_moe_matmul(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_mf4w_moe_matmul_fn,
+ size_t threadgroup_size,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* expert_buffer,
+ size_t expert_offset,
+ const struct gptoss_metal_buffer* weight_block_buffer,
+ size_t weight_block_offset,
+ const struct gptoss_metal_buffer* weight_scale_buffer,
+ size_t weight_scale_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t expert_stride,
+ uint32_t num_tokens,
+ uint32_t num_active_experts,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_rope(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_rope_fn,
+ size_t threadgroup_size,
+ const struct gptoss_metal_buffer* activations_buffer,
+ size_t activations_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ float rope_base,
+ float interpolation_scale,
+ float yarn_offset,
+ float yarn_scale,
+ float yarn_multiplier,
+ uint32_t num_tokens,
+ uint32_t num_q_heads,
+ uint32_t num_kv_heads,
+ uint32_t attn_head_dim,
+ uint32_t token_offset);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_accumulate(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_accumulate_fn,
+ size_t threadgroup_size,
+ size_t max_threadgroups,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* expert_buffer,
+ size_t expert_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_channels,
+ uint32_t num_tokens,
+ uint32_t num_experts);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_scatter(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_scatter_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* expert_predictions_buffer,
+ size_t expert_predictions_offset,
+ const struct gptoss_metal_buffer* expert_offsets_buffer,
+ size_t expert_offsets_offset,
+ const struct gptoss_metal_buffer* intra_expert_offsets_buffer,
+ size_t intra_expert_offsets_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ uint32_t num_channels,
+ uint32_t num_tokens,
+ uint32_t num_active_experts);
+
+enum gptoss_status
+gptoss_metal_command_buffer_encode_launch_f32_mf4w_moe_dense_matmul_swiglu(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_mf4w_moe_dense_matmul_swiglu_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_block_buffer,
+ size_t weight_block_offset,
+ const struct gptoss_metal_buffer* weight_scale_buffer,
+ size_t weight_scale_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ float swiglu_limit,
+ uint32_t expert_stride_bytes,
+ uint32_t num_tokens,
+ uint32_t expert_token_offset,
+ uint32_t expert_id,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_mf4w_moe_dense_matmul(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_mf4w_moe_dense_matmul_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* weight_block_buffer,
+ size_t weight_block_offset,
+ const struct gptoss_metal_buffer* weight_scale_buffer,
+ size_t weight_scale_offset,
+ const struct gptoss_metal_buffer* bias_buffer,
+ size_t bias_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ uint32_t expert_stride_bytes,
+ uint32_t num_tokens,
+ uint32_t expert_token_offset,
+ uint32_t expert_id,
+ uint32_t num_cols,
+ uint32_t num_rows);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_gather_and_accumulate_e4(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_gather_and_accumulate_e4_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* expert_predictions_buffer,
+ size_t expert_predictions_offset,
+ const struct gptoss_metal_buffer* expert_offsets_buffer,
+ size_t expert_offsets_offset,
+ const struct gptoss_metal_buffer* intra_expert_offsets_buffer,
+ size_t intra_expert_offsets_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ uint32_t num_channels,
+ uint32_t num_tokens,
+ uint32_t num_active_experts);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_topk(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_topk_fn,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_tokens,
+ uint32_t num_experts,
+ uint32_t num_active_experts);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_sdpa(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_sdpa_fn,
+ const struct gptoss_metal_buffer* q_buffer,
+ size_t q_offset,
+ const struct gptoss_metal_buffer* kv_buffer,
+ size_t kv_offset,
+ const struct gptoss_metal_buffer* s_buffer,
+ size_t s_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t window,
+ uint32_t kv_stride,
+ uint32_t num_q_tokens,
+ uint32_t num_kv_tokens,
+ uint32_t num_q_heads,
+ uint32_t num_kv_heads,
+ uint32_t head_dim);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_softmax(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_softmax_fn,
+ size_t threadgroup_size,
+ size_t max_threadgroups,
+ const struct gptoss_metal_buffer* score_buffer,
+ size_t score_offset,
+ const struct gptoss_metal_buffer* argmax_buffer,
+ size_t argmax_offset,
+ const struct gptoss_metal_buffer* prob_buffer,
+ size_t prob_offset,
+ const struct gptoss_metal_buffer* sum_buffer,
+ size_t sum_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint32_t num_channels,
+ uint32_t num_tokens,
+ float temperature,
+ uint32_t* num_threadgroups_out,
+ uint32_t* num_channels_per_threadgroup_out);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_f32_sample(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* f32_sample_fn,
+ size_t min_threadgroup_size,
+ const struct gptoss_metal_buffer* prob_buffer,
+ size_t prob_offset,
+ const struct gptoss_metal_buffer* sum_buffer,
+ size_t sum_offset,
+ const struct gptoss_metal_buffer* token_buffer,
+ size_t token_offset,
+ const struct gptoss_metal_buffer* control_buffer,
+ size_t control_offset,
+ uint64_t rng_seed,
+ uint32_t rng_offset,
+ uint32_t num_blocks,
+ uint32_t num_channels,
+ uint32_t num_channels_per_block);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
diff --git a/gpt_oss/metal/source/include/internal/metal.h b/gpt_oss/metal/source/include/internal/metal.h
new file mode 100644
index 0000000000000000000000000000000000000000..f38190f06e693de0b4817c2365dec5c6b9d7569c
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/metal.h
@@ -0,0 +1,138 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <gpt-oss/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gptoss_metal_device {
+ void* object; // id<MTLDevice>
+ size_t num_cores;
+ size_t max_buffer_size;
+ size_t max_threadgroup_memory;
+ size_t max_threadgroup_threads_x;
+ size_t max_threadgroup_threads_y;
+ size_t max_threadgroup_threads_z;
+};
+
+enum gptoss_status gptoss_metal_device_create_system_default(
+ struct gptoss_metal_device* device_out);
+
+enum gptoss_status gptoss_metal_device_release(
+ struct gptoss_metal_device* device);
+
+
+struct gptoss_metal_library {
+ void* object; // id<MTLLibrary>
+};
+
+enum gptoss_status gptoss_metal_library_create_default(
+ const struct gptoss_metal_device* device,
+ struct gptoss_metal_library* library_out);
+
+enum gptoss_status gptoss_metal_library_release(
+ struct gptoss_metal_library* library);
+
+struct gptoss_metal_function {
+ void* function_object; // id<MTLFunction>
+ void* pipeline_state_object; // id<MTLComputePipelineState>
+ size_t max_threadgroup_threads;
+ size_t simdgroup_threads;
+ size_t static_threadgroup_memory;
+};
+
+enum gptoss_status gptoss_metal_function_create(
+ const struct gptoss_metal_library* library,
+ const char* name,
+ struct gptoss_metal_function* function_out);
+
+enum gptoss_status gptoss_metal_function_release(
+ struct gptoss_metal_function* function);
+
+struct gptoss_metal_buffer {
+ void* object; // id<MTLBuffer>
+ size_t size;
+ void* ptr;
+};
+
+enum gptoss_status gptoss_metal_buffer_create(
+ const struct gptoss_metal_device* device,
+ size_t size,
+ const void* data,
+ struct gptoss_metal_buffer* buffer_out);
+
+enum gptoss_status gptoss_metal_buffer_wrap(
+ const struct gptoss_metal_device* device,
+ size_t size,
+ const void* data,
+ struct gptoss_metal_buffer* buffer_out);
+
+enum gptoss_status gptoss_metal_buffer_release(
+ struct gptoss_metal_buffer* buffer);
+
+struct gptoss_metal_command_queue {
+ void* object; // id<MTLCommandQueue>
+};
+
+enum gptoss_status gptoss_metal_command_queue_create(
+ const struct gptoss_metal_device* device,
+ struct gptoss_metal_command_queue* command_queue_out);
+
+enum gptoss_status gptoss_metal_command_queue_release(
+ struct gptoss_metal_command_queue* command_queue);
+
+struct gptoss_metal_command_buffer {
+ void* object; // id<MTLCommandBuffer>
+};
+
+enum gptoss_status gptoss_metal_command_buffer_create(
+ const struct gptoss_metal_command_queue* command_queue,
+ struct gptoss_metal_command_buffer* command_buffer_out);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_fill_buffer(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_buffer* buffer,
+ size_t offset,
+ size_t size,
+ uint8_t fill_value);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_copy_buffer(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_buffer* input_buffer,
+ size_t input_offset,
+ const struct gptoss_metal_buffer* output_buffer,
+ size_t output_offset,
+ size_t size);
+
+enum gptoss_status gptoss_metal_command_buffer_encode_launch_kernel(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ const struct gptoss_metal_function* function,
+ size_t threadgroup_size_x,
+ size_t threadgroup_size_y,
+ size_t threadgroup_size_z,
+ size_t num_threadgroups_x,
+ size_t num_threadgroups_y,
+ size_t num_threadgroups_z,
+ size_t params_size,
+ const void* params,
+ size_t num_device_buffers,
+ const struct gptoss_metal_buffer** device_buffers,
+ const size_t* device_buffer_offsets,
+ size_t threadgroup_buffer_size);
+
+enum gptoss_status gptoss_metal_command_buffer_commit(
+ const struct gptoss_metal_command_buffer* command_buffer);
+
+enum gptoss_status gptoss_metal_command_buffer_wait_completion(
+ const struct gptoss_metal_command_buffer* command_buffer,
+ double* elapsed_seconds);
+
+enum gptoss_status gptoss_metal_command_buffer_release(
+ struct gptoss_metal_command_buffer* command_buffer);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
diff --git a/gpt_oss/metal/source/include/internal/metal.hpp b/gpt_oss/metal/source/include/internal/metal.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a143a11ab319cd5ce864b88affebd60d2eb29c6d
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/metal.hpp
@@ -0,0 +1,342 @@
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <cstddef>
+#include <cstring>
+#include <initializer_list>
+#include <stdexcept>
+#include <vector>
+
+#include <gpt-oss/types.h>
+#include <internal/metal.h>
+#include <internal/metal-kernels.h>
+
+
+namespace gptoss {
+
+inline void Check(gptoss_status s, const char* what) {
+ if (s != gptoss_status_success) {
+ throw std::runtime_error(what);
+ }
+}
+
+inline std::size_t round_up(std::size_t p, std::size_t q) {
+ const std::size_t r = p % q;
+ if (r == 0) {
+ return p;
+ } else {
+ return p - r + q;
+ }
+}
+
+namespace metal {
+
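+// RAII wrappers over the C Metal glue above: each class owns one gptoss_metal_* handle,
+// releases it in its destructor, and is movable but not copyable.
+// A usage sketch (kernel name, sizes, and seeds are illustrative, not part of the API):
+//
+//   Device device;
+//   Library library{device};
+//   Function fill_fn{library, "f32_fill_random"};
+//   Buffer buffer{device, 1024 * sizeof(float)};
+//   CommandQueue queue{device};
+//   CommandBuffer command_buffer{queue};
+//   command_buffer.encode_launch_f32_fill_random(fill_fn, /*threadgroup_size=*/256, /*num_threadgroups=*/32,
+//       buffer, /*output_offset=*/0, /*num_channels=*/1024, /*rng_seed=*/42, /*rng_offset=*/0,
+//       /*rng_min=*/-1.0f, /*rng_max=*/1.0f);
+//   command_buffer.commit();
+//   const double elapsed_seconds = command_buffer.wait_completion();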
+class Device {
+public:
+ inline Device() {
+ Check(gptoss_metal_device_create_system_default(&device_), "create Device");
+ }
+
+ inline ~Device() {
+ gptoss_metal_device_release(&device_);
+ }
+
+ Device(const Device&) = delete;
+ Device& operator=(const Device&) = delete;
+
+ inline Device(Device&& other) noexcept {
+ device_ = other.device_;
+ std::memset(&other.device_, 0, sizeof(other.device_));
+ }
+
+ inline Device& operator=(Device&& other) noexcept {
+ if (this != &other) {
+ gptoss_metal_device_release(&device_);
+ device_ = other.device_;
+ std::memset(&other.device_, 0, sizeof(other.device_));
+ }
+ return *this;
+ }
+
+ inline const gptoss_metal_device* handle() const noexcept { return &device_; }
+
+ inline size_t max_buffer_size() const noexcept { return device_.max_buffer_size; }
+ inline size_t max_threadgroup_memory() const noexcept { return device_.max_threadgroup_memory; }
+ inline size_t max_threadgroup_threads_x() const noexcept { return device_.max_threadgroup_threads_x; }
+ inline size_t max_threadgroup_threads_y() const noexcept { return device_.max_threadgroup_threads_y; }
+ inline size_t max_threadgroup_threads_z() const noexcept { return device_.max_threadgroup_threads_z; }
+
+private:
+ gptoss_metal_device device_{};
+};
+
+class Library {
+public:
+ inline explicit Library(const Device& dev) {
+ Check(gptoss_metal_library_create_default(dev.handle(), &library_),
+ "gptoss_metal_library_create_default");
+ }
+
+ inline ~Library() {
+ gptoss_metal_library_release(&library_);
+ }
+
+ Library(const Library&) = delete;
+ Library& operator=(const Library&) = delete;
+
+ inline Library(Library&& other) noexcept {
+ library_ = other.library_;
+ std::memset(&other.library_, 0, sizeof(other.library_));
+ }
+
+ inline Library& operator=(Library&& other) noexcept {
+ if (this != &other) {
+ gptoss_metal_library_release(&library_);
+ library_ = other.library_;
+ std::memset(&other.library_, 0, sizeof(other.library_));
+ }
+ return *this;
+ }
+
+ inline const gptoss_metal_library* handle() const noexcept {
+ return &library_;
+ }
+
+private:
+ gptoss_metal_library library_{};
+};
+
+class Function {
+public:
+ inline Function(const Library& library, const char* name) {
+ Check(gptoss_metal_function_create(library.handle(), name, &function_),
+ "gptoss_metal_function_create");
+ }
+
+ inline ~Function() {
+ gptoss_metal_function_release(&function_);
+ }
+
+ Function(const Function&) = delete;
+ Function& operator=(const Function&) = delete;
+
+ inline Function(Function&& other) noexcept {
+ function_ = other.function_;
+ std::memset(&other.function_, 0, sizeof(other.function_));
+ }
+
+ inline Function& operator=(Function&& other) noexcept {
+ if (this != &other) {
+ gptoss_metal_function_release(&function_);
+ function_ = other.function_;
+ std::memset(&other.function_, 0, sizeof(other.function_));
+ }
+ return *this;
+ }
+
+ inline const gptoss_metal_function* handle() const noexcept { return &function_; }
+
+ inline size_t max_threadgroup_threads() const noexcept { return function_.max_threadgroup_threads; }
+ inline size_t simdgroup_threads() const noexcept { return function_.simdgroup_threads; }
+ inline size_t static_threadgroup_memory() const noexcept { return function_.static_threadgroup_memory; }
+
+private:
+ gptoss_metal_function function_{};
+};
+
+class Buffer {
+public:
+ inline Buffer(const Device& dev, size_t size, const void* data = nullptr) {
+ Check(gptoss_metal_buffer_create(dev.handle(), size, data, &buffer_), "create buffer");
+ }
+
+ inline ~Buffer() {
+ gptoss_metal_buffer_release(&buffer_);
+ }
+
+ Buffer(const Buffer&) = delete;
+ Buffer& operator=(const Buffer&) = delete;
+
+ inline Buffer(Buffer&& other) noexcept {
+ buffer_ = other.buffer_;
+ std::memset(&other.buffer_, 0, sizeof(other.buffer_));
+ }
+
+ inline Buffer& operator=(Buffer&& other) noexcept {
+ if (this != &other) {
+ gptoss_metal_buffer_release(&buffer_);
+ buffer_ = other.buffer_;
+ std::memset(&other.buffer_, 0, sizeof(other.buffer_));
+ }
+ return *this;
+ }
+
+ inline size_t size() const noexcept { return buffer_.size; }
+ inline void* ptr() const noexcept { return buffer_.ptr; }
+
+ inline const gptoss_metal_buffer* handle() const noexcept { return &buffer_; }
+
+private:
+ gptoss_metal_buffer buffer_{};
+};
+
+class CommandQueue {
+public:
+ inline explicit CommandQueue(const Device& dev) {
+ Check(gptoss_metal_command_queue_create(dev.handle(), &command_queue_),
+ "gptoss_metal_command_queue_create");
+ }
+
+ inline ~CommandQueue() {
+ gptoss_metal_command_queue_release(&command_queue_);
+ }
+
+ CommandQueue(const CommandQueue&) = delete;
+ CommandQueue& operator=(const CommandQueue&) = delete;
+
+ inline CommandQueue(CommandQueue&& other) noexcept {
+ command_queue_ = other.command_queue_;
+ std::memset(&other.command_queue_, 0, sizeof(other.command_queue_));
+ }
+
+ inline CommandQueue& operator=(CommandQueue&& other) noexcept {
+ if (this != &other) {
+ gptoss_metal_command_queue_release(&command_queue_);
+ command_queue_ = other.command_queue_;
+ std::memset(&other.command_queue_, 0, sizeof(other.command_queue_));
+ }
+ return *this;
+ }
+
+ inline const gptoss_metal_command_queue* handle() const noexcept {
+ return &command_queue_;
+ }
+
+private:
+ gptoss_metal_command_queue command_queue_{};
+};
+
+class CommandBuffer {
+public:
+ inline explicit CommandBuffer(const CommandQueue& command_queue) {
+ Check(gptoss_metal_command_buffer_create(command_queue.handle(), &command_buffer_),
+ "gptoss_metal_command_buffer_create");
+ }
+ inline ~CommandBuffer() {
+ gptoss_metal_command_buffer_release(&command_buffer_);
+ }
+
+ CommandBuffer(const CommandBuffer&) = delete;
+ CommandBuffer& operator=(const CommandBuffer&) = delete;
+
+ inline CommandBuffer(CommandBuffer&& other) noexcept {
+ command_buffer_ = other.command_buffer_;
+ std::memset(&other.command_buffer_, 0, sizeof(other.command_buffer_));
+ }
+
+ inline CommandBuffer& operator=(CommandBuffer&& other) noexcept {
+ if (this != &other) {
+ gptoss_metal_command_buffer_release(&command_buffer_);
+ command_buffer_ = other.command_buffer_;
+ std::memset(&other.command_buffer_, 0, sizeof(other.command_buffer_));
+ }
+ return *this;
+ }
+
+ inline void encode_launch_kernel(const Function& function,
+ const std::array<size_t, 3>& threadgroup_size,
+ const std::array<size_t, 3>& num_threadgroups,
+ size_t params_size, const void* params,
+ std::initializer_list<const Buffer*> device_buffers = {},
+ size_t threadgroup_buffer_size = 0)
+ {
+ std::vector<const gptoss_metal_buffer*> buffer_handles(device_buffers.size());
+ std::transform(device_buffers.begin(), device_buffers.end(), buffer_handles.begin(),
+ [](const Buffer* buffer) -> const gptoss_metal_buffer* { return buffer->handle(); });
+ Check(gptoss_metal_command_buffer_encode_launch_kernel(
+ &command_buffer_, function.handle(),
+ threadgroup_size[0], threadgroup_size[1], threadgroup_size[2],
+ num_threadgroups[0], num_threadgroups[1], num_threadgroups[2],
+ params_size, params,
+ buffer_handles.size(),
+ buffer_handles.data(),
+ /*buffer_offsets=*/nullptr,
+ threadgroup_buffer_size),
+ "gptoss_metal_command_buffer_encode_launch_kernel");
+ }
+
+ inline void encode_launch_f32_fill_random(const Function& f32_fill_random_fn,
+ size_t threadgroup_size,
+ size_t num_threadgroups,
+ const Buffer& output_buffer,
+ size_t output_offset,
+ size_t num_channels,
+ uint64_t rng_seed,
+ uint64_t rng_offset,
+ float rng_min,
+ float rng_max)
+ {
+ Check(gptoss_metal_command_buffer_encode_launch_f32_fill_random(
+ &command_buffer_, f32_fill_random_fn.handle(),
+ threadgroup_size, num_threadgroups,
+ output_buffer.handle(), output_offset,
+ num_channels,
+ rng_seed, rng_offset, rng_min, rng_max),
+ "gptoss_metal_command_buffer_encode_launch_f32_fill_random");
+ }
+
+ inline void encode_launch_bf16_fill_random(const Function& bf16_fill_random_fn,
+ size_t threadgroup_size,
+ size_t num_threadgroups,
+ const Buffer& output_buffer,
+ size_t output_offset,
+ size_t num_channels,
+ uint64_t rng_seed,
+ uint64_t rng_offset,
+ float rng_min,
+ float rng_max)
+ {
+ Check(gptoss_metal_command_buffer_encode_launch_bf16_fill_random(
+ &command_buffer_, bf16_fill_random_fn.handle(),
+ threadgroup_size, num_threadgroups,
+ output_buffer.handle(), output_offset,
+ num_channels,
+ rng_seed, rng_offset, rng_min, rng_max),
+ "gptoss_metal_command_buffer_encode_launch_bf16_fill_random");
+ }
+
+ inline void encode_launch_u32_fill_random(const Function& u32_fill_random_fn,
+ size_t threadgroup_size,
+ size_t num_threadgroups,
+ const Buffer& output_buffer,
+ size_t output_offset,
+ size_t num_channels,
+ uint64_t rng_seed,
+ uint64_t rng_offset)
+ {
+ Check(gptoss_metal_command_buffer_encode_launch_u32_fill_random(
+ &command_buffer_, u32_fill_random_fn.handle(),
+ threadgroup_size, num_threadgroups,
+ output_buffer.handle(), output_offset,
+ num_channels,
+ rng_seed, rng_offset),
+ "gptoss_metal_command_buffer_encode_launch_u32_fill_random");
+ }
+
+ inline void commit() {
+ Check(gptoss_metal_command_buffer_commit(&command_buffer_), "commit");
+ }
+
+ inline double wait_completion() {
+ double secs = 0.0;
+ Check(gptoss_metal_command_buffer_wait_completion(&command_buffer_, &secs), "wait completion");
+ return secs;
+ }
+
+ inline const gptoss_metal_command_buffer* handle() const noexcept { return &command_buffer_; }
+
+private:
+ gptoss_metal_command_buffer command_buffer_{};
+};
+
+} // namespace metal
+} // namespace gptoss
diff --git a/gpt_oss/metal/source/include/internal/model.h b/gpt_oss/metal/source/include/internal/model.h
new file mode 100644
index 0000000000000000000000000000000000000000..d27743b2b4643517c0673e89247f20326782fd28
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/model.h
@@ -0,0 +1,177 @@
+#pragma once
+
+#ifndef __cplusplus
+ #include <stdatomic.h>
+#endif
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "internal/metal.h"
+
+
+struct gptoss_tokenizer {
+#ifndef __cplusplus
+ atomic_uint_least64_t ref_count;
+#else
+ uint_least64_t ref_count;
+#endif
+
+ void* mapping_ptr;
+ size_t mapping_size;
+
+ const char* regex_ptr;
+ const char* tokens_ptr;
+
+ uint32_t num_text_tokens;
+ uint32_t num_special_tokens;
+
+ uint32_t special_token_id[gptoss_special_token_max - 1];
+};
+
+struct gptoss_model {
+#ifndef __cplusplus
+ atomic_uint_least64_t ref_count;
+#else
+ uint_least64_t ref_count;
+#endif
+
+ struct gptoss_tokenizer* tokenizer;
+
+ void* mapping_ptr;
+ size_t mapping_size;
+
+ uint32_t context_length;
+ uint32_t num_blocks;
+ uint32_t num_experts;
+ uint32_t num_active_experts;
+ uint32_t embedding_dim;
+ uint32_t mlp_dim;
+ float swiglu_limit;
+ uint32_t head_dim;
+ uint32_t num_heads;
+ uint32_t num_kv_heads;
+ uint32_t attention_window;
+ float rope_theta;
+ float interpolation_scale;
+ float yarn_offset;
+ float yarn_scale;
+ float yarn_multiplier;
+ float rmsnorm_epsilon;
+
+ uint32_t vocabulary_size;
+
+ bool lock_memory;
+
+ size_t weights_size;
+ size_t allocation_size;
+
+ // Metal objects
+ struct gptoss_metal_device device;
+ size_t max_threadgroups;
+ struct gptoss_metal_command_queue command_queue;
+ struct gptoss_metal_library library;
+ struct gptoss_metal_function bf16_f32_embeddings_fn;
+ struct gptoss_metal_function f32_bf16w_rmsnorm_fn;
+ struct gptoss_metal_function f32_bf16w_matmul_fn;
+ struct gptoss_metal_function f32_bf16w_matmul_qkv_fn;
+ struct gptoss_metal_function f32_bf16w_dense_matmul_qkv_fn;
+ struct gptoss_metal_function f32_bf16w_dense_matmul_attn_output_fn;
+ struct gptoss_metal_function f32_bf16w_dense_matmul_mlp_gate_fn;
+ struct gptoss_metal_function f32_bf16w_unembedding_fn;
+ struct gptoss_metal_function f32_rope_fn;
+ struct gptoss_metal_function f32_mf4w_moe_matmul_swiglu_fn;
+ struct gptoss_metal_function f32_mf4w_moe_matmul_fn;
+ struct gptoss_metal_function f32_accumulate_e4_fn;
+ struct gptoss_metal_function f32_scatter_e4_fn;
+ struct gptoss_metal_function f32_mf4w_moe_dense_matmul_swiglu_fn;
+ struct gptoss_metal_function f32_mf4w_moe_dense_matmul_fn;
+ struct gptoss_metal_function f32_gather_and_accumulate_e4_fn;
+ struct gptoss_metal_function f32_topk_softmax_e32_k4_fn;
+ struct gptoss_metal_function f32_topk_softmax_e128_k4_fn;
+ struct gptoss_metal_function f32_sdpa_q8_d64_fn;
+ struct gptoss_metal_function f32_softmax_fn;
+ struct gptoss_metal_function f32_sample_fn;
+
+ size_t per_block_shared_weights_size;
+ size_t per_expert_block_weight_size;
+
+ size_t embeddings_threadgroup_size;
+ size_t attn_qkv_threadgroup_size;
+ size_t attn_out_threadgroup_size;
+ size_t mlp_gate_threadgroup_size;
+ size_t mlp_swiglu_threadgroup_size;
+ size_t mlp_out_threadgroup_size;
+ size_t mlp_acc_threadgroup_size;
+ size_t unembedding_threadgroup_size;
+
+ size_t attn_rmsnorm_gain_offset;
+ size_t attn_qkv_weight_offset;
+ size_t attn_qkv_bias_offset;
+ size_t attn_sdpa_sink_offset;
+ size_t attn_out_weight_offset;
+ size_t attn_out_bias_offset;
+ size_t mlp_rmsnorm_gain_offset;
+ size_t mlp_gate_weight_offset;
+ size_t mlp_gate_bias_offset;
+ size_t mlp_swiglu_scale_offset;
+ size_t mlp_swiglu_bias_offset;
+ size_t mlp_out_block_offset;
+ size_t mlp_out_scale_offset;
+ size_t mlp_out_bias_offset;
+ size_t rmsnorm_weight_offset;
+ size_t unembedding_weight_offset;
+
+ // Buffer with non-MoE weights. Includes MoE gates, embeddings/unembeddings.
+ struct gptoss_metal_buffer shared_weight_buffer;
+ // num_blocks per-block buffers with MoE weights to follow.
+ struct gptoss_metal_buffer block_weight_buffers[];
+};
+
+#define GPTOSS_DEFAULT_BATCH_SIZE 128
+
+struct gptoss_context {
+#ifndef __cplusplus
+ atomic_uint_least64_t ref_count;
+#else
+ uint_least64_t ref_count;
+#endif
+
+ struct gptoss_model* model;
+ // Number of tokens processed in the context.
+ size_t num_tokens;
+ // Number of tokens in the KV cache.
+ size_t num_kv_tokens;
+ // Length of the context.
+ size_t max_tokens;
+ // Maximum number of tokens that can be processed in a single batch.
+ // Activation buffers are allocated with this size.
+ size_t max_batch_tokens;
+
+
+ size_t kvcache_size;
+ size_t allocation_size;
+
+ // Activation buffers.
+ // TODO: merge into a single buffer.
+ struct gptoss_metal_buffer residual_activation_buffer; // Residual stream
+ struct gptoss_metal_buffer rmsnorm_activation_buffer; // Both attention & MLP RMSNorm output
+ struct gptoss_metal_buffer qkv_activation_buffer; // QKV projection output
+ struct gptoss_metal_buffer sdpa_activation_buffer; // SDPA output
+ struct gptoss_metal_buffer gate_activation_buffer; // MoE gating output
+ struct gptoss_metal_buffer expert_activation_buffer; // MoE expert predictions
+ struct gptoss_metal_buffer expert_offset_buffer; // MoE expert histograms cumsum
+ struct gptoss_metal_buffer token_to_expert_routing_buffer; // MoE token to expert routing
+ struct gptoss_metal_buffer swiglu_input_buffer; // MLP+SwiGLU input for prefill.
+ struct gptoss_metal_buffer swiglu_activation_buffer; // MLP+SwiGLU output
+ struct gptoss_metal_buffer moe_activation_buffer; // MoE MLP output (per-active expert)
+
+ // Input/output buffers.
+ struct gptoss_metal_buffer control_buffer;
+ struct gptoss_metal_buffer token_buffer; // uint32 token IDs
+ struct gptoss_metal_buffer score_buffer; // unembedding outputs
+ struct gptoss_metal_buffer prob_buffer;
+ struct gptoss_metal_buffer sum_buffer;
+ struct gptoss_metal_buffer argmax_buffer;
+ struct gptoss_metal_buffer kvcache_buffer;
+};
diff --git a/gpt_oss/metal/source/include/internal/rng.h b/gpt_oss/metal/source/include/internal/rng.h
new file mode 100644
index 0000000000000000000000000000000000000000..048b8b7d49764f86bb7ee6b4d36ef99eba6f2884
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/rng.h
@@ -0,0 +1,24 @@
+#pragma once
+
+#include <stdint.h>
+
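+// Counter-based "Squares" RNG (Widynski): the (offset, seed) pair seeds a Weyl-sequence value that is mixed through four squaring rounds; the high 32 bits are returned.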
+inline static uint32_t rng_squares32(uint64_t offset, uint64_t seed) {
+ const uint64_t y = offset * seed;
+ const uint64_t z = y + seed;
+
+ /* Round 1 */
+ uint64_t x = y * y + y;
+ x = (x >> 32) | (x << 32);
+
+ /* Round 2 */
+ x = x * x + z;
+ x = (x >> 32) | (x << 32);
+
+ /* Round 3 */
+ x = x * x + y;
+ x = (x >> 32) | (x << 32);
+
+ /* Round 4 */
+ x = x * x + z;
+ return (uint32_t) (x >> 32);
+}
diff --git a/gpt_oss/metal/source/include/internal/rng.hpp b/gpt_oss/metal/source/include/internal/rng.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e327a3398004c4bb9e37ff11a6a545712787a7c4
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/rng.hpp
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <cstdint>
+
+namespace gptoss {
+
+namespace rng {
+
+inline static std::uint32_t squares32(std::uint64_t offset, std::uint64_t seed) {
+ const std::uint64_t y = offset * seed;
+ const std::uint64_t z = y + seed;
+
+ /* Round 1 */
+ std::uint64_t x = y * y + y;
+ x = (x >> 32) | (x << 32);
+
+ /* Round 2 */
+ x = x * x + z;
+ x = (x >> 32) | (x << 32);
+
+ /* Round 3 */
+ x = x * x + y;
+ x = (x >> 32) | (x << 32);
+
+ /* Round 4 */
+ x = x * x + z;
+ return static_cast<std::uint32_t>(x >> 32);
+}
+
+} // namespace rng
+
+} // namespace gptoss
diff --git a/gpt_oss/metal/source/include/internal/storage.h b/gpt_oss/metal/source/include/internal/storage.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc7ed7b4fee10cb407b78224db9d4b1ea8445625
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/storage.h
@@ -0,0 +1,36 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
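+// Headers describing the on-disk gpt-oss weights/tokenizer file format.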
+struct gptoss_file_header {
+ char magic[12];
+ uint32_t zero;
+};
+
+struct gptoss_gptoss_model_header {
+ uint32_t context_length;
+ uint32_t num_blocks;
+ uint32_t num_experts;
+ uint32_t num_active_experts;
+ uint32_t embedding_dim;
+ uint32_t mlp_dim;
+ float swiglu_limit;
+ uint32_t head_dim;
+ uint32_t num_heads;
+ uint32_t num_kv_heads;
+ uint32_t attention_window;
+ float rope_theta;
+ float interpolation_scale;
+ float yarn_offset;
+ float yarn_scale;
+ float yarn_multiplier;
+ float rmsnorm_epsilon;
+};
+
+struct gptoss_tiktoken_tokenizer_header {
+ uint32_t num_special_tokens;
+ uint32_t num_text_tokens;
+ uint32_t regex_size;
+ uint32_t tokens_size;
+};
diff --git a/gpt_oss/metal/source/include/internal/uuid.h b/gpt_oss/metal/source/include/internal/uuid.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f52f8a425b32d2fc3d9b1edf1354e18213eb1b2
--- /dev/null
+++ b/gpt_oss/metal/source/include/internal/uuid.h
@@ -0,0 +1,114 @@
+#pragma once
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "internal/macros.h"
+
+
+struct GPTOSS_DENSELY_PACKED_STRUCTURE gptoss_uuid {
+ uint8_t bytes[16];
+};
+static_assert(sizeof(struct gptoss_uuid) == 16, "UUID size is not 16 bytes");
+
+
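+// printf-style helpers: UUID_FORMAT is the format string for the canonical 8-4-4-4-12 form, and UUID_ARGS expands to the 16 byte arguments.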
+#define UUID_FORMAT "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X"
+#define UUID_ARGS(uuid) (uuid).bytes[0], (uuid).bytes[1], (uuid).bytes[2], (uuid).bytes[3], \
+ (uuid).bytes[4], (uuid).bytes[5], (uuid).bytes[6], (uuid).bytes[7], (uuid).bytes[8], (uuid).bytes[9], \
+ (uuid).bytes[10], (uuid).bytes[11], (uuid).bytes[12], (uuid).bytes[13], (uuid).bytes[14], (uuid).bytes[15]
+
+static inline bool gptoss_is_gptoss_model_uuid(const struct gptoss_uuid* uuid) {
+ return memcmp(
+ &(struct gptoss_uuid) {0xDF, 0x52, 0xDC, 0x86, 0x17, 0x89, 0x4E, 0xD0, 0xA2, 0x95, 0x66, 0xF1, 0x05, 0x08, 0x14, 0x5B},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0;
+}
+
+static inline bool gptoss_is_applegpu_layout_uuid(const struct gptoss_uuid* uuid) {
+ return memcmp(
+ &(struct gptoss_uuid) {0x22, 0x91, 0x77, 0xA8, 0x57, 0x75, 0x42, 0x68, 0xBF, 0xD8, 0xD5, 0x88, 0xB3, 0x51, 0xC5, 0x6D},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0;
+}
+
+static inline bool gptoss_is_tiktoken_tokenizer_uuid(const struct gptoss_uuid* uuid) {
+ return memcmp(
+ &(struct gptoss_uuid) {0x74, 0x01, 0xAD, 0xED, 0x2A, 0x95, 0x40, 0xCB, 0xB7, 0x82, 0x9C, 0xCE, 0xBA, 0xAF, 0xE7, 0x2B},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0;
+}
+
+static inline enum gptoss_special_token gptoss_special_token_decode_uuid(const struct gptoss_uuid* uuid) {
+ if (memcmp(
+ &(struct gptoss_uuid) {0x55, 0xA7, 0x7C, 0x2F, 0x8A, 0x01, 0x4C, 0x54, 0x8A, 0xC2, 0x31, 0x3B, 0xFC, 0x7E, 0x20, 0x8D},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_start;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0x16, 0xE4, 0x04, 0x31, 0xF4, 0x7F, 0x4B, 0x22, 0xB5, 0x9B, 0x8B, 0x27, 0x8F, 0xC3, 0x0A, 0x54},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_message;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0xFC, 0xAC, 0x2F, 0x6D, 0x47, 0x05, 0x4F, 0x6B, 0xB2, 0x28, 0x64, 0x2A, 0xCC, 0xAC, 0x72, 0x38},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_end;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0xF7, 0x99, 0xFF, 0x69, 0x19, 0x92, 0x43, 0xC4, 0xA3, 0xD8, 0xD8, 0x31, 0xF4, 0x75, 0xDC, 0x75},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_return;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0xE1, 0x5B, 0xA7, 0x02, 0x28, 0xC4, 0x42, 0x92, 0xAB, 0x8F, 0xFF, 0xA4, 0x34, 0x70, 0x91, 0x28},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_refusal;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0xC0, 0xBB, 0x14, 0xC7, 0x60, 0x22, 0x49, 0xDA, 0xAD, 0x08, 0x79, 0x2D, 0x67, 0xE8, 0xB4, 0x70},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_constrain;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0xFD, 0x3D, 0xDA, 0x11, 0xC8, 0xAB, 0x40, 0x33, 0x87, 0x6E, 0xD9, 0x3D, 0xEB, 0x17, 0x2C, 0x93},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_channel;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0x12, 0x20, 0xF7, 0x96, 0xE3, 0x88, 0x4D, 0xE5, 0xB4, 0x87, 0xFE, 0x2E, 0xB5, 0xFE, 0x03, 0xC0},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_call;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0x07, 0xD7, 0xDA, 0x55, 0xB3, 0x46, 0x4C, 0xFF, 0x8B, 0x37, 0x7C, 0xEF, 0xAC, 0xF8, 0xA3, 0xE8},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_untrusted;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0xF2, 0x65, 0xBD, 0x9C, 0xC7, 0x17, 0x46, 0x9E, 0xA4, 0x47, 0x92, 0x06, 0x87, 0xD6, 0x5D, 0x90},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ return gptoss_special_token_end_untrusted;
+ } else if (memcmp(
+ &(struct gptoss_uuid) {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ uuid,
+ sizeof(struct gptoss_uuid)) == 0)
+ {
+ // Suppress warning
+ return gptoss_special_token_invalid;
+ } else {
+ GPTOSS_LOG_WARNING("unsupported special token " UUID_FORMAT, UUID_ARGS(*uuid));
+ return gptoss_special_token_invalid;
+ }
+}
diff --git a/gpt_oss/metal/source/log.c b/gpt_oss/metal/source/log.c
new file mode 100644
index 0000000000000000000000000000000000000000..ff7b2375eb97b4a0c1ca8bd28fdebf121c69c725
--- /dev/null
+++ b/gpt_oss/metal/source/log.c
@@ -0,0 +1,50 @@
+#include <assert.h>  // assert
+#include <stdarg.h>  // va_list, va_copy, va_end
+#include <stdio.h>  // vsnprintf
+#include